From 6fb45515400488b32ac970bbd9f6f51023259b9f Mon Sep 17 00:00:00 2001
From: Bagus Indrayana
Date: Wed, 12 Jul 2023 07:49:23 +0800
Subject: refactor/move provider from testing folder

---
 testing/aiservice/AiService.py | 62 ------------------------------------------
 testing/aiservice/README.md    |  2 --
 testing/aiservice/testing.py   | 30 --------------------
 3 files changed, 94 deletions(-)
 delete mode 100644 testing/aiservice/AiService.py
 delete mode 100644 testing/aiservice/README.md
 delete mode 100644 testing/aiservice/testing.py

(limited to 'testing/aiservice')

diff --git a/testing/aiservice/AiService.py b/testing/aiservice/AiService.py
deleted file mode 100644
index 287a39ef..00000000
--- a/testing/aiservice/AiService.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import os,sys
-import requests
-# from ...typing import get_type_hints
-
-url = "https://aiservice.vercel.app/api/chat/answer"
-model = ['gpt-3.5-turbo']
-supports_stream = False
-needs_auth = False
-
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-    base = ''
-    for message in messages:
-        base += '%s: %s\n' % (message['role'], message['content'])
-    base += 'assistant:'
-
-    headers = {
-        "accept": "*/*",
-        "content-type": "text/plain;charset=UTF-8",
-        "sec-fetch-dest": "empty",
-        "sec-fetch-mode": "cors",
-        "sec-fetch-site": "same-origin",
-        "Referer": "https://aiservice.vercel.app/chat",
-    }
-    data = {
-        "input": base
-    }
-    response = requests.post(url, headers=headers, json=data)
-    if response.status_code == 200:
-        _json = response.json()
-        yield _json['data']
-    else:
-        print(f"Error Occurred::{response.status_code}")
-        return None
-
-
-
-# params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-#     '(%s)' % ', '.join(
-#         [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
-
-
-# Temporary For ChatCompletion Class
-class ChatCompletion:
-    @staticmethod
-    def create(model: str, messages: list, provider: None or str, stream: bool = False, auth: str = False, **kwargs):
-        kwargs['auth'] = auth
-
-        if provider and needs_auth and not auth:
-            print(
-                f'ValueError: {provider} requires authentication (use auth="cookie or token or jwt ..." param)', file=sys.stderr)
-            sys.exit(1)
-
-        try:
-            return (_create_completion(model, messages, stream, **kwargs)
-                    if stream else ''.join(_create_completion(model, messages, stream, **kwargs)))
-        except TypeError as e:
-            print(e)
-            arg: str = str(e).split("'")[1]
-            print(
-                f"ValueError: {provider} does not support '{arg}' argument", file=sys.stderr)
-            sys.exit(1)
\ No newline at end of file
diff --git a/testing/aiservice/README.md b/testing/aiservice/README.md
deleted file mode 100644
index 83b06481..00000000
--- a/testing/aiservice/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-https://github.com/xtekky/gpt4free/issues/40#issuecomment-1629152431
-probably gpt-3.5
\ No newline at end of file
diff --git a/testing/aiservice/testing.py b/testing/aiservice/testing.py
deleted file mode 100644
index 5cb6c5ef..00000000
--- a/testing/aiservice/testing.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from AiService import ChatCompletion
-
-# Test 1
-response = ChatCompletion.create(model="gpt-3.5-turbo",
-                                 provider="AiService",
-                                 stream=False,
-                                 messages=[{'role': 'user', 'content': 'who are you?'}])
-
-print(response)
-
-# Test 2
-response = ChatCompletion.create(model="gpt-3.5-turbo",
-                                 provider="AiService",
-                                 stream=False,
-                                 messages=[{'role': 'user', 'content': 'what you can do?'}])
-
-print(response)
-
-
-# Test 3
-response = ChatCompletion.create(model="gpt-3.5-turbo",
-                                 provider="AiService",
-                                 stream=False,
-                                 messages=[
-                                     {'role': 'user', 'content': 'now your name is Bob'},
-                                     {'role': 'assistant', 'content': 'Hello Im Bob, you asistant'},
-                                     {'role': 'user', 'content': 'what your name again?'},
-                                 ])
-
-print(response)
\ No newline at end of file
--
cgit v1.2.3
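
The deleted testing.py drove the provider through the temporary ChatCompletion shim defined at the bottom of AiService.py. After this refactor the same request would presumably be made through the project's main entry point instead; a minimal sketch, assuming the provider ends up importable as g4f.Provider.AiService (the destination path is not part of this diff):

    import g4f

    # Rough equivalent of "Test 1" from the deleted testing.py, routed through
    # the main g4f interface rather than the temporary ChatCompletion shim.
    # Assumes the moved provider is exposed as g4f.Provider.AiService.
    response = g4f.ChatCompletion.create(
        model='gpt-3.5-turbo',
        provider=g4f.Provider.AiService,
        stream=False,
        messages=[{'role': 'user', 'content': 'who are you?'}],
    )

    print(response)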