-rw-r--r-- | testing/aiservice/AiService.py | 62 |
-rw-r--r-- | testing/aiservice/README.md    |  2 |
-rw-r--r-- | testing/aiservice/testing.py   | 30 |
3 files changed, 94 insertions, 0 deletions
diff --git a/testing/aiservice/AiService.py b/testing/aiservice/AiService.py
new file mode 100644
index 00000000..287a39ef
--- /dev/null
+++ b/testing/aiservice/AiService.py
@@ -0,0 +1,62 @@
+import os,sys
+import requests
+# from ...typing import get_type_hints
+
+url = "https://aiservice.vercel.app/api/chat/answer"
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    base = ''
+    for message in messages:
+        base += '%s: %s\n' % (message['role'], message['content'])
+    base += 'assistant:'
+
+    headers = {
+        "accept": "*/*",
+        "content-type": "text/plain;charset=UTF-8",
+        "sec-fetch-dest": "empty",
+        "sec-fetch-mode": "cors",
+        "sec-fetch-site": "same-origin",
+        "Referer": "https://aiservice.vercel.app/chat",
+    }
+    data = {
+        "input": base
+    }
+    response = requests.post(url, headers=headers, json=data)
+    if response.status_code == 200:
+        _json = response.json()
+        yield _json['data']
+    else:
+        print(f"Error Occurred::{response.status_code}")
+        return None
+
+
+
+# params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+#     '(%s)' % ', '.join(
+#         [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
+
+
+# Temporary For ChatCompletion Class
+class ChatCompletion:
+    @staticmethod
+    def create(model: str, messages: list, provider: None or str, stream: bool = False, auth: str = False, **kwargs):
+        kwargs['auth'] = auth
+
+        if provider and needs_auth and not auth:
+            print(
+                f'ValueError: {provider} requires authentication (use auth="cookie or token or jwt ..." param)', file=sys.stderr)
+            sys.exit(1)
+
+        try:
+            return (_create_completion(model, messages, stream, **kwargs)
+                    if stream else ''.join(_create_completion(model, messages, stream, **kwargs)))
+        except TypeError as e:
+            print(e)
+            arg: str = str(e).split("'")[1]
+            print(
+                f"ValueError: {provider} does not support '{arg}' argument", file=sys.stderr)
+            sys.exit(1)
\ No newline at end of file
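For context, the sketch below shows the raw HTTP call that `_create_completion` wraps. The payload shape, headers, and response field are assumptions taken from the provider code above; the aiservice.vercel.app endpoint itself is undocumented, so treat this as a minimal illustration rather than a reference for the API.

```python
# Minimal sketch of the request AiService.py sends; endpoint behaviour is
# assumed from the provider code above, not from official documentation.
import requests

url = "https://aiservice.vercel.app/api/chat/answer"

# Messages are flattened into "role: content" lines, ending with "assistant:".
prompt = "user: who are you?\nassistant:"

headers = {
    "accept": "*/*",
    "content-type": "text/plain;charset=UTF-8",
    "Referer": "https://aiservice.vercel.app/chat",
}

response = requests.post(url, headers=headers, json={"input": prompt})
response.raise_for_status()
print(response.json()["data"])  # the generated answer, per _create_completion
```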
diff --git a/testing/aiservice/README.md b/testing/aiservice/README.md
new file mode 100644
index 00000000..83b06481
--- /dev/null
+++ b/testing/aiservice/README.md
@@ -0,0 +1,2 @@
+https://github.com/xtekky/gpt4free/issues/40#issuecomment-1629152431
+probably gpt-3.5
\ No newline at end of file
diff --git a/testing/aiservice/testing.py b/testing/aiservice/testing.py
new file mode 100644
index 00000000..5cb6c5ef
--- /dev/null
+++ b/testing/aiservice/testing.py
@@ -0,0 +1,30 @@
+from AiService import ChatCompletion
+
+# Test 1
+response = ChatCompletion.create(model="gpt-3.5-turbo",
+                                 provider="AiService",
+                                 stream=False,
+                                 messages=[{'role': 'user', 'content': 'who are you?'}])
+
+print(response)
+
+# Test 2
+response = ChatCompletion.create(model="gpt-3.5-turbo",
+                                 provider="AiService",
+                                 stream=False,
+                                 messages=[{'role': 'user', 'content': 'what you can do?'}])
+
+print(response)
+
+
+# Test 3
+response = ChatCompletion.create(model="gpt-3.5-turbo",
+                                 provider="AiService",
+                                 stream=False,
+                                 messages=[
+                                     {'role': 'user', 'content': 'now your name is Bob'},
+                                     {'role': 'assistant', 'content': 'Hello Im Bob, you asistant'},
+                                     {'role': 'user', 'content': 'what your name again?'},
+                                 ])
+
+print(response)
\ No newline at end of file
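One point worth noting about the temporary `ChatCompletion` class in AiService.py: `create` returns a generator when `stream=True` and a joined string when `stream=False`. The sketch below only illustrates handling both return types; `AiService` declares `supports_stream = False`, so the streaming branch is shown as a usage pattern, not as a feature this provider supports.

```python
# Sketch of consuming both return types of ChatCompletion.create
# (str vs. generator). AiService sets supports_stream = False, so the
# stream=True branch is illustrative only.
from AiService import ChatCompletion

messages = [{'role': 'user', 'content': 'hello'}]

# stream=False: a plain string is returned
text = ChatCompletion.create(model="gpt-3.5-turbo", provider="AiService",
                             stream=False, messages=messages)
print(text)

# stream=True: a generator of chunks is returned
for chunk in ChatCompletion.create(model="gpt-3.5-turbo", provider="AiService",
                                   stream=True, messages=messages):
    print(chunk, end="")
```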