author | xtekky <98614666+xtekky@users.noreply.github.com> | 2023-07-16 19:50:00 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-07-16 19:50:00 +0200 |
commit | 821c8dcd470456143deaa34ff4edb1ae05f0f147 (patch) | |
tree | b5e6ff34b32d6c29ea6890f06d903daebf94113c /g4f/Provider/Providers | |
parent | Merge pull request #742 from dikos1337/fix-anchors (diff) | |
parent | refactor/move provider from testing folder (diff) | |
Diffstat
-rw-r--r-- | g4f/Provider/Providers/AiService.py | 40 |
-rw-r--r-- | g4f/Provider/Providers/BingHuan.py | 27 |
-rw-r--r-- | g4f/Provider/Providers/Wewordle.py (renamed from testing/wewordle/Wewordle.py) | 32 |
-rw-r--r-- | g4f/Provider/Providers/helpers/binghuan.py (renamed from testing/binghuan/helpers/binghuan.py) | 0 |
4 files changed, 71 insertions, 28 deletions
```diff
diff --git a/g4f/Provider/Providers/AiService.py b/g4f/Provider/Providers/AiService.py
new file mode 100644
index 00000000..8d475118
--- /dev/null
+++ b/g4f/Provider/Providers/AiService.py
@@ -0,0 +1,40 @@
+import os,sys
+import requests
+from ...typing import get_type_hints
+
+url = "https://aiservice.vercel.app/api/chat/answer"
+model = ['gpt-3.5-turbo']
+supports_stream = False
+needs_auth = False
+
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    base = ''
+    for message in messages:
+        base += '%s: %s\n' % (message['role'], message['content'])
+    base += 'assistant:'
+
+    headers = {
+        "accept": "*/*",
+        "content-type": "text/plain;charset=UTF-8",
+        "sec-fetch-dest": "empty",
+        "sec-fetch-mode": "cors",
+        "sec-fetch-site": "same-origin",
+        "Referer": "https://aiservice.vercel.app/chat",
+    }
+    data = {
+        "input": base
+    }
+    response = requests.post(url, headers=headers, json=data)
+    if response.status_code == 200:
+        _json = response.json()
+        yield _json['data']
+    else:
+        print(f"Error Occurred::{response.status_code}")
+        return None
+
+
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join(
+        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
```
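For reference, a minimal sketch of how the new AiService module could be exercised on its own. The import path mirrors the file location above, and the `messages` list is an illustrative example; whether the module is importable this way depends on the package's `__init__` files, and in the repository providers are normally driven through g4f's ChatCompletion layer rather than called directly.

```python
# Hypothetical direct call into the AiService provider added in this commit.
# _create_completion is a generator; since supports_stream is False here,
# the whole reply arrives as a single yielded chunk.
from g4f.Provider.Providers import AiService  # assumes package __init__ exposes the module

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Say hello."},
]

for chunk in AiService._create_completion(model="gpt-3.5-turbo", messages=messages, stream=False):
    print(chunk)
```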
```diff
diff --git a/g4f/Provider/Providers/BingHuan.py b/g4f/Provider/Providers/BingHuan.py
new file mode 100644
index 00000000..7344a342
--- /dev/null
+++ b/g4f/Provider/Providers/BingHuan.py
@@ -0,0 +1,27 @@
+import os,sys
+import json
+import subprocess
+from ...typing import sha256, Dict, get_type_hints
+
+url = 'https://b.ai-huan.xyz'
+model = ['gpt-3.5-turbo', 'gpt-4']
+supports_stream = True
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, **kwargs):
+    path = os.path.dirname(os.path.realpath(__file__))
+    config = json.dumps({
+        'messages': messages,
+        'model': model}, separators=(',', ':'))
+    cmd = ['python', f'{path}/helpers/binghuan.py', config]
+
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
+    for line in iter(p.stdout.readline, b''):
+        yield line.decode('cp1252')
+
+
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join(
+        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
```
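BingHuan differs from AiService in that it shells out to the `helpers/binghuan.py` script and streams whatever that subprocess prints, line by line. A rough sketch of consuming that stream, under the same assumptions as above (direct module-level use, illustrative `messages`):

```python
# Hypothetical streaming consumption of the BingHuan provider added in this commit.
# Each iteration yields one decoded line of output from the helper subprocess.
from g4f.Provider.Providers import BingHuan  # assumes package __init__ exposes the module

messages = [{"role": "user", "content": "Write one sentence about the sea."}]

for token in BingHuan._create_completion(model="gpt-4", messages=messages, stream=True):
    print(token, end="", flush=True)
```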
```diff
diff --git a/testing/wewordle/Wewordle.py b/g4f/Provider/Providers/Wewordle.py
index 0d79c5c7..95966fbd 100644
--- a/testing/wewordle/Wewordle.py
+++ b/g4f/Provider/Providers/Wewordle.py
@@ -4,7 +4,7 @@ import json
 import random
 import time
 import string
-# from ...typing import sha256, Dict, get_type_hints
+from ...typing import sha256, Dict, get_type_hints
 
 url = "https://wewordle.org/gptapi/v1/android/turbo"
 model = ['gpt-3.5-turbo']
@@ -68,30 +68,6 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
         print(f"Error Occurred::{response.status_code}")
         return None
 
-# params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-#     '(%s)' % ', '.join(
-#         [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
-
-
-# Temporary For ChatCompletion Class
-class ChatCompletion:
-    @staticmethod
-    def create(model: str, messages: list, provider: None or str, stream: bool = False, auth: str = False, **kwargs):
-        kwargs['auth'] = auth
-
-        if provider and needs_auth and not auth:
-            print(
-                f'ValueError: {provider} requires authentication (use auth="cookie or token or jwt ..." param)', file=sys.stderr)
-            sys.exit(1)
-
-        try:
-
-
-            return (_create_completion(model, messages, stream, **kwargs)
-                    if stream else ''.join(_create_completion(model, messages, stream, **kwargs)))
-        except TypeError as e:
-            print(e)
-            arg: str = str(e).split("'")[1]
-            print(
-                f"ValueError: {provider} does not support '{arg}' argument", file=sys.stderr)
-            sys.exit(1)
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join(
+        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
```
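The `params` line restored at the bottom of Wewordle.py (and repeated in the other two providers) simply introspects `_create_completion`'s signature to build a human-readable capability string. A self-contained illustration of the same pattern, using a stand-in function rather than the real provider module:

```python
# Stand-in demonstration of the params introspection used by the providers above.
from typing import get_type_hints

def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    yield "..."

# Mirrors the provider code: list the positional parameter names and their
# annotated types; **kwargs is excluded because co_argcount counts only
# positional parameters.
hints = get_type_hints(_create_completion)
arg_names = _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]
params = 'supports: (%s)' % ', '.join(f"{name}: {hints[name].__name__}" for name in arg_names)
print(params)  # -> supports: (model: str, messages: list, stream: bool)
```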
```diff
diff --git a/testing/binghuan/helpers/binghuan.py b/g4f/Provider/Providers/helpers/binghuan.py
index 203bbe45..203bbe45 100644
--- a/testing/binghuan/helpers/binghuan.py
+++ b/g4f/Provider/Providers/helpers/binghuan.py
```