From 1e0b09b8d5aae81afa9028b8197256cc71549934 Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Wed, 1 Nov 2023 22:27:35 -0300
Subject: Update __init__.py

---
 g4f/api/__init__.py | 341 ++++++++++++++++++++++------------------------------
 1 file changed, 141 insertions(+), 200 deletions(-)

diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index fec5606f..6991044a 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -1,227 +1,168 @@
-import typing
-from .. import BaseProvider
-import g4f; g4f.debug.logging = True
-import time
-import json
-import random
-import string
-import logging
-
-from typing import Union
-from loguru import logger
-from waitress import serve
-from ._logging import hook_logging
-from ._tokenizer import tokenize
-from flask_cors import CORS
-from werkzeug.serving import WSGIRequestHandler
-from werkzeug.exceptions import default_exceptions
-from werkzeug.middleware.proxy_fix import ProxyFix
-
-from flask import (
-    Flask,
-    jsonify,
-    make_response,
-    request,
-)
-
-class Api:
-    __default_ip = '127.0.0.1'
-    __default_port = 1337
-
-    def __init__(self, engine: g4f, debug: bool = True, sentry: bool = False,
-                 list_ignored_providers:typing.List[typing.Union[str, BaseProvider]]=None) -> None:
-        self.engine = engine
-        self.debug = debug
-        self.sentry = sentry
-        self.list_ignored_providers = list_ignored_providers
-        self.log_level = logging.DEBUG if debug else logging.WARN
-
-        hook_logging(level=self.log_level, format='[%(asctime)s] %(levelname)s in %(module)s: %(message)s')
-        self.logger = logging.getLogger('waitress')
-
-        self.app = Flask(__name__)
-        self.app.wsgi_app = ProxyFix(self.app.wsgi_app, x_port=1)
-        self.app.after_request(self.__after_request)
-
-    def run(self, bind_str, threads=8):
-        host, port = self.__parse_bind(bind_str)
-
-        CORS(self.app, resources={r'/v1/*': {'supports_credentials': True, 'expose_headers': [
-            'Content-Type',
-            'Authorization',
-            'X-Requested-With',
-            'Accept',
-            'Origin',
-            'Access-Control-Request-Method',
-            'Access-Control-Request-Headers',
-            'Content-Disposition'], 'max_age': 600}})
-
-        self.app.route('/v1/models', methods=['GET'])(self.models)
-        self.app.route('/v1/models/<model_name>', methods=['GET'])(self.model_info)
-
-        self.app.route('/v1/chat/completions', methods=['POST'])(self.chat_completions)
-        self.app.route('/v1/completions', methods=['POST'])(self.completions)
-
-        for ex in default_exceptions:
-            self.app.register_error_handler(ex, self.__handle_error)
-
-        if not self.debug:
-            self.logger.warning(f'Serving on http://{host}:{port}')
-
-        WSGIRequestHandler.protocol_version = 'HTTP/1.1'
-        serve(self.app, host=host, port=port, ident=None, threads=threads)
-
-    def __handle_error(self, e: Exception):
-        self.logger.error(e)
-
-        return make_response(jsonify({
-            'code': e.code,
-            'message': str(e.original_exception if self.debug and hasattr(e, 'original_exception') else e.name)}), 500)
-
-    @staticmethod
-    def __after_request(resp):
-        resp.headers['X-Server'] = f'g4f/{g4f.version}'
-
-        return resp
-
-    def __parse_bind(self, bind_str):
-        sections = bind_str.split(':', 2)
-        if len(sections) < 2:
-            try:
-                port = int(sections[0])
-                return self.__default_ip, port
-            except ValueError:
-                return sections[0], self.__default_port
-
-        return sections[0], int(sections[1])
-
-    async def home(self):
-        return 'Hello world | https://127.0.0.1:1337/v1'
-
-    async def chat_completions(self):
-        model = request.json.get('model', 'gpt-3.5-turbo')
-        stream = request.json.get('stream', False)
-        messages = request.json.get('messages')
-
-        logger.info(f'model: {model}, stream: {stream}, request: {messages[-1]["content"]}')
-
-        config = None
-        proxy = None
-
-        try:
-            config = json.load(open("config.json","r",encoding="utf-8"))
-            proxy = config["proxy"]
-
-        except Exception:
-            pass
-
-        if proxy != None:
-            response = self.engine.ChatCompletion.create(model=model,
-                stream=stream, messages=messages,
-                ignored=self.list_ignored_providers,
-                proxy=proxy)
-        else:
-            response = self.engine.ChatCompletion.create(model=model,
-                stream=stream, messages=messages,
-                ignored=self.list_ignored_providers)
-
-        completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
-        completion_timestamp = int(time.time())
-
-        if not stream:
-            prompt_tokens, _ = tokenize(''.join([message['content'] for message in messages]))
-            completion_tokens, _ = tokenize(response)
-
-            return {
-                'id': f'chatcmpl-{completion_id}',
-                'object': 'chat.completion',
-                'created': completion_timestamp,
-                'model': model,
-                'choices': [
-                    {
-                        'index': 0,
-                        'message': {
-                            'role': 'assistant',
-                            'content': response,
-                        },
-                        'finish_reason': 'stop',
-                    }
-                ],
-                'usage': {
-                    'prompt_tokens': prompt_tokens,
-                    'completion_tokens': completion_tokens,
-                    'total_tokens': prompt_tokens + completion_tokens,
-                },
-            }
-
-        def streaming():
-            try:
-                for chunk in response:
-                    completion_data = {
-                        'id': f'chatcmpl-{completion_id}',
-                        'object': 'chat.completion.chunk',
-                        'created': completion_timestamp,
-                        'model': model,
-                        'choices': [
-                            {
-                                'index': 0,
-                                'delta': {
-                                    'content': chunk,
-                                },
-                                'finish_reason': None,
-                            }
-                        ],
-                    }
-
-                    content = json.dumps(completion_data, separators=(',', ':'))
-                    yield f'data: {content}\n\n'
-                    time.sleep(0.03)
-
-                end_completion_data = {
-                    'id': f'chatcmpl-{completion_id}',
-                    'object': 'chat.completion.chunk',
-                    'created': completion_timestamp,
-                    'model': model,
-                    'choices': [
-                        {
-                            'index': 0,
-                            'delta': {},
-                            'finish_reason': 'stop',
-                        }
-                    ],
-                }
-
-                content = json.dumps(end_completion_data, separators=(',', ':'))
-                yield f'data: {content}\n\n'
-
-                logger.success(f'model: {model}, stream: {stream}')
-
-            except GeneratorExit:
-                pass
-
-        return self.app.response_class(streaming(), mimetype='text/event-stream')
-
-    async def completions(self):
-        return 'not working yet', 500
-
-    async def model_info(self, model_name):
-        model_info = (g4f.ModelUtils.convert[model_name])
-
-        return jsonify({
-            'id' : model_name,
-            'object' : 'model',
-            'created' : 0,
-            'owned_by' : model_info.base_provider
-        })
-
-    async def models(self):
-        model_list = [{
-            'id' : model,
-            'object' : 'model',
-            'created' : 0,
-            'owned_by' : 'g4f'} for model in g4f.Model.__all__()]
-
-        return jsonify({
-            'object': 'list',
-            'data': model_list})
-    
\ No newline at end of file
+from fastapi import FastAPI, Response, Request
+from fastapi.middleware.cors import CORSMiddleware
+from typing import List, Union, Any, Dict, AnyStr
+from ._tokenizer import tokenize
+import sqlite3
+import g4f
+import time
+import json
+import random
+import string
+import uvicorn
+import nest_asyncio
+
+app = FastAPI()
+nest_asyncio.apply()
+
+origins = [
+    "http://localhost",
+    "http://localhost:1337",
+]
+
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=origins,
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+JSONObject = Dict[AnyStr, Any]
+JSONArray = List[Any]
+JSONStructure = Union[JSONArray, JSONObject]
+
+@app.get("/")
+async def read_root():
+    return Response(content=json.dumps({"info": "G4F API"}, indent=4), media_type="application/json")
+
+@app.get("/v1")
+async def read_root_v1():
+    return Response(content=json.dumps({"info": "Go to /v1/chat/completions or /v1/models."}, indent=4), media_type="application/json")
+
+@app.get("/v1/models")
+async def models():
+    model_list = [{
+        'id': model,
+        'object': 'model',
+        'created': 0,
+        'owned_by': 'g4f'} for model in g4f.Model.__all__()]
+
+    return Response(content=json.dumps({
+        'object': 'list',
+        'data': model_list}, indent=4), media_type="application/json")
+
+@app.get("/v1/models/{model_name}")
+async def model_info(model_name: str):
+    try:
+        model_info = (g4f.ModelUtils.convert[model_name])
+
+        return Response(content=json.dumps({
+            'id': model_name,
+            'object': 'model',
+            'created': 0,
+            'owned_by': model_info.base_provider
+        }, indent=4), media_type="application/json")
+    except:
+        return Response(content=json.dumps({"error": "The model does not exist."}, indent=4), media_type="application/json")
+
+@app.post("/v1/chat/completions")
+async def chat_completions(request: Request, item: JSONStructure = None):
+
+    item_data = {
+        'model': 'gpt-3.5-turbo',
+        'stream': False,
+    }
+
+    item_data.update(item or {})
+    model = item_data.get('model')
+    stream = item_data.get('stream')
+    messages = item_data.get('messages')
+
+    try:
+        response = g4f.ChatCompletion.create(model=model, stream=stream, messages=messages)
+    except:
+        return Response(content=json.dumps({"error": "An error occurred while generating the response."}, indent=4), media_type="application/json")
+
+    completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
+    completion_timestamp = int(time.time())
+
+    if not stream:
+        prompt_tokens, _ = tokenize(''.join([message['content'] for message in messages]))
+        completion_tokens, _ = tokenize(response)
+
+        json_data = {
+            'id': f'chatcmpl-{completion_id}',
+            'object': 'chat.completion',
+            'created': completion_timestamp,
+            'model': model,
+            'choices': [
+                {
+                    'index': 0,
+                    'message': {
+                        'role': 'assistant',
+                        'content': response,
+                    },
+                    'finish_reason': 'stop',
+                }
+            ],
+            'usage': {
+                'prompt_tokens': prompt_tokens,
+                'completion_tokens': completion_tokens,
+                'total_tokens': prompt_tokens + completion_tokens,
+            },
+        }
+
+        return Response(content=json.dumps(json_data, indent=4), media_type="application/json")
+
+    def streaming():
+        try:
+            for chunk in response:
+                completion_data = {
+                    'id': f'chatcmpl-{completion_id}',
+                    'object': 'chat.completion.chunk',
+                    'created': completion_timestamp,
+                    'model': model,
+                    'choices': [
+                        {
+                            'index': 0,
+                            'delta': {
+                                'content': chunk,
+                            },
+                            'finish_reason': None,
+                        }
+                    ],
+                }
+
+                content = json.dumps(completion_data, separators=(',', ':'))
+                yield f'data: {content}\n\n'
+                time.sleep(0.03)
+
+            end_completion_data = {
+                'id': f'chatcmpl-{completion_id}',
+                'object': 'chat.completion.chunk',
+                'created': completion_timestamp,
+                'model': model,
+                'choices': [
+                    {
+                        'index': 0,
+                        'delta': {},
+                        'finish_reason': 'stop',
+                    }
+                ],
+            }
+
+            content = json.dumps(end_completion_data, separators=(',', ':'))
+            yield f'data: {content}\n\n'
+
+        except GeneratorExit:
+            pass
+
+    return Response(content=json.dumps(streaming(), indent=4), media_type="application/json")
+
+@app.post("/v1/completions")
+async def completions():
+    return Response(content=json.dumps({'info': 'Not working yet.'}, indent=4), media_type="application/json")
+
+def run(ip):
+    split_ip = ip.split(":")
+    uvicorn.run(app, host=split_ip[0], port=int(split_ip[1]), use_colors=False, loop='asyncio')
--
cgit v1.2.3
From a195d6d568cfad949e8a4a96cfda1b61d247a481 Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Wed, 1 Nov 2023 22:28:07 -0300
Subject: Update run.py

---
 g4f/api/run.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/g4f/api/run.py b/g4f/api/run.py
index 12bf9eed..5992ab60 100644
--- a/g4f/api/run.py
+++ b/g4f/api/run.py
@@ -3,4 +3,4 @@ import g4f.api
 
 if __name__ == "__main__":
     print(f'Starting server... [g4f v-{g4f.version}]')
-    g4f.api.Api(g4f).run('127.0.0.1:1337', 8)
\ No newline at end of file
+    g4f.api.run('127.0.0.1:1337')
--
cgit v1.2.3

From 98241884b156a667012e51a82e4427a4465c42f0 Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Wed, 1 Nov 2023 22:29:00 -0300
Subject: Update requirements.txt

---
 requirements.txt | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index 3ef9b32e..28d00b85 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,8 +6,6 @@ certifi
 browser_cookie3
 websockets
 js2py
-flask[async]
-flask-cors
 typing-extensions
 PyExecJS
 duckduckgo-search
@@ -20,3 +18,4 @@ pillow
 platformdirs
 numpy
 asgiref
+fastapi
--
cgit v1.2.3

From 80321cc47af7835cd38ec81d32be617d3a82e146 Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Wed, 1 Nov 2023 22:29:17 -0300
Subject: Update __init__.py

---
 g4f/api/__init__.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index 6991044a..4408243a 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -2,7 +2,6 @@ from fastapi import FastAPI, Response, Request
 from fastapi.middleware.cors import CORSMiddleware
 from typing import List, Union, Any, Dict, AnyStr
 from ._tokenizer import tokenize
-import sqlite3
 import g4f
 import time
 import json
--
cgit v1.2.3

From 6e75e2303336e10414384970c08e1c1c397dd4a6 Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Wed, 1 Nov 2023 22:29:32 -0300
Subject: Update __init__.py

---
 g4f/api/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index 4408243a..d86364d1 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -164,4 +164,4 @@ async def completions():
 
 def run(ip):
     split_ip = ip.split(":")
-    uvicorn.run(app, host=split_ip[0], port=int(split_ip[1]), use_colors=False, loop='asyncio')
+    uvicorn.run(app, host=split_ip[0], port=int(split_ip[1]), use_colors=False)
--
cgit v1.2.3

From 583d856c39ec08a8d93410dd3d2bf136de5db072 Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Wed, 1 Nov 2023 22:29:45 -0300
Subject: Update requirements.txt

---
 requirements.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/requirements.txt b/requirements.txt
index 28d00b85..ffadf62a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -19,3 +19,4 @@ platformdirs
 numpy
 asgiref
 fastapi
+uvicorn
--
cgit v1.2.3

From 40a8664806853072af37f5d7f567e902529aa53e Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Wed, 1 Nov 2023 22:30:23 -0300
Subject: Update setup.py

---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index 116213fc..b6be36e1 100644
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,7 @@ with codecs.open(os.path.join(here, "README.md"), encoding="utf-8") as fh:
 with open("requirements.txt") as f:
     required = f.read().splitlines()
 
-VERSION = "0.1.7.9"
+VERSION = "0.1.8.0"
 DESCRIPTION = (
     "The official gpt4free repository | various collection of powerful language models"
 )
--
cgit v1.2.3
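With flask and flask-cors dropped in favor of fastapi, the module-level app can also be exercised in-process, with no uvicorn server at all. This is a sketch under one assumption the patches do not state: `fastapi.testclient.TestClient` needs the `httpx` package installed.

    # Hypothetical in-process check of g4f.api.app as it stands in this series.
    from fastapi.testclient import TestClient

    import g4f.api

    client = TestClient(g4f.api.app)
    assert client.get("/").json() == {"info": "G4F API"}
    print(client.get("/v1/models").json()["data"][:3])  # first few model entries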
From 045a3b1c4b774372705479c422986961626b3c20 Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Sat, 4 Nov 2023 17:47:17 -0300
Subject: Update __init__.py (#1)

---
 g4f/__init__.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/g4f/__init__.py b/g4f/__init__.py
index a2eec9e2..e1ca6a9d 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -5,7 +5,7 @@ from .Provider import BaseProvider, RetryProvider
 from .typing import Messages, CreateResult, Union, List
 from . import debug
 
-version = '0.1.7.9'
+version = '0.1.8.0'
 version_check = True
 
 def check_pypi_version() -> None:
@@ -115,4 +115,4 @@ class Completion:
         return result if stream else ''.join(result)
 
 if version_check:
-    check_pypi_version()
\ No newline at end of file
+    check_pypi_version()
--
cgit v1.2.3

From 87cf743a41c8bf1ac9950b336b8abfb98f9d66b1 Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Sat, 4 Nov 2023 17:51:32 -0300
Subject: Update __init__.py

---
 g4f/api/__init__.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index d86364d1..8ea61dba 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -162,6 +162,6 @@ async def chat_completions(request: Request, item: JSONStructure = None):
 async def completions():
     return Response(content=json.dumps({'info': 'Not working yet.'}, indent=4), media_type="application/json")
 
-def run(ip):
+def run(ip, thread_quantity):
     split_ip = ip.split(":")
-    uvicorn.run(app, host=split_ip[0], port=int(split_ip[1]), use_colors=False)
+    uvicorn.run(app, host=split_ip[0], port=int(split_ip[1]), use_colors=False, workers=thread_quantity)
--
cgit v1.2.3

From 318112c8b9b881582e0bc283d6f1811c80d699fb Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Sat, 4 Nov 2023 17:51:53 -0300
Subject: Update run.py

---
 g4f/api/run.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/g4f/api/run.py b/g4f/api/run.py
index 5992ab60..bd76fcf2 100644
--- a/g4f/api/run.py
+++ b/g4f/api/run.py
@@ -3,4 +3,4 @@ import g4f.api
 
 if __name__ == "__main__":
     print(f'Starting server... [g4f v-{g4f.version}]')
-    g4f.api.run('127.0.0.1:1337')
+    g4f.api.run('127.0.0.1:1337', 8)
--
cgit v1.2.3
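A caveat on the `workers=thread_quantity` argument introduced above: uvicorn only honors `workers` (and `reload`) when the application is passed as an import string rather than as an object; handed an app instance, it warns and serves from a single process. The churn later in this series between `uvicorn.run("__init__:app", ...)` and `uvicorn.run(app=self.app, ...)` is a symptom of that constraint. A sketch of the import-string form, assuming the FastAPI instance is importable as `g4f.api:app` (true at this point in the series):

    # Hypothetical multi-worker launcher; uvicorn requires the import-string form
    # to spawn more than one worker process.
    import uvicorn

    if __name__ == "__main__":
        uvicorn.run("g4f.api:app", host="127.0.0.1", port=1337, workers=8, use_colors=False)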
From 0af4fc0997360720712f8b37d75431ce7de79e74 Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Sat, 4 Nov 2023 18:16:09 -0300
Subject: Update __init__.py

---
 g4f/api/__init__.py | 281 ++++++++++++++++++++++++++--------------------------
 1 file changed, 138 insertions(+), 143 deletions(-)

diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index 8ea61dba..17951339 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -1,167 +1,162 @@
-from fastapi import FastAPI, Response, Request
-from fastapi.middleware.cors import CORSMiddleware
-from typing import List, Union, Any, Dict, AnyStr
-from ._tokenizer import tokenize
-import g4f
-import time
-import json
-import random
-import string
-import uvicorn
-import nest_asyncio
-
-app = FastAPI()
-nest_asyncio.apply()
-
-origins = [
-    "http://localhost",
-    "http://localhost:1337",
-]
-
-app.add_middleware(
-    CORSMiddleware,
-    allow_origins=origins,
-    allow_credentials=True,
-    allow_methods=["*"],
-    allow_headers=["*"],
-)
-
-JSONObject = Dict[AnyStr, Any]
-JSONArray = List[Any]
-JSONStructure = Union[JSONArray, JSONObject]
-
-@app.get("/")
-async def read_root():
-    return Response(content=json.dumps({"info": "G4F API"}, indent=4), media_type="application/json")
-
-@app.get("/v1")
-async def read_root_v1():
-    return Response(content=json.dumps({"info": "Go to /v1/chat/completions or /v1/models."}, indent=4), media_type="application/json")
-
-@app.get("/v1/models")
-async def models():
-    model_list = [{
-        'id': model,
-        'object': 'model',
-        'created': 0,
-        'owned_by': 'g4f'} for model in g4f.Model.__all__()]
-
-    return Response(content=json.dumps({
-        'object': 'list',
-        'data': model_list}, indent=4), media_type="application/json")
-
-@app.get("/v1/models/{model_name}")
-async def model_info(model_name: str):
-    try:
-        model_info = (g4f.ModelUtils.convert[model_name])
-
-        return Response(content=json.dumps({
-            'id': model_name,
-            'object': 'model',
-            'created': 0,
-            'owned_by': model_info.base_provider
-        }, indent=4), media_type="application/json")
-    except:
-        return Response(content=json.dumps({"error": "The model does not exist."}, indent=4), media_type="application/json")
-
-@app.post("/v1/chat/completions")
-async def chat_completions(request: Request, item: JSONStructure = None):
-
-    item_data = {
-        'model': 'gpt-3.5-turbo',
-        'stream': False,
-    }
-
-    item_data.update(item or {})
-    model = item_data.get('model')
-    stream = item_data.get('stream')
-    messages = item_data.get('messages')
-
-    try:
-        response = g4f.ChatCompletion.create(model=model, stream=stream, messages=messages)
-    except:
-        return Response(content=json.dumps({"error": "An error occurred while generating the response."}, indent=4), media_type="application/json")
-
-    completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
-    completion_timestamp = int(time.time())
-
-    if not stream:
-        prompt_tokens, _ = tokenize(''.join([message['content'] for message in messages]))
-        completion_tokens, _ = tokenize(response)
-
-        json_data = {
-            'id': f'chatcmpl-{completion_id}',
-            'object': 'chat.completion',
-            'created': completion_timestamp,
-            'model': model,
-            'choices': [
-                {
-                    'index': 0,
-                    'message': {
-                        'role': 'assistant',
-                        'content': response,
-                    },
-                    'finish_reason': 'stop',
-                }
-            ],
-            'usage': {
-                'prompt_tokens': prompt_tokens,
-                'completion_tokens': completion_tokens,
-                'total_tokens': prompt_tokens + completion_tokens,
-            },
-        }
-
-        return Response(content=json.dumps(json_data, indent=4), media_type="application/json")
-
-    def streaming():
-        try:
-            for chunk in response:
-                completion_data = {
-                    'id': f'chatcmpl-{completion_id}',
-                    'object': 'chat.completion.chunk',
-                    'created': completion_timestamp,
-                    'model': model,
-                    'choices': [
-                        {
-                            'index': 0,
-                            'delta': {
-                                'content': chunk,
-                            },
-                            'finish_reason': None,
-                        }
-                    ],
-                }
-
-                content = json.dumps(completion_data, separators=(',', ':'))
-                yield f'data: {content}\n\n'
-                time.sleep(0.03)
-
-            end_completion_data = {
-                'id': f'chatcmpl-{completion_id}',
-                'object': 'chat.completion.chunk',
-                'created': completion_timestamp,
-                'model': model,
-                'choices': [
-                    {
-                        'index': 0,
-                        'delta': {},
-                        'finish_reason': 'stop',
-                    }
-                ],
-            }
-
-            content = json.dumps(end_completion_data, separators=(',', ':'))
-            yield f'data: {content}\n\n'
-
-        except GeneratorExit:
-            pass
-
-    return Response(content=json.dumps(streaming(), indent=4), media_type="application/json")
-
-@app.post("/v1/completions")
-async def completions():
-    return Response(content=json.dumps({'info': 'Not working yet.'}, indent=4), media_type="application/json")
-
-def run(ip, thread_quantity):
-    split_ip = ip.split(":")
-    uvicorn.run(app, host=split_ip[0], port=int(split_ip[1]), use_colors=False, workers=thread_quantity)
+from fastapi import FastAPI, Response, Request
+from typing import List, Union, Any, Dict, AnyStr
+from ._tokenizer import tokenize
+from .. import BaseProvider
+
+import time
+import json
+import random
+import string
+import uvicorn
+import nest_asyncio
+import g4f
+
+class Api:
+    def __init__(self, engine: g4f, debug: bool = True, sentry: bool = False,
+                 list_ignored_providers: List[Union[str, BaseProvider]] = None) -> None:
+        self.engine = engine
+        self.debug = debug
+        self.sentry = sentry
+        self.list_ignored_providers = list_ignored_providers
+
+        self.app = FastAPI()
+        nest_asyncio.apply()
+
+        JSONObject = Dict[AnyStr, Any]
+        JSONArray = List[Any]
+        JSONStructure = Union[JSONArray, JSONObject]
+
+        @self.app.get("/")
+        async def read_root():
+            return Response(content=json.dumps({"info": "g4f API"}, indent=4), media_type="application/json")
+
+        @self.app.get("/v1")
+        async def read_root_v1():
+            return Response(content=json.dumps({"info": "Go to /v1/chat/completions or /v1/models."}, indent=4), media_type="application/json")
+
+        @self.app.get("/v1/models")
+        async def models():
+            model_list = [{
+                'id': model,
+                'object': 'model',
+                'created': 0,
+                'owned_by': 'g4f'} for model in g4f.Model.__all__()]
+
+            return Response(content=json.dumps({
+                'object': 'list',
+                'data': model_list}, indent=4), media_type="application/json")
+
+        @self.app.get("/v1/models/{model_name}")
+        async def model_info(model_name: str):
+            try:
+                model_info = (g4f.ModelUtils.convert[model_name])
+
+                return Response(content=json.dumps({
+                    'id': model_name,
+                    'object': 'model',
+                    'created': 0,
+                    'owned_by': model_info.base_provider
+                }, indent=4), media_type="application/json")
+            except:
+                return Response(content=json.dumps({"error": "The model does not exist."}, indent=4), media_type="application/json")
+
+        @self.app.post("/v1/chat/completions")
+        async def chat_completions(request: Request, item: JSONStructure = None):
+            item_data = {
+                'model': 'gpt-3.5-turbo',
+                'stream': False,
+            }
+
+            item_data.update(item or {})
+            model = item_data.get('model')
+            stream = item_data.get('stream')
+            messages = item_data.get('messages')
+
+            try:
+                response = g4f.ChatCompletion.create(model=model, stream=stream, messages=messages)
+            except:
+                return Response(content=json.dumps({"error": "An error occurred while generating the response."}, indent=4), media_type="application/json")
+
+            completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
+            completion_timestamp = int(time.time())
+
+            if not stream:
+                prompt_tokens, _ = tokenize(''.join([message['content'] for message in messages]))
+                completion_tokens, _ = tokenize(response)
+
+                json_data = {
+                    'id': f'chatcmpl-{completion_id}',
+                    'object': 'chat.completion',
+                    'created': completion_timestamp,
+                    'model': model,
+                    'choices': [
+                        {
+                            'index': 0,
+                            'message': {
+                                'role': 'assistant',
+                                'content': response,
+                            },
+                            'finish_reason': 'stop',
+                        }
+                    ],
+                    'usage': {
+                        'prompt_tokens': prompt_tokens,
+                        'completion_tokens': completion_tokens,
+                        'total_tokens': prompt_tokens + completion_tokens,
+                    },
+                }
+
+                return Response(content=json.dumps(json_data, indent=4), media_type="application/json")
+
+            def streaming():
+                try:
+                    for chunk in response:
+                        completion_data = {
+                            'id': f'chatcmpl-{completion_id}',
+                            'object': 'chat.completion.chunk',
+                            'created': completion_timestamp,
+                            'model': model,
+                            'choices': [
+                                {
+                                    'index': 0,
+                                    'delta': {
+                                        'content': chunk,
+                                    },
+                                    'finish_reason': None,
+                                }
+                            ],
+                        }
+
+                        content = json.dumps(completion_data, separators=(',', ':'))
+                        yield f'data: {content}\n\n'
+                        time.sleep(0.03)
+
+                    end_completion_data = {
+                        'id': f'chatcmpl-{completion_id}',
+                        'object': 'chat.completion.chunk',
+                        'created': completion_timestamp,
+                        'model': model,
+                        'choices': [
+                            {
+                                'index': 0,
+                                'delta': {},
+                                'finish_reason': 'stop',
+                            }
+                        ],
+                    }
+
+                    content = json.dumps(end_completion_data, separators=(',', ':'))
+                    yield f'data: {content}\n\n'
+
+                except GeneratorExit:
+                    pass
+
+            return Response(content=json.dumps(streaming(), indent=4), media_type="application/json")
+
+        @self.app.post("/v1/completions")
+        async def completions():
+            return Response(content=json.dumps({'info': 'Not working yet.'}, indent=4), media_type="application/json")
+
+    def run(self, ip, thread_quantity):
+        split_ip = ip.split(":")
+        uvicorn.run(self.app, host=split_ip[0], port=int(split_ip[1]), use_colors=False, workers=thread_quantity)
--
cgit v1.2.3

From f51fe6322fcbd2bee24613d63aa8ea28cf6a32af Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Sat, 4 Nov 2023 18:16:23 -0300
Subject: Update run.py

---
 g4f/api/run.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/g4f/api/run.py b/g4f/api/run.py
index bd76fcf2..c822237d 100644
--- a/g4f/api/run.py
+++ b/g4f/api/run.py
@@ -3,4 +3,4 @@ import g4f.api
 
 if __name__ == "__main__":
     print(f'Starting server... [g4f v-{g4f.version}]')
-    g4f.api.run('127.0.0.1:1337', 8)
+    g4f.api.Api(engine = g4f, debug = True).run(ip = "127.0.0.1:1337", thread_quantity = 8)
--
cgit v1.2.3

From ef3e2975852bd96b34fbb097b74252c6016ffa6e Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Sat, 4 Nov 2023 18:16:42 -0300
Subject: Update cli.py

---
 g4f/cli.py | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/g4f/cli.py b/g4f/cli.py
index cb19dde1..2938a335 100644
--- a/g4f/cli.py
+++ b/g4f/cli.py
@@ -7,11 +7,9 @@ from g4f import Provider
 from g4f.api import Api
 from g4f.gui.run import gui_parser, run_gui_args
 
-
 def run_gui(args):
     print("Running GUI...")
 
-
 def main():
     IgnoredProviders = Enum("ignore_providers", {key: key for key in Provider.__all__})
     parser = argparse.ArgumentParser(description="Run gpt4free")
@@ -26,8 +24,7 @@ def main():
     args = parser.parse_args()
 
     if args.mode == "api":
-        controller=Api(g4f, debug=args.debug)
-        controller.list_ignored_providers=args.ignored_providers
+        controller=Api(g4f, debug=args.debug, list_ignored_providers=args.ignored_providers)
         controller.run(args.bind, args.num_threads)
     elif args.mode == "gui":
         run_gui_args(args)
@@ -35,6 +32,5 @@ def main():
         parser.print_help()
         exit(1)
 
-
 if __name__ == "__main__":
     main()
--
cgit v1.2.3

From de697f389cfe7adba518ea32858ac7a2b0ab19e1 Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Sat, 4 Nov 2023 18:17:14 -0300
Subject: Update setup.py

---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index b6be36e1..6b74c1a7 100644
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,7 @@ with codecs.open(os.path.join(here, "README.md"), encoding="utf-8") as fh:
 with open("requirements.txt") as f:
     required = f.read().splitlines()
 
-VERSION = "0.1.8.0"
+VERSION = "0.1.8.1"
 DESCRIPTION = (
     "The official gpt4free repository | various collection of powerful language models"
 )
--
cgit v1.2.3
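The class-based rewrite above also drops the `CORSMiddleware` registration that the module-level version carried, so browser clients on other origins are now subject to default CORS refusal. If the old behavior were wanted back, it could be restored inside `Api.__init__` along these lines (a sketch, not part of the series; the origins list mirrors the one removed above):

    # Hypothetical restoration of the dropped CORS setup, placed in Api.__init__.
    from fastapi.middleware.cors import CORSMiddleware

    self.app.add_middleware(
        CORSMiddleware,
        allow_origins=["http://localhost", "http://localhost:1337"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )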
From 1375a740896ceccdd50c752c56a2123b1e8a9723 Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Sat, 4 Nov 2023 18:17:27 -0300
Subject: Update __init__.py

---
 g4f/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/g4f/__init__.py b/g4f/__init__.py
index e1ca6a9d..25e31833 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -5,7 +5,7 @@ from .Provider import BaseProvider, RetryProvider
 from .typing import Messages, CreateResult, Union, List
 from . import debug
 
-version = '0.1.8.0'
+version = '0.1.8.1'
 version_check = True
 
 def check_pypi_version() -> None:
--
cgit v1.2.3

From 81bd9bab33378652c073a2e340b7dbc759aadee7 Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Sat, 4 Nov 2023 18:19:30 -0300
Subject: Update __init__.py

---
 g4f/api/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index 17951339..a1ab66de 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -159,4 +159,4 @@ class Api:
 
     def run(self, ip, thread_quantity):
         split_ip = ip.split(":")
-        uvicorn.run(self.app, host=split_ip[0], port=int(split_ip[1]), use_colors=False, workers=thread_quantity)
+        uvicorn.run("__init__:app", host=split_ip[0], port=int(split_ip[1]), use_colors=False, workers=thread_quantity)
--
cgit v1.2.3

From 1726bc723d0fd09e6d52567c1c7e27516b96f94f Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Sat, 4 Nov 2023 18:23:29 -0300
Subject: Update __init__.py

---
 g4f/api/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index a1ab66de..541be47d 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -159,4 +159,4 @@ class Api:
 
     def run(self, ip, thread_quantity):
         split_ip = ip.split(":")
-        uvicorn.run("__init__:app", host=split_ip[0], port=int(split_ip[1]), use_colors=False, workers=thread_quantity)
+        uvicorn.run(app=self.app, host=split_ip[0], port=int(split_ip[1]), use_colors=False, workers=thread_quantity)
--
cgit v1.2.3

From 7ef8135ed4ac6b62d6cd4ad6e723125f0513bc99 Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Sat, 4 Nov 2023 18:24:38 -0300
Subject: Update __init__.py

---
 g4f/api/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index 541be47d..5fb93971 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -159,4 +159,4 @@ class Api:
 
     def run(self, ip, thread_quantity):
         split_ip = ip.split(":")
-        uvicorn.run(app=self.app, host=split_ip[0], port=int(split_ip[1]), use_colors=False, workers=thread_quantity)
+        uvicorn.run(host=split_ip[0], port=int(split_ip[1]), use_colors=False)
--
cgit v1.2.3
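Note that 7ef8135e leaves `uvicorn.run(host=..., port=..., use_colors=False)` with no application argument at all. `uvicorn.run()` takes the ASGI app, as an instance or an import string, as its required first parameter, so `run()` now fails until a later commit in this series passes the app again:

    # Illustration only: uvicorn.run's first parameter is the application and is required.
    import uvicorn

    uvicorn.run(host="127.0.0.1", port=1337, use_colors=False)
    # TypeError: run() missing 1 required positional argument: 'app'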
From 0a2ff2ba5ed33378a73524bd33892c91dbc5ada4 Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Sat, 4 Nov 2023 18:25:02 -0300
Subject: Update run.py

---
 g4f/api/run.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/g4f/api/run.py b/g4f/api/run.py
index c822237d..88f34741 100644
--- a/g4f/api/run.py
+++ b/g4f/api/run.py
@@ -3,4 +3,4 @@ import g4f.api
 
 if __name__ == "__main__":
     print(f'Starting server... [g4f v-{g4f.version}]')
-    g4f.api.Api(engine = g4f, debug = True).run(ip = "127.0.0.1:1337", thread_quantity = 8)
+    g4f.api.Api(engine = g4f, debug = True).run(ip = "127.0.0.1:1337")
--
cgit v1.2.3

From df7ba2c5eb0c62e427e8ab884d1883999812c25b Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Sat, 4 Nov 2023 18:25:21 -0300
Subject: Update cli.py

---
 g4f/cli.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/g4f/cli.py b/g4f/cli.py
index 2938a335..a8fcba47 100644
--- a/g4f/cli.py
+++ b/g4f/cli.py
@@ -17,7 +17,6 @@ def main():
     api_parser=subparsers.add_parser("api")
     api_parser.add_argument("--bind", default="127.0.0.1:1337", help="The bind string.")
     api_parser.add_argument("--debug", type=bool, default=False, help="Enable verbose logging")
-    api_parser.add_argument("--num-threads", type=int, default=8, help="The number of threads.")
     api_parser.add_argument("--ignored-providers", nargs="+", choices=[provider.name for provider in IgnoredProviders], default=[], help="List of providers to ignore when processing request.")
     subparsers.add_parser("gui", parents=[gui_parser()], add_help=False)
 
@@ -25,7 +24,7 @@ def main():
     args = parser.parse_args()
 
     if args.mode == "api":
         controller=Api(g4f, debug=args.debug, list_ignored_providers=args.ignored_providers)
-        controller.run(args.bind, args.num_threads)
+        controller.run(args.bind)
     elif args.mode == "gui":
         run_gui_args(args)
--
cgit v1.2.3

From 93a6d4499b86a64be577e54381951d782f45b04c Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Sat, 4 Nov 2023 18:25:40 -0300
Subject: Update cli.py

---
 g4f/cli.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/g4f/cli.py b/g4f/cli.py
index a8fcba47..20131e5d 100644
--- a/g4f/cli.py
+++ b/g4f/cli.py
@@ -23,7 +23,7 @@ def main():
     args = parser.parse_args()
 
     if args.mode == "api":
-        controller=Api(g4f, debug=args.debug, list_ignored_providers=args.ignored_providers)
+        controller=Api(engine=g4f, debug=args.debug, list_ignored_providers=args.ignored_providers)
         controller.run(args.bind)
     elif args.mode == "gui":
         run_gui_args(args)
--
cgit v1.2.3

From 65c3a88e0adb6836113130e52e70e4b824a86949 Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Sat, 4 Nov 2023 18:26:16 -0300
Subject: Update __init__.py

---
 g4f/api/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index 5fb93971..bbb0d1d5 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -157,6 +157,6 @@ class Api:
         async def completions():
             return Response(content=json.dumps({'info': 'Not working yet.'}, indent=4), media_type="application/json")
 
-    def run(self, ip, thread_quantity):
+    def run(self, ip):
         split_ip = ip.split(":")
         uvicorn.run(host=split_ip[0], port=int(split_ip[1]), use_colors=False)
--
cgit v1.2.3
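One wrinkle left in the cli.py hunks above: `--debug` is declared with `type=bool`, and argparse applies `bool()` to the raw string, so any non-empty value, including `--debug false`, enables debug. The usual idiom (an alternative, not what this series uses) is a `store_true` flag:

    # Sketch of the store_true idiom for boolean CLI flags.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", action="store_true", help="Enable verbose logging")
    print(parser.parse_args(["--debug"]).debug)  # True
    print(parser.parse_args([]).debug)           # False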
From 90678515a67c00e503ab183c93c65698e7942934 Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Sat, 4 Nov 2023 18:27:25 -0300
Subject: Update __init__.py

---
 g4f/api/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index bbb0d1d5..43bca2a5 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -159,4 +159,4 @@ class Api:
 
     def run(self, ip):
         split_ip = ip.split(":")
-        uvicorn.run(host=split_ip[0], port=int(split_ip[1]), use_colors=False)
+        uvicorn.run(app=self.app, host=split_ip[0], port=int(split_ip[1]), use_colors=False)
--
cgit v1.2.3

From afbd8822fae74593ecb956a46d70ae8e8c4cc45c Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Sat, 4 Nov 2023 18:32:20 -0300
Subject: Update setup.py

---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index 6b74c1a7..116213fc 100644
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,7 @@ with codecs.open(os.path.join(here, "README.md"), encoding="utf-8") as fh:
 with open("requirements.txt") as f:
     required = f.read().splitlines()
 
-VERSION = "0.1.8.1"
+VERSION = "0.1.7.9"
 DESCRIPTION = (
     "The official gpt4free repository | various collection of powerful language models"
 )
--
cgit v1.2.3

From f1280da4ca12b6347a636e90266153075d877b2d Mon Sep 17 00:00:00 2001
From: ThatLukinhasGuy <139662282+thatlukinhasguy1@users.noreply.github.com>
Date: Sat, 4 Nov 2023 18:32:32 -0300
Subject: Update __init__.py

---
 g4f/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/g4f/__init__.py b/g4f/__init__.py
index 25e31833..8a1cb3cd 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -5,7 +5,7 @@ from .Provider import BaseProvider, RetryProvider
 from .typing import Messages, CreateResult, Union, List
 from . import debug
 
-version = '0.1.8.1'
+version = '0.1.7.9'
 version_check = True
 
 def check_pypi_version() -> None:
--
cgit v1.2.3
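With `app=self.app` restored and both version strings rolled back to 0.1.7.9, the series settles on the entry point below; it mirrors g4f/api/run.py after 0a2ff2ba, and per the earlier import-string caveat uvicorn serves it from a single worker:

    import g4f
    import g4f.api

    print(f'Starting server... [g4f v-{g4f.version}]')
    g4f.api.Api(engine = g4f, debug = True).run(ip = "127.0.0.1:1337")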