From 2345588d383a9115e8e36098caefc7b5bae077f3 Mon Sep 17 00:00:00 2001
From: abc <98614666+xtekky@users.noreply.github.com>
Date: Sun, 19 Nov 2023 22:59:18 +0000
Subject: ~ | improve compatibility with lower python versions

remove tiktoken
---
 g4f/api/__init__.py   | 12 ++++++------
 g4f/api/_tokenizer.py | 14 +++++++-------
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index d8798ef2..d2244ff5 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -4,7 +4,7 @@ import logging
 from fastapi import FastAPI, Response, Request
 from fastapi.responses import StreamingResponse
 from typing import List, Union, Any, Dict, AnyStr
-from ._tokenizer import tokenize
+#from ._tokenizer import tokenize
 from .. import BaseProvider
 
 import time
@@ -95,8 +95,8 @@ class Api:
         completion_timestamp = int(time.time())
 
         if not stream:
-            prompt_tokens, _ = tokenize(''.join([message['content'] for message in messages]))
-            completion_tokens, _ = tokenize(response)
+            #prompt_tokens, _ = tokenize(''.join([message['content'] for message in messages]))
+            #completion_tokens, _ = tokenize(response)
 
             json_data = {
                 'id': f'chatcmpl-{completion_id}',
@@ -114,9 +114,9 @@ class Api:
                     }
                 ],
                 'usage': {
-                    'prompt_tokens': prompt_tokens,
-                    'completion_tokens': completion_tokens,
-                    'total_tokens': prompt_tokens + completion_tokens,
+                    'prompt_tokens': 0, #prompt_tokens,
+                    'completion_tokens': 0, #completion_tokens,
+                    'total_tokens': 0, #prompt_tokens + completion_tokens,
                 },
             }
 
diff --git a/g4f/api/_tokenizer.py b/g4f/api/_tokenizer.py
index fd8f9d5a..de5877c4 100644
--- a/g4f/api/_tokenizer.py
+++ b/g4f/api/_tokenizer.py
@@ -1,9 +1,9 @@
-import tiktoken
-from typing import Union
+# import tiktoken
+# from typing import Union
 
-def tokenize(text: str, model: str = 'gpt-3.5-turbo') -> Union[int, str]:
-    encoding = tiktoken.encoding_for_model(model)
-    encoded = encoding.encode(text)
-    num_tokens = len(encoded)
+# def tokenize(text: str, model: str = 'gpt-3.5-turbo') -> Union[int, str]:
+#     encoding = tiktoken.encoding_for_model(model)
+#     encoded = encoding.encode(text)
+#     num_tokens = len(encoded)
 
-    return num_tokens, encoded
\ No newline at end of file
+#     return num_tokens, encoded
\ No newline at end of file
-- 
cgit v1.2.3
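
For context: this patch drops the hard tiktoken dependency by commenting out `tokenize` and hard-coding the `usage` counts to 0. A common alternative is an optional import with a graceful fallback, so real counts are still reported when tiktoken is installed. The sketch below is illustrative and not part of the commit; the ~4-characters-per-token estimate is a rough assumption, and the `Tuple[int, List[int]]` annotation is a correction of the original `Union[int, str]`, which did not match the tuple the function actually returned.

```python
# Illustrative sketch only -- not part of this commit.
# Uses tiktoken when available, otherwise degrades to a rough estimate.
from typing import List, Tuple

try:
    import tiktoken  # optional dependency; may be absent on older Python versions
except ImportError:
    tiktoken = None

def tokenize(text: str, model: str = 'gpt-3.5-turbo') -> Tuple[int, List[int]]:
    if tiktoken is not None:
        encoding = tiktoken.encoding_for_model(model)
        encoded = encoding.encode(text)
        return len(encoded), encoded
    # Fallback assumption: ~4 characters per token. Token ids are
    # unavailable without a real tokenizer, so return an empty list.
    return max(1, len(text) // 4), []
```

With this shape, `g4f/api/__init__.py` could keep its original `tokenize` calls and fill `prompt_tokens`, `completion_tokens`, and `total_tokens` with real values when tiktoken is present, rather than pinning them to 0.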