author    abc <98614666+xtekky@users.noreply.github.com> 2023-11-19 23:59:18 +0100
committer abc <98614666+xtekky@users.noreply.github.com> 2023-11-19 23:59:18 +0100
commit    2345588d383a9115e8e36098caefc7b5bae077f3 (patch)
tree      03700c66f78410977e0dca9ce3c3cf0cbb7b4d6d
parent    ~ | g4f v-0.1.8.6 (diff)
 g4f/api/__init__.py   | 12 ++++++------
 g4f/api/_tokenizer.py | 14 +++++++-------
 2 files changed, 13 insertions, 13 deletions
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index d8798ef2..d2244ff5 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -4,7 +4,7 @@ import logging
 from fastapi import FastAPI, Response, Request
 from fastapi.responses import StreamingResponse
 from typing import List, Union, Any, Dict, AnyStr
-from ._tokenizer import tokenize
+#from ._tokenizer import tokenize
 from .. import BaseProvider
 
 import time
@@ -95,8 +95,8 @@ class Api:
         completion_timestamp = int(time.time())
 
         if not stream:
-            prompt_tokens, _ = tokenize(''.join([message['content'] for message in messages]))
-            completion_tokens, _ = tokenize(response)
+            #prompt_tokens, _ = tokenize(''.join([message['content'] for message in messages]))
+            #completion_tokens, _ = tokenize(response)
 
             json_data = {
                 'id': f'chatcmpl-{completion_id}',
@@ -114,9 +114,9 @@ class Api:
                     }
                 ],
                 'usage': {
-                    'prompt_tokens': prompt_tokens,
-                    'completion_tokens': completion_tokens,
-                    'total_tokens': prompt_tokens + completion_tokens,
+                    'prompt_tokens': 0, #prompt_tokens,
+                    'completion_tokens': 0, #completion_tokens,
+                    'total_tokens': 0, #prompt_tokens + completion_tokens,
                 },
             }
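For context, the accounting disabled above concatenated every message body before counting tokens. A minimal sketch of that pattern, assuming OpenAI-style message dicts (the sample messages are hypothetical):

    # Hypothetical sample input in the OpenAI chat message shape.
    messages = [
        {'role': 'user', 'content': 'What is 2 + 2?'},
        {'role': 'assistant', 'content': '2 + 2 equals 4.'},
    ]
    # The same join the removed line performed: counting ran over the raw
    # concatenation of all message contents, with no role or separator tokens.
    prompt_text = ''.join([message['content'] for message in messages])

Note this slightly undercounts relative to OpenAI's own usage accounting, which adds a few overhead tokens per message for roles and delimiters.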
diff --git a/g4f/api/_tokenizer.py b/g4f/api/_tokenizer.py
index fd8f9d5a..de5877c4 100644
--- a/g4f/api/_tokenizer.py
+++ b/g4f/api/_tokenizer.py
@@ -1,9 +1,9 @@
-import tiktoken
-from typing import Union
-
-def tokenize(text: str, model: str = 'gpt-3.5-turbo') -> Union[int, str]:
-    encoding = tiktoken.encoding_for_model(model)
-    encoded = encoding.encode(text)
-    num_tokens = len(encoded)
-
-    return num_tokens, encoded
\ No newline at end of file
+# import tiktoken
+# from typing import Union
+
+# def tokenize(text: str, model: str = 'gpt-3.5-turbo') -> Union[int, str]:
+#     encoding = tiktoken.encoding_for_model(model)
+#     encoded = encoding.encode(text)
+#     num_tokens = len(encoded)
+
+#     return num_tokens, encoded
\ No newline at end of file
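For reference, the helper commented out above runs standalone. A minimal sketch, assuming tiktoken is installed (pip install tiktoken); note that the original annotation Union[int, str] did not match the actual return value, so the sketch declares the (count, token_ids) tuple the function really returns:

    import tiktoken

    def tokenize(text: str, model: str = 'gpt-3.5-turbo') -> tuple[int, list[int]]:
        # Resolve the BPE encoding tiktoken registers for this model name
        # (cl100k_base for gpt-3.5-turbo) and encode the text with it.
        encoding = tiktoken.encoding_for_model(model)
        encoded = encoding.encode(text)
        return len(encoded), encoded

    prompt_tokens, _ = tokenize('Hello, world!')
    completion_tokens, _ = tokenize('Hi! How can I help you today?')
    print(prompt_tokens, completion_tokens, prompt_tokens + completion_tokens)

Restoring this module, together with the two commented call sites in g4f/api/__init__.py, would bring real numbers back to the usage block instead of the hardcoded zeros.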