From 3a81f9a2af777ae4fde3d3cd8cfa1ded608c16ae Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Sat, 23 Dec 2023 20:50:56 +0100
Subject: Fix api and None provider

---
 etc/testing/test_api.py | 27 +++++++++++++++++++++++++++
 g4f/api/__init__.py     | 32 ++++++++++++--------------------
 2 files changed, 39 insertions(+), 20 deletions(-)
 create mode 100644 etc/testing/test_api.py

diff --git a/etc/testing/test_api.py b/etc/testing/test_api.py
new file mode 100644
index 00000000..57e2f117
--- /dev/null
+++ b/etc/testing/test_api.py
@@ -0,0 +1,27 @@
+import openai
+
+# Set your Hugging Face token as the API key if you use embeddings
+# If you don't use embeddings, leave it empty
+openai.api_key = "YOUR_HUGGING_FACE_TOKEN" # Replace with your actual token
+
+# Set the API base URL if needed, e.g., for a local development environment
+openai.api_base = "http://localhost:1337/v1"
+
+def main():
+    response = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": "write a poem about a tree"}],
+        stream=True,
+    )
+    if isinstance(response, dict):
+        # Not streaming
+        print(response.choices[0].message.content)
+    else:
+        # Streaming
+        for token in response:
+            content = token["choices"][0]["delta"].get("content")
+            if content is not None:
+                print(content, end="", flush=True)
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index a79da7b0..8369d70f 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -83,28 +83,17 @@ class Api:
             model = item_data.get('model')
             stream = True if item_data.get("stream") == "True" else False
             messages = item_data.get('messages')
-            conversation = item_data.get('conversation') if item_data.get('conversation') != None else None
-            provider = item_data.get('provider').replace('g4f.Provider.', '')
+            provider = item_data.get('provider', '').replace('g4f.Provider.', '')
             provider = provider if provider and provider != "Auto" else None
-            if provider != None:
-                provider = g4f.Provider.ProviderUtils.convert.get(provider)
 
             try:
-                if model == 'pi':
-                    response = g4f.ChatCompletion.create(
-                        model=model,
-                        stream=stream,
-                        messages=messages,
-                        conversation=conversation,
-                        provider = provider,
-                        ignored=self.list_ignored_providers)
-                else:
-                    response = g4f.ChatCompletion.create(
-                        model=model,
-                        stream=stream,
-                        messages=messages,
-                        provider = provider,
-                        ignored=self.list_ignored_providers)
+                response = g4f.ChatCompletion.create(
+                    model=model,
+                    stream=stream,
+                    messages=messages,
+                    provider = provider,
+                    ignored=self.list_ignored_providers
+                )
             except Exception as e:
                 logging.exception(e)
                 return Response(content=json.dumps({"error": "An error occurred while generating the response."}, indent=4), media_type="application/json")
@@ -179,9 +168,12 @@ class Api:
 
                     content = json.dumps(end_completion_data, separators=(',', ':'))
                     yield f'data: {content}\n\n'
 
-                except GeneratorExit:
                     pass
+                except Exception as e:
+                    logging.exception(e)
+                    content=json.dumps({"error": "An error occurred while generating the response."}, indent=4)
+                    yield f'data: {content}\n\n'
 
         return StreamingResponse(streaming(), media_type="text/event-stream")

-- 
cgit v1.2.3
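
Note (editorial, not part of the commit): the second hunk makes streaming() log unexpected exceptions and send them to the client as a final SSE data event carrying a JSON object of the form {"error": "..."}, instead of only swallowing GeneratorExit. Below is a minimal client-side sketch of how such a stream could be consumed and the error payload detected. It assumes the API server is running locally on port 1337 and exposes the OpenAI-compatible /v1/chat/completions route that etc/testing/test_api.py targets through the openai SDK; the stream_chat helper, the requests-based transport, and the defensive [DONE] check are illustrative assumptions, not code from the repository.

import json

import requests

API_BASE = "http://localhost:1337/v1"  # same local server as in etc/testing/test_api.py

def stream_chat(prompt):
    # Build an OpenAI-style chat completion request with streaming enabled.
    payload = {
        "model": "gpt-3.5-turbo",
        "stream": True,
        "messages": [{"role": "user", "content": prompt}],
    }
    with requests.post(f"{API_BASE}/chat/completions", json=payload, stream=True) as resp:
        for line in resp.iter_lines(decode_unicode=True):
            if not line or not line.startswith("data: "):
                continue  # skip blank separator lines between SSE events
            data = line[len("data: "):]
            if data.strip() == "[DONE]":  # defensive: a sentinel may not be sent
                break
            chunk = json.loads(data)
            if "error" in chunk:
                # This is the JSON the patched except-branch now yields on failure.
                raise RuntimeError(chunk["error"])
            delta = chunk["choices"][0].get("delta", {}).get("content")
            if delta:
                print(delta, end="", flush=True)

if __name__ == "__main__":
    stream_chat("write a poem about a tree")

Compared with the openai 0.x client used in the test script, reading the raw event stream this way makes the new error event visible to the caller instead of failing inside the SDK's own parsing.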