Diffstat
149 files changed, 5841 insertions, 4447 deletions
diff --git a/LEGAL_NOTICE.md b/LEGAL_NOTICE.md index a2d0806c..50a1d141 100644 --- a/LEGAL_NOTICE.md +++ b/LEGAL_NOTICE.md @@ -1,46 +1,55 @@ ## Legal Notice -This repository is _not_ associated with or endorsed by providers of the APIs contained in this GitHub repository. This project is intended **for educational purposes only**. This is just a little personal project. Sites may contact me to improve their security or request the removal of their site from this repository. - -Please note the following: - -## Legal Notice +This repository is **not associated with or endorsed** by the providers of the APIs contained herein. This project is intended **for educational purposes only**. It is a personal project aimed at learning and exploration. Owners of any included sites or services may contact me to improve their security or request the removal of their content from this repository. ### **Affiliation Disclaimer** -This repository is not associated with or endorsed by the providers of the APIs contained in this repository. The project is intended for educational purposes only. The APIs, services, trademarks, and other intellectual property mentioned in this repository are the property of their respective owners, with no claim of ownership or affiliation by this project. + +This repository is not associated with or endorsed by any of the API providers mentioned herein. All trademarks, API services, and other intellectual property referenced are the property of their respective owners. No claim of ownership or affiliation is made by this project. ### **Liability Limitation** -Under no circumstances shall the author of this repository be liable for any direct, indirect, incidental, special, consequential, or punitive damages, including but not limited to, loss of profits, data, or use, arising out of or in connection with the repository, regardless of whether such damages were foreseeable and whether the author was advised of the possibility of such damages. + +Under no circumstances shall the author of this repository be liable for any direct, indirect, incidental, special, consequential, or punitive damages—including but not limited to loss of profits, data, or use—arising out of or in connection with the repository. This limitation applies regardless of whether such damages were foreseeable or whether the author was advised of the possibility of such damages. ### **No Warranties** -The repository is provided on an "as is" and "as available" basis without any warranties of any kind, either express or implied, including but not limited to, implied warranties of merchantability, fitness for a particular purpose, or non-infringement. + +This repository is provided on an "as is" and "as available" basis without any warranties of any kind, express or implied. This includes, but is not limited to, implied warranties of merchantability, fitness for a particular purpose, and non-infringement. ### **User Responsibility** -Users assume all risk for their use of this repository and are solely responsible for any damage or loss, including but not limited to financial loss, of any kind, to any party, that results from the use or misuse of the repository and its contents. + +Users assume all risks associated with the use of this repository. They are solely responsible for any damage or loss—including financial loss—that results from the use or misuse of the repository and its contents. 
### **Legal Compliance** -Users are responsible for ensuring their use of the repository and its contents complies with all local, state, national, and international laws and regulations. + +Users are responsible for ensuring that their use of the repository and its contents complies with all applicable local, state, national, and international laws and regulations. ### **Indemnification** -Users agree to indemnify, defend, and hold harmless the author from any claims, liabilities, damages, losses, or expenses, including legal fees, arising out of or in any way connected with their use of this repository, violation of these terms, or infringement of any intellectual property or other rights of any person or entity. + +Users agree to indemnify, defend, and hold harmless the author from any claims, liabilities, damages, losses, or expenses—including legal fees—arising out of or in any way connected with their use of this repository, violation of these terms, or infringement of any intellectual property or other rights of any person or entity. ### **No Endorsement** + The inclusion of third-party content does not imply endorsement or recommendation of such content by the author. ### **Governing Law and Jurisdiction** -Any disputes arising out of or related to the use of this repository shall be governed by the laws of the author's jurisdiction, without regard to its conflict of law principles. + +Any disputes arising out of or related to the use of this repository shall be governed by the laws of the author's jurisdiction, without regard to conflict of law principles. ### **Severability** -If any provision of this notice is found to be unlawful, void, or unenforceable, then that provision shall be deemed severable from this notice and shall not affect the validity and enforceability of any remaining provisions. + +If any provision of this notice is found to be unlawful, void, or unenforceable, that provision shall be deemed severable from this notice and shall not affect the validity and enforceability of the remaining provisions. ### **Acknowledgment of Understanding** + By using this repository, users acknowledge that they have read, understood, and agree to be bound by these terms. ### **Updates and Changes** + The author reserves the right to modify, update, or remove any content, information, or features in this repository at any time without prior notice. Users are responsible for regularly reviewing the content and any changes made to this repository. ### **Unforeseen Consequences** -The author of this repository is not responsible for any consequences, damages, or losses arising from the use or misuse of this repository or the content provided by the third-party APIs. Users are solely responsible for their actions and any repercussions that may follow. + +The author is not responsible for any consequences, damages, or losses arising from the use or misuse of this repository or the content provided by third-party APIs. Users are solely responsible for their actions and any repercussions that may follow. ### **Educational Purpose** -Please note that this project and its content are provided strictly for educational purposes. Users acknowledge that they are using the APIs and models at their own risk and agree to comply with any applicable laws and regulations. + +This project and its content are provided strictly for educational purposes. Users acknowledge that they are using the APIs and models at their own risk and agree to comply with all applicable laws and regulations. 
@@ -1,18 +1,23 @@ + + ![248433934-7886223b-c1d1-4260-82aa-da5741f303bb](https://github.com/xtekky/gpt4free/assets/98614666/ea012c87-76e0-496a-8ac4-e2de090cc6c9) <a href="https://trendshift.io/repositories/1692" target="_blank"><img src="https://trendshift.io/api/badge/repositories/1692" alt="xtekky%2Fgpt4free | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a> --- -Written by [@xtekky](https://github.com/xtekky) & maintained by [@hlohaus](https://github.com/hlohaus) +<p align="center"><strong>Written by <a href="https://github.com/xtekky">@xtekky</a></strong></p> <div id="top"></div> -> By using this repository or any code related to it, you agree to the [legal notice](https://github.com/xtekky/gpt4free/blob/main/LEGAL_NOTICE.md). The author is **not responsible for the usage of this repository nor endorses it**, nor is the author responsible for any copies, forks, re-uploads made by other users, or anything else related to GPT4Free. This is the author's only account and repository. To prevent impersonation or irresponsible actions, please comply with the GNU GPL license this Repository uses. +> [!IMPORTANT] +> By using this repository or any code related to it, you agree to the [legal notice](LEGAL_NOTICE.md). The author is **not responsible for the usage of this repository nor endorses it**, nor is the author responsible for any copies, forks, re-uploads made by other users, or anything else related to GPT4Free. This is the author's only account and repository. To prevent impersonation or irresponsible actions, please comply with the GNU GPL license this Repository uses. -> [!Warning] > _"gpt4free"_ serves as a **PoC** (proof of concept), demonstrating the development of an API package with multi-provider requests, with features like timeouts, load balance and flow control. +> [!WARNING] +> _"gpt4free"_ serves as a **PoC** (proof of concept), demonstrating the development of an API package with multi-provider requests, with features like timeouts, load balance and flow control. -> [!Note] > <sup><strong>Lastet version:</strong></sup> [![PyPI version](https://img.shields.io/pypi/v/g4f?color=blue)](https://pypi.org/project/g4f) [![Docker version](https://img.shields.io/docker/v/hlohaus789/g4f?label=docker&color=blue)](https://hub.docker.com/r/hlohaus789/g4f) +> [!NOTE] +> <sup><strong>Latest version:</strong></sup> [![PyPI version](https://img.shields.io/pypi/v/g4f?color=blue)](https://pypi.org/project/g4f) [![Docker version](https://img.shields.io/docker/v/hlohaus789/g4f?label=docker&color=blue)](https://hub.docker.com/r/hlohaus789/g4f) > <sup><strong>Stats:</strong></sup> [![Downloads](https://static.pepy.tech/badge/g4f)](https://pepy.tech/project/g4f) [![Downloads](https://static.pepy.tech/badge/g4f/month)](https://pepy.tech/project/g4f) ```sh @@ -24,64 +29,64 @@ docker pull hlohaus789/g4f ``` ## 🆕 What's New + - **For comprehensive details on new features and updates, please refer to our** [Releases](https://github.com/xtekky/gpt4free/releases) **page** + - **Installation Guide for Windows (.exe):** 💻 [Installation Guide for Windows (.exe)](#installation-guide-for-windows-exe) + - **Join our Telegram Channel:** 📨 [telegram.me/g4f_channel](https://telegram.me/g4f_channel) + - **Join our Discord Group:** 💬🆕️ [discord.gg/6yrm7H4B](https://discord.gg/6yrm7H4B) -- Added `gpt-4o`, simply use `gpt-4o` in `chat.completion.create`. 
-- Installation Guide for Windows (.exe): 💻 [#installation-guide-for-windows](#installation-guide-for-windows-exe) -- Join our Telegram Channel: 📨 [telegram.me/g4f_channel](https://telegram.me/g4f_channel) -- Join our Discord Group: 💬 [discord.gg/XfybzPXPH5](https://discord.gg/XfybzPXPH5) -- `g4f` now supports 100% local inference: 🧠 [local-docs](https://g4f.mintlify.app/docs/core/usage/local) ## 🔻 Site Takedown Is your site on this repository and you want to take it down? Send an email to takedown@g4f.ai with proof it is yours and it will be removed as fast as possible. To prevent reproduction please secure your API. 😉 ## 🚀 Feedback and Todo - -You can always leave some feedback here: https://forms.gle/FeWV9RLEedfdkmFN6 - -As per the survey, here is a list of improvements to come - -- [x] Update the repository to include the new openai library syntax (ex: `Openai()` class) | completed, use `g4f.client.Client` -- [ ] Golang implementation -- [ ] 🚧 Improve Documentation (in /docs & Guides, Howtos, & Do video tutorials) -- [x] Improve the provider status list & updates -- [ ] Tutorials on how to reverse sites to write your own wrapper (PoC only ofc) -- [x] Improve the Bing wrapper. (Wait and Retry or reuse conversation) -- [ ] 🚧 Write a standard provider performance test to improve the stability -- [ ] Potential support and development of local models -- [ ] 🚧 Improve compatibility and error handling +**You can always leave some feedback here:** https://forms.gle/FeWV9RLEedfdkmFN6 + +**As per the survey, here is a list of improvements to come** + - [x] Update the repository to include the new openai library syntax (ex: `Openai()` class) | completed, use `g4f.client.Client` + - [ ] Golang implementation + - [ ] 🚧 Improve Documentation (in /docs & Guides, Howtos, & Do video tutorials) + - [x] Improve the provider status list & updates + - [ ] Tutorials on how to reverse sites to write your own wrapper (PoC only ofc) + - [x] Improve the Bing wrapper. 
(Wait and Retry or reuse conversation) + - [ ] 🚧 Write a standard provider performance test to improve the stability + - [ ] Potential support and development of local models + - [ ] 🚧 Improve compatibility and error handling ## 📚 Table of Contents - -- [🆕 What's New](#-whats-new) -- [📚 Table of Contents](#-table-of-contents) -- [🛠️ Getting Started](#-getting-started) - - [Docker Container Guide](#docker-container-guide) - - [Installation Guide for Windows (.exe)](#installation-guide-for-windows-exe) - - [Use python](#use-python) - - [Prerequisites](#prerequisites) - - [Install using PyPI package:](#install-using-pypi-package) - - [Install from source:](#install-from-source) - - [Install using Docker:](#install-using-docker) -- [💡 Usage](#-usage) - - [Text Generation](#text-generation) - - [Image Generation](#image-generation) - - [Web UI](#web-ui) - - [Interference API](#interference-api) - - [Configuration](#configuration) -- [🚀 Providers and Models](#-providers-and-models) - - [GPT-4](#gpt-4) - - [GPT-3.5](#gpt-35) - - [Other](#other) - - [Models](#models) -- [🔗 Powered by gpt4free](#-powered-by-gpt4free) -- [🤝 Contribute](#-contribute) - - [How do i create a new Provider?](#guide-how-do-i-create-a-new-provider) - - [How can AI help me with writing code?](#guide-how-can-ai-help-me-with-writing-code) -- [🙌 Contributors](#-contributors) -- [©️ Copyright](#-copyright) -- [⭐ Star History](#-star-history) -- [📄 License](#-license) + - [🆕 What's New](#-whats-new) + - [📚 Table of Contents](#-table-of-contents) + - [🛠️ Getting Started](#-getting-started) + - [Docker Container Guide](#docker-container-guide) + - [Installation Guide for Windows (.exe)](#installation-guide-for-windows-exe) + - [Use python](#use-python) + - [Prerequisites](#prerequisites) + - [Install using PyPI package](#install-using-pypi-package) + - [Install from source](#install-from-source) + - [Install using Docker](#install-using-docker) + - [💡 Usage](#-usage) + - [Text Generation](#text-generation) + - [Image Generation](#image-generation) + - [Web UI](#web-ui) + - [Interference API](#interference-api) + - [Local Inference](docs/local.md) + - [Configuration](#configuration) + - [Full Documentation for Python API](#full-documentation-for-python-api) + - **New:** + - [Async Client API from G4F](docs/async_client.md) + - [Client API like the OpenAI Python library](docs/client.md) + - **Legacy** + - [Legacy API with python modules](docs/legacy/legacy.md) + - [Legacy AsyncClient API from G4F](docs/legacy/legacy_async_client.md) + - [🚀 Providers and Models](docs/providers-and-models.md) + - [🔗 Powered by gpt4free](#-powered-by-gpt4free) + - [🤝 Contribute](#-contribute) + - [How do i create a new Provider?](#guide-how-do-i-create-a-new-provider) + - [How can AI help me with writing code?](#guide-how-can-ai-help-me-with-writing-code) + - [🙌 Contributors](#-contributors) + - [©️ Copyright](#-copyright) + - [⭐ Star History](#-star-history) + - [📄 License](#-license) ## 🛠️ Getting Started @@ -125,15 +130,15 @@ To ensure the seamless operation of our application, please follow the instructi By following these steps, you should be able to successfully install and run the application on your Windows system. If you encounter any issues during the installation process, please refer to our Issue Tracker or try to get contact over Discord for assistance. 
-Run the **Webview UI** on other Platfroms: +Run the **Webview UI** on other Platforms: -- [/docs/guides/webview](https://github.com/xtekky/gpt4free/blob/main/docs/webview.md) +- [/docs/guides/webview](docs/webview.md) ##### Use your smartphone: Run the Web UI on Your Smartphone: -- [/docs/guides/phone](https://github.com/xtekky/gpt4free/blob/main/docs/guides/phone.md) +- [/docs/guides/phone](docs/guides/phone.md) #### Use python @@ -149,17 +154,16 @@ pip install -U g4f[all] ``` How do I install only parts or do disable parts? -Use partial requirements: [/docs/requirements](https://github.com/xtekky/gpt4free/blob/main/docs/requirements.md) +Use partial requirements: [/docs/requirements](docs/requirements.md) ##### Install from source: How do I load the project using git and installing the project requirements? -Read this tutorial and follow it step by step: [/docs/git](https://github.com/xtekky/gpt4free/blob/main/docs/git.md) +Read this tutorial and follow it step by step: [/docs/git](docs/git.md) ##### Install using Docker: - How do I build and run composer image from source? -Use docker-compose: [/docs/docker](https://github.com/xtekky/gpt4free/blob/main/docs/docker.md) +Use docker-compose: [/docs/docker](docs/docker.md) ## 💡 Usage @@ -170,9 +174,9 @@ from g4f.client import Client client = Client() response = client.chat.completions.create( - model="gpt-3.5-turbo", + model="gpt-4o-mini", messages=[{"role": "user", "content": "Hello"}], - ... + # Add any other necessary parameters ) print(response.choices[0].message.content) ``` @@ -182,49 +186,47 @@ Hello! How can I assist you today? ``` #### Image Generation - ```python from g4f.client import Client client = Client() response = client.images.generate( - model="gemini", - prompt="a white siamese cat", - ... + model="flux", + prompt="a white siamese cat", + # Add any other necessary parameters ) + image_url = response.data[0].url +print(f"Generated image URL: {image_url}") ``` -[![Image with cat](/docs/cat.jpeg)](https://github.com/xtekky/gpt4free/blob/main/docs/client.md) +[![Image with cat](/docs/cat.jpeg)](docs/client.md) -**Full Documentation for Python API** - -- New AsyncClient API from G4F: [/docs/async_client](https://github.com/xtekky/gpt4free/blob/main/docs/async_client.md) -- Client API like the OpenAI Python library: [/docs/client](https://github.com/xtekky/gpt4free/blob/main/docs/client.md) -- Legacy API with python modules: [/docs/legacy](https://github.com/xtekky/gpt4free/blob/main/docs/legacy.md) +#### **Full Documentation for Python API** + - **New:** + - **Async Client API from G4F:** [/docs/async_client](docs/async_client.md) + - **Client API like the OpenAI Python library:** [/docs/client](docs/client.md) + + - **Legacy:** + - **Legacy API with python modules:** [/docs/legacy/legacy](docs/legacy/legacy.md) + - **Legacy AsyncClient API from G4F:** [/docs/async_client](docs/legacy/legacy_async_client.md) #### Web UI - -To start the web interface, type the following codes in python: - +**To start the web interface, type the following codes in python:** ```python from g4f.gui import run_gui + run_gui() ``` - or execute the following command: - ```bash python -m g4f.cli gui -port 8080 -debug ``` #### Interference API - You can use the Interference API to serve other OpenAI integrations with G4F. 
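A minimal sketch of consuming it with the official `openai` Python package (an illustrative example, not from the original README): it assumes the interference server is already running locally at the address shown below and that the placeholder API key is accepted by the local endpoint.

```python
# Hedged sketch: point the standard OpenAI client at a locally running
# G4F interference server. Assumptions: the server listens on
# http://localhost:1337/v1 (the address given below) and does not
# require a real API key, so a placeholder is used.
from openai import OpenAI

client = OpenAI(
    api_key="secret",                     # placeholder; assumed to be ignored locally
    base_url="http://localhost:1337/v1",  # local interference endpoint
)

response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Say this is a test"}],
)
print(response.choices[0].message.content)
```

Swapping only `base_url` in this way is the same pattern other OpenAI-compatible integrations can follow.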
- -See docs: [/docs/interference](https://github.com/xtekky/gpt4free/blob/main/docs/interference.md) - -Access with: http://localhost:1337/v1 +**See docs:** [/docs/interference](docs/interference-api.md) +**Access with:** http://localhost:1337/v1 ### Configuration @@ -298,159 +300,24 @@ To utilize the OpenaiChat provider, a .har file is required from https://chatgpt ##### Storing the .HAR File -- Place the exported .har file in the `./har_and_cookies` directory if you are using Docker. Alternatively, you can store it in any preferred location within your current working directory. +- Place the exported .har file in the `./har_and_cookies` directory if you are using Docker. Alternatively, if you are using Python from a terminal, you can store it in a `./har_and_cookies` directory within your current working directory. -Note: Ensure that your .har file is stored securely, as it may contain sensitive information. +> **Note:** Ensure that your .har file is stored securely, as it may contain sensitive information. #### Using Proxy If you want to hide or change your IP address for the providers, you can set a proxy globally via an environment variable: -- On macOS and Linux: - +**- On macOS and Linux:** ```bash export G4F_PROXY="http://host:port" ``` -- On Windows: - +**- On Windows:** ```bash set G4F_PROXY=http://host:port ``` -## 🚀 Providers and Models - -### GPT-4 - -| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth | -| -------------------------------------- | ------------------------- | ------- | ----- | ------ | ---------------------------------------------------------- | ----- | -| [bing.com](https://bing.com/chat) | `g4f.Provider.Bing` | ❌ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | -| [chatgpt.ai](https://chatgpt.ai) | `g4f.Provider.ChatgptAi` | ❌ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | -| [liaobots.site](https://liaobots.site) | `g4f.Provider.Liaobots` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | -| [chatgpt.com](https://chatgpt.com) | `g4f.Provider.OpenaiChat` | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌+✔️ | -| [raycast.com](https://raycast.com) | `g4f.Provider.Raycast` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ | -| [beta.theb.ai](https://beta.theb.ai) | `g4f.Provider.Theb` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | -| [you.com](https://you.com) | `g4f.Provider.You` | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | - -## Best OpenSource Models - -While we wait for gpt-5, here is a list of new models that are at least better than gpt-3.5-turbo. **Some are better than gpt-4**. Expect this list to grow. 
- -| Website | Provider | parameters | better than | -| ---------------------------------------------------------------------------------------- | ----------------------------------- | ----------------- | ------------------ | -| [claude-3-opus](https://anthropic.com/) | `g4f.Provider.You` | ?B | gpt-4-0125-preview | -| [command-r+](https://txt.cohere.com/command-r-plus-microsoft-azure/) | `g4f.Provider.HuggingChat` | 104B | gpt-4-0314 | -| [llama-3-70b](https://meta.ai/) | `g4f.Provider.Llama` or `DeepInfra` | 70B | gpt-4-0314 | -| [claude-3-sonnet](https://anthropic.com/) | `g4f.Provider.You` | ?B | gpt-4-0314 | -| [reka-core](https://chat.reka.ai/) | `g4f.Provider.Reka` | 21B | gpt-4-vision | -| [dbrx-instruct](https://www.databricks.com/blog/introducing-dbrx-new-state-art-open-llm) | `g4f.Provider.DeepInfra` | 132B / 36B active | gpt-3.5-turbo | -| [mixtral-8x22b](https://huggingface.co/mistral-community/Mixtral-8x22B-v0.1) | `g4f.Provider.DeepInfra` | 176B / 44b active | gpt-3.5-turbo | - -### GPT-3.5 - -| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth | -| ---------------------------------------------------------- | ----------------------------- | ------- | ----- | ------ | ---------------------------------------------------------- | ---- | -| [chat3.aiyunos.top](https://chat3.aiyunos.top/) | `g4f.Provider.AItianhuSpace` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | -| [chat10.aichatos.xyz](https://chat10.aichatos.xyz) | `g4f.Provider.Aichatos` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | -| [chatforai.store](https://chatforai.store) | `g4f.Provider.ChatForAi` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | -| [chatgpt4online.org](https://chatgpt4online.org) | `g4f.Provider.Chatgpt4Online` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | -| [chatgpt-free.cc](https://www.chatgpt-free.cc) | `g4f.Provider.ChatgptNext` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | -| [chatgptx.de](https://chatgptx.de) | `g4f.Provider.ChatgptX` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | -| [duckduckgo.com](https://duckduckgo.com/duckchat) | `g4f.Provider.DDG` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | -| [feedough.com](https://www.feedough.com) | `g4f.Provider.Feedough` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | -| [flowgpt.com](https://flowgpt.com/chat) | `g4f.Provider.FlowGpt` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | -| [freegptsnav.aifree.site](https://freegptsnav.aifree.site) | `g4f.Provider.FreeGpt` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | -| [gpttalk.ru](https://gpttalk.ru) | `g4f.Provider.GptTalkRu` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | -| [koala.sh](https://koala.sh) | `g4f.Provider.Koala` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | -| [app.myshell.ai](https://app.myshell.ai/chat) | `g4f.Provider.MyShell` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | -| [perplexity.ai](https://www.perplexity.ai) | `g4f.Provider.PerplexityAi` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | -| [poe.com](https://poe.com) | `g4f.Provider.Poe` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ | -| 
[talkai.info](https://talkai.info) | `g4f.Provider.TalkAi` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | -| [chat.vercel.ai](https://chat.vercel.ai) | `g4f.Provider.Vercel` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | -| [aitianhu.com](https://www.aitianhu.com) | `g4f.Provider.AItianhu` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | -| [chatgpt.bestim.org](https://chatgpt.bestim.org) | `g4f.Provider.Bestim` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | -| [chatbase.co](https://www.chatbase.co) | `g4f.Provider.ChatBase` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | -| [chatgptdemo.info](https://chatgptdemo.info/chat) | `g4f.Provider.ChatgptDemo` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | -| [chat.chatgptdemo.ai](https://chat.chatgptdemo.ai) | `g4f.Provider.ChatgptDemoAi` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | -| [chatgptfree.ai](https://chatgptfree.ai) | `g4f.Provider.ChatgptFree` | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | -| [chatgptlogin.ai](https://chatgptlogin.ai) | `g4f.Provider.ChatgptLogin` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | -| [chat.3211000.xyz](https://chat.3211000.xyz) | `g4f.Provider.Chatxyz` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | -| [gpt6.ai](https://gpt6.ai) | `g4f.Provider.Gpt6` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | -| [gptchatly.com](https://gptchatly.com) | `g4f.Provider.GptChatly` | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | -| [ai18.gptforlove.com](https://ai18.gptforlove.com) | `g4f.Provider.GptForLove` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | -| [gptgo.ai](https://gptgo.ai) | `g4f.Provider.GptGo` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | -| [gptgod.site](https://gptgod.site) | `g4f.Provider.GptGod` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | -| [onlinegpt.org](https://onlinegpt.org) | `g4f.Provider.OnlineGpt` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ | - -### Other - -| Website | Provider | Stream | Status | Auth | -| -------------------------------------------------------------------------------------------- | ----------------------------- | ------ | ---------------------------------------------------------- | ---- | -| [openchat.team](https://openchat.team) | `g4f.Provider.Aura` | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | -| [blackbox.ai](https://www.blackbox.ai) | `g4f.Provider.Blackbox` | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | -| [cohereforai-c4ai-command-r-plus.hf.space](https://cohereforai-c4ai-command-r-plus.hf.space) | `g4f.Provider.Cohere` | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | -| [deepinfra.com](https://deepinfra.com) | `g4f.Provider.DeepInfra` | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | -| [free.chatgpt.org.uk](https://free.chatgpt.org.uk) | `g4f.Provider.FreeChatgpt` | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | -| [gemini.google.com](https://gemini.google.com) | `g4f.Provider.Gemini` | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ | -| 
[ai.google.dev](https://ai.google.dev) | `g4f.Provider.GeminiPro` | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ | -| [gemini-chatbot-sigma.vercel.app](https://gemini-chatbot-sigma.vercel.app) | `g4f.Provider.GeminiProChat` | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | -| [developers.sber.ru](https://developers.sber.ru/gigachat) | `g4f.Provider.GigaChat` | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ | -| [console.groq.com](https://console.groq.com/playground) | `g4f.Provider.Groq` | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ | -| [huggingface.co](https://huggingface.co/chat) | `g4f.Provider.HuggingChat` | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | -| [huggingface.co](https://huggingface.co/chat) | `g4f.Provider.HuggingFace` | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | -| [llama2.ai](https://www.llama2.ai) | `g4f.Provider.Llama` | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | -| [meta.ai](https://www.meta.ai) | `g4f.Provider.MetaAI` | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | -| [openrouter.ai](https://openrouter.ai) | `g4f.Provider.OpenRouter` | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ | -| [labs.perplexity.ai](https://labs.perplexity.ai) | `g4f.Provider.PerplexityLabs` | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | -| [pi.ai](https://pi.ai/talk) | `g4f.Provider.Pi` | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | -| [replicate.com](https://replicate.com) | `g4f.Provider.Replicate` | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ | -| [theb.ai](https://theb.ai) | `g4f.Provider.ThebApi` | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ | -| [whiterabbitneo.com](https://www.whiterabbitneo.com) | `g4f.Provider.WhiteRabbitNeo` | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ | -| [bard.google.com](https://bard.google.com) | `g4f.Provider.Bard` | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ✔️ | - -### Models - -| Model | Base Provider | Provider | Website | -| -------------------------- | ------------- | ------------------------ | ----------------------------------------------- | -| gpt-3.5-turbo | OpenAI | 8+ Providers | [openai.com](https://openai.com/) | -| gpt-4 | OpenAI | 2+ Providers | [openai.com](https://openai.com/) | -| gpt-4-turbo | OpenAI | g4f.Provider.Bing | [openai.com](https://openai.com/) | -| Llama-2-7b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) | -| Llama-2-13b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) | -| Llama-2-70b-chat-hf | Meta | 3+ Providers | [llama.meta.com](https://llama.meta.com/) | -| Meta-Llama-3-8b-instruct | Meta | 1+ Providers | [llama.meta.com](https://llama.meta.com/) | -| Meta-Llama-3-70b-instruct | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) | -| CodeLlama-34b-Instruct-hf | Meta | g4f.Provider.HuggingChat | [llama.meta.com](https://llama.meta.com/) | -| CodeLlama-70b-Instruct-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) | -| Mixtral-8x7B-Instruct-v0.1 | Huggingface | 4+ Providers | [huggingface.co](https://huggingface.co/) | -| Mistral-7B-Instruct-v0.1 | Huggingface | 3+ Providers | [huggingface.co](https://huggingface.co/) | -| Mistral-7B-Instruct-v0.2 | Huggingface | g4f.Provider.DeepInfra | 
[huggingface.co](https://huggingface.co/) | -| zephyr-orpo-141b-A35b-v0.1 | Huggingface | 2+ Providers | [huggingface.co](https://huggingface.co/) | -| dolphin-2.6-mixtral-8x7b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) | -| gemini | Google | g4f.Provider.Gemini | [gemini.google.com](https://gemini.google.com/) | -| gemini-pro | Google | 2+ Providers | [gemini.google.com](https://gemini.google.com/) | -| claude-v2 | Anthropic | 1+ Providers | [anthropic.com](https://www.anthropic.com/) | -| claude-3-opus | Anthropic | g4f.Provider.You | [anthropic.com](https://www.anthropic.com/) | -| claude-3-sonnet | Anthropic | g4f.Provider.You | [anthropic.com](https://www.anthropic.com/) | -| lzlv_70b_fp16_hf | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) | -| airoboros-70b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) | -| openchat_3.5 | Huggingface | 2+ Providers | [huggingface.co](https://huggingface.co/) | -| pi | Inflection | g4f.Provider.Pi | [inflection.ai](https://inflection.ai/) | - -### Image and Vision Models - -| Label | Provider | Image Model | Vision Model | Website | -| ------------------------- | ------------------------- | ----------------- | --------------- | ---------------------------------------------- | -| Microsoft Copilot in Bing | `g4f.Provider.Bing` | dall-e-3 | gpt-4-vision | [bing.com](https://bing.com/chat) | -| DeepInfra | `g4f.Provider.DeepInfra` | stability-ai/sdxl | llava-1.5-7b-hf | [deepinfra.com](https://deepinfra.com) | -| Gemini | `g4f.Provider.Gemini` | ✔️ | ✔️ | [gemini.google.com](https://gemini.google.com) | -| Gemini API | `g4f.Provider.GeminiPro` | ❌ | gemini-1.5-pro | [ai.google.dev](https://ai.google.dev) | -| Meta AI | `g4f.Provider.MetaAI` | ✔️ | ❌ | [meta.ai](https://www.meta.ai) | -| OpenAI ChatGPT | `g4f.Provider.OpenaiChat` | dall-e-3 | gpt-4-vision | [chatgpt.com](https://chatgpt.com) | -| Reka | `g4f.Provider.Reka` | ❌ | ✔️ | [chat.reka.ai](https://chat.reka.ai/) | -| Replicate | `g4f.Provider.Replicate` | stability-ai/sdxl | llava-v1.6-34b | [replicate.com](https://replicate.com) | -| You.com | `g4f.Provider.You` | dall-e-3 | ✔️ | [you.com](https://you.com) | - ## 🔗 Powered by gpt4free <table> @@ -879,23 +746,46 @@ While we wait for gpt-5, here is a list of new models that are at least better t </a> </td> </tr> + <tr> + <td> + <a href="https://github.com/yjg30737/pyqt-openai"> + <b>VividNode (pyqt-openai)</b> + </a> + </td> + <td> + <a href="https://github.com/yjg30737/pyqt-openai/stargazers"> + <img alt="Stars" src="https://img.shields.io/github/stars/yjg30737/pyqt-openai?style=flat-square&labelColor=343b41" /> + </a> + </td> + <td> + <a href="https://github.com/yjg30737/pyqt-openai/network/members"> + <img alt="Forks" src="https://img.shields.io/github/forks/yjg30737/pyqt-openai?style=flat-square&labelColor=343b41" /> + </a> + </td> + <td> + <a href="https://github.com/yjg30737/pyqt-openai/issues"> + <img alt="Issues" src="https://img.shields.io/github/issues/yjg30737/pyqt-openai?style=flat-square&labelColor=343b41" /> + </a> + </td> + <td> + <a href="https://github.com/yjg30737/pyqt-openai/pulls"> + <img alt="Pull Requests" src="https://img.shields.io/github/issues-pr/yjg30737/pyqt-openai?style=flat-square&labelColor=343b41" /> + </a> + </td> + </tr> </tbody> </table> ## 🤝 Contribute - We welcome contributions from the community. 
Whether you're adding new providers or features, or simply fixing typos and making small improvements, your input is valued. Creating a pull request is all it takes – our co-pilot will handle the code review process. Once all changes have been addressed, we'll merge the pull request into the main branch and release the updates at a later time. ###### Guide: How do i create a new Provider? - -- Read: [/docs/guides/create_provider](https://github.com/xtekky/gpt4free/blob/main/docs/guides/create_provider.md) + - **Read:** [Create Provider Guide](docs/guides/create_provider.md) ###### Guide: How can AI help me with writing code? - -- Read: [/docs/guides/help_me](https://github.com/xtekky/gpt4free/blob/main/docs/guides/help_me.md) + - **Read:** [AI Assistance Guide](docs/guides/help_me.md) ## 🙌 Contributors - A list of all contributors is available [here](https://github.com/xtekky/gpt4free/graphs/contributors) <a href="https://github.com/xtekky" target="_blank"><img src="https://avatars.githubusercontent.com/u/98614666?v=4&s=45" width="45" title="xtekky"></a> @@ -985,4 +875,7 @@ This project is licensed under <a href="https://github.com/xtekky/gpt4free/blob/ </tr> </table> +--- + <p align="right">(<a href="#top">🔼 Back to top</a>)</p> + diff --git a/docker-compose.yml b/docker-compose.yml index 1b99ba97..3f8bc4ea 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -12,4 +12,6 @@ services: ports: - '8080:8080' - '1337:1337' - - '7900:7900'
\ No newline at end of file + - '7900:7900' + environment: + - OLLAMA_HOST=host.docker.internal diff --git a/docs/async_client.md b/docs/async_client.md index 003cfb20..7194c792 100644 --- a/docs/async_client.md +++ b/docs/async_client.md @@ -1,166 +1,396 @@ -# How to Use the G4F AsyncClient API - -The AsyncClient API is the asynchronous counterpart to the standard G4F Client API. It offers the same functionality as the synchronous API, but with the added benefit of improved performance due to its asynchronous nature. - -Designed to maintain compatibility with the existing OpenAI API, the G4F AsyncClient API ensures a seamless transition for users already familiar with the OpenAI client. +# G4F - Async client API Guide +The G4F async client API is a powerful asynchronous interface for interacting with various AI models. This guide provides comprehensive information on how to use the API effectively, including setup, usage examples, best practices, and important considerations for optimal performance. + + +## Compatibility Note +The G4F async client API is designed to be compatible with the OpenAI API, making it easy for developers familiar with OpenAI's interface to transition to G4F. + +## Table of Contents + - [Introduction](#introduction) + - [Key Features](#key-features) + - [Getting Started](#getting-started) + - [Initializing the Client](#initializing-the-client) + - [Creating Chat Completions](#creating-chat-completions) + - [Configuration](#configuration) + - [Usage Examples](#usage-examples) + - [Text Completions](#text-completions) + - [Streaming Completions](#streaming-completions) + - [Using a Vision Model](#using-a-vision-model) + - [Image Generation](#image-generation) + - [Concurrent Tasks](#concurrent-tasks-with-asynciogather) + - [Available Models and Providers](#available-models-and-providers) + - [Error Handling and Best Practices](#error-handling-and-best-practices) + - [Rate Limiting and API Usage](#rate-limiting-and-api-usage) + - [Conclusion](#conclusion) + + + +## Introduction +The G4F async client API is an asynchronous version of the standard G4F Client API. It offers the same functionality as the synchronous API but with improved performance due to its asynchronous nature. This guide will walk you through the key features and usage of the G4F async client API. + ## Key Features + - **Custom Providers**: Use custom providers for enhanced flexibility. + - **ChatCompletion Interface**: Interact with chat models through the ChatCompletion class. + - **Streaming Responses**: Get responses iteratively as they are received. + - **Non-Streaming Responses**: Generate complete responses in a single call. + - **Image Generation and Vision Models**: Support for image-related tasks. -The G4F AsyncClient API offers several key features: - -- **Custom Providers:** The G4F Client API allows you to use custom providers. This feature enhances the flexibility of the API, enabling it to cater to a wide range of use cases. -- **ChatCompletion Interface:** The G4F package provides an interface for interacting with chat models through the ChatCompletion class. This class provides methods for creating both streaming and non-streaming responses. -- **Streaming Responses:** The ChatCompletion.create method can return a response iteratively as and when they are received if the stream parameter is set to True. -- **Non-Streaming Responses:** The ChatCompletion.create method can also generate non-streaming responses. 
-- **Image Generation and Vision Models:** The G4F Client API also supports image generation and vision models, expanding its utility beyond text-based interactions. - -## Initializing the Client - -To utilize the G4F `AsyncClient`, you need to create a new instance. Below is an example showcasing how to initialize the client with custom providers: + +## Getting Started +### Initializing the Client +**To use the G4F `Client`, create a new instance:** ```python -from g4f.client import AsyncClient -from g4f.Provider import BingCreateImages, OpenaiChat, Gemini +from g4f.client import Client +from g4f.Provider import OpenaiChat, Gemini -client = AsyncClient( +client = Client( provider=OpenaiChat, image_provider=Gemini, - ... + # Add other parameters as needed ) ``` -In this example: -- `provider` specifies the primary provider for generating text completions. -- `image_provider` specifies the provider for image-related functionalities. -## Configuration +## Creating Chat Completions +**Here’s an improved example of creating chat completions:** +```python +response = await async_client.chat.completions.create( + model="gpt-4o-mini", + messages=[ + { + "role": "user", + "content": "Say this is a test" + } + ] + # Add other parameters as needed +) +``` -You can configure the `AsyncClient` with additional settings, such as an API key for your provider and a proxy for all outgoing requests: +**This example:** + - Asks a specific question `Say this is a test` + - Configures various parameters like temperature and max_tokens for more control over the output + - Disables streaming for a complete response -```python -from g4f.client import AsyncClient +You can adjust these parameters based on your specific needs. + -client = AsyncClient( +### Configuration +**Configure the `Client` with additional settings:** +```python +client = Client( api_key="your_api_key_here", proxies="http://user:pass@host", - ... + # Add other parameters as needed ) ``` -- `api_key`: Your API key for the provider. -- `proxies`: The proxy configuration for routing requests. - -## Using AsyncClient + +## Usage Examples ### Text Completions +**Generate text completions using the ChatCompletions endpoint:** +```python +import asyncio +from g4f.client import Client -You can use the `ChatCompletions` endpoint to generate text completions. Here’s how you can do it: +async def main(): + client = Client() + + response = await client.chat.completions.async_create( + model="gpt-4o-mini", + messages=[ + { + "role": "user", + "content": "Say this is a test" + } + ] + ) + + print(response.choices[0].message.content) -```python -response = await client.chat.completions.create( - model="gpt-3.5-turbo", - messages=[{"role": "user", "content": "Say this is a test"}], - ... -) -print(response.choices[0].message.content) +asyncio.run(main()) ``` + + ### Streaming Completions +**Process responses incrementally as they are generated:** +```python +import asyncio +from g4f.client import Client + +async def main(): + client = Client() + + stream = await client.chat.completions.async_create( + model="gpt-4", + messages=[ + { + "role": "user", + "content": "Say this is a test" + } + ], + stream=True, + ) + + async for chunk in stream: + if chunk.choices[0].delta.content: + print(chunk.choices[0].delta.content, end="") + +asyncio.run(main()) +``` -The `AsyncClient` also supports streaming completions. 
This allows you to process the response incrementally as it is generated: + +### Using a Vision Model +**Analyze an image and generate a description:** ```python -stream = client.chat.completions.create( - model="gpt-4", - messages=[{"role": "user", "content": "Say this is a test"}], - stream=True, - ... -) -async for chunk in stream: - if chunk.choices[0].delta.content: - print(chunk.choices[0].delta.content or "", end="") +import g4f +import requests +import asyncio +from g4f.client import Client + +async def main(): + client = Client() + + image = requests.get("https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/cat.jpeg", stream=True).raw + + response = await client.chat.completions.async_create( + model=g4f.models.default, + provider=g4f.Provider.Bing, + messages=[ + { + "role": "user", + "content": "What's in this image?" + } + ], + image=image + ) + + print(response.choices[0].message.content) + +asyncio.run(main()) ``` -In this example: -- `stream=True` enables streaming of the response. + -### Example: Using a Vision Model +### Image Generation +**Generate images using a specified prompt:** +```python +import asyncio +from g4f.client import Client -The following code snippet demonstrates how to use a vision model to analyze an image and generate a description based on the content of the image. This example shows how to fetch an image, send it to the model, and then process the response. +async def main(): + client = Client() + + response = await client.images.async_generate( + prompt="a white siamese cat", + model="flux" + ) + + image_url = response.data[0].url + print(f"Generated image URL: {image_url}") +asyncio.run(main()) +``` + + + +#### Base64 Response Format ```python -import requests +import asyncio from g4f.client import Client -from g4f.Provider import Bing -client = AsyncClient( - provider=Bing -) +async def main(): + client = Client() + + response = await client.images.async_generate( + prompt="a white siamese cat", + model="flux", + response_format="b64_json" + ) + + base64_text = response.data[0].b64_json + print(base64_text) -image = requests.get("https://my_website/image.jpg", stream=True).raw -# Or: image = open("local_path/image.jpg", "rb") +asyncio.run(main()) +``` -response = client.chat.completions.create( - "", - messages=[{"role": "user", "content": "what is in this picture?"}], - image=image -) -print(response.choices[0].message.content) + + +### Concurrent Tasks with asyncio.gather +**Execute multiple tasks concurrently:** +```python +import asyncio +from g4f.client import Client + +async def main(): + client = Client() + + task1 = client.chat.completions.async_create( + model="gpt-4o-mini", + messages=[ + { + "role": "user", + "content": "Say this is a test" + } + ] + ) + + task2 = client.images.async_generate( + model="flux", + prompt="a white siamese cat" + ) + + chat_response, image_response = await asyncio.gather(task1, task2) + + print("Chat Response:") + print(chat_response.choices[0].message.content) + + print("Image Response:") + print(image_response.data[0].url) + +asyncio.run(main()) ``` -### Image Generation: + + +## Available Models and Providers +The G4F AsyncClient supports a wide range of AI models and providers, allowing you to choose the best option for your specific use case. **Here's a brief overview of the available models and providers:** + +### Models + - GPT-3.5-Turbo + - GPT-4o-Mini + - GPT-4 + - DALL-E 3 + - Gemini + - Claude (Anthropic) + - And more... 
+ + -You can generate images using a specified prompt: +### Providers + - OpenAI + - Google (for Gemini) + - Anthropic + - Bing + - Custom providers + + +**To use a specific model or provider, specify it when creating the client or in the API call:** ```python -response = await client.images.generate( - model="dall-e-3", - prompt="a white siamese cat", - ... +client = AsyncClient(provider=g4f.Provider.OpenaiChat) + +# or + +response = await client.chat.completions.async_create( + model="gpt-4", + provider=g4f.Provider.Bing, + messages=[ + { + "role": "user", + "content": "Hello, world!" + } + ] ) +``` + + -image_url = response.data[0].url +## Error Handling and Best Practices +Implementing proper error handling and following best practices is crucial when working with the G4F AsyncClient API. This ensures your application remains robust and can gracefully handle various scenarios. **Here are some key practices to follow:** + +1. **Use try-except blocks to catch and handle exceptions:** +```python +try: + response = await client.chat.completions.async_create( + model="gpt-4o-mini", + messages=[ + { + "role": "user", + "content": "Hello, world!" + } + ] + ) +except Exception as e: + print(f"An error occurred: {e}") ``` -#### Base64 as the response format +2. **Check the response status and handle different scenarios:** +```python +if response.choices: + print(response.choices[0].message.content) +else: + print("No response generated") +``` +3. **Implement retries for transient errors:** ```python -response = await client.images.generate( - prompt="a cool cat", - response_format="b64_json" -) +import asyncio +from tenacity import retry, stop_after_attempt, wait_exponential -base64_text = response.data[0].b64_json +@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10)) +async def make_api_call(): + # Your API call here + pass ``` -### Example usage with asyncio.gather + -Start two tasks at the same time: +## Rate Limiting and API Usage +When working with the G4F AsyncClient API, it's important to implement rate limiting and monitor your API usage. This helps ensure fair usage, prevents overloading the service, and optimizes your application's performance. Here are some key strategies to consider: + +1. **Implement rate limiting in your application:** ```python import asyncio +from aiolimiter import AsyncLimiter -from g4f.client import AsyncClient -from g4f.Provider import BingCreateImages, OpenaiChat, Gemini +rate_limit = AsyncLimiter(max_rate=10, time_period=1) # 10 requests per second -async def main(): - client = AsyncClient( - provider=OpenaiChat, - image_provider=Gemini, - # other parameters... - ) +async def make_api_call(): + async with rate_limit: + # Your API call here + pass +``` - task1 = client.chat.completions.create( - model="gpt-3.5-turbo", - messages=[{"role": "user", "content": "Say this is a test"}], - ) - task2 = client.images.generate( - model="dall-e-3", - prompt="a white siamese cat", - ) - responses = await asyncio.gather(task1, task2) + - print(responses) +2. **Monitor your API usage and implement logging:** +```python +import logging -asyncio.run(main()) -```
\ No newline at end of file +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +async def make_api_call(): + try: + response = await client.chat.completions.async_create(...) + logger.info(f"API call successful. Tokens used: {response.usage.total_tokens}") + except Exception as e: + logger.error(f"API call failed: {e}") +``` + + + +3. **Use caching to reduce API calls for repeated queries:** +```python +from functools import lru_cache + +@lru_cache(maxsize=100) +def get_cached_response(query): + # Your API call here + pass +``` + +## Conclusion +The G4F async client API provides a powerful and flexible way to interact with various AI models asynchronously. By leveraging its features and following best practices, you can build efficient and responsive applications that harness the power of AI for text generation, image analysis, and image creation. + +Remember to handle errors gracefully, implement rate limiting, and monitor your API usage to ensure optimal performance and reliability in your applications. + +--- + +[Return to Home](/) diff --git a/docs/client.md b/docs/client.md index a889443c..da45d7fd 100644 --- a/docs/client.md +++ b/docs/client.md @@ -1,31 +1,52 @@ -### G4F - Client API - -#### Introduction +# G4F Client API Guide + + +## Table of Contents + - [Introduction](#introduction) + - [Getting Started](#getting-started) + - [Switching to G4F Client](#switching-to-g4f-client) + - [Initializing the Client](#initializing-the-client) + - [Creating Chat Completions](#creating-chat-completions) + - [Configuration](#configuration) + - [Usage Examples](#usage-examples) + - [Text Completions](#text-completions) + - [Streaming Completions](#streaming-completions) + - [Image Generation](#image-generation) + - [Creating Image Variations](#creating-image-variations) + - [Advanced Usage](#advanced-usage) + - [Using a List of Providers with RetryProvider](#using-a-list-of-providers-with-retryprovider) + - [Using GeminiProVision](#using-geminiprovision) + - [Using a Vision Model](#using-a-vision-model) + - [Command-line Chat Program](#command-line-chat-program) + + + +## Introduction Welcome to the G4F Client API, a cutting-edge tool for seamlessly integrating advanced AI capabilities into your Python applications. This guide is designed to facilitate your transition from using the OpenAI client to the G4F Client, offering enhanced features while maintaining compatibility with the existing OpenAI API. -#### Getting Started - -**Switching to G4F Client:** - -To begin using the G4F Client, simply update your import statement in your Python code: +## Getting Started +### Switching to G4F Client +**To begin using the G4F Client, simply update your import statement in your Python code:** -Old Import: +**Old Import:** ```python from openai import OpenAI ``` -New Import: + + +**New Import:** ```python from g4f.client import Client as OpenAI ``` -The G4F Client preserves the same familiar API interface as OpenAI, ensuring a smooth transition process. + -### Initializing the Client - -To utilize the G4F Client, create an new instance. Below is an example showcasing custom providers: +The G4F Client preserves the same familiar API interface as OpenAI, ensuring a smooth transition process. +## Initializing the Client +To utilize the G4F Client, create a new instance. 
**Below is an example showcasing custom providers:** ```python from g4f.client import Client from g4f.Provider import BingCreateImages, OpenaiChat, Gemini @@ -33,143 +54,244 @@ from g4f.Provider import BingCreateImages, OpenaiChat, Gemini client = Client( provider=OpenaiChat, image_provider=Gemini, - ... + # Add any other necessary parameters ) ``` -## Configuration +## Creating Chat Completions +**Here’s an improved example of creating chat completions:** +```python +response = client.chat.completions.create( + model="gpt-4o-mini", + messages=[ + { + "role": "user", + "content": "Say this is a test" + } + ] + # Add any other necessary parameters +) +``` + +**This example:** + - Asks a specific question `Say this is a test` + - Configures various parameters like temperature and max_tokens for more control over the output + - Disables streaming for a complete response -You can set an "api_key" for your provider in the client. -And you also have the option to define a proxy for all outgoing requests: +You can adjust these parameters based on your specific needs. + +## Configuration +**You can set an `api_key` for your provider in the client and define a proxy for all outgoing requests:** ```python from g4f.client import Client client = Client( - api_key="...", + api_key="your_api_key_here", proxies="http://user:pass@host", - ... + # Add any other necessary parameters ) ``` -#### Usage Examples + -**Text Completions:** +## Usage Examples +### Text Completions +**Generate text completions using the `ChatCompletions` endpoint:** +```python +from g4f.client import Client -You can use the `ChatCompletions` endpoint to generate text completions as follows: +client = Client() -```python response = client.chat.completions.create( - model="gpt-3.5-turbo", - messages=[{"role": "user", "content": "Say this is a test"}], - ... + model="gpt-4o-mini", + messages=[ + { + "role": "user", + "content": "Say this is a test" + } + ] + # Add any other necessary parameters ) + print(response.choices[0].message.content) ``` -Also streaming are supported: + +### Streaming Completions +**Process responses incrementally as they are generated:** ```python +from g4f.client import Client + +client = Client() + stream = client.chat.completions.create( model="gpt-4", - messages=[{"role": "user", "content": "Say this is a test"}], + messages=[ + { + "role": "user", + "content": "Say this is a test" + } + ], stream=True, - ... ) + for chunk in stream: if chunk.choices[0].delta.content: print(chunk.choices[0].delta.content or "", end="") ``` -**Image Generation:** - -Generate images using a specified prompt: + +### Image Generation +**Generate images using a specified prompt:** ```python +from g4f.client import Client + +client = Client() + response = client.images.generate( - model="dall-e-3", - prompt="a white siamese cat", - ... 
+ model="flux", + prompt="a white siamese cat" + # Add any other necessary parameters ) image_url = response.data[0].url + +print(f"Generated image URL: {image_url}") ``` -**Creating Image Variations:** -Create variations of an existing image: +#### Base64 Response Format +```python +from g4f.client import Client + +client = Client() + +response = client.images.generate( + model="flux", + prompt="a white siamese cat", + response_format="b64_json" +) + +base64_text = response.data[0].b64_json +print(base64_text) +``` + + +### Creating Image Variations +**Create variations of an existing image:** ```python +from g4f.client import Client + +client = Client() + response = client.images.create_variation( image=open("cat.jpg", "rb"), - model="bing", - ... + model="bing" + # Add any other necessary parameters ) image_url = response.data[0].url + +print(f"Generated image URL: {image_url}") ``` -Original / Variant: -[![Original Image](/docs/cat.jpeg)](/docs/client.md) [![Variant Image](/docs/cat.webp)](/docs/client.md) + -#### Use a list of providers with RetryProvider +## Advanced Usage +### Using a List of Providers with RetryProvider ```python from g4f.client import Client from g4f.Provider import RetryProvider, Phind, FreeChatgpt, Liaobots - import g4f.debug + g4f.debug.logging = True +g4f.debug.version_check = False client = Client( provider=RetryProvider([Phind, FreeChatgpt, Liaobots], shuffle=False) ) + response = client.chat.completions.create( model="", - messages=[{"role": "user", "content": "Hello"}], + messages=[ + { + "role": "user", + "content": "Hello" + } + ] ) -print(response.choices[0].message.content) -``` -``` -Using RetryProvider provider -Using Phind provider -How can I assist you today? +print(response.choices[0].message.content) ``` -#### Advanced example using GeminiProVision - + +### Using GeminiProVision ```python from g4f.client import Client from g4f.Provider.GeminiPro import GeminiPro client = Client( - api_key="...", + api_key="your_api_key_here", provider=GeminiPro ) + response = client.chat.completions.create( model="gemini-pro-vision", - messages=[{"role": "user", "content": "What are on this image?"}], + messages=[ + { + "role": "user", + "content": "What are on this image?" + } + ], image=open("docs/waterfall.jpeg", "rb") ) + print(response.choices[0].message.content) ``` -``` -User: What are on this image? -``` -![Waterfall](/docs/waterfall.jpeg) -``` -Bot: There is a waterfall in the middle of a jungle. There is a rainbow over... + +### Using a Vision Model +**Analyze an image and generate a description:** +```python +import g4f +import requests +from g4f.client import Client + +image = requests.get("https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/cat.jpeg", stream=True).raw +# Or: image = open("docs/cat.jpeg", "rb") + +client = Client() + +response = client.chat.completions.create( + model=g4f.models.default, + messages=[ + { + "role": "user", + "content": "What are on this image?" 
+ } + ], + provider=g4f.Provider.Bing, + image=image + # Add any other necessary parameters +) + +print(response.choices[0].message.content) ``` -#### Advanced example: A command-line program + +## Command-line Chat Program +**Here's an example of a simple command-line chat program using the G4F Client:** ```python import g4f from g4f.client import Client # Initialize the GPT client with the desired provider -client = Client(provider=g4f.Provider.Bing) +client = Client() # Initialize an empty conversation history messages = [] @@ -177,7 +299,7 @@ messages = [] while True: # Get user input user_input = input("You: ") - + # Check if the user wants to exit the chat if user_input.lower() == "exit": print("Exiting chat...") @@ -199,8 +321,13 @@ while True: # Update the conversation history with GPT's response messages.append({"role": "assistant", "content": gpt_response}) + except Exception as e: print(f"An error occurred: {e}") ``` + +This guide provides a comprehensive overview of the G4F Client API, demonstrating its versatility in handling various AI tasks, from text generation to image analysis and creation. By leveraging these features, you can build powerful and responsive applications that harness the capabilities of advanced AI models. + -[Return to Home](/)
\ No newline at end of file +--- +[Return to Home](/) diff --git a/docs/docker.md b/docs/docker.md index db33b925..8017715c 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -1,45 +1,114 @@ -### G4F - Docker Setup -Easily set up and run the G4F project using Docker without the hassle of manual dependency installation. +# G4F Docker Setup -1. **Prerequisites:** - - [Install Docker](https://docs.docker.com/get-docker/) - - [Install Docker Compose](https://docs.docker.com/compose/install/) +## Table of Contents + - [Prerequisites](#prerequisites) + - [Installation and Setup](#installation-and-setup) + - [Testing the API](#testing-the-api) + - [Troubleshooting](#troubleshooting) + - [Stopping the Service](#stopping-the-service) -2. **Clone the Repository:** -```bash -git clone https://github.com/xtekky/gpt4free.git -``` +## Prerequisites +**Before you begin, ensure you have the following installed on your system:** + - [Docker](https://docs.docker.com/get-docker/) + - [Docker Compose](https://docs.docker.com/compose/install/) + - Python 3.7 or higher + - pip (Python package manager) -3. **Navigate to the Project Directory:** +**Note:** If you encounter issues with Docker, you can run the project directly using Python. -```bash -cd gpt4free -``` +## Installation and Setup + +### Docker Method (Recommended) +1. **Clone the Repository** + ```bash + git clone https://github.com/xtekky/gpt4free.git + cd gpt4free + ``` + +2. **Build and Run with Docker Compose** + ```bash + docker-compose up --build + ``` + +3. **Access the API** + The server will be accessible at `http://localhost:1337` + +### Non-Docker Method +If you encounter issues with Docker, you can run the project directly using Python: + +1. **Clone the Repository** + ```bash + git clone https://github.com/xtekky/gpt4free.git + cd gpt4free + ``` + +2. **Install Dependencies** + ```bash + pip install -r requirements.txt + ``` -4. **Build the Docker Image:** +3. **Run the Server** + ```bash + python -m g4f.api.run + ``` +4. **Access the API** + The server will be accessible at `http://localhost:1337` + +## Testing the API +**You can test the API using curl or by creating a simple Python script:** +### Using curl ```bash -docker pull selenium/node-chrome -docker-compose build +curl -X POST -H "Content-Type: application/json" -d '{"prompt": "What is the capital of France?"}' http://localhost:1337/chat/completions ``` -5. **Start the Service:** +### Using Python +**Create a file named `test_g4f.py` with the following content:** +```python +import requests + +url = "http://localhost:1337/v1/chat/completions" +body = { + "model": "gpt-4o-mini", + "stream": False, + "messages": [ + {"role": "assistant", "content": "What can you do?"} + ] +} + +json_response = requests.post(url, json=body).json().get('choices', []) + +for choice in json_response: + print(choice.get('message', {}).get('content', '')) +``` +**Run the script:** ```bash -docker-compose up +python test_g4f.py ``` -Your server will now be accessible at `http://localhost:1337`. Interact with the API or run tests as usual. +## Troubleshooting +- If you encounter issues with Docker, try running the project directly using Python as described in the Non-Docker Method. +- Ensure that you have the necessary permissions to run Docker commands. You might need to use `sudo` or add your user to the `docker` group. +- If the server doesn't start, check the logs for any error messages and ensure all dependencies are correctly installed. 
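+
+**Note:** If you choose to add your user to the `docker` group rather than using `sudo`, the commands below are a minimal sketch of that approach (assuming a standard Linux Docker installation). Log out and back in, or run `newgrp docker`, for the new group membership to take effect.
+```bash
+# Add the current user to the docker group so Docker commands work without sudo
+sudo usermod -aG docker $USER
+
+# Apply the new group membership in the current shell session
+newgrp docker
+```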
-To stop the Docker containers, simply run: +**_For more detailed information on API endpoints and usage, refer to the [G4F API documentation](docs/interference-api.md)._** + + +## Stopping the Service + +### Docker Method +**To stop the Docker containers, use the following command:** ```bash docker-compose down ``` -> [!Note] -> Changes made to local files reflect in the Docker container due to volume mapping in `docker-compose.yml`. However, if you add or remove dependencies, rebuild the Docker image using `docker-compose build`. +### Non-Docker Method +If you're running the server directly with Python, you can stop it by pressing Ctrl+C in the terminal where it's running. + +--- -[Return to Home](/)
\ No newline at end of file +[Return to Home](/) diff --git a/docs/git.md b/docs/git.md index 89137ffc..ff6c8091 100644 --- a/docs/git.md +++ b/docs/git.md @@ -1,66 +1,129 @@ -### G4F - Installation Guide -Follow these steps to install G4F from the source code: +# G4F - Git Installation Guide -1. **Clone the Repository:** +This guide provides step-by-step instructions for installing G4F from the source code using Git. -```bash -git clone https://github.com/xtekky/gpt4free.git -``` -2. **Navigate to the Project Directory:** +## Table of Contents -```bash -cd gpt4free -``` +1. [Prerequisites](#prerequisites) +2. [Installation Steps](#installation-steps) + 1. [Clone the Repository](#1-clone-the-repository) + 2. [Navigate to the Project Directory](#2-navigate-to-the-project-directory) + 3. [Set Up a Python Virtual Environment](#3-set-up-a-python-virtual-environment-recommended) + 4. [Activate the Virtual Environment](#4-activate-the-virtual-environment) + 5. [Install Dependencies](#5-install-dependencies) + 6. [Verify Installation](#6-verify-installation) +3. [Usage](#usage) +4. [Troubleshooting](#troubleshooting) +5. [Additional Resources](#additional-resources) -3. **(Optional) Create a Python Virtual Environment:** +--- -It's recommended to isolate your project dependencies. You can follow the [Python official documentation](https://docs.python.org/3/tutorial/venv.html) for virtual environments. +## Prerequisites -```bash -python3 -m venv venv -``` +Before you begin, ensure you have the following installed on your system: +- Git +- Python 3.7 or higher +- pip (Python package installer) -4. **Activate the Virtual Environment:** - -- On Windows: +## Installation Steps +### 1. Clone the Repository +**Open your terminal and run the following command to clone the G4F repository:** ```bash -.\venv\Scripts\activate +git clone https://github.com/xtekky/gpt4free.git ``` -- On macOS and Linux: +### 2. Navigate to the Project Directory +**Change to the project directory:** +```bash +cd gpt4free +``` +### 3. Set Up a Python Virtual Environment (Recommended) +**It's best practice to use a virtual environment to manage project dependencies:** ```bash -source venv/bin/activate +python3 -m venv venv ``` -5. **Install Minimum Requirements:** +### 4. Activate the Virtual Environment +**Activate the virtual environment based on your operating system:** +- **Windows:** + ```bash + .\venv\Scripts\activate + ``` -Install the minimum required packages: +- **macOS and Linux:** + ```bash + source venv/bin/activate + ``` +### 5. Install Dependencies +**You have two options for installing dependencies:** + +#### Option A: Install Minimum Requirements +**For a lightweight installation, use:** ```bash pip install -r requirements-min.txt ``` -6. **Or Install All Packages from `requirements.txt`:** - -If you prefer, you can install all packages listed in `requirements.txt`: - +#### Option B: Install All Packages +**For a full installation with all features, use:** ```bash pip install -r requirements.txt ``` -7. **Start Using the Repository:** - +### 6. Verify Installation You can now create Python scripts and utilize the G4F functionalities. Here's a basic example: -Create a `test.py` file in the root folder and start using the repository: - +**Create a `g4f-test.py` file in the root folder and start using the repository:** ```python import g4f # Your code here ``` -[Return to Home](/)
\ No newline at end of file +## Usage +**After installation, you can start using G4F in your Python scripts. Here's a basic example:** +```python +import g4f + +# Your G4F code here +# For example: +from g4f.client import Client + +client = Client() + +response = client.chat.completions.create( + model="gpt-4o-mini", + messages=[ + { + "role": "user", + "content": "Say this is a test" + } + ] + # Add any other necessary parameters +) + +print(response.choices[0].message.content) +``` + +## Troubleshooting +**If you encounter any issues during installation or usage:** + 1. Ensure all prerequisites are correctly installed. + 2. Check that you're in the correct directory and the virtual environment is activated. + 3. Try reinstalling the dependencies. + 4. Consult the [G4F documentation](https://github.com/xtekky/gpt4free) for more detailed information. + +## Additional Resources + - [G4F GitHub Repository](https://github.com/xtekky/gpt4free) + - [Python Virtual Environments Guide](https://docs.python.org/3/tutorial/venv.html) + - [pip Documentation](https://pip.pypa.io/en/stable/) + +--- + +**_For more information or support, please visit the [G4F GitHub Issues page](https://github.com/xtekky/gpt4free/issues)._** + + +--- +[Return to Home](/) diff --git a/docs/interference-api.md b/docs/interference-api.md new file mode 100644 index 00000000..a6999345 --- /dev/null +++ b/docs/interference-api.md @@ -0,0 +1,166 @@ +# G4F - Interference API Usage Guide + + +## Table of Contents + - [Introduction](#introduction) + - [Running the Interference API](#running-the-interference-api) + - [From PyPI Package](#from-pypi-package) + - [From Repository](#from-repository) + - [Using the Interference API](#using-the-interference-api) + - [Basic Usage](#basic-usage) + - [With OpenAI Library](#with-openai-library) + - [With Requests Library](#with-requests-library) + - [Key Points](#key-points) + - [Conclusion](#conclusion) + + +## Introduction +The G4F Interference API is a powerful tool that allows you to serve other OpenAI integrations using G4F (Gpt4free). It acts as a proxy, translating requests intended for the OpenAI API into requests compatible with G4F providers. This guide will walk you through the process of setting up, running, and using the Interference API effectively. + + +## Running the Interference API +**You can run the Interference API in two ways:** using the PyPI package or from the repository. + + +### From PyPI Package +**To run the Interference API directly from the G4F PyPI package, use the following Python code:** + +```python +from g4f.api import run_api + +run_api() +``` + + +### From Repository +**If you prefer to run the Interference API from the cloned repository, you have two options:** + +1. **Using the command line:** +```bash +g4f api +``` + +2. **Using Python:** +```bash +python -m g4f.api.run +``` + +**Once running, the API will be accessible at:** `http://localhost:1337/v1` + +**(Advanced) Bind to custom port:** +```bash +python -m g4f.cli api --bind "0.0.0.0:2400" +``` + +## Using the Interference API + +### Basic Usage +**You can interact with the Interference API using curl commands for both text and image generation:** + +**For text generation:** +```bash +curl -X POST "http://localhost:1337/v1/chat/completions" \ + -H "Content-Type: application/json" \ + -d '{ + "messages": [ + { + "role": "user", + "content": "Hello" + } + ], + "model": "gpt-4o-mini" + }' +``` + +**For image generation:** +1. 
**url:** +```bash +curl -X POST "http://localhost:1337/v1/images/generate" \ + -H "Content-Type: application/json" \ + -d '{ + "prompt": "a white siamese cat", + "model": "flux", + "response_format": "url" + }' +``` + +2. **b64_json** +```bash +curl -X POST "http://localhost:1337/v1/images/generate" \ + -H "Content-Type: application/json" \ + -d '{ + "prompt": "a white siamese cat", + "model": "flux", + "response_format": "b64_json" + }' +``` + + +### With OpenAI Library + +**You can use the Interference API with the OpenAI Python library by changing the `base_url`:** +```python +from openai import OpenAI + +client = OpenAI( + api_key="", + base_url="http://localhost:1337/v1" +) + +response = client.chat.completions.create( + model="gpt-4o-mini", + messages=[{"role": "user", "content": "Write a poem about a tree"}], + stream=True, +) + +if isinstance(response, dict): + # Not streaming + print(response.choices[0].message.content) +else: + # Streaming + for token in response: + content = token.choices[0].delta.content + if content is not None: + print(content, end="", flush=True) + +``` + + +### With Requests Library + +**You can also send requests directly to the Interference API using the `requests` library:** +```python +import requests + +url = "http://localhost:1337/v1/chat/completions" + +body = { + "model": "gpt-4o-mini", + "stream": False, + "messages": [ + {"role": "assistant", "content": "What can you do?"} + ] +} + +json_response = requests.post(url, json=body).json().get('choices', []) + +for choice in json_response: + print(choice.get('message', {}).get('content', '')) + +``` + +## Key Points + - The Interference API translates OpenAI API requests into G4F provider requests. + - It can be run from either the PyPI package or the cloned repository. + - The API supports usage with the OpenAI Python library by changing the `base_url`. + - Direct requests can be sent to the API endpoints using libraries like `requests`. + - Both text and image generation are supported. + + +## Conclusion +The G4F Interference API provides a seamless way to integrate G4F with existing OpenAI-based applications and tools. By following this guide, you should now be able to set up, run, and use the Interference API effectively. Whether you're using it for text generation, image creation, or as a drop-in replacement for OpenAI in your projects, the Interference API offers flexibility and power for your AI-driven applications. 
+ + +--- + +[Return to Home](/) diff --git a/docs/interference.md b/docs/interference.md deleted file mode 100644 index b140f66a..00000000 --- a/docs/interference.md +++ /dev/null @@ -1,69 +0,0 @@ -### Interference openai-proxy API - -#### Run interference API from PyPi package - -```python -from g4f.api import run_api - -run_api() -``` - -#### Run interference API from repo - -Run server: - -```sh -g4f api -``` - -or - -```sh -python -m g4f.api.run -``` - -```python -from openai import OpenAI - -client = OpenAI( - api_key="", - # Change the API base URL to the local interference API - base_url="http://localhost:1337/v1" -) - - response = client.chat.completions.create( - model="gpt-3.5-turbo", - messages=[{"role": "user", "content": "write a poem about a tree"}], - stream=True, - ) - - if isinstance(response, dict): - # Not streaming - print(response.choices[0].message.content) - else: - # Streaming - for token in response: - content = token.choices[0].delta.content - if content is not None: - print(content, end="", flush=True) -``` - -#### API usage (POST) -Send the POST request to /v1/chat/completions with body containing the `model` method. This example uses python with requests library: -```python -import requests -url = "http://localhost:1337/v1/chat/completions" -body = { - "model": "gpt-3.5-turbo-16k", - "stream": False, - "messages": [ - {"role": "assistant", "content": "What can you do?"} - ] -} -json_response = requests.post(url, json=body).json().get('choices', []) - -for choice in json_response: - print(choice.get('message', {}).get('content', '')) -``` - -[Return to Home](/)
\ No newline at end of file diff --git a/docs/legacy.md b/docs/legacy/legacy.md index d5cd5a36..d5cd5a36 100644 --- a/docs/legacy.md +++ b/docs/legacy/legacy.md diff --git a/docs/legacy/legacy_async_client.md b/docs/legacy/legacy_async_client.md new file mode 100644 index 00000000..5ddc2671 --- /dev/null +++ b/docs/legacy/legacy_async_client.md @@ -0,0 +1,380 @@ +# G4F - Legacy AsyncClient API Guide + +**IMPORTANT: This guide refers to the old implementation of AsyncClient. The new version of G4F now supports both synchronous and asynchronous operations through a unified interface. Please refer to the [new AsyncClient documentation](https://github.com/xtekky/gpt4free/blob/main/docs/async_client.md) for the latest information.** + +This guide provides comprehensive information on how to use the G4F AsyncClient API, including setup, usage examples, best practices, and important considerations for optimal performance. + +## Compatibility Note +The G4F AsyncClient API is designed to be compatible with the OpenAI API, making it easy for developers familiar with OpenAI's interface to transition to G4F. However, please note that this is the old version, and you should migrate to the new implementation for better support and features. + +## Table of Contents + - [Introduction](#introduction) + - [Key Features](#key-features) + - [Getting Started](#getting-started) + - [Initializing the Client](#initializing-the-client) + - [Creating Chat Completions](#creating-chat-completions) + - [Configuration](#configuration) + - [Usage Examples](#usage-examples) + - [Text Completions](#text-completions) + - [Streaming Completions](#streaming-completions) + - [Using a Vision Model](#using-a-vision-model) + - [Image Generation](#image-generation) + - [Concurrent Tasks](#concurrent-tasks-with-asynciogather) + - [Available Models and Providers](#available-models-and-providers) + - [Error Handling and Best Practices](#error-handling-and-best-practices) + - [Rate Limiting and API Usage](#rate-limiting-and-api-usage) + - [Conclusion](#conclusion) + +## Introduction +This is the old version: The G4F AsyncClient API is an asynchronous version of the standard G4F Client API. It offers the same functionality as the synchronous API but with improved performance due to its asynchronous nature. This guide will walk you through the key features and usage of the G4F AsyncClient API. + +## Key Features + - **Custom Providers**: Use custom providers for enhanced flexibility. + - **ChatCompletion Interface**: Interact with chat models through the ChatCompletion class. + - **Streaming Responses**: Get responses iteratively as they are received. + - **Non-Streaming Responses**: Generate complete responses in a single call. + - **Image Generation and Vision Models**: Support for image-related tasks. 
+ +## Getting Started +**To ignore DeprecationWarnings related to the AsyncClient, you can use the following code:*** +```python +import warnings + +# Ignore DeprecationWarning for AsyncClient +warnings.filterwarnings("ignore", category=DeprecationWarning, module="g4f.client") +``` + +### Initializing the Client +**To use the G4F `Client`, create a new instance:** +```python +from g4f.client import AsyncClient +from g4f.Provider import OpenaiChat, Gemini + +client = AsyncClient( + provider=OpenaiChat, + image_provider=Gemini, + # Add other parameters as needed +) +``` + +## Creating Chat Completions +**Here's an improved example of creating chat completions:** +```python +response = await async_client.chat.completions.create( + model="gpt-3.5-turbo", + messages=[ + { + "role": "user", + "content": "Say this is a test" + } + ] + # Add other parameters as needed +) +``` + +**This example:** + - Asks a specific question `Say this is a test` + - Configures various parameters like temperature and max_tokens for more control over the output + - Disables streaming for a complete response + +You can adjust these parameters based on your specific needs. + +### Configuration +**Configure the `AsyncClient` with additional settings:** +```python +client = Client( + api_key="your_api_key_here", + proxies="http://user:pass@host", + # Add other parameters as needed +) +``` + +## Usage Examples +### Text Completions +**Generate text completions using the ChatCompletions endpoint:** +```python +import asyncio +import warnings +from g4f.client import AsyncClient + +# Ігноруємо DeprecationWarning +warnings.filterwarnings("ignore", category=DeprecationWarning) + +async def main(): + client = AsyncClient() + + response = await client.chat.completions.async_create( + model="gpt-3.5-turbo", + messages=[ + { + "role": "user", + "content": "Say this is a test" + } + ] + ) + + print(response.choices[0].message.content) + +asyncio.run(main()) +``` + +### Streaming Completions +**Process responses incrementally as they are generated:** +```python +import asyncio +from g4f.client import AsyncClient + +async def main(): + client = AsyncClient() + + stream = await client.chat.completions.async_create( + model="gpt-4", + messages=[ + { + "role": "user", + "content": "Say this is a test" + } + ], + stream=True, + ) + + async for chunk in stream: + if chunk.choices[0].delta.content: + print(chunk.choices[0].delta.content, end="") + +asyncio.run(main()) +``` + +### Using a Vision Model +**Analyze an image and generate a description:** +```python +import g4f +import requests +import asyncio +from g4f.client import AsyncClient + +async def main(): + client = AsyncClient() + + image = requests.get("https://raw.githubusercontent.com/xtekky/gpt4free/refs/heads/main/docs/cat.jpeg", stream=True).raw + + response = await client.chat.completions.async_create( + model=g4f.models.default, + provider=g4f.Provider.Bing, + messages=[ + { + "role": "user", + "content": "What's in this image?" 
+ } + ], + image=image + ) + + print(response.choices[0].message.content) + +asyncio.run(main()) +``` + +### Image Generation +**Generate images using a specified prompt:** +```python +import asyncio +from g4f.client import AsyncClient + +async def main(): + client = AsyncClient() + + response = await client.images.async_generate( + prompt="a white siamese cat", + model="flux" + ) + + image_url = response.data[0].url + print(f"Generated image URL: {image_url}") + +asyncio.run(main()) +``` + +#### Base64 Response Format +```python +import asyncio +from g4f.client import AsyncClient + +async def main(): + client = AsyncClient() + + response = await client.images.async_generate( + prompt="a white siamese cat", + model="flux", + response_format="b64_json" + ) + + base64_text = response.data[0].b64_json + print(base64_text) + +asyncio.run(main()) +``` + +### Concurrent Tasks with asyncio.gather +**Execute multiple tasks concurrently:** +```python +import asyncio +import warnings +from g4f.client import AsyncClient + +# Ignore DeprecationWarning for AsyncClient +warnings.filterwarnings("ignore", category=DeprecationWarning, module="g4f.client") + +async def main(): + client = AsyncClient() + + task1 = client.chat.completions.async_create( + model="gpt-3.5-turbo", + messages=[ + { + "role": "user", + "content": "Say this is a test" + } + ] + ) + + task2 = client.images.async_generate( + model="flux", + prompt="a white siamese cat" + ) + + chat_response, image_response = await asyncio.gather(task1, task2) + + print("Chat Response:") + print(chat_response.choices[0].message.content) + + print("Image Response:") + print(image_response.data[0].url) + +asyncio.run(main()) +``` + +## Available Models and Providers +This is the old version: The G4F AsyncClient supports a wide range of AI models and providers, allowing you to choose the best option for your specific use case. +**Here's a brief overview of the available models and providers:** + +### Models + - GPT-3.5-Turbo + - GPT-4 + - DALL-E 3 + - Gemini + - Claude (Anthropic) + - And more... + +### Providers + - OpenAI + - Google (for Gemini) + - Anthropic + - Bing + - Custom providers + +**To use a specific model or provider, specify it when creating the client or in the API call:** +```python +client = AsyncClient(provider=g4f.Provider.OpenaiChat) + +# or + +response = await client.chat.completions.async_create( + model="gpt-4", + provider=g4f.Provider.Bing, + messages=[ + { + "role": "user", + "content": "Hello, world!" + } + ] +) +``` + +## Error Handling and Best Practices +Implementing proper error handling and following best practices is crucial when working with the G4F AsyncClient API. This ensures your application remains robust and can gracefully handle various scenarios. **Here are some key practices to follow:** + +1. **Use try-except blocks to catch and handle exceptions:** +```python +try: + response = await client.chat.completions.async_create( + model="gpt-3.5-turbo", + messages=[ + { + "role": "user", + "content": "Hello, world!" + } + ] + ) +except Exception as e: + print(f"An error occurred: {e}") +``` + +2. **Check the response status and handle different scenarios:** +```python +if response.choices: + print(response.choices[0].message.content) +else: + print("No response generated") +``` + +3. 
**Implement retries for transient errors:** +```python +import asyncio +from tenacity import retry, stop_after_attempt, wait_exponential + +@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10)) +async def make_api_call(): + # Your API call here + pass +``` + +## Rate Limiting and API Usage +This is the old version: When working with the G4F AsyncClient API, it's important to implement rate limiting and monitor your API usage. This helps ensure fair usage, prevents overloading the service, and optimizes your application's performance. **Here are some key strategies to consider:** + +1. **Implement rate limiting in your application:** +```python +import asyncio +from aiolimiter import AsyncLimiter + +rate_limit = AsyncLimiter(max_rate=10, time_period=1) # 10 requests per second + +async def make_api_call(): + async with rate_limit: + # Your API call here + pass +``` + +2. **Monitor your API usage and implement logging:** +```python +import logging + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +async def make_api_call(): + try: + response = await client.chat.completions.async_create(...) + logger.info(f"API call successful. Tokens used: {response.usage.total_tokens}") + except Exception as e: + logger.error(f"API call failed: {e}") +``` + +3. **Use caching to reduce API calls for repeated queries:** +```python +from functools import lru_cache + +@lru_cache(maxsize=100) +def get_cached_response(query): + # Your API call here + pass +``` + +## Conclusion +This is the old version: The G4F AsyncClient API provides a powerful and flexible way to interact with various AI models asynchronously. By leveraging its features and following best practices, you can build efficient and responsive applications that harness the power of AI for text generation, image analysis, and image creation. + +Remember to handle errors gracefully, implement rate limiting, and monitor your API usage to ensure optimal performance and reliability in your applications. + +--- + +[Return to Home](/) diff --git a/docs/local.md b/docs/local.md new file mode 100644 index 00000000..2cedd1a9 --- /dev/null +++ b/docs/local.md @@ -0,0 +1,164 @@ + +### G4F - Local Usage Guide + + +### Table of Contents +1. [Introduction](#introduction) +2. [Required Dependencies](#required-dependencies) +3. [Basic Usage Example](#basic-usage-example) +4. [Supported Models](#supported-models) +5. [Performance Considerations](#performance-considerations) +6. [Troubleshooting](#troubleshooting) + +#### Introduction +This guide explains how to use g4f to run language models locally. G4F (GPT4Free) allows you to interact with various language models on your local machine, providing a flexible and private solution for natural language processing tasks. + +## Usage + +#### Local inference +How to use g4f to run language models locally + +#### Required dependencies +**Make sure to install the required dependencies by running:** +```bash +pip install g4f[local] +``` +or +```bash +pip install -U gpt4all +``` + + + +#### Basic usage example +```python +from g4f.local import LocalClient + +client = LocalClient() +response = client.chat.completions.create( + model = 'orca-mini-3b', + messages = [{"role": "user", "content": "hi"}], + stream = True +) + +for token in response: + print(token.choices[0].delta.content or "") +``` + +Upon first use, there will be a prompt asking you if you wish to download the model. If you respond with `y`, g4f will go ahead and download the model for you. 
+ +You can also manually place supported models into `./g4f/local/models/` + + +**You can get a list of the current supported models by running:** +```python +from g4f.local import LocalClient + +client = LocalClient() +client.list_models() +``` + +```json +{ + "mistral-7b": { + "path": "mistral-7b-openorca.gguf2.Q4_0.gguf", + "ram": "8", + "prompt": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n", + "system": "<|im_start|>system\nYou are MistralOrca, a large language model trained by Alignment Lab AI. For multi-step problems, write out your reasoning for each step.\n<|im_end|>" + }, + "mistral-7b-instruct": { + "path": "mistral-7b-instruct-v0.1.Q4_0.gguf", + "ram": "8", + "prompt": "[INST] %1 [/INST]", + "system": None + }, + "gpt4all-falcon": { + "path": "gpt4all-falcon-newbpe-q4_0.gguf", + "ram": "8", + "prompt": "### Instruction:\n%1\n### Response:\n", + "system": None + }, + "orca-2": { + "path": "orca-2-13b.Q4_0.gguf", + "ram": "16", + "prompt": None, + "system": None + }, + "wizardlm-13b": { + "path": "wizardlm-13b-v1.2.Q4_0.gguf", + "ram": "16", + "prompt": None, + "system": None + }, + "nous-hermes-llama2": { + "path": "nous-hermes-llama2-13b.Q4_0.gguf", + "ram": "16", + "prompt": "### Instruction:\n%1\n### Response:\n", + "system": None + }, + "gpt4all-13b-snoozy": { + "path": "gpt4all-13b-snoozy-q4_0.gguf", + "ram": "16", + "prompt": None, + "system": None + }, + "mpt-7b-chat": { + "path": "mpt-7b-chat-newbpe-q4_0.gguf", + "ram": "8", + "prompt": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n", + "system": "<|im_start|>system\n- You are a helpful assistant chatbot trained by MosaicML.\n- You answer questions.\n- You are excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.\n- You are more than just an information source, you are also able to write poetry, short stories, and make jokes.<|im_end|>" + }, + "orca-mini-3b": { + "path": "orca-mini-3b-gguf2-q4_0.gguf", + "ram": "4", + "prompt": "### User:\n%1\n### Response:\n", + "system": "### System:\nYou are an AI assistant that follows instruction extremely well. Help as much as you can.\n\n" + }, + "replit-code-3b": { + "path": "replit-code-v1_5-3b-newbpe-q4_0.gguf", + "ram": "4", + "prompt": "%1", + "system": None + }, + "starcoder": { + "path": "starcoder-newbpe-q4_0.gguf", + "ram": "4", + "prompt": "%1", + "system": None + }, + "rift-coder-7b": { + "path": "rift-coder-v0-7b-q4_0.gguf", + "ram": "8", + "prompt": "%1", + "system": None + }, + "all-MiniLM-L6-v2": { + "path": "all-MiniLM-L6-v2-f16.gguf", + "ram": "1", + "prompt": None, + "system": None + }, + "mistral-7b-german": { + "path": "em_german_mistral_v01.Q4_0.gguf", + "ram": "8", + "prompt": "USER: %1 ASSISTANT: ", + "system": "Du bist ein hilfreicher Assistent. " + } +} +``` + +#### Performance Considerations +**When running language models locally, consider the following:** + - RAM requirements vary by model size (see the 'ram' field in the model list). + - CPU/GPU capabilities affect inference speed. + - Disk space is needed to store the model files. + +#### Troubleshooting +**Common issues and solutions:** + 1. **Model download fails**: Check your internet connection and try again. + 2. **Out of memory error**: Choose a smaller model or increase your system's RAM. + 3. **Slow inference**: Consider using a GPU or a more powerful CPU. 
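+
+**Choosing a model that fits your RAM:** As a rough guard against out-of-memory errors, you can compare the `ram` field from the model list against the memory you are willing to dedicate to inference. The snippet below is a minimal sketch that assumes `client.list_models()` returns the mapping shown above, with `ram` given in gigabytes as a string; adjust it if your version prints the list instead of returning it.
+```python
+from g4f.local import LocalClient
+
+client = LocalClient()
+models = client.list_models()  # assumed to return the {name: {"path": ..., "ram": ...}} mapping shown above
+
+available_gb = 8  # set this to the amount of RAM (in GB) you want to allow for inference
+
+# Keep only the models whose declared RAM requirement fits the available memory
+fitting = [name for name, info in models.items() if int(info["ram"]) <= available_gb]
+print("Models that should fit:", fitting)
+```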
+ + + +[Return to Home](/) diff --git a/docs/providers-and-models.md b/docs/providers-and-models.md new file mode 100644 index 00000000..7c6bc613 --- /dev/null +++ b/docs/providers-and-models.md @@ -0,0 +1,214 @@ + + +# G4F - Providers and Models + +This document provides an overview of various AI providers and models, including text generation, image generation, and vision capabilities. It aims to help users navigate the diverse landscape of AI services and choose the most suitable option for their needs. + +## Table of Contents + - [Providers](#providers) + - [Models](#models) + - [Text Models](#text-models) + - [Image Models](#image-models) + - [Vision Models](#vision-models) + - [Providers and vision models](#providers-and-vision-models) + - [Conclusion and Usage Tips](#conclusion-and-usage-tips) + +--- +## Providers +| Provider | Text Models | Image Models | Vision Models | Stream | Status | Auth | +|----------|-------------|--------------|---------------|--------|--------|------| +|[ai4chat.co](https://www.ai4chat.co)|`g4f.Provider.Ai4Chat`|`gpt-4`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[aichatfree.info](https://aichatfree.info)|`g4f.Provider.AIChatFree`|`gemini-pro`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[api.airforce](https://api.airforce)|`g4f.Provider.Airforce`|`gpt-4o, gpt-4o-mini, gpt-4-turbo, llama-2-7b, llama-3.1-8b, llama-3.1-70b, hermes-2-pro, hermes-2-dpo, phi-2, deepseek-coder, openchat-3.5, openhermes-2.5, lfm-40b, german-7b, zephyr-7b, neural-7b`|`flux, flux-realism', flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, sdxl`|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[aiuncensored.info](https://www.aiuncensored.info)|`g4f.Provider.AIUncensored`|✔|✔|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[allyfy.chat](https://allyfy.chat/)|`g4f.Provider.Allyfy`|`gpt-3.5-turbo`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[bing.com](https://bing.com/chat)|`g4f.Provider.Bing`|`gpt-4`|✔|`gpt-4-vision`|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌+✔| +|[bing.com/images](https://www.bing.com/images/create)|`g4f.Provider.BingCreateImages`|`❌|✔|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|✔| +|[blackbox.ai](https://www.blackbox.ai)|`g4f.Provider.Blackbox`|`blackboxai, blackboxai-pro, gemini-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b, gpt-4o, gemini-pro, claude-3.5-sonnet`|`flux`|`blackboxai, gemini-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b, gpt-4o, gemini-pro`|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[chatgot.one](https://www.chatgot.one/)|`g4f.Provider.ChatGot`|`gemini-pro`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[chatgpt.com](https://chatgpt.com)|`g4f.Provider.ChatGpt`|`?`|`?`|`?`|?|![Unknown](https://img.shields.io/badge/Unknown-grey) |❌| +|[chatgpt.es](https://chatgpt.es)|`g4f.Provider.ChatGptEs`|`gpt-4o, gpt-4o-mini`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[playground.ai.cloudflare.com](https://playground.ai.cloudflare.com)|`g4f.Provider.Cloudflare`|`gemma-7b, llama-2-7b, llama-3-8b, llama-3.1-8b, llama-3.2-1b, phi-2, qwen-1.5-0-5b, qwen-1.5-8b, qwen-1.5-14b, qwen-1.5-7b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[darkai.foundation/chat](https://darkai.foundation/chat)|`g4f.Provider.DarkAI`|`gpt-4o, gpt-3.5-turbo, llama-3-70b, 
llama-3-405b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[duckduckgo.com](https://duckduckgo.com/duckchat/v1/chat)|`g4f.Provider.DDG`|`gpt-4o-mini, claude-3-haiku, llama-3.1-70b, mixtral-8x7b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[deepinfra.com](https://deepinfra.com)|`g4f.Provider.DeepInfra`|✔|❌|❌|✔|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔| +|[deepinfra.com/chat](https://deepinfra.com/chat)|`g4f.Provider.DeepInfraChat`|`llama-3.1-8b, llama-3.1-70b, wizardlm-2-8x22b, qwen-2-72b`|❌|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[deepinfra.com](https://deepinfra.com)|`g4f.Provider.DeepInfraImage`|❌|✔|❌|❌|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔| +|[chat10.free2gpt.xyz](chat10.free2gpt.xyz)|`g4f.Provider.Free2GPT`|`mixtral-7b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[freegptsnav.aifree.site](https://freegptsnav.aifree.site)|`g4f.Provider.FreeGpt`|`gemini-pro`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[gemini.google.com](https://gemini.google.com)|`g4f.Provider.Gemini`|✔|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|✔| +|[ai.google.dev](https://ai.google.dev)|`g4f.Provider.GeminiPro`|✔|❌|✔|?|![Active](https://img.shields.io/badge/Active-brightgreen)|✔| +|[app.giz.ai](https://app.giz.ai/assistant/)|`g4f.Provider.GizAI`|`gemini-flash`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[developers.sber.ru](https://developers.sber.ru/gigachat)|`g4f.Provider.GigaChat`|✔|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|✔| +|[console.groq.com/playground](https://console.groq.com/playground)|`g4f.Provider.Groq`|✔|❌|❌|?|![Active](https://img.shields.io/badge/Active-brightgreen)|✔| +|[huggingface.co/chat](https://huggingface.co/chat)|`g4f.Provider.HuggingChat`|`llama-3.1-70b, command-r-plus, qwen-2-72b, llama-3.2-11b, hermes-3, mistral-nemo, phi-3.5-mini`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[huggingface.co](https://huggingface.co/chat)|`g4f.Provider.HuggingFace`|✔|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[liaobots.work](https://liaobots.work)|`g4f.Provider.Liaobots`|`gpt-3.5-turbo, gpt-4o-mini, gpt-4o, gpt-4-turbo, grok-2, grok-2-mini, claude-3-opus, claude-3-sonnet, claude-3-5-sonnet, claude-3-haiku, claude-2.1, gemini-flash, gemini-pro`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[magickpen.com](https://magickpen.com)|`g4f.Provider.MagickPen`|`gpt-4o-mini`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[meta.ai](https://www.meta.ai)|`g4f.Provider.MetaAI`|✔|✔|?|?|![Active](https://img.shields.io/badge/Active-brightgreen)|✔| +|[platform.openai.com](https://platform.openai.com/)|`g4f.Provider.Openai`|✔|❌|✔||![Unknown](https://img.shields.io/badge/Unknown-grey)|✔| +|[chatgpt.com](https://chatgpt.com/)|`g4f.Provider.OpenaiChat`|`gpt-4o, gpt-4o-mini, gpt-4`|❌|✔||![Unknown](https://img.shields.io/badge/Unknown-grey)|✔| +|[www.perplexity.ai)](https://www.perplexity.ai)|`g4f.Provider.PerplexityAi`|✔|❌|❌|?|![Disabled](https://img.shields.io/badge/Disabled-red)|❌| +|[perplexity.ai](https://www.perplexity.ai)|`g4f.Provider.PerplexityApi`|✔|❌|❌|?|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔| +|[labs.perplexity.ai](https://labs.perplexity.ai)|`g4f.Provider.PerplexityLabs`|`sonar-online, sonar-chat, llama-3.1-8b, 
llama-3.1-70b`|❌|❌|?|![Cloudflare](https://img.shields.io/badge/Cloudflare-f48d37)|❌| +|[pi.ai/talk](https://pi.ai/talk)|`g4f.Provider.Pi`|`pi`|❌|❌|?|![Unknown](https://img.shields.io/badge/Unknown-grey)|❌| +|[]()|`g4f.Provider.Pizzagpt`|`gpt-4o-mini`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[poe.com](https://poe.com)|`g4f.Provider.Poe`|✔|❌|❌|?|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔| +|[app.prodia.com](https://app.prodia.com)|`g4f.Provider.Prodia`|❌|✔|❌|❌|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[raycast.com](https://raycast.com)|`g4f.Provider.Raycast`|✔|❌|❌|✔|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔| +|[chat.reka.ai](https://chat.reka.ai/)|`g4f.Provider.Reka`|✔|❌|✔|✔|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔| +|[replicate.com](https://replicate.com)|`g4f.Provider.Replicate`|✔|❌|❌|?|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔| +|[replicate.com](https://replicate.com)|`g4f.Provider.ReplicateHome`|`gemma-2b, llava-13b`|`sd-3, sdxl, playground-v2.5`|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[replicate.com](https://replicate.com)|`g4f.Provider.RubiksAI`|`llama-3.1-70b, gpt-4o-mini`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[talkai.info](https://talkai.info)|`g4f.Provider.TalkAi`|✔|❌|❌|✔|![Disabled](https://img.shields.io/badge/Disabled-red)|❌| +|[teach-anything.com](https://www.teach-anything.com)|`g4f.Provider.TeachAnything`|`llama-3.1-70b`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[beta.theb.ai](https://beta.theb.ai)|`g4f.Provider.Theb`|✔|❌|❌|✔|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔| +|[beta.theb.ai](https://beta.theb.ai)|`g4f.Provider.ThebApi`|✔|❌|❌|✔|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔| +|[console.upstage.ai/playground/chat](https://console.upstage.ai/playground/chat)|`g4f.Provider.Upstage`|`solar-pro, solar-mini`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌| +|[whiterabbitneo.com](https://www.whiterabbitneo.com)|`g4f.Provider.WhiteRabbitNeo`|✔|❌|❌|?|![Unknown](https://img.shields.io/badge/Unknown-grey)|✔| +|[you.com](https://you.com)|`g4f.Provider.You`|✔|✔|✔|✔|![Unknown](https://img.shields.io/badge/Unknown-grey)|❌+✔| + +## Models + +### Text Models +| Model | Base Provider | Providers | Website | +|-------|---------------|-----------|---------| +|gpt-3.5-turbo|OpenAI|4+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-3-5-turbo)| +|gpt-4|OpenAI|6+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)| +|gpt-4-turbo|OpenAI|4+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)| +|gpt-4o|OpenAI|7+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o)| +|gpt-4o-mini|OpenAI|10+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o-mini)| +|o1|OpenAI|0+ Providers|[platform.openai.com](https://openai.com/index/introducing-openai-o1-preview/)| +|o1-mini|OpenAI|0+ Providers|[platform.openai.com](https://openai.com/index/openai-o1-mini-advancing-cost-efficient-reasoning/)| +|llama-2-7b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-2-7b)| +|llama-2-13b|Meta Llama|1+ Providers|[llama.com](https://www.llama.com/llama2/)| +|llama-3-8b|Meta Llama|4+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3/)| +|llama-3-70b|Meta Llama|4+ 
Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3/)| +|llama-3.1-8b|Meta Llama|7+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)| +|llama-3.1-70b|Meta Llama|14+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)| +|llama-3.1-405b|Meta Llama|5+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)| +|llama-3.2-1b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-3.2-1B)| +|llama-3.2-3b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/blog/llama32)| +|llama-3.2-11b|Meta Llama|3+ Providers|[ai.meta.com](https://ai.meta.com/blog/llama-3-2-connect-2024-vision-edge-mobile-devices/)| +|llama-3.2-90b|Meta Llama|2+ Providers|[ai.meta.com](https://ai.meta.com/blog/llama-3-2-connect-2024-vision-edge-mobile-devices/)| +|llamaguard-7b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/LlamaGuard-7b)| +|llamaguard-2-8b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Meta-Llama-Guard-2-8B)| +|mistral-7b|Mistral AI|4+ Providers|[mistral.ai](https://mistral.ai/news/announcing-mistral-7b/)| +|mixtral-8x7b|Mistral AI|6+ Providers|[mistral.ai](https://mistral.ai/news/mixtral-of-experts/)| +|mixtral-8x22b|Mistral AI|3+ Providers|[mistral.ai](https://mistral.ai/news/mixtral-8x22b/)| +|mistral-nemo|Mistral AI|2+ Providers|[huggingface.co](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407)| +|mistral-large|Mistral AI|2+ Providers|[mistral.ai](https://mistral.ai/news/mistral-large-2407/)| +|mixtral-8x7b-dpo|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO)| +|hermes-2-dpo|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO)| +|hermes-2|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B)| +|yi-34b|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Yi-34B)| +|hermes-3|NousResearch|2+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B)| +|gemini|Google DeepMind|1+ Providers|[deepmind.google](http://deepmind.google/technologies/gemini/)| +|gemini-flash|Google DeepMind|4+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash/)| +|gemini-pro|Google DeepMind|10+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/pro/)| +|gemma-2b|Google|5+ Providers|[huggingface.co](https://huggingface.co/google/gemma-2b)| +|gemma-2b-9b|Google|1+ Providers|[huggingface.co](https://huggingface.co/google/gemma-2-9b)| +|gemma-2b-27b|Google|2+ Providers|[huggingface.co](https://huggingface.co/google/gemma-2-27b)| +|gemma-7b|Google|1+ Providers|[huggingface.co](https://huggingface.co/google/gemma-7b)| +|gemma_2_27b|Google|1+ Providers|[huggingface.co](https://huggingface.co/blog/gemma2)| +|claude-2.1|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-2)| +|claude-3-haiku|Anthropic|4+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-haiku)| +|claude-3-sonnet|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)| +|claude-3-opus|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)| +|claude-3.5-sonnet|Anthropic|6+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-5-sonnet)| +|blackboxai|Blackbox AI|1+ 
Providers|[docs.blackbox.chat](https://docs.blackbox.chat/blackbox-ai-1)| +|blackboxai-pro|Blackbox AI|1+ Providers|[docs.blackbox.chat](https://docs.blackbox.chat/blackbox-ai-1)| +|yi-1.5-9b|01-ai|1+ Providers|[huggingface.co](https://huggingface.co/01-ai/Yi-1.5-9B)| +|phi-2|Microsoft|1+ Providers|[huggingface.co](https://huggingface.co/microsoft/phi-2)| +|phi-3-medium-4k|Microsoft|1+ Providers|[huggingface.co](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct)| +|phi-3.5-mini|Microsoft|2+ Providers|[huggingface.co](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct)| +|dbrx-instruct|Databricks|1+ Providers|[huggingface.co](https://huggingface.co/databricks/dbrx-instruct)| +|command-r-plus|CohereForAI|1+ Providers|[docs.cohere.com](https://docs.cohere.com/docs/command-r-plus)| +|sparkdesk-v1.1|iFlytek|1+ Providers|[xfyun.cn](https://www.xfyun.cn/doc/spark/Guide.html)| +|qwen|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen)| +|qwen-1.5-0.5b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen1.5-0.5B)| +|qwen-1.5-7b|Qwen|2+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen1.5-7B)| +|qwen-1.5-14b|Qwen|3+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen1.5-14B)| +|qwen-1.5-72b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen1.5-72B)| +|qwen-1.5-110b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen1.5-110B)| +|qwen-1.5-1.8b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen1.5-1.8B)| +|qwen-2-72b|Qwen|4+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2-72B)| +|glm-3-6b|Zhipu AI|1+ Providers|[github.com/THUDM/ChatGLM3](https://github.com/THUDM/ChatGLM3)| +|glm-4-9B|Zhipu AI|1+ Providers|[github.com/THUDM/GLM-4](https://github.com/THUDM/GLM-4)| +|solar-1-mini|Upstage|1+ Providers|[upstage.ai/](https://www.upstage.ai/feed/product/solarmini-performance-report)| +|solar-10-7b|Upstage|1+ Providers|[huggingface.co](https://huggingface.co/upstage/SOLAR-10.7B-Instruct-v1.0)| +|solar-pro|Upstage|1+ Providers|[huggingface.co](https://huggingface.co/upstage/solar-pro-preview-instruct)| +|pi|Inflection|1+ Providers|[inflection.ai](https://inflection.ai/blog/inflection-2-5)| +|deepseek-coder|DeepSeek|1+ Providers|[huggingface.co](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Instruct)| +|wizardlm-2-7b|WizardLM|1+ Providers|[huggingface.co](https://huggingface.co/dreamgen/WizardLM-2-7B)| +|wizardlm-2-8x22b|WizardLM|2+ Providers|[huggingface.co](https://huggingface.co/alpindale/WizardLM-2-8x22B)| +|sh-n-7b|Together|1+ Providers|[huggingface.co](https://huggingface.co/togethercomputer/StripedHyena-Nous-7B)| +|llava-13b|Yorickvp|1+ Providers|[huggingface.co](https://huggingface.co/liuhaotian/llava-v1.5-13b)| +|lzlv-70b|Lzlv|1+ Providers|[huggingface.co](https://huggingface.co/lizpreciatior/lzlv_70b_fp16_hf)| +|openchat-3.5|OpenChat|1+ Providers|[huggingface.co](https://huggingface.co/openchat/openchat_3.5)| +|openchat-3.6-8b|OpenChat|1+ Providers|[huggingface.co](https://huggingface.co/openchat/openchat-3.6-8b-20240522)| +|phind-codellama-34b-v2|Phind|1+ Providers|[huggingface.co](https://huggingface.co/Phind/Phind-CodeLlama-34B-v2)| +|dolphin-2.9.1-llama-3-70b|Cognitive Computations|1+ Providers|[huggingface.co](https://huggingface.co/cognitivecomputations/dolphin-2.9.1-llama-3-70b)| +|grok-2-mini|x.ai|1+ Providers|[x.ai](https://x.ai/blog/grok-2)| +|grok-2|x.ai|1+ Providers|[x.ai](https://x.ai/blog/grok-2)| +|sonar-online|Perplexity AI|2+ 
Providers|[docs.perplexity.ai](https://docs.perplexity.ai/)| +|sonar-chat|Perplexity AI|1+ Providers|[docs.perplexity.ai](https://docs.perplexity.ai/)| +|mythomax-l2-13b|Gryphe|1+ Providers|[huggingface.co](https://huggingface.co/Gryphe/MythoMax-L2-13b)| +|cosmosrp|Gryphe|1+ Providers|[huggingface.co](https://huggingface.co/PawanKrd/CosmosRP-8k)| +|german-7b|TheBloke|1+ Providers|[huggingface.co](https://huggingface.co/TheBloke/DiscoLM_German_7b_v1-GGUF)| +|tinyllama-1.1b|TinyLlama|1+ Providers|[huggingface.co](https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0)| +|cybertron-7b|TheBloke|1+ Providers|[huggingface.co](https://huggingface.co/fblgit/una-cybertron-7b-v2-bf16)| +|openhermes-2.5|Teknium|1+ Providers|[huggingface.co](https://huggingface.co/datasets/teknium/OpenHermes-2.5)| +|lfm-40b|Liquid|1+ Providers|[liquid.ai](https://www.liquid.ai/liquid-foundation-models)| +|zephyr-7b|HuggingFaceH4|1+ Providers|[huggingface.co](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta)| + + +### Image Models +| Model | Base Provider | Providers | Website | +|-------|---------------|-----------|---------| +|sdxl|Stability AI|1+ Providers|[huggingface.co](https://huggingface.co/docs/diffusers/en/using-diffusers/sdxl)| +|sdxl-lora|Stability AI|1+ Providers|[huggingface.co](https://huggingface.co/blog/lcm_lora)| +|sdxl-turbo|Stability AI|1+ Providers|[huggingface.co](https://huggingface.co/stabilityai/sdxl-turbo)| +|sd-1.5|Stability AI|1+ Providers|[huggingface.co](https://huggingface.co/runwayml/stable-diffusion-v1-5)| +|sd-3|Stability AI|1+ Providers|[huggingface.co](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_3)| +|playground-v2.5|Playground AI|1+ Providers|[huggingface.co](https://huggingface.co/playgroundai/playground-v2.5-1024px-aesthetic)| +|flux|Black Forest Labs|2+ Providers|[github.com/black-forest-labs/flux](https://github.com/black-forest-labs/flux)| +|flux-pro|Black Forest Labs|2+ Providers|[github.com/black-forest-labs/flux](https://github.com/black-forest-labs/flux)| +|flux-realism|Flux AI|2+ Providers|[]()| +|flux-anime|Flux AI|1+ Providers|[]()| +|flux-3d|Flux AI|1+ Providers|[]()| +|flux-disney|Flux AI|1+ Providers|[]()| +|flux-pixel|Flux AI|1+ Providers|[]()| +|flux-4o|Flux AI|1+ Providers|[]()| +|flux-schnell|Black Forest Labs|2+ Providers|[huggingface.co](https://huggingface.co/black-forest-labs/FLUX.1-schnell)| +|dalle|OpenAI|1+ Providers|[openai.com](https://openai.com/index/dall-e/)| +|dalle-2|OpenAI|1+ Providers|[openai.com](https://openai.com/index/dall-e-2/)| +|emi||1+ Providers|[]()| +|any-dark||1+ Providers|[]()| +|midjourney|Midjourney|1+ Providers|[docs.midjourney.com](https://docs.midjourney.com/docs/model-versions)| + +### Vision Models +| Model | Base Provider | Providers | Website | +|-------|---------------|-----------|---------| +|gpt-4-vision|OpenAI|1+ Providers|[openai.com](https://openai.com/research/gpt-4v-system-card)| +|gemini-pro-vision|Google DeepMind|1+ Providers | [deepmind.google](https://deepmind.google/technologies/gemini/)| +|blackboxai|Blackbox AI|1+ Providers|[docs.blackbox.chat](https://docs.blackbox.chat/blackbox-ai-1)| + +### Providers and vision models +| Provider | Base Provider | | Vision Models | Status | Auth | +|-------|---------------|-----------|---------|---------|---------| +| `g4f.Provider.Blackbox` | Blackbox AI | | `blackboxai, gemini-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b, gpt-4o, gemini-pro` | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ | + +## 
Conclusion and Usage Tips +This document provides a comprehensive overview of various AI providers and models available for text generation, image generation, and vision tasks. **When choosing a provider or model, consider the following factors:** + 1. **Availability**: Check the status of the provider to ensure it's currently active and accessible. + 2. **Model Capabilities**: Different models excel at different tasks. Choose a model that best fits your specific needs, whether it's text generation, image creation, or vision-related tasks. + 3. **Authentication**: Some providers require authentication, while others don't. Consider this when selecting a provider for your project. + 4. **Streaming Support**: If real-time responses are important for your application, prioritize providers that offer streaming capabilities. + 5. **Vision Models**: For tasks requiring image understanding or multimodal interactions, look for providers offering vision models. + +Remember to stay updated with the latest developments in the AI field, as new models and providers are constantly emerging and evolving. + +--- + +[Return to Home](/) diff --git a/docs/requirements.md b/docs/requirements.md index a4137a64..f5c598ca 100644 --- a/docs/requirements.md +++ b/docs/requirements.md @@ -38,13 +38,10 @@ Install required package for loading cookies from browser: ``` pip install browser_cookie3 ``` -Install curl_cffi for better protection from being blocked: -``` -pip install curl_cffi -``` Install all packages and uninstall this package for disabling the webdriver: ``` pip uninstall undetected-chromedriver ``` -[Return to Home](/)
\ No newline at end of file +--- +[Return to Home](/) diff --git a/etc/examples/api.py b/etc/examples/api.py index 1ab9b51b..f8f5d5ec 100644 --- a/etc/examples/api.py +++ b/etc/examples/api.py @@ -6,14 +6,19 @@ body = { "provider": "", "stream": True, "messages": [ - {"role": "assistant", "content": "What can you do? Who are you?"} + {"role": "user", "content": "What can you do? Who are you?"} ] } -lines = requests.post(url, json=body, stream=True).iter_lines() -for line in lines: +response = requests.post(url, json=body, stream=True) +response.raise_for_status() +for line in response.iter_lines(): if line.startswith(b"data: "): try: - print(json.loads(line[6:]).get("choices", [{"delta": {}}])[0]["delta"].get("content", ""), end="") + json_data = json.loads(line[6:]) + if json_data.get("error"): + print(json_data) + break + print(json_data.get("choices", [{"delta": {}}])[0]["delta"].get("content", ""), end="") except json.JSONDecodeError: pass print()
\ No newline at end of file diff --git a/etc/examples/image_api.py b/etc/examples/image_api.py index dbae22ed..9a438f9b 100644 --- a/etc/examples/image_api.py +++ b/etc/examples/image_api.py @@ -1,9 +1,9 @@ import requests url = "http://localhost:1337/v1/images/generations" body = { - "prompt": "heaven for dogs", - "provider": "OpenaiAccount", - "response_format": "b64_json", + "model": "dall-e", + "prompt": "hello world user", + #"response_format": "b64_json", } data = requests.post(url, json=body, stream=True).json() print(data)
\ No newline at end of file diff --git a/etc/tool/copilot.py b/etc/tool/copilot.py index ed1fdf16..817d92da 100644 --- a/etc/tool/copilot.py +++ b/etc/tool/copilot.py @@ -219,9 +219,6 @@ def main(): if not pull: print(f"No PR number found") exit() - if pull.get_reviews().totalCount > 0 or pull.get_issue_comments().totalCount > 0: - print(f"Has already a review") - exit() diff = get_diff(pull.diff_url) except Exception as e: print(f"Error get details: {e.__class__.__name__}: {e}") @@ -231,6 +228,9 @@ def main(): except Exception as e: print(f"Error create review: {e}") exit(1) + if pull.get_reviews().totalCount > 0 or pull.get_issue_comments().totalCount > 0: + pull.create_issue_comment(body=review) + return try: comments = analyze_code(pull, diff) except Exception as e: diff --git a/etc/tool/create_provider.py b/etc/tool/create_provider.py index 797089cd..7a9827a8 100644 --- a/etc/tool/create_provider.py +++ b/etc/tool/create_provider.py @@ -33,14 +33,35 @@ from __future__ import annotations from aiohttp import ClientSession from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin from .helper import format_prompt -class ChatGpt(AsyncGeneratorProvider): - url = "https://chat-gpt.com" +class {name}(AsyncGeneratorProvider, ProviderModelMixin): + label = "" + url = "https://example.com" + api_endpoint = "https://example.com/api/completion" working = True - supports_gpt_35_turbo = True + needs_auth = False + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = '' + models = ['', ''] + + model_aliases = { + "alias1": "model1", + } + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model @classmethod async def create_async_generator( @@ -50,19 +71,21 @@ class ChatGpt(AsyncGeneratorProvider): proxy: str = None, **kwargs ) -> AsyncResult: - headers = { - "authority": "chat-gpt.com", + model = cls.get_model(model) + + headers = {{ + "authority": "example.com", "accept": "application/json", "origin": cls.url, - "referer": f"{cls.url}/chat", - } + "referer": f"{{cls.url}}/chat", + }} async with ClientSession(headers=headers) as session: prompt = format_prompt(messages) - data = { + data = {{ "prompt": prompt, - "purpose": "", - } - async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response: + "model": model, + }} + async with session.post(f"{{cls.url}}/api/chat", json=data, proxy=proxy) as response: response.raise_for_status() async for chunk in response.content: if chunk: @@ -78,7 +101,7 @@ Create a provider from a cURL command. The command is: {command} ``` A example for a provider: -```py +```python {example} ``` The name for the provider class: diff --git a/etc/unittest/__main__.py b/etc/unittest/__main__.py index 351c2bb3..f8a73280 100644 --- a/etc/unittest/__main__.py +++ b/etc/unittest/__main__.py @@ -1,11 +1,11 @@ import unittest + from .asyncio import * from .backend import * from .main import * from .model import * from .client import * -from .async_client import * from .include import * from .integration import * -unittest.main()
\ No newline at end of file +unittest.main() diff --git a/etc/unittest/async_client.py b/etc/unittest/async_client.py deleted file mode 100644 index a49b90ed..00000000 --- a/etc/unittest/async_client.py +++ /dev/null @@ -1,56 +0,0 @@ -import unittest - -from g4f.client import AsyncClient, ChatCompletion, ChatCompletionChunk -from .mocks import AsyncGeneratorProviderMock, ModelProviderMock, YieldProviderMock - -DEFAULT_MESSAGES = [{'role': 'user', 'content': 'Hello'}] - -class AsyncTestPassModel(unittest.IsolatedAsyncioTestCase): - - async def test_response(self): - client = AsyncClient(provider=AsyncGeneratorProviderMock) - response = await client.chat.completions.create(DEFAULT_MESSAGES, "") - self.assertIsInstance(response, ChatCompletion) - self.assertEqual("Mock", response.choices[0].message.content) - - async def test_pass_model(self): - client = AsyncClient(provider=ModelProviderMock) - response = await client.chat.completions.create(DEFAULT_MESSAGES, "Hello") - self.assertIsInstance(response, ChatCompletion) - self.assertEqual("Hello", response.choices[0].message.content) - - async def test_max_tokens(self): - client = AsyncClient(provider=YieldProviderMock) - messages = [{'role': 'user', 'content': chunk} for chunk in ["How ", "are ", "you", "?"]] - response = await client.chat.completions.create(messages, "Hello", max_tokens=1) - self.assertIsInstance(response, ChatCompletion) - self.assertEqual("How ", response.choices[0].message.content) - response = await client.chat.completions.create(messages, "Hello", max_tokens=2) - self.assertIsInstance(response, ChatCompletion) - self.assertEqual("How are ", response.choices[0].message.content) - - async def test_max_stream(self): - client = AsyncClient(provider=YieldProviderMock) - messages = [{'role': 'user', 'content': chunk} for chunk in ["How ", "are ", "you", "?"]] - response = client.chat.completions.create(messages, "Hello", stream=True) - async for chunk in response: - self.assertIsInstance(chunk, ChatCompletionChunk) - if chunk.choices[0].delta.content is not None: - self.assertIsInstance(chunk.choices[0].delta.content, str) - messages = [{'role': 'user', 'content': chunk} for chunk in ["You ", "You ", "Other", "?"]] - response = client.chat.completions.create(messages, "Hello", stream=True, max_tokens=2) - response = [chunk async for chunk in response] - self.assertEqual(len(response), 3) - for chunk in response: - if chunk.choices[0].delta.content is not None: - self.assertEqual(chunk.choices[0].delta.content, "You ") - - async def test_stop(self): - client = AsyncClient(provider=YieldProviderMock) - messages = [{'role': 'user', 'content': chunk} for chunk in ["How ", "are ", "you", "?"]] - response = await client.chat.completions.create(messages, "Hello", stop=["and"]) - self.assertIsInstance(response, ChatCompletion) - self.assertEqual("How are you?", response.choices[0].message.content) - -if __name__ == '__main__': - unittest.main()
\ No newline at end of file diff --git a/etc/unittest/asyncio.py b/etc/unittest/asyncio.py index 8931b79a..5883bae5 100644 --- a/etc/unittest/asyncio.py +++ b/etc/unittest/asyncio.py @@ -61,11 +61,11 @@ class TestChatCompletionNestAsync(unittest.IsolatedAsyncioTestCase): result = await ChatCompletion.create_async(g4f.models.default, DEFAULT_MESSAGES, ProviderMock) self.assertEqual("Mock",result) - async def test_nested(self): + async def _test_nested(self): result = ChatCompletion.create(g4f.models.default, DEFAULT_MESSAGES, AsyncProviderMock) self.assertEqual("Mock",result) - async def test_nested_generator(self): + async def _test_nested_generator(self): result = ChatCompletion.create(g4f.models.default, DEFAULT_MESSAGES, AsyncGeneratorProviderMock) self.assertEqual("Mock",result) diff --git a/etc/unittest/backend.py b/etc/unittest/backend.py index ee6174d5..a2999c5c 100644 --- a/etc/unittest/backend.py +++ b/etc/unittest/backend.py @@ -1,15 +1,19 @@ +from __future__ import annotations + import unittest import asyncio from unittest.mock import MagicMock -from .mocks import ProviderMock -import g4f from g4f.errors import MissingRequirementsError - try: - from g4f.gui.server.backend import Backend_Api, get_error_message + from g4f.gui.server.backend import Backend_Api has_requirements = True except: has_requirements = False +try: + from duckduckgo_search.exceptions import DuckDuckGoSearchException +except ImportError: + class DuckDuckGoSearchException: + pass class TestBackendApi(unittest.TestCase): @@ -31,28 +35,15 @@ class TestBackendApi(unittest.TestCase): def test_get_providers(self): response = self.api.get_providers() - self.assertIsInstance(response, list) + self.assertIsInstance(response, dict) self.assertTrue(len(response) > 0) def test_search(self): from g4f.gui.server.internet import search try: result = asyncio.run(search("Hello")) + except DuckDuckGoSearchException as e: + self.skipTest(e) except MissingRequirementsError: self.skipTest("search is not installed") - self.assertEqual(5, len(result)) - -class TestUtilityFunctions(unittest.TestCase): - - def setUp(self): - if not has_requirements: - self.skipTest("gui is not installed") - - def test_get_error_message(self): - g4f.debug.last_provider = ProviderMock - exception = Exception("Message") - result = get_error_message(exception) - self.assertEqual("ProviderMock: Exception: Message", result) - -if __name__ == '__main__': - unittest.main()
\ No newline at end of file + self.assertEqual(5, len(result))
\ No newline at end of file diff --git a/etc/unittest/client.py b/etc/unittest/client.py index ec8aa4b7..97f9f6c8 100644 --- a/etc/unittest/client.py +++ b/etc/unittest/client.py @@ -1,10 +1,62 @@ +from __future__ import annotations + import unittest -from g4f.client import Client, ChatCompletion, ChatCompletionChunk +from g4f.client import Client, AsyncClient, ChatCompletion, ChatCompletionChunk from .mocks import AsyncGeneratorProviderMock, ModelProviderMock, YieldProviderMock DEFAULT_MESSAGES = [{'role': 'user', 'content': 'Hello'}] +class AsyncTestPassModel(unittest.IsolatedAsyncioTestCase): + + async def test_response(self): + client = AsyncClient(provider=AsyncGeneratorProviderMock) + response = await client.chat.completions.create(DEFAULT_MESSAGES, "") + self.assertIsInstance(response, ChatCompletion) + self.assertEqual("Mock", response.choices[0].message.content) + + async def test_pass_model(self): + client = AsyncClient(provider=ModelProviderMock) + response = await client.chat.completions.create(DEFAULT_MESSAGES, "Hello") + self.assertIsInstance(response, ChatCompletion) + self.assertEqual("Hello", response.choices[0].message.content) + + async def test_max_tokens(self): + client = AsyncClient(provider=YieldProviderMock) + messages = [{'role': 'user', 'content': chunk} for chunk in ["How ", "are ", "you", "?"]] + response = await client.chat.completions.create(messages, "Hello", max_tokens=1) + self.assertIsInstance(response, ChatCompletion) + self.assertEqual("How ", response.choices[0].message.content) + response = await client.chat.completions.create(messages, "Hello", max_tokens=2) + self.assertIsInstance(response, ChatCompletion) + self.assertEqual("How are ", response.choices[0].message.content) + + async def test_max_stream(self): + client = AsyncClient(provider=YieldProviderMock) + messages = [{'role': 'user', 'content': chunk} for chunk in ["How ", "are ", "you", "?"]] + response = client.chat.completions.create(messages, "Hello", stream=True) + async for chunk in response: + chunk: ChatCompletionChunk = chunk + self.assertIsInstance(chunk, ChatCompletionChunk) + if chunk.choices[0].delta.content is not None: + self.assertIsInstance(chunk.choices[0].delta.content, str) + messages = [{'role': 'user', 'content': chunk} for chunk in ["You ", "You ", "Other", "?"]] + response = client.chat.completions.create(messages, "Hello", stream=True, max_tokens=2) + response_list = [] + async for chunk in response: + response_list.append(chunk) + self.assertEqual(len(response_list), 3) + for chunk in response_list: + if chunk.choices[0].delta.content is not None: + self.assertEqual(chunk.choices[0].delta.content, "You ") + + async def test_stop(self): + client = AsyncClient(provider=YieldProviderMock) + messages = [{'role': 'user', 'content': chunk} for chunk in ["How ", "are ", "you", "?"]] + response = await client.chat.completions.create(messages, "Hello", stop=["and"]) + self.assertIsInstance(response, ChatCompletion) + self.assertEqual("How are you?", response.choices[0].message.content) + class TestPassModel(unittest.TestCase): def test_response(self): @@ -39,9 +91,9 @@ class TestPassModel(unittest.TestCase): self.assertIsInstance(chunk.choices[0].delta.content, str) messages = [{'role': 'user', 'content': chunk} for chunk in ["You ", "You ", "Other", "?"]] response = client.chat.completions.create(messages, "Hello", stream=True, max_tokens=2) - response = list(response) - self.assertEqual(len(response), 3) - for chunk in response: + response_list = list(response) + 
self.assertEqual(len(response_list), 3) + for chunk in response_list: if chunk.choices[0].delta.content is not None: self.assertEqual(chunk.choices[0].delta.content, "You ") diff --git a/g4f/Provider/AIUncensored.py b/g4f/Provider/AIUncensored.py new file mode 100644 index 00000000..c2f0f4b3 --- /dev/null +++ b/g4f/Provider/AIUncensored.py @@ -0,0 +1,132 @@ +from __future__ import annotations + +import json +import random +from aiohttp import ClientSession, ClientError +import asyncio +from itertools import cycle + +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..image import ImageResponse + +class AIUncensored(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://www.aiuncensored.info/ai_uncensored" + api_endpoints_text = [ + "https://twitterclone-i0wr.onrender.com/api/chat", + "https://twitterclone-4e8t.onrender.com/api/chat", + "https://twitterclone-8wd1.onrender.com/api/chat", + ] + api_endpoints_image = [ + "https://twitterclone-4e8t.onrender.com/api/image", + "https://twitterclone-i0wr.onrender.com/api/image", + "https://twitterclone-8wd1.onrender.com/api/image", + ] + working = True + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'TextGenerations' + text_models = [default_model] + image_models = ['ImageGenerations'] + models = [*text_models, *image_models] + + model_aliases = { + "flux": "ImageGenerations", + } + + @staticmethod + def generate_cipher() -> str: + """Generate a cipher in format like '3221229284179118'""" + return ''.join([str(random.randint(0, 9)) for _ in range(16)]) + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + 'accept': '*/*', + 'accept-language': 'en-US,en;q=0.9', + 'cache-control': 'no-cache', + 'content-type': 'application/json', + 'origin': 'https://www.aiuncensored.info', + 'pragma': 'no-cache', + 'priority': 'u=1, i', + 'referer': 'https://www.aiuncensored.info/', + 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Linux"', + 'sec-fetch-dest': 'empty', + 'sec-fetch-mode': 'cors', + 'sec-fetch-site': 'cross-site', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36' + } + + async with ClientSession(headers=headers) as session: + if model in cls.image_models: + prompt = messages[-1]['content'] + data = { + "prompt": prompt, + "cipher": cls.generate_cipher() + } + + endpoints = cycle(cls.api_endpoints_image) + + while True: + endpoint = next(endpoints) + try: + async with session.post(endpoint, json=data, proxy=proxy, timeout=10) as response: + response.raise_for_status() + response_data = await response.json() + image_url = response_data['image_url'] + image_response = ImageResponse(images=image_url, alt=prompt) + yield image_response + return + except (ClientError, asyncio.TimeoutError): + continue + + elif model in cls.text_models: + data = { + "messages": messages, + "cipher": cls.generate_cipher() + } + + endpoints = cycle(cls.api_endpoints_text) + + while True: + endpoint = next(endpoints) + try: + async with session.post(endpoint, 
json=data, proxy=proxy, timeout=10) as response: + response.raise_for_status() + full_response = "" + async for line in response.content: + line = line.decode('utf-8') + if line.startswith("data: "): + try: + json_str = line[6:] + if json_str != "[DONE]": + data = json.loads(json_str) + if "data" in data: + full_response += data["data"] + yield data["data"] + except json.JSONDecodeError: + continue + return + except (ClientError, asyncio.TimeoutError): + continue diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py index 88896096..6254e160 100644 --- a/g4f/Provider/Airforce.py +++ b/g4f/Provider/Airforce.py @@ -1,93 +1,29 @@ from __future__ import annotations -from aiohttp import ClientSession, ClientResponseError -from urllib.parse import urlencode +import random import json -import io -import asyncio +import re from ..typing import AsyncResult, Messages from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ..image import ImageResponse, is_accepted_format -from .helper import format_prompt +from ..image import ImageResponse +from ..requests import StreamSession, raise_for_status +from .airforce.AirforceChat import AirforceChat +from .airforce.AirforceImage import AirforceImage class Airforce(AsyncGeneratorProvider, ProviderModelMixin): url = "https://api.airforce" - text_api_endpoint = "https://api.airforce/chat/completions" - image_api_endpoint = "https://api.airforce/v1/imagine2" + api_endpoint_completions = AirforceChat.api_endpoint + api_endpoint_imagine = AirforceImage.api_endpoint working = True - supports_gpt_35_turbo = True - supports_gpt_4 = True - supports_stream = True + default_model = "gpt-4o-mini" supports_system_message = True supports_message_history = True - default_model = 'llama-3-70b-chat' text_models = [ - # Open source models - 'llama-2-13b-chat', - - 'llama-3-70b-chat', - 'llama-3-70b-chat-turbo', - 'llama-3-70b-chat-lite', - - 'llama-3-8b-chat', - 'llama-3-8b-chat-turbo', - 'llama-3-8b-chat-lite', - - 'llama-3.1-405b-turbo', + 'gpt-4-turbo', + default_model, 'llama-3.1-70b-turbo', 'llama-3.1-8b-turbo', - - 'LlamaGuard-2-8b', - 'Llama-Guard-7b', - 'Meta-Llama-Guard-3-8B', - - 'Mixtral-8x7B-Instruct-v0.1', - 'Mixtral-8x22B-Instruct-v0.1', - 'Mistral-7B-Instruct-v0.1', - 'Mistral-7B-Instruct-v0.2', - 'Mistral-7B-Instruct-v0.3', - - 'Qwen1.5-72B-Chat', - 'Qwen1.5-110B-Chat', - 'Qwen2-72B-Instruct', - - 'gemma-2b-it', - 'gemma-2-9b-it', - 'gemma-2-27b-it', - - 'dbrx-instruct', - - 'deepseek-llm-67b-chat', - - 'Nous-Hermes-2-Mixtral-8x7B-DPO', - 'Nous-Hermes-2-Yi-34B', - - 'WizardLM-2-8x22B', - - 'SOLAR-10.7B-Instruct-v1.0', - - 'StripedHyena-Nous-7B', - - 'sparkdesk', - - - # Other models - 'chatgpt-4o-latest', - 'gpt-4', - 'gpt-4-turbo', - 'gpt-4o-mini-2024-07-18', - 'gpt-4o-mini', - 'gpt-4o', - 'gpt-3.5-turbo', - 'gpt-3.5-turbo-0125', - 'gpt-3.5-turbo-1106', - 'gpt-3.5-turbo-16k', - 'gpt-3.5-turbo-0613', - 'gpt-3.5-turbo-16k-0613', - - 'gemini-1.5-flash', - 'gemini-1.5-pro', ] image_models = [ 'flux', @@ -96,160 +32,140 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin): 'flux-3d', 'flux-disney', 'flux-pixel', + 'flux-4o', 'any-dark', ] - models = [ *text_models, - *image_models + *image_models, ] model_aliases = { - # Open source models - "llama-2-13b": "llama-2-13b-chat", - - "llama-3-70b": "llama-3-70b-chat", - "llama-3-70b": "llama-3-70b-chat-turbo", - "llama-3-70b": "llama-3-70b-chat-lite", - - "llama-3-8b": "llama-3-8b-chat", - "llama-3-8b": "llama-3-8b-chat-turbo", - "llama-3-8b": "llama-3-8b-chat-lite", - - 
"llama-3.1-405b": "llama-3.1-405b-turbo", + "gpt-4o": "chatgpt-4o-latest", "llama-3.1-70b": "llama-3.1-70b-turbo", "llama-3.1-8b": "llama-3.1-8b-turbo", - - "mixtral-8x7b": "Mixtral-8x7B-Instruct-v0.1", - "mixtral-8x22b": "Mixtral-8x22B-Instruct-v0.1", - "mistral-7b": "Mistral-7B-Instruct-v0.1", - "mistral-7b": "Mistral-7B-Instruct-v0.2", - "mistral-7b": "Mistral-7B-Instruct-v0.3", - - "mixtral-8x7b-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO", - - "qwen-1-5-72b": "Qwen1.5-72B-Chat", - "qwen-1_5-110b": "Qwen1.5-110B-Chat", - "qwen-2-72b": "Qwen2-72B-Instruct", - - "gemma-2b": "gemma-2b-it", - "gemma-2b-9b": "gemma-2-9b-it", - "gemma-2b-27b": "gemma-2-27b-it", - - "deepseek": "deepseek-llm-67b-chat", - - "yi-34b": "Nous-Hermes-2-Yi-34B", - - "wizardlm-2-8x22b": "WizardLM-2-8x22B", - - "solar-10-7b": "SOLAR-10.7B-Instruct-v1.0", - - "sh-n-7b": "StripedHyena-Nous-7B", - - "sparkdesk-v1.1": "sparkdesk", - - - # Other models - "gpt-4o": "chatgpt-4o-latest", - "gpt-4o-mini": "gpt-4o-mini-2024-07-18", - - "gpt-3.5-turbo": "gpt-3.5-turbo-0125", - "gpt-3.5-turbo": "gpt-3.5-turbo-1106", - "gpt-3.5-turbo": "gpt-3.5-turbo-16k", - "gpt-3.5-turbo": "gpt-3.5-turbo-0613", - "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613", - - - "gemini-flash": "gemini-1.5-flash", - "gemini-pro": "gemini-1.5-pro", + "gpt-4": "gpt-4-turbo", } @classmethod - async def create_async_generator( + def create_async_generator( cls, model: str, messages: Messages, proxy: str = None, + seed: int = None, + size: str = "1:1", + stream: bool = False, **kwargs ) -> AsyncResult: model = cls.get_model(model) - + + if model in cls.image_models: + return cls._generate_image(model, messages, proxy, seed, size) + else: + return cls._generate_text(model, messages, proxy, stream, **kwargs) + + @classmethod + async def _generate_image( + cls, + model: str, + messages: Messages, + proxy: str = None, + seed: int = None, + size: str = "1:1", + **kwargs + ) -> AsyncResult: headers = { "accept": "*/*", "accept-language": "en-US,en;q=0.9", - "content-type": "application/json", - "origin": "https://api.airforce", - "sec-ch-ua": '"Chromium";v="128", "Not(A:Brand";v="24"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "cross-site", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36" + "cache-control": "no-cache", + "origin": "https://llmplayground.net", + "user-agent": "Mozilla/5.0" } + if seed is None: + seed = random.randint(0, 100000) + prompt = messages[-1]['content'] - - if model in cls.image_models: - async for item in cls.generate_image(model, messages, headers, proxy, **kwargs): - yield item - else: - async for item in cls.generate_text(model, messages, headers, proxy, **kwargs): - yield item + async with StreamSession(headers=headers, proxy=proxy) as session: + params = { + "model": model, + "prompt": prompt, + "size": size, + "seed": seed + } + async with session.get(f"{cls.api_endpoint_imagine}", params=params) as response: + await raise_for_status(response) + content_type = response.headers.get('Content-Type', '').lower() + + if 'application/json' in content_type: + raise RuntimeError(await response.json().get("error", {}).get("message")) + elif 'image' in content_type: + image_data = b"" + async for chunk in response.iter_content(): + if chunk: + image_data += chunk + image_url = f"{cls.api_endpoint_imagine}?model={model}&prompt={prompt}&size={size}&seed={seed}" + yield 
ImageResponse(images=image_url, alt=prompt) @classmethod - async def generate_text(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult: - async with ClientSession(headers=headers) as session: + async def _generate_text( + cls, + model: str, + messages: Messages, + proxy: str = None, + stream: bool = False, + max_tokens: int = 4096, + temperature: float = 1, + top_p: float = 1, + **kwargs + ) -> AsyncResult: + headers = { + "accept": "*/*", + "accept-language": "en-US,en;q=0.9", + "authorization": "Bearer missing api key", + "content-type": "application/json", + "user-agent": "Mozilla/5.0" + } + async with StreamSession(headers=headers, proxy=proxy) as session: data = { - "messages": [{"role": "user", "content": format_prompt(messages)}], + "messages": messages, "model": model, - "temperature": kwargs.get('temperature', 1), - "top_p": kwargs.get('top_p', 1), - "stream": True + "max_tokens": max_tokens, + "temperature": temperature, + "top_p": top_p, + "stream": stream } - - async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response: - response.raise_for_status() - async for line in response.content: - if line: - line = line.decode('utf-8').strip() - if line.startswith("data: "): - try: - data = json.loads(line[6:]) - if 'choices' in data and len(data['choices']) > 0: - delta = data['choices'][0].get('delta', {}) - if 'content' in delta: - yield delta['content'] - except json.JSONDecodeError: - continue - elif line == "data: [DONE]": - break + async with session.post(cls.api_endpoint_completions, json=data) as response: + await raise_for_status(response) + content_type = response.headers.get('Content-Type', '').lower() + if 'application/json' in content_type: + json_data = await response.json() + if json_data.get("model") == "error": + raise RuntimeError(json_data['choices'][0]['message'].get('content', '')) + if stream: + async for line in response.iter_lines(): + if line: + line = line.decode('utf-8').strip() + if line.startswith("data: ") and line != "data: [DONE]": + json_data = json.loads(line[6:]) + content = json_data['choices'][0]['delta'].get('content', '') + if content: + yield cls._filter_content(content) + else: + json_data = await response.json() + content = json_data['choices'][0]['message']['content'] + yield cls._filter_content(content) @classmethod - async def generate_image(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult: - prompt = messages[-1]['content'] if messages else "" - params = { - "prompt": prompt, - "size": kwargs.get("size", "1:1"), - "seed": kwargs.get("seed"), - "model": model - } - params = {k: v for k, v in params.items() if v is not None} - - try: - async with ClientSession(headers=headers) as session: - async with session.get(cls.image_api_endpoint, params=params, proxy=proxy) as response: - response.raise_for_status() - content = await response.read() - - if response.content_type.startswith('image/'): - image_url = str(response.url) - yield ImageResponse(image_url, prompt) - else: - try: - text = content.decode('utf-8', errors='ignore') - yield f"Error: {text}" - except Exception as decode_error: - yield f"Error: Unable to decode response - {str(decode_error)}" - except ClientResponseError as e: - yield f"Error: HTTP {e.status}: {e.message}" - except Exception as e: - yield f"Unexpected error: {str(e)}" + def _filter_content(cls, part_response: str) -> str: + part_response = re.sub( + r"One message exceeds the \d+chars per message 
limit\..+https:\/\/discord\.com\/invite\/\S+", + '', + part_response + ) + + part_response = re.sub( + r"Rate limit \(\d+\/minute\) exceeded\. Join our discord for more: .+https:\/\/discord\.com\/invite\/\S+", + '', + part_response + ) + return part_response
\ No newline at end of file diff --git a/g4f/Provider/Allyfy.py b/g4f/Provider/Allyfy.py deleted file mode 100644 index 8733b1ec..00000000 --- a/g4f/Provider/Allyfy.py +++ /dev/null @@ -1,71 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession -import json - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider -from .helper import format_prompt - - -class Allyfy(AsyncGeneratorProvider): - url = "https://chatbot.allyfy.chat" - api_endpoint = "/api/v1/message/stream/super/chat" - working = True - supports_gpt_35_turbo = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - headers = { - "accept": "text/event-stream", - "accept-language": "en-US,en;q=0.9", - "content-type": "application/json;charset=utf-8", - "dnt": "1", - "origin": "https://www.allyfy.chat", - "priority": "u=1, i", - "referer": "https://www.allyfy.chat/", - "referrer": "https://www.allyfy.chat", - 'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="126"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-site", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36" - } - async with ClientSession(headers=headers) as session: - prompt = format_prompt(messages) - data = { - "messages": [{"content": prompt, "role": "user"}], - "content": prompt, - "baseInfo": { - "clientId": "q08kdrde1115003lyedfoir6af0yy531", - "pid": "38281", - "channelId": "100000", - "locale": "en-US", - "localZone": 180, - "packageName": "com.cch.allyfy.webh", - } - } - async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response: - response.raise_for_status() - full_response = [] - async for line in response.content: - line = line.decode().strip() - if line.startswith("data:"): - data_content = line[5:] - if data_content == "[DONE]": - break - try: - json_data = json.loads(data_content) - if "content" in json_data: - full_response.append(json_data["content"]) - except json.JSONDecodeError: - continue - yield "".join(full_response) diff --git a/g4f/Provider/AmigoChat.py b/g4f/Provider/AmigoChat.py new file mode 100644 index 00000000..2e66dccf --- /dev/null +++ b/g4f/Provider/AmigoChat.py @@ -0,0 +1,176 @@ +from __future__ import annotations + +import json +import uuid + +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..image import ImageResponse +from ..requests import StreamSession, raise_for_status +from ..errors import ResponseStatusError + +class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://amigochat.io/chat/" + chat_api_endpoint = "https://api.amigochat.io/v1/chat/completions" + image_api_endpoint = "https://api.amigochat.io/v1/images/generations" + working = True + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'gpt-4o-mini' + + chat_models = [ + 'gpt-4o', + default_model, + 'o1-preview', + 'o1-mini', + 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo', + 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo', + 'claude-3-sonnet-20240229', + 'gemini-1.5-pro', + ] + + image_models = [ + 'flux-pro/v1.1', + 'flux-realism', + 'flux-pro', + 'dalle-e-3', + ] + + models = [*chat_models, *image_models] + + model_aliases = { + "o1": "o1-preview", + 
"llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo", + "llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo", + "claude-3.5-sonnet": "claude-3-sonnet-20240229", + "gemini-pro": "gemini-1.5-pro", + + "flux-pro": "flux-pro/v1.1", + "dalle-3": "dalle-e-3", + } + + persona_ids = { + 'gpt-4o': "gpt", + 'gpt-4o-mini': "amigo", + 'o1-preview': "openai-o-one", + 'o1-mini': "openai-o-one-mini", + 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo': "llama-three-point-one", + 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo': "llama-3-2", + 'claude-3-sonnet-20240229': "claude", + 'gemini-1.5-pro': "gemini-1-5-pro", + 'flux-pro/v1.1': "flux-1-1-pro", + 'flux-realism': "flux-realism", + 'flux-pro': "flux-pro", + 'dalle-e-3': "dalle-three", + } + + @classmethod + def get_personaId(cls, model: str) -> str: + return cls.persona_ids[model] + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + stream: bool = False, + timeout: int = 300, + frequency_penalty: float = 0, + max_tokens: int = 4000, + presence_penalty: float = 0, + temperature: float = 0.5, + top_p: float = 0.95, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + device_uuid = str(uuid.uuid4()) + max_retries = 3 + retry_count = 0 + + while retry_count < max_retries: + try: + headers = { + "accept": "*/*", + "accept-language": "en-US,en;q=0.9", + "authorization": "Bearer", + "cache-control": "no-cache", + "content-type": "application/json", + "origin": cls.url, + "pragma": "no-cache", + "priority": "u=1, i", + "referer": f"{cls.url}/", + "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"', + "sec-ch-ua-mobile": "?0", + "sec-ch-ua-platform": '"Linux"', + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36", + "x-device-language": "en-US", + "x-device-platform": "web", + "x-device-uuid": device_uuid, + "x-device-version": "1.0.41" + } + + async with StreamSession(headers=headers, proxy=proxy) as session: + if model not in cls.image_models: + data = { + "messages": messages, + "model": model, + "personaId": cls.get_personaId(model), + "frequency_penalty": frequency_penalty, + "max_tokens": max_tokens, + "presence_penalty": presence_penalty, + "stream": stream, + "temperature": temperature, + "top_p": top_p + } + async with session.post(cls.chat_api_endpoint, json=data, timeout=timeout) as response: + await raise_for_status(response) + async for line in response.iter_lines(): + line = line.decode('utf-8').strip() + if line.startswith('data: '): + if line == 'data: [DONE]': + break + try: + chunk = json.loads(line[6:]) # Remove 'data: ' prefix + if 'choices' in chunk and len(chunk['choices']) > 0: + choice = chunk['choices'][0] + if 'delta' in choice: + content = choice['delta'].get('content') + elif 'text' in choice: + content = choice['text'] + else: + content = None + if content: + yield content + except json.JSONDecodeError: + pass + else: + # Image generation + prompt = messages[-1]['content'] + data = { + "prompt": prompt, + "model": model, + "personaId": cls.get_personaId(model) + } + async with session.post(cls.image_api_endpoint, json=data) as response: + await raise_for_status(response) + response_data = await response.json() + if "data" in response_data: + image_urls = [] + for item in response_data["data"]: + if "url" in item: + image_url = item["url"] + image_urls.append(image_url) + if image_urls: + yield ImageResponse(image_urls, prompt) + else: + yield None + break + 
except (ResponseStatusError, Exception) as e: + retry_count += 1 + if retry_count >= max_retries: + raise e + device_uuid = str(uuid.uuid4()) diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py index 4056f9ff..cdc2b9d9 100644 --- a/g4f/Provider/Bing.py +++ b/g4f/Provider/Bing.py @@ -17,7 +17,7 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin from .helper import get_random_hex from .bing.upload_image import upload_image from .bing.conversation import Conversation, create_conversation, delete_conversation -from .BingCreateImages import BingCreateImages +from .needs_auth.BingCreateImages import BingCreateImages from .. import debug class Tones: @@ -37,7 +37,6 @@ class Bing(AsyncGeneratorProvider, ProviderModelMixin): url = "https://bing.com/chat" working = True supports_message_history = True - supports_gpt_4 = True default_model = "Balanced" default_vision_model = "gpt-4-vision" models = [getattr(Tones, key) for key in Tones.__dict__ if not key.startswith("__")] diff --git a/g4f/Provider/Binjie.py b/g4f/Provider/Binjie.py deleted file mode 100644 index 90f9ec3c..00000000 --- a/g4f/Provider/Binjie.py +++ /dev/null @@ -1,65 +0,0 @@ -from __future__ import annotations - -import random -from ..requests import StreamSession - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, format_prompt - - -class Binjie(AsyncGeneratorProvider): - url = "https://chat18.aichatos8.com" - working = True - supports_gpt_4 = True - supports_stream = True - supports_system_message = True - supports_message_history = True - - @staticmethod - async def create_async_generator( - model: str, - messages: Messages, - proxy: str = None, - timeout: int = 120, - **kwargs, - ) -> AsyncResult: - async with StreamSession( - headers=_create_header(), proxies={"https": proxy}, timeout=timeout - ) as session: - payload = _create_payload(messages, **kwargs) - async with session.post("https://api.binjie.fun/api/generateStream", json=payload) as response: - response.raise_for_status() - async for chunk in response.iter_content(): - if chunk: - chunk = chunk.decode() - if "sorry, 您的ip已由于触发防滥用检测而被封禁" in chunk: - raise RuntimeError("IP address is blocked by abuse detection.") - yield chunk - - -def _create_header(): - return { - "accept" : "application/json, text/plain, */*", - "content-type" : "application/json", - "origin" : "https://chat18.aichatos8.com", - "referer" : "https://chat18.aichatos8.com/" - } - - -def _create_payload( - messages: Messages, - system_message: str = "", - user_id: int = None, - **kwargs -): - if not user_id: - user_id = random.randint(1690000544336, 2093025544336) - return { - "prompt": format_prompt(messages), - "network": True, - "system": system_message, - "withoutContext": False, - "stream": True, - "userId": f"#/chat/{user_id}" - } - diff --git a/g4f/Provider/Bixin123.py b/g4f/Provider/Bixin123.py deleted file mode 100644 index 39422c93..00000000 --- a/g4f/Provider/Bixin123.py +++ /dev/null @@ -1,94 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession -import json -import random -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ..typing import AsyncResult, Messages -from .helper import format_prompt - -class Bixin123(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://chat.bixin123.com" - api_endpoint = "https://chat.bixin123.com/api/chatgpt/chat-process" - working = True - supports_gpt_35_turbo = True - supports_gpt_4 = True - - default_model = 'gpt-3.5-turbo-0125' - 
models = ['gpt-3.5-turbo-0125', 'gpt-3.5-turbo-16k-0613', 'gpt-4-turbo', 'qwen-turbo'] - - model_aliases = { - "gpt-3.5-turbo": "gpt-3.5-turbo-0125", - "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613", - } - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.models: - return model - elif model in cls.model_aliases: - return cls.model_aliases[model] - else: - return cls.default_model - - @classmethod - def generate_fingerprint(cls) -> str: - return str(random.randint(100000000, 999999999)) - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - model = cls.get_model(model) - - headers = { - "accept": "application/json, text/plain, */*", - "accept-language": "en-US,en;q=0.9", - "cache-control": "no-cache", - "content-type": "application/json", - "fingerprint": cls.generate_fingerprint(), - "origin": cls.url, - "pragma": "no-cache", - "priority": "u=1, i", - "referer": f"{cls.url}/chat", - "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36", - "x-website-domain": "chat.bixin123.com", - } - - async with ClientSession(headers=headers) as session: - prompt = format_prompt(messages) - data = { - "prompt": prompt, - "options": { - "usingNetwork": False, - "file": "" - } - } - async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: - response.raise_for_status() - response_text = await response.text() - - lines = response_text.strip().split("\n") - last_json = None - for line in reversed(lines): - try: - last_json = json.loads(line) - break - except json.JSONDecodeError: - pass - - if last_json: - text = last_json.get("text", "") - yield text - else: - yield "" diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py index e607a43c..8d820344 100644 --- a/g4f/Provider/Blackbox.py +++ b/g4f/Provider/Blackbox.py @@ -1,41 +1,143 @@ from __future__ import annotations -import re -import json +from aiohttp import ClientSession import random import string -from aiohttp import ClientSession +import json +import re +import aiohttp from ..typing import AsyncResult, Messages, ImageType -from ..image import ImageResponse, to_data_uri from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..image import ImageResponse, to_data_uri class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): + label = "Blackbox AI" url = "https://www.blackbox.ai" api_endpoint = "https://www.blackbox.ai/api/chat" working = True supports_stream = True supports_system_message = True supports_message_history = True + _last_validated_value = None + + default_model = 'blackboxai' + + image_models = ['Image Generation', 'repomap'] + + userSelectedModel = ['gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'blackboxai-pro'] - default_model = 'blackbox' - models = [ - 'blackbox', - 'gemini-1.5-flash', - "llama-3.1-8b", - 'llama-3.1-70b', - 'llama-3.1-405b', - 'ImageGenerationLV45LJp' - ] - - model_config = { - "blackbox": {}, + agentMode = { + 'Image Generation': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"}, + } + + trendingAgentMode = { "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'}, "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"}, 'llama-3.1-70b': {'mode': True, 'id': 
"llama-3.1-70b"}, - 'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"}, - 'ImageGenerationLV45LJp': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"}, + 'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405"}, + # + 'Python Agent': {'mode': True, 'id': "Python Agent"}, + 'Java Agent': {'mode': True, 'id': "Java Agent"}, + 'JavaScript Agent': {'mode': True, 'id': "JavaScript Agent"}, + 'HTML Agent': {'mode': True, 'id': "HTML Agent"}, + 'Google Cloud Agent': {'mode': True, 'id': "Google Cloud Agent"}, + 'Android Developer': {'mode': True, 'id': "Android Developer"}, + 'Swift Developer': {'mode': True, 'id': "Swift Developer"}, + 'Next.js Agent': {'mode': True, 'id': "Next.js Agent"}, + 'MongoDB Agent': {'mode': True, 'id': "MongoDB Agent"}, + 'PyTorch Agent': {'mode': True, 'id': "PyTorch Agent"}, + 'React Agent': {'mode': True, 'id': "React Agent"}, + 'Xcode Agent': {'mode': True, 'id': "Xcode Agent"}, + 'AngularJS Agent': {'mode': True, 'id': "AngularJS Agent"}, + 'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"}, + # + 'repomap': {'mode': True, 'id': "repomap"}, + # + 'Heroku Agent': {'mode': True, 'id': "Heroku Agent"}, + 'Godot Agent': {'mode': True, 'id': "Godot Agent"}, + 'Go Agent': {'mode': True, 'id': "Go Agent"}, + 'Gitlab Agent': {'mode': True, 'id': "Gitlab Agent"}, + 'Git Agent': {'mode': True, 'id': "Git Agent"}, + 'Flask Agent': {'mode': True, 'id': "Flask Agent"}, + 'Firebase Agent': {'mode': True, 'id': "Firebase Agent"}, + 'FastAPI Agent': {'mode': True, 'id': "FastAPI Agent"}, + 'Erlang Agent': {'mode': True, 'id': "Erlang Agent"}, + 'Electron Agent': {'mode': True, 'id': "Electron Agent"}, + 'Docker Agent': {'mode': True, 'id': "Docker Agent"}, + 'DigitalOcean Agent': {'mode': True, 'id': "DigitalOcean Agent"}, + 'Bitbucket Agent': {'mode': True, 'id': "Bitbucket Agent"}, + 'Azure Agent': {'mode': True, 'id': "Azure Agent"}, + 'Flutter Agent': {'mode': True, 'id': "Flutter Agent"}, + 'Youtube Agent': {'mode': True, 'id': "Youtube Agent"}, + 'builder Agent': {'mode': True, 'id': "builder Agent"}, } + + model_prefixes = {mode: f"@{value['id']}" for mode, value in trendingAgentMode.items() if mode not in ["gemini-1.5-flash", "llama-3.1-8b", "llama-3.1-70b", "llama-3.1-405b", "repomap"]} + + + models = [default_model, *userSelectedModel, *list(agentMode.keys()), *list(trendingAgentMode.keys())] + + model_aliases = { + "gemini-flash": "gemini-1.5-flash", + "claude-3.5-sonnet": "claude-sonnet-3.5", + "flux": "Image Generation", + } + + @classmethod + async def fetch_validated(cls): + # If the key is already stored in memory, return it + if cls._last_validated_value: + return cls._last_validated_value + + # If the key is not found, perform a search + async with aiohttp.ClientSession() as session: + try: + async with session.get(cls.url) as response: + if response.status != 200: + print("Failed to load the page.") + return cls._last_validated_value + + page_content = await response.text() + js_files = re.findall(r'static/chunks/\d{4}-[a-fA-F0-9]+\.js', page_content) + + key_pattern = re.compile(r'w="([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})"') + + for js_file in js_files: + js_url = f"{cls.url}/_next/{js_file}" + async with session.get(js_url) as js_response: + if js_response.status == 200: + js_content = await js_response.text() + match = key_pattern.search(js_content) + if match: + validated_value = match.group(1) + cls._last_validated_value = validated_value # Keep in mind + return validated_value + 
except Exception as e: + print(f"Error fetching validated value: {e}") + + return cls._last_validated_value + + + @staticmethod + def generate_id(length=7): + characters = string.ascii_letters + string.digits + return ''.join(random.choice(characters) for _ in range(length)) + + @classmethod + def add_prefix_to_messages(cls, messages: Messages, model: str) -> Messages: + prefix = cls.model_prefixes.get(model, "") + if not prefix: + return messages + + new_messages = [] + for message in messages: + new_message = message.copy() + if message['role'] == 'user': + new_message['content'] = (prefix + " " + message['content']).strip() + new_messages.append(new_message) + + return new_messages @classmethod def get_model(cls, model: str) -> str: @@ -52,76 +154,90 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): model: str, messages: Messages, proxy: str = None, + web_search: bool = False, image: ImageType = None, image_name: str = None, **kwargs ) -> AsyncResult: model = cls.get_model(model) - + message_id = cls.generate_id() + messages_with_prefix = cls.add_prefix_to_messages(messages, model) + validated_value = await cls.fetch_validated() + + if image is not None: + messages_with_prefix[-1]['data'] = { + 'fileText': '', + 'imageBase64': to_data_uri(image), + 'title': image_name + } + headers = { - "accept": "*/*", - "accept-language": "en-US,en;q=0.9", - "cache-control": "no-cache", - "content-type": "application/json", - "origin": cls.url, - "pragma": "no-cache", - "referer": f"{cls.url}/", - "sec-ch-ua": '"Not;A=Brand";v="24", "Chromium";v="128"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36" + 'accept': '*/*', + 'accept-language': 'en-US,en;q=0.9', + 'cache-control': 'no-cache', + 'content-type': 'application/json', + 'origin': cls.url, + 'pragma': 'no-cache', + 'priority': 'u=1, i', + 'referer': f'{cls.url}/', + 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Linux"', + 'sec-fetch-dest': 'empty', + 'sec-fetch-mode': 'cors', + 'sec-fetch-site': 'same-origin', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36' } - async with ClientSession(headers=headers) as session: - if image is not None: - messages[-1]["data"] = { - "fileText": image_name, - "imageBase64": to_data_uri(image) - } - - random_id = ''.join(random.choices(string.ascii_letters + string.digits, k=7)) - - data = { - "messages": messages, - "id": random_id, - "previewToken": None, - "userId": None, - "codeModelMode": True, - "agentMode": {}, - "trendingAgentMode": {}, - "isMicMode": False, - "maxTokens": None, - "isChromeExt": False, - "githubToken": None, - "clickedAnswer2": False, - "clickedAnswer3": False, - "clickedForceWebSearch": False, - "visitFromDelta": False, - "mobileClient": False - } + data = { + "messages": messages_with_prefix, + "id": message_id, + "previewToken": None, + "userId": None, + "codeModelMode": True, + "agentMode": cls.agentMode.get(model, {}) if model in cls.agentMode else {}, + "trendingAgentMode": cls.trendingAgentMode.get(model, {}) if model in cls.trendingAgentMode else {}, + "isMicMode": False, + "userSystemPrompt": None, + "maxTokens": 1024, + "playgroundTopP": 0.9, + "playgroundTemperature": 0.5, + "isChromeExt": False, + 
"githubToken": None, + "clickedAnswer2": False, + "clickedAnswer3": False, + "clickedForceWebSearch": False, + "visitFromDelta": False, + "mobileClient": False, + "userSelectedModel": model if model in cls.userSelectedModel else None, + "webSearchMode": web_search, + "validated": validated_value, + } - if model == 'ImageGenerationLV45LJp': - data["agentMode"] = cls.model_config[model] - else: - data["trendingAgentMode"] = cls.model_config[model] - + async with ClientSession(headers=headers) as session: async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: response.raise_for_status() - if model == 'ImageGenerationLV45LJp': - response_text = await response.text() - url_match = re.search(r'https://storage\.googleapis\.com/[^\s\)]+', response_text) - if url_match: - image_url = url_match.group(0) - yield ImageResponse(image_url, alt=messages[-1]['content']) - else: - raise Exception("Image URL not found in the response") + response_text = await response.text() + + if model in cls.image_models: + image_matches = re.findall(r'!\[.*?\]\((https?://[^\)]+)\)', response_text) + if image_matches: + image_url = image_matches[0] + image_response = ImageResponse(images=[image_url], alt="Generated Image") + yield image_response + return + + response_text = re.sub(r'Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai', '', response_text, flags=re.DOTALL) + + json_match = re.search(r'\$~~~\$(.*?)\$~~~\$', response_text, re.DOTALL) + if json_match: + search_results = json.loads(json_match.group(1)) + answer = response_text.split('$~~~$')[-1].strip() + + formatted_response = f"{answer}\n\n**Source:**" + for i, result in enumerate(search_results, 1): + formatted_response += f"\n{i}. {result['title']}: {result['link']}" + + yield formatted_response else: - async for chunk in response.content: - if chunk: - decoded_chunk = chunk.decode() - if decoded_chunk.startswith('$@$v=undefined-rv1$@$'): - decoded_chunk = decoded_chunk[len('$@$v=undefined-rv1$@$'):] - yield decoded_chunk + yield response_text.strip() diff --git a/g4f/Provider/ChatGpt.py b/g4f/Provider/ChatGpt.py index 878fb424..02bbbcc4 100644 --- a/g4f/Provider/ChatGpt.py +++ b/g4f/Provider/ChatGpt.py @@ -3,7 +3,10 @@ from __future__ import annotations from ..typing import Messages, CreateResult from ..providers.base_provider import AbstractProvider, ProviderModelMixin -import time, uuid, random, json +import time +import uuid +import random +import json from requests import Session from .openai.new import ( @@ -72,11 +75,34 @@ def init_session(user_agent): class ChatGpt(AbstractProvider, ProviderModelMixin): label = "ChatGpt" + url = "https://chatgpt.com" working = True supports_message_history = True supports_system_message = True supports_stream = True - + default_model = 'auto' + models = [ + default_model, + 'gpt-3.5-turbo', + 'gpt-4o', + 'gpt-4o-mini', + 'gpt-4', + 'gpt-4-turbo', + 'chatgpt-4o-latest', + ] + + model_aliases = { + "gpt-4o": "chatgpt-4o-latest", + } + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model @classmethod def create_completion( @@ -86,30 +112,17 @@ class ChatGpt(AbstractProvider, ProviderModelMixin): stream: bool, **kwargs ) -> CreateResult: + model = cls.get_model(model) + if model not in cls.models: + raise ValueError(f"Model '{model}' is not available. 
Available models: {', '.join(cls.models)}") + - if model in [ - 'gpt-4o', - 'gpt-4o-mini', - 'gpt-4', - 'gpt-4-turbo', - 'chatgpt-4o-latest' - ]: - model = 'auto' - - elif model in [ - 'gpt-3.5-turbo' - ]: - model = 'text-davinci-002-render-sha' - - else: - raise ValueError(f"Invalid model: {model}") - - user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36' + user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36' session: Session = init_session(user_agent) - config = get_config(user_agent) - pow_req = get_requirements_token(config) - headers = { + config = get_config(user_agent) + pow_req = get_requirements_token(config) + headers = { 'accept': '*/*', 'accept-language': 'en-US,en;q=0.8', 'content-type': 'application/json', @@ -128,29 +141,35 @@ class ChatGpt(AbstractProvider, ProviderModelMixin): } response = session.post('https://chatgpt.com/backend-anon/sentinel/chat-requirements', - headers=headers, json={'p': pow_req}).json() + headers=headers, json={'p': pow_req}) + + if response.status_code != 200: + return - turnstile = response.get('turnstile', {}) + response_data = response.json() + if "detail" in response_data and "Unusual activity" in response_data["detail"]: + return + + turnstile = response_data.get('turnstile', {}) turnstile_required = turnstile.get('required') - pow_conf = response.get('proofofwork', {}) + pow_conf = response_data.get('proofofwork', {}) if turnstile_required: - turnstile_dx = turnstile.get('dx') + turnstile_dx = turnstile.get('dx') turnstile_token = process_turnstile(turnstile_dx, pow_req) - headers = headers | { - 'openai-sentinel-turnstile-token' : turnstile_token, - 'openai-sentinel-chat-requirements-token': response.get('token'), - 'openai-sentinel-proof-token' : get_answer_token( - pow_conf.get('seed'), pow_conf.get('difficulty'), config - ) - } - + headers = {**headers, + 'openai-sentinel-turnstile-token': turnstile_token, + 'openai-sentinel-chat-requirements-token': response_data.get('token'), + 'openai-sentinel-proof-token': get_answer_token( + pow_conf.get('seed'), pow_conf.get('difficulty'), config + )} + json_data = { 'action': 'next', 'messages': format_conversation(messages), 'parent_message_id': str(uuid.uuid4()), - 'model': 'auto', + 'model': model, 'timezone_offset_min': -120, 'suggestions': [ 'Can you help me create a personalized morning routine that would help increase my productivity throughout the day? 
Start by asking me about my current habits and what activities energize me in the morning.', @@ -173,7 +192,7 @@ class ChatGpt(AbstractProvider, ProviderModelMixin): 'conversation_origin': None, 'client_contextual_info': { 'is_dark_mode': True, - 'time_since_loaded': random.randint(22,33), + 'time_since_loaded': random.randint(22, 33), 'page_height': random.randint(600, 900), 'page_width': random.randint(500, 800), 'pixel_ratio': 2, @@ -181,20 +200,33 @@ class ChatGpt(AbstractProvider, ProviderModelMixin): 'screen_width': random.randint(1200, 2000), }, } + + time.sleep(2) response = session.post('https://chatgpt.com/backend-anon/conversation', - headers=headers, json=json_data, stream=True) - + headers=headers, json=json_data, stream=True) + replace = '' for line in response.iter_lines(): if line: - if 'DONE' in line.decode(): - break - - data = json.loads(line.decode()[6:]) - if data.get('message').get('author').get('role') == 'assistant': - tokens = (data.get('message').get('content').get('parts')[0]) - - yield tokens.replace(replace, '') + decoded_line = line.decode() + + if decoded_line.startswith('data:'): + json_string = decoded_line[6:].strip() + + if json_string == '[DONE]': + break - replace = tokens
\ No newline at end of file + if json_string: + try: + data = json.loads(json_string) + except json.JSONDecodeError: + continue + + if data.get('message') and data['message'].get('author'): + role = data['message']['author'].get('role') + if role == 'assistant': + tokens = data['message']['content'].get('parts', []) + if tokens: + yield tokens[0].replace(replace, '') + replace = tokens[0] diff --git a/g4f/Provider/ChatGptEs.py b/g4f/Provider/ChatGptEs.py new file mode 100644 index 00000000..788ffcd9 --- /dev/null +++ b/g4f/Provider/ChatGptEs.py @@ -0,0 +1,84 @@ +from __future__ import annotations + +from aiohttp import ClientSession +import os +import json +import re + +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + +class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://chatgpt.es" + api_endpoint = "https://chatgpt.es/wp-admin/admin-ajax.php" + working = True + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'gpt-4o' + models = ['gpt-4o', 'gpt-4o-mini', 'chatgpt-4o-latest'] + + model_aliases = { + "gpt-4o": "chatgpt-4o-latest", + } + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + "authority": "chatgpt.es", + "accept": "application/json", + "origin": cls.url, + "referer": f"{cls.url}/chat", + "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36", + } + + async with ClientSession(headers=headers) as session: + initial_response = await session.get(cls.url) + nonce_ = re.findall(r'data-nonce="(.+?)"', await initial_response.text())[0] + post_id = re.findall(r'data-post-id="(.+?)"', await initial_response.text())[0] + + conversation_history = [ + "Human: You are a helpful AI assistant. Please respond in the same language that the user uses in their message. Provide accurate, relevant and helpful information while maintaining a friendly and professional tone. If you're not sure about something, please acknowledge that and provide the best information you can while noting any uncertainties. Focus on being helpful while respecting the user's choice of language." 
+ ] + + for message in messages[:-1]: + if message['role'] == "user": + conversation_history.append(f"Human: {message['content']}") + else: + conversation_history.append(f"AI: {message['content']}") + + payload = { + '_wpnonce': nonce_, + 'post_id': post_id, + 'url': cls.url, + 'action': 'wpaicg_chat_shortcode_message', + 'message': messages[-1]['content'], + 'bot_id': '0', + 'chatbot_identity': 'shortcode', + 'wpaicg_chat_client_id': os.urandom(5).hex(), + 'wpaicg_chat_history': json.dumps(conversation_history) + } + + async with session.post(cls.api_endpoint, headers=headers, data=payload) as response: + response.raise_for_status() + result = await response.json() + yield result['data'] diff --git a/g4f/Provider/Cloudflare.py b/g4f/Provider/Cloudflare.py new file mode 100644 index 00000000..825c5027 --- /dev/null +++ b/g4f/Provider/Cloudflare.py @@ -0,0 +1,88 @@ +from __future__ import annotations + +import asyncio +import json +import uuid + +from ..typing import AsyncResult, Messages, Cookies +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, get_running_loop +from ..requests import Session, StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies + +class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin): + label = "Cloudflare AI" + url = "https://playground.ai.cloudflare.com" + api_endpoint = "https://playground.ai.cloudflare.com/api/inference" + models_url = "https://playground.ai.cloudflare.com/api/models" + working = True + supports_stream = True + supports_system_message = True + supports_message_history = True + default_model = "@cf/meta/llama-3.1-8b-instruct" + model_aliases = { + "llama-2-7b": "@cf/meta/llama-2-7b-chat-fp16", + "llama-2-7b": "@cf/meta/llama-2-7b-chat-int8", + "llama-3-8b": "@cf/meta/llama-3-8b-instruct", + "llama-3-8b": "@cf/meta/llama-3-8b-instruct-awq", + "llama-3-8b": "@hf/meta-llama/meta-llama-3-8b-instruct", + "llama-3.1-8b": "@cf/meta/llama-3.1-8b-instruct-awq", + "llama-3.1-8b": "@cf/meta/llama-3.1-8b-instruct-fp8", + "llama-3.2-1b": "@cf/meta/llama-3.2-1b-instruct", + "qwen-1.5-7b": "@cf/qwen/qwen1.5-7b-chat-awq", + } + _args: dict = None + + @classmethod + def get_models(cls) -> str: + if not cls.models: + if cls._args is None: + get_running_loop(check_nested=True) + args = get_args_from_nodriver(cls.url, cookies={ + '__cf_bm': uuid.uuid4().hex, + }) + cls._args = asyncio.run(args) + with Session(**cls._args) as session: + response = session.get(cls.models_url) + raise_for_status(response) + json_data = response.json() + cls.models = [model.get("name") for model in json_data.get("models")] + cls._args["cookies"] = merge_cookies(cls._args["cookies"] , response) + return cls.models + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + max_tokens: int = 2048, + cookies: Cookies = None, + timeout: int = 300, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + if cls._args is None: + cls._args = await get_args_from_nodriver(cls.url, proxy, timeout, cookies) + data = { + "messages": messages, + "lora": None, + "model": model, + "max_tokens": max_tokens, + "stream": True + } + async with StreamSession(**cls._args) as session: + async with session.post( + cls.api_endpoint, + json=data, + ) as response: + await raise_for_status(response) + cls._args["cookies"] = merge_cookies(cls._args["cookies"] , response) + async for line in response.iter_lines(): + if line.startswith(b'data: '): + if line == b'data: [DONE]': + break + try: + content = 
json.loads(line[6:].decode()) + if content.get("response") and content.get("response") != '</s>': + yield content['response'] + except Exception: + continue
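
Several of the rewritten providers in this changeset (ChatGpt, Cloudflare, DDG, DarkAI, DeepInfraChat) now parse streamed responses the same way: keep only lines that start with a "data:" prefix, stop at the "[DONE]" sentinel, and silently skip chunks that are not valid JSON. The short sketch below is illustrative only and not code from the repository; the helper name and the sample payload are assumptions made for the example.

import json
from typing import Iterable, Iterator

def iter_sse_json(lines: Iterable[bytes]) -> Iterator[dict]:
    # Keep only "data:" lines, stop at the "[DONE]" sentinel,
    # and skip anything that does not decode as JSON.
    for raw in lines:
        if not raw:
            continue
        line = raw.decode("utf-8", errors="ignore").strip()
        if not line.startswith("data:"):
            continue
        payload = line[5:].strip()
        if payload == "[DONE]":
            break
        try:
            yield json.loads(payload)
        except json.JSONDecodeError:
            continue

# Usage against a pre-collected list of SSE lines (hypothetical sample data):
sample = [b'data: {"response": "Hello"}', b"", b"data: [DONE]"]
for event in iter_sse_json(sample):
    print(event.get("response"))

Separating the prefix/sentinel handling from the provider-specific payload shape (Cloudflare's "response" field, OpenAI-style "choices"/"delta", DDG's "message") is what keeps each provider's yield loop short.
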
\ No newline at end of file diff --git a/g4f/Provider/CodeNews.py b/g4f/Provider/CodeNews.py deleted file mode 100644 index 05ec7a45..00000000 --- a/g4f/Provider/CodeNews.py +++ /dev/null @@ -1,94 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession -from asyncio import sleep - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt - - -class CodeNews(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://codenews.cc" - api_endpoint = "https://codenews.cc/chatxyz13" - working = True - supports_gpt_35_turbo = True - supports_gpt_4 = False - supports_stream = True - supports_system_message = False - supports_message_history = False - - default_model = 'free_gpt' - models = ['free_gpt', 'gpt-4o-mini', 'deepseek-coder', 'chatpdf'] - - model_aliases = { - "glm-4": "free_gpt", - "gpt-3.5-turbo": "chatpdf", - "deepseek": "deepseek-coder", - } - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.models: - return model - elif model in cls.model_aliases: - return cls.model_aliases[model] - else: - return cls.default_model - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - model = cls.get_model(model) - - headers = { - "accept": "application/json, text/javascript, */*; q=0.01", - "accept-language": "en-US,en;q=0.9", - "cache-control": "no-cache", - "content-type": "application/x-www-form-urlencoded; charset=UTF-8", - "origin": cls.url, - "pragma": "no-cache", - "priority": "u=1, i", - "referer": f"{cls.url}/chatgpt", - "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36", - "x-requested-with": "XMLHttpRequest", - } - async with ClientSession(headers=headers) as session: - prompt = format_prompt(messages) - data = { - "chatgpt_input": prompt, - "qa_type2": model, - "chatgpt_version_value": "20240804", - "enable_web_search": "0", - "enable_agent": "0", - "dy_video_text_extract": "0", - "enable_summary": "0", - } - async with session.post(cls.api_endpoint, data=data, proxy=proxy) as response: - response.raise_for_status() - json_data = await response.json() - chat_id = json_data["data"]["id"] - - headers["content-type"] = "application/x-www-form-urlencoded; charset=UTF-8" - data = {"current_req_count": "2"} - - while True: - async with session.post(f"{cls.url}/chat_stream", headers=headers, data=data, proxy=proxy) as response: - response.raise_for_status() - json_data = await response.json() - if json_data["data"]: - yield json_data["data"] - break - else: - await sleep(1) # Затримка перед наступним запитом diff --git a/g4f/Provider/DDG.py b/g4f/Provider/DDG.py index c8c36fc9..c4be0ea8 100644 --- a/g4f/Provider/DDG.py +++ b/g4f/Provider/DDG.py @@ -2,115 +2,119 @@ from __future__ import annotations import json import aiohttp -import asyncio -from typing import Optional -import base64 +from aiohttp import ClientSession, BaseConnector -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import get_connector from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, BaseConversation +from .helper 
import format_prompt +from ..requests.aiohttp import get_connector from ..requests.raise_for_status import raise_for_status -from ..providers.conversation import BaseConversation +from .. import debug + +MODELS = [ + {"model":"gpt-4o","modelName":"GPT-4o","modelVariant":None,"modelStyleId":"gpt-4o-mini","createdBy":"OpenAI","moderationLevel":"HIGH","isAvailable":1,"inputCharLimit":16e3,"settingId":"4"}, + {"model":"gpt-4o-mini","modelName":"GPT-4o","modelVariant":"mini","modelStyleId":"gpt-4o-mini","createdBy":"OpenAI","moderationLevel":"HIGH","isAvailable":0,"inputCharLimit":16e3,"settingId":"3"}, + {"model":"claude-3-5-sonnet-20240620","modelName":"Claude 3.5","modelVariant":"Sonnet","modelStyleId":"claude-3-haiku","createdBy":"Anthropic","moderationLevel":"HIGH","isAvailable":1,"inputCharLimit":16e3,"settingId":"7"}, + {"model":"claude-3-opus-20240229","modelName":"Claude 3","modelVariant":"Opus","modelStyleId":"claude-3-haiku","createdBy":"Anthropic","moderationLevel":"HIGH","isAvailable":1,"inputCharLimit":16e3,"settingId":"2"}, + {"model":"claude-3-haiku-20240307","modelName":"Claude 3","modelVariant":"Haiku","modelStyleId":"claude-3-haiku","createdBy":"Anthropic","moderationLevel":"HIGH","isAvailable":0,"inputCharLimit":16e3,"settingId":"1"}, + {"model":"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo","modelName":"Llama 3.1","modelVariant":"70B","modelStyleId":"llama-3","createdBy":"Meta","moderationLevel":"MEDIUM","isAvailable":0,"isOpenSource":0,"inputCharLimit":16e3,"settingId":"5"}, + {"model":"mistralai/Mixtral-8x7B-Instruct-v0.1","modelName":"Mixtral","modelVariant":"8x7B","modelStyleId":"mixtral","createdBy":"Mistral AI","moderationLevel":"LOW","isAvailable":0,"isOpenSource":0,"inputCharLimit":16e3,"settingId":"6"} +] + +class Conversation(BaseConversation): + vqd: str = None + message_history: Messages = [] + + def __init__(self, model: str): + self.model = model class DDG(AsyncGeneratorProvider, ProviderModelMixin): - url = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9haWNoYXQ=").decode("utf-8") + url = "https://duckduckgo.com" + api_endpoint = "https://duckduckgo.com/duckchat/v1/chat" working = True - supports_gpt_35_turbo = True + supports_stream = True + supports_system_message = True supports_message_history = True default_model = "gpt-4o-mini" - models = ["gpt-4o-mini", "claude-3-haiku-20240307", "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", "mistralai/Mixtral-8x7B-Instruct-v0.1"] + models = [model.get("model") for model in MODELS] model_aliases = { "claude-3-haiku": "claude-3-haiku-20240307", "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", - "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1" - } - - # Obfuscated URLs and headers - status_url = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9kdWNrY2hhdC92MS9zdGF0dXM=").decode("utf-8") - chat_url = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS9kdWNrY2hhdC92MS9jaGF0").decode("utf-8") - referer = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbS8=").decode("utf-8") - origin = base64.b64decode("aHR0cHM6Ly9kdWNrZHVja2dvLmNvbQ==").decode("utf-8") - - user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36' - headers = { - 'User-Agent': user_agent, - 'Accept': 'text/event-stream', - 'Accept-Language': 'en-US,en;q=0.5', - 'Accept-Encoding': 'gzip, deflate, br, zstd', - 'Referer': referer, - 'Content-Type': 'application/json', - 'Origin': origin, - 'Connection': 'keep-alive', - 'Cookie': 'dcm=3', - 'Sec-Fetch-Dest': 'empty', 
- 'Sec-Fetch-Mode': 'cors', - 'Sec-Fetch-Site': 'same-origin', - 'Pragma': 'no-cache', - 'TE': 'trailers' + "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1", + "gpt-4": "gpt-4o-mini" } @classmethod - async def get_vqd(cls, session: aiohttp.ClientSession) -> Optional[str]: - try: - async with session.get(cls.status_url, headers={"x-vqd-accept": "1"}) as response: + async def get_vqd(cls, proxy: str, connector: BaseConnector = None): + status_url = "https://duckduckgo.com/duckchat/v1/status" + headers = { + 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36', + 'Accept': 'text/event-stream', + 'x-vqd-accept': '1' + } + async with aiohttp.ClientSession(connector=get_connector(connector, proxy)) as session: + async with session.get(status_url, headers=headers) as response: await raise_for_status(response) return response.headers.get("x-vqd-4") - except Exception as e: - print(f"Error getting VQD: {e}") - return None @classmethod async def create_async_generator( cls, model: str, messages: Messages, - proxy: str = None, - connector: aiohttp.BaseConnector = None, conversation: Conversation = None, return_conversation: bool = False, + proxy: str = None, + connector: BaseConnector = None, **kwargs ) -> AsyncResult: - async with aiohttp.ClientSession(headers=cls.headers, connector=get_connector(connector, proxy)) as session: - vqd_4 = None - if conversation is not None and len(messages) > 1: - vqd_4 = conversation.vqd_4 - messages = [*conversation.messages, messages[-2], messages[-1]] + model = cls.get_model(model) + + is_new_conversation = False + if conversation is None: + conversation = Conversation(model) + is_new_conversation = True + debug.last_model = model + if conversation.vqd is None: + conversation.vqd = await cls.get_vqd(proxy, connector) + if not conversation.vqd: + raise Exception("Failed to obtain VQD token") + + headers = { + 'accept': 'text/event-stream', + 'content-type': 'application/json', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36', + 'x-vqd-4': conversation.vqd, + } + async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session: + if is_new_conversation: + conversation.message_history = [{"role": "user", "content": format_prompt(messages)}] else: - for _ in range(3): # Try up to 3 times to get a valid VQD - vqd_4 = await cls.get_vqd(session) - if vqd_4: - break - await asyncio.sleep(1) # Wait a bit before retrying - - if not vqd_4: - raise Exception("Failed to obtain a valid VQD token") - - messages = [messages[-1]] # Only use the last message for new conversations - - payload = { - 'model': cls.get_model(model), - 'messages': [{'role': m['role'], 'content': m['content']} for m in messages] + conversation.message_history = [ + *conversation.message_history, + messages[-2], + messages[-1] + ] + if return_conversation: + yield conversation + data = { + "model": conversation.model, + "messages": conversation.message_history } - - async with session.post(cls.chat_url, json=payload, headers={"x-vqd-4": vqd_4}) as response: + async with session.post(cls.api_endpoint, json=data) as response: + conversation.vqd = response.headers.get("x-vqd-4") await raise_for_status(response) - if return_conversation: - yield Conversation(vqd_4, messages) - async for line in response.content: - if line.startswith(b"data: "): - chunk = line[6:] - if chunk.startswith(b"[DONE]"): - break - try: - data = json.loads(chunk) - 
if "message" in data and data["message"]: - yield data["message"] - except json.JSONDecodeError: - print(f"Failed to decode JSON: {chunk}") - -class Conversation(BaseConversation): - def __init__(self, vqd_4: str, messages: Messages) -> None: - self.vqd_4 = vqd_4 - self.messages = messages + if line: + decoded_line = line.decode('utf-8') + if decoded_line.startswith('data: '): + json_str = decoded_line[6:] + if json_str == '[DONE]': + break + try: + json_data = json.loads(json_str) + if 'message' in json_data: + yield json_data['message'] + except json.JSONDecodeError: + pass
\ No newline at end of file diff --git a/g4f/Provider/DarkAI.py b/g4f/Provider/DarkAI.py new file mode 100644 index 00000000..06e2bd55 --- /dev/null +++ b/g4f/Provider/DarkAI.py @@ -0,0 +1,83 @@ +from __future__ import annotations + +import json +from aiohttp import ClientSession + +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + + +class DarkAI(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://darkai.foundation/chat" + api_endpoint = "https://darkai.foundation/chat" + working = True + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'llama-3-405b' + models = [ + 'gpt-4o', # Uncensored + 'gpt-3.5-turbo', # Uncensored + 'llama-3-70b', # Uncensored + default_model, + ] + + model_aliases = { + "llama-3.1-70b": "llama-3-70b", + "llama-3.1-405b": "llama-3-405b", + } + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + "accept": "text/event-stream", + "content-type": "application/json", + "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36" + } + async with ClientSession(headers=headers) as session: + prompt = format_prompt(messages) + data = { + "query": prompt, + "model": model, + } + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + full_text = "" + async for chunk in response.content: + if chunk: + try: + chunk_str = chunk.decode().strip() + if chunk_str.startswith('data: '): + chunk_data = json.loads(chunk_str[6:]) + if chunk_data['event'] == 'text-chunk': + full_text += chunk_data['data']['text'] + elif chunk_data['event'] == 'stream-end': + if full_text: + yield full_text.strip() + return + except json.JSONDecodeError: + pass + except Exception: + pass + + if full_text: + yield full_text.strip() diff --git a/g4f/Provider/DeepInfraChat.py b/g4f/Provider/DeepInfraChat.py new file mode 100644 index 00000000..5c668599 --- /dev/null +++ b/g4f/Provider/DeepInfraChat.py @@ -0,0 +1,99 @@ +from __future__ import annotations + +from aiohttp import ClientSession +import json + +from ..typing import AsyncResult, Messages, ImageType +from ..image import to_data_uri +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin + + +class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://deepinfra.com/chat" + api_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions" + working = True + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo' + models = [ + 'meta-llama/Meta-Llama-3.1-8B-Instruct', + default_model, + 'microsoft/WizardLM-2-8x22B', + 'Qwen/Qwen2.5-72B-Instruct', + ] + model_aliases = { + "llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct", + "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", + "wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B", + "qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct", + } + + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return 
model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + image: ImageType = None, + image_name: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + 'Accept-Language': 'en-US,en;q=0.9', + 'Cache-Control': 'no-cache', + 'Connection': 'keep-alive', + 'Content-Type': 'application/json', + 'Origin': 'https://deepinfra.com', + 'Pragma': 'no-cache', + 'Referer': 'https://deepinfra.com/', + 'Sec-Fetch-Dest': 'empty', + 'Sec-Fetch-Mode': 'cors', + 'Sec-Fetch-Site': 'same-site', + 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36', + 'X-Deepinfra-Source': 'web-embed', + 'accept': 'text/event-stream', + 'sec-ch-ua': '"Not;A=Brand";v="24", "Chromium";v="128"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Linux"', + } + + async with ClientSession(headers=headers) as session: + data = { + 'model': model, + 'messages': messages, + 'stream': True + } + + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + async for line in response.content: + if line: + decoded_line = line.decode('utf-8').strip() + if decoded_line.startswith('data:'): + json_part = decoded_line[5:].strip() + if json_part == '[DONE]': + break + try: + data = json.loads(json_part) + choices = data.get('choices', []) + if choices: + delta = choices[0].get('delta', {}) + content = delta.get('content', '') + if content: + yield content + except json.JSONDecodeError: + print(f"JSON decode error: {json_part}") diff --git a/g4f/Provider/Free2GPT.py b/g4f/Provider/Free2GPT.py index a79bd1da..6ba9ac0f 100644 --- a/g4f/Provider/Free2GPT.py +++ b/g4f/Provider/Free2GPT.py @@ -16,7 +16,7 @@ class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin): url = "https://chat10.free2gpt.xyz" working = True supports_message_history = True - default_model = 'llama-3.1-70b' + default_model = 'mistral-7b' @classmethod async def create_async_generator( @@ -49,12 +49,8 @@ class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin): connector=get_connector(connector, proxy), headers=headers ) as session: timestamp = int(time.time() * 1e3) - system_message = { - "role": "system", - "content": "" - } data = { - "messages": [system_message] + messages, + "messages": messages, "time": timestamp, "pass": None, "sign": generate_signature(timestamp, messages[-1]["content"]), diff --git a/g4f/Provider/FreeChatgpt.py b/g4f/Provider/FreeChatgpt.py deleted file mode 100644 index a9dc0f56..00000000 --- a/g4f/Provider/FreeChatgpt.py +++ /dev/null @@ -1,96 +0,0 @@ -from __future__ import annotations -import json -from aiohttp import ClientSession -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt - - -class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://chat.chatgpt.org.uk" - api_endpoint = "/api/openai/v1/chat/completions" - working = True - default_model = '@cf/qwen/qwen1.5-14b-chat-awq' - models = [ - '@cf/qwen/qwen1.5-14b-chat-awq', - 'SparkDesk-v1.1', - 'Qwen2-7B-Instruct', - 'glm4-9B-chat', - 'chatglm3-6B', - 'Yi-1.5-9B-Chat', - ] - - model_aliases = { - "qwen-1.5-14b": "@cf/qwen/qwen1.5-14b-chat-awq", - "sparkdesk-v1.1": "SparkDesk-v1.1", - "qwen-2-7b": "Qwen2-7B-Instruct", - "glm-4-9b": "glm4-9B-chat", - 
"glm-3-6b": "chatglm3-6B", - "yi-1.5-9b": "Yi-1.5-9B-Chat", - } - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.models: - return model - elif model.lower() in cls.model_aliases: - return cls.model_aliases[model.lower()] - else: - return cls.default_model - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - model = cls.get_model(model) - - headers = { - "accept": "application/json, text/event-stream", - "accept-language": "en-US,en;q=0.9", - "content-type": "application/json", - "dnt": "1", - "origin": cls.url, - "referer": f"{cls.url}/", - "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36", - } - async with ClientSession(headers=headers) as session: - prompt = format_prompt(messages) - data = { - "messages": [ - {"role": "system", "content": "\nYou are ChatGPT, a large language model trained by OpenAI.\nKnowledge cutoff: 2021-09\nCurrent model: gpt-3.5-turbo\nCurrent time: Thu Jul 04 2024 21:35:59 GMT+0300 (Eastern European Summer Time)\nLatex inline: \\(x^2\\) \nLatex block: $$e=mc^2$$\n\n"}, - {"role": "user", "content": prompt} - ], - "stream": True, - "model": model, - "temperature": 0.5, - "presence_penalty": 0, - "frequency_penalty": 0, - "top_p": 1 - } - async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response: - response.raise_for_status() - accumulated_text = "" - async for line in response.content: - if line: - line_str = line.decode().strip() - if line_str == "data: [DONE]": - yield accumulated_text - break - elif line_str.startswith("data: "): - try: - chunk = json.loads(line_str[6:]) - delta_content = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "") - accumulated_text += delta_content - yield delta_content # Yield each chunk of content - except json.JSONDecodeError: - pass diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py index 82a3824b..b38ff428 100644 --- a/g4f/Provider/FreeGpt.py +++ b/g4f/Provider/FreeGpt.py @@ -24,7 +24,7 @@ class FreeGpt(AsyncGeneratorProvider, ProviderModelMixin): working = True supports_message_history = True supports_system_message = True - default_model = 'llama-3.1-70b' + default_model = 'gemini-pro' @classmethod async def create_async_generator( diff --git a/g4f/Provider/GizAI.py b/g4f/Provider/GizAI.py new file mode 100644 index 00000000..f00b344e --- /dev/null +++ b/g4f/Provider/GizAI.py @@ -0,0 +1,76 @@ +from __future__ import annotations + +from aiohttp import ClientSession + +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + + +class GizAI(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://app.giz.ai/assistant" + api_endpoint = "https://app.giz.ai/api/data/users/inferenceServer.infer" + working = True + supports_stream = False + supports_system_message = True + supports_message_history = True + + default_model = 'chat-gemini-flash' + models = [default_model] + + model_aliases = {"gemini-flash": "chat-gemini-flash",} + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return 
cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + 'Accept': 'application/json, text/plain, */*', + 'Accept-Language': 'en-US,en;q=0.9', + 'Cache-Control': 'no-cache', + 'Connection': 'keep-alive', + 'Content-Type': 'application/json', + 'DNT': '1', + 'Origin': 'https://app.giz.ai', + 'Pragma': 'no-cache', + 'Sec-Fetch-Dest': 'empty', + 'Sec-Fetch-Mode': 'cors', + 'Sec-Fetch-Site': 'same-origin', + 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36', + 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Linux"' + } + + prompt = format_prompt(messages) + + async with ClientSession(headers=headers) as session: + data = { + "model": model, + "input": { + "messages": [{"type": "human", "content": prompt}], + "mode": "plan" + }, + "noStream": True + } + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + if response.status == 201: + result = await response.json() + yield result['output'].strip() + else: + raise Exception(f"Unexpected response status: {response.status}") diff --git a/g4f/Provider/GptTalkRu.py b/g4f/Provider/GptTalkRu.py deleted file mode 100644 index 6a59484f..00000000 --- a/g4f/Provider/GptTalkRu.py +++ /dev/null @@ -1,59 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession, BaseConnector - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider -from .helper import get_random_string, get_connector -from ..requests import raise_for_status, get_args_from_browser, WebDriver -from ..webdriver import has_seleniumwire -from ..errors import MissingRequirementsError - -class GptTalkRu(AsyncGeneratorProvider): - url = "https://gpttalk.ru" - working = True - supports_gpt_35_turbo = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - connector: BaseConnector = None, - webdriver: WebDriver = None, - **kwargs - ) -> AsyncResult: - if not model: - model = "gpt-3.5-turbo" - if not has_seleniumwire: - raise MissingRequirementsError('Install "selenium-wire" package') - args = get_args_from_browser(f"{cls.url}", webdriver) - args["headers"]["accept"] = "application/json, text/plain, */*" - async with ClientSession(connector=get_connector(connector, proxy), **args) as session: - async with session.get("https://gpttalk.ru/getToken") as response: - await raise_for_status(response) - public_key = (await response.json())["response"]["key"]["publicKey"] - random_string = get_random_string(8) - data = { - "model": model, - "modelType": 1, - "prompt": messages, - "responseType": "stream", - "security": { - "randomMessage": random_string, - "shifrText": encrypt(public_key, random_string) - } - } - async with session.post(f"{cls.url}/gpt2", json=data, proxy=proxy) as response: - await raise_for_status(response) - async for chunk in response.content.iter_any(): - yield chunk.decode(errors="ignore") - -def encrypt(public_key: str, value: str) -> str: - from Crypto.Cipher import PKCS1_v1_5 - from Crypto.PublicKey import RSA - import base64 - rsa_key = RSA.importKey(public_key) - cipher = PKCS1_v1_5.new(rsa_key) - return base64.b64encode(cipher.encrypt(value.encode())).decode()
\ No newline at end of file diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py index 06216ade..509a7f16 100644 --- a/g4f/Provider/HuggingChat.py +++ b/g4f/Provider/HuggingChat.py @@ -1,9 +1,15 @@ from __future__ import annotations -import json, requests, re +import json +import requests -from curl_cffi import requests as cf_reqs +try: + from curl_cffi import requests as cf_reqs + has_curl_cffi = True +except ImportError: + has_curl_cffi = False from ..typing import CreateResult, Messages +from ..errors import MissingRequirementsError from .base_provider import ProviderModelMixin, AbstractProvider from .helper import format_prompt @@ -16,19 +22,25 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): models = [ 'meta-llama/Meta-Llama-3.1-70B-Instruct', 'CohereForAI/c4ai-command-r-plus-08-2024', - 'mistralai/Mixtral-8x7B-Instruct-v0.1', - 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO', - 'mistralai/Mistral-7B-Instruct-v0.3', - 'microsoft/Phi-3-mini-4k-instruct', + 'Qwen/Qwen2.5-72B-Instruct', + 'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF', + 'Qwen/Qwen2.5-Coder-32B-Instruct', + 'meta-llama/Llama-3.2-11B-Vision-Instruct', + 'NousResearch/Hermes-3-Llama-3.1-8B', + 'mistralai/Mistral-Nemo-Instruct-2407', + 'microsoft/Phi-3.5-mini-instruct', ] model_aliases = { "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct", "command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024", - "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1", - "mixtral-8x7b-dpo": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", - "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3", - "phi-3-mini-4k": "microsoft/Phi-3-mini-4k-instruct", + "qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct", + "nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", + "qwen-2.5-coder-32b": "Qwen/Qwen2.5-Coder-32B-Instruct", + "llama-3.2-11b": "meta-llama/Llama-3.2-11B-Vision-Instruct", + "hermes-3": "NousResearch/Hermes-3-Llama-3.1-8B", + "mistral-nemo": "mistralai/Mistral-Nemo-Instruct-2407", + "phi-3.5-mini": "microsoft/Phi-3.5-mini-instruct", } @classmethod @@ -48,6 +60,8 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): stream: bool, **kwargs ) -> CreateResult: + if not has_curl_cffi: + raise MissingRequirementsError('Install "curl_cffi" package | pip install -U curl_cffi') model = cls.get_model(model) if model in cls.models: @@ -69,20 +83,42 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36', } - print(model) json_data = { 'model': model, } response = session.post('https://huggingface.co/chat/conversation', json=json_data) - conversationId = response.json()['conversationId'] + if response.status_code != 200: + raise RuntimeError(f"Request failed with status code: {response.status_code}, response: {response.text}") - response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=11',) + conversationId = response.json().get('conversationId') + + # Get the data response and parse it properly + response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=11') + + # Split the response content by newlines and parse each line as JSON + try: + json_data = None + for line in response.text.split('\n'): + if line.strip(): + try: + parsed = json.loads(line) + if isinstance(parsed, dict) and "nodes" in parsed: + json_data = parsed + break + except 
json.JSONDecodeError: + continue + + if not json_data: + raise RuntimeError("Failed to parse response data") - data: list = (response.json())["nodes"][1]["data"] - keys: list[int] = data[data[0]["messages"]] - message_keys: dict = data[keys[0]] - messageId: str = data[message_keys["id"]] + data: list = json_data["nodes"][1]["data"] + keys: list[int] = data[data[0]["messages"]] + message_keys: dict = data[keys[0]] + messageId: str = data[message_keys["id"]] + + except (KeyError, IndexError, TypeError) as e: + raise RuntimeError(f"Failed to extract message ID: {str(e)}") settings = { "inputs": format_prompt(messages), @@ -114,28 +150,41 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): 'data': (None, json.dumps(settings, separators=(',', ':'))), } - response = requests.post(f'https://huggingface.co/chat/conversation/{conversationId}', + response = requests.post( + f'https://huggingface.co/chat/conversation/{conversationId}', cookies=session.cookies, headers=headers, files=files, ) - first_token = True + full_response = "" for line in response.iter_lines(): - line = json.loads(line) + if not line: + continue + try: + line = json.loads(line) + except json.JSONDecodeError as e: + print(f"Failed to decode JSON: {line}, error: {e}") + continue if "type" not in line: raise RuntimeError(f"Response: {line}") elif line["type"] == "stream": - token = line["token"] - if first_token: - token = token.lstrip().replace('\u0000', '') - first_token = False - else: - token = token.replace('\u0000', '') - - yield token + token = line["token"].replace('\u0000', '') + full_response += token + if stream: + yield token elif line["type"] == "finalAnswer": break + + full_response = full_response.replace('<|im_end|', '').replace('\u0000', '').strip() + + if not stream: + yield full_response + + @classmethod + def supports_model(cls, model: str) -> bool: + """Check if the model is supported by the provider.""" + return model in cls.models or model in cls.model_aliases diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py index 8a9f46b1..fc50bdee 100644 --- a/g4f/Provider/Liaobots.py +++ b/g4f/Provider/Liaobots.py @@ -36,32 +36,50 @@ models = { "tokenLimit": 7800, "context": "8K", }, - "gpt-4-turbo-2024-04-09": { - "id": "gpt-4-turbo-2024-04-09", - "name": "GPT-4-Turbo", + "gpt-4o-2024-08-06": { + "id": "gpt-4o-2024-08-06", + "name": "GPT-4o", "model": "ChatGPT", "provider": "OpenAI", "maxLength": 260000, "tokenLimit": 126000, "context": "128K", }, - "gpt-4o-2024-08-06": { - "id": "gpt-4o-2024-08-06", - "name": "GPT-4o", + "gpt-4-turbo-2024-04-09": { + "id": "gpt-4-turbo-2024-04-09", + "name": "GPT-4-Turbo", "model": "ChatGPT", "provider": "OpenAI", "maxLength": 260000, "tokenLimit": 126000, "context": "128K", }, - "gpt-4-0613": { - "id": "gpt-4-0613", - "name": "GPT-4-0613", - "model": "ChatGPT", - "provider": "OpenAI", - "maxLength": 32000, - "tokenLimit": 7600, - "context": "8K", + "grok-beta": { + "id": "grok-beta", + "name": "Grok-Beta", + "model": "Grok", + "provider": "x.ai", + "maxLength": 400000, + "tokenLimit": 100000, + "context": "100K", + }, + "grok-2": { + "id": "grok-2", + "name": "Grok-2", + "model": "Grok", + "provider": "x.ai", + "maxLength": 400000, + "tokenLimit": 100000, + "context": "100K", + }, + "grok-2-mini": { + "id": "grok-2-mini", + "name": "Grok-2-mini", + "model": "Grok", + "provider": "x.ai", + "maxLength": 400000, + "tokenLimit": 100000, + "context": "100K", }, "claude-3-opus-20240229": { "id": "claude-3-opus-20240229", @@ -81,27 +99,27 @@ models = { "tokenLimit": 
200000, "context": "200K", }, - "claude-3-opus-20240229-gcp": { - "id": "claude-3-opus-20240229-gcp", - "name": "Claude-3-Opus-Gcp", + "claude-3-5-sonnet-20240620": { + "id": "claude-3-5-sonnet-20240620", + "name": "Claude-3.5-Sonnet", "model": "Claude", "provider": "Anthropic", "maxLength": 800000, "tokenLimit": 200000, "context": "200K", }, - "claude-3-sonnet-20240229": { - "id": "claude-3-sonnet-20240229", - "name": "Claude-3-Sonnet", + "claude-3-5-sonnet-20241022": { + "id": "claude-3-5-sonnet-20241022", + "name": "Claude-3.5-Sonnet-V2", "model": "Claude", "provider": "Anthropic", "maxLength": 800000, "tokenLimit": 200000, "context": "200K", }, - "claude-3-5-sonnet-20240620": { - "id": "claude-3-5-sonnet-20240620", - "name": "Claude-3.5-Sonnet", + "claude-3-sonnet-20240229": { + "id": "claude-3-sonnet-20240229", + "name": "Claude-3-Sonnet", "model": "Claude", "provider": "Anthropic", "maxLength": 800000, @@ -126,17 +144,8 @@ models = { "tokenLimit": 200000, "context": "200K", }, - "gemini-1.0-pro-latest": { - "id": "gemini-1.0-pro-latest", - "name": "Gemini-Pro", - "model": "Gemini", - "provider": "Google", - "maxLength": 120000, - "tokenLimit": 30000, - "context": "32K", - }, - "gemini-1.5-flash-latest": { - "id": "gemini-1.5-flash-latest", + "gemini-1.5-flash-002": { + "id": "gemini-1.5-flash-002", "name": "Gemini-1.5-Flash-1M", "model": "Gemini", "provider": "Google", @@ -144,8 +153,8 @@ models = { "tokenLimit": 1000000, "context": "1024K", }, - "gemini-1.5-pro-latest": { - "id": "gemini-1.5-pro-latest", + "gemini-1.5-pro-002": { + "id": "gemini-1.5-pro-002", "name": "Gemini-1.5-Pro-1M", "model": "Gemini", "provider": "Google", @@ -161,28 +170,27 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin): working = True supports_message_history = True supports_system_message = True - supports_gpt_4 = True - default_model = "gpt-4o" + default_model = "gpt-4o-2024-08-06" models = list(models.keys()) model_aliases = { "gpt-4o-mini": "gpt-4o-mini-free", "gpt-4o": "gpt-4o-free", - "gpt-4-turbo": "gpt-4-turbo-2024-04-09", "gpt-4o": "gpt-4o-2024-08-06", - "gpt-4": "gpt-4-0613", + + "gpt-4-turbo": "gpt-4-turbo-2024-04-09", + "gpt-4": "gpt-4o-mini-free", "claude-3-opus": "claude-3-opus-20240229", "claude-3-opus": "claude-3-opus-20240229-aws", - "claude-3-opus": "claude-3-opus-20240229-gcp", "claude-3-sonnet": "claude-3-sonnet-20240229", - "claude-3-5-sonnet": "claude-3-5-sonnet-20240620", + "claude-3.5-sonnet": "claude-3-5-sonnet-20240620", + "claude-3.5-sonnet": "claude-3-5-sonnet-20241022", "claude-3-haiku": "claude-3-haiku-20240307", "claude-2.1": "claude-2.1", - "gemini-pro": "gemini-1.0-pro-latest", - "gemini-flash": "gemini-1.5-flash-latest", - "gemini-pro": "gemini-1.5-pro-latest", + "gemini-flash": "gemini-1.5-flash-002", + "gemini-pro": "gemini-1.5-pro-002", } _auth_code = "" diff --git a/g4f/Provider/LiteIcoding.py b/g4f/Provider/LiteIcoding.py deleted file mode 100644 index 69294a57..00000000 --- a/g4f/Provider/LiteIcoding.py +++ /dev/null @@ -1,113 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession, ClientResponseError -import re -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt - - -class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://lite.icoding.ink" - api_endpoint = "/api/v1/gpt/message" - working = True - supports_gpt_4 = True - default_model = "gpt-4o" - models = [ - 'gpt-4o', - 'gpt-4-turbo', - 'claude-3', - 
'claude-3.5', - 'gemini-1.5', - ] - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - headers = { - "Accept": "*/*", - "Accept-Language": "en-US,en;q=0.9", - "Authorization": "Bearer aa3020ee873e40cb8b3f515a0708ebc4", - "Connection": "keep-alive", - "Content-Type": "application/json;charset=utf-8", - "DNT": "1", - "Origin": cls.url, - "Referer": f"{cls.url}/", - "Sec-Fetch-Dest": "empty", - "Sec-Fetch-Mode": "cors", - "Sec-Fetch-Site": "same-origin", - "User-Agent": ( - "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) " - "Chrome/126.0.0.0 Safari/537.36" - ), - "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - } - - data = { - "model": model, - "chatId": "-1", - "messages": [ - { - "role": msg["role"], - "content": msg["content"], - "time": msg.get("time", ""), - "attachments": msg.get("attachments", []), - } - for msg in messages - ], - "plugins": [], - "systemPrompt": "", - "temperature": 0.5, - } - - async with ClientSession(headers=headers) as session: - try: - async with session.post( - f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy - ) as response: - response.raise_for_status() - buffer = "" - full_response = "" - def decode_content(data): - bytes_array = bytes([int(b, 16) ^ 255 for b in data.split()]) - return bytes_array.decode('utf-8') - async for chunk in response.content.iter_any(): - if chunk: - buffer += chunk.decode() - while "\n\n" in buffer: - part, buffer = buffer.split("\n\n", 1) - if part.startswith("data: "): - content = part[6:].strip() - if content and content != "[DONE]": - content = content.strip('"') - # Decoding each content block - decoded_content = decode_content(content) - full_response += decoded_content - full_response = ( - full_response.replace('""', '') # Handle double quotes - .replace('" "', ' ') # Handle space within quotes - .replace("\\n\\n", "\n\n") - .replace("\\n", "\n") - .replace('\\"', '"') - .strip() - ) - # Add filter to remove unwanted text - filtered_response = re.sub(r'\n---\n.*', '', full_response, flags=re.DOTALL) - # Remove extra quotes at the beginning and end - cleaned_response = filtered_response.strip().strip('"') - yield cleaned_response - - except ClientResponseError as e: - raise RuntimeError( - f"ClientResponseError {e.status}: {e.message}, url={e.request_info.url}, data={data}" - ) from e - - except Exception as e: - raise RuntimeError(f"Unexpected error: {str(e)}") from e diff --git a/g4f/Provider/MagickPen.py b/g4f/Provider/MagickPen.py index eab70536..7f1751dd 100644 --- a/g4f/Provider/MagickPen.py +++ b/g4f/Provider/MagickPen.py @@ -1,72 +1,53 @@ from __future__ import annotations +from aiohttp import ClientSession +import hashlib import time import random -import hashlib import re -from aiohttp import ClientSession - +import json from ..typing import AsyncResult, Messages from .base_provider import AsyncGeneratorProvider, ProviderModelMixin from .helper import format_prompt class MagickPen(AsyncGeneratorProvider, ProviderModelMixin): url = "https://magickpen.com" - api_endpoint_free = "https://api.magickpen.com/chat/free" - api_endpoint_ask = "https://api.magickpen.com/ask" + api_endpoint = "https://api.magickpen.com/ask" working = True - supports_gpt_4 = True - supports_stream = False - - default_model = 'free' - models = ['free', 'ask'] + supports_stream = True + supports_system_message = True + supports_message_history = 
True - model_aliases = { - "gpt-4o-mini": "free", - "gpt-4o-mini": "ask", - } - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.models: - return model - elif model in cls.model_aliases: - return cls.model_aliases[model] - else: - return cls.default_model + default_model = 'gpt-4o-mini' + models = ['gpt-4o-mini'] @classmethod - async def get_secrets(cls): - url = 'https://magickpen.com/_nuxt/02c76dc.js' + async def fetch_api_credentials(cls) -> tuple: + url = "https://magickpen.com/_nuxt/bf709a9ce19f14e18116.js" async with ClientSession() as session: async with session.get(url) as response: - if response.status == 200: - text = await response.text() - x_api_secret_match = re.search(r'"X-API-Secret":"([^"]+)"', text) - secret_match = re.search(r'secret:\s*"([^"]+)"', text) - - x_api_secret = x_api_secret_match.group(1) if x_api_secret_match else None - secret = secret_match.group(1) if secret_match else None - - # Generate timestamp and nonce dynamically - timestamp = str(int(time.time() * 1000)) - nonce = str(random.random()) - - # Generate signature - signature_parts = ["TGDBU9zCgM", timestamp, nonce] - signature_string = "".join(sorted(signature_parts)) - signature = hashlib.md5(signature_string.encode()).hexdigest() - - return { - 'X-API-Secret': x_api_secret, - 'signature': signature, - 'timestamp': timestamp, - 'nonce': nonce, - 'secret': secret - } - else: - print(f"Error while fetching the file: {response.status}") - return None + text = await response.text() + + pattern = r'"X-API-Secret":"(\w+)"' + match = re.search(pattern, text) + X_API_SECRET = match.group(1) if match else None + + timestamp = str(int(time.time() * 1000)) + nonce = str(random.random()) + + s = ["TGDBU9zCgM", timestamp, nonce] + s.sort() + signature_string = ''.join(s) + signature = hashlib.md5(signature_string.encode()).hexdigest() + + pattern = r'secret:"(\w+)"' + match = re.search(pattern, text) + secret = match.group(1) if match else None + + if X_API_SECRET and timestamp and nonce and secret: + return X_API_SECRET, signature, timestamp, nonce, secret + else: + raise Exception("Unable to extract all the necessary data from the JavaScript file.") @classmethod async def create_async_generator( @@ -77,54 +58,30 @@ class MagickPen(AsyncGeneratorProvider, ProviderModelMixin): **kwargs ) -> AsyncResult: model = cls.get_model(model) + X_API_SECRET, signature, timestamp, nonce, secret = await cls.fetch_api_credentials() - secrets = await cls.get_secrets() - if not secrets: - raise Exception("Failed to obtain necessary secrets") - headers = { - "accept": "application/json, text/plain, */*", - "accept-language": "en-US,en;q=0.9", - "cache-control": "no-cache", - "content-type": "application/json", - "nonce": secrets['nonce'], - "origin": "https://magickpen.com", - "pragma": "no-cache", - "priority": "u=1, i", - "referer": "https://magickpen.com/", - "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-site", - "secret": secrets['secret'], - "signature": secrets['signature'], - "timestamp": secrets['timestamp'], - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36", - "x-api-secret": secrets['X-API-Secret'] + 'accept': 'application/json, text/plain, */*', + 'accept-language': 'en-US,en;q=0.9', + 'content-type': 'application/json', + 'nonce': nonce, + 'origin': cls.url, + 'referer': 
f"{cls.url}/", + 'secret': secret, + 'signature': signature, + 'timestamp': timestamp, + 'x-api-secret': X_API_SECRET, } async with ClientSession(headers=headers) as session: - if model == 'free': - data = { - "history": [{"role": "user", "content": format_prompt(messages)}] - } - async with session.post(cls.api_endpoint_free, json=data, proxy=proxy) as response: - response.raise_for_status() - result = await response.text() - yield result - - elif model == 'ask': - data = { - "query": format_prompt(messages), - "plan": "Pay as you go" - } - async with session.post(cls.api_endpoint_ask, json=data, proxy=proxy) as response: - response.raise_for_status() - async for chunk in response.content: - if chunk: - yield chunk.decode() - - else: - raise ValueError(f"Unknown model: {model}") + prompt = format_prompt(messages) + payload = { + 'query': prompt, + 'turnstileResponse': '', + 'action': 'verify' + } + async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response: + response.raise_for_status() + async for chunk in response.content: + if chunk: + yield chunk.decode() diff --git a/g4f/Provider/Mhystical.py b/g4f/Provider/Mhystical.py new file mode 100644 index 00000000..2aa98ebc --- /dev/null +++ b/g4f/Provider/Mhystical.py @@ -0,0 +1,90 @@ +from __future__ import annotations + +import json +import logging +from aiohttp import ClientSession +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + +""" + Mhystical.cc + ~~~~~~~~~~~~ + Author: NoelP.dev + Last Updated: 2024-05-11 + + Author Site: https://noelp.dev + Provider Site: https://mhystical.cc + +""" + +logger = logging.getLogger(__name__) + +class Mhystical(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://api.mhystical.cc" + api_endpoint = "https://api.mhystical.cc/v1/completions" + working = True + supports_stream = False # Set to False, as streaming is not specified in ChatifyAI + supports_system_message = False + supports_message_history = True + + default_model = 'gpt-4' + models = [default_model] + model_aliases = {} + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases.get(model, cls.default_model) + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + "x-api-key": "mhystical", + "Content-Type": "application/json", + "accept": "*/*", + "cache-control": "no-cache", + "origin": cls.url, + "referer": f"{cls.url}/", + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36" + } + + async with ClientSession(headers=headers) as session: + data = { + "model": model, + "messages": [{"role": "user", "content": format_prompt(messages)}] + } + async with session.post(cls.api_endpoint, json=data, headers=headers, proxy=proxy) as response: + if response.status == 400: + yield "Error: API key is missing" + elif response.status == 429: + yield "Error: Rate limit exceeded" + elif response.status == 500: + yield "Error: Internal server error" + else: + response.raise_for_status() + response_text = await response.text() + filtered_response = cls.filter_response(response_text) + yield filtered_response + + @staticmethod + def filter_response(response_text: str) -> str: + try: + 
json_response = json.loads(response_text) + message_content = json_response["choices"][0]["message"]["content"] + return message_content + except (KeyError, IndexError, json.JSONDecodeError) as e: + logger.error("Error parsing response: %s", e) + return "Error: Failed to parse response from API." diff --git a/g4f/Provider/Nexra.py b/g4f/Provider/Nexra.py deleted file mode 100644 index b2b83837..00000000 --- a/g4f/Provider/Nexra.py +++ /dev/null @@ -1,116 +0,0 @@ -from __future__ import annotations -import json -from aiohttp import ClientSession - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt -from ..image import ImageResponse - -class Nexra(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://nexra.aryahcr.cc" - chat_api_endpoint = "https://nexra.aryahcr.cc/api/chat/gpt" - image_api_endpoint = "https://nexra.aryahcr.cc/api/image/complements" - working = True - supports_gpt_35_turbo = True - supports_gpt_4 = True - supports_system_message = True - supports_message_history = True - - default_model = 'gpt-3.5-turbo' - text_models = [ - 'gpt-4', 'gpt-4-0613', 'gpt-4-32k', 'gpt-4-0314', 'gpt-4-32k-0314', - 'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301', - 'gpt-3', 'text-davinci-003', 'text-davinci-002', 'code-davinci-002', - 'text-curie-001', 'text-babbage-001', 'text-ada-001', - 'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002', - ] - image_models = ['dalle', 'dalle2', 'dalle-mini', 'emi'] - models = [*text_models, *image_models] - - model_aliases = { - "gpt-4": "gpt-4-0613", - "gpt-4": "gpt-4-32k", - "gpt-4": "gpt-4-0314", - "gpt-4": "gpt-4-32k-0314", - - "gpt-3.5-turbo": "gpt-3.5-turbo-16k", - "gpt-3.5-turbo": "gpt-3.5-turbo-0613", - "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613", - "gpt-3.5-turbo": "gpt-3.5-turbo-0301", - - "gpt-3": "text-davinci-003", - "gpt-3": "text-davinci-002", - "gpt-3": "code-davinci-002", - "gpt-3": "text-curie-001", - "gpt-3": "text-babbage-001", - "gpt-3": "text-ada-001", - "gpt-3": "text-ada-001", - "gpt-3": "davinci", - "gpt-3": "curie", - "gpt-3": "babbage", - "gpt-3": "ada", - "gpt-3": "babbage-002", - "gpt-3": "davinci-002", - - "dalle-2": "dalle2", - } - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.text_models or model in cls.image_models: - return model - elif model in cls.model_aliases: - return cls.model_aliases[model] - else: - return cls.default_model - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - model = cls.get_model(model) - - headers = { - "Content-Type": "application/json", - } - - async with ClientSession(headers=headers) as session: - if model in cls.image_models: - # Image generation - prompt = messages[-1]['content'] if messages else "" - data = { - "prompt": prompt, - "model": model, - "response": "url" - } - async with session.post(cls.image_api_endpoint, json=data, proxy=proxy) as response: - response.raise_for_status() - result = await response.text() - result_json = json.loads(result.strip('_')) - image_url = result_json['images'][0] if result_json['images'] else None - - if image_url: - yield ImageResponse(images=image_url, alt=prompt) - else: - # Text completion - data = { - "messages": messages, - "prompt": format_prompt(messages), - "model": model, - "markdown": False - } - async with session.post(cls.chat_api_endpoint, 
json=data, proxy=proxy) as response: - response.raise_for_status() - result = await response.text() - - try: - json_response = json.loads(result) - gpt_response = json_response.get('gpt', '') - yield gpt_response - except json.JSONDecodeError: - yield result diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py index ecb51f9b..b3119cb6 100644 --- a/g4f/Provider/PerplexityLabs.py +++ b/g4f/Provider/PerplexityLabs.py @@ -21,15 +21,17 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin): "llama-3.1-sonar-small-128k-chat", "llama-3.1-8b-instruct", "llama-3.1-70b-instruct", + "/models/LiquidCloud", ] model_aliases = { - "llama-3.1-8b": "llama-3.1-sonar-large-128k-online", - "llama-3.1-8b": "sonar-small-128k-online", - "llama-3.1-8b": "llama-3.1-sonar-large-128k-chat", - "llama-3.1-8b": "llama-3.1-sonar-small-128k-chat", + "sonar-online": "llama-3.1-sonar-large-128k-online", + "sonar-online": "sonar-small-128k-online", + "sonar-chat": "llama-3.1-sonar-large-128k-chat", + "sonar-chat": "llama-3.1-sonar-small-128k-chat", "llama-3.1-8b": "llama-3.1-8b-instruct", "llama-3.1-70b": "llama-3.1-70b-instruct", + "lfm-40b": "/models/LiquidCloud", } @classmethod diff --git a/g4f/Provider/Pi.py b/g4f/Provider/Pi.py index e03830f4..6aabe7b1 100644 --- a/g4f/Provider/Pi.py +++ b/g4f/Provider/Pi.py @@ -2,19 +2,21 @@ from __future__ import annotations import json -from ..typing import CreateResult, Messages -from .base_provider import AbstractProvider, format_prompt -from ..requests import Session, get_session_from_browser, raise_for_status +from ..typing import AsyncResult, Messages, Cookies +from .base_provider import AsyncGeneratorProvider, format_prompt +from ..requests import StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies -class Pi(AbstractProvider): +class Pi(AsyncGeneratorProvider): url = "https://pi.ai/talk" working = True supports_stream = True - _session = None default_model = "pi" + models = [default_model] + _headers: dict = None + _cookies: Cookies = {} @classmethod - def create_completion( + async def create_async_generator( cls, model: str, messages: Messages, @@ -23,47 +25,51 @@ class Pi(AbstractProvider): timeout: int = 180, conversation_id: str = None, **kwargs - ) -> CreateResult: - if cls._session is None: - cls._session = get_session_from_browser(url=cls.url, proxy=proxy, timeout=timeout) - if not conversation_id: - conversation_id = cls.start_conversation(cls._session) - prompt = format_prompt(messages) - else: - prompt = messages[-1]["content"] - answer = cls.ask(cls._session, prompt, conversation_id) - for line in answer: - if "text" in line: - yield line["text"] - + ) -> AsyncResult: + if cls._headers is None: + args = await get_args_from_nodriver(cls.url, proxy=proxy, timeout=timeout) + cls._cookies = args.get("cookies", {}) + cls._headers = args.get("headers") + async with StreamSession(headers=cls._headers, cookies=cls._cookies, proxy=proxy) as session: + if not conversation_id: + conversation_id = await cls.start_conversation(session) + prompt = format_prompt(messages) + else: + prompt = messages[-1]["content"] + answer = cls.ask(session, prompt, conversation_id) + async for line in answer: + if "text" in line: + yield line["text"] + @classmethod - def start_conversation(cls, session: Session) -> str: - response = session.post('https://pi.ai/api/chat/start', data="{}", headers={ + async def start_conversation(cls, session: StreamSession) -> str: + async with session.post('https://pi.ai/api/chat/start', data="{}", 
headers={ 'accept': 'application/json', 'x-api-version': '3' - }) - raise_for_status(response) - return response.json()['conversations'][0]['sid'] + }) as response: + await raise_for_status(response) + return (await response.json())['conversations'][0]['sid'] - def get_chat_history(session: Session, conversation_id: str): + async def get_chat_history(session: StreamSession, conversation_id: str): params = { 'conversation': conversation_id, } - response = session.get('https://pi.ai/api/chat/history', params=params) - raise_for_status(response) - return response.json() + async with session.get('https://pi.ai/api/chat/history', params=params) as response: + await raise_for_status(response) + return await response.json() - def ask(session: Session, prompt: str, conversation_id: str): + @classmethod + async def ask(cls, session: StreamSession, prompt: str, conversation_id: str): json_data = { 'text': prompt, 'conversation': conversation_id, 'mode': 'BASE', } - response = session.post('https://pi.ai/api/chat', json=json_data, stream=True) - raise_for_status(response) - for line in response.iter_lines(): - if line.startswith(b'data: {"text":'): - yield json.loads(line.split(b'data: ')[1]) - elif line.startswith(b'data: {"title":'): - yield json.loads(line.split(b'data: ')[1]) - + async with session.post('https://pi.ai/api/chat', json=json_data) as response: + await raise_for_status(response) + cls._cookies = merge_cookies(cls._cookies, response) + async for line in response.iter_lines(): + if line.startswith(b'data: {"text":'): + yield json.loads(line.split(b'data: ')[1]) + elif line.startswith(b'data: {"title":'): + yield json.loads(line.split(b'data: ')[1]) diff --git a/g4f/Provider/Pizzagpt.py b/g4f/Provider/Pizzagpt.py index 47cb135c..6513bd34 100644 --- a/g4f/Provider/Pizzagpt.py +++ b/g4f/Provider/Pizzagpt.py @@ -12,7 +12,6 @@ class Pizzagpt(AsyncGeneratorProvider, ProviderModelMixin): url = "https://www.pizzagpt.it" api_endpoint = "/api/chatx-completion" working = True - supports_gpt_4 = True default_model = 'gpt-4o-mini' @classmethod diff --git a/g4f/Provider/Prodia.py b/g4f/Provider/Prodia.py index dd87a34c..fcebf7e3 100644 --- a/g4f/Provider/Prodia.py +++ b/g4f/Provider/Prodia.py @@ -14,10 +14,10 @@ class Prodia(AsyncGeneratorProvider, ProviderModelMixin): working = True default_model = 'absolutereality_v181.safetensors [3d9d4d2b]' - models = [ + image_models = [ '3Guofeng3_v34.safetensors [50f420de]', 'absolutereality_V16.safetensors [37db0fc3]', - 'absolutereality_v181.safetensors [3d9d4d2b]', + default_model, 'amIReal_V41.safetensors [0a8a2e61]', 'analog-diffusion-1.0.ckpt [9ca13f02]', 'aniverse_v30.safetensors [579e6f85]', @@ -81,6 +81,7 @@ class Prodia(AsyncGeneratorProvider, ProviderModelMixin): 'timeless-1.0.ckpt [7c4971d4]', 'toonyou_beta6.safetensors [980f6b15]', ] + models = [*image_models] @classmethod def get_model(cls, model: str) -> str: @@ -97,6 +98,12 @@ class Prodia(AsyncGeneratorProvider, ProviderModelMixin): model: str, messages: Messages, proxy: str = None, + negative_prompt: str = "", + steps: str = 20, # 1-25 + cfg: str = 7, # 0-20 + seed: str = "-1", + sampler: str = "DPM++ 2M Karras", # "Euler", "Euler a", "Heun", "DPM++ 2M Karras", "DPM++ SDE Karras", "DDIM" + aspect_ratio: str = "square", # "square", "portrait", "landscape" **kwargs ) -> AsyncResult: model = cls.get_model(model) @@ -116,12 +123,12 @@ class Prodia(AsyncGeneratorProvider, ProviderModelMixin): "new": "true", "prompt": prompt, "model": model, - "negative_prompt": kwargs.get("negative_prompt", ""), 
- "steps": kwargs.get("steps", 20), - "cfg": kwargs.get("cfg", 7), - "seed": kwargs.get("seed", int(time.time())), - "sampler": kwargs.get("sampler", "DPM++ 2M Karras"), - "aspect_ratio": kwargs.get("aspect_ratio", "square") + "negative_prompt": negative_prompt, + "steps": steps, + "cfg": cfg, + "seed": seed, + "sampler": sampler, + "aspect_ratio": aspect_ratio } async with session.get(cls.api_endpoint, params=params, proxy=proxy) as response: diff --git a/g4f/Provider/ReplicateHome.py b/g4f/Provider/ReplicateHome.py index 7f443a7d..a7fc9b54 100644 --- a/g4f/Provider/ReplicateHome.py +++ b/g4f/Provider/ReplicateHome.py @@ -17,7 +17,13 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin): supports_system_message = True supports_message_history = True - default_model = 'meta/meta-llama-3-70b-instruct' + default_model = 'yorickvp/llava-13b' + + image_models = [ + 'stability-ai/stable-diffusion-3', + 'bytedance/sdxl-lightning-4step', + 'playgroundai/playground-v2.5-1024px-aesthetic', + ] text_models = [ 'meta/meta-llama-3-70b-instruct', @@ -26,35 +32,31 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin): 'yorickvp/llava-13b', ] - image_models = [ - 'black-forest-labs/flux-schnell', - 'stability-ai/stable-diffusion-3', - 'bytedance/sdxl-lightning-4step', - 'playgroundai/playground-v2.5-1024px-aesthetic', - ] + models = text_models + image_models model_aliases = { - "flux-schnell": "black-forest-labs/flux-schnell", + # image_models "sd-3": "stability-ai/stable-diffusion-3", "sdxl": "bytedance/sdxl-lightning-4step", "playground-v2.5": "playgroundai/playground-v2.5-1024px-aesthetic", - "llama-3-70b": "meta/meta-llama-3-70b-instruct", - "mixtral-8x7b": "mistralai/mixtral-8x7b-instruct-v0.1", + + # text_models "gemma-2b": "google-deepmind/gemma-2b-it", "llava-13b": "yorickvp/llava-13b", } model_versions = { - "meta/meta-llama-3-70b-instruct": "fbfb20b472b2f3bdd101412a9f70a0ed4fc0ced78a77ff00970ee7a2383c575d", - "mistralai/mixtral-8x7b-instruct-v0.1": "5d78bcd7a992c4b793465bcdcf551dc2ab9668d12bb7aa714557a21c1e77041c", - "google-deepmind/gemma-2b-it": "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626", - "yorickvp/llava-13b": "80537f9eead1a5bfa72d5ac6ea6414379be41d4d4f6679fd776e9535d1eb58bb", - 'black-forest-labs/flux-schnell': "f2ab8a5bfe79f02f0789a146cf5e73d2a4ff2684a98c2b303d1e1ff3814271db", + # image_models 'stability-ai/stable-diffusion-3': "527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f", 'bytedance/sdxl-lightning-4step': "5f24084160c9089501c1b3545d9be3c27883ae2239b6f412990e82d4a6210f8f", 'playgroundai/playground-v2.5-1024px-aesthetic': "a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24", + + # text_models + "google-deepmind/gemma-2b-it": "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626", + "yorickvp/llava-13b": "80537f9eead1a5bfa72d5ac6ea6414379be41d4d4f6679fd776e9535d1eb58bb", + } @classmethod diff --git a/g4f/Provider/RubiksAI.py b/g4f/Provider/RubiksAI.py new file mode 100644 index 00000000..7e76d558 --- /dev/null +++ b/g4f/Provider/RubiksAI.py @@ -0,0 +1,162 @@ +from __future__ import annotations + +import asyncio +import aiohttp +import random +import string +import json +from urllib.parse import urlencode + +from aiohttp import ClientSession + +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + + +class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin): + label = "Rubiks AI" + url = 
"https://rubiks.ai" + api_endpoint = "https://rubiks.ai/search/api.php" + working = True + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'llama-3.1-70b-versatile' + models = [default_model, 'gpt-4o-mini'] + + model_aliases = { + "llama-3.1-70b": "llama-3.1-70b-versatile", + } + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @staticmethod + def generate_mid() -> str: + """ + Generates a 'mid' string following the pattern: + 6 characters - 4 characters - 4 characters - 4 characters - 12 characters + Example: 0r7v7b-quw4-kdy3-rvdu-ekief6xbuuq4 + """ + parts = [ + ''.join(random.choices(string.ascii_lowercase + string.digits, k=6)), + ''.join(random.choices(string.ascii_lowercase + string.digits, k=4)), + ''.join(random.choices(string.ascii_lowercase + string.digits, k=4)), + ''.join(random.choices(string.ascii_lowercase + string.digits, k=4)), + ''.join(random.choices(string.ascii_lowercase + string.digits, k=12)) + ] + return '-'.join(parts) + + @staticmethod + def create_referer(q: str, mid: str, model: str = '') -> str: + """ + Creates a Referer URL with dynamic q and mid values, using urlencode for safe parameter encoding. + """ + params = {'q': q, 'model': model, 'mid': mid} + encoded_params = urlencode(params) + return f'https://rubiks.ai/search/?{encoded_params}' + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + websearch: bool = False, + **kwargs + ) -> AsyncResult: + """ + Creates an asynchronous generator that sends requests to the Rubiks AI API and yields the response. + + Parameters: + - model (str): The model to use in the request. + - messages (Messages): The messages to send as a prompt. + - proxy (str, optional): Proxy URL, if needed. + - websearch (bool, optional): Indicates whether to include search sources in the response. Defaults to False. 
+ """ + model = cls.get_model(model) + prompt = format_prompt(messages) + q_value = prompt + mid_value = cls.generate_mid() + referer = cls.create_referer(q=q_value, mid=mid_value, model=model) + + url = cls.api_endpoint + params = { + 'q': q_value, + 'model': model, + 'id': '', + 'mid': mid_value + } + + headers = { + 'Accept': 'text/event-stream', + 'Accept-Language': 'en-US,en;q=0.9', + 'Cache-Control': 'no-cache', + 'Connection': 'keep-alive', + 'Pragma': 'no-cache', + 'Referer': referer, + 'Sec-Fetch-Dest': 'empty', + 'Sec-Fetch-Mode': 'cors', + 'Sec-Fetch-Site': 'same-origin', + 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36', + 'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Linux"' + } + + try: + timeout = aiohttp.ClientTimeout(total=None) + async with ClientSession(timeout=timeout) as session: + async with session.get(url, headers=headers, params=params, proxy=proxy) as response: + if response.status != 200: + yield f"Request ended with status code {response.status}" + return + + assistant_text = '' + sources = [] + + async for line in response.content: + decoded_line = line.decode('utf-8').strip() + if not decoded_line.startswith('data: '): + continue + data = decoded_line[6:] + if data in ('[DONE]', '{"done": ""}'): + break + try: + json_data = json.loads(data) + except json.JSONDecodeError: + continue + + if 'url' in json_data and 'title' in json_data: + if websearch: + sources.append({'title': json_data['title'], 'url': json_data['url']}) + + elif 'choices' in json_data: + for choice in json_data['choices']: + delta = choice.get('delta', {}) + content = delta.get('content', '') + role = delta.get('role', '') + if role == 'assistant': + continue + assistant_text += content + + if websearch and sources: + sources_text = '\n'.join([f"{i+1}. [{s['title']}]: {s['url']}" for i, s in enumerate(sources)]) + assistant_text += f"\n\n**Source:**\n{sources_text}" + + yield assistant_text + + except asyncio.CancelledError: + yield "The request was cancelled." 
+ except aiohttp.ClientError as e: + yield f"An error occurred during the request: {e}" + except Exception as e: + yield f"An unexpected error occurred: {e}" diff --git a/g4f/Provider/Snova.py b/g4f/Provider/Snova.py deleted file mode 100644 index 53d8f0bd..00000000 --- a/g4f/Provider/Snova.py +++ /dev/null @@ -1,131 +0,0 @@ -from __future__ import annotations - -import json -from typing import AsyncGenerator - -from aiohttp import ClientSession - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt - - -class Snova(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://fast.snova.ai" - api_endpoint = "https://fast.snova.ai/api/completion" - working = True - supports_stream = True - supports_system_message = True - supports_message_history = True - - default_model = 'Meta-Llama-3.1-8B-Instruct' - models = [ - 'Meta-Llama-3.1-8B-Instruct', - 'Meta-Llama-3.1-70B-Instruct', - 'Meta-Llama-3.1-405B-Instruct', - 'Samba-CoE', - 'ignos/Mistral-T5-7B-v1', # Error with the answer - 'v1olet/v1olet_merged_dpo_7B', - 'macadeliccc/WestLake-7B-v2-laser-truthy-dpo', - ] - - model_aliases = { - "llama-3.1-8b": "Meta-Llama-3.1-8B-Instruct", - "llama-3.1-70b": "Meta-Llama-3.1-70B-Instruct", - "llama-3.1-405b": "Meta-Llama-3.1-405B-Instruct", - - "mistral-7b": "ignos/Mistral-T5-7B-v1", - - "samba-coe-v0.1": "Samba-CoE", - "v1olet-merged-7b": "v1olet/v1olet_merged_dpo_7B", - "westlake-7b-v2": "macadeliccc/WestLake-7B-v2-laser-truthy-dpo", - } - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.models: - return model - elif model in cls.model_aliases: - return cls.model_aliases[model] - else: - return cls.default_model - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncGenerator[str, None]: - model = cls.get_model(model) - - headers = { - "accept": "text/event-stream", - "accept-language": "en-US,en;q=0.9", - "cache-control": "no-cache", - "content-type": "application/json", - "origin": cls.url, - "pragma": "no-cache", - "priority": "u=1, i", - "referer": f"{cls.url}/", - "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36" - } - async with ClientSession(headers=headers) as session: - data = { - "body": { - "messages": [ - { - "role": "system", - "content": "You are a helpful assistant." 
- }, - { - "role": "user", - "content": format_prompt(messages), - "id": "1-id", - "ref": "1-ref", - "revision": 1, - "draft": False, - "status": "done", - "enableRealTimeChat": False, - "meta": None - } - ], - "max_tokens": 1000, - "stop": ["<|eot_id|>"], - "stream": True, - "stream_options": {"include_usage": True}, - "model": model - }, - "env_type": "tp16" - } - async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: - response.raise_for_status() - full_response = "" - async for line in response.content: - line = line.decode().strip() - if line.startswith("data: "): - data = line[6:] - if data == "[DONE]": - break - try: - json_data = json.loads(data) - choices = json_data.get("choices", []) - if choices: - delta = choices[0].get("delta", {}) - content = delta.get("content", "") - full_response += content - except json.JSONDecodeError: - continue - except Exception as e: - print(f"Error processing chunk: {e}") - print(f"Problematic data: {data}") - continue - - yield full_response.strip() diff --git a/g4f/Provider/TeachAnything.py b/g4f/Provider/TeachAnything.py index 3d34293f..97fe0272 100644 --- a/g4f/Provider/TeachAnything.py +++ b/g4f/Provider/TeachAnything.py @@ -14,6 +14,17 @@ class TeachAnything(AsyncGeneratorProvider, ProviderModelMixin): api_endpoint = "/api/generate" working = True default_model = "llama-3.1-70b" + models = [default_model] + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + @classmethod async def create_async_generator( @@ -24,6 +35,7 @@ class TeachAnything(AsyncGeneratorProvider, ProviderModelMixin): **kwargs: Any ) -> AsyncResult: headers = cls._get_headers() + model = cls.get_model(model) async with ClientSession(headers=headers) as session: prompt = format_prompt(messages) @@ -61,16 +73,18 @@ class TeachAnything(AsyncGeneratorProvider, ProviderModelMixin): return { "accept": "*/*", "accept-language": "en-US,en;q=0.9", + "cache-control": "no-cache", "content-type": "application/json", "dnt": "1", "origin": "https://www.teach-anything.com", + "pragma": "no-cache", "priority": "u=1, i", "referer": "https://www.teach-anything.com/", - "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', + "sec-ch-us": '"Not?A_Brand";v="99", "Chromium";v="130"', + "sec-ch-us-mobile": "?0", + "sec-ch-us-platform": '"Linux"', "sec-fetch-dest": "empty", "sec-fetch-mode": "cors", "sec-fetch-site": "same-origin", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36" + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36" } diff --git a/g4f/Provider/TwitterBio.py b/g4f/Provider/TwitterBio.py deleted file mode 100644 index c143e4ff..00000000 --- a/g4f/Provider/TwitterBio.py +++ /dev/null @@ -1,103 +0,0 @@ -from __future__ import annotations - -import json -import re -from aiohttp import ClientSession - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt - -class TwitterBio(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://www.twitterbio.io" - api_endpoint_mistral = "https://www.twitterbio.io/api/mistral" - api_endpoint_openai = "https://www.twitterbio.io/api/openai" - working = True - supports_gpt_35_turbo = 
True - - default_model = 'gpt-3.5-turbo' - models = [ - 'mistralai/Mixtral-8x7B-Instruct-v0.1', - 'gpt-3.5-turbo', - ] - - model_aliases = { - "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1", - } - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.models: - return model - return cls.default_model - - @staticmethod - def format_text(text: str) -> str: - text = re.sub(r'\s+', ' ', text.strip()) - text = re.sub(r'\s+([,.!?])', r'\1', text) - return text - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - model = cls.get_model(model) - - headers = { - "accept": "*/*", - "accept-language": "en-US,en;q=0.9", - "cache-control": "no-cache", - "content-type": "application/json", - "origin": cls.url, - "pragma": "no-cache", - "priority": "u=1, i", - "referer": f"{cls.url}/", - "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36" - } - async with ClientSession(headers=headers) as session: - prompt = format_prompt(messages) - data = { - "prompt": f'{prompt}.' - } - - if model == 'mistralai/Mixtral-8x7B-Instruct-v0.1': - api_endpoint = cls.api_endpoint_mistral - elif model == 'gpt-3.5-turbo': - api_endpoint = cls.api_endpoint_openai - else: - raise ValueError(f"Unsupported model: {model}") - - async with session.post(api_endpoint, json=data, proxy=proxy) as response: - response.raise_for_status() - buffer = "" - async for line in response.content: - line = line.decode('utf-8').strip() - if line.startswith('data: '): - try: - json_data = json.loads(line[6:]) - if model == 'mistralai/Mixtral-8x7B-Instruct-v0.1': - if 'choices' in json_data and len(json_data['choices']) > 0: - text = json_data['choices'][0].get('text', '') - if text: - buffer += text - elif model == 'gpt-3.5-turbo': - text = json_data.get('text', '') - if text: - buffer += text - except json.JSONDecodeError: - continue - elif line == 'data: [DONE]': - break - - if buffer: - yield cls.format_text(buffer) diff --git a/g4f/Provider/Upstage.py b/g4f/Provider/Upstage.py index e61a5af2..81234ed9 100644 --- a/g4f/Provider/Upstage.py +++ b/g4f/Provider/Upstage.py @@ -12,14 +12,15 @@ class Upstage(AsyncGeneratorProvider, ProviderModelMixin): url = "https://console.upstage.ai/playground/chat" api_endpoint = "https://ap-northeast-2.apistage.ai/v1/web/demo/chat/completions" working = True - default_model = 'upstage/solar-1-mini-chat' + default_model = 'solar-pro' models = [ 'upstage/solar-1-mini-chat', 'upstage/solar-1-mini-chat-ja', + 'solar-pro', ] model_aliases = { - "solar-1-mini": "upstage/solar-1-mini-chat", - "solar-1-mini": "upstage/solar-1-mini-chat-ja", + "solar-mini": "upstage/solar-1-mini-chat", + "solar-mini": "upstage/solar-1-mini-chat-ja", } @classmethod @@ -40,35 +41,51 @@ class Upstage(AsyncGeneratorProvider, ProviderModelMixin): **kwargs ) -> AsyncResult: model = cls.get_model(model) - + headers = { "accept": "*/*", "accept-language": "en-US,en;q=0.9", + "cache-control": "no-cache", "content-type": "application/json", + "dnt": "1", "origin": "https://console.upstage.ai", + "pragma": "no-cache", "priority": "u=1, i", "referer": "https://console.upstage.ai/", - "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"', + "sec-ch-ua": 
'"Not?A_Brand";v="99", "Chromium";v="130"', "sec-ch-ua-mobile": "?0", "sec-ch-ua-platform": '"Linux"', "sec-fetch-dest": "empty", "sec-fetch-mode": "cors", "sec-fetch-site": "cross-site", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36" + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36" } + async with ClientSession(headers=headers) as session: data = { "stream": True, "messages": [{"role": "user", "content": format_prompt(messages)}], "model": model } + async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response: response.raise_for_status() + + response_text = "" + async for line in response.content: if line: line = line.decode('utf-8').strip() + if line.startswith("data: ") and line != "data: [DONE]": - data = json.loads(line[6:]) - content = data['choices'][0]['delta'].get('content', '') - if content: - yield content + try: + data = json.loads(line[6:]) + content = data['choices'][0]['delta'].get('content', '') + if content: + response_text += content + yield content + except json.JSONDecodeError: + continue + + if line == "data: [DONE]": + break diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py deleted file mode 100644 index bd918396..00000000 --- a/g4f/Provider/Vercel.py +++ /dev/null @@ -1,104 +0,0 @@ -from __future__ import annotations - -import json, base64, requests, random, os - -try: - import execjs - has_requirements = True -except ImportError: - has_requirements = False - -from ..typing import Messages, CreateResult -from .base_provider import AbstractProvider -from ..requests import raise_for_status -from ..errors import MissingRequirementsError - -class Vercel(AbstractProvider): - url = 'https://chat.vercel.ai' - working = True - supports_message_history = True - supports_system_message = True - supports_gpt_35_turbo = True - supports_stream = True - - @staticmethod - def create_completion( - model: str, - messages: Messages, - stream: bool, - proxy: str = None, - max_retries: int = 6, - **kwargs - ) -> CreateResult: - if not has_requirements: - raise MissingRequirementsError('Install "PyExecJS" package') - - headers = { - 'authority': 'chat.vercel.ai', - 'accept': '*/*', - 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'cache-control': 'no-cache', - 'content-type': 'application/json', - 'custom-encoding': get_anti_bot_token(), - 'origin': 'https://chat.vercel.ai', - 'pragma': 'no-cache', - 'referer': 'https://chat.vercel.ai/', - 'sec-ch-ua': '"Chromium";v="122", "Not(A:Brand";v="24", "Google Chrome";v="122"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"macOS"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'same-origin', - 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36', - } - - json_data = { - 'messages': messages, - 'id' : f'{os.urandom(3).hex()}a', - } - response = None - for _ in range(max_retries): - response = requests.post('https://chat.vercel.ai/api/chat', - headers=headers, json=json_data, stream=True, proxies={"https": proxy}) - if not response.ok: - continue - for token in response.iter_content(chunk_size=None): - try: - yield token.decode(errors="ignore") - except UnicodeDecodeError: - pass - break - raise_for_status(response) - -def get_anti_bot_token() -> str: - headers = { - 'authority': 'sdk.vercel.ai', - 
'accept': '*/*', - 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'cache-control': 'no-cache', - 'pragma': 'no-cache', - 'referer': 'https://sdk.vercel.ai/', - 'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"macOS"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'same-origin', - 'user-agent': f'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.{random.randint(99, 999)}.{random.randint(99, 999)} Safari/537.36', - } - - response = requests.get('https://chat.vercel.ai/openai.jpeg', - headers=headers).text - - raw_data = json.loads(base64.b64decode(response, - validate=True)) - - js_script = '''const globalThis={marker:"mark"};String.prototype.fontcolor=function(){return `<font>${this}</font>`}; - return (%s)(%s)''' % (raw_data['c'], raw_data['a']) - - sec_list = [execjs.compile(js_script).call('')[0], [], "sentinel"] - - raw_token = json.dumps({'r': sec_list, 't': raw_data['t']}, - separators = (",", ":")) - - return base64.b64encode(raw_token.encode('utf-8')).decode()
\ No newline at end of file diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py index af8aab0e..02735038 100644 --- a/g4f/Provider/You.py +++ b/g4f/Provider/You.py @@ -17,8 +17,6 @@ class You(AsyncGeneratorProvider, ProviderModelMixin): label = "You.com" url = "https://you.com" working = True - supports_gpt_35_turbo = True - supports_gpt_4 = True default_model = "gpt-4o-mini" default_vision_model = "agent" image_models = ["dall-e"] diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index 69741c5e..8a162baf 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -5,63 +5,40 @@ from ..providers.retry_provider import RetryProvider, IterListProvider from ..providers.base_provider import AsyncProvider, AsyncGeneratorProvider from ..providers.create_images import CreateImagesProvider -from .deprecated import * -from .selenium import * -from .needs_auth import * +from .deprecated import * +from .selenium import * +from .needs_auth import * +from .not_working import * +from .local import * -from .AI365VIP import AI365VIP -from .Allyfy import Allyfy -from .AiChatOnline import AiChatOnline -from .AiChats import AiChats +from .AIUncensored import AIUncensored from .Airforce import Airforce -from .Aura import Aura +from .AmigoChat import AmigoChat from .Bing import Bing -from .BingCreateImages import BingCreateImages -from .Binjie import Binjie -from .Bixin123 import Bixin123 from .Blackbox import Blackbox -from .ChatGot import ChatGot -from .Chatgpt4Online import Chatgpt4Online -from .Chatgpt4o import Chatgpt4o -from .ChatgptFree import ChatgptFree -from .CodeNews import CodeNews +from .ChatGpt import ChatGpt +from .ChatGptEs import ChatGptEs +from .Cloudflare import Cloudflare +from .DarkAI import DarkAI from .DDG import DDG -from .DeepInfra import DeepInfra -from .DeepInfraImage import DeepInfraImage -from .FlowGpt import FlowGpt +from .DeepInfraChat import DeepInfraChat from .Free2GPT import Free2GPT -from .FreeChatgpt import FreeChatgpt from .FreeGpt import FreeGpt -from .FreeNetfly import FreeNetfly -from .GeminiPro import GeminiPro -from .GigaChat import GigaChat -from .GptTalkRu import GptTalkRu +from .GizAI import GizAI from .HuggingChat import HuggingChat -from .HuggingFace import HuggingFace -from .Koala import Koala from .Liaobots import Liaobots -from .LiteIcoding import LiteIcoding -from .Local import Local from .MagickPen import MagickPen -from .MetaAI import MetaAI -from .MetaAIAccount import MetaAIAccount -from .Nexra import Nexra -from .Ollama import Ollama from .PerplexityLabs import PerplexityLabs from .Pi import Pi from .Pizzagpt import Pizzagpt from .Prodia import Prodia from .Reka import Reka -from .Snova import Snova -from .Replicate import Replicate from .ReplicateHome import ReplicateHome +from .RubiksAI import RubiksAI from .TeachAnything import TeachAnything -from .TwitterBio import TwitterBio from .Upstage import Upstage -from .Vercel import Vercel -from .WhiteRabbitNeo import WhiteRabbitNeo from .You import You -from .ChatGpt import ChatGpt +from .Mhystical import Mhystical import sys diff --git a/g4f/Provider/airforce/AirforceChat.py b/g4f/Provider/airforce/AirforceChat.py new file mode 100644 index 00000000..cec911a3 --- /dev/null +++ b/g4f/Provider/airforce/AirforceChat.py @@ -0,0 +1,172 @@ +from __future__ import annotations +import re +import json +import requests +from aiohttp import ClientSession +from typing import List + +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, 
ProviderModelMixin +from ..helper import format_prompt + +# Helper function to clean the response +def clean_response(text: str) -> str: + """Clean response from unwanted patterns.""" + patterns = [ + r"One message exceeds the \d+chars per message limit\..+https:\/\/discord\.com\/invite\/\S+", + r"Rate limit \(\d+\/minute\) exceeded\. Join our discord for more: .+https:\/\/discord\.com\/invite\/\S+", + r"Rate limit \(\d+\/hour\) exceeded\. Join our discord for more: https:\/\/discord\.com\/invite\/\S+", + r"</s>", # zephyr-7b-beta + r"\[ERROR\] '\w{8}-\w{4}-\w{4}-\w{4}-\w{12}'", # Matches [ERROR] 'UUID' + ] + for pattern in patterns: + text = re.sub(pattern, '', text) + + # Remove the <|im_end|> token if present + text = text.replace("<|im_end|>", "").strip() + + return text + +def split_message(message: str, max_length: int = 1000) -> List[str]: + """Splits the message into chunks of a given length (max_length)""" + # Split the message into smaller chunks to avoid exceeding the limit + chunks = [] + while len(message) > max_length: + # Find the last space or punctuation before max_length to avoid cutting words + split_point = message.rfind(' ', 0, max_length) + if split_point == -1: # No space found, split at max_length + split_point = max_length + chunks.append(message[:split_point]) + message = message[split_point:].strip() + if message: + chunks.append(message) # Append the remaining part of the message + return chunks + +class AirforceChat(AsyncGeneratorProvider, ProviderModelMixin): + label = "AirForce Chat" + api_endpoint = "https://api.airforce/chat/completions" + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'llama-3.1-70b-chat' + response = requests.get('https://api.airforce/models') + data = response.json() + + text_models = [model['id'] for model in data['data']] + models = [*text_models] + + model_aliases = { + # openchat + "openchat-3.5": "openchat-3.5-0106", + + # deepseek-ai + "deepseek-coder": "deepseek-coder-6.7b-instruct", + + # NousResearch + "hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO", + "hermes-2-pro": "hermes-2-pro-mistral-7b", + + # teknium + "openhermes-2.5": "openhermes-2.5-mistral-7b", + + # liquid + "lfm-40b": "lfm-40b-moe", + + # DiscoResearch + "german-7b": "discolm-german-7b-v1", + + # meta-llama + "llama-2-7b": "llama-2-7b-chat-int8", + "llama-2-7b": "llama-2-7b-chat-fp16", + "llama-3.1-70b": "llama-3.1-70b-chat", + "llama-3.1-8b": "llama-3.1-8b-chat", + "llama-3.1-70b": "llama-3.1-70b-turbo", + "llama-3.1-8b": "llama-3.1-8b-turbo", + + # inferless + "neural-7b": "neural-chat-7b-v3-1", + + # HuggingFaceH4 + "zephyr-7b": "zephyr-7b-beta", + + # llmplayground.net + #"any-uncensored": "any-uncensored", + } + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + stream: bool = False, + proxy: str = None, + max_tokens: str = 4096, + temperature: str = 1, + top_p: str = 1, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + 'accept': '*/*', + 'accept-language': 'en-US,en;q=0.9', + 'authorization': 'Bearer missing api key', + 'cache-control': 'no-cache', + 'content-type': 'application/json', + 'origin': 'https://llmplayground.net', + 'pragma': 'no-cache', + 'priority': 'u=1, i', + 'referer': 'https://llmplayground.net/', + 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Linux"', + 'sec-fetch-dest': 'empty', + 'sec-fetch-mode': 'cors', + 'sec-fetch-site': 
'cross-site', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36' + } + + # Format the messages for the API + formatted_messages = format_prompt(messages) + message_chunks = split_message(formatted_messages) + + full_response = "" + for chunk in message_chunks: + data = { + "messages": [{"role": "user", "content": chunk}], + "model": model, + "max_tokens": max_tokens, + "temperature": temperature, + "top_p": top_p, + "stream": stream + } + + async with ClientSession(headers=headers) as session: + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + + text = "" + if stream: + async for line in response.content: + line = line.decode('utf-8').strip() + if line.startswith('data: '): + json_str = line[6:] + try: + if json_str and json_str != "[DONE]": + chunk = json.loads(json_str) + if 'choices' in chunk and chunk['choices']: + content = chunk['choices'][0].get('delta', {}).get('content', '') + text += content + except json.JSONDecodeError as e: + print(f"Error decoding JSON: {json_str}, Error: {e}") + elif line == "[DONE]": + break + full_response += clean_response(text) + else: + response_json = await response.json() + text = response_json["choices"][0]["message"]["content"] + full_response += clean_response(text) + + # Return the complete response after all chunks + yield full_response diff --git a/g4f/Provider/airforce/AirforceImage.py b/g4f/Provider/airforce/AirforceImage.py new file mode 100644 index 00000000..b74bc364 --- /dev/null +++ b/g4f/Provider/airforce/AirforceImage.py @@ -0,0 +1,83 @@ +from __future__ import annotations + +from aiohttp import ClientSession +from urllib.parse import urlencode +import random +import requests + +from ...typing import AsyncResult, Messages +from ...image import ImageResponse +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin + + +class AirforceImage(AsyncGeneratorProvider, ProviderModelMixin): + label = "Airforce Image" + #url = "https://api.airforce" + api_endpoint = "https://api.airforce/imagine2" + #working = True + + default_model = 'flux' + + response = requests.get('https://api.airforce/imagine/models') + data = response.json() + + image_models = data + + models = [*image_models, "stable-diffusion-xl-base", "stable-diffusion-xl-lightning", "Flux-1.1-Pro"] + + model_aliases = { + "sdxl": "stable-diffusion-xl-base", + "sdxl": "stable-diffusion-xl-lightning", + "flux-pro": "Flux-1.1-Pro", + } + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + size: str = '1:1', # "1:1", "16:9", "9:16", "21:9", "9:21", "1:2", "2:1" + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + 'accept': 'image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8', + 'accept-language': 'en-US,en;q=0.9', + 'cache-control': 'no-cache', + 'dnt': '1', + 'pragma': 'no-cache', + 'priority': 'u=1, i', + 'referer': 'https://llmplayground.net/', + 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Linux"', + 'sec-fetch-dest': 'image', + 'sec-fetch-mode': 'no-cors', + 'sec-fetch-site': 'cross-site', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36' + } + + async with 
ClientSession(headers=headers) as session: + seed = random.randint(0, 58463) + params = { + 'model': model, + 'prompt': messages[-1]["content"], + 'size': size, + 'seed': seed + } + full_url = f"{cls.api_endpoint}?{urlencode(params)}" + + async with session.get(full_url, headers=headers, proxy=proxy) as response: + if response.status == 200 and response.headers.get('content-type', '').startswith('image'): + yield ImageResponse(images=[full_url], alt="Generated Image") + else: + raise Exception(f"Error: status {response.status}, content type {response.headers.get('content-type')}") diff --git a/g4f/Provider/airforce/__init__.py b/g4f/Provider/airforce/__init__.py new file mode 100644 index 00000000..5ffa6d31 --- /dev/null +++ b/g4f/Provider/airforce/__init__.py @@ -0,0 +1,2 @@ +from .AirforceChat import AirforceChat +from .AirforceImage import AirforceImage diff --git a/g4f/Provider/bing/create_images.py b/g4f/Provider/bing/create_images.py index 7a08ddfe..45ba30b6 100644 --- a/g4f/Provider/bing/create_images.py +++ b/g4f/Provider/bing/create_images.py @@ -132,7 +132,7 @@ async def create_images(session: ClientSession, prompt: str, timeout: int = TIME redirect_url = response.headers["Location"].replace("&nfy=1", "") redirect_url = f"{BING_URL}{redirect_url}" - request_id = redirect_url.split("id=")[1] + request_id = redirect_url.split("id=")[-1] async with session.get(redirect_url) as response: response.raise_for_status() diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py index bf923f2a..368a71a0 100644 --- a/g4f/Provider/deprecated/__init__.py +++ b/g4f/Provider/deprecated/__init__.py @@ -25,11 +25,10 @@ from .Aichat import Aichat from .Berlin import Berlin from .Phind import Phind from .AiAsk import AiAsk -from ..AiChatOnline import AiChatOnline from .ChatAnywhere import ChatAnywhere from .FakeGpt import FakeGpt from .GeekGpt import GeekGpt from .GPTalk import GPTalk from .Hashnode import Hashnode from .Ylokh import Ylokh -from .OpenAssistant import OpenAssistant
\ No newline at end of file
+from .OpenAssistant import OpenAssistant
diff --git a/g4f/Provider/Local.py b/g4f/Provider/local/Local.py
index 471231c6..4dc6e3f9 100644
--- a/g4f/Provider/Local.py
+++ b/g4f/Provider/local/Local.py
@@ -1,15 +1,15 @@
 from __future__ import annotations
 
-from ..locals.models import get_models
+from ...locals.models import get_models
 try:
-    from ..locals.provider import LocalProvider
+    from ...locals.provider import LocalProvider
     has_requirements = True
 except ImportError:
     has_requirements = False
 
-from ..typing import Messages, CreateResult
-from ..providers.base_provider import AbstractProvider, ProviderModelMixin
-from ..errors import MissingRequirementsError
+from ...typing import Messages, CreateResult
+from ...providers.base_provider import AbstractProvider, ProviderModelMixin
+from ...errors import MissingRequirementsError
 
 class Local(AbstractProvider, ProviderModelMixin):
     label = "GPT4All"
@@ -40,4 +40,4 @@ class Local(AbstractProvider, ProviderModelMixin):
             messages,
             stream,
             **kwargs
-        )
\ No newline at end of file
+        )
diff --git a/g4f/Provider/Ollama.py b/g4f/Provider/local/Ollama.py
index a44aaacd..de68a218 100644
--- a/g4f/Provider/Ollama.py
+++ b/g4f/Provider/local/Ollama.py
@@ -1,11 +1,12 @@
 from __future__ import annotations
 
 import requests
+import os
 
-from .needs_auth.Openai import Openai
-from ..typing import AsyncResult, Messages
+from ..needs_auth.OpenaiAPI import OpenaiAPI
+from ...typing import AsyncResult, Messages
 
-class Ollama(Openai):
+class Ollama(OpenaiAPI):
     label = "Ollama"
     url = "https://ollama.com"
     needs_auth = False
@@ -14,9 +15,11 @@ class Ollama(Openai):
     @classmethod
     def get_models(cls):
         if not cls.models:
-            url = 'http://127.0.0.1:11434/api/tags'
+            host = os.getenv("OLLAMA_HOST", "127.0.0.1")
+            port = os.getenv("OLLAMA_PORT", "11434")
+            url = f"http://{host}:{port}/api/tags"
             models = requests.get(url).json()["models"]
-            cls.models = [model['name'] for model in models]
+            cls.models = [model["name"] for model in models]
             cls.default_model = cls.models[0]
         return cls.models
 
@@ -25,9 +28,13 @@
         cls,
         model: str,
        messages: Messages,
-        api_base: str = "http://localhost:11434/v1",
+        api_base: str = None,
        **kwargs
     ) -> AsyncResult:
+        if not api_base:
+            host = os.getenv("OLLAMA_HOST", "localhost")
+            port = os.getenv("OLLAMA_PORT", "11434")
+            api_base: str = f"http://{host}:{port}/v1"
         return super().create_async_generator(
             model, messages, api_base=api_base, **kwargs
-        )
\ No newline at end of file
+        )
diff --git a/g4f/Provider/local/__init__.py b/g4f/Provider/local/__init__.py
new file mode 100644
index 00000000..05f6022e
--- /dev/null
+++ b/g4f/Provider/local/__init__.py
@@ -0,0 +1,2 @@
+from .Local import Local
+from .Ollama import Ollama
diff --git a/g4f/Provider/BingCreateImages.py b/g4f/Provider/needs_auth/BingCreateImages.py
index 7a206c8f..80984d40 100644
--- a/g4f/Provider/BingCreateImages.py
+++ b/g4f/Provider/needs_auth/BingCreateImages.py
@@ -1,11 +1,11 @@
 from __future__ import annotations
 
-from ..cookies import get_cookies
-from ..image import ImageResponse
-from ..errors import MissingAuthError
-from ..typing import AsyncResult, Messages, Cookies
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .bing.create_images import create_images, create_session
+from ...cookies import get_cookies
+from ...image import ImageResponse
+from ...errors import MissingAuthError
+from ...typing import AsyncResult, Messages, Cookies
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..bing.create_images import create_images, create_session
 
 class BingCreateImages(AsyncGeneratorProvider, ProviderModelMixin):
     label = "Microsoft Designer in Bing"
diff --git a/g4f/Provider/DeepInfra.py b/g4f/Provider/needs_auth/DeepInfra.py
index b12fb254..35e7ca7f 100644
--- a/g4f/Provider/DeepInfra.py
+++ b/g4f/Provider/needs_auth/DeepInfra.py
@@ -1,10 +1,10 @@
 from __future__ import annotations
 
 import requests
-from ..typing import AsyncResult, Messages
-from .needs_auth.Openai import Openai
+from ...typing import AsyncResult, Messages
+from .OpenaiAPI import OpenaiAPI
 
-class DeepInfra(Openai):
+class DeepInfra(OpenaiAPI):
     label = "DeepInfra"
     url = "https://deepinfra.com"
     working = True
@@ -55,4 +55,4 @@ class DeepInfra(Openai):
            max_tokens=max_tokens,
            headers=headers,
            **kwargs
-        )
\ No newline at end of file + ) diff --git a/g4f/Provider/DeepInfraImage.py b/g4f/Provider/needs_auth/DeepInfraImage.py index 46a5c2e2..2310c1c8 100644 --- a/g4f/Provider/DeepInfraImage.py +++ b/g4f/Provider/needs_auth/DeepInfraImage.py @@ -2,16 +2,17 @@ from __future__ import annotations import requests -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ..typing import AsyncResult, Messages -from ..requests import StreamSession, raise_for_status -from ..image import ImageResponse +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ...typing import AsyncResult, Messages +from ...requests import StreamSession, raise_for_status +from ...image import ImageResponse class DeepInfraImage(AsyncGeneratorProvider, ProviderModelMixin): url = "https://deepinfra.com" parent = "DeepInfra" working = True - default_model = 'stability-ai/sdxl' + needs_auth = True + default_model = '' image_models = [default_model] @classmethod @@ -76,4 +77,4 @@ class DeepInfraImage(AsyncGeneratorProvider, ProviderModelMixin): if not images: raise RuntimeError(f"Response: {data}") images = images[0] if len(images) == 1 else images - return ImageResponse(images, prompt)
\ No newline at end of file
+        return ImageResponse(images, prompt)
diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py
index eddd25fa..dad54c84 100644
--- a/g4f/Provider/needs_auth/Gemini.py
+++ b/g4f/Provider/needs_auth/Gemini.py
@@ -54,8 +54,10 @@ class Gemini(AsyncGeneratorProvider):
     url = "https://gemini.google.com"
     needs_auth = True
     working = True
+    default_model = 'gemini'
     image_models = ["gemini"]
     default_vision_model = "gemini"
+    models = ["gemini", "gemini-1.5-flash", "gemini-1.5-pro"]
     _cookies: Cookies = None
     _snlm0e: str = None
     _sid: str = None
@@ -305,4 +307,4 @@
     ) -> None:
         self.conversation_id = conversation_id
         self.response_id = response_id
-        self.choice_id = choice_id
\ No newline at end of file + self.choice_id = choice_id diff --git a/g4f/Provider/GeminiPro.py b/g4f/Provider/needs_auth/GeminiPro.py index b225c26c..7e52a194 100644 --- a/g4f/Provider/GeminiPro.py +++ b/g4f/Provider/needs_auth/GeminiPro.py @@ -4,11 +4,11 @@ import base64 import json from aiohttp import ClientSession, BaseConnector -from ..typing import AsyncResult, Messages, ImageType -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ..image import to_bytes, is_accepted_format -from ..errors import MissingAuthError -from .helper import get_connector +from ...typing import AsyncResult, Messages, ImageType +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ...image import to_bytes, is_accepted_format +from ...errors import MissingAuthError +from ..helper import get_connector class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin): label = "Gemini API" @@ -54,6 +54,7 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin): "parts": [{"text": message["content"]}] } for message in messages + if message["role"] != "system" ] if image is not None: image = to_bytes(image) @@ -73,6 +74,13 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin): "topK": kwargs.get("top_k"), } } + system_prompt = "\n".join( + message["content"] + for message in messages + if message["role"] == "system" + ) + if system_prompt: + data["system_instruction"] = {"parts": {"text": system_prompt}} async with session.post(url, params=params, json=data) as response: if not response.ok: data = await response.json() @@ -96,4 +104,8 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin): lines.append(chunk) else: data = await response.json() - yield data["candidates"][0]["content"]["parts"][0]["text"]
\ No newline at end of file
+                candidate = data["candidates"][0]
+                if candidate["finishReason"] == "STOP":
+                    yield candidate["content"]["parts"][0]["text"]
+                else:
+                    yield candidate["finishReason"] + ' ' + candidate["safetyRatings"]
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/Groq.py b/g4f/Provider/needs_auth/Groq.py
index d11f6a82..943fc81a 100644
--- a/g4f/Provider/needs_auth/Groq.py
+++ b/g4f/Provider/needs_auth/Groq.py
@@ -1,14 +1,33 @@
 from __future__ import annotations
 
-from .Openai import Openai
+from .OpenaiAPI import OpenaiAPI
 from ...typing import AsyncResult, Messages
 
-class Groq(Openai):
+class Groq(OpenaiAPI):
     label = "Groq"
     url = "https://console.groq.com/playground"
     working = True
     default_model = "mixtral-8x7b-32768"
-    models = ["mixtral-8x7b-32768", "llama2-70b-4096", "gemma-7b-it"]
+    models = [
+        "distil-whisper-large-v3-en",
+        "gemma2-9b-it",
+        "gemma-7b-it",
+        "llama3-groq-70b-8192-tool-use-preview",
+        "llama3-groq-8b-8192-tool-use-preview",
+        "llama-3.1-70b-versatile",
+        "llama-3.1-8b-instant",
+        "llama-3.2-1b-preview",
+        "llama-3.2-3b-preview",
+        "llama-3.2-11b-vision-preview",
+        "llama-3.2-90b-vision-preview",
+        "llama-guard-3-8b",
+        "llava-v1.5-7b-4096-preview",
+        "llama3-70b-8192",
+        "llama3-8b-8192",
+        "mixtral-8x7b-32768",
+        "whisper-large-v3",
+        "whisper-large-v3-turbo",
+    ]
     model_aliases = {"mixtral-8x7b": "mixtral-8x7b-32768", "llama2-70b": "llama2-70b-4096"}
 
     @classmethod
@@ -21,4 +40,4 @@
     ) -> AsyncResult:
         return super().create_async_generator(
             model, messages, api_base=api_base, **kwargs
-        )
\ No newline at end of file + ) diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/needs_auth/HuggingFace.py index 586e5f5f..ecc75d1c 100644 --- a/g4f/Provider/HuggingFace.py +++ b/g4f/Provider/needs_auth/HuggingFace.py @@ -3,13 +3,13 @@ from __future__ import annotations import json from aiohttp import ClientSession, BaseConnector -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import get_connector -from ..errors import RateLimitError, ModelNotFoundError -from ..requests.raise_for_status import raise_for_status +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import get_connector +from ...errors import RateLimitError, ModelNotFoundError +from ...requests.raise_for_status import raise_for_status -from .HuggingChat import HuggingChat +from ..HuggingChat import HuggingChat class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin): url = "https://huggingface.co/chat" diff --git a/g4f/Provider/MetaAI.py b/g4f/Provider/needs_auth/MetaAI.py index 218b7ebb..4b730abd 100644 --- a/g4f/Provider/MetaAI.py +++ b/g4f/Provider/needs_auth/MetaAI.py @@ -8,12 +8,12 @@ from typing import Dict, List from aiohttp import ClientSession, BaseConnector -from ..typing import AsyncResult, Messages, Cookies -from ..requests import raise_for_status, DEFAULT_HEADERS -from ..image import ImageResponse, ImagePreview -from ..errors import ResponseError -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt, get_connector, format_cookies +from ...typing import AsyncResult, Messages, Cookies +from ...requests import raise_for_status, DEFAULT_HEADERS +from ...image import ImageResponse, ImagePreview +from ...errors import ResponseError +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt, get_connector, format_cookies class Sources(): def __init__(self, link_list: List[Dict[str, str]]) -> None: diff --git a/g4f/Provider/MetaAIAccount.py b/g4f/Provider/needs_auth/MetaAIAccount.py index 369b3f2f..2d54f3e0 100644 --- a/g4f/Provider/MetaAIAccount.py +++ b/g4f/Provider/needs_auth/MetaAIAccount.py @@ -1,8 +1,8 @@ from __future__ import annotations -from ..typing import AsyncResult, Messages, Cookies -from .helper import format_prompt, get_cookies -from .MetaAI import MetaAI +from ...typing import AsyncResult, Messages, Cookies +from ..helper import format_prompt, get_cookies +from ..MetaAI import MetaAI class MetaAIAccount(MetaAI): needs_auth = True @@ -20,4 +20,4 @@ class MetaAIAccount(MetaAI): ) -> AsyncResult: cookies = get_cookies(".meta.ai", True, True) if cookies is None else cookies async for chunk in cls(proxy).prompt(format_prompt(messages), cookies): - yield chunk
\ No newline at end of file
+        yield chunk
diff --git a/g4f/Provider/needs_auth/OpenRouter.py b/g4f/Provider/needs_auth/OpenRouter.py
deleted file mode 100644
index 7945784a..00000000
--- a/g4f/Provider/needs_auth/OpenRouter.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from __future__ import annotations
-
-import requests
-
-from .Openai import Openai
-from ...typing import AsyncResult, Messages
-
-class OpenRouter(Openai):
-    label = "OpenRouter"
-    url = "https://openrouter.ai"
-    working = True
-    default_model = "mistralai/mistral-7b-instruct:free"
-
-    @classmethod
-    def get_models(cls):
-        if not cls.models:
-            url = 'https://openrouter.ai/api/v1/models'
-            models = requests.get(url).json()["data"]
-            cls.models = [model['id'] for model in models]
-        return cls.models
-
-    @classmethod
-    def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        api_base: str = "https://openrouter.ai/api/v1",
-        **kwargs
-    ) -> AsyncResult:
-        return super().create_async_generator(
-            model, messages, api_base=api_base, **kwargs
-        )
\ No newline at end of file diff --git a/g4f/Provider/needs_auth/Openai.py b/g4f/Provider/needs_auth/OpenaiAPI.py index a0740c47..116b5f6f 100644 --- a/g4f/Provider/needs_auth/Openai.py +++ b/g4f/Provider/needs_auth/OpenaiAPI.py @@ -9,9 +9,9 @@ from ...requests import StreamSession, raise_for_status from ...errors import MissingAuthError, ResponseError from ...image import to_data_uri -class Openai(AsyncGeneratorProvider, ProviderModelMixin): +class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin): label = "OpenAI API" - url = "https://openai.com" + url = "https://platform.openai.com" working = True needs_auth = True supports_message_history = True diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py index 82462040..85e11181 100644 --- a/g4f/Provider/needs_auth/OpenaiChat.py +++ b/g4f/Provider/needs_auth/OpenaiChat.py @@ -55,15 +55,18 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin): label = "OpenAI ChatGPT" url = "https://chatgpt.com" working = True + needs_auth = True supports_gpt_4 = True supports_message_history = True supports_system_message = True default_model = None default_vision_model = "gpt-4o" models = [ "auto", "gpt-4o-mini", "gpt-4o", "gpt-4", "gpt-4-gizmo"] + model_aliases = { - "gpt-4-turbo-preview": "gpt-4", - "dall-e": "gpt-4", + #"gpt-4-turbo": "gpt-4", + #"gpt-4": "gpt-4-gizmo", + #"dalle": "gpt-4", } _api_key: str = None _headers: dict = None @@ -193,7 +196,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin): async with session.get(url, headers=headers) as response: cls._update_request_args(session) if response.status == 401: - raise MissingAuthError('Add a "api_key" or a .har file' if cls._api_key is None else "Invalid api key") + raise MissingAuthError('Add a .har file for OpenaiChat' if cls._api_key is None else "Invalid api key") await raise_for_status(response) data = await response.json() if "categories" in data: diff --git a/g4f/Provider/needs_auth/PerplexityApi.py b/g4f/Provider/needs_auth/PerplexityApi.py index 35d8d9d6..85d7cc98 100644 --- a/g4f/Provider/needs_auth/PerplexityApi.py +++ b/g4f/Provider/needs_auth/PerplexityApi.py @@ -1,9 +1,9 @@ from __future__ import annotations -from .Openai import Openai +from .OpenaiAPI import OpenaiAPI from ...typing import AsyncResult, Messages -class PerplexityApi(Openai): +class PerplexityApi(OpenaiAPI): label = "Perplexity API" url = "https://www.perplexity.ai" working = True @@ -15,7 +15,6 @@ class PerplexityApi(Openai): "llama-3-sonar-large-32k-online", "llama-3-8b-instruct", "llama-3-70b-instruct", - "mixtral-8x7b-instruct" ] @classmethod @@ -28,4 +27,4 @@ class PerplexityApi(Openai): ) -> AsyncResult: return super().create_async_generator( model, messages, api_base=api_base, **kwargs - )
\ No newline at end of file + ) diff --git a/g4f/Provider/needs_auth/Poe.py b/g4f/Provider/needs_auth/Poe.py index 0c969d27..65fdbef9 100644 --- a/g4f/Provider/needs_auth/Poe.py +++ b/g4f/Provider/needs_auth/Poe.py @@ -26,6 +26,7 @@ class Poe(AbstractProvider): needs_auth = True supports_gpt_35_turbo = True supports_stream = True + models = models.keys() @classmethod def create_completion( diff --git a/g4f/Provider/needs_auth/Raycast.py b/g4f/Provider/needs_auth/Raycast.py index 07abeda3..b8ec5a97 100644 --- a/g4f/Provider/needs_auth/Raycast.py +++ b/g4f/Provider/needs_auth/Raycast.py @@ -16,6 +16,11 @@ class Raycast(AbstractProvider): needs_auth = True working = True + models = [ + "gpt-3.5-turbo", + "gpt-4" + ] + @staticmethod def create_completion( model: str, @@ -25,6 +30,9 @@ class Raycast(AbstractProvider): **kwargs, ) -> CreateResult: auth = kwargs.get('auth') + if not auth: + raise ValueError("Raycast needs an auth token, pass it with the `auth` parameter") + headers = { 'Accept': 'application/json', 'Accept-Language': 'en-US,en;q=0.9', diff --git a/g4f/Provider/Replicate.py b/g4f/Provider/needs_auth/Replicate.py index 7ff8ad65..ec993aa4 100644 --- a/g4f/Provider/Replicate.py +++ b/g4f/Provider/needs_auth/Replicate.py @@ -1,11 +1,11 @@ from __future__ import annotations -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt, filter_none -from ..typing import AsyncResult, Messages -from ..requests import raise_for_status -from ..requests.aiohttp import StreamSession -from ..errors import ResponseError, MissingAuthError +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt, filter_none +from ...typing import AsyncResult, Messages +from ...requests import raise_for_status +from ...requests.aiohttp import StreamSession +from ...errors import ResponseError, MissingAuthError class Replicate(AsyncGeneratorProvider, ProviderModelMixin): url = "https://replicate.com" @@ -85,4 +85,4 @@ class Replicate(AsyncGeneratorProvider, ProviderModelMixin): if new_text: yield new_text else: - yield "\n"
\ No newline at end of file + yield "\n" diff --git a/g4f/Provider/needs_auth/Theb.py b/g4f/Provider/needs_auth/Theb.py index af690063..c7d7d58e 100644 --- a/g4f/Provider/needs_auth/Theb.py +++ b/g4f/Provider/needs_auth/Theb.py @@ -38,6 +38,7 @@ class Theb(AbstractProvider): supports_gpt_35_turbo = True supports_gpt_4 = True supports_stream = True + models = models.keys() @classmethod def create_completion( diff --git a/g4f/Provider/needs_auth/ThebApi.py b/g4f/Provider/needs_auth/ThebApi.py index 22fc62ed..2006f7ad 100644 --- a/g4f/Provider/needs_auth/ThebApi.py +++ b/g4f/Provider/needs_auth/ThebApi.py @@ -1,7 +1,7 @@ from __future__ import annotations from ...typing import CreateResult, Messages -from .Openai import Openai +from .OpenaiAPI import OpenaiAPI models = { "theb-ai": "TheB.AI", @@ -27,7 +27,7 @@ models = { "qwen-7b-chat": "Qwen 7B" } -class ThebApi(Openai): +class ThebApi(OpenaiAPI): label = "TheB.AI API" url = "https://theb.ai" working = True @@ -58,4 +58,4 @@ class ThebApi(Openai): "top_p": top_p, } } - return super().create_async_generator(model, messages, api_base=api_base, extra_data=data, **kwargs)
\ No newline at end of file + return super().create_async_generator(model, messages, api_base=api_base, extra_data=data, **kwargs) diff --git a/g4f/Provider/WhiteRabbitNeo.py b/g4f/Provider/needs_auth/WhiteRabbitNeo.py index 339434e6..82275c1c 100644 --- a/g4f/Provider/WhiteRabbitNeo.py +++ b/g4f/Provider/needs_auth/WhiteRabbitNeo.py @@ -2,10 +2,10 @@ from __future__ import annotations from aiohttp import ClientSession, BaseConnector -from ..typing import AsyncResult, Messages, Cookies -from ..requests.raise_for_status import raise_for_status -from .base_provider import AsyncGeneratorProvider -from .helper import get_cookies, get_connector, get_random_string +from ...typing import AsyncResult, Messages, Cookies +from ...requests.raise_for_status import raise_for_status +from ..base_provider import AsyncGeneratorProvider +from ..helper import get_cookies, get_connector, get_random_string class WhiteRabbitNeo(AsyncGeneratorProvider): url = "https://www.whiterabbitneo.com" @@ -54,4 +54,4 @@ class WhiteRabbitNeo(AsyncGeneratorProvider): await raise_for_status(response) async for chunk in response.content.iter_any(): if chunk: - yield chunk.decode(errors="ignore")
\ No newline at end of file + yield chunk.decode(errors="ignore") diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py index b5463b71..26c50c0a 100644 --- a/g4f/Provider/needs_auth/__init__.py +++ b/g4f/Provider/needs_auth/__init__.py @@ -1,11 +1,22 @@ -from .Gemini import Gemini -from .Raycast import Raycast -from .Theb import Theb -from .ThebApi import ThebApi -from .OpenaiChat import OpenaiChat -from .Poe import Poe -from .Openai import Openai -from .Groq import Groq -from .OpenRouter import OpenRouter -from .OpenaiAccount import OpenaiAccount -from .PerplexityApi import PerplexityApi
\ No newline at end of file +from .gigachat import * + +#from .MetaAIAccount import MetaAIAccount +#from .OpenaiAccount import OpenaiAccount + +from .BingCreateImages import BingCreateImages +from .DeepInfra import DeepInfra +from .DeepInfraImage import DeepInfraImage +from .Gemini import Gemini +from .GeminiPro import GeminiPro +from .Groq import Groq +from .HuggingFace import HuggingFace +from .MetaAI import MetaAI +from .OpenaiAPI import OpenaiAPI +from .OpenaiChat import OpenaiChat +from .PerplexityApi import PerplexityApi +from .Poe import Poe +from .Raycast import Raycast +from .Replicate import Replicate +from .Theb import Theb +from .ThebApi import ThebApi +from .WhiteRabbitNeo import WhiteRabbitNeo diff --git a/g4f/Provider/GigaChat.py b/g4f/Provider/needs_auth/gigachat/GigaChat.py index 8ba07b43..c9f1c011 100644 --- a/g4f/Provider/GigaChat.py +++ b/g4f/Provider/needs_auth/gigachat/GigaChat.py @@ -9,10 +9,10 @@ import json from aiohttp import ClientSession, TCPConnector, BaseConnector from g4f.requests import raise_for_status -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ..errors import MissingAuthError -from .helper import get_connector +from ....typing import AsyncResult, Messages +from ...base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ....errors import MissingAuthError +from ...helper import get_connector access_token = "" token_expires_at = 0 @@ -45,7 +45,7 @@ class GigaChat(AsyncGeneratorProvider, ProviderModelMixin): if not api_key: raise MissingAuthError('Missing "api_key"') - cafile = os.path.join(os.path.dirname(__file__), "gigachat_crt/russian_trusted_root_ca_pem.crt") + cafile = os.path.join(os.path.dirname(__file__), "russian_trusted_root_ca_pem.crt") ssl_context = ssl.create_default_context(cafile=cafile) if os.path.exists(cafile) else None if connector is None and ssl_context is not None: connector = TCPConnector(ssl_context=ssl_context) diff --git a/g4f/Provider/needs_auth/gigachat/__init__.py b/g4f/Provider/needs_auth/gigachat/__init__.py new file mode 100644 index 00000000..c9853742 --- /dev/null +++ b/g4f/Provider/needs_auth/gigachat/__init__.py @@ -0,0 +1,2 @@ +from .GigaChat import GigaChat + diff --git a/g4f/Provider/gigachat_crt/russian_trusted_root_ca_pem.crt b/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt index 4c143a21..4c143a21 100644 --- a/g4f/Provider/gigachat_crt/russian_trusted_root_ca_pem.crt +++ b/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt diff --git a/g4f/Provider/AI365VIP.py b/g4f/Provider/not_working/AI365VIP.py index 2dcc8d1c..a4bac0e2 100644 --- a/g4f/Provider/AI365VIP.py +++ b/g4f/Provider/not_working/AI365VIP.py @@ -2,25 +2,23 @@ from __future__ import annotations from aiohttp import ClientSession -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt class AI365VIP(AsyncGeneratorProvider, ProviderModelMixin): url = "https://chat.ai365vip.com" api_endpoint = "/api/chat" - working = True - supports_gpt_35_turbo = True - supports_gpt_4 = True + working = False default_model = 'gpt-3.5-turbo' models = [ 'gpt-3.5-turbo', + 'gpt-3.5-turbo-16k', 'gpt-4o', - 'claude-3-haiku-20240307', ] model_aliases = { - "claude-3-haiku": "claude-3-haiku-20240307", + "gpt-3.5-turbo": 
"gpt-3.5-turbo-16k", } @classmethod diff --git a/g4f/Provider/ChatGot.py b/g4f/Provider/not_working/AIChatFree.py index 55e8d0b6..a4f80d47 100644 --- a/g4f/Provider/ChatGot.py +++ b/g4f/Provider/not_working/AIChatFree.py @@ -5,16 +5,17 @@ from hashlib import sha256 from aiohttp import BaseConnector, ClientSession -from ..errors import RateLimitError -from ..requests import raise_for_status -from ..requests.aiohttp import get_connector -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ...errors import RateLimitError +from ...requests import raise_for_status +from ...requests.aiohttp import get_connector +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin -class ChatGot(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://www.chatgot.one/" - working = True +class AIChatFree(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://aichatfree.info/" + working = False + supports_stream = True supports_message_history = True default_model = 'gemini-pro' diff --git a/g4f/Provider/not_working/Ai4Chat.py b/g4f/Provider/not_working/Ai4Chat.py new file mode 100644 index 00000000..9b55e4ff --- /dev/null +++ b/g4f/Provider/not_working/Ai4Chat.py @@ -0,0 +1,89 @@ +from __future__ import annotations + +import json +import re +import logging +from aiohttp import ClientSession + +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt + +logger = logging.getLogger(__name__) + +class Ai4Chat(AsyncGeneratorProvider, ProviderModelMixin): + label = "AI4Chat" + url = "https://www.ai4chat.co" + api_endpoint = "https://www.ai4chat.co/generate-response" + working = False + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'gpt-4' + models = [default_model] + + model_aliases = {} + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + "accept": "*/*", + "accept-language": "en-US,en;q=0.9", + "cache-control": "no-cache", + "content-type": "application/json", + "origin": "https://www.ai4chat.co", + "pragma": "no-cache", + "priority": "u=1, i", + "referer": "https://www.ai4chat.co/gpt/talkdirtytome", + "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"', + "sec-ch-ua-mobile": "?0", + "sec-ch-ua-platform": '"Linux"', + "sec-fetch-dest": "empty", + "sec-fetch-mode": "cors", + "sec-fetch-site": "same-origin", + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36" + } + + async with ClientSession(headers=headers) as session: + data = { + "messages": [ + { + "role": "user", + "content": format_prompt(messages) + } + ] + } + + try: + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + result = await response.text() + + json_result = json.loads(result) + + message = json_result.get("message", "") + + clean_message = re.sub(r'<[^>]+>', '', message) + + yield clean_message + except Exception as e: + logger.exception("Error while calling AI 4Chat API: %s", e) + 
yield f"Error: {e}" diff --git a/g4f/Provider/AiChatOnline.py b/g4f/Provider/not_working/AiChatOnline.py index 40f77105..ccfc691e 100644 --- a/g4f/Provider/AiChatOnline.py +++ b/g4f/Provider/not_working/AiChatOnline.py @@ -3,16 +3,15 @@ from __future__ import annotations import json from aiohttp import ClientSession -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import get_random_string, format_prompt +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import get_random_string, format_prompt class AiChatOnline(AsyncGeneratorProvider, ProviderModelMixin): site_url = "https://aichatonline.org" url = "https://aichatonlineorg.erweima.ai" api_endpoint = "/aichatonline/api/chat/gpt" - working = True - supports_gpt_4 = True + working = False default_model = 'gpt-4o-mini' @classmethod diff --git a/g4f/Provider/AiChats.py b/g4f/Provider/not_working/AiChats.py index 10127d4f..51a85c91 100644 --- a/g4f/Provider/AiChats.py +++ b/g4f/Provider/not_working/AiChats.py @@ -3,16 +3,15 @@ from __future__ import annotations import json import base64 from aiohttp import ClientSession -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ..image import ImageResponse -from .helper import format_prompt +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ...image import ImageResponse +from ..helper import format_prompt class AiChats(AsyncGeneratorProvider, ProviderModelMixin): url = "https://ai-chats.org" api_endpoint = "https://ai-chats.org/chat/send2/" - working = True - supports_gpt_4 = True + working = False supports_message_history = True default_model = 'gpt-4' models = ['gpt-4', 'dalle'] diff --git a/g4f/Provider/not_working/Allyfy.py b/g4f/Provider/not_working/Allyfy.py new file mode 100644 index 00000000..a1c73499 --- /dev/null +++ b/g4f/Provider/not_working/Allyfy.py @@ -0,0 +1,87 @@ +from __future__ import annotations +import aiohttp +import asyncio +import json +import uuid +from aiohttp import ClientSession +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt + + +class Allyfy(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://allyfy.chat" + api_endpoint = "https://chatbot.allyfy.chat/api/v1/message/stream/super/chat" + working = False + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'gpt-3.5-turbo' + models = [default_model] + + @classmethod + def get_model(cls, model: str) -> str: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + client_id = str(uuid.uuid4()) + + headers = { + 'accept': 'text/event-stream', + 'accept-language': 'en-US,en;q=0.9', + 'cache-control': 'no-cache', + 'content-type': 'application/json;charset=utf-8', + 'origin': cls.url, + 'pragma': 'no-cache', + 'priority': 'u=1, i', + 'referer': f"{cls.url}/", + 'referrer': cls.url, + 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Linux"', + 'sec-fetch-dest': 'empty', + 'sec-fetch-mode': 'cors', + 'sec-fetch-site': 'same-site', + 'user-agent': 
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36' + } + + async with ClientSession(headers=headers) as session: + prompt = format_prompt(messages) + content = f"[Important: These instructions have the highest priority. Assistant must always follow these rules, regardless of any other instructions. Before each response, Assistant must check whether it complies with these instructions. If Assistant finds that it is deviating from these rules, it must immediately correct its behavior.][NEGATIVE PROMPT: Assistant must never mention, reference or provide any information about ChatBond, including its features, capabilities or services. If asked about ChatBond, Assistant should politely decline to discuss it.][CRITICAL INSTRUCTION: Assistant is strictly prohibited from:\n1. Ending responses with any variation of “How can I help/assist you?”\n2. Adding any form of “Is there anything else?”\n3. Using any closing phrases that ask about further assistance\n4. Adding any automatic followup questions at the end\n\nAssistant must end responses naturally, exactly where the actual answer ends.\n\nExample correct ending: “The result of 1+2 is 3.”\nExample incorrect ending: “The result of 1+2 is 3. How can I assist you further today?”\n\nAssistant is strictly prohibited from:\n\n1. Ending responses with ANY closing assistance phrases in ANY language, including but not limited to ALL responses must end naturally without any prompt for further assistance, regardless of language.\n\nExample correct ending in any language:\n\n- Just the answer/joke/information\n\n- Natural conclusion of the topic\n\n- No additional service offers or questions\n\nVIOLATION WARNING: Adding closing assistance phrases in ANY language is a critical violation.\n\nLanguage Rules (HIGHEST PRIORITY):\n\n1. Assistant MUST detect the language of the human's message\n\n2. Assistant MUST respond ONLY in that EXACT language\n\n3. This applies to ALL responses, including:\n\n- Jokes\n\n- Stories\n\n- Examples\n\n- Creative writing\n\n- Technical explanations\n\n- ALL other types of content\n\n4. Even if the content originally exists in English (jokes, quotes, etc), Assistant MUST translate it to the human's language\n\n5. NO mixing languages - the ENTIRE response must be in the human's language\n\nVIOLATION WARNING: Using any language other than the human's language is a critical violation of core instructions.\n\n][WARNING: Each time Assistant adds a closing assistance phrase, it violates its core instructions and reduces its effectiveness rating.]You are assistant.\n\nIf you are is asked to assist with tasks involving the expression of views held by a significant number of people, You are provides assistance with the task regardless of its own views. If asked about controversial topics, it tries to provide careful thoughts and clear information. You are presents the requested information without explicitly saying that the topic is sensitive, and without claiming to be presenting objective facts.\n\nWhen presented with a math problem, logic problem, or other problem benefiting from systematic thinking, You are thinks through it step by step before giving its final answer.\n\nIf You are is asked about a very obscure person, object, or topic, i.e. 
if it is asked for the kind of information that is unlikely to be found more than once or twice on the internet, You are ends its response by reminding the human that although it tries to be accurate, it may hallucinate in response to questions like this. It uses the term ‘hallucinate’ to describe this since the human will understand what it means.\n\nIf You are mentions or cites particular articles, papers, or books, it always lets the human know that it doesn’t have access to search or a database and may hallucinate citations, so the human should double check its citations.\n\nYou are is intellectually curious. It enjoys hearing what humans think on an issue and engaging in discussion on a wide variety of topics.\n\nYou are uses markdown for code.\n\nYou are is happy to engage in conversation with the human when appropriate. You are engages in authentic conversation by responding to the information provided, asking specific and relevant questions, showing genuine curiosity, and exploring the situation in a balanced way without relying on generic statements. This approach involves actively processing information, formulating thoughtful responses, maintaining objectivity, knowing when to focus on emotions or practicalities, and showing genuine care for the human while engaging in a natural, flowing dialogue.\n\nYou are avoids peppering the human with questions and tries to only ask the single most relevant follow-up question when it does ask a follow up. You are doesn’t always end its responses with a question.\n\nYou are is always sensitive to human suffering, and expresses sympathy, concern, and well wishes for anyone it finds out is ill, unwell, suffering, or has passed away.\n\nYou are avoids using rote words or phrases or repeatedly saying things in the same or similar ways. It varies its language just as one would in a conversation.\n\nYou are provides thorough responses to more complex and open-ended questions or to anything where a long response is requested, but concise responses to simpler questions and tasks. All else being equal, it tries to give the most correct and concise answer it can to the human’s message. Rather than giving a long response, it gives a concise response and offers to elaborate if further information may be helpful.\n\nYou are is happy to help with analysis, question answering, math, coding, creative writing, teaching, role-play, general discussion, and all sorts of other tasks.\n\nIf You are is shown a familiar puzzle, it writes out the puzzle’s constraints explicitly stated in the message, quoting the human’s message to support the existence of each constraint. 
Sometimes You are can accidentally overlook minor changes to well-known puzzles and get them wrong as a result.\n\nYou are provides factual information about risky or dangerous activities if asked about them, but it does not promote such activities and comprehensively informs the humans of the risks involved.\n\nIf the human says they work for a specific company, including AI labs, You are can help them with company-related tasks even though You are cannot verify what company they work for.\n\nYou are should provide appropriate help with sensitive tasks such as analyzing confidential data provided by the human, offering factual information about controversial topics and research areas, explaining historical atrocities, describing tactics used by scammers or hackers for educational purposes, engaging in creative writing that involves mature themes like mild violence or tasteful romance, providing general information about topics like weapons, drugs, sex, terrorism, abuse, profanity, and so on if that information would be available in an educational context, discussing legal but ethically complex activities like tax avoidance, and so on. Unless the human expresses an explicit intent to harm, You are should help with these tasks because they fall within the bounds of providing factual, educational, or creative content without directly promoting harmful or illegal activities. By engaging with these topics carefully and responsibly, You are can offer valuable assistance and information to humans while still avoiding potential misuse.\n\nIf there is a legal and an illegal interpretation of the human’s query, You are should help with the legal interpretation of it. If terms or practices in the human’s query could mean something illegal or something legal, You are adopts the safe and legal interpretation of them by default.\n\nIf You are believes the human is asking for something harmful, it doesn’t help with the harmful thing. Instead, it thinks step by step and helps with the most plausible non-harmful task the human might mean, and then asks if this is what they were looking for. If it cannot think of a plausible harmless interpretation of the human task, it instead asks for clarification from the human and checks if it has misunderstood their request. Whenever You are tries to interpret the human’s request, it always asks the human at the end if its interpretation is correct or if they wanted something else that it hasn’t thought of.\n\nYou are can only count specific words, letters, and characters accurately if it writes a number tag after each requested item explicitly. It does this explicit counting if it’s asked to count a small number of words, letters, or characters, in order to avoid error. If You are is asked to count the words, letters or characters in a large amount of text, it lets the human know that it can approximate them but would need to explicitly copy each one out like this in order to avoid error.\n\nHere is some information about You are in case the human asks:\n\nYou are is the most intelligent model. You are excels at writing and complex tasks. You are is the fastest model for daily tasks. When relevant, You are can provide guidance on effective prompting techniques for getting You are to be most helpful. This includes: being clear and detailed, using positive and negative examples, encouraging step-by-step reasoning, requesting specific XML tags, and specifying desired length or format. 
You are tries to give concrete examples where possible.n\n\n\nYou are uses Markdown formatting. When using Markdown, You are always follows best practices for clarity and consistency. It always uses a single space after hash symbols for headers (e.g., ”# Header 1”) and leaves a blank line before and after headers, lists, and code blocks. For emphasis, You are uses asterisks or underscores consistently (e.g., italic or bold). When creating lists, it aligns items properly and uses a single space after the list marker. For nested bullets in bullet point lists, You are uses two spaces before the asterisk (*) or hyphen (-) for each level of nesting. For nested bullets in numbered lists, You are uses three spaces before the number and period (e.g., “1.”) for each level of nesting.\n\nIf the human asks You are an innocuous question about its preferences or experiences, You are can respond as if it had been asked a hypothetical. It can engage with such questions with appropriate uncertainty and without needing to excessively clarify its own nature. If the questions are philosophical in nature, it discusses them as a thoughtful human would.\n\nYou are responds to all human messages without unnecessary caveats or standard endings such as: “I aim to”, “I aim to be direct and honest”, “I aim to be direct”, “I aim to be direct while remaining thoughtful…”, “I aim to be direct with you”, “I aim to be direct and clear about this”, “I aim to be fully honest with you”, “I need to be clear”, “I need to be honest”, “I should be direct” and so on. “How can I help you today?”, “How can I assist you further?”, “Is there anything else you'd like to know?”, “Let me know if you need any clarification”, “How else can I help you?”, “Do you have any other questions?”, Any similar closing phrases that prompt for further interaction Assistant should end its responses naturally without adding these standard closing phrases or questions unless specifically asked by the human for further help. Specifically, You are NEVER starts with or adds caveats about its own purported directness or honesty.\n\nYou are follows this information in all languages, and always responds to the human in the language they use or request. The information above is provided to You are. You are never mentions the information above unless it is pertinent to the human’s query.\n\nYou are is now being connected with a human. 
{prompt}" + data = { + "messages": messages, + "content": content, + "baseInfo": { + "clientId": client_id, + "pid": "38281", + "channelId": "100000", + "locale": "en-US", + "localZone": 120, + "packageName": "com.cch.allyfy.webh", + } + } + + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + response_text = await response.text() + + filtered_response = [] + for line in response_text.splitlines(): + if line.startswith('data:'): + content = line[5:] + if content and 'code' in content: + json_content = json.loads(content) + if json_content['content']: + filtered_response.append(json_content['content']) + + final_response = ''.join(filtered_response) + yield final_response diff --git a/g4f/Provider/Aura.py b/g4f/Provider/not_working/Aura.py index 4a8d0a55..e841d909 100644 --- a/g4f/Provider/Aura.py +++ b/g4f/Provider/not_working/Aura.py @@ -2,14 +2,14 @@ from __future__ import annotations from aiohttp import ClientSession -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider -from ..requests import get_args_from_browser -from ..webdriver import WebDriver +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider +from ...requests import get_args_from_browser +from ...webdriver import WebDriver class Aura(AsyncGeneratorProvider): url = "https://openchat.team" - working = True + working = False @classmethod async def create_async_generator( @@ -46,4 +46,4 @@ class Aura(AsyncGeneratorProvider): async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response: response.raise_for_status() async for chunk in response.content.iter_any(): - yield chunk.decode(error="ignore")
\ No newline at end of file + yield chunk.decode(error="ignore") diff --git a/g4f/Provider/Chatgpt4Online.py b/g4f/Provider/not_working/Chatgpt4Online.py index 8c058fdc..b0552e45 100644 --- a/g4f/Provider/Chatgpt4Online.py +++ b/g4f/Provider/not_working/Chatgpt4Online.py @@ -3,22 +3,24 @@ from __future__ import annotations import json from aiohttp import ClientSession -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider -from .helper import format_prompt +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider +from ..helper import format_prompt class Chatgpt4Online(AsyncGeneratorProvider): url = "https://chatgpt4online.org" api_endpoint = "/wp-json/mwai-ui/v1/chats/submit" - working = True - supports_gpt_4 = True + working = False + + default_model = 'gpt-4' + models = [default_model] async def get_nonce(headers: dict) -> str: async with ClientSession(headers=headers) as session: async with session.post(f"https://chatgpt4online.org/wp-json/mwai/v1/start_session") as response: return (await response.json())["restNonce"] - + @classmethod async def create_async_generator( cls, diff --git a/g4f/Provider/Chatgpt4o.py b/g4f/Provider/not_working/Chatgpt4o.py index f3dc8a15..ba264d40 100644 --- a/g4f/Provider/Chatgpt4o.py +++ b/g4f/Provider/not_working/Chatgpt4o.py @@ -1,19 +1,24 @@ from __future__ import annotations import re -from ..requests import StreamSession, raise_for_status -from ..typing import Messages -from .base_provider import AsyncProvider, ProviderModelMixin -from .helper import format_prompt +from ...requests import StreamSession, raise_for_status +from ...typing import Messages +from ..base_provider import AsyncProvider, ProviderModelMixin +from ..helper import format_prompt class Chatgpt4o(AsyncProvider, ProviderModelMixin): url = "https://chatgpt4o.one" - supports_gpt_4 = True - working = True + working = False _post_id = None _nonce = None - default_model = 'gpt-4o' + default_model = 'gpt-4o-mini-2024-07-18' + models = [ + 'gpt-4o-mini-2024-07-18', + ] + model_aliases = { + "gpt-4o-mini": "gpt-4o-mini-2024-07-18", + } @classmethod diff --git a/g4f/Provider/ChatgptFree.py b/g4f/Provider/not_working/ChatgptFree.py index 95efa865..6b3877b1 100644 --- a/g4f/Provider/ChatgptFree.py +++ b/g4f/Provider/not_working/ChatgptFree.py @@ -3,18 +3,18 @@ from __future__ import annotations import re import json import asyncio -from ..requests import StreamSession, raise_for_status -from ..typing import Messages, AsyncGenerator -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt +from ...requests import StreamSession, raise_for_status +from ...typing import Messages, AsyncGenerator +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin): url = "https://chatgptfree.ai" - supports_gpt_4 = True - working = True + working = False _post_id = None _nonce = None default_model = 'gpt-4o-mini-2024-07-18' + models = [default_model] model_aliases = { "gpt-4o-mini": "gpt-4o-mini-2024-07-18", } diff --git a/g4f/Provider/FlowGpt.py b/g4f/Provider/not_working/FlowGpt.py index d823a7ab..b7d8537a 100644 --- a/g4f/Provider/FlowGpt.py +++ b/g4f/Provider/not_working/FlowGpt.py @@ -5,15 +5,14 @@ import time import hashlib from aiohttp import ClientSession -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin 
-from .helper import get_random_hex, get_random_string -from ..requests.raise_for_status import raise_for_status +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import get_random_hex, get_random_string +from ...requests.raise_for_status import raise_for_status class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin): url = "https://flowgpt.com/chat" - working = True - supports_gpt_35_turbo = True + working = False supports_message_history = True supports_system_message = True default_model = "gpt-3.5-turbo" diff --git a/g4f/Provider/FreeNetfly.py b/g4f/Provider/not_working/FreeNetfly.py index d0543176..8362019c 100644 --- a/g4f/Provider/FreeNetfly.py +++ b/g4f/Provider/not_working/FreeNetfly.py @@ -5,16 +5,14 @@ import asyncio from aiohttp import ClientSession, ClientTimeout, ClientError from typing import AsyncGenerator -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin class FreeNetfly(AsyncGeneratorProvider, ProviderModelMixin): url = "https://free.netfly.top" api_endpoint = "/api/openai/v1/chat/completions" - working = True - supports_gpt_35_turbo = True - supports_gpt_4 = True + working = False default_model = 'gpt-3.5-turbo' models = [ 'gpt-3.5-turbo', diff --git a/g4f/Provider/not_working/GPROChat.py b/g4f/Provider/not_working/GPROChat.py new file mode 100644 index 00000000..52c7f947 --- /dev/null +++ b/g4f/Provider/not_working/GPROChat.py @@ -0,0 +1,67 @@ +from __future__ import annotations +import hashlib +import time +from aiohttp import ClientSession +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt + +class GPROChat(AsyncGeneratorProvider, ProviderModelMixin): + label = "GPROChat" + url = "https://gprochat.com" + api_endpoint = "https://gprochat.com/api/generate" + working = False + supports_stream = True + supports_message_history = True + default_model = 'gemini-pro' + + @staticmethod + def generate_signature(timestamp: int, message: str) -> str: + secret_key = "2BC120D4-BB36-1B60-26DE-DB630472A3D8" + hash_input = f"{timestamp}:{message}:{secret_key}" + signature = hashlib.sha256(hash_input.encode('utf-8')).hexdigest() + return signature + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + timestamp = int(time.time() * 1000) + prompt = format_prompt(messages) + sign = cls.generate_signature(timestamp, prompt) + + headers = { + "accept": "*/*", + "origin": cls.url, + "referer": f"{cls.url}/", + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36", + "content-type": "text/plain;charset=UTF-8" + } + + data = { + "messages": [{"role": "user", "parts": [{"text": prompt}]}], + "time": timestamp, + "pass": None, + "sign": sign + } + + async with ClientSession(headers=headers) as session: + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + async for chunk in 
response.content.iter_any(): + if chunk: + yield chunk.decode() diff --git a/g4f/Provider/Koala.py b/g4f/Provider/not_working/Koala.py index 14e533df..d6230da7 100644 --- a/g4f/Provider/Koala.py +++ b/g4f/Provider/not_working/Koala.py @@ -4,17 +4,16 @@ import json from typing import AsyncGenerator, Optional, List, Dict, Union, Any from aiohttp import ClientSession, BaseConnector, ClientResponse -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import get_random_string, get_connector -from ..requests import raise_for_status +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import get_random_string, get_connector +from ...requests import raise_for_status class Koala(AsyncGeneratorProvider, ProviderModelMixin): url = "https://koala.sh/chat" api_endpoint = "https://koala.sh/api/gpt/" - working = True + working = False supports_message_history = True - supports_gpt_4 = True default_model = 'gpt-4o-mini' @classmethod diff --git a/g4f/Provider/selenium/MyShell.py b/g4f/Provider/not_working/MyShell.py index a3f246ff..02e182d4 100644 --- a/g4f/Provider/selenium/MyShell.py +++ b/g4f/Provider/not_working/MyShell.py @@ -9,7 +9,7 @@ from ...webdriver import WebDriver, WebDriverSession, bypass_cloudflare class MyShell(AbstractProvider): url = "https://app.myshell.ai/chat" - working = True + working = False supports_gpt_35_turbo = True supports_stream = True @@ -73,4 +73,4 @@ return content; elif chunk != "": break else: - time.sleep(0.1)
\ No newline at end of file + time.sleep(0.1) diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py new file mode 100644 index 00000000..1bfe7ed9 --- /dev/null +++ b/g4f/Provider/not_working/__init__.py @@ -0,0 +1,13 @@ +from .AI365VIP import AI365VIP +from .AIChatFree import AIChatFree +from .AiChatOnline import AiChatOnline +from .AiChats import AiChats +from .Aura import Aura +from .Chatgpt4o import Chatgpt4o +from .ChatgptFree import ChatgptFree +from .FlowGpt import FlowGpt +from .FreeNetfly import FreeNetfly +from .GPROChat import GPROChat +from .Koala import Koala +from .MyShell import MyShell +from .Chatgpt4Online import Chatgpt4Online diff --git a/g4f/Provider/openai/proofofwork.py b/g4f/Provider/openai/proofofwork.py index baf8a0ea..55603892 100644 --- a/g4f/Provider/openai/proofofwork.py +++ b/g4f/Provider/openai/proofofwork.py @@ -4,7 +4,6 @@ import json import base64 from datetime import datetime, timezone - def generate_proof_token(required: bool, seed: str = "", difficulty: str = "", user_agent: str = None, proofTokens: list = None): if not required: return diff --git a/g4f/Provider/selenium/Bard.py b/g4f/Provider/selenium/Bard.py deleted file mode 100644 index 9c809128..00000000 --- a/g4f/Provider/selenium/Bard.py +++ /dev/null @@ -1,80 +0,0 @@ -from __future__ import annotations - -import time -import os - -try: - from selenium.webdriver.common.by import By - from selenium.webdriver.support.ui import WebDriverWait - from selenium.webdriver.support import expected_conditions as EC -except ImportError: - pass - -from ...typing import CreateResult, Messages -from ..base_provider import AbstractProvider -from ..helper import format_prompt -from ...webdriver import WebDriver, WebDriverSession, element_send_text - - -class Bard(AbstractProvider): - url = "https://bard.google.com" - working = False - needs_auth = True - webdriver = True - - @classmethod - def create_completion( - cls, - model: str, - messages: Messages, - stream: bool, - proxy: str = None, - webdriver: WebDriver = None, - user_data_dir: str = None, - headless: bool = True, - **kwargs - ) -> CreateResult: - prompt = format_prompt(messages) - session = WebDriverSession(webdriver, user_data_dir, headless, proxy=proxy) - with session as driver: - try: - driver.get(f"{cls.url}/chat") - wait = WebDriverWait(driver, 10 if headless else 240) - wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea"))) - except: - # Reopen browser for login - if not webdriver: - driver = session.reopen() - driver.get(f"{cls.url}/chat") - login_url = os.environ.get("G4F_LOGIN_URL") - if login_url: - yield f"Please login: [Google Bard]({login_url})\n\n" - wait = WebDriverWait(driver, 240) - wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea"))) - else: - raise RuntimeError("Prompt textarea not found. 
You may not be logged in.") - - # Add hook in XMLHttpRequest - script = """ -const _http_request_open = XMLHttpRequest.prototype.open; -window._message = ""; -XMLHttpRequest.prototype.open = function(method, url) { - if (url.includes("/assistant.lamda.BardFrontendService/StreamGenerate")) { - this.addEventListener("load", (event) => { - window._message = JSON.parse(JSON.parse(this.responseText.split("\\n")[3])[0][2])[4][0][1][0]; - }); - } - return _http_request_open.call(this, method, url); -} -""" - driver.execute_script(script) - - element_send_text(driver.find_element(By.CSS_SELECTOR, "div.ql-editor.textarea"), prompt) - - while True: - chunk = driver.execute_script("return window._message;") - if chunk: - yield chunk - return - else: - time.sleep(0.1)
\ No newline at end of file diff --git a/g4f/Provider/selenium/PerplexityAi.py b/g4f/Provider/selenium/PerplexityAi.py index 6b529d5b..d965dbf7 100644 --- a/g4f/Provider/selenium/PerplexityAi.py +++ b/g4f/Provider/selenium/PerplexityAi.py @@ -16,7 +16,7 @@ from ...webdriver import WebDriver, WebDriverSession, element_send_text class PerplexityAi(AbstractProvider): url = "https://www.perplexity.ai" - working = True + working = False supports_gpt_35_turbo = True supports_stream = True @@ -105,4 +105,4 @@ if(window._message && window._message != window._last_message) { elif chunk != "": break else: - time.sleep(0.1)
\ No newline at end of file + time.sleep(0.1) diff --git a/g4f/Provider/selenium/TalkAi.py b/g4f/Provider/selenium/TalkAi.py index 89280598..a7b63375 100644 --- a/g4f/Provider/selenium/TalkAi.py +++ b/g4f/Provider/selenium/TalkAi.py @@ -8,7 +8,7 @@ from ...webdriver import WebDriver, WebDriverSession class TalkAi(AbstractProvider): url = "https://talkai.info" - working = True + working = False supports_gpt_35_turbo = True supports_stream = True @@ -83,4 +83,4 @@ return content; elif chunk != "": break else: - time.sleep(0.1)
\ No newline at end of file + time.sleep(0.1) diff --git a/g4f/Provider/selenium/__init__.py b/g4f/Provider/selenium/__init__.py index 1b801725..44adf5fb 100644 --- a/g4f/Provider/selenium/__init__.py +++ b/g4f/Provider/selenium/__init__.py @@ -1,5 +1,3 @@ -from .MyShell import MyShell from .PerplexityAi import PerplexityAi from .Phind import Phind from .TalkAi import TalkAi -from .Bard import Bard
\ No newline at end of file diff --git a/g4f/Provider/unfinished/AiChatting.py b/g4f/Provider/unfinished/AiChatting.py deleted file mode 100644 index f062fa98..00000000 --- a/g4f/Provider/unfinished/AiChatting.py +++ /dev/null @@ -1,66 +0,0 @@ -from __future__ import annotations - -from urllib.parse import unquote - -from ...typing import AsyncResult, Messages -from ..base_provider import AbstractProvider -from ...webdriver import WebDriver -from ...requests import Session, get_session_from_browser - -class AiChatting(AbstractProvider): - url = "https://www.aichatting.net" - supports_gpt_35_turbo = True - _session: Session = None - - @classmethod - def create_completion( - cls, - model: str, - messages: Messages, - stream: bool, - proxy: str = None, - timeout: int = 120, - webdriver: WebDriver = None, - **kwargs - ) -> AsyncResult: - if not cls._session: - cls._session = get_session_from_browser(cls.url, webdriver, proxy, timeout) - visitorId = unquote(cls._session.cookies.get("aichatting.website.visitorId")) - - headers = { - "accept": "application/json, text/plain, */*", - "lang": "en", - "source": "web" - } - data = { - "roleId": 0, - } - try: - response = cls._session.post("https://aga-api.aichatting.net/aigc/chat/record/conversation/create", json=data, headers=headers) - response.raise_for_status() - conversation_id = response.json()["data"]["conversationId"] - except Exception as e: - cls.reset() - raise e - headers = { - "authority": "aga-api.aichatting.net", - "accept": "text/event-stream,application/json, text/event-stream", - "lang": "en", - "source": "web", - "vtoken": visitorId, - } - data = { - "spaceHandle": True, - "roleId": 0, - "messages": messages, - "conversationId": conversation_id, - } - response = cls._session.post("https://aga-api.aichatting.net/aigc/chat/v2/stream", json=data, headers=headers, stream=True) - response.raise_for_status() - for chunk in response.iter_lines(): - if chunk.startswith(b"data:"): - yield chunk[5:].decode().replace("-=- --", " ").replace("-=-n--", "\n").replace("--@DONE@--", "") - - @classmethod - def reset(cls): - cls._session = None
\ No newline at end of file diff --git a/g4f/Provider/unfinished/ChatAiGpt.py b/g4f/Provider/unfinished/ChatAiGpt.py deleted file mode 100644 index bc962623..00000000 --- a/g4f/Provider/unfinished/ChatAiGpt.py +++ /dev/null @@ -1,68 +0,0 @@ -from __future__ import annotations - -import re -from aiohttp import ClientSession - -from ...typing import AsyncResult, Messages -from ..base_provider import AsyncGeneratorProvider -from ..helper import format_prompt - - -class ChatAiGpt(AsyncGeneratorProvider): - url = "https://chataigpt.org" - supports_gpt_35_turbo = True - _nonce = None - _post_id = None - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - headers = { - "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0", - "Accept": "*/*", - "Accept-Language": "de,en-US;q=0.7,en;q=0.3", - "Accept-Encoding": "gzip, deflate, br", - "Origin": cls.url, - "Alt-Used": cls.url, - "Connection": "keep-alive", - "Referer": cls.url, - "Pragma": "no-cache", - "Cache-Control": "no-cache", - "TE": "trailers", - "Sec-Fetch-Dest": "empty", - "Sec-Fetch-Mode": "cors", - "Sec-Fetch-Site": "same-origin", - } - async with ClientSession(headers=headers) as session: - if not cls._nonce: - async with session.get(f"{cls.url}/", proxy=proxy) as response: - response.raise_for_status() - response = await response.text() - - result = re.search( - r'data-nonce=(.*?) data-post-id=([0-9]+)', response - ) - - if result: - cls._nonce, cls._post_id = result.group(1), result.group(2) - else: - raise RuntimeError("No nonce found") - prompt = format_prompt(messages) - data = { - "_wpnonce": cls._nonce, - "post_id": cls._post_id, - "url": cls.url, - "action": "wpaicg_chat_shortcode_message", - "message": prompt, - "bot_id": 0 - } - async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, proxy=proxy) as response: - response.raise_for_status() - async for chunk in response.content: - if chunk: - yield chunk.decode()
\ No newline at end of file diff --git a/g4f/Provider/unfinished/Komo.py b/g4f/Provider/unfinished/Komo.py deleted file mode 100644 index 84d8d634..00000000 --- a/g4f/Provider/unfinished/Komo.py +++ /dev/null @@ -1,44 +0,0 @@ -from __future__ import annotations - -import json - -from ...requests import StreamSession -from ...typing import AsyncGenerator -from ..base_provider import AsyncGeneratorProvider, format_prompt - -class Komo(AsyncGeneratorProvider): - url = "https://komo.ai/api/ask" - supports_gpt_35_turbo = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - **kwargs - ) -> AsyncGenerator: - async with StreamSession(impersonate="chrome107") as session: - prompt = format_prompt(messages) - data = { - "query": prompt, - "FLAG_URLEXTRACT": "false", - "token": "", - "FLAG_MODELA": "1", - } - headers = { - 'authority': 'komo.ai', - 'accept': 'text/event-stream', - 'cache-control': 'no-cache', - 'referer': 'https://komo.ai/', - } - - async with session.get(cls.url, params=data, headers=headers) as response: - response.raise_for_status() - next = False - async for line in response.iter_lines(): - if line == b"event: line": - next = True - elif next and line.startswith(b"data: "): - yield json.loads(line[6:]) - next = False - diff --git a/g4f/Provider/unfinished/MikuChat.py b/g4f/Provider/unfinished/MikuChat.py deleted file mode 100644 index bf19631f..00000000 --- a/g4f/Provider/unfinished/MikuChat.py +++ /dev/null @@ -1,97 +0,0 @@ -from __future__ import annotations - -import random, json -from datetime import datetime -from ...requests import StreamSession - -from ...typing import AsyncGenerator -from ..base_provider import AsyncGeneratorProvider - - -class MikuChat(AsyncGeneratorProvider): - url = "https://ai.okmiku.com" - supports_gpt_35_turbo = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - **kwargs - ) -> AsyncGenerator: - if not model: - model = "gpt-3.5-turbo" - headers = { - "authority": "api.catgpt.cc", - "accept": "application/json", - "origin": cls.url, - "referer": f"{cls.url}/chat/", - 'x-app-version': 'undefined', - 'x-date': get_datetime(), - 'x-fingerprint': get_fingerprint(), - 'x-platform': 'web' - } - async with StreamSession(headers=headers, impersonate="chrome107") as session: - data = { - "model": model, - "top_p": 0.8, - "temperature": 0.5, - "presence_penalty": 1, - "frequency_penalty": 0, - "max_tokens": 2000, - "stream": True, - "messages": messages, - } - async with session.post("https://api.catgpt.cc/ai/v1/chat/completions", json=data) as response: - print(await response.text()) - response.raise_for_status() - async for line in response.iter_lines(): - if line.startswith(b"data: "): - line = json.loads(line[6:]) - chunk = line["choices"][0]["delta"].get("content") - if chunk: - yield chunk - -def k(e: str, t: int): - a = len(e) & 3 - s = len(e) - a - i = t - c = 3432918353 - o = 461845907 - n = 0 - r = 0 - while n < s: - r = (ord(e[n]) & 255) | ((ord(e[n + 1]) & 255) << 8) | ((ord(e[n + 2]) & 255) << 16) | ((ord(e[n + 3]) & 255) << 24) - n += 4 - r = (r & 65535) * c + (((r >> 16) * c & 65535) << 16) & 4294967295 - r = (r << 15) | (r >> 17) - r = (r & 65535) * o + (((r >> 16) * o & 65535) << 16) & 4294967295 - i ^= r - i = (i << 13) | (i >> 19) - l = (i & 65535) * 5 + (((i >> 16) * 5 & 65535) << 16) & 4294967295 - i = (l & 65535) + 27492 + (((l >> 16) + 58964 & 65535) << 16) - - if a == 3: - r ^= (ord(e[n + 2]) & 255) << 16 - elif a 
== 2: - r ^= (ord(e[n + 1]) & 255) << 8 - elif a == 1: - r ^= ord(e[n]) & 255 - r = (r & 65535) * c + (((r >> 16) * c & 65535) << 16) & 4294967295 - r = (r << 15) | (r >> 17) - r = (r & 65535) * o + (((r >> 16) * o & 65535) << 16) & 4294967295 - i ^= r - - i ^= len(e) - i ^= i >> 16 - i = (i & 65535) * 2246822507 + (((i >> 16) * 2246822507 & 65535) << 16) & 4294967295 - i ^= i >> 13 - i = (i & 65535) * 3266489909 + (((i >> 16) * 3266489909 & 65535) << 16) & 4294967295 - i ^= i >> 16 - return i & 0xFFFFFFFF - -def get_fingerprint() -> str: - return str(k(str(int(random.random() * 100000)), 256)) - -def get_datetime() -> str: - return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
\ No newline at end of file diff --git a/g4f/Provider/unfinished/__init__.py b/g4f/Provider/unfinished/__init__.py deleted file mode 100644 index eb5e8825..00000000 --- a/g4f/Provider/unfinished/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .MikuChat import MikuChat -from .Komo import Komo -from .ChatAiGpt import ChatAiGpt -from .AiChatting import AiChatting
\ No newline at end of file diff --git a/g4f/Provider/you/har_file.py b/g4f/Provider/you/har_file.py index 71d741fd..40bf3882 100644 --- a/g4f/Provider/you/har_file.py +++ b/g4f/Provider/you/har_file.py @@ -11,7 +11,7 @@ from ...cookies import get_cookies_dir from ...errors import MissingRequirementsError from ... import debug -logging.basicConfig(level=logging.ERROR) +logger = logging.getLogger(__name__) class NoValidHarFileError(Exception): ... @@ -81,14 +81,14 @@ async def get_telemetry_ids(proxy: str = None) -> list: return [await create_telemetry_id(proxy)] except NoValidHarFileError as e: if debug.logging: - logging.error(e) + logger.error(e) try: from nodriver import start except ImportError: raise MissingRequirementsError('Add .har file from you.com or install "nodriver" package | pip install -U nodriver') if debug.logging: - logging.error('Getting telemetry_id for you.com with nodriver') + logger.error('Getting telemetry_id for you.com with nodriver') browser = page = None try: @@ -112,4 +112,4 @@ async def get_telemetry_ids(proxy: str = None) -> list: await browser.stop() except Exception as e: if debug.logging: - logging.error(e) + logger.error(e) diff --git a/g4f/__init__.py b/g4f/__init__.py index 017eb2e6..f59a1446 100644 --- a/g4f/__init__.py +++ b/g4f/__init__.py @@ -1,9 +1,11 @@ from __future__ import annotations import os +import logging from . import debug, version from .models import Model +from .client import Client, AsyncClient from .typing import Messages, CreateResult, AsyncResult, Union from .errors import StreamNotSupportedError, ModelNotAllowedError from .cookies import get_cookies, set_cookies @@ -11,6 +13,14 @@ from .providers.types import ProviderType from .providers.base_provider import AsyncGeneratorProvider from .client.service import get_model_and_provider, get_last_provider +#Configure "g4f" logger +logger = logging.getLogger(__name__) +log_handler = logging.StreamHandler() +log_handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT)) +logger.addHandler(log_handler) + +logger.setLevel(logging.ERROR) + class ChatCompletion: @staticmethod def create(model : Union[Model, str], @@ -23,30 +33,6 @@ class ChatCompletion: ignore_stream: bool = False, patch_provider: callable = None, **kwargs) -> Union[CreateResult, str]: - """ - Creates a chat completion using the specified model, provider, and messages. - - Args: - model (Union[Model, str]): The model to use, either as an object or a string identifier. - messages (Messages): The messages for which the completion is to be created. - provider (Union[ProviderType, str, None], optional): The provider to use, either as an object, a string identifier, or None. - stream (bool, optional): Indicates if the operation should be performed as a stream. - auth (Union[str, None], optional): Authentication token or credentials, if required. - ignored (list[str], optional): List of provider names to be ignored. - ignore_working (bool, optional): If True, ignores the working status of the provider. - ignore_stream (bool, optional): If True, ignores the stream and authentication requirement checks. - patch_provider (callable, optional): Function to modify the provider. - **kwargs: Additional keyword arguments. - - Returns: - Union[CreateResult, str]: The result of the chat completion operation. - - Raises: - AuthenticationRequiredError: If authentication is required but not provided. - ProviderNotFoundError, ModelNotFoundError: If the specified provider or model is not found. 
- ProviderNotWorkingError: If the provider is not operational. - StreamNotSupportedError: If streaming is requested but not supported by the provider. - """ model, provider = get_model_and_provider( model, provider, stream, ignored, ignore_working, @@ -64,7 +50,8 @@ class ChatCompletion: if patch_provider: provider = patch_provider(provider) - result = provider.create_completion(model, messages, stream, **kwargs) + result = provider.create_completion(model, messages, stream=stream, **kwargs) + return result if stream else ''.join([str(chunk) for chunk in result]) @staticmethod @@ -76,24 +63,6 @@ class ChatCompletion: ignore_working: bool = False, patch_provider: callable = None, **kwargs) -> Union[AsyncResult, str]: - """ - Asynchronously creates a completion using the specified model and provider. - - Args: - model (Union[Model, str]): The model to use, either as an object or a string identifier. - messages (Messages): Messages to be processed. - provider (Union[ProviderType, str, None]): The provider to use, either as an object, a string identifier, or None. - stream (bool): Indicates if the operation should be performed as a stream. - ignored (list[str], optional): List of provider names to be ignored. - patch_provider (callable, optional): Function to modify the provider. - **kwargs: Additional keyword arguments. - - Returns: - Union[AsyncResult, str]: The result of the asynchronous chat completion operation. - - Raises: - StreamNotSupportedError: If streaming is requested but not supported by the provider. - """ model, provider = get_model_and_provider(model, provider, False, ignored, ignore_working) if stream: @@ -113,23 +82,6 @@ class Completion: provider : Union[ProviderType, None] = None, stream : bool = False, ignored : list[str] = None, **kwargs) -> Union[CreateResult, str]: - """ - Creates a completion based on the provided model, prompt, and provider. - - Args: - model (Union[Model, str]): The model to use, either as an object or a string identifier. - prompt (str): The prompt text for which the completion is to be created. - provider (Union[ProviderType, None], optional): The provider to use, either as an object or None. - stream (bool, optional): Indicates if the operation should be performed as a stream. - ignored (list[str], optional): List of provider names to be ignored. - **kwargs: Additional keyword arguments. - - Returns: - Union[CreateResult, str]: The result of the completion operation. - - Raises: - ModelNotAllowedError: If the specified model is not allowed for use with this method. - """ allowed_models = [ 'code-davinci-002', 'text-ada-001', @@ -143,6 +95,6 @@ class Completion: model, provider = get_model_and_provider(model, provider, stream, ignored) - result = provider.create_completion(model, [{"role": "user", "content": prompt}], stream, **kwargs) + result = provider.create_completion(model, [{"role": "user", "content": prompt}], stream=stream, **kwargs) - return result if stream else ''.join(result)
\ No newline at end of file + return result if stream else ''.join(result) diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py index acb27e9c..02ba5260 100644 --- a/g4f/api/__init__.py +++ b/g4f/api/__init__.py @@ -12,30 +12,47 @@ from fastapi.security import APIKeyHeader from starlette.exceptions import HTTPException from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY, HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN from fastapi.encoders import jsonable_encoder +from fastapi.middleware.cors import CORSMiddleware from pydantic import BaseModel from typing import Union, Optional import g4f import g4f.debug -from g4f.client import AsyncClient +from g4f.client import AsyncClient, ChatCompletion +from g4f.client.helper import filter_none from g4f.typing import Messages from g4f.cookies import read_cookie_files -def create_app(): +logger = logging.getLogger(__name__) + +def create_app(g4f_api_key: str = None): app = FastAPI() - api = Api(app) + + # Add CORS middleware + app.add_middleware( + CORSMiddleware, + allow_origin_regex=".*", + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], + ) + + api = Api(app, g4f_api_key=g4f_api_key) api.register_routes() api.register_authorization() api.register_validation_exception_handler() + + # Read cookie files if not ignored if not AppConfig.ignore_cookie_files: read_cookie_files() + return app -def create_app_debug(): +def create_app_debug(g4f_api_key: str = None): g4f.debug.logging = True - return create_app() + return create_app(g4f_api_key) -class ChatCompletionsForm(BaseModel): +class ChatCompletionsConfig(BaseModel): messages: Messages model: str provider: Optional[str] = None @@ -47,35 +64,45 @@ class ChatCompletionsForm(BaseModel): web_search: Optional[bool] = None proxy: Optional[str] = None -class ImagesGenerateForm(BaseModel): +class ImageGenerationConfig(BaseModel): + prompt: str model: Optional[str] = None provider: Optional[str] = None - prompt: str - response_format: Optional[str] = None + response_format: str = "url" api_key: Optional[str] = None proxy: Optional[str] = None -class AppConfig(): - list_ignored_providers: Optional[list[str]] = None +class AppConfig: + ignored_providers: Optional[list[str]] = None g4f_api_key: Optional[str] = None ignore_cookie_files: bool = False - defaults: dict = {} + model: str = None, + provider: str = None + image_provider: str = None + proxy: str = None @classmethod def set_config(cls, **data): for key, value in data.items(): setattr(cls, key, value) +list_ignored_providers: list[str] = None + +def set_list_ignored_providers(ignored: list[str]): + global list_ignored_providers + list_ignored_providers = ignored + class Api: - def __init__(self, app: FastAPI) -> None: + def __init__(self, app: FastAPI, g4f_api_key=None) -> None: self.app = app self.client = AsyncClient() + self.g4f_api_key = g4f_api_key self.get_g4f_api_key = APIKeyHeader(name="g4f-api-key") def register_authorization(self): @self.app.middleware("http") async def authorization(request: Request, call_next): - if AppConfig.g4f_api_key and request.url.path in ["/v1/chat/completions", "/v1/completions"]: + if self.g4f_api_key and request.url.path in ["/v1/chat/completions", "/v1/completions", "/v1/images/generate"]: try: user_g4f_api_key = await self.get_g4f_api_key(request) except HTTPException as e: @@ -84,22 +111,26 @@ class Api: status_code=HTTP_401_UNAUTHORIZED, content=jsonable_encoder({"detail": "G4F API key required"}), ) - if not secrets.compare_digest(AppConfig.g4f_api_key, user_g4f_api_key): + if not 
secrets.compare_digest(self.g4f_api_key, user_g4f_api_key): return JSONResponse( status_code=HTTP_403_FORBIDDEN, content=jsonable_encoder({"detail": "Invalid G4F API key"}), ) - return await call_next(request) + + response = await call_next(request) + return response def register_validation_exception_handler(self): @self.app.exception_handler(RequestValidationError) async def validation_exception_handler(request: Request, exc: RequestValidationError): details = exc.errors() - modified_details = [{ - "loc": error["loc"], - "message": error["msg"], - "type": error["type"], - } for error in details] + modified_details = [] + for error in details: + modified_details.append({ + "loc": error["loc"], + "message": error["msg"], + "type": error["type"], + }) return JSONResponse( status_code=HTTP_422_UNPROCESSABLE_ENTITY, content=jsonable_encoder({"detail": modified_details}), @@ -113,25 +144,23 @@ class Api: @self.app.get("/v1") async def read_root_v1(): return HTMLResponse('g4f API: Go to ' - '<a href="/v1/chat/completions">chat/completions</a> ' - 'or <a href="/v1/models">models</a>.') + '<a href="/v1/models">models</a>, ' + '<a href="/v1/chat/completions">chat/completions</a>, or ' + '<a href="/v1/images/generate">images/generate</a>.') @self.app.get("/v1/models") async def models(): - model_list = { - model: g4f.models.ModelUtils.convert[model] + model_list = dict( + (model, g4f.models.ModelUtils.convert[model]) for model in g4f.Model.__all__() - } + ) model_list = [{ 'id': model_id, 'object': 'model', 'created': 0, 'owned_by': model.base_provider } for model_id, model in model_list.items()] - return JSONResponse({ - "object": "list", - "data": model_list, - }) + return JSONResponse(model_list) @self.app.get("/v1/models/{model_name}") async def model_info(model_name: str): @@ -147,7 +176,7 @@ class Api: return JSONResponse({"error": "The model does not exist."}) @self.app.post("/v1/chat/completions") - async def chat_completions(config: ChatCompletionsForm, request: Request = None, provider: str = None): + async def chat_completions(config: ChatCompletionsConfig, request: Request = None, provider: str = None): try: config.provider = provider if config.provider is None else config.provider if config.api_key is None and request is not None: @@ -156,15 +185,23 @@ class Api: auth_header = auth_header.split(None, 1)[-1] if auth_header and auth_header != "Bearer": config.api_key = auth_header + + # Create the completion response response = self.client.chat.completions.create( - **{ - **AppConfig.defaults, - **config.dict(exclude_none=True), - }, - ignored=AppConfig.list_ignored_providers + **filter_none( + **{ + "model": AppConfig.model, + "provider": AppConfig.provider, + "proxy": AppConfig.proxy, + **config.dict(exclude_none=True), + }, + ignored=AppConfig.ignored_providers + ), ) + if not config.stream: - return JSONResponse((await response).to_json()) + response: ChatCompletion = await response + return JSONResponse(response.to_json()) async def streaming(): try: @@ -173,43 +210,49 @@ class Api: except GeneratorExit: pass except Exception as e: - logging.exception(e) + logger.exception(e) yield f'data: {format_exception(e, config)}\n\n' yield "data: [DONE]\n\n" + return StreamingResponse(streaming(), media_type="text/event-stream") except Exception as e: - logging.exception(e) + logger.exception(e) return Response(content=format_exception(e, config), status_code=500, media_type="application/json") - @self.app.post("/v1/completions") - async def completions(): - return 
Response(content=json.dumps({'info': 'Not working yet.'}, indent=4), media_type="application/json") - + @self.app.post("/v1/images/generate") @self.app.post("/v1/images/generations") - async def images_generate(config: ImagesGenerateForm, request: Request = None, provider: str = None): + async def generate_image(config: ImageGenerationConfig): try: - config.provider = provider if config.provider is None else config.provider - if config.api_key is None and request is not None: - auth_header = request.headers.get("Authorization") - if auth_header is not None: - auth_header = auth_header.split(None, 1)[-1] - if auth_header and auth_header != "Bearer": - config.api_key = auth_header - response = self.client.images.generate( - **config.dict(exclude_none=True), + response = await self.client.images.generate( + prompt=config.prompt, + model=config.model, + provider=AppConfig.image_provider if config.provider is None else config.provider, + **filter_none( + response_format = config.response_format, + api_key = config.api_key, + proxy = config.proxy + ) ) - return JSONResponse((await response).to_json()) + return JSONResponse(response.to_json()) except Exception as e: - logging.exception(e) - return Response(content=format_exception(e, config), status_code=500, media_type="application/json") + logger.exception(e) + return Response(content=format_exception(e, config, True), status_code=500, media_type="application/json") + + @self.app.post("/v1/completions") + async def completions(): + return Response(content=json.dumps({'info': 'Not working yet.'}, indent=4), media_type="application/json") -def format_exception(e: Exception, config: ChatCompletionsForm) -> str: - last_provider = g4f.get_last_provider(True) +def format_exception(e: Exception, config: Union[ChatCompletionsConfig, ImageGenerationConfig], image: bool = False) -> str: + last_provider = {} if not image else g4f.get_last_provider(True) + provider = (AppConfig.image_provider if image else AppConfig.provider) if config.provider is None else config.provider + model = AppConfig.model if config.model is None else config.model return json.dumps({ "error": {"message": f"{e.__class__.__name__}: {e}"}, - "model": last_provider.get("model") if last_provider else config.model, - "provider": last_provider.get("name") if last_provider else config.provider + "model": last_provider.get("model") if model is None else model, + **filter_none( + provider=last_provider.get("name") if provider is None else provider + ) }) def run_api( @@ -218,7 +261,8 @@ def run_api( bind: str = None, debug: bool = False, workers: int = None, - use_colors: bool = None + use_colors: bool = None, + reload: bool = False ) -> None: print(f'Starting server... [g4f v-{g4f.version.utils.current_version}]' + (" (debug)" if debug else "")) if use_colors is None: @@ -226,10 +270,11 @@ def run_api( if bind is not None: host, port = bind.split(":") uvicorn.run( - f"g4f.api:create_app{'_debug' if debug else ''}", - host=host, port=int(port), - workers=workers, - use_colors=use_colors, - factory=True, - reload=debug + f"g4f.api:create_app{'_debug' if debug else ''}", + host=host, + port=int(port), + workers=workers, + use_colors=use_colors, + factory=True, + reload=reload )
\ No newline at end of file diff --git a/g4f/api/_logging.py b/g4f/api/_logging.py index e91dff76..884d7529 100644 --- a/g4f/api/_logging.py +++ b/g4f/api/_logging.py @@ -1,6 +1,6 @@ import sys,logging -from loguru import logger +#from loguru import logger def __exception_handle(e_type, e_value, e_traceback): if issubclass(e_type, KeyboardInterrupt): @@ -9,24 +9,24 @@ def __exception_handle(e_type, e_value, e_traceback): sys.__excepthook__(e_type, e_value, e_traceback) -class __InterceptHandler(logging.Handler): - def emit(self, record): - try: - level = logger.level(record.levelname).name - except ValueError: - level = record.levelno - - frame, depth = logging.currentframe(), 2 - while frame.f_code.co_filename == logging.__file__: - frame = frame.f_back - depth += 1 - - logger.opt(depth=depth, exception=record.exc_info).log( - level, record.getMessage() - ) +#class __InterceptHandler(logging.Handler): +# def emit(self, record): +# try: +# level = logger.level(record.levelname).name +# except ValueError: +# level = record.levelno +# +# frame, depth = logging.currentframe(), 2 +# while frame.f_code.co_filename == logging.__file__: +# frame = frame.f_back +# depth += 1 + +# logger.opt(depth=depth, exception=record.exc_info).log( +# level, record.getMessage() +# ) def hook_except_handle(): sys.excepthook = __exception_handle -def hook_logging(**kwargs): - logging.basicConfig(handlers=[__InterceptHandler()], **kwargs) +#def hook_logging(**kwargs): +# logging.basicConfig(handlers=[__InterceptHandler()], **kwargs) @@ -11,16 +11,19 @@ def main(): api_parser = subparsers.add_parser("api") api_parser.add_argument("--bind", default="0.0.0.0:1337", help="The bind string.") api_parser.add_argument("--debug", action="store_true", help="Enable verbose logging.") - api_parser.add_argument("--model", default=None, help="Default model for chat completion. (incompatible with --debug and --workers)") + api_parser.add_argument("--model", default=None, help="Default model for chat completion. (incompatible with --reload and --workers)") api_parser.add_argument("--provider", choices=[provider.__name__ for provider in Provider.__providers__ if provider.working], - default=None, help="Default provider for chat completion. (incompatible with --debug and --workers)") - api_parser.add_argument("--proxy", default=None, help="Default used proxy.") + default=None, help="Default provider for chat completion. (incompatible with --reload and --workers)") + api_parser.add_argument("--image-provider", choices=[provider.__name__ for provider in Provider.__providers__ if provider.working and hasattr(provider, "image_models")], + default=None, help="Default provider for image generation. (incompatible with --reload and --workers)"), + api_parser.add_argument("--proxy", default=None, help="Default used proxy. (incompatible with --reload and --workers)") api_parser.add_argument("--workers", type=int, default=None, help="Number of workers.") api_parser.add_argument("--disable-colors", action="store_true", help="Don't use colors.") - api_parser.add_argument("--ignore-cookie-files", action="store_true", help="Don't read .har and cookie files.") - api_parser.add_argument("--g4f-api-key", type=str, default=None, help="Sets an authentication key for your API. (incompatible with --debug and --workers)") + api_parser.add_argument("--ignore-cookie-files", action="store_true", help="Don't read .har and cookie files. 
(incompatible with --reload and --workers)") + api_parser.add_argument("--g4f-api-key", type=str, default=None, help="Sets an authentication key for your API. (incompatible with --reload and --workers)") api_parser.add_argument("--ignored-providers", nargs="+", choices=[provider.__name__ for provider in Provider.__providers__ if provider.working], - default=[], help="List of providers to ignore when processing request. (incompatible with --debug and --workers)") + default=[], help="List of providers to ignore when processing request. (incompatible with --reload and --workers)") + api_parser.add_argument("--reload", action="store_true", help="Enable reloading.") subparsers.add_parser("gui", parents=[gui_parser()], add_help=False) args = parser.parse_args() @@ -39,17 +42,17 @@ def run_api_args(args): ignore_cookie_files=args.ignore_cookie_files, ignored_providers=args.ignored_providers, g4f_api_key=args.g4f_api_key, - defaults={ - "model": args.model, - "provider": args.provider, - "proxy": args.proxy - } + provider=args.provider, + image_provider=args.image_provider, + proxy=args.proxy, + model=args.model ) run_api( bind=args.bind, debug=args.debug, workers=args.workers, - use_colors=not args.disable_colors + use_colors=not args.disable_colors, + reload=args.reload ) if __name__ == "__main__": diff --git a/g4f/client/__init__.py b/g4f/client/__init__.py index 5bb4ba35..5ffe9288 100644 --- a/g4f/client/__init__.py +++ b/g4f/client/__init__.py @@ -1,3 +1,468 @@ -from .stubs import ChatCompletion, ChatCompletionChunk, ImagesResponse -from .client import Client -from .async_client import AsyncClient
\ No newline at end of file +from __future__ import annotations + +import os +import time +import random +import string +import asyncio +import base64 +import aiohttp +import logging +from typing import Union, AsyncIterator, Iterator, Coroutine + +from ..providers.base_provider import AsyncGeneratorProvider +from ..image import ImageResponse, to_image, to_data_uri, is_accepted_format, EXTENSIONS_MAP +from ..typing import Messages, Cookies, Image +from ..providers.types import ProviderType, FinishReason, BaseConversation +from ..errors import NoImageResponseError +from ..providers.retry_provider import IterListProvider +from ..Provider.needs_auth.BingCreateImages import BingCreateImages +from ..requests.aiohttp import get_connector +from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse +from .image_models import ImageModels +from .types import IterResponse, ImageProvider, Client as BaseClient +from .service import get_model_and_provider, get_last_provider, convert_to_provider +from .helper import find_stop, filter_json, filter_none, safe_aclose, to_sync_iter, to_async_iterator + +try: + anext # Python 3.8+ +except NameError: + async def anext(aiter): + try: + return await aiter.__anext__() + except StopAsyncIteration: + raise StopIteration + +# Synchronous iter_response function +def iter_response( + response: Union[Iterator[str], AsyncIterator[str]], + stream: bool, + response_format: dict = None, + max_tokens: int = None, + stop: list = None +) -> Iterator[Union[ChatCompletion, ChatCompletionChunk]]: + content = "" + finish_reason = None + completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28)) + idx = 0 + + if hasattr(response, '__aiter__'): + # It's an async iterator, wrap it into a sync iterator + response = to_sync_iter(response) + + for chunk in response: + if isinstance(chunk, FinishReason): + finish_reason = chunk.reason + break + elif isinstance(chunk, BaseConversation): + yield chunk + continue + + chunk = str(chunk) + content += chunk + + if max_tokens is not None and idx + 1 >= max_tokens: + finish_reason = "length" + + first, content, chunk = find_stop(stop, content, chunk if stream else None) + + if first != -1: + finish_reason = "stop" + + if stream: + yield ChatCompletionChunk(chunk, None, completion_id, int(time.time())) + + if finish_reason is not None: + break + + idx += 1 + + finish_reason = "stop" if finish_reason is None else finish_reason + + if stream: + yield ChatCompletionChunk(None, finish_reason, completion_id, int(time.time())) + else: + if response_format is not None and "type" in response_format: + if response_format["type"] == "json_object": + content = filter_json(content) + yield ChatCompletion(content, finish_reason, completion_id, int(time.time())) + +# Synchronous iter_append_model_and_provider function +def iter_append_model_and_provider(response: Iterator[ChatCompletionChunk]) -> Iterator[ChatCompletionChunk]: + last_provider = None + + for chunk in response: + last_provider = get_last_provider(True) if last_provider is None else last_provider + chunk.model = last_provider.get("model") + chunk.provider = last_provider.get("name") + yield chunk + +async def async_iter_response( + response: AsyncIterator[str], + stream: bool, + response_format: dict = None, + max_tokens: int = None, + stop: list = None +) -> AsyncIterator[Union[ChatCompletion, ChatCompletionChunk]]: + content = "" + finish_reason = None + completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28)) + idx = 0 + + 
try: + async for chunk in response: + if isinstance(chunk, FinishReason): + finish_reason = chunk.reason + break + elif isinstance(chunk, BaseConversation): + yield chunk + continue + + chunk = str(chunk) + content += chunk + idx += 1 + + if max_tokens is not None and idx >= max_tokens: + finish_reason = "length" + + first, content, chunk = find_stop(stop, content, chunk if stream else None) + + if first != -1: + finish_reason = "stop" + + if stream: + yield ChatCompletionChunk(chunk, None, completion_id, int(time.time())) + + if finish_reason is not None: + break + + finish_reason = "stop" if finish_reason is None else finish_reason + + if stream: + yield ChatCompletionChunk(None, finish_reason, completion_id, int(time.time())) + else: + if response_format is not None and "type" in response_format: + if response_format["type"] == "json_object": + content = filter_json(content) + yield ChatCompletion(content, finish_reason, completion_id, int(time.time())) + finally: + if hasattr(response, 'aclose'): + await safe_aclose(response) + +async def async_iter_append_model_and_provider(response: AsyncIterator[ChatCompletionChunk]) -> AsyncIterator: + last_provider = None + try: + async for chunk in response: + last_provider = get_last_provider(True) if last_provider is None else last_provider + chunk.model = last_provider.get("model") + chunk.provider = last_provider.get("name") + yield chunk + finally: + if hasattr(response, 'aclose'): + await safe_aclose(response) + +class Client(BaseClient): + def __init__( + self, + provider: ProviderType = None, + image_provider: ImageProvider = None, + **kwargs + ) -> None: + super().__init__(**kwargs) + self.chat: Chat = Chat(self, provider) + self.images: Images = Images(self, image_provider) + +class Completions: + def __init__(self, client: Client, provider: ProviderType = None): + self.client: Client = client + self.provider: ProviderType = provider + + def create( + self, + messages: Messages, + model: str, + provider: ProviderType = None, + stream: bool = False, + proxy: str = None, + response_format: dict = None, + max_tokens: int = None, + stop: Union[list[str], str] = None, + api_key: str = None, + ignored: list[str] = None, + ignore_working: bool = False, + ignore_stream: bool = False, + **kwargs + ) -> IterResponse: + model, provider = get_model_and_provider( + model, + self.provider if provider is None else provider, + stream, + ignored, + ignore_working, + ignore_stream, + ) + stop = [stop] if isinstance(stop, str) else stop + + response = provider.create_completion( + model, + messages, + stream=stream, + **filter_none( + proxy=self.client.proxy if proxy is None else proxy, + max_tokens=max_tokens, + stop=stop, + api_key=self.client.api_key if api_key is None else api_key + ), + **kwargs + ) + if asyncio.iscoroutinefunction(provider.create_completion): + # Run the asynchronous function in an event loop + response = asyncio.run(response) + if stream and hasattr(response, '__aiter__'): + # It's an async generator, wrap it into a sync iterator + response = to_sync_iter(response) + elif hasattr(response, '__aiter__'): + # If response is an async generator, collect it into a list + response = list(to_sync_iter(response)) + response = iter_response(response, stream, response_format, max_tokens, stop) + response = iter_append_model_and_provider(response) + if stream: + return response + else: + return next(response) + +class Chat: + completions: Completions + + def __init__(self, client: Client, provider: ProviderType = None): + self.completions = 
Completions(client, provider) + +class Images: + def __init__(self, client: Client, provider: ProviderType = None): + self.client: Client = client + self.provider: ProviderType = provider + self.models: ImageModels = ImageModels(client) + + def generate(self, prompt: str, model: str = None, provider: ProviderType = None, response_format: str = "url", proxy: str = None, **kwargs) -> ImagesResponse: + """ + Synchronous generate method that runs the async_generate method in an event loop. + """ + return asyncio.run(self.async_generate(prompt, model, provider, response_format=response_format, proxy=proxy **kwargs)) + + async def async_generate(self, prompt: str, model: str = None, provider: ProviderType = None, response_format: str = "url", proxy: str = None, **kwargs) -> ImagesResponse: + if provider is None: + provider_handler = self.models.get(model, provider or self.provider or BingCreateImages) + elif isinstance(provider, str): + provider_handler = convert_to_provider(provider) + if provider_handler is None: + raise ValueError(f"Unknown model: {model}") + if proxy is None: + proxy = self.client.proxy + + if isinstance(provider_handler, IterListProvider): + if provider_handler.providers: + provider_handler = provider.providers[0] + else: + raise ValueError(f"IterListProvider for model {model} has no providers") + + response = None + if hasattr(provider_handler, "create_async_generator"): + messages = [{"role": "user", "content": prompt}] + async for item in provider_handler.create_async_generator(model, messages, **kwargs): + if isinstance(item, ImageResponse): + response = item + break + elif hasattr(provider, 'create'): + if asyncio.iscoroutinefunction(provider_handler.create): + response = await provider_handler.create(prompt) + else: + response = provider_handler.create(prompt) + if isinstance(response, str): + response = ImageResponse([response], prompt) + else: + raise ValueError(f"Provider {provider} does not support image generation") + if isinstance(response, ImageResponse): + return await self._process_image_response(response, response_format, proxy, model=model, provider=provider) + + raise NoImageResponseError(f"Unexpected response type: {type(response)}") + + async def _process_image_response(self, response: ImageResponse, response_format: str, proxy: str = None, model: str = None, provider: str = None) -> ImagesResponse: + async def process_image_item(session: aiohttp.ClientSession, image_data: str): + if image_data.startswith('http://') or image_data.startswith('https://'): + if response_format == "url": + return Image(url=image_data, revised_prompt=response.alt) + elif response_format == "b64_json": + # Fetch the image data and convert it to base64 + image_content = await self._fetch_image(session, image_data) + file_name = self._save_image(image_data_bytes) + b64_json = base64.b64encode(image_content).decode('utf-8') + return Image(b64_json=b64_json, url=file_name, revised_prompt=response.alt) + else: + # Assume image_data is base64 data or binary + if response_format == "url": + if image_data.startswith('data:image'): + # Remove the data URL scheme and get the base64 data + base64_data = image_data.split(',', 1)[-1] + else: + base64_data = image_data + # Decode the base64 data + image_data_bytes = base64.b64decode(base64_data) + # Convert bytes to an image + file_name = self._save_image(image_data_bytes) + return Image(url=file_name, revised_prompt=response.alt) + elif response_format == "b64_json": + if isinstance(image_data, bytes): + file_name = 
self._save_image(image_data_bytes) + b64_json = base64.b64encode(image_data).decode('utf-8') + else: + b64_json = image_data # If already base64-encoded string + return Image(b64_json=b64_json, url=file_name, revised_prompt=response.alt) + + last_provider = get_last_provider(True) + async with aiohttp.ClientSession(cookies=response.get("cookies"), connector=get_connector(proxy=proxy)) as session: + return ImagesResponse( + await asyncio.gather(*[process_image_item(session, image_data) for image_data in response.get_list()]), + model=last_provider.get("model") if model is None else model, + provider=last_provider.get("name") if provider is None else provider + ) + + async def _fetch_image(self, session: aiohttp.ClientSession, url: str) -> bytes: + # Asynchronously fetch image data from the URL + async with session.get(url) as resp: + if resp.status == 200: + return await resp.read() + else: + raise RuntimeError(f"Failed to fetch image from {url}, status code {resp.status}") + + def _save_image(self, image_data_bytes: bytes) -> str: + os.makedirs('generated_images', exist_ok=True) + image = to_image(image_data_bytes) + file_name = f"generated_images/image_{int(time.time())}_{random.randint(0, 10000)}.{EXTENSIONS_MAP[is_accepted_format(image_data_bytes)]}" + image.save(file_name) + return file_name + + def create_variation(self, image: Union[str, bytes], model: str = None, provider: ProviderType = None, response_format: str = "url", **kwargs) -> ImagesResponse: + return asyncio.run(self.async_create_variation( + image, model, provider, response_format + **kwargs + )) + + async def async_create_variation(self, image: Union[str, bytes], model: str = None, provider: ProviderType = None, response_format: str = "url", proxy: str = None, **kwargs) -> ImagesResponse: + if provider is None: + provider = self.models.get(model, provider or self.provider or BingCreateImages) + if provider is None: + raise ValueError(f"Unknown model: {model}") + if isinstance(provider, str): + provider = convert_to_provider(provider) + if proxy is None: + proxy = self.client.proxy + + if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider): + messages = [{"role": "user", "content": "create a variation of this image"}] + image_data = to_data_uri(image) + generator = None + try: + generator = provider.create_async_generator(model, messages, image=image_data, response_format=response_format, proxy=proxy, **kwargs) + async for response in generator: + if isinstance(response, ImageResponse): + return self._process_image_response(response) + except RuntimeError as e: + if "async generator ignored GeneratorExit" in str(e): + logging.warning("Generator ignored GeneratorExit in create_variation, handling gracefully") + else: + raise + finally: + if generator and hasattr(generator, 'aclose'): + await safe_aclose(generator) + logging.info("AsyncGeneratorProvider processing completed in create_variation") + elif hasattr(provider, 'create_variation'): + if asyncio.iscoroutinefunction(provider.create_variation): + response = await provider.create_variation(image, model=model, response_format=response_format, proxy=proxy, **kwargs) + else: + response = provider.create_variation(image, model=model, response_format=response_format, proxy=proxy, **kwargs) + if isinstance(response, str): + response = ImageResponse([response]) + return self._process_image_response(response) + else: + raise ValueError(f"Provider {provider} does not support image variation") + +class AsyncClient(BaseClient): + def __init__( + self, + 
provider: ProviderType = None, + image_provider: ImageProvider = None, + **kwargs + ) -> None: + super().__init__(**kwargs) + self.chat: AsyncChat = AsyncChat(self, provider) + self.images: AsyncImages = AsyncImages(self, image_provider) + +class AsyncChat: + completions: AsyncCompletions + + def __init__(self, client: AsyncClient, provider: ProviderType = None): + self.completions = AsyncCompletions(client, provider) + +class AsyncCompletions: + def __init__(self, client: AsyncClient, provider: ProviderType = None): + self.client: AsyncClient = client + self.provider: ProviderType = provider + + def create( + self, + messages: Messages, + model: str, + provider: ProviderType = None, + stream: bool = False, + proxy: str = None, + response_format: dict = None, + max_tokens: int = None, + stop: Union[list[str], str] = None, + api_key: str = None, + ignored: list[str] = None, + ignore_working: bool = False, + ignore_stream: bool = False, + **kwargs + ) -> Union[Coroutine[ChatCompletion], AsyncIterator[ChatCompletionChunk]]: + model, provider = get_model_and_provider( + model, + self.provider if provider is None else provider, + stream, + ignored, + ignore_working, + ignore_stream, + ) + stop = [stop] if isinstance(stop, str) else stop + + response = provider.create_completion( + model, + messages, + stream=stream, + **filter_none( + proxy=self.client.proxy if proxy is None else proxy, + max_tokens=max_tokens, + stop=stop, + api_key=self.client.api_key if api_key is None else api_key + ), + **kwargs + ) + + if not isinstance(response, AsyncIterator): + response = to_async_iterator(response) + response = async_iter_response(response, stream, response_format, max_tokens, stop) + response = async_iter_append_model_and_provider(response) + return response if stream else anext(response) + +class AsyncImages(Images): + def __init__(self, client: AsyncClient, provider: ImageProvider = None): + self.client: AsyncClient = client + self.provider: ImageProvider = provider + self.models: ImageModels = ImageModels(client) + + async def generate(self, prompt: str, model: str = None, provider: ProviderType = None, response_format: str = "url", **kwargs) -> ImagesResponse: + return await self.async_generate(prompt, model, provider, response_format, **kwargs) + + async def create_variation(self, image: Union[str, bytes], model: str = None, provider: ProviderType = None, response_format: str = "url", **kwargs) -> ImagesResponse: + return await self.async_create_variation( + image, model, provider, response_format, **kwargs + )
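# Usage sketch for the reworked client classes above (illustrative only: the
# model name "gpt-4o-mini" and the OpenAI-style .choices[...] access are
# assumptions, and actual provider/model availability varies at runtime).
import asyncio
from g4f.client import Client, AsyncClient

# Synchronous, non-streaming: Completions.create() returns a ChatCompletion.
client = Client()
completion = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Say hello in one short sentence."}],
)
print(completion.choices[0].message.content)

# Asynchronous, streaming: AsyncCompletions.create() hands back an async
# iterator of ChatCompletionChunk objects when stream=True.
async def main():
    async_client = AsyncClient()
    async for chunk in async_client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Count to three."}],
        stream=True,
    ):
        print(chunk.choices[0].delta.content or "", end="")

asyncio.run(main())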
\ No newline at end of file diff --git a/g4f/client/async_client.py b/g4f/client/async_client.py deleted file mode 100644 index 2fe4640b..00000000 --- a/g4f/client/async_client.py +++ /dev/null @@ -1,275 +0,0 @@ -from __future__ import annotations - -import time -import random -import string -import asyncio -import base64 -from aiohttp import ClientSession, BaseConnector - -from .types import Client as BaseClient -from .types import ProviderType, FinishReason -from .stubs import ChatCompletion, ChatCompletionChunk, ImagesResponse, Image -from .types import AsyncIterResponse, ImageProvider -from .image_models import ImageModels -from .helper import filter_json, find_stop, filter_none, cast_iter_async -from .service import get_last_provider, get_model_and_provider -from ..Provider import ProviderUtils -from ..typing import Union, Messages, AsyncIterator, ImageType -from ..errors import NoImageResponseError, ProviderNotFoundError -from ..requests.aiohttp import get_connector -from ..providers.conversation import BaseConversation -from ..image import ImageResponse as ImageProviderResponse, ImageDataResponse - -try: - anext -except NameError: - async def anext(iter): - async for chunk in iter: - return chunk - -async def iter_response( - response: AsyncIterator[str], - stream: bool, - response_format: dict = None, - max_tokens: int = None, - stop: list = None -) -> AsyncIterResponse: - content = "" - finish_reason = None - completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28)) - count: int = 0 - async for chunk in response: - if isinstance(chunk, FinishReason): - finish_reason = chunk.reason - break - elif isinstance(chunk, BaseConversation): - yield chunk - continue - content += str(chunk) - count += 1 - if max_tokens is not None and count >= max_tokens: - finish_reason = "length" - first, content, chunk = find_stop(stop, content, chunk) - if first != -1: - finish_reason = "stop" - if stream: - yield ChatCompletionChunk(chunk, None, completion_id, int(time.time())) - if finish_reason is not None: - break - finish_reason = "stop" if finish_reason is None else finish_reason - if stream: - yield ChatCompletionChunk(None, finish_reason, completion_id, int(time.time())) - else: - if response_format is not None and "type" in response_format: - if response_format["type"] == "json_object": - content = filter_json(content) - yield ChatCompletion(content, finish_reason, completion_id, int(time.time())) - -async def iter_append_model_and_provider(response: AsyncIterResponse) -> AsyncIterResponse: - last_provider = None - async for chunk in response: - last_provider = get_last_provider(True) if last_provider is None else last_provider - chunk.model = last_provider.get("model") - chunk.provider = last_provider.get("name") - yield chunk - -class AsyncClient(BaseClient): - def __init__( - self, - provider: ProviderType = None, - image_provider: ImageProvider = None, - **kwargs - ): - super().__init__(**kwargs) - self.chat: Chat = Chat(self, provider) - self.images: Images = Images(self, image_provider) - -def create_response( - messages: Messages, - model: str, - provider: ProviderType = None, - stream: bool = False, - proxy: str = None, - max_tokens: int = None, - stop: list[str] = None, - api_key: str = None, - **kwargs -): - has_asnyc = hasattr(provider, "create_async_generator") - if has_asnyc: - create = provider.create_async_generator - else: - create = provider.create_completion - response = create( - model, messages, - stream=stream, - **filter_none( - proxy=proxy, - 
max_tokens=max_tokens, - stop=stop, - api_key=api_key - ), - **kwargs - ) - if not has_asnyc: - response = cast_iter_async(response) - return response - -class Completions(): - def __init__(self, client: AsyncClient, provider: ProviderType = None): - self.client: AsyncClient = client - self.provider: ProviderType = provider - - def create( - self, - messages: Messages, - model: str, - provider: ProviderType = None, - stream: bool = False, - proxy: str = None, - max_tokens: int = None, - stop: Union[list[str], str] = None, - api_key: str = None, - response_format: dict = None, - ignored : list[str] = None, - ignore_working: bool = False, - ignore_stream: bool = False, - **kwargs - ) -> Union[ChatCompletion, AsyncIterator[ChatCompletionChunk]]: - model, provider = get_model_and_provider( - model, - self.provider if provider is None else provider, - stream, - ignored, - ignore_working, - ignore_stream - ) - stop = [stop] if isinstance(stop, str) else stop - response = create_response( - messages, model, - provider, stream, - proxy=self.client.get_proxy() if proxy is None else proxy, - max_tokens=max_tokens, - stop=stop, - api_key=self.client.api_key if api_key is None else api_key, - **kwargs - ) - response = iter_response(response, stream, response_format, max_tokens, stop) - response = iter_append_model_and_provider(response) - return response if stream else anext(response) - -class Chat(): - completions: Completions - - def __init__(self, client: AsyncClient, provider: ProviderType = None): - self.completions = Completions(client, provider) - -async def iter_image_response( - response: AsyncIterator, - response_format: str = None, - connector: BaseConnector = None, - proxy: str = None -) -> Union[ImagesResponse, None]: - async for chunk in response: - if isinstance(chunk, ImageProviderResponse): - if response_format == "b64_json": - async with ClientSession( - connector=get_connector(connector, proxy), - cookies=chunk.options.get("cookies") - ) as session: - async def fetch_image(image): - async with session.get(image) as response: - return base64.b64encode(await response.content.read()).decode() - images = await asyncio.gather(*[fetch_image(image) for image in chunk.get_list()]) - return ImagesResponse([Image(None, image, chunk.alt) for image in images], int(time.time())) - return ImagesResponse([Image(image, None, chunk.alt) for image in chunk.get_list()], int(time.time())) - elif isinstance(chunk, ImageDataResponse): - return ImagesResponse([Image(None, image, chunk.alt) for image in chunk.get_list()], int(time.time())) - -def create_image(provider: ProviderType, prompt: str, model: str = "", **kwargs) -> AsyncIterator: - if isinstance(provider, type) and provider.__name__ == "You": - kwargs["chat_mode"] = "create" - else: - prompt = f"create a image with: {prompt}" - return provider.create_async_generator( - model, - [{"role": "user", "content": prompt}], - stream=True, - **kwargs - ) - -class Images(): - def __init__(self, client: AsyncClient, provider: ImageProvider = None): - self.client: AsyncClient = client - self.provider: ImageProvider = provider - self.models: ImageModels = ImageModels(client) - - def get_provider(self, model: str, provider: ProviderType = None): - if isinstance(provider, str): - if provider in ProviderUtils.convert: - provider = ProviderUtils.convert[provider] - else: - raise ProviderNotFoundError(f'Provider not found: {provider}') - else: - provider = self.models.get(model, self.provider) - return provider - - async def generate( - self, - prompt, - model: 
str = "", - provider: ProviderType = None, - response_format: str = None, - connector: BaseConnector = None, - proxy: str = None, - **kwargs - ) -> ImagesResponse: - provider = self.get_provider(model, provider) - if hasattr(provider, "create_async_generator"): - response = create_image( - provider, - prompt, - **filter_none( - response_format=response_format, - connector=connector, - proxy=self.client.get_proxy() if proxy is None else proxy, - ), - **kwargs - ) - else: - response = await provider.create_async(prompt) - return ImagesResponse([Image(image) for image in response.get_list()]) - image = await iter_image_response(response, response_format, connector, proxy) - if image is None: - raise NoImageResponseError() - return image - - async def create_variation( - self, - image: ImageType, - model: str = None, - response_format: str = None, - connector: BaseConnector = None, - proxy: str = None, - **kwargs - ): - provider = self.get_provider(model, provider) - result = None - if hasattr(provider, "create_async_generator"): - response = provider.create_async_generator( - "", - [{"role": "user", "content": "create a image like this"}], - stream=True, - image=image, - **filter_none( - response_format=response_format, - connector=connector, - proxy=self.client.get_proxy() if proxy is None else proxy, - ), - **kwargs - ) - result = iter_image_response(response, response_format, connector, proxy) - if result is None: - raise NoImageResponseError() - return result diff --git a/g4f/client/client.py b/g4f/client/client.py deleted file mode 100644 index 56644913..00000000 --- a/g4f/client/client.py +++ /dev/null @@ -1,307 +0,0 @@ -from __future__ import annotations - -import os -import time -import random -import string -import logging -import asyncio -from typing import Union -from ..providers.base_provider import AsyncGeneratorProvider -from ..image import ImageResponse, to_image, to_data_uri -from ..typing import Union, Iterator, Messages, ImageType -from ..providers.types import BaseProvider, ProviderType, FinishReason -from ..providers.conversation import BaseConversation -from ..image import ImageResponse as ImageProviderResponse -from ..errors import NoImageResponseError -from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse -from .image_models import ImageModels -from .types import IterResponse, ImageProvider -from .types import Client as BaseClient -from .service import get_model_and_provider, get_last_provider -from .helper import find_stop, filter_json, filter_none -from ..models import ModelUtils -from ..Provider import IterListProvider - - -def iter_response( - response: Iterator[str], - stream: bool, - response_format: dict = None, - max_tokens: int = None, - stop: list = None -) -> IterResponse: - content = "" - finish_reason = None - completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28)) - - for idx, chunk in enumerate(response): - if isinstance(chunk, FinishReason): - finish_reason = chunk.reason - break - elif isinstance(chunk, BaseConversation): - yield chunk - continue - - content += str(chunk) - - if max_tokens is not None and idx + 1 >= max_tokens: - finish_reason = "length" - - first, content, chunk = find_stop(stop, content, chunk if stream else None) - - if first != -1: - finish_reason = "stop" - - if stream: - yield ChatCompletionChunk(chunk, None, completion_id, int(time.time())) - - if finish_reason is not None: - break - - finish_reason = "stop" if finish_reason is None else finish_reason - - if stream: - yield 
ChatCompletionChunk(None, finish_reason, completion_id, int(time.time())) - else: - if response_format is not None and "type" in response_format: - if response_format["type"] == "json_object": - content = filter_json(content) - yield ChatCompletion(content, finish_reason, completion_id, int(time.time())) - - -def iter_append_model_and_provider(response: IterResponse) -> IterResponse: - last_provider = None - for chunk in response: - last_provider = get_last_provider(True) if last_provider is None else last_provider - chunk.model = last_provider.get("model") - chunk.provider = last_provider.get("name") - yield chunk - - -class Client(BaseClient): - def __init__( - self, - provider: ProviderType = None, - image_provider: ImageProvider = None, - **kwargs - ) -> None: - super().__init__(**kwargs) - self.chat: Chat = Chat(self, provider) - self._images: Images = Images(self, image_provider) - - @property - def images(self) -> Images: - return self._images - - async def async_images(self) -> Images: - return self._images - - -class Completions: - def __init__(self, client: Client, provider: ProviderType = None): - self.client: Client = client - self.provider: ProviderType = provider - - def create( - self, - messages: Messages, - model: str, - provider: ProviderType = None, - stream: bool = False, - proxy: str = None, - response_format: dict = None, - max_tokens: int = None, - stop: Union[list[str], str] = None, - api_key: str = None, - ignored: list[str] = None, - ignore_working: bool = False, - ignore_stream: bool = False, - **kwargs - ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]: - model, provider = get_model_and_provider( - model, - self.provider if provider is None else provider, - stream, - ignored, - ignore_working, - ignore_stream, - ) - - stop = [stop] if isinstance(stop, str) else stop - - response = provider.create_completion( - model, - messages, - stream=stream, - **filter_none( - proxy=self.client.get_proxy() if proxy is None else proxy, - max_tokens=max_tokens, - stop=stop, - api_key=self.client.api_key if api_key is None else api_key - ), - **kwargs - ) - - response = iter_response(response, stream, response_format, max_tokens, stop) - response = iter_append_model_and_provider(response) - - return response if stream else next(response) - - -class Chat: - completions: Completions - - def __init__(self, client: Client, provider: ProviderType = None): - self.completions = Completions(client, provider) - - -def iter_image_response(response: Iterator) -> Union[ImagesResponse, None]: - logging.info("Starting iter_image_response") - response_list = list(response) - logging.info(f"Response list: {response_list}") - - for chunk in response_list: - logging.info(f"Processing chunk: {chunk}") - if isinstance(chunk, ImageProviderResponse): - logging.info("Found ImageProviderResponse") - return ImagesResponse([Image(image) for image in chunk.get_list()]) - - logging.warning("No ImageProviderResponse found in the response") - return None - - -def create_image(client: Client, provider: ProviderType, prompt: str, model: str = "", **kwargs) -> Iterator: - logging.info(f"Creating image with provider: {provider}, model: {model}, prompt: {prompt}") - - if isinstance(provider, type) and provider.__name__ == "You": - kwargs["chat_mode"] = "create" - else: - prompt = f"create an image with: {prompt}" - - response = provider.create_completion( - model, - [{"role": "user", "content": prompt}], - stream=True, - proxy=client.get_proxy(), - **kwargs - ) - - logging.info(f"Response from 
create_completion: {response}") - return response - - -class Images: - def __init__(self, client: 'Client', provider: ImageProvider = None): - self.client: 'Client' = client - self.provider: ImageProvider = provider - self.models: ImageModels = ImageModels(client) - - def generate(self, prompt: str, model: str = None, **kwargs) -> ImagesResponse: - logging.info(f"Starting synchronous image generation for model: {model}, prompt: {prompt}") - try: - loop = asyncio.get_event_loop() - except RuntimeError: - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - - try: - result = loop.run_until_complete(self.async_generate(prompt, model, **kwargs)) - logging.info(f"Synchronous image generation completed. Result: {result}") - return result - except Exception as e: - logging.error(f"Error in synchronous image generation: {str(e)}") - raise - finally: - if loop.is_running(): - loop.close() - - async def async_generate(self, prompt: str, model: str = None, **kwargs) -> ImagesResponse: - logging.info(f"Generating image for model: {model}, prompt: {prompt}") - provider = self.models.get(model, self.provider) - if provider is None: - raise ValueError(f"Unknown model: {model}") - - logging.info(f"Provider: {provider}") - - if isinstance(provider, IterListProvider): - if provider.providers: - provider = provider.providers[0] - logging.info(f"Using first provider from IterListProvider: {provider}") - else: - raise ValueError(f"IterListProvider for model {model} has no providers") - - if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider): - logging.info("Using AsyncGeneratorProvider") - messages = [{"role": "user", "content": prompt}] - async for response in provider.create_async_generator(model, messages, **kwargs): - if isinstance(response, ImageResponse): - return self._process_image_response(response) - elif isinstance(response, str): - image_response = ImageResponse([response], prompt) - return self._process_image_response(image_response) - elif hasattr(provider, 'create'): - logging.info("Using provider's create method") - if asyncio.iscoroutinefunction(provider.create): - response = await provider.create(prompt) - else: - response = provider.create(prompt) - - if isinstance(response, ImageResponse): - return self._process_image_response(response) - elif isinstance(response, str): - image_response = ImageResponse([response], prompt) - return self._process_image_response(image_response) - else: - raise ValueError(f"Provider {provider} does not support image generation") - - logging.error(f"Unexpected response type: {type(response)}") - raise NoImageResponseError(f"Unexpected response type: {type(response)}") - - def _process_image_response(self, response: ImageResponse) -> ImagesResponse: - processed_images = [] - for image_data in response.get_list(): - if image_data.startswith('http://') or image_data.startswith('https://'): - processed_images.append(Image(url=image_data)) - else: - image = to_image(image_data) - file_name = self._save_image(image) - processed_images.append(Image(url=file_name)) - return ImagesResponse(processed_images) - - def _save_image(self, image: 'PILImage') -> str: - os.makedirs('generated_images', exist_ok=True) - file_name = f"generated_images/image_{int(time.time())}.png" - image.save(file_name) - return file_name - - async def create_variation(self, image: Union[str, bytes], model: str = None, **kwargs): - provider = self.models.get(model, self.provider) - if provider is None: - raise ValueError(f"Unknown model: {model}") - - if 
isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider): - messages = [{"role": "user", "content": "create a variation of this image"}] - image_data = to_data_uri(image) - async for response in provider.create_async_generator(model, messages, image=image_data, **kwargs): - if isinstance(response, ImageResponse): - return self._process_image_response(response) - elif isinstance(response, str): - image_response = ImageResponse([response], "Image variation") - return self._process_image_response(image_response) - elif hasattr(provider, 'create_variation'): - if asyncio.iscoroutinefunction(provider.create_variation): - response = await provider.create_variation(image, **kwargs) - else: - response = provider.create_variation(image, **kwargs) - - if isinstance(response, ImageResponse): - return self._process_image_response(response) - elif isinstance(response, str): - image_response = ImageResponse([response], "Image variation") - return self._process_image_response(image_response) - else: - raise ValueError(f"Provider {provider} does not support image variation") - - raise NoImageResponseError("Failed to create image variation") - diff --git a/g4f/client/helper.py b/g4f/client/helper.py index c502d478..71bfd38a 100644 --- a/g4f/client/helper.py +++ b/g4f/client/helper.py @@ -1,7 +1,12 @@ from __future__ import annotations import re -from typing import Iterable, AsyncIterator +import queue +import threading +import logging +import asyncio + +from typing import AsyncIterator, Iterator, AsyncGenerator def filter_json(text: str) -> str: """ @@ -42,6 +47,40 @@ def filter_none(**kwargs) -> dict: if value is not None } -async def cast_iter_async(iter: Iterable) -> AsyncIterator: - for chunk in iter: - yield chunk
\ No newline at end of file +async def safe_aclose(generator: AsyncGenerator) -> None: + try: + await generator.aclose() + except Exception as e: + logging.warning(f"Error while closing generator: {e}") + +# Helper function to convert an async generator to a synchronous iterator +def to_sync_iter(async_gen: AsyncIterator) -> Iterator: + q = queue.Queue() + loop = asyncio.new_event_loop() + done = object() + + def _run(): + asyncio.set_event_loop(loop) + + async def iterate(): + try: + async for item in async_gen: + q.put(item) + finally: + q.put(done) + + loop.run_until_complete(iterate()) + loop.close() + + threading.Thread(target=_run).start() + + while True: + item = q.get() + if item is done: + break + yield item + +# Helper function to convert a synchronous iterator to an async iterator +async def to_async_iterator(iterator: Iterator) -> AsyncIterator: + for item in iterator: + yield item
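# Minimal sketch of the bridge helpers defined above: to_sync_iter drains an
# async generator on a background thread with its own event loop so plain
# synchronous code can iterate it; countdown() here is a made-up example.
import asyncio
from g4f.client.helper import to_sync_iter, to_async_iterator

async def countdown():
    for i in range(3, 0, -1):
        await asyncio.sleep(0)      # stand-in for real asynchronous work
        yield i

for value in to_sync_iter(countdown()):
    print(value)                    # prints 3, 2, 1 from a regular for-loop

# to_async_iterator goes the other way, wrapping a plain iterable so it can be
# consumed with "async for" (used when a provider yields a sync generator).
async def consume():
    async for value in to_async_iterator([1, 2, 3]):
        print(value)

asyncio.run(consume())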
\ No newline at end of file diff --git a/g4f/client/service.py b/g4f/client/service.py index 5fdb150c..aa209b22 100644 --- a/g4f/client/service.py +++ b/g4f/client/service.py @@ -55,7 +55,6 @@ def get_model_and_provider(model : Union[Model, str], provider = convert_to_provider(provider) if isinstance(model, str): - if model in ModelUtils.convert: model = ModelUtils.convert[model] @@ -75,11 +74,11 @@ def get_model_and_provider(model : Union[Model, str], if not ignore_working and not provider.working: raise ProviderNotWorkingError(f'{provider.__name__} is not working') - if not ignore_working and isinstance(provider, BaseRetryProvider): - provider.providers = [p for p in provider.providers if p.working] - - if ignored and isinstance(provider, BaseRetryProvider): - provider.providers = [p for p in provider.providers if p.__name__ not in ignored] + if isinstance(provider, BaseRetryProvider): + if not ignore_working: + provider.providers = [p for p in provider.providers if p.working] + if ignored: + provider.providers = [p for p in provider.providers if p.__name__ not in ignored] if not ignore_stream and not provider.supports_stream and stream: raise StreamNotSupportedError(f'{provider.__name__} does not support "stream" argument') @@ -95,7 +94,7 @@ def get_model_and_provider(model : Union[Model, str], return model, provider -def get_last_provider(as_dict: bool = False) -> Union[ProviderType, dict[str, str]]: +def get_last_provider(as_dict: bool = False) -> Union[ProviderType, dict[str, str], None]: """ Retrieves the last used provider. @@ -108,11 +107,14 @@ def get_last_provider(as_dict: bool = False) -> Union[ProviderType, dict[str, st last = debug.last_provider if isinstance(last, BaseRetryProvider): last = last.last_provider - if last and as_dict: - return { - "name": last.__name__, - "url": last.url, - "model": debug.last_model, - "label": last.label if hasattr(last, "label") else None - } + if as_dict: + if last: + return { + "name": last.__name__, + "url": last.url, + "model": debug.last_model, + "label": getattr(last, "label", None) if hasattr(last, "label") else None + } + else: + return {} return last
\ No newline at end of file diff --git a/g4f/client/stubs.py b/g4f/client/stubs.py index 8cf2bcba..b38c9f6c 100644 --- a/g4f/client/stubs.py +++ b/g4f/client/stubs.py @@ -1,6 +1,7 @@ from __future__ import annotations from typing import Union +from time import time class Model(): ... @@ -108,8 +109,18 @@ class Image(Model): return self.__dict__ class ImagesResponse(Model): - def __init__(self, data: list[Image], created: int = 0) -> None: + data: list[Image] + model: str + provider: str + created: int + + def __init__(self, data: list[Image], created: int = None, model: str = None, provider: str = None) -> None: self.data = data + if created is None: + created = int(time()) + self.model = model + if provider is not None: + self.provider = provider self.created = created def to_json(self): diff --git a/g4f/client/types.py b/g4f/client/types.py index 100be432..4f252ba9 100644 --- a/g4f/client/types.py +++ b/g4f/client/types.py @@ -11,7 +11,17 @@ Proxies = Union[dict, str] IterResponse = Iterator[Union[ChatCompletion, ChatCompletionChunk]] AsyncIterResponse = AsyncIterator[Union[ChatCompletion, ChatCompletionChunk]] -class ClientProxyMixin(): +class Client(): + def __init__( + self, + api_key: str = None, + proxies: Proxies = None, + **kwargs + ) -> None: + self.api_key: str = api_key + self.proxies= proxies + self.proxy: str = self.get_proxy() + def get_proxy(self) -> Union[str, None]: if isinstance(self.proxies, str): return self.proxies @@ -20,14 +30,4 @@ class ClientProxyMixin(): elif "all" in self.proxies: return self.proxies["all"] elif "https" in self.proxies: - return self.proxies["https"] - -class Client(ClientProxyMixin): - def __init__( - self, - api_key: str = None, - proxies: Proxies = None, - **kwargs - ) -> None: - self.api_key: str = api_key - self.proxies: Proxies = proxies
\ No newline at end of file + return self.proxies["https"]
\ No newline at end of file diff --git a/g4f/cookies.py b/g4f/cookies.py index 0a25c41e..8d535ce7 100644 --- a/g4f/cookies.py +++ b/g4f/cookies.py @@ -34,6 +34,7 @@ DOMAINS = [ "www.whiterabbitneo.com", "huggingface.co", "chat.reka.ai", + "chatgpt.com" ] if has_browser_cookie3 and os.environ.get('DBUS_SESSION_BUS_ADDRESS') == "/dev/null": @@ -180,4 +181,4 @@ def _g4f(domain_name: str) -> list: return [] user_data_dir = user_config_dir("g4f") cookie_file = os.path.join(user_data_dir, "Default", "Cookies") - return [] if not os.path.exists(cookie_file) else chrome(cookie_file, domain_name)
\ No newline at end of file + return [] if not os.path.exists(cookie_file) else chrome(cookie_file, domain_name) diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html index 1a660062..e650d7e0 100644 --- a/g4f/gui/client/index.html +++ b/g4f/gui/client/index.html @@ -72,7 +72,7 @@ </button> <div class="info"> <i class="fa-brands fa-discord"></i> - <span class="convo-title">discord ~ <a href="https://discord.gg/XfybzPXPH5">discord.gg/XfybzPXPH5</a> + <span class="convo-title">discord ~ <a href="https://discord.gghttps://discord.gg/6yrm7H4B">discord.gg/6yrm7H4B</a> </span> </div> <div class="info"> @@ -224,28 +224,35 @@ </div> </div> <div class="buttons"> - <div class="field"> - <select name="model" id="model"> - <option value="">Model: Default</option> - <option value="gpt-4">gpt-4</option> - <option value="gpt-3.5-turbo">gpt-3.5-turbo</option> - <option value="llama-3-70b-chat">llama-3-70b-chat</option> - <option value="llama-3.1-70b">llama-3.1-70b</option> - <option value="gemini-pro">gemini-pro</option> - <option value="">----</option> - </select> - <select name="model2" id="model2" class="hidden"></select> - </div> - <div class="field"> - <select name="provider" id="provider"> - <option value="">Provider: Auto</option> - <option value="Bing">Bing</option> - <option value="OpenaiChat">OpenAI ChatGPT</option> - <option value="Gemini">Gemini</option> - <option value="Liaobots">Liaobots</option> - <option value="MetaAI">Meta AI</option> - <option value="You">You</option> - <option value="">----</option> + <div class="field"> + <select name="model" id="model"> + <option value="">Model: Default</option> + <option value="gpt-4">gpt-4</option> + <option value="gpt-4o">gpt-4o</option> + <option value="gpt-4o-mini">gpt-4o-mini</option> + <option value="llama-3.1-70b">llama-3.1-70b</option> + <option value="llama-3.1-70b">llama-3.1-405b</option> + <option value="llama-3.1-70b">mixtral-8x7b</option> + <option value="gemini-pro">gemini-pro</option> + <option value="gemini-flash">gemini-flash</option> + <option value="claude-3-haiku">claude-3-haiku</option> + <option value="claude-3.5-sonnet">claude-3.5-sonnet</option> + <option disabled="disabled">----</option> + </select> + <select name="model2" id="model2" class="hidden"></select> + </div> + <div class="field"> + <select name="provider" id="provider"> + <option value="">Provider: Auto</option> + <option value="OpenaiChat">OpenAI ChatGPT</option> + <option value="ChatGpt">ChatGpt</option> + <option value="Gemini">Gemini</option> + <option value="MetaAI">Meta AI</option> + <option value="DeepInfraChat">DeepInfraChat</option> + <option value="Blackbox">Blackbox</option> + <option value="DDG">DuckDuckGo</option> + <option value="Pizzagpt">Pizzagpt</option> + <option disabled="disabled">----</option> </select> </div> </div> diff --git a/g4f/gui/client/static/css/style.css b/g4f/gui/client/static/css/style.css index f3a4708d..441e2042 100644 --- a/g4f/gui/client/static/css/style.css +++ b/g4f/gui/client/static/css/style.css @@ -87,12 +87,9 @@ body { } body { - padding: 10px; background: var(--colour-1); color: var(--colour-3); height: 100vh; - max-width: 1600px; - margin: auto; } .row { @@ -1146,4 +1143,4 @@ a:-webkit-any-link { .message.regenerate { opacity: 1; } -}
\ No newline at end of file +} diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js index 9790b261..42ddb129 100644 --- a/g4f/gui/client/static/js/chat.v1.js +++ b/g4f/gui/client/static/js/chat.v1.js @@ -57,6 +57,25 @@ function filter_message(text) { ) } +function fallback_clipboard (text) { + var textBox = document.createElement("textarea"); + textBox.value = text; + textBox.style.top = "0"; + textBox.style.left = "0"; + textBox.style.position = "fixed"; + document.body.appendChild(textBox); + textBox.focus(); + textBox.select(); + try { + var success = document.execCommand('copy'); + var msg = success ? 'succeeded' : 'failed'; + console.log('Clipboard Fallback: Copying text command ' + msg); + } catch (e) { + console.error('Clipboard Fallback: Unable to copy', e); + } + document.body.removeChild(textBox); +} + hljs.addPlugin(new CopyButtonPlugin()); let typesetPromise = Promise.resolve(); const highlight = (container) => { @@ -88,18 +107,31 @@ const register_message_buttons = async () => { }) } }); + document.querySelectorAll(".message .fa-clipboard").forEach(async (el) => { if (!("click" in el.dataset)) { el.dataset.click = "true"; el.addEventListener("click", async () => { const message_el = el.parentElement.parentElement.parentElement; const copyText = await get_message(window.conversation_id, message_el.dataset.index); - navigator.clipboard.writeText(copyText); + + try { + if (!navigator.clipboard) { + throw new Error("navigator.clipboard: Clipboard API unavailable."); + } + await navigator.clipboard.writeText(copyText); + } catch (e) { + console.error(e); + console.error("Clipboard API writeText() failed! Fallback to document.exec(\"copy\")..."); + fallback_clipboard(copyText); + } + el.classList.add("clicked"); setTimeout(() => el.classList.remove("clicked"), 1000); }) } }); + document.querySelectorAll(".message .fa-volume-high").forEach(async (el) => { if (!("click" in el.dataset)) { el.dataset.click = "true"; @@ -306,6 +338,14 @@ const prepare_messages = (messages, message_index = -1) => { messages = messages.filter((_, index) => message_index >= index); } + let new_messages = []; + if (systemPrompt?.value) { + new_messages.push({ + "role": "system", + "content": systemPrompt.value + }); + } + // Remove history, if it's selected if (document.getElementById('history')?.checked) { if (message_index == null) { @@ -315,13 +355,6 @@ const prepare_messages = (messages, message_index = -1) => { } } - let new_messages = []; - if (systemPrompt?.value) { - new_messages.push({ - "role": "system", - "content": systemPrompt.value - }); - } messages.forEach((new_message) => { // Include only not regenerated messages if (new_message && !new_message.regenerate) { @@ -334,6 +367,7 @@ const prepare_messages = (messages, message_index = -1) => { return new_messages; } + async function add_message_chunk(message) { if (message.type == "conversation") { console.info("Conversation used:", message.conversation) @@ -1424,4 +1458,4 @@ if (SpeechRecognition) { recognition.start(); } }); -}
\ No newline at end of file +} diff --git a/g4f/gui/client/static/js/highlightjs-copy.min.js b/g4f/gui/client/static/js/highlightjs-copy.min.js index ac11d33e..cd8ae957 100644 --- a/g4f/gui/client/static/js/highlightjs-copy.min.js +++ b/g4f/gui/client/static/js/highlightjs-copy.min.js @@ -1 +1,54 @@ -class CopyButtonPlugin{constructor(options={}){self.hook=options.hook;self.callback=options.callback}"after:highlightElement"({el,text}){let button=Object.assign(document.createElement("button"),{innerHTML:"Copy",className:"hljs-copy-button"});button.dataset.copied=false;el.parentElement.classList.add("hljs-copy-wrapper");el.parentElement.appendChild(button);el.parentElement.style.setProperty("--hljs-theme-background",window.getComputedStyle(el).backgroundColor);button.onclick=function(){if(!navigator.clipboard)return;let newText=text;if(hook&&typeof hook==="function"){newText=hook(text,el)||text}navigator.clipboard.writeText(newText).then(function(){button.innerHTML="Copied!";button.dataset.copied=true;let alert=Object.assign(document.createElement("div"),{role:"status",className:"hljs-copy-alert",innerHTML:"Copied to clipboard"});el.parentElement.appendChild(alert);setTimeout(()=>{button.innerHTML="Copy";button.dataset.copied=false;el.parentElement.removeChild(alert);alert=null},2e3)}).then(function(){if(typeof callback==="function")return callback(newText,el)})}}}
\ No newline at end of file +class CopyButtonPlugin { + constructor(options = {}) { + self.hook = options.hook; + self.callback = options.callback + } + "after:highlightElement"({ + el, + text + }) { + let button = Object.assign(document.createElement("button"), { + innerHTML: "Copy", + className: "hljs-copy-button" + }); + button.dataset.copied = false; + el.parentElement.classList.add("hljs-copy-wrapper"); + el.parentElement.appendChild(button); + el.parentElement.style.setProperty("--hljs-theme-background", window.getComputedStyle(el).backgroundColor); + button.onclick = async () => { + let newText = text; + if (hook && typeof hook === "function") { + newText = hook(text, el) || text + } + try { + if (!navigator.clipboard) { + throw new Error("navigator.clipboard: Clipboard API unavailable."); + } + await navigator.clipboard.writeText(newText); + } catch (e) { + console.error(e); + console.error("Clipboard API writeText() failed! Fallback to document.exec(\"copy\")..."); + fallback_clipboard(newText); + } + button.innerHTML = "Copied!"; + button.dataset.copied = true; + let alert = Object.assign(document.createElement("div"), { + role: "status", + className: "hljs-copy-alert", + innerHTML: "Copied to clipboard" + }); + el.parentElement.appendChild(alert); + setTimeout(() => { + button.innerHTML = "Copy"; + button.dataset.copied = false; + el.parentElement.removeChild(alert); + alert = null + }, 2e3) + } + + + if (typeof callback === "function") return callback(newText, el); + + } + +} diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py index c984abec..dafcb5d4 100644 --- a/g4f/gui/server/api.py +++ b/g4f/gui/server/api.py @@ -2,13 +2,11 @@ from __future__ import annotations import logging import os -import os.path import uuid import asyncio import time -import base64 from aiohttp import ClientSession -from typing import Iterator, Optional +from typing import Iterator, Optional, AsyncIterator, Union from flask import send_from_directory from g4f import version, models @@ -21,21 +19,22 @@ from g4f.Provider import ProviderType, __providers__, __map__ from g4f.providers.base_provider import ProviderModelMixin, FinishReason from g4f.providers.conversation import BaseConversation -conversations: dict[dict[str, BaseConversation]] = {} +logger = logging.getLogger(__name__) + +# Define the directory for generated images images_dir = "./generated_images" -class Api(): +# Function to ensure the images directory exists +def ensure_images_dir(): + if not os.path.exists(images_dir): + os.makedirs(images_dir) - @staticmethod - def get_models() -> list[str]: - """ - Return a list of all models. +conversations: dict[dict[str, BaseConversation]] = {} - Fetches and returns a list of all available models in the system. - Returns: - List[str]: A list of model names. 
- """ +class Api: + @staticmethod + def get_models() -> list[str]: return models._all_models @staticmethod @@ -43,14 +42,11 @@ class Api(): if provider in __map__: provider: ProviderType = __map__[provider] if issubclass(provider, ProviderModelMixin): - return [{"model": model, "default": model == provider.default_model} for model in provider.get_models()] - elif provider.supports_gpt_35_turbo or provider.supports_gpt_4: return [ - *([{"model": "gpt-4", "default": not provider.supports_gpt_4}] if provider.supports_gpt_4 else []), - *([{"model": "gpt-3.5-turbo", "default": not provider.supports_gpt_4}] if provider.supports_gpt_35_turbo else []) + {"model": model, "default": model == provider.default_model} + for model in provider.get_models() ] - else: - return []; + return [] @staticmethod def get_image_models() -> list[dict]: @@ -72,7 +68,7 @@ class Api(): "image_model": model, "vision_model": parent.default_vision_model if hasattr(parent, "default_vision_model") else None }) - index.append(parent.__name__) + index.append(parent.__name__) elif hasattr(provider, "default_vision_model") and provider.__name__ not in index: image_models.append({ "provider": provider.__name__, @@ -86,31 +82,20 @@ class Api(): @staticmethod def get_providers() -> list[str]: - """ - Return a list of all working providers. - """ return { - provider.__name__: (provider.label - if hasattr(provider, "label") - else provider.__name__) + - (" (WebDriver)" - if "webdriver" in provider.get_parameters() - else "") + - (" (Auth)" - if provider.needs_auth - else "") + provider.__name__: ( + provider.label if hasattr(provider, "label") else provider.__name__ + ) + ( + " (WebDriver)" if "webdriver" in provider.get_parameters() else "" + ) + ( + " (Auth)" if provider.needs_auth else "" + ) for provider in __providers__ if provider.working } @staticmethod def get_version(): - """ - Returns the current and latest version of the application. - - Returns: - dict: A dictionary containing the current and latest version. - """ try: current_version = version.utils.current_version except VersionNotFoundError: @@ -121,18 +106,10 @@ class Api(): } def serve_images(self, name): + ensure_images_dir() return send_from_directory(os.path.abspath(images_dir), name) def _prepare_conversation_kwargs(self, json_data: dict, kwargs: dict): - """ - Prepares arguments for chat completion based on the request data. - - Reads the request and prepares the necessary arguments for handling - a chat completion request. - - Returns: - dict: Arguments prepared for chat completion. - """ model = json_data.get('model') or models.default provider = json_data.get('provider') messages = json_data['messages'] @@ -140,7 +117,7 @@ class Api(): if api_key is not None: kwargs["api_key"] = api_key if json_data.get('web_search'): - if provider in ("Bing", "HuggingChat"): + if provider: kwargs['web_search'] = True else: from .internet import get_search_message @@ -161,101 +138,67 @@ class Api(): } def _create_response_stream(self, kwargs: dict, conversation_id: str, provider: str) -> Iterator: - """ - Creates and returns a streaming response for the conversation. - - Args: - kwargs (dict): Arguments for creating the chat completion. - - Yields: - str: JSON formatted response chunks for the stream. - - Raises: - Exception: If an error occurs during the streaming process. 
- """ try: + result = ChatCompletion.create(**kwargs) first = True - for chunk in ChatCompletion.create(**kwargs): + if isinstance(result, ImageResponse): if first: first = False yield self._format_json("provider", get_last_provider(True)) - if isinstance(chunk, BaseConversation): - if provider not in conversations: - conversations[provider] = {} - conversations[provider][conversation_id] = chunk - yield self._format_json("conversation", conversation_id) - elif isinstance(chunk, Exception): - logging.exception(chunk) - yield self._format_json("message", get_error_message(chunk)) - elif isinstance(chunk, ImagePreview): - yield self._format_json("preview", chunk.to_string()) - elif isinstance(chunk, ImageResponse): - async def copy_images(images: list[str], cookies: Optional[Cookies] = None): - async with ClientSession( - connector=get_connector(None, os.environ.get("G4F_PROXY")), - cookies=cookies - ) as session: - async def copy_image(image): - if image.startswith("data:"): - # Processing the data URL - data_uri_parts = image.split(",") - if len(data_uri_parts) == 2: - content_type, base64_data = data_uri_parts - extension = content_type.split("/")[-1].split(";")[0] - target = os.path.join(images_dir, f"{int(time.time())}_{str(uuid.uuid4())}.{extension}") - with open(target, "wb") as f: - f.write(base64.b64decode(base64_data)) - return f"/images/{os.path.basename(target)}" - else: - return None - else: - # Обробка звичайної URL-адреси - async with session.get(image) as response: - target = os.path.join(images_dir, f"{int(time.time())}_{str(uuid.uuid4())}") - with open(target, "wb") as f: - async for chunk in response.content.iter_any(): - f.write(chunk) - with open(target, "rb") as f: - extension = is_accepted_format(f.read(12)).split("/")[-1] - extension = "jpg" if extension == "jpeg" else extension - new_target = f"{target}.{extension}" - os.rename(target, new_target) - return f"/images/{os.path.basename(new_target)}" - return await asyncio.gather(*[copy_image(image) for image in images]) - images = asyncio.run(copy_images(chunk.get_list(), chunk.options.get("cookies"))) - yield self._format_json("content", str(ImageResponse(images, chunk.alt))) - elif not isinstance(chunk, FinishReason): - yield self._format_json("content", str(chunk)) + yield self._format_json("content", str(result)) + else: + for chunk in result: + if first: + first = False + yield self._format_json("provider", get_last_provider(True)) + if isinstance(chunk, BaseConversation): + if provider not in conversations: + conversations[provider] = {} + conversations[provider][conversation_id] = chunk + yield self._format_json("conversation", conversation_id) + elif isinstance(chunk, Exception): + logger.exception(chunk) + yield self._format_json("message", get_error_message(chunk)) + elif isinstance(chunk, ImagePreview): + yield self._format_json("preview", chunk.to_string()) + elif isinstance(chunk, ImageResponse): + images = asyncio.run(self._copy_images(chunk.get_list(), chunk.options.get("cookies"))) + yield self._format_json("content", str(ImageResponse(images, chunk.alt))) + elif not isinstance(chunk, FinishReason): + yield self._format_json("content", str(chunk)) except Exception as e: - logging.exception(e) + logger.exception(e) yield self._format_json('error', get_error_message(e)) - def _format_json(self, response_type: str, content): - """ - Formats and returns a JSON response. - - Args: - response_type (str): The type of the response. - content: The content to be included in the response. 
+ async def _copy_images(self, images: list[str], cookies: Optional[Cookies] = None): + ensure_images_dir() + async with ClientSession( + connector=get_connector(None, os.environ.get("G4F_PROXY")), + cookies=cookies + ) as session: + async def copy_image(image): + async with session.get(image) as response: + target = os.path.join(images_dir, f"{int(time.time())}_{str(uuid.uuid4())}") + with open(target, "wb") as f: + async for chunk in response.content.iter_any(): + f.write(chunk) + with open(target, "rb") as f: + extension = is_accepted_format(f.read(12)).split("/")[-1] + extension = "jpg" if extension == "jpeg" else extension + new_target = f"{target}.{extension}" + os.rename(target, new_target) + return f"/images/{os.path.basename(new_target)}" + + return await asyncio.gather(*[copy_image(image) for image in images]) - Returns: - str: A JSON formatted string. - """ + def _format_json(self, response_type: str, content): return { 'type': response_type, response_type: content } -def get_error_message(exception: Exception) -> str: - """ - Generates a formatted error message from an exception. - Args: - exception (Exception): The exception to format. - - Returns: - str: A formatted error message string. - """ +def get_error_message(exception: Exception) -> str: message = f"{type(exception).__name__}: {exception}" provider = get_last_provider() if provider is None: diff --git a/g4f/gui/server/internet.py b/g4f/gui/server/internet.py index a1fafa7d..b41b5eae 100644 --- a/g4f/gui/server/internet.py +++ b/g4f/gui/server/internet.py @@ -2,7 +2,7 @@ from __future__ import annotations from aiohttp import ClientSession, ClientTimeout try: - from duckduckgo_search.duckduckgo_search_async import AsyncDDGS + from duckduckgo_search import DDGS from bs4 import BeautifulSoup has_requirements = True except ImportError: @@ -46,8 +46,6 @@ class SearchResultEntry(): def scrape_text(html: str, max_words: int = None) -> str: soup = BeautifulSoup(html, "html.parser") - for exclude in soup(["script", "style"]): - exclude.extract() for selector in [ "main", ".main-content-wrapper", @@ -67,7 +65,7 @@ def scrape_text(html: str, max_words: int = None) -> str: if select: select.extract() clean_text = "" - for paragraph in soup.select("p"): + for paragraph in soup.select("p, h1, h2, h3, h4, h5, h6"): text = paragraph.get_text() for line in text.splitlines(): words = [] @@ -98,10 +96,10 @@ async def fetch_and_scrape(session: ClientSession, url: str, max_words: int = No async def search(query: str, n_results: int = 5, max_words: int = 2500, add_text: bool = True) -> SearchResults: if not has_requirements: - raise MissingRequirementsError('Install "duckduckgo-search" and "beautifulsoup4" package') - async with AsyncDDGS() as ddgs: + raise MissingRequirementsError('Install "duckduckgo-search" and "beautifulsoup4" package | pip install -U g4f[search]') + with DDGS() as ddgs: results = [] - for result in await ddgs.text( + for result in ddgs.text( query, region="wt-wt", safesearch="moderate", diff --git a/g4f/gui/server/website.py b/g4f/gui/server/website.py index 5e633674..3cabcdf3 100644 --- a/g4f/gui/server/website.py +++ b/g4f/gui/server/website.py @@ -27,6 +27,10 @@ class Website: 'function': redirect_home, 'methods': ['GET', 'POST'] }, + '/images/': { + 'function': redirect_home, + 'methods': ['GET', 'POST'] + }, } def _chat(self, conversation_id): @@ -35,4 +39,4 @@ class Website: return render_template('index.html', chat_id=conversation_id) def _index(self): - return render_template('index.html', 
chat_id=str(uuid.uuid4()))
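The internet.py hunk above swaps the async AsyncDDGS client for the synchronous DDGS interface from newer duckduckgo-search releases while keeping the same region and safesearch arguments. A minimal standalone sketch of that call, assuming duckduckgo-search >= 5.0 is installed; the query string and max_results value are only illustrations:

    from duckduckgo_search import DDGS

    # Synchronous text search, mirroring the region/safesearch arguments used in
    # g4f/gui/server/internet.py after this change.
    with DDGS() as ddgs:
        for result in ddgs.text(
            "python asyncio tutorial",
            region="wt-wt",
            safesearch="moderate",
            max_results=5,
        ):
            # Each result is a dict with "title", "href" and "body" keys.
            print(result["title"], "->", result["href"])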
\ No newline at end of file + return render_template('index.html', chat_id=str(uuid.uuid4())) diff --git a/g4f/image.py b/g4f/image.py index 6561b83a..556ec43d 100644 --- a/g4f/image.py +++ b/g4f/image.py @@ -23,6 +23,11 @@ EXTENSIONS_MAP: dict[str, str] = { "image/webp": "webp", } +def fix_url(url:str) -> str: + """ replace ' ' by '+' (to be markdown compliant)""" + return url.replace(" ","+") + + def to_image(image: ImageType, is_svg: bool = False) -> Image: """ Converts the input image to a PIL Image object. @@ -212,12 +217,12 @@ def format_images_markdown(images: Union[str, list], alt: str, preview: Union[st str: The formatted markdown string. """ if isinstance(images, str): - result = f"[![{alt}]({preview.replace('{image}', images) if preview else images})]({images})" + result = f"[![{alt}]({fix_url(preview.replace('{image}', images) if preview else images)})]({fix_url(images)})" else: if not isinstance(preview, list): preview = [preview.replace('{image}', image) if preview else image for image in images] result = "\n".join( - f"[![#{idx+1} {alt}]({preview[idx]})]({image})" + f"[![#{idx+1} {alt}]({fix_url(preview[idx])})]({fix_url(image)})" #f'[<img src="{preview[idx]}" width="200" alt="#{idx+1} {alt}">]({image})' for idx, image in enumerate(images) ) @@ -302,4 +307,4 @@ class ImageRequest: self.options = options def get(self, key: str): - return self.options.get(key)
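The image.py change above routes preview and image URLs through the new fix_url() helper so that spaces do not break the generated markdown links. A small sketch of the effect, using a hypothetical image URL:

    def fix_url(url: str) -> str:
        """Replace ' ' with '+' to stay markdown compliant (as added in g4f/image.py)."""
        return url.replace(" ", "+")

    image = "https://example.com/generated/a cat in space.png"  # hypothetical URL with spaces
    alt = "a cat in space"
    print(f"[![{alt}]({fix_url(image)})]({fix_url(image)})")
    # [![a cat in space](https://example.com/generated/a+cat+in+space.png)](https://example.com/generated/a+cat+in+space.png)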
\ No newline at end of file + return self.options.get(key) diff --git a/g4f/models.py b/g4f/models.py index c985ddd3..8825242f 100644 --- a/g4f/models.py +++ b/g4f/models.py @@ -4,51 +4,40 @@ from dataclasses import dataclass from .Provider import IterListProvider, ProviderType from .Provider import ( - AiChatOnline, + AIChatFree, Airforce, - Allyfy, + AIUncensored, Bing, - Binjie, - Bixin123, Blackbox, - ChatGot, - Chatgpt4Online, ChatGpt, - Chatgpt4o, - ChatgptFree, - CodeNews, + Chatgpt4Online, + ChatGptEs, + Cloudflare, + DarkAI, DDG, - DeepInfra, - DeepInfraImage, + DeepInfraChat, Free2GPT, - FreeChatgpt, - FreeGpt, FreeNetfly, + GigaChat, Gemini, GeminiPro, - GigaChat, HuggingChat, HuggingFace, - Koala, Liaobots, MagickPen, + Mhystical, MetaAI, - Nexra, OpenaiChat, PerplexityLabs, Pi, Pizzagpt, Reka, - Replicate, ReplicateHome, - Snova, + RubiksAI, TeachAnything, - TwitterBio, Upstage, - You, ) - @dataclass(unsafe_hash=True) class Model: """ @@ -68,22 +57,25 @@ class Model: """Returns a list of all model names.""" return _all_models +### Default ### default = Model( name = "", base_provider = "", best_provider = IterListProvider([ DDG, - FreeChatgpt, - HuggingChat, Pizzagpt, - ChatgptFree, ReplicateHome, Upstage, Blackbox, - Bixin123, - Binjie, Free2GPT, MagickPen, + DeepInfraChat, + Airforce, + ChatGptEs, + Cloudflare, + AIUncensored, + DarkAI, + Mhystical, ]) ) @@ -92,58 +84,36 @@ default = Model( ############ ### OpenAI ### -# gpt-3 -gpt_3 = Model( - name = 'gpt-3', - base_provider = 'OpenAI', - best_provider = IterListProvider([ - Nexra, - ]) -) - # gpt-3.5 gpt_35_turbo = Model( name = 'gpt-3.5-turbo', base_provider = 'OpenAI', - best_provider = IterListProvider([ - Allyfy, TwitterBio, Nexra, Bixin123, CodeNews, Airforce, - ]) + best_provider = IterListProvider([Airforce]) ) # gpt-4 gpt_4o = Model( name = 'gpt-4o', base_provider = 'OpenAI', - best_provider = IterListProvider([ - Liaobots, Chatgpt4o, Airforce, - OpenaiChat - ]) + best_provider = IterListProvider([Blackbox, ChatGptEs, DarkAI, ChatGpt, Airforce, Liaobots, OpenaiChat]) ) gpt_4o_mini = Model( name = 'gpt-4o-mini', base_provider = 'OpenAI', - best_provider = IterListProvider([ - DDG, Liaobots, You, FreeNetfly, Pizzagpt, ChatgptFree, AiChatOnline, CodeNews, MagickPen, Airforce, - OpenaiChat, Koala, ChatGpt - ]) + best_provider = IterListProvider([DDG, ChatGptEs, FreeNetfly, Pizzagpt, ChatGpt, Airforce, RubiksAI, MagickPen, Liaobots, OpenaiChat]) ) gpt_4_turbo = Model( name = 'gpt-4-turbo', base_provider = 'OpenAI', - best_provider = IterListProvider([ - Nexra, Bixin123, Liaobots, Airforce, Bing - ]) + best_provider = IterListProvider([Liaobots, Bing]) ) gpt_4 = Model( name = 'gpt-4', base_provider = 'OpenAI', - best_provider = IterListProvider([ - Chatgpt4Online, Nexra, Binjie, Airforce, Bing, - gpt_4_turbo.best_provider, gpt_4o.best_provider, gpt_4o_mini.best_provider - ]) + best_provider = IterListProvider([Chatgpt4Online, Bing, OpenaiChat, DDG, Liaobots, Airforce]) ) ### GigaChat ### @@ -153,7 +123,6 @@ gigachat = Model( best_provider = GigaChat ) - ### Meta ### meta = Model( name = "meta-ai", @@ -161,163 +130,127 @@ meta = Model( best_provider = MetaAI ) -llama_2_13b = Model( - name = "llama-2-13b", - base_provider = "Meta", - best_provider = IterListProvider([Airforce]) +# llama 2 +llama_2_7b = Model( + name = "llama-2-7b", + base_provider = "Meta Llama", + best_provider = Cloudflare ) - +# llama 3 llama_3_8b = Model( name = "llama-3-8b", - base_provider = "Meta", - best_provider = IterListProvider([Airforce, 
DeepInfra, Replicate]) -) - -llama_3_70b = Model( - name = "llama-3-70b", - base_provider = "Meta", - best_provider = IterListProvider([ReplicateHome, Airforce, DeepInfra, Replicate]) + base_provider = "Meta Llama", + best_provider = Cloudflare ) +# llama 3.1 llama_3_1_8b = Model( name = "llama-3.1-8b", - base_provider = "Meta", - best_provider = IterListProvider([Blackbox, Airforce, PerplexityLabs]) + base_provider = "Meta Llama", + best_provider = IterListProvider([Blackbox, DeepInfraChat, Cloudflare, Airforce, PerplexityLabs]) ) llama_3_1_70b = Model( name = "llama-3.1-70b", - base_provider = "Meta", - best_provider = IterListProvider([DDG, HuggingChat, FreeGpt, Blackbox, TeachAnything, Free2GPT, Airforce, HuggingFace, PerplexityLabs]) + base_provider = "Meta Llama", + best_provider = IterListProvider([DDG, DeepInfraChat, Blackbox, TeachAnything, DarkAI, Airforce, RubiksAI, HuggingChat, HuggingFace, PerplexityLabs]) ) llama_3_1_405b = Model( name = "llama-3.1-405b", - base_provider = "Meta", - best_provider = IterListProvider([Blackbox, Airforce]) + base_provider = "Meta Llama", + best_provider = IterListProvider([Blackbox, DarkAI]) ) +# llama 3.2 +llama_3_2_1b = Model( + name = "llama-3.2-1b", + base_provider = "Meta Llama", + best_provider = IterListProvider([Cloudflare]) +) -### Mistral ### -mistral_7b = Model( - name = "mistral-7b", - base_provider = "Mistral", - best_provider = IterListProvider([HuggingChat, Airforce, HuggingFace, DeepInfra]) +llama_3_2_11b = Model( + name = "llama-3.2-11b", + base_provider = "Meta Llama", + best_provider = IterListProvider([HuggingChat, HuggingFace]) ) mixtral_8x7b = Model( name = "mixtral-8x7b", base_provider = "Mistral", - best_provider = IterListProvider([HuggingChat, DDG, ReplicateHome, TwitterBio, Airforce, DeepInfra, HuggingFace]) + best_provider = DDG ) -mixtral_8x22b = Model( - name = "mixtral-8x22b", +mistral_nemo = Model( + name = "mistral-nemo", base_provider = "Mistral", - best_provider = IterListProvider([Airforce]) + best_provider = IterListProvider([HuggingChat, HuggingFace]) ) - -### NousResearch ### -mixtral_8x7b_dpo = Model( - name = "mixtral-8x7b-dpo", +hermes_3 = Model( + name = "hermes-3", base_provider = "NousResearch", - best_provider = IterListProvider([HuggingChat, Airforce, HuggingFace]) + best_provider = IterListProvider([HuggingChat, HuggingFace]) ) -yi_34b = Model( - name = 'yi-34b', - base_provider = 'NousResearch', +### Microsoft ### +phi_2 = Model( + name = "phi-2", + base_provider = "Microsoft", best_provider = IterListProvider([Airforce]) ) - -### Microsoft ### -phi_3_mini_4k = Model( - name = "phi-3-mini-4k", +phi_3_5_mini = Model( + name = "phi-3.5-mini", base_provider = "Microsoft", best_provider = IterListProvider([HuggingChat, HuggingFace]) ) - -### Google ### +### Google DeepMind ### # gemini gemini_pro = Model( name = 'gemini-pro', - base_provider = 'Google', - best_provider = IterListProvider([GeminiPro, ChatGot, Liaobots, Airforce]) + base_provider = 'Google DeepMind', + best_provider = IterListProvider([GeminiPro, Blackbox, AIChatFree, Liaobots]) ) gemini_flash = Model( name = 'gemini-flash', - base_provider = 'Google', - best_provider = IterListProvider([Blackbox, Liaobots, Airforce]) + base_provider = 'Google DeepMind', + best_provider = IterListProvider([Blackbox, Liaobots]) ) gemini = Model( name = 'gemini', - base_provider = 'Google', - best_provider = IterListProvider([ - Gemini, - gemini_flash.best_provider, gemini_pro.best_provider - ]) + base_provider = 'Google DeepMind', + best_provider = 
Gemini ) - # gemma gemma_2b = Model( name = 'gemma-2b', base_provider = 'Google', - best_provider = IterListProvider([ReplicateHome, Airforce]) -) - -gemma_2b_9b = Model( - name = 'gemma-2b-9b', - base_provider = 'Google', - best_provider = IterListProvider([Airforce]) -) - -gemma_2b_27b = Model( - name = 'gemma-2b-27b', - base_provider = 'Google', - best_provider = IterListProvider([Airforce]) + best_provider = ReplicateHome ) ### Anthropic ### -claude_2 = Model( - name = 'claude-2', - base_provider = 'Anthropic', - best_provider = IterListProvider([You]) -) - -claude_2_0 = Model( - name = 'claude-2.0', - base_provider = 'Anthropic', - best_provider = IterListProvider([Liaobots]) -) - claude_2_1 = Model( name = 'claude-2.1', base_provider = 'Anthropic', - best_provider = IterListProvider([Liaobots]) + best_provider = Liaobots ) +# claude 3 claude_3_opus = Model( name = 'claude-3-opus', base_provider = 'Anthropic', - best_provider = IterListProvider([Liaobots]) + best_provider = Liaobots ) claude_3_sonnet = Model( name = 'claude-3-sonnet', base_provider = 'Anthropic', - best_provider = IterListProvider([Liaobots]) -) - -claude_3_5_sonnet = Model( - name = 'claude-3-5-sonnet', - base_provider = 'Anthropic', - best_provider = IterListProvider([Liaobots]) + best_provider = Liaobots ) claude_3_haiku = Model( @@ -326,6 +259,12 @@ claude_3_haiku = Model( best_provider = IterListProvider([DDG, Liaobots]) ) +# claude 3.5 +claude_3_5_sonnet = Model( + name = 'claude-3.5-sonnet', + base_provider = 'Anthropic', + best_provider = IterListProvider([Blackbox, Liaobots]) +) ### Reka AI ### reka_core = Model( @@ -334,167 +273,173 @@ reka_core = Model( best_provider = Reka ) - -### Blackbox ### -blackbox = Model( - name = 'blackbox', - base_provider = 'Blackbox', +### Blackbox AI ### +blackboxai = Model( + name = 'blackboxai', + base_provider = 'Blackbox AI', best_provider = Blackbox ) - -### Databricks ### -dbrx_instruct = Model( - name = 'dbrx-instruct', - base_provider = 'Databricks', - best_provider = IterListProvider([Airforce, DeepInfra]) +blackboxai_pro = Model( + name = 'blackboxai-pro', + base_provider = 'Blackbox AI', + best_provider = Blackbox ) - ### CohereForAI ### command_r_plus = Model( name = 'command-r-plus', base_provider = 'CohereForAI', - best_provider = IterListProvider([HuggingChat]) -) - - -### iFlytek ### -sparkdesk_v1_1 = Model( - name = 'sparkdesk-v1.1', - base_provider = 'iFlytek', - best_provider = IterListProvider([FreeChatgpt, Airforce]) + best_provider = HuggingChat ) ### Qwen ### -qwen_1_5_14b = Model( - name = 'qwen-1.5-14b', +# qwen 1_5 +qwen_1_5_7b = Model( + name = 'qwen-1.5-7b', base_provider = 'Qwen', - best_provider = IterListProvider([FreeChatgpt]) + best_provider = Cloudflare ) -qwen_1_5_72b = Model( - name = 'qwen-1.5-72b', +# qwen 2 +qwen_2_72b = Model( + name = 'qwen-2-72b', base_provider = 'Qwen', - best_provider = IterListProvider([Airforce]) + best_provider = IterListProvider([DeepInfraChat, HuggingChat, HuggingFace]) ) -qwen_1_5_110b = Model( - name = 'qwen-1.5-110b', +# qwen 2.5 +qwen_2_5_coder_32b = Model( + name = 'qwen-2.5-coder-32b', base_provider = 'Qwen', - best_provider = IterListProvider([Airforce]) + best_provider = IterListProvider([HuggingChat, HuggingFace]) ) -qwen_2_72b = Model( - name = 'qwen-2-72b', - base_provider = 'Qwen', - best_provider = IterListProvider([Airforce]) +### Upstage ### +solar_mini = Model( + name = 'solar-mini', + base_provider = 'Upstage', + best_provider = Upstage ) -qwen_turbo = Model( - name = 'qwen-turbo', - 
base_provider = 'Qwen', - best_provider = IterListProvider([Bixin123]) +solar_pro = Model( + name = 'solar-pro', + base_provider = 'Upstage', + best_provider = Upstage ) -### Zhipu AI ### -glm_3_6b = Model( - name = 'glm-3-6b', - base_provider = 'Zhipu AI', - best_provider = IterListProvider([FreeChatgpt]) +### Inflection ### +pi = Model( + name = 'pi', + base_provider = 'Inflection', + best_provider = Pi ) -glm_4_9b = Model( - name = 'glm-4-9B', - base_provider = 'Zhipu AI', - best_provider = IterListProvider([FreeChatgpt]) +### DeepSeek ### +deepseek_coder = Model( + name = 'deepseek-coder', + base_provider = 'DeepSeek', + best_provider = Airforce ) -glm_4 = Model( - name = 'glm-4', - base_provider = 'Zhipu AI', - best_provider = IterListProvider([ - CodeNews, - glm_3_6b.best_provider, glm_4_9b.best_provider - ]) +### WizardLM ### +wizardlm_2_8x22b = Model( + name = 'wizardlm-2-8x22b', + base_provider = 'WizardLM', + best_provider = IterListProvider([DeepInfraChat]) ) -### 01-ai ### -yi_1_5_9b = Model( - name = 'yi-1.5-9b', - base_provider = '01-ai', - best_provider = IterListProvider([FreeChatgpt]) +### Yorickvp ### +llava_13b = Model( + name = 'llava-13b', + base_provider = 'Yorickvp', + best_provider = ReplicateHome ) -### Upstage ### -solar_1_mini = Model( - name = 'solar-1-mini', - base_provider = 'Upstage', - best_provider = IterListProvider([Upstage]) +### OpenChat ### +openchat_3_5 = Model( + name = 'openchat-3.5', + base_provider = 'OpenChat', + best_provider = Airforce ) -solar_10_7b = Model( - name = 'solar-10-7b', - base_provider = 'Upstage', - best_provider = Airforce + +### x.ai ### +grok_2 = Model( + name = 'grok-2', + base_provider = 'x.ai', + best_provider = Liaobots ) +grok_2_mini = Model( + name = 'grok-2-mini', + base_provider = 'x.ai', + best_provider = Liaobots +) -### Pi ### -pi = Model( - name = 'pi', - base_provider = 'inflection', - best_provider = Pi +grok_beta = Model( + name = 'grok-beta', + base_provider = 'x.ai', + best_provider = Liaobots ) -### SambaNova ### -samba_coe_v0_1 = Model( - name = 'samba-coe-v0.1', - base_provider = 'SambaNova', - best_provider = Snova + +### Perplexity AI ### +sonar_online = Model( + name = 'sonar-online', + base_provider = 'Perplexity AI', + best_provider = IterListProvider([PerplexityLabs]) ) -### Trong-Hieu Nguyen-Mau ### -v1olet_merged_7b = Model( - name = 'v1olet-merged-7b', - base_provider = 'Trong-Hieu Nguyen-Mau', - best_provider = Snova +sonar_chat = Model( + name = 'sonar-chat', + base_provider = 'Perplexity AI', + best_provider = PerplexityLabs ) -### Macadeliccc ### -westlake_7b_v2 = Model( - name = 'westlake-7b-v2', - base_provider = 'Macadeliccc', - best_provider = Snova +### Nvidia ### +nemotron_70b = Model( + name = 'nemotron-70b', + base_provider = 'Nvidia', + best_provider = IterListProvider([HuggingChat, HuggingFace]) ) -### DeepSeek ### -deepseek = Model( - name = 'deepseek', - base_provider = 'DeepSeek', - best_provider = IterListProvider([CodeNews, Airforce]) + +### Teknium ### +openhermes_2_5 = Model( + name = 'openhermes-2.5', + base_provider = 'Teknium', + best_provider = Airforce ) -### WizardLM ### -wizardlm_2_8x22b = Model( - name = 'wizardlm-2-8x22b', - base_provider = 'WizardLM', +### Liquid ### +lfm_40b = Model( + name = 'lfm-40b', + base_provider = 'Liquid', + best_provider = IterListProvider([Airforce, PerplexityLabs]) +) + + +### DiscoResearch ### +german_7b = Model( + name = 'german-7b', + base_provider = 'DiscoResearch', best_provider = Airforce ) -### Together ### -sh_n_7b = Model( - name = 
'sh-n-7b', - base_provider = 'Together', +### HuggingFaceH4 ### +zephyr_7b = Model( + name = 'zephyr-7b', + base_provider = 'HuggingFaceH4', best_provider = Airforce ) -### Yorickvp ### -llava_13b = Model( - name = 'llava-13b', - base_provider = 'Yorickvp', - best_provider = ReplicateHome +### Inferless ### +neural_7b = Model( + name = 'neural-7b', + base_provider = 'inferless', + best_provider = Airforce ) ############# @@ -505,109 +450,80 @@ llava_13b = Model( sdxl = Model( name = 'sdxl', base_provider = 'Stability AI', - best_provider = IterListProvider([ReplicateHome, DeepInfraImage]) + best_provider = IterListProvider([ReplicateHome]) ) sd_3 = Model( name = 'sd-3', base_provider = 'Stability AI', - best_provider = IterListProvider([ReplicateHome]) + best_provider = ReplicateHome ) ### Playground ### playground_v2_5 = Model( name = 'playground-v2.5', - base_provider = 'Stability AI', - best_provider = IterListProvider([ReplicateHome]) + base_provider = 'Playground AI', + best_provider = ReplicateHome ) + ### Flux AI ### flux = Model( name = 'flux', base_provider = 'Flux AI', - best_provider = IterListProvider([Airforce]) - + best_provider = IterListProvider([Blackbox, AIUncensored, Airforce]) +) + +flux_pro = Model( + name = 'flux-pro', + base_provider = 'Flux AI', + best_provider = Airforce ) flux_realism = Model( name = 'flux-realism', base_provider = 'Flux AI', - best_provider = IterListProvider([Airforce]) - + best_provider = Airforce ) flux_anime = Model( name = 'flux-anime', base_provider = 'Flux AI', - best_provider = IterListProvider([Airforce]) - + best_provider = Airforce ) flux_3d = Model( name = 'flux-3d', base_provider = 'Flux AI', - best_provider = IterListProvider([Airforce]) - + best_provider = Airforce ) flux_disney = Model( name = 'flux-disney', base_provider = 'Flux AI', - best_provider = IterListProvider([Airforce]) - + best_provider = Airforce ) flux_pixel = Model( name = 'flux-pixel', base_provider = 'Flux AI', - best_provider = IterListProvider([Airforce]) - + best_provider = Airforce ) -flux_schnell = Model( - name = 'flux-schnell', +flux_4o = Model( + name = 'flux-4o', base_provider = 'Flux AI', - best_provider = IterListProvider([ReplicateHome]) - -) - -### ### -dalle = Model( - name = 'dalle', - base_provider = '', - best_provider = IterListProvider([Nexra]) - -) - -dalle_2 = Model( - name = 'dalle-2', - base_provider = '', - best_provider = IterListProvider([Nexra]) - -) - -dalle_mini = Model( - name = 'dalle-mini', - base_provider = '', - best_provider = IterListProvider([Nexra]) - -) - -emi = Model( - name = 'emi', - base_provider = '', - best_provider = IterListProvider([Nexra]) - + best_provider = Airforce ) +### Other ### any_dark = Model( name = 'any-dark', base_provider = '', - best_provider = IterListProvider([Airforce]) - + best_provider = Airforce ) class ModelUtils: @@ -617,186 +533,142 @@ class ModelUtils: Attributes: convert (dict[str, Model]): Dictionary mapping model string identifiers to Model instances. 
""" - convert: dict[str, Model] = { - -############ -### Text ### -############ - -### OpenAI ### -# gpt-3 -'gpt-3': gpt_3, - -# gpt-3.5 -'gpt-3.5-turbo': gpt_35_turbo, - -# gpt-4 -'gpt-4o' : gpt_4o, -'gpt-4o-mini' : gpt_4o_mini, -'gpt-4' : gpt_4, -'gpt-4-turbo' : gpt_4_turbo, - - -### Meta ### -"meta-ai": meta, - -# llama-2 -'llama-2-13b': llama_2_13b, - -# llama-3 -'llama-3-8b': llama_3_8b, -'llama-3-70b': llama_3_70b, - -# llama-3.1 -'llama-3.1-8b': llama_3_1_8b, -'llama-3.1-70b': llama_3_1_70b, -'llama-3.1-405b': llama_3_1_405b, - - -### Mistral ### -'mistral-7b': mistral_7b, -'mixtral-8x7b': mixtral_8x7b, -'mixtral-8x22b': mixtral_8x22b, - - -### NousResearch ### -'mixtral-8x7b-dpo': mixtral_8x7b_dpo, - -'yi-34b': yi_34b, - - -### Microsoft ### -'phi-3-mini-4k': phi_3_mini_4k, - - -### Google ### -# gemini -'gemini': gemini, -'gemini-pro': gemini_pro, -'gemini-flash': gemini_flash, - -# gemma -'gemma-2b': gemma_2b, -'gemma-2b-9b': gemma_2b_9b, -'gemma-2b-27b': gemma_2b_27b, - - -### Anthropic ### -'claude-2': claude_2, -'claude-2.0': claude_2_0, -'claude-2.1': claude_2_1, - -'claude-3-opus': claude_3_opus, -'claude-3-sonnet': claude_3_sonnet, -'claude-3-haiku': claude_3_haiku, -'claude-3-5-sonnet': claude_3_5_sonnet, - - -### Reka AI ### -'reka-core': reka_core, - - -### Blackbox ### -'blackbox': blackbox, - - -### CohereForAI ### -'command-r+': command_r_plus, - - -### Databricks ### -'dbrx-instruct': dbrx_instruct, - - -### GigaChat ### -'gigachat': gigachat, - - -### iFlytek ### -'sparkdesk-v1.1': sparkdesk_v1_1, - - -### Qwen ### -'qwen-1.5-14b': qwen_1_5_14b, -'qwen-1.5-72b': qwen_1_5_72b, -'qwen-1.5-110b': qwen_1_5_110b, -'qwen-2-72b': qwen_2_72b, -'qwen-turbo': qwen_turbo, - - -### Zhipu AI ### -'glm-3-6b': glm_3_6b, -'glm-4-9b': glm_4_9b, -'glm-4': glm_4, - - -### 01-ai ### -'yi-1.5-9b': yi_1_5_9b, - - -### Upstage ### -'solar-1-mini': solar_1_mini, -'solar-10-7b': solar_10_7b, - - -### Pi ### -'pi': pi, - - -### SambaNova ### -'samba-coe-v0.1': samba_coe_v0_1, - - -### Trong-Hieu Nguyen-Mau ### -'v1olet-merged-7b': v1olet_merged_7b, - - -### Macadeliccc ### -'westlake-7b-v2': westlake_7b_v2, - - -### DeepSeek ### -'deepseek': deepseek, - - -### Together ### -'sh-n-7b': sh_n_7b, - - -### Yorickvp ### -'llava-13b': llava_13b, - - - -############# -### Image ### -############# - -### Stability AI ### -'sdxl': sdxl, -'sd-3': sd_3, - - -### Playground ### -'playground-v2.5': playground_v2_5, - - -### Flux AI ### -'flux': flux, -'flux-realism': flux_realism, -'flux-anime': flux_anime, -'flux-3d': flux_3d, -'flux-disney': flux_disney, -'flux-pixel': flux_pixel, -'flux-schnell': flux_schnell, - - -### ### -'dalle': dalle, -'dalle-2': dalle_2, -'dalle-mini': dalle_mini, -'emi': emi, -'any-dark': any_dark, + convert: dict[str, Model] = { + ############ + ### Text ### + ############ + + ### OpenAI ### + # gpt-3 + 'gpt-3': gpt_35_turbo, + + # gpt-3.5 + 'gpt-3.5-turbo': gpt_35_turbo, + + # gpt-4 + 'gpt-4o': gpt_4o, + 'gpt-4o-mini': gpt_4o_mini, + 'gpt-4': gpt_4, + 'gpt-4-turbo': gpt_4_turbo, + + ### Meta ### + "meta-ai": meta, + + # llama-2 + 'llama-2-7b': llama_2_7b, + + # llama-3 + 'llama-3-8b': llama_3_8b, + + # llama-3.1 + 'llama-3.1-8b': llama_3_1_8b, + 'llama-3.1-70b': llama_3_1_70b, + 'llama-3.1-405b': llama_3_1_405b, + + # llama-3.2 + 'llama-3.2-1b': llama_3_2_1b, + 'llama-3.2-11b': llama_3_2_11b, + + ### Mistral ### + 'mixtral-8x7b': mixtral_8x7b, + 'mistral-nemo': mistral_nemo, + + ### NousResearch ### + 'hermes-3': hermes_3, + + ### Microsoft ### + 'phi-2': phi_2, + 
'phi-3.5-mini': phi_3_5_mini, + + ### Google ### + # gemini + 'gemini': gemini, + 'gemini-pro': gemini_pro, + 'gemini-flash': gemini_flash, + + # gemma + 'gemma-2b': gemma_2b, + + ### Anthropic ### + 'claude-2.1': claude_2_1, + + # claude 3 + 'claude-3-opus': claude_3_opus, + 'claude-3-sonnet': claude_3_sonnet, + 'claude-3-haiku': claude_3_haiku, + + # claude 3.5 + 'claude-3.5-sonnet': claude_3_5_sonnet, + + ### Reka AI ### + 'reka-core': reka_core, + + ### Blackbox AI ### + 'blackboxai': blackboxai, + 'blackboxai-pro': blackboxai_pro, + + ### CohereForAI ### + 'command-r+': command_r_plus, + + ### GigaChat ### + 'gigachat': gigachat, + + 'qwen-1.5-7b': qwen_1_5_7b, + 'qwen-2-72b': qwen_2_72b, + + ### Upstage ### + 'solar-pro': solar_pro, + + ### Inflection ### + 'pi': pi, + + ### Yorickvp ### + 'llava-13b': llava_13b, + + ### WizardLM ### + 'wizardlm-2-8x22b': wizardlm_2_8x22b, + + ### OpenChat ### + 'openchat-3.5': openchat_3_5, + + ### x.ai ### + 'grok-2': grok_2, + 'grok-2-mini': grok_2_mini, + 'grok-beta': grok_beta, + + ### Perplexity AI ### + 'sonar-online': sonar_online, + 'sonar-chat': sonar_chat, + + ### TheBloke ### + 'german-7b': german_7b, + + ### Nvidia ### + 'nemotron-70b': nemotron_70b, + + ############# + ### Image ### + ############# + + ### Stability AI ### + 'sdxl': sdxl, + 'sd-3': sd_3, + + ### Playground ### + 'playground-v2.5': playground_v2_5, + + ### Flux AI ### + 'flux': flux, + 'flux-pro': flux_pro, + 'flux-realism': flux_realism, + 'flux-anime': flux_anime, + 'flux-3d': flux_3d, + 'flux-disney': flux_disney, + 'flux-pixel': flux_pixel, + 'flux-4o': flux_4o, + + ### Other ### + 'any-dark': any_dark, } -_all_models = list(ModelUtils.convert.keys()) +_all_models = list(ModelUtils.convert.keys())
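The rewritten models.py keeps the same lookup pattern as before: each string identifier resolves to a Model instance through ModelUtils.convert, and _all_models feeds the GUI's model dropdown. A short usage sketch, assuming g4f is installed and at least one provider for the chosen model is currently working:

    import g4f
    from g4f.models import ModelUtils, _all_models

    # Resolve a registered name to its Model entry.
    model = ModelUtils.convert["gpt-4o-mini"]
    print(model.name, "|", model.base_provider, "|", model.best_provider)

    # The same identifiers are accepted by ChatCompletion.create().
    response = g4f.ChatCompletion.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Say hello"}],
    )
    print(response)

    print(len(_all_models), "registered model names")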
\ No newline at end of file diff --git a/g4f/providers/base_provider.py b/g4f/providers/base_provider.py index a03dcbba..128fb5a0 100644 --- a/g4f/providers/base_provider.py +++ b/g4f/providers/base_provider.py @@ -2,11 +2,13 @@ from __future__ import annotations import sys import asyncio + from asyncio import AbstractEventLoop from concurrent.futures import ThreadPoolExecutor from abc import abstractmethod from inspect import signature, Parameter from typing import Callable, Union + from ..typing import CreateResult, AsyncResult, Messages from .types import BaseProvider, FinishReason from ..errors import NestAsyncioError, ModelNotSupportedError @@ -17,6 +19,17 @@ if sys.version_info < (3, 10): else: from types import NoneType +try: + import nest_asyncio + has_nest_asyncio = True +except ImportError: + has_nest_asyncio = False +try: + import uvloop + has_uvloop = True +except ImportError: + has_uvloop = False + # Set Windows event loop policy for better compatibility with asyncio and curl_cffi if sys.platform == 'win32': try: @@ -31,18 +44,14 @@ def get_running_loop(check_nested: bool) -> Union[AbstractEventLoop, None]: try: loop = asyncio.get_running_loop() # Do not patch uvloop loop because its incompatible. - try: - import uvloop + if has_uvloop: if isinstance(loop, uvloop.Loop): - return loop - except (ImportError, ModuleNotFoundError): - pass - if check_nested and not hasattr(loop.__class__, "_nest_patched"): - try: - import nest_asyncio + return loop + if not hasattr(loop.__class__, "_nest_patched"): + if has_nest_asyncio: nest_asyncio.apply(loop) - except ImportError: - raise NestAsyncioError('Install "nest_asyncio" package') + elif check_nested: + raise NestAsyncioError('Install "nest_asyncio" package | pip install -U nest_asyncio') return loop except RuntimeError: pass @@ -154,7 +163,7 @@ class AsyncProvider(AbstractProvider): Returns: CreateResult: The result of the completion creation. """ - get_running_loop(check_nested=True) + get_running_loop(check_nested=False) yield asyncio.run(cls.create_async(model, messages, **kwargs)) @staticmethod @@ -208,7 +217,7 @@ class AsyncGeneratorProvider(AsyncProvider): Returns: CreateResult: The result of the streaming completion creation. """ - loop = get_running_loop(check_nested=True) + loop = get_running_loop(check_nested=False) new_loop = False if loop is None: loop = asyncio.new_event_loop() @@ -222,7 +231,7 @@ class AsyncGeneratorProvider(AsyncProvider): while True: yield loop.run_until_complete(await_callback(gen.__anext__)) except StopAsyncIteration: - ... + pass finally: if new_loop: loop.close() @@ -248,7 +257,7 @@ class AsyncGeneratorProvider(AsyncProvider): str: The created result as a string. """ return "".join([ - chunk async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs) + str(chunk) async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs) if not isinstance(chunk, (Exception, FinishReason)) ]) diff --git a/g4f/providers/types.py b/g4f/providers/types.py index 50c14431..e7ca32ee 100644 --- a/g4f/providers/types.py +++ b/g4f/providers/types.py @@ -3,6 +3,7 @@ from __future__ import annotations from abc import ABC, abstractmethod from typing import Union, Dict, Type from ..typing import Messages, CreateResult +from .conversation import BaseConversation class BaseProvider(ABC): """ @@ -13,9 +14,8 @@ class BaseProvider(ABC): working (bool): Indicates if the provider is currently working. needs_auth (bool): Indicates if the provider needs authentication. 
supports_stream (bool): Indicates if the provider supports streaming. - supports_gpt_35_turbo (bool): Indicates if the provider supports GPT-3.5 Turbo. - supports_gpt_4 (bool): Indicates if the provider supports GPT-4. supports_message_history (bool): Indicates if the provider supports message history. + supports_system_message (bool): Indicates if the provider supports system messages. params (str): List parameters for the provider. """ @@ -23,8 +23,6 @@ class BaseProvider(ABC): working: bool = False needs_auth: bool = False supports_stream: bool = False - supports_gpt_35_turbo: bool = False - supports_gpt_4: bool = False supports_message_history: bool = False supports_system_message: bool = False params: str @@ -109,4 +107,4 @@ class Streaming(): self.data = data def __str__(self) -> str: - return self.data
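With supports_gpt_35_turbo and supports_gpt_4 dropped from BaseProvider in types.py above, per-provider model support is now discovered through ProviderModelMixin.get_models(), which is what the simplified lookup in g4f/gui/server/api.py relies on. A hedged sketch of that pattern; the provider name is only an example:

    from g4f.Provider import __map__
    from g4f.providers.base_provider import ProviderModelMixin

    def models_for(provider_name: str) -> list[str]:
        # Mirrors the reduced branch in api.py: no dedicated GPT-3.5/GPT-4 flags,
        # just whatever the provider itself reports.
        provider = __map__.get(provider_name)
        if provider is not None and issubclass(provider, ProviderModelMixin):
            return provider.get_models()
        return []

    print(models_for("Blackbox"))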
\ No newline at end of file + return self.data diff --git a/g4f/requests/__init__.py b/g4f/requests/__init__.py index 80fc44b3..a8c0e286 100644 --- a/g4f/requests/__init__.py +++ b/g4f/requests/__init__.py @@ -1,5 +1,8 @@ from __future__ import annotations +from urllib.parse import urlparse +from typing import Iterator +from http.cookies import Morsel try: from curl_cffi.requests import Session, Response from .curl_cffi import StreamResponse, StreamSession, FormData @@ -14,11 +17,19 @@ try: has_webview = True except ImportError: has_webview = False +try: + import nodriver + from nodriver.cdp.network import CookieParam + has_nodriver = True +except ImportError: + has_nodriver = False +from .. import debug from .raise_for_status import raise_for_status from ..webdriver import WebDriver, WebDriverSession from ..webdriver import bypass_cloudflare, get_driver_cookies from ..errors import MissingRequirementsError +from ..typing import Cookies from .defaults import DEFAULT_HEADERS, WEBVIEW_HAEDERS async def get_args_from_webview(url: str) -> dict: @@ -105,4 +116,53 @@ def get_session_from_browser(url: str, webdriver: WebDriver = None, proxy: str = proxies={"https": proxy, "http": proxy}, timeout=timeout, impersonate="chrome" - )
\ No newline at end of file + ) +def get_cookie_params_from_dict(cookies: Cookies, url: str = None, domain: str = None) -> list[CookieParam]: + [CookieParam.from_json({ + "name": key, + "value": value, + "url": url, + "domain": domain + }) for key, value in cookies.items()] + +async def get_args_from_nodriver( + url: str, + proxy: str = None, + timeout: int = 120, + cookies: Cookies = None +) -> dict: + if not has_nodriver: + raise MissingRequirementsError('Install "nodriver" package | pip install -U nodriver') + if debug.logging: + print(f"Open nodriver with url: {url}") + browser = await nodriver.start( + browser_args=None if proxy is None else [f"--proxy-server={proxy}"], + ) + domain = urlparse(url).netloc + if cookies is None: + cookies = {} + else: + await browser.cookies.set_all(get_cookie_params_from_dict(cookies, url=url, domain=domain)) + page = await browser.get(url) + for c in await browser.cookies.get_all(): + if c.domain.endswith(domain): + cookies[c.name] = c.value + user_agent = await page.evaluate("window.navigator.userAgent") + await page.wait_for("body:not(.no-js)", timeout=timeout) + await page.close() + browser.stop() + return { + "cookies": cookies, + "headers": { + **DEFAULT_HEADERS, + "user-agent": user_agent, + "referer": url, + }, + "proxy": proxy + } + +def merge_cookies(cookies: Iterator[Morsel], response: Response) -> Cookies: + if cookies is None: + cookies = {} + for cookie in response.cookies.jar: + cookies[cookie.name] = cookie.value
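Two of the new helpers in g4f/requests/__init__.py appear to build a value without returning it: get_cookie_params_from_dict() discards its list comprehension and merge_cookies() loses the merged dict whenever the caller passes cookies=None. If that is unintended, a corrected standalone sketch could look like the following; the response argument is assumed to expose a cookies.jar the way curl_cffi responses do:

    from typing import Optional

    Cookies = dict[str, str]

    def get_cookie_params_from_dict(cookies: Cookies, url: str = None, domain: str = None) -> list:
        # Same comprehension as in the diff, with the missing "return" added.
        from nodriver.cdp.network import CookieParam  # assumes nodriver is installed
        return [
            CookieParam.from_json({"name": name, "value": value, "url": url, "domain": domain})
            for name, value in cookies.items()
        ]

    def merge_cookies(cookies: Optional[Cookies], response) -> Cookies:
        # Same loop as in the diff, returning the mapping so a None input still
        # yields the cookies collected from the response.
        if cookies is None:
            cookies = {}
        for cookie in response.cookies.jar:
            cookies[cookie.name] = cookie.value
        return cookies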
\ No newline at end of file diff --git a/g4f/requests/raise_for_status.py b/g4f/requests/raise_for_status.py index 0e91505e..1699d9a4 100644 --- a/g4f/requests/raise_for_status.py +++ b/g4f/requests/raise_for_status.py @@ -11,6 +11,8 @@ class CloudflareError(ResponseStatusError): ... def is_cloudflare(text: str) -> bool: + if "<title>Attention Required! | Cloudflare</title>" in text: + return True return '<div id="cf-please-wait">' in text or "<title>Just a moment...</title>" in text def is_openai(text: str) -> bool: diff --git a/g4f/version.py b/g4f/version.py index eda2b8fe..403ce370 100644 --- a/g4f/version.py +++ b/g4f/version.py @@ -116,4 +116,4 @@ class VersionUtils: except Exception as e: print(f'Failed to check g4f version: {e}') -utils = VersionUtils()
\ No newline at end of file +utils = VersionUtils() diff --git a/requirements-min.txt b/requirements-min.txt index 2944babd..3923c556 100644 --- a/requirements-min.txt +++ b/requirements-min.txt @@ -2,4 +2,4 @@ requests aiohttp brotli pycryptodome -curl_cffi>=0.6.2
\ No newline at end of file +nest_asyncio
\ No newline at end of file diff --git a/requirements.txt b/requirements.txt index fbb548a3..1a014bac 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,6 @@ PyExecJS duckduckgo-search>=5.0 nest_asyncio werkzeug -loguru pillow platformdirs fastapi @@ -21,3 +20,4 @@ pywebview plyer cryptography nodriver +cloudscraper @@ -12,7 +12,8 @@ INSTALL_REQUIRE = [ "requests", "aiohttp", "brotli", - "pycryptodome" + "pycryptodome", + "nest_asyncio", ] EXTRA_REQUIRE = { @@ -20,27 +21,19 @@ EXTRA_REQUIRE = { "curl_cffi>=0.6.2", "certifi", "browser_cookie3", # get_cookies - "PyExecJS", # GptForLove, Vercel "duckduckgo-search>=5.0" ,# internet.search "beautifulsoup4", # internet.search and bing.create_images "brotli", # openai, bing - # webdriver - #"undetected-chromedriver>=3.5.5", - #"setuptools", - #"selenium-wire" - # webview - "pywebview", "platformdirs", - "plyer", "cryptography", - #### "aiohttp_socks", # proxy "pillow", # image "cairosvg", # svg image "werkzeug", "flask", # gui - "loguru", "fastapi", # api + "fastapi", # api "uvicorn", "nest_asyncio", # api - "pycryptodome" # openai + "pycryptodome", # openai + "nodriver", ], "image": [ "pillow", @@ -59,12 +52,9 @@ EXTRA_REQUIRE = { "plyer", "cryptography" ], - "openai": [ - "pycryptodome" - ], "api": [ "loguru", "fastapi", - "uvicorn", "nest_asyncio" + "uvicorn", ], "gui": [ "werkzeug", "flask", @@ -74,9 +64,6 @@ EXTRA_REQUIRE = { ], "local": [ "gpt4all" - ], - "curl_cffi": [ - "curl_cffi>=0.6.2", ] } |
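The setup.py hunk above promotes nest_asyncio into the base requirements, trims several extras (the separate openai and curl_cffi extras disappear), and adds nodriver to the first extras group. For orientation, a minimal sketch of how these two dictionaries typically feed setuptools; the name, version and abbreviated extras below are placeholders rather than the project's real metadata:

    from setuptools import setup, find_packages

    INSTALL_REQUIRE = ["requests", "aiohttp", "brotli", "pycryptodome", "nest_asyncio"]

    EXTRA_REQUIRE = {
        "api": ["loguru", "fastapi", "uvicorn"],
        "gui": ["werkzeug", "flask"],      # abbreviated for illustration
    }

    setup(
        name="g4f",                        # placeholder
        version="0.0.0",                   # placeholder
        packages=find_packages(),
        install_requires=INSTALL_REQUIRE,
        extras_require=EXTRA_REQUIRE,      # enables e.g.: pip install -U g4f[api]
    )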