path: root/g4f/Provider
author    Tekky <98614666+xtekky@users.noreply.github.com>    2023-10-02 18:55:53 +0200
committer GitHub <noreply@github.com>                         2023-10-02 18:55:53 +0200
commit    79a9eef02a20227340c3e90537fcb9cbe72779b4 (patch)
tree      59b9e30e081b2e1c46b672bc95c5610186516885 /g4f/Provider
parent    ~ | g4f v-0.1.4.4 (diff)
parent    Add Phind Provider (diff)
Diffstat (limited to '')
-rw-r--r--  g4f/Provider/Bing.py      | 22
-rw-r--r--  g4f/Provider/Phind.py     | 76
-rw-r--r--  g4f/Provider/__init__.py  |  2
3 files changed, 91 insertions, 9 deletions
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index 05be27e7..e4e56519 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -3,6 +3,7 @@ from __future__ import annotations
import random
import json
import os
+import urllib.parse
from aiohttp import ClientSession, ClientTimeout
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, get_cookies
@@ -245,7 +246,6 @@ async def stream_generate(
                 await wss.send_str(create_message(conversation, prompt, context))
                 response_txt = ''
-                result_text = ''
                 returned_text = ''
                 final = False
@@ -260,14 +260,18 @@ async def stream_generate(
                         if response.get('type') == 1 and response['arguments'][0].get('messages'):
                             message = response['arguments'][0]['messages'][0]
                             if (message['contentOrigin'] != 'Apology'):
-                                response_txt = result_text + \
-                                    message['adaptiveCards'][0]['body'][0].get('text', '')
-
-                                if message.get('messageType'):
-                                    inline_txt = message['adaptiveCards'][0]['body'][0]['inlines'][0].get('text')
-                                    response_txt += inline_txt + '\n'
-                                    result_text += inline_txt + '\n'
-
+                                if 'adaptiveCards' in message:
+                                    card = message['adaptiveCards'][0]['body'][0]
+                                    if "text" in card:
+                                        response_txt = card.get('text')
+                                    if message.get('messageType'):
+                                        inline_txt = card['inlines'][0].get('text')
+                                        response_txt += inline_txt + '\n'
+                                elif message.get('contentType') == "IMAGE":
+                                    query = urllib.parse.quote(message.get('text'))
+                                    url = f"\nhttps://www.bing.com/images/create?q={query}"
+                                    response_txt += url
+                                    final = True
                                 if response_txt.startswith(returned_text):
                                     new = response_txt[len(returned_text):]
                                     if new != "\n":
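The new branch only reads a handful of fields from each Bing message: when the message carries adaptiveCards, the visible text (plus any inline hint) comes from the first card body; when contentType is IMAGE, the prompt text is URL-encoded into a bing.com/images/create link and the stream is marked final. A minimal sketch of that branching, run against hand-made payloads; the dict shapes below are assumptions inferred from the fields the hunk reads, not captured Bing responses.

from __future__ import annotations
import urllib.parse

def extract_text(message: dict, response_txt: str = "") -> tuple[str, bool]:
    # Mirrors the branching added above; returns (response_txt, final).
    final = False
    if 'adaptiveCards' in message:
        card = message['adaptiveCards'][0]['body'][0]
        if "text" in card:
            response_txt = card.get('text')
        if message.get('messageType'):
            response_txt += card['inlines'][0].get('text') + '\n'
    elif message.get('contentType') == "IMAGE":
        query = urllib.parse.quote(message.get('text'))
        response_txt += f"\nhttps://www.bing.com/images/create?q={query}"
        final = True  # an image answer ends the stream
    return response_txt, final

# Hypothetical payloads:
text_msg = {"adaptiveCards": [{"body": [{"text": "Hello from Bing"}]}]}
image_msg = {"contentType": "IMAGE", "text": "a red fox in the snow"}
print(extract_text(text_msg))   # ('Hello from Bing', False)
print(extract_text(image_msg))  # ('\nhttps://www.bing.com/images/create?q=a%20red%20fox%20in%20the%20snow', True)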
diff --git a/g4f/Provider/Phind.py b/g4f/Provider/Phind.py
new file mode 100644
index 00000000..0db4e3c2
--- /dev/null
+++ b/g4f/Provider/Phind.py
@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+import random
+from datetime import datetime
+
+from ..typing import AsyncGenerator
+from ..requests import StreamSession
+from .base_provider import AsyncGeneratorProvider, format_prompt
+
+
+class Phind(AsyncGeneratorProvider):
+    url = "https://www.phind.com"
+    working = True
+    supports_gpt_4 = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncGenerator:
+        chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
+        user_id = ''.join(random.choice(chars) for _ in range(24))
+        data = {
+            "question": format_prompt(messages),
+            "webResults": [],
+            "options": {
+                "date": datetime.now().strftime("%d.%m.%Y"),
+                "language": "en",
+                "detailed": True,
+                "anonUserId": user_id,
+                "answerModel": "GPT-4",
+                "creativeMode": False,
+                "customLinks": []
+            },
+            "context": ""
+        }
+        headers = {
+            "Authority": cls.url,
+            "Accept": "application/json, text/plain, */*",
+            "Origin": cls.url,
+            "Referer": f"{cls.url}/"
+        }
+        async with StreamSession(headers=headers, timeout=(5, 180), proxies={"https": proxy}, impersonate="chrome107") as session:
+            async with session.post(f"{cls.url}/api/infer/answer", json=data) as response:
+                response.raise_for_status()
+                new_lines = 0
+                async for line in response.iter_lines():
+                    if not line:
+                        continue
+                    if line.startswith(b"data: "):
+                        line = line[6:]
+                        if line.startswith(b"<PHIND_METADATA>"):
+                            continue
+                        if line:
+                            if new_lines:
+                                yield "".join(["\n" for _ in range(int(new_lines / 2))])
+                                new_lines = 0
+                            yield line.decode()
+                        else:
+                            new_lines += 1
+
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("proxy", "str"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
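The new provider consumes Phind's /api/infer/answer endpoint as a line stream: payload lines carry a "data: " prefix, frames starting with <PHIND_METADATA> are dropped, and pairs of empty payloads are folded back into newline characters before text is yielded. A minimal sketch replaying that filtering over hand-made byte lines; the sample input is invented, not a captured Phind response.

def decode_stream(raw_lines):
    # Mirrors the line handling in Phind.create_async_generator above.
    new_lines = 0
    for line in raw_lines:
        if not line:
            continue                      # skip empty transport lines
        if line.startswith(b"data: "):
            line = line[6:]               # strip the "data: " prefix
            if line.startswith(b"<PHIND_METADATA>"):
                continue                  # drop metadata frames
            if line:
                if new_lines:
                    # two empty payloads are folded into one newline
                    yield "\n" * (new_lines // 2)
                    new_lines = 0
                yield line.decode()
            else:
                new_lines += 1

sample = [b"data: Hello", b"data: ", b"data: ", b"data: world",
          b"data: <PHIND_METADATA>{...}", b""]
print("".join(decode_stream(sample)))  # prints "Hello" and "world" on separate lines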
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index b2f0f729..2ac4191b 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -31,6 +31,7 @@ from .Opchatgpts import Opchatgpts
from .OpenaiChat import OpenaiChat
from .OpenAssistant import OpenAssistant
from .PerplexityAi import PerplexityAi
+from .Phind import Phind
from .Raycast import Raycast
from .Theb import Theb
from .Vercel import Vercel
@@ -85,6 +86,7 @@ __all__ = [
'OpenaiChat',
'OpenAssistant',
'PerplexityAi',
+ 'Phind',
'Theb',
'Vercel',
'Vitalentum',
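With the import and __all__ entry in place, the provider can be selected explicitly from client code. A hedged usage sketch through the package's ChatCompletion entry point; the argument names follow the project's documented interface at this version, but treat the snippet as illustrative rather than a tested example.

import g4f
from g4f.Provider import Phind

# The params class property added in Phind.py summarizes the supported arguments.
print(Phind.params)

response = g4f.ChatCompletion.create(
    model="gpt-4",
    provider=Phind,
    messages=[{"role": "user", "content": "Explain asyncio in one paragraph."}],
    stream=True,
)
for chunk in response:
    print(chunk, end="", flush=True)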