author     Heiner Lohaus <hlohaus@users.noreply.github.com>  2024-02-09 06:36:28 +0100
committer  Heiner Lohaus <hlohaus@users.noreply.github.com>  2024-02-09 06:36:28 +0100
commit     23e002f52ffaffbaf98fa4251ceca70e465db05a (patch)
tree       f2ba5b5bec6deccef02afd6b29c3502a97b831a5
parent     Add example for Image Upload & Generation (diff)
-rw-r--r--  README.md                              15
-rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py  25
-rw-r--r--  setup.py                                2
3 files changed, 34 insertions(+), 8 deletions(-)
diff --git a/README.md b/README.md
index 9db1679d..6167587e 100644
--- a/README.md
+++ b/README.md
@@ -358,6 +358,21 @@ response = g4f.ChatCompletion.create(
 # Displaying the response
 print(response)
+
+from g4f.image import ImageResponse
+
+# Get image links from the response
+for chunk in g4f.ChatCompletion.create(
+    model=g4f.models.default, # Using the default model
+    provider=g4f.Provider.OpenaiChat, # Specifying the provider as OpenaiChat
+    messages=[{"role": "user", "content": "Create images with dogs"}],
+    access_token="...", # Needs an access token from a Plus user
+    stream=True,
+    ignore_stream=True
+):
+    if isinstance(chunk, ImageResponse):
+        print(chunk.images) # Print the generated image links
+        print(chunk.alt) # Print the prompt used for image generation
 ```
 ##### Using Browser
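Side note: a minimal sketch (not part of this commit) of how the streamed chunks from the new README example could be wrapped in a helper that returns only the generated image links. The `collect_image_links` name is hypothetical, and treating `chunk.images` as either a single URL or a list of URLs is an assumption, flagged in the comments.

```python
import g4f
from g4f.image import ImageResponse

def collect_image_links(prompt: str, access_token: str) -> list:
    """Run the OpenaiChat image example and collect the generated image links."""
    links = []
    for chunk in g4f.ChatCompletion.create(
        model=g4f.models.default,            # default model, as in the README example
        provider=g4f.Provider.OpenaiChat,    # OpenaiChat provider
        messages=[{"role": "user", "content": prompt}],
        access_token=access_token,           # access token from a ChatGPT Plus account
        stream=True,
        ignore_stream=True,
    ):
        if isinstance(chunk, ImageResponse):
            # Assumption: chunk.images may be a single URL or a list of URLs
            images = chunk.images if isinstance(chunk.images, list) else [chunk.images]
            links.extend(images)
    return links

# Usage (token elided):
# print(collect_image_links("Create images with dogs", access_token="..."))
```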
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 32aee9fb..c122da46 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -342,26 +342,30 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 raise MissingAuthError(f'Missing "access_token"')
             cls._cookies = cookies
-        headers = {"Authorization": f"Bearer {access_token}"}
+        auth_headers = {"Authorization": f"Bearer {access_token}"}
         async with StreamSession(
             proxies={"https": proxy},
             impersonate="chrome110",
             timeout=timeout,
-            cookies=dict([(name, value) for name, value in cookies.items() if name == "_puid"])
+            headers={"Cookie": "; ".join(f"{k}={v}" for k, v in cookies.items())}
         ) as session:
             try:
                 image_response = None
                 if image:
-                    image_response = await cls.upload_image(session, headers, image, kwargs.get("image_name"))
+                    image_response = await cls.upload_image(session, auth_headers, image, kwargs.get("image_name"))
             except Exception as e:
                 yield e
             end_turn = EndTurn()
-            model = cls.get_model(model or await cls.get_default_model(session, headers))
+            model = cls.get_model(model or await cls.get_default_model(session, auth_headers))
             model = "text-davinci-002-render-sha" if model == "gpt-3.5-turbo" else model
             while not end_turn.is_end:
+                arkose_token = await cls.get_arkose_token(session)
                 data = {
                     "action": action,
-                    "arkose_token": await cls.get_arkose_token(session),
+                    "arkose_token": arkose_token,
+                    "conversation_mode": {"kind": "primary_assistant"},
+                    "force_paragen": False,
+                    "force_rate_limit": False,
                     "conversation_id": conversation_id,
                     "parent_message_id": parent_id,
                     "model": model,
@@ -373,7 +377,11 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 async with session.post(
                     f"{cls.url}/backend-api/conversation",
                     json=data,
-                    headers={"Accept": "text/event-stream", **headers}
+                    headers={
+                        "Accept": "text/event-stream",
+                        "OpenAI-Sentinel-Arkose-Token": arkose_token,
+                        **auth_headers
+                    }
                 ) as response:
                     if not response.ok:
                         raise RuntimeError(f"Response {response.status_code}: {await response.text()}")
@@ -439,7 +447,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         Returns:
             tuple[str, dict]: A tuple containing the access token and cookies.
         """
-        with get_browser(proxy=proxy) as driver:
+        driver = get_browser(proxy=proxy)
+        try:
             driver.get(f"{cls.url}/")
             WebDriverWait(driver, timeout).until(EC.presence_of_element_located((By.ID, "prompt-textarea")))
             access_token = driver.execute_script(
@@ -451,6 +460,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 "return accessToken;"
             )
             return access_token, get_driver_cookies(driver)
+        finally:
+            driver.close()
 
     @classmethod
     async def get_arkose_token(cls, session: StreamSession) -> str:
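The provider changes above drop the `_puid`-only cookie filter in favor of a single `Cookie` header built from every stored cookie, and reuse one Arkose token for both the request body and the new `OpenAI-Sentinel-Arkose-Token` header; `browse_access_token` now closes the Selenium driver in a `finally` block instead of a `with` statement. Below is a standalone sketch of the cookie-serialization expression; the helper name is illustrative and not part of the patch.

```python
def build_cookie_header(cookies: dict) -> str:
    """Join a name -> value cookie mapping into one Cookie header value,
    mirroring the "; ".join(...) expression passed to StreamSession above."""
    return "; ".join(f"{name}={value}" for name, value in cookies.items())

# Example:
# build_cookie_header({"_puid": "abc", "__Secure-next-auth.session-token": "xyz"})
# -> "_puid=abc; __Secure-next-auth.session-token=xyz"
```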
diff --git a/setup.py b/setup.py
index b8383a7a..4f3b0359 100644
--- a/setup.py
+++ b/setup.py
@@ -15,7 +15,7 @@ INSTALL_REQUIRE = [
 EXTRA_REQUIRE = {
     'all': [
-        "curl_cffi>=0.5.10",
+        "curl_cffi>=0.6.0b9",
         "certifi",
         "async-property", # openai
         "py-arkose-generator", # openai