from __future__ import annotations

import requests

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class Opchatgpts(BaseProvider):
    url                   = "https://opchatgpts.net"
    working               = True
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        temperature   = kwargs.get("temperature", 0.8)
        max_tokens    = kwargs.get("max_tokens", 1024)
        system_prompt = kwargs.get(
            "system_prompt",
            "Converse as if you were an AI assistant. Be friendly, creative.")

        payload = _create_payload(
            messages      = messages,
            temperature   = temperature,
            max_tokens    = max_tokens,
            system_prompt = system_prompt)

        # The endpoint returns the whole reply in a single JSON object,
        # so yield it once rather than streaming chunks.
        response = requests.post(
            "https://opchatgpts.net/wp-json/ai-chatbot/v1/chat", json=payload)
        response.raise_for_status()
        yield response.json()["reply"]


def _create_payload(
    messages: list[dict[str, str]],
    temperature: float,
    max_tokens: int, system_prompt: str) -> dict:
    # Build the request body in the shape expected by the site's WordPress chatbot endpoint.
    return {
        "env"             : "chatbot",
        "session"         : "N/A",
        "prompt"          : "\n",
        "context"         : system_prompt,
        "messages"        : messages,
        "newMessage"      : messages[-1]["content"],
        "userName"        : '<div class="mwai-name-text">User:</div>',
        "aiName"          : '<div class="mwai-name-text">AI:</div>',
        "model"           : "gpt-3.5-turbo",
        "temperature"     : temperature,
        "maxTokens"       : max_tokens,
        "maxResults"      : 1,
        "apiKey"          : "",
        "service"         : "openai",
        "embeddingsIndex" : "",
        "stop"            : "",
    }
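

# Example usage (illustrative sketch only; the exact import path is an assumption
# and the provider must be imported through its package so the relative imports
# above resolve):
#
#     from g4f.Provider import Opchatgpts  # assumed package-level re-export
#
#     for reply in Opchatgpts.create_completion(
#             model="gpt-3.5-turbo",
#             messages=[{"role": "user", "content": "Hello!"}],
#             stream=False):
#         print(reply)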