summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authort.me/xtekky <98614666+xtekky@users.noreply.github.com>2023-04-15 16:21:51 +0200
committert.me/xtekky <98614666+xtekky@users.noreply.github.com>2023-04-15 16:21:51 +0200
commit6c64e459b36f83aaea79579799b451337411975f (patch)
treeaa5a556e6d962ccbbc0239f1874b92d1393dfbe1
parentxtekky.com site (diff)
downloadgpt4free-6c64e459b36f83aaea79579799b451337411975f.tar
gpt4free-6c64e459b36f83aaea79579799b451337411975f.tar.gz
gpt4free-6c64e459b36f83aaea79579799b451337411975f.tar.bz2
gpt4free-6c64e459b36f83aaea79579799b451337411975f.tar.lz
gpt4free-6c64e459b36f83aaea79579799b451337411975f.tar.xz
gpt4free-6c64e459b36f83aaea79579799b451337411975f.tar.zst
gpt4free-6c64e459b36f83aaea79579799b451337411975f.zip
-rw-r--r--README.md22
-rw-r--r--phind/__init__.py145
-rw-r--r--testing/phind_test.py13
3 files changed, 180 insertions, 0 deletions
diff --git a/README.md b/README.md
index 80e9275b..e8e1d78c 100644
--- a/README.md
+++ b/README.md
@@ -21,6 +21,7 @@ This repository provides reverse-engineered language models from various sources
- [Sites with Authentication (Will Reverse Engineer but Need Account Access)](#sites-with-authentication)
- [Usage Examples](#usage-examples)
- [`quora (poe)`](#example-poe)
+ - [`phind`](#example-phind)
- [`t3nsor`](#example-t3nsor)
- [`ora`](#example-ora)
- [`writesonic`](#example-writesonic)
@@ -36,6 +37,7 @@ This repository provides reverse-engineered language models from various sources
| [writesonic.com](https://writesonic.com)|GPT-3.5 / Internet|
| [t3nsor.com](https://t3nsor.com)|GPT-3.5|
| [you.com](https://you.com)|GPT-3.5 / Internet / good search|
+| [phind.com](https://phind.com)|GPT-4 / Internet / good search|
## Sites with Authentication <a name="sites-with-authentication"></a>
@@ -97,6 +99,26 @@ response = quora.Completion.create(model = 'gpt-4',
print(response.completion.choices[0].text)
```
+### Example: `phind` (use like openai pypi package) <a name="example-phind"></a>
+
+```python
+# HELP WANTED: tls_client does not support streaming responses, so the request timeout can be hit on long completions
+
+import phind
+
+prompt = 'hello world'
+
+result = phind.Completion.create(
+ model = 'gpt-4',
+ prompt = prompt,
+ results = phind.Search.create(prompt, actualSearch = False), # create search (set actualSearch to False to disable internet)
+ creative = False,
+ detailed = False,
+ codeContext = '') # up to 3000 chars of code
+
+print(result.completion.choices[0].text)
+```
+
### Example: `t3nsor` (use like openai pypi package) <a name="example-t3nsor"></a>
```python
diff --git a/phind/__init__.py b/phind/__init__.py
new file mode 100644
index 00000000..83a6840a
--- /dev/null
+++ b/phind/__init__.py
@@ -0,0 +1,145 @@
+from urllib.parse import quote
+from tls_client import Session
+from time import time
+from datetime import datetime
+
# Browser headers matching the Chrome 110 TLS fingerprint below. Keeping the
# fingerprint and headers in sync is what lets requests pass phind's
# Cloudflare bot checks.
_CHROME_HEADERS = {
    'authority': 'www.phind.com',
    'accept': '*/*',
    'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
    'content-type': 'application/json',
    'origin': 'https://www.phind.com',
    'referer': 'https://www.phind.com/search',
    'sec-ch-ua': '"Chromium";v="110", "Google Chrome";v="110", "Not:A-Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"macOS"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-origin',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
}

# Single shared HTTP session used by Search and Completion below.
client = Session(client_identifier='chrome110')
client.headers = _CHROME_HEADERS
+
class PhindResponse:
    """OpenAI-style wrapper around a raw phind answer payload (a dict)."""

    class Completion:
        """The `choices` portion of the response."""

        class Choices:
            """One completion choice: its text plus per-choice metadata."""

            def __init__(self, choice: dict) -> None:
                self.text = choice['text']
                # Byte form of the same text, for callers that want bytes.
                self.content = self.text.encode()
                self.index = choice['index']
                self.logprobs = choice['logprobs']
                self.finish_reason = choice['finish_reason']

            def __repr__(self) -> str:
                return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''

        def __init__(self, choices: dict) -> None:
            # Wrap every raw choice dict in a Choices accessor object.
            self.choices = [self.Choices(entry) for entry in choices]

    class Usage:
        """Token accounting reported alongside the completion."""

        def __init__(self, usage_dict: dict) -> None:
            self.prompt_tokens = usage_dict['prompt_tokens']
            self.completion_tokens = usage_dict['completion_tokens']
            self.total_tokens = usage_dict['total_tokens']

        def __repr__(self):
            return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''

    def __init__(self, response_dict: dict) -> None:
        # Keep the raw payload so json() can hand it back untouched.
        self.response_dict = response_dict
        for field in ('id', 'object', 'created', 'model'):
            setattr(self, field, response_dict[field])
        self.completion = self.Completion(response_dict['choices'])
        self.usage = self.Usage(response_dict['usage'])

    def json(self) -> dict:
        """Return the original, unmodified response dict."""
        return self.response_dict
+
+
class Search:
    """Wrapper around phind's Bing-search proxy endpoint."""

    @staticmethod
    def create(prompt: str, actualSearch: bool = True, language: str = 'en') -> dict:
        """Return Bing-style search results for ``prompt``.

        :param prompt: query text to search for.
        :param actualSearch: when ``False``, perform no network request and
            return an empty — but correctly shaped — ``SearchResponse`` stub
            (used to disable internet lookups in Completion.create).
        :param language: browser language code forwarded to the endpoint.
        :return: a Bing ``SearchResponse``-shaped dict.
        """
        if not actualSearch:
            # Offline stub: same schema as a real Bing response, zero hits.
            return {
                '_type': 'SearchResponse',
                'queryContext': {
                    'originalQuery': prompt
                },
                'webPages': {
                    'webSearchUrl': f'https://www.bing.com/search?q={quote(prompt)}',
                    'totalEstimatedMatches': 0,
                    'value': []
                },
                'rankingResponse': {
                    'mainline': {
                        'items': []
                    }
                }
            }

        # Real search: phind proxies the query to Bing and returns the raw
        # result list under 'rawBingResults'.
        return client.post('https://www.phind.com/api/bing/search', json = {
            'q': prompt,
            'userRankList': {},
            'browserLanguage': language}).json()['rawBingResults']
+
class Completion:
    """Wrapper around phind's answer-inference endpoint."""

    @staticmethod
    def create(
            model = 'gpt-4',
            prompt: str = '',
            results: dict = None,
            creative: bool = False,
            detailed: bool = False,
            codeContext: str = '',
            language: str = 'en',
            timeout: int = 200) -> 'PhindResponse':
        """Ask phind a question and return an OpenAI-style response object.

        :param model: one of 'gpt-4', 'gpt-3.5-turbo', 'gpt-3.5'.
        :param prompt: the question to answer.
        :param results: pre-computed Search.create(...) results; when None a
            live internet search is performed first.
        :param creative: phind's "creative" answer option.
        :param detailed: phind's "detailed" answer option.
        :param codeContext: up to 2999 characters of code context.
        :param language: language code sent with the request.
        :param timeout: request timeout in seconds (default 200, matching the
            previous hard-coded value).
        :raises ValueError: on an unknown model or oversized codeContext.
        """
        # Public model names mapped to phind's internal "skill" levels.
        models = {
            'gpt-4'        : 'expert',
            'gpt-3.5-turbo': 'intermediate',
            'gpt-3.5'      : 'intermediate',
        }

        # Validate cheap, local preconditions BEFORE any network round trip
        # (the original ran the search first and raised a bare KeyError on an
        # unknown model).
        if model not in models:
            raise ValueError(f'model must be one of {sorted(models)}')

        if len(codeContext) > 2999:
            raise ValueError('codeContext must be less than 3000 characters')

        if results is None:
            results = Search.create(prompt, actualSearch = True)

        json_data = {
            'question'   : prompt,
            'bingResults': results,
            'codeContext': codeContext,
            'options': {
                'skill'   : models[model],
                'date'    : datetime.now().strftime("%d/%m/%Y"),
                'language': language,
                'detailed': detailed,
                'creative': creative
            }
        }

        response = client.post('https://www.phind.com/api/infer/answer',
                               json=json_data, timeout_seconds=timeout)

        # The server streams SSE-style 'data: ' chunks separated by blank
        # lines; strip the prefixes and join into one completion string.
        completion = ''.join(part.replace('data: ', '')
                             for part in response.text.split('\r\n\r\n'))

        return PhindResponse({
            'id'     : f'cmpl-1337-{int(time())}',
            'object' : 'text_completion',
            'created': int(time()),
            'model'  : models[model],
            'choices': [{
                'text'         : completion,
                'index'        : 0,
                'logprobs'     : None,
                'finish_reason': 'stop'
            }],
            'usage': {
                # NOTE(review): these count characters, not real tokens —
                # kept as-is for compatibility.
                'prompt_tokens'    : len(prompt),
                'completion_tokens': len(completion),
                'total_tokens'     : len(prompt) + len(completion)
            }
        })
diff --git a/testing/phind_test.py b/testing/phind_test.py
new file mode 100644
index 00000000..e3148eb6
--- /dev/null
+++ b/testing/phind_test.py
@@ -0,0 +1,13 @@
import phind


def main() -> None:
    """Smoke-test the phind wrapper end to end (requires network access)."""
    prompt = 'hello world'

    result = phind.Completion.create(
        model = 'gpt-4',
        prompt = prompt,
        # Set actualSearch to False to disable the internet search step.
        results = phind.Search.create(prompt, actualSearch = False),
        creative = False,
        detailed = False,
        codeContext = '')  # up to 3000 chars of code

    print(result.completion.choices[0].text)


# Guard so importing this module does not fire a live network request.
if __name__ == '__main__':
    main()