path: root/etc/testing/test_all.py
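"""Smoke test for the GPT-3.5 and GPT-4 model aliases exposed by g4f.

Each model is asked for a short streamed completion; models that respond
without raising are collected and printed at the end.
"""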
import asyncio
import sys
import traceback
from pathlib import Path

# Make the repository root importable so the local g4f package is found
# when this script is run directly from the checkout.
sys.path.append(str(Path(__file__).parent.parent.parent))

import g4f


async def test(model: g4f.Model):
    """Return True if the model produces a streamed response, False otherwise."""
    try:
        try:
            # Try the synchronous streaming API first.
            for response in g4f.ChatCompletion.create(
                    model=model,
                    messages=[{"role": "user", "content": "write a poem about a tree"}],
                    temperature=0.1,
                    stream=True
            ):
                print(response, end="")

            print()
        except Exception:
            # Fall back to the async API if the synchronous call fails.
            for response in await g4f.ChatCompletion.create_async(
                    model=model,
                    messages=[{"role": "user", "content": "write a poem about a tree"}],
                    temperature=0.1,
                    stream=True
            ):
                print(response, end="")

            print()

        return True
    except Exception as e:
        # Neither API worked for this model; report the failure and move on.
        print(model.name, "not working:", e)
        traceback.print_exc()
        return False


async def start_test():
    """Run the test against each model alias and report which ones respond."""
    models_to_test = [
        # GPT-3.5 4K Context
        g4f.models.gpt_35_turbo,
        g4f.models.gpt_35_turbo_0613,

        # GPT-3.5 16K Context
        g4f.models.gpt_35_turbo_16k,
        g4f.models.gpt_35_turbo_16k_0613,

        # GPT-4 8K Context
        g4f.models.gpt_4,
        g4f.models.gpt_4_0613,

        # GPT-4 32K Context
        g4f.models.gpt_4_32k,
        g4f.models.gpt_4_32k_0613,
    ]

    models_working = []

    for model in models_to_test:
        if await test(model):
            models_working.append(model.name)

    print("working models:", models_working)


if __name__ == "__main__":
    asyncio.run(start_test())