# NOTE: Could not extend the subclass of 'TypedDict' the way I wanted,
# so I switched to 'dataclass'.
import openai

from .configuration import Config
from .utils import ChatType


def openai_api_key(api_key: str) -> None:
    """Set the API key used for all subsequent OpenAI requests."""
    openai.api_key = api_key


def print_models() -> None:
    """
    Print all models supported by the current AI.

    Ready engines are printed one per line, sorted by id; engines that
    are not ready are collected and listed together at the end.
    """
    # NOTE(review): openai.Engine is the legacy (pre-1.0) API surface —
    # confirm the pinned openai package version still provides it.
    not_ready = []
    for engine in sorted(openai.Engine.list()['data'], key=lambda x: x['id']):
        if engine['ready']:
            print(engine['id'])
        else:
            not_ready.append(engine['id'])
    # Truthiness is the idiomatic form of `len(not_ready) > 0`.
    if not_ready:
        print('\nNot ready: ' + ', '.join(not_ready))


def ai(chat: ChatType,
       config: Config,
       number: int
       ) -> tuple[list[str], dict[str, int]]:
    """
    Make AI request with the given chat history and configuration.

    Send `chat` to the model named in `config.openai`, requesting
    `number` completions with the sampling parameters from the config.
    Return the stripped message contents of every choice and the
    `usage` token counts reported by the API.
    """
    response = openai.ChatCompletion.create(
        model=config.openai.model,
        messages=chat,
        temperature=config.openai.temperature,
        max_tokens=config.openai.max_tokens,
        top_p=config.openai.top_p,
        n=number,
        frequency_penalty=config.openai.frequency_penalty,
        presence_penalty=config.openai.presence_penalty)
    # Comprehension replaces the manual append loop (ruff PERF401).
    result = [choice['message']['content'].strip()
              for choice in response['choices']]  # type: ignore
    return result, dict(response['usage'])  # type: ignore