47 lines
1.4 KiB
Python
47 lines
1.4 KiB
Python
import openai
|
|
|
|
from .utils import ConfigType, ChatType
|
|
|
|
|
|
def openai_api_key(api_key: str) -> None:
    """
    Store *api_key* on the openai module so later API calls authenticate.

    :param api_key: the OpenAI API key string to use for all requests
    """
    setattr(openai, 'api_key', api_key)
|
|
|
|
|
|
def print_models() -> None:
    """
    Print all models supported by the current AI.

    Ready engine ids are printed one per line, sorted alphabetically by id.
    Engines reported as not ready are collected and printed together on a
    trailing line (only if there are any).
    """
    not_ready = []
    # The API returns engines in no guaranteed order; sort by id for
    # stable, readable output.
    for engine in sorted(openai.Engine.list()['data'], key=lambda x: x['id']):
        if engine['ready']:
            print(engine['id'])
        else:
            not_ready.append(engine['id'])
    # Idiomatic truthiness test instead of len(...) > 0.
    if not_ready:
        print('\nNot ready: ' + ', '.join(not_ready))
|
|
|
|
|
|
def ai(chat: ChatType,
       config: ConfigType,
       number: int
       ) -> tuple[list[str], dict[str, int]]:
    """
    Make AI request with the given chat history and configuration.

    Return AI response and tokens used.

    :param chat: chat history (messages) passed straight to the API
    :param config: configuration mapping; ``config['openai']`` must be a
        dict providing ``model``, ``temperature``, ``max_tokens``,
        ``top_p``, ``frequency_penalty`` and ``presence_penalty``
    :param number: how many completions to request (the API's ``n``)
    :return: tuple of (list of whitespace-stripped completion texts,
        token-usage dict from the API response)
    :raises RuntimeError: if ``config['openai']`` is not a dict
    """
    if not isinstance(config['openai'], dict):
        raise RuntimeError('Configuration openai is not a dict.')
    # Hoist the repeated config['openai'][...] lookup into one local.
    options = config['openai']
    response = openai.ChatCompletion.create(
        model=options['model'],
        messages=chat,
        temperature=options['temperature'],
        max_tokens=options['max_tokens'],
        top_p=options['top_p'],
        n=number,
        frequency_penalty=options['frequency_penalty'],
        presence_penalty=options['presence_penalty'])
    # One stripped text per requested completion.
    result = [choice['message']['content'].strip()
              for choice in response['choices']]  # type: ignore
    return result, dict(response['usage'])  # type: ignore
|