Compare commits
d72abdc4c0...7591a71471

11 commits:

| SHA1 |
|---|
| 7591a71471 |
| e02074d96b |
| 0c83ef6ce7 |
| 8046f1a424 |
| 2f95654c52 |
| 0449f70c63 |
| 0e1267139f |
| 02dab2807c |
| 70164a1d45 |
| 8f95a362d2 |
| ee363d9894 |
@@ -2,7 +2,8 @@
 Implements the OpenAI client classes and functions.
 """
 import openai
-from typing import Optional, Union
+import tiktoken
+from typing import Optional, Union, Generator
 from ..tags import Tag
 from ..message import Message, Answer
 from ..chat import Chat
@@ -12,6 +13,52 @@ from ..configuration import OpenAIConfig
 ChatType = list[dict[str, str]]
 
 
+class OpenAIAnswer:
+    def __init__(self,
+                 idx: int,
+                 streams: dict[int, 'OpenAIAnswer'],
+                 response: openai.ChatCompletion,
+                 tokens: Tokens,
+                 encoding: tiktoken.core.Encoding) -> None:
+        self.idx = idx
+        self.streams = streams
+        self.response = response
+        self.position: int = 0
+        self.encoding = encoding
+        self.data: list[str] = []
+        self.finished: bool = False
+        self.tokens = tokens
+
+    def stream(self) -> Generator[str, None, None]:
+        while True:
+            if not self.next():
+                continue
+            if len(self.data) <= self.position:
+                break
+            yield self.data[self.position]
+            self.position += 1
+
+    def next(self) -> bool:
+        if self.finished:
+            return True
+        try:
+            chunk = next(self.response)
+        except StopIteration:
+            self.finished = True
+        if not self.finished:
+            found_choice = False
+            for choice in chunk['choices']:
+                if not choice['finish_reason']:
+                    self.streams[choice['index']].data.append(choice['delta']['content'])
+                    self.tokens.completion += len(self.encoding.encode(choice['delta']['content']))
+                    self.tokens.total = self.tokens.prompt + self.tokens.completion
+                if choice['index'] == self.idx:
+                    found_choice = True
+            if not found_choice:
+                return False
+        return True
+
+
 class OpenAI(AI):
     """
     The OpenAI AI client.
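The class above multiplexes one streamed response across several answer generators: each consumer pulls chunks from the shared iterator and files every piece into the buffer of the stream it belongs to. A rough standalone illustration of that fan-out (the names below are stand-ins, not the classes from this diff):

```python
# Minimal sketch of the fan-out idea behind OpenAIAnswer: several consumers
# share one chunk iterator; each pulled chunk is routed into the buffer of
# the stream it belongs to, so no piece is lost regardless of who pulls it.
from typing import Generator

class FanOutStream:
    def __init__(self, idx: int, streams: dict, source) -> None:
        self.idx = idx
        self.streams = streams   # shared: maps choice index -> FanOutStream
        self.source = source     # shared chunk iterator
        self.data: list[str] = []
        self.position = 0
        self.finished = False

    def stream(self) -> Generator[str, None, None]:
        while True:
            if not self._pull():
                continue
            if self.position >= len(self.data):
                break
            yield self.data[self.position]
            self.position += 1

    def _pull(self) -> bool:
        if self.finished:
            return True
        try:
            chunk = next(self.source)
        except StopIteration:
            self.finished = True
            return True
        # route each piece to the stream it belongs to
        for idx, piece in chunk:
            self.streams[idx].data.append(piece)
        return any(idx == self.idx for idx, _ in chunk)

# Two answers interleaved in one response:
chunks = iter([[(0, 'Hello '), (1, 'Salut ')], [(0, 'world'), (1, 'monde')]])
streams: dict[int, FanOutStream] = {}
for n in range(2):
    streams[n] = FanOutStream(n, streams, chunks)
print(''.join(streams[0].stream()))  # Hello world
print(''.join(streams[1].stream()))  # Salut monde
```

Note that consuming stream 0 to the end still fills stream 1's buffer, which is why the streams can be handed out as independent generators even though only one HTTP response backs them.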
@@ -21,7 +68,7 @@ class OpenAI(AI):
         self.ID = config.ID
         self.name = config.name
         self.config = config
-        openai.api_key = config.api_key
+        openai.api_key = self.config.api_key
 
     def request(self,
                 question: Message,
@@ -33,7 +80,9 @@ class OpenAI(AI):
         chat history. The nr. of requested answers corresponds to the
         nr. of messages in the 'AIResponse'.
         """
-        oai_chat = self.openai_chat(chat, self.config.system, question)
+        self.encoding = tiktoken.encoding_for_model(self.config.model)
+        oai_chat, prompt_tokens = self.openai_chat(chat, self.config.system, question)
+        tokens: Tokens = Tokens(prompt_tokens, 0, prompt_tokens)
         response = openai.ChatCompletion.create(
             model=self.config.model,
             messages=oai_chat,
@@ -41,22 +90,24 @@ class OpenAI(AI):
             max_tokens=self.config.max_tokens,
             top_p=self.config.top_p,
             n=num_answers,
+            stream=True,
             frequency_penalty=self.config.frequency_penalty,
             presence_penalty=self.config.presence_penalty)
-        question.answer = Answer(response['choices'][0]['message']['content'])
+        streams: dict[int, OpenAIAnswer] = {}
+        for n in range(num_answers):
+            streams[n] = OpenAIAnswer(n, streams, response, tokens, self.encoding)
+        question.answer = Answer(streams[0].stream())
         question.tags = set(otags) if otags is not None else None
         question.ai = self.ID
         question.model = self.config.model
         answers: list[Message] = [question]
-        for choice in response['choices'][1:]:  # type: ignore
+        for idx in range(1, num_answers):
             answers.append(Message(question=question.question,
-                                   answer=Answer(choice['message']['content']),
+                                   answer=Answer(streams[idx].stream()),
                                    tags=otags,
                                    ai=self.ID,
                                    model=self.config.model))
-        return AIResponse(answers, Tokens(response['usage']['prompt_tokens'],
-                                          response['usage']['completion_tokens'],
-                                          response['usage']['total_tokens']))
+        return AIResponse(answers, tokens)
 
     def models(self) -> list[str]:
         """
@@ -83,24 +134,26 @@ class OpenAI(AI):
             print('\nNot ready: ' + ', '.join(not_ready))
 
     def openai_chat(self, chat: Chat, system: str,
-                    question: Optional[Message] = None) -> ChatType:
+                    question: Optional[Message] = None) -> tuple[ChatType, int]:
         """
         Create a chat history with system message in OpenAI format.
         Optionally append a new question.
         """
         oai_chat: ChatType = []
+        prompt_tokens: int = 0
 
-        def append(role: str, content: str) -> None:
+        def append(role: str, content: str) -> int:
            oai_chat.append({'role': role, 'content': content.replace("''", "'")})
+            return len(self.encoding.encode(', '.join(['role:', oai_chat[-1]['role'], 'content:', oai_chat[-1]['content']])))
 
-        append('system', system)
+        prompt_tokens += append('system', system)
         for message in chat.messages:
             if message.answer:
-                append('user', message.question)
-                append('assistant', message.answer)
+                prompt_tokens += append('user', message.question)
+                prompt_tokens += append('assistant', str(message.answer))
         if question:
-            append('user', question.question)
-        return oai_chat
+            prompt_tokens += append('user', question.question)
+        return oai_chat, prompt_tokens
 
     def tokens(self, data: Union[Message, Chat]) -> int:
         raise NotImplementedError
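The reworked append helper estimates prompt tokens by encoding a "role: …, content: …" serialization of each message. That serialization is this project's own approximation, not an official accounting scheme, which is presumably why the test expectations later in this diff change to locally computed numbers. A minimal standalone sketch of the same counting:

```python
# Minimal sketch: approximate prompt tokens per chat message with tiktoken,
# mirroring the ', '.join(['role:', role, 'content:', content]) scheme above.
import tiktoken

encoding = tiktoken.encoding_for_model('gpt-3.5-turbo')

def count_message_tokens(role: str, content: str) -> int:
    serialized = ', '.join(['role:', role, 'content:', content])
    return len(encoding.encode(serialized))

prompt_tokens = 0
prompt_tokens += count_message_tokens('system', 'You are a helpful assistant.')
prompt_tokens += count_message_tokens('user', 'Translate "Guten Morgen" to English.')
print(prompt_tokens)  # total estimated prompt tokens for the two messages
```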
chatmastermind/commands/common.py (new file, 69 lines)
@@ -0,0 +1,69 @@
"""
Contains shared functions for the various CMM subcommands.
"""

import argparse
from pathlib import Path
from ..message import Message, MessageError, source_code


def read_text_file(file: Path) -> str:
    with open(file) as r:
        content = r.read().strip()
    return content


def add_file_as_text(question_parts: list[str], file: str) -> None:
    """
    Add the given file as plain text to the question part list.
    If the file is a Message, add the answer.
    """
    file_path = Path(file)
    content: str
    try:
        message = Message.from_file(file_path)
        if message and message.answer:
            content = message.answer
    except MessageError:
        content = read_text_file(Path(file))
    if len(content) > 0:
        question_parts.append(content)


def add_file_as_code(question_parts: list[str], file: str) -> None:
    """
    Add all source code from the given file. If no code segments can be extracted,
    the whole content is added as source code segment. If the file is a Message,
    extract the source code from the answer.
    """
    file_path = Path(file)
    content: str
    try:
        message = Message.from_file(file_path)
        if message and message.answer:
            content = message.answer
    except MessageError:
        with open(file) as r:
            content = r.read().strip()
    # extract and add source code
    code_parts = source_code(content, include_delims=True)
    if len(code_parts) > 0:
        question_parts += code_parts
    else:
        question_parts.append(f"```\n{content}\n```")


def invert_input_tag_args(args: argparse.Namespace) -> None:
    """
    Changes the semantics of the INPUT tags for this command:
    * no tags specified on the CLI -> no tags are selected
    * empty tags specified on the CLI -> all tags are selected
    """
    if args.or_tags is None:
        args.or_tags = set()
    elif len(args.or_tags) == 0:
        args.or_tags = None
    if args.and_tags is None:
        args.and_tags = set()
    elif len(args.and_tags) == 0:
        args.and_tags = None
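add_file_as_code delegates the extraction to source_code from ..message. A condensed, self-contained sketch of its extract-or-wrap behavior (extract_fenced is a simplified stand-in for the real extractor):

```python
# Minimal sketch of add_file_as_code's extract-or-wrap fallback; the
# extract_fenced helper is a simplified stand-in for message.source_code.
def extract_fenced(text: str) -> list[str]:
    blocks: list[str] = []
    lines: list[str] = []
    inside = False
    for line in text.split('\n'):
        if line.strip().startswith('```'):
            lines.append(line)
            if inside:                      # closing fence: flush the block
                blocks.append('\n'.join(lines))
                lines = []
            inside = not inside
        elif inside:
            lines.append(line)
    return blocks

parts: list[str] = []
content = "Intro\n```\nprint('hi')\n```"
blocks = extract_fenced(content)
# if nothing could be extracted, wrap the whole content as one code segment
parts += blocks if blocks else [f"```\n{content}\n```"]
print(parts)  # ["```\nprint('hi')\n```"]
```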
@@ -3,9 +3,10 @@ import argparse
 from pathlib import Path
 from itertools import zip_longest
 from copy import deepcopy
+from .common import invert_input_tag_args, add_file_as_code, add_file_as_text
 from ..configuration import Config
 from ..chat import ChatDB, msg_location
-from ..message import Message, MessageFilter, MessageError, Question, source_code
+from ..message import Message, MessageFilter, Question
 from ..ai_factory import create_ai
 from ..ai import AI, AIResponse
@@ -14,47 +15,6 @@ class QuestionCmdError(Exception):
     pass
 
 
-def add_file_as_text(question_parts: list[str], file: str) -> None:
-    """
-    Add the given file as plain text to the question part list.
-    If the file is a Message, add the answer.
-    """
-    file_path = Path(file)
-    content: str
-    try:
-        message = Message.from_file(file_path)
-        if message and message.answer:
-            content = message.answer
-    except MessageError:
-        with open(file) as r:
-            content = r.read().strip()
-    if len(content) > 0:
-        question_parts.append(content)
-
-
-def add_file_as_code(question_parts: list[str], file: str) -> None:
-    """
-    Add all source code from the given file. If no code segments can be extracted,
-    the whole content is added as source code segment. If the file is a Message,
-    extract the source code from the answer.
-    """
-    file_path = Path(file)
-    content: str
-    try:
-        message = Message.from_file(file_path)
-        if message and message.answer:
-            content = message.answer
-    except MessageError:
-        with open(file) as r:
-            content = r.read().strip()
-    # extract and add source code
-    code_parts = source_code(content, include_delims=True)
-    if len(code_parts) > 0:
-        question_parts += code_parts
-    else:
-        question_parts.append(f"```\n{content}\n```")
-
-
 def create_msg_args(msg: Message, args: argparse.Namespace) -> argparse.Namespace:
     """
     Takes an existing message and CLI arguments, and returns modified args based
@@ -101,7 +61,7 @@ def create_message(chat: ChatDB, args: argparse.Namespace) -> Message:
     if code_file is not None and len(code_file) > 0:
         add_file_as_code(question_parts, code_file)
 
-    full_question = '\n\n'.join(question_parts)
+    full_question = '\n\n'.join([str(s) for s in question_parts])
 
     message = Message(question=Question(full_question),
                       tags=args.output_tags,
@@ -129,13 +89,16 @@ def make_request(ai: AI, chat: ChatDB, message: Message, args: argparse.Namespace
                              args.output_tags)
     # only write the response messages to the cache,
     # don't add them to the internal list
-    chat.cache_write(response.messages)
     for idx, msg in enumerate(response.messages):
-        print(f"=== ANSWER {idx+1} ===")
-        print(msg.answer)
+        print(f"=== ANSWER {idx+1} ===", flush=True)
+        if msg.answer:
+            for piece in msg.answer:
+                print(piece, end='', flush=True)
+            print()
     if response.tokens:
         print("===============")
         print(response.tokens)
+    chat.cache_write(response.messages)
 
 
 def repeat_messages(messages: list[Message], chat: ChatDB, args: argparse.Namespace, config: Config) -> None:
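Moving chat.cache_write after the print loop lets the pieces reach the terminal as they arrive; writing first would drain the generator-backed answers before anything is shown. The underlying constraint is that plain generators are one-shot, as this toy illustration shows:

```python
# Minimal sketch of the ordering hazard: persisting a generator-backed answer
# before streaming it would consume the generator, because generators are
# one-shot (the buffered Answer class later in this diff works around this).
def stream():
    yield 'Hello '
    yield 'world'

pieces = stream()
snapshot_too_early = ''.join(pieces)  # consumes the generator: 'Hello world'
printed_later = ''.join(pieces)       # generator already exhausted: ''
print(repr(snapshot_too_early), repr(printed_later))
```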
@@ -160,22 +123,6 @@
         make_request(ai, chat, message, msg_args)
 
 
-def invert_input_tag_args(args: argparse.Namespace) -> None:
-    """
-    Changes the semantics of the INPUT tags for this command:
-    * not tags specified on the CLI -> no tags are selected
-    * empty tags specified on the CLI -> all tags are selected
-    """
-    if args.or_tags is None:
-        args.or_tags = set()
-    elif len(args.or_tags) == 0:
-        args.or_tags = None
-    if args.and_tags is None:
-        args.and_tags = set()
-    elif len(args.and_tags) == 0:
-        args.and_tags = None
-
-
 def question_cmd(args: argparse.Namespace, config: Config) -> None:
     """
     Handler for the 'question' command.
chatmastermind/commands/translation.py (new file, 105 lines)
@@ -0,0 +1,105 @@
import argparse
import mimetypes
from pathlib import Path
from .common import invert_input_tag_args, read_text_file
from ..configuration import Config
from ..message import MessageFilter, Message, Question
from ..chat import ChatDB, msg_location


class TranslationCmdError(Exception):
    pass


text_separator: str = 'TEXT:'


def assert_document_type_supported_openai(document_file: Path) -> None:
    doctype, _ = mimetypes.guess_type(document_file)
    if doctype != 'text/plain':
        raise TranslationCmdError("AI 'OpenAI' only supports document type 'text/plain'")


def translation_prompt_openai(source_lang: str, target_lang: str) -> str:
    """
    Return the prompt for GPT that tells it to do the translation.
    """
    return f"Translate the text below the line {text_separator} from {source_lang} to {target_lang}."


def create_message_openai(chat: ChatDB, args: argparse.Namespace) -> Message:
    """
    Create a new message from the given arguments and write it to the cache directory.

    Message format:
    1. Translation prompt (tells GPT to do a translation)
    2. Glossary (if specified as an argument)
    3. User-provided prompt enhancements
    4. Translation separator
    5. User-provided text to be translated

    The text to be translated is determined as follows:
    - if a document is provided in the arguments, translate its content
    - if no document is provided, translate the last text argument

    The other text arguments will be put into the "header" and can be used
    to improve the translation prompt.
    """
    text_args: list[str] = []
    if args.create is not None:
        text_args = args.create
    elif args.ask is not None:
        text_args = args.ask
    else:
        raise TranslationCmdError("No input text found")

    # extract user prompt and user text to be translated
    user_text: str
    user_prompt: str
    if args.input_document is not None:
        assert_document_type_supported_openai(Path(args.input_document))
        user_text = read_text_file(Path(args.input_document))
        user_prompt = '\n\n'.join([str(s) for s in text_args])
    else:
        user_text = text_args[-1]
        user_prompt = '\n\n'.join([str(s) for s in text_args[:-1]])

    # build full question string
    # FIXME: add glossaries if given
    question_text: str = '\n\n'.join([translation_prompt_openai(args.source_lang, args.target_lang),
                                      user_prompt,
                                      text_separator,
                                      user_text])
    # create and write the message
    message = Message(question=Question(question_text),
                      tags=args.output_tags,
                      ai=args.AI,
                      model=args.model)
    # only write the new message to the cache,
    # don't add it to the internal list
    chat.cache_write([message])
    return message


def translation_cmd(args: argparse.Namespace, config: Config) -> None:
    """
    Handler for the 'translation' command. Creates and executes translation
    requests based on the input and selected AI. Depending on the AI, the
    whole process may be significantly different (e.g. DeepL vs OpenAI).
    """
    invert_input_tag_args(args)
    mfilter = MessageFilter(tags_or=args.or_tags,
                            tags_and=args.and_tags,
                            tags_not=args.exclude_tags)
    chat = ChatDB.from_dir(cache_path=Path(config.cache),
                           db_path=Path(config.db),
                           mfilter=mfilter,
                           glob=args.glob,
                           loc=msg_location(args.location))
    # if it's a new translation, create and store it immediately
    # FIXME: check AI type
    if args.ask or args.create:
        # message = create_message(chat, args)
        create_message_openai(chat, args)
    if args.create:
        return
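To make the assembled message format concrete, here is roughly the question text create_message_openai builds for a two-argument ask (the input values are made up; only the string assembly mirrors the code above):

```python
# Reconstructed example of the question text built by create_message_openai
# for text_args = ['Keep it informal.', 'Guten Morgen'], German -> English.
prompt = "Translate the text below the line TEXT: from German to English."
user_prompt = "Keep it informal."
question_text = '\n\n'.join([prompt, user_prompt, 'TEXT:', 'Guten Morgen'])
print(question_text)
# Translate the text below the line TEXT: from German to English.
#
# Keep it informal.
#
# TEXT:
#
# Guten Morgen
```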
chatmastermind/glossary.py (new file, 125 lines)
@@ -0,0 +1,125 @@
"""
Module implementing glossaries for translations.
"""
import yaml
import tempfile
import shutil
import csv
from pathlib import Path
from dataclasses import dataclass, field
from typing import Type, TypeVar

GlossaryInst = TypeVar('GlossaryInst', bound='Glossary')


class GlossaryError(Exception):
    pass


def str_presenter(dumper: yaml.Dumper, data: str) -> yaml.ScalarNode:
    """
    Changes the YAML dump style to multiline syntax for multiline strings.
    """
    if len(data.splitlines()) > 1:
        return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
    return dumper.represent_scalar('tag:yaml.org,2002:str', data)


@dataclass
class Glossary:
    """
    A glossary consists of the following parameters:
    - Name (freely selectable)
    - Path (full file path)
    - Source language
    - Target language
    - Entries (pairs of source-language and target-language terms)
    - ID (automatically generated / modified, required by DeepL)
    """

    name: str
    source_lang: str
    target_lang: str
    entries: dict[str, str] = field(default_factory=lambda: dict())
    file_path: Path | None = None
    ID: str | None = None

    @classmethod
    def from_file(cls: Type[GlossaryInst], file_path: Path) -> GlossaryInst:
        """
        Create a glossary from the given file.
        """
        with open(file_path, "r") as fd:
            try:
                data = yaml.load(fd, Loader=yaml.FullLoader)
                return cls(name=data['Name'],
                           source_lang=data['SourceLang'],
                           target_lang=data['TargetLang'],
                           entries=data['Entries'],
                           file_path=file_path,
                           ID=data['ID'] if data['ID'] != 'None' else None)
            except Exception:
                raise GlossaryError(f"'{file_path}' does not contain a valid glossary")

    def to_file(self, file_path: Path | None = None) -> None:
        """
        Write glossary to given file.
        """
        if file_path:
            self.file_path = file_path
        if not self.file_path:
            raise GlossaryError("Got no valid path to write glossary")
        # write YAML
        with tempfile.NamedTemporaryFile(dir=self.file_path.parent, prefix=self.file_path.name, mode="w", delete=False) as temp_fd:
            temp_file_path = Path(temp_fd.name)
            data = {'Name': self.name,
                    'ID': str(self.ID),
                    'SourceLang': self.source_lang,
                    'TargetLang': self.target_lang,
                    'Entries': self.entries}
            yaml.dump(data, temp_fd, sort_keys=False)
        shutil.move(temp_file_path, self.file_path)

    def export_csv(self, dictionary: dict[str, str], file_path: Path) -> None:
        """
        Export the 'entries' of this glossary to a file in CSV format (compatible with DeepL).
        """
        with open(file_path, 'w', newline='', encoding='utf-8') as csvfile:
            writer = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
            for source_entry, target_entry in self.entries.items():
                writer.writerow([source_entry, target_entry])

    def export_tsv(self, entries: dict[str, str], file_path: Path) -> None:
        """
        Export the 'entries' of this glossary to a file in TSV format (compatible with DeepL).
        """
        with open(file_path, 'w', encoding='utf-8') as file:
            for source_entry, target_entry in self.entries.items():
                file.write(f"{source_entry}\t{target_entry}\n")

    def import_csv(self, file_path: Path) -> None:
        """
        Import the entries from the given CSV file to those of the current glossary.
        Existing entries are overwritten.
        """
        try:
            with open(file_path, mode='r', encoding='utf-8') as csvfile:
                reader = csv.reader(csvfile, delimiter=',', quotechar='"')
                self.entries = {rows[0]: rows[1] for rows in reader if len(rows) >= 2}
        except Exception as e:
            raise GlossaryError(f"Error importing CSV: {e}")

    def import_tsv(self, file_path: Path) -> None:
        """
        Import the entries from the given TSV file to those of the current glossary.
        Existing entries are overwritten.
        """
        try:
            with open(file_path, mode='r', encoding='utf-8') as tsvfile:
                self.entries = {}
                for line in tsvfile:
                    parts = line.strip().split('\t')
                    if len(parts) == 2:
                        self.entries[parts[0]] = parts[1]
        except Exception as e:
            raise GlossaryError(f"Error importing TSV: {e}")
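A short usage sketch of the round trip this module supports (the paths and terms are illustrative):

```python
# Usage sketch: build a glossary, write it as YAML, and round-trip the
# entries through the DeepL-compatible TSV format. Paths are illustrative.
from pathlib import Path
from chatmastermind.glossary import Glossary

glossary = Glossary(name='IT terms', source_lang='en', target_lang='de',
                    entries={'commit': 'Commit', 'branch': 'Zweig'})
glossary.to_file(Path('/tmp/it-terms.yaml'))  # YAML on disk, via tempfile + move

glossary.export_tsv(glossary.entries, Path('/tmp/it-terms.tsv'))
glossary.entries = {}
glossary.import_tsv(Path('/tmp/it-terms.tsv'))
print(glossary.entries)  # {'commit': 'Commit', 'branch': 'Zweig'}
```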
@@ -14,6 +14,7 @@ from .commands.tags import tags_cmd
 from .commands.config import config_cmd
 from .commands.hist import hist_cmd
 from .commands.print import print_cmd
+from .commands.translation import translation_cmd
 from .chat import msg_location
@@ -102,7 +103,7 @@ def create_parser() -> argparse.ArgumentParser:
     # 'tags' command parser
     tags_cmd_parser = cmdparser.add_parser('tags',
                                            help="Manage tags.",
-                                           aliases=['t'])
+                                           aliases=['T'])
     tags_cmd_parser.set_defaults(func=tags_cmd)
     tags_group = tags_cmd_parser.add_mutually_exclusive_group(required=True)
     tags_group.add_argument('-l', '--list', help="List all tags and their frequency",
@@ -136,6 +137,21 @@ def create_parser() -> argparse.ArgumentParser:
     print_cmd_modes.add_argument('-a', '--answer', help='Only print the answer', action='store_true')
     print_cmd_modes.add_argument('-S', '--only-source-code', help='Only print embedded source code', action='store_true')
 
+    # 'translation' command parser
+    translation_cmd_parser = cmdparser.add_parser('translation', parents=[ai_parser, tag_parser],
+                                                  help="Ask, create and repeat translations.",
+                                                  aliases=['t'])
+    translation_cmd_parser.set_defaults(func=translation_cmd)
+    translation_group = translation_cmd_parser.add_mutually_exclusive_group(required=True)
+    translation_group.add_argument('-a', '--ask', nargs='+', help='Ask to translate the given text', metavar='TEXT')
+    translation_group.add_argument('-c', '--create', nargs='+', help='Create a translation', metavar='TEXT')
+    translation_group.add_argument('-r', '--repeat', nargs='*', help='Repeat a translation', metavar='MESSAGE')
+    translation_cmd_parser.add_argument('-S', '--source-lang', help="Source language", metavar="LANGUAGE", required=True)
+    translation_cmd_parser.add_argument('-T', '--target-lang', help="Target language", metavar="LANGUAGE", required=True)
+    translation_cmd_parser.add_argument('-G', '--glossaries', nargs='+', help="List of glossaries", metavar="GLOSSARY")
+    translation_cmd_parser.add_argument('-d', '--input-document', help="Document to translate", metavar="FILE")
+    translation_cmd_parser.add_argument('-D', '--output-document', help="Path for the translated document", metavar="FILE")
 
     argcomplete.autocomplete(parser)
     return parser
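Note that the 't' alias migrates from 'tags' (now 'T') to the new 'translation' subcommand. Since the subcommand wires several mutually exclusive input modes, here is a self-contained sketch of how that argument surface parses; the prog name and argv values are made up for illustration:

```python
# Standalone sketch of the translation subcommand's argument surface;
# 'cmm' and the example argv are stand-ins, not the project's real entry point.
import argparse

parser = argparse.ArgumentParser(prog='cmm')
sub = parser.add_subparsers(dest='command')
tr = sub.add_parser('translation', aliases=['t'])
group = tr.add_mutually_exclusive_group(required=True)
group.add_argument('-a', '--ask', nargs='+', metavar='TEXT')
group.add_argument('-c', '--create', nargs='+', metavar='TEXT')
group.add_argument('-r', '--repeat', nargs='*', metavar='MESSAGE')
tr.add_argument('-S', '--source-lang', required=True, metavar='LANGUAGE')
tr.add_argument('-T', '--target-lang', required=True, metavar='LANGUAGE')

args = parser.parse_args(['t', '-a', 'Guten Morgen', '-S', 'de', '-T', 'en'])
print(args.ask, args.source_lang, args.target_lang)  # ['Guten Morgen'] de en
```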
@@ -5,7 +5,9 @@ import pathlib
 import yaml
 import tempfile
 import shutil
+import io
 from typing import Type, TypeVar, ClassVar, Optional, Any, Union, Final, Literal, Iterable, Tuple
+from typing import Generator, Iterator
 from typing import get_args as typing_get_args
 from dataclasses import dataclass, asdict, field
 from .tags import Tag, TagLine, TagError, match_tags, rename_tags
@@ -49,7 +51,7 @@ def source_code(text: str, include_delims: bool = False) -> list[str]:
     code_lines: list[str] = []
     in_code_block = False
 
-    for line in text.split('\n'):
+    for line in str(text).split('\n'):
         if line.strip().startswith('```'):
             if include_delims:
                 code_lines.append(line)
@@ -142,30 +144,100 @@ class Answer(str):
     txt_header: ClassVar[str] = '==== ANSWER ===='
     yaml_key: ClassVar[str] = 'answer'
 
-    def __new__(cls: Type[AnswerInst], string: str) -> AnswerInst:
+    def __init__(self, data: Union[str, Generator[str, None, None]]) -> None:
+        # Indicator of whether all of data has been processed
+        self.is_exhausted: bool = False
+
+        # Initialize data
+        self.iterator: Iterator[str] = self._init_data(data)
+
+        # Set up the buffer to hold the 'Answer' content
+        self.buffer: io.StringIO = io.StringIO()
+
+    def _init_data(self, data: Union[str, Generator[str, None, None]]) -> Iterator[str]:
         """
-        Make sure the answer string does not contain the header as a whole line.
+        Process input data (either a string or a string generator)
         """
-        if cls.txt_header in string.split('\n'):
-            raise MessageError(f"Answer '{string}' contains the header '{cls.txt_header}'")
-        instance = super().__new__(cls, string)
-        return instance
+        if isinstance(data, str):
+            yield data
+        else:
+            yield from data
+
+    def __str__(self) -> str:
+        """
+        Output all content when converted into a string
+        """
+        # Ensure all data has been processed
+        for _ in self:
+            pass
+        # Return the 'Answer' content
+        return self.buffer.getvalue()
+
+    def __repr__(self) -> str:
+        return repr(str(self))
+
+    def __iter__(self) -> Generator[str, None, None]:
+        """
+        Allows the object to be iterable
+        """
+        # Generate content if not all data has been processed
+        if not self.is_exhausted:
+            yield from self.generator_iter()
+        else:
+            yield self.buffer.getvalue()
+
+    def generator_iter(self) -> Generator[str, None, None]:
+        """
+        Main generator method to process data
+        """
+        for piece in self.iterator:
+            # Write to buffer and yield piece for the iterator
+            self.buffer.write(piece)
+            yield piece
+        self.is_exhausted = True  # Set the flag that all data has been processed
+        # If the header occurs in the 'Answer' content, raise an error
+        if f'\n{self.txt_header}' in self.buffer.getvalue() or self.buffer.getvalue().startswith(self.txt_header):
+            raise MessageError(f"Answer {repr(self.buffer.getvalue())} contains the header {repr(Answer.txt_header)}")
+
+    def __eq__(self, other: object) -> bool:
+        """
+        Comparing the object to a string or another object
+        """
+        if isinstance(other, str):
+            return str(self) == other  # Compare the string value of this object to the other string
+        # Default behavior for comparing non-string objects
+        return super().__eq__(other)
+
+    def __hash__(self) -> int:
+        """
+        Generate a hash for the object based on its string representation.
+        """
+        return hash(str(self))
+
+    def __format__(self, format_spec: str) -> str:
+        """
+        Return a formatted version of the string as per the format specification.
+        """
+        return str(self).__format__(format_spec)
 
     @classmethod
     def from_list(cls: Type[AnswerInst], strings: list[str]) -> AnswerInst:
         """
-        Build Question from a list of strings. Make sure strings do not contain the header.
+        Build Answer from a list of strings. Make sure strings do not contain the header.
         """
-        if cls.txt_header in strings:
-            raise MessageError(f"Question contains the header '{cls.txt_header}'")
-        instance = super().__new__(cls, '\n'.join(strings).strip())
-        return instance
+        def _gen() -> Generator[str, None, None]:
+            if len(strings) > 0:
+                yield strings[0]
+                for s in strings[1:]:
+                    yield '\n'
+                    yield s
+        return cls(_gen())
 
     def source_code(self, include_delims: bool = False) -> list[str]:
         """
         Extract and return all source code sections.
         """
-        return source_code(self, include_delims)
+        return source_code(str(self), include_delims)
 
 
 class Question(str):
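The net effect of this hunk: an Answer now behaves like a lazily materialized string. The first iteration streams pieces while copying them into a buffer, and any later str() or iteration replays the buffer instead of the (spent) generator. A condensed standalone sketch of that contract (LazyText is a stand-in without the header validation):

```python
# Condensed sketch of the buffered-lazy-string contract that the new Answer
# implements; LazyText is a stand-in, without the header check or __eq__.
import io
from typing import Generator, Iterator, Union

class LazyText:
    def __init__(self, data: Union[str, Generator[str, None, None]]) -> None:
        self.iterator: Iterator[str] = iter([data]) if isinstance(data, str) else data
        self.buffer = io.StringIO()
        self.is_exhausted = False

    def __iter__(self):
        if self.is_exhausted:
            yield self.buffer.getvalue()  # replay the buffered content
            return
        for piece in self.iterator:
            self.buffer.write(piece)      # buffer while streaming
            yield piece
        self.is_exhausted = True

    def __str__(self) -> str:
        for _ in self:                    # drain whatever is left
            pass
        return self.buffer.getvalue()

def pieces() -> Generator[str, None, None]:
    yield 'streamed '
    yield 'answer'

text = LazyText(pieces())
print(list(text))  # first pass streams: ['streamed ', 'answer']
print(str(text))   # replayed from the buffer: 'streamed answer'
```

This also explains the many str(self.answer) coercions elsewhere in the diff: the object is no longer a plain str subclass, so call sites that need the full text must materialize it explicitly.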
@@ -441,7 +513,7 @@ class Message():
         output.append(self.question)
         if self.answer:
             output.append(Answer.txt_header)
-            output.append(self.answer)
+            output.append(str(self.answer))
         return '\n'.join(output)
 
     def to_file(self, file_path: Optional[pathlib.Path] = None, mformat: MessageFormat = message_default_format) -> None:  # noqa: 11
@@ -491,7 +563,7 @@ class Message():
             temp_fd.write(f'{ModelLine.from_model(self.model)}\n')
             temp_fd.write(f'{Question.txt_header}\n{self.question}\n')
             if self.answer:
-                temp_fd.write(f'{Answer.txt_header}\n{self.answer}\n')
+                temp_fd.write(f'{Answer.txt_header}\n{str(self.answer)}\n')
         shutil.move(temp_file_path, file_path)
 
     def __to_file_yaml(self, file_path: pathlib.Path) -> None:
@@ -560,7 +632,7 @@ class Message():
                 or (mfilter.ai and (not self.ai or mfilter.ai != self.ai))  # noqa: W503
                 or (mfilter.model and (not self.model or mfilter.model != self.model))  # noqa: W503
                 or (mfilter.question_contains and mfilter.question_contains not in self.question)  # noqa: W503
-                or (mfilter.answer_contains and (not self.answer or mfilter.answer_contains not in self.answer))  # noqa: W503
+                or (mfilter.answer_contains and (not self.answer or mfilter.answer_contains not in str(self.answer)))  # noqa: W503
                 or (mfilter.answer_state == 'available' and not self.answer)  # noqa: W503
                 or (mfilter.ai_state == 'available' and not self.ai)  # noqa: W503
                 or (mfilter.model_state == 'available' and not self.model)  # noqa: W503
@@ -2,3 +2,4 @@ openai
 PyYAML
 argcomplete
 pytest
+tiktoken
@@ -16,26 +16,37 @@ class OpenAITest(unittest.TestCase):
         openai = OpenAI(config)
 
         # Set up the mock response from openai.ChatCompletion.create
-        mock_response = {
+        mock_chunk1 = {
             'choices': [
                 {
-                    'message': {
+                    'index': 0,
+                    'delta': {
                         'content': 'Answer 1'
-                    }
+                    },
+                    'finish_reason': None
                 },
                 {
-                    'message': {
+                    'index': 1,
+                    'delta': {
                         'content': 'Answer 2'
-                    }
+                    },
+                    'finish_reason': None
                 }
             ],
-            'usage': {
-                'prompt_tokens': 10,
-                'completion_tokens': 20,
-                'total_tokens': 30
-            }
         }
-        mock_create.return_value = mock_response
+        mock_chunk2 = {
+            'choices': [
+                {
+                    'index': 0,
+                    'finish_reason': 'stop'
+                },
+                {
+                    'index': 1,
+                    'finish_reason': 'stop'
+                }
+            ],
+        }
+        mock_create.return_value = iter([mock_chunk1, mock_chunk2])
 
         # Create test data
         question = Message(Question('Question'))
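Worth noting for this test change: a streamed ChatCompletion is consumed with next(), so the mock's return value must be an iterator over chunk dicts rather than a single response dict. A minimal sketch of that mocking pattern:

```python
# Minimal sketch: a streaming API is consumed chunk by chunk with next(),
# so the mock must return an iterator, not a plain dict.
from unittest.mock import MagicMock

mock_create = MagicMock()
mock_create.return_value = iter([
    {'choices': [{'index': 0, 'delta': {'content': 'Hi'}, 'finish_reason': None}]},
    {'choices': [{'index': 0, 'finish_reason': 'stop'}]},
])

response = mock_create(model='x', stream=True)
for chunk in response:
    print(chunk['choices'][0].get('finish_reason'))
# None
# stop
```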
@@ -57,9 +68,9 @@ class OpenAITest(unittest.TestCase):
         self.assertIsNotNone(response.tokens)
         self.assertIsInstance(response.tokens, Tokens)
         assert response.tokens
-        self.assertEqual(response.tokens.prompt, 10)
-        self.assertEqual(response.tokens.completion, 20)
-        self.assertEqual(response.tokens.total, 30)
+        self.assertEqual(response.tokens.prompt, 53)
+        self.assertEqual(response.tokens.completion, 6)
+        self.assertEqual(response.tokens.total, 59)
 
         # Assert the mock call to openai.ChatCompletion.create
         mock_create.assert_called_once_with(
@@ -76,6 +87,7 @@ class OpenAITest(unittest.TestCase):
             max_tokens=config.max_tokens,
             top_p=config.top_p,
             n=2,
+            stream=True,
             frequency_penalty=config.frequency_penalty,
             presence_penalty=config.presence_penalty
         )
tests/test_glossary.py (new file, 73 lines)
@@ -0,0 +1,73 @@
import unittest
import tempfile
from pathlib import Path
from chatmastermind.glossary import Glossary


class TestGlossary(unittest.TestCase):

    def test_from_file_valid_yaml(self) -> None:
        # Prepare a temporary YAML file with valid content
        with tempfile.NamedTemporaryFile('w', delete=False) as yaml_file:
            yaml_file.write("Name: Sample\n"
                            "ID: '123'\n"
                            "SourceLang: en\n"
                            "TargetLang: es\n"
                            "Entries:\n"
                            "  hello: hola\n"
                            "  goodbye: adiós\n")
            yaml_file_path = Path(yaml_file.name)

        glossary = Glossary.from_file(yaml_file_path)
        self.assertEqual(glossary.name, "Sample")
        self.assertEqual(glossary.source_lang, "en")
        self.assertEqual(glossary.target_lang, "es")
        self.assertEqual(glossary.entries, {"hello": "hola", "goodbye": "adiós"})
        yaml_file_path.unlink()  # Remove the temporary file

    def test_to_file_writes_yaml(self) -> None:
        # Create glossary instance
        glossary = Glossary(name="Test", source_lang="en", target_lang="fr", entries={"yes": "oui"})

        # Use a temporary file
        with tempfile.NamedTemporaryFile('w', delete=False) as tmp_file:
            file_path = Path(tmp_file.name)
            glossary.to_file(file_path)

        with open(file_path, 'r') as file:
            content = file.read()

        self.assertIn("Name: Test", content)
        self.assertIn("SourceLang: en", content)
        self.assertIn("TargetLang: fr", content)
        self.assertIn("Entries", content)
        self.assertIn("yes: oui", content)
        file_path.unlink()  # Remove the temporary file

    def test_import_export_csv(self) -> None:
        glossary = Glossary(name="Test", source_lang="en", target_lang="fr", entries={})

        # First export to CSV
        with tempfile.NamedTemporaryFile('w', delete=False) as csvfile:
            csv_file_path = Path(csvfile.name)
            glossary.entries = {"hello": "salut", "goodbye": "au revoir"}
            glossary.export_csv(glossary.entries, csv_file_path)

        # Now import CSV
        glossary.import_csv(csv_file_path)
        self.assertEqual(glossary.entries, {"hello": "salut", "goodbye": "au revoir"})
        csv_file_path.unlink()  # Remove the temporary file

    def test_import_export_tsv(self) -> None:
        glossary = Glossary(name="Test", source_lang="en", target_lang="fr", entries={})

        # First export to TSV
        with tempfile.NamedTemporaryFile('w', delete=False) as tsvfile:
            tsv_file_path = Path(tsvfile.name)
            glossary.entries = {"hello": "salut", "goodbye": "au revoir"}
            glossary.export_tsv(glossary.entries, tsv_file_path)

        # Now import TSV
        glossary.import_tsv(tsv_file_path)
        self.assertEqual(glossary.entries, {"hello": "salut", "goodbye": "au revoir"})
        tsv_file_path.unlink()  # Remove the temporary file
@@ -91,7 +91,7 @@ class QuestionTestCase(unittest.TestCase):
 class AnswerTestCase(unittest.TestCase):
     def test_answer_with_header(self) -> None:
         with self.assertRaises(MessageError):
-            Answer(f"{Answer.txt_header}\nno")
+            str(Answer(f"{Answer.txt_header}\nno"))
 
     def test_answer_with_legal_header(self) -> None:
         answer = Answer(f"This is a line containing '{Answer.txt_header}'\nIt is what it is.")