test_question_cmd: test fixes and cleanup

This commit is contained in:
juk0de 2023-09-23 08:10:35 +02:00
parent b83b396c7b
commit 5c46bbe865

View File

@@ -313,42 +313,6 @@ class TestQuestionCmd(TestQuestionCmdBase):
message.to_file()
return message
def create_multiple_messages(self) -> list[Message]:
    """
    Create and persist three test messages.

    Two messages go to the cache directory (the first without an
    answer, the second with one) and a third, answerless message
    goes to the DB directory. Every message is written to disk via
    ``to_file()`` before being returned, in creation order.
    """
    cache_base = Path(self.cache_dir.name)
    db_base = Path(self.db_dir.name)
    messages = [
        # cached message without an answer
        Message(Question('Question 1'),
                ai='foo',
                model='bla',
                file_path=cache_base / '0001.txt'),
        # cached message with an answer
        Message(Question('Question 2'),
                Answer('Answer 0'),
                ai='openai',
                model='gpt-3.5-turbo',
                file_path=cache_base / '0002.txt'),
        # DB message without an answer
        Message(Question('Question 3'),
                ai='openai',
                model='gpt-3.5-turbo',
                file_path=db_base / '0003.txt'),
    ]
    for msg in messages:
        msg.to_file()
    return messages
def input_message(self, args: argparse.Namespace) -> Message:
    """
    Build the expected input message for a question from the given
    CLI arguments.

    Only the first entry of ``args.ask`` is used — message creation
    via ``question.create_message()`` is covered by other tests.
    The answer of an input message is always empty.
    """
    first_question = Question(args.ask[0])
    return Message(first_question,
                   tags=args.output_tags,
                   ai=args.AI,
                   model=args.model)
def message_list(self, tmp_dir: tempfile.TemporaryDirectory) -> list[Path]:
    """
    Return the sorted list of message files found in *tmp_dir*.

    The glob pattern matches names whose suffix starts with 't' or
    'y' (e.g. '.txt'), which excludes the '.next' bookkeeping file.
    """
    matches = Path(tmp_dir.name).glob('*.[ty]*')
    return sorted(matches)
@@ -362,7 +326,10 @@ class TestQuestionCmdAsk(TestQuestionCmd):
Test single answer with no errors.
"""
mock_create_ai.side_effect = self.mock_create_ai
expected_question = self.input_message(self.args)
expected_question = Message(Question(self.args.ask[0]),
tags=self.args.output_tags,
ai=self.args.AI,
model=self.args.model)
fake_ai = self.mock_create_ai(self.args, self.config)
expected_responses = fake_ai.request(expected_question,
Chat([]),
@@ -389,7 +356,10 @@ class TestQuestionCmdAsk(TestQuestionCmd):
mock_from_dir.return_value = chat
mock_create_ai.side_effect = self.mock_create_ai
expected_question = self.input_message(self.args)
expected_question = Message(Question(self.args.ask[0]),
tags=self.args.output_tags,
ai=self.args.AI,
model=self.args.model)
fake_ai = self.mock_create_ai(self.args, self.config)
expected_responses = fake_ai.request(expected_question,
Chat([]),
@@ -416,7 +386,10 @@ class TestQuestionCmdAsk(TestQuestionCmd):
has been correctly stored in the cache.
"""
mock_create_ai.side_effect = self.mock_create_ai_with_error
expected_question = self.input_message(self.args)
expected_question = Message(Question(self.args.ask[0]),
tags=self.args.output_tags,
ai=self.args.AI,
model=self.args.model)
# execute the command
with self.assertRaises(AIError):
@@ -445,7 +418,6 @@ class TestQuestionCmdRepeat(TestQuestionCmd):
# -> expect two identical messages (except for the file_path)
self.args.ask = None
self.args.repeat = []
self.args.output_tags = []
self.args.overwrite = False
fake_ai = self.mock_create_ai(self.args, self.config)
expected_response = fake_ai.request(message,
@@ -561,19 +533,42 @@ class TestQuestionCmdRepeat(TestQuestionCmd):
cached_msg = chat.msg_gather(loc='cache')
self.assertEqual(len(self.message_list(self.cache_dir)), 2)
self.assert_messages_equal(cached_msg, [message] + new_expected_response)
print(cached_msg)
print(message)
print(new_expected_question)
@mock.patch('chatmastermind.commands.question.create_ai')
def test_repeat_multiple_questions(self, mock_create_ai: MagicMock) -> None:
"""
Repeat multiple questions.
"""
# chat = ChatDB.from_dir(Path(self.cache_dir.name),
# Path(self.db_dir.name))
# 1. === create three questions ===
# cached message without an answer
message1 = Message(Question('Question 1'),
ai='foo',
model='bla',
file_path=Path(self.cache_dir.name) / '0001.txt')
# cached message with an answer
message2 = Message(Question('Question 2'),
Answer('Answer 0'),
ai='openai',
model='gpt-3.5-turbo',
file_path=Path(self.cache_dir.name) / '0002.txt')
# DB message without an answer
message3 = Message(Question('Question 3'),
ai='openai',
model='gpt-3.5-turbo',
file_path=Path(self.db_dir.name) / '0003.txt')
message1.to_file()
message2.to_file()
message3.to_file()
questions = [message1, message2, message3]
expected_responses: list[Message] = []
fake_ai = self.mock_create_ai(self.args, self.config)
for question in questions:
expected_responses += fake_ai.request(question,
Chat([]),
self.args.num_answers,
set(self.args.output_tags)).messages
# 2. repeat all three questions (without overwriting)
# 2. === repeat all three questions (without overwriting) ===
self.args.ask = None
self.args.repeat = ['0001', '0002', '0003']
self.args.overwrite = False
@@ -581,7 +576,13 @@ class TestQuestionCmdRepeat(TestQuestionCmd):
# two new files should be in the cache directory
# * the repeated cached message with answer
# * the repeated DB message
# -> the cached message wihtout answer should be overwritten
# -> the cached message without answer should be overwritten
self.assertEqual(len(self.message_list(self.cache_dir)), 4)
self.assertEqual(len(self.message_list(self.db_dir)), 1)
# FIXME: also compare actual content!
expected_cache_messages = [expected_responses[0], message2, expected_responses[1], expected_responses[2]]
chat = ChatDB.from_dir(Path(self.cache_dir.name),
Path(self.db_dir.name))
cached_msg = chat.msg_gather(loc='cache')
print(f"Cached: {cached_msg}")
print(f"Expected: {expected_cache_messages}")
self.assert_messages_equal(cached_msg, expected_cache_messages)