Initial commit of the code.
This commit is contained in:
parent
f98acf167d
commit
a9dbf87a76
2
.gitignore
vendored
2
.gitignore
vendored
@ -127,3 +127,5 @@ dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
|
||||
.config.yaml
|
||||
14
LICENSE
Normal file
14
LICENSE
Normal file
@ -0,0 +1,14 @@
|
||||
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
|
||||
Version 2, December 2004
|
||||
|
||||
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim or modified
|
||||
copies of this license document, and changing it is allowed as long
|
||||
as the name is changed.
|
||||
|
||||
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
|
||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
||||
|
||||
0. You just DO WHAT THE FUCK YOU WANT TO.
|
||||
|
||||
107
README.md
107
README.md
@ -1,2 +1,107 @@
|
||||
# ChatMastermind
|
||||
Automate chatting with AI from command line, store and compose chat history for the next question
|
||||
|
||||
ChatMastermind is a Python application that automates conversation with AI, stores question-answer pairs with tags, and composes a relevant chat history for the next question.
|
||||
|
||||
The project uses the OpenAI API to generate responses, and stores the data in YAML files. It also allows you to filter the chat history based on tags, and supports autocompletion for tags.
|
||||
|
||||
## Requirements
|
||||
|
||||
- Python 3.6 or higher
|
||||
- openai
|
||||
- PyYAML
|
||||
- argcomplete
|
||||
|
||||
You can install these requirements using `pip`:
|
||||
|
||||
```
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
python main.py [-h] [-p PRINT | -q QUESTION | -D | -d] [-c CONFIG] [-m MAX_TOKENS] [-T TEMPERATURE] [-M MODEL] [-n NUMBER] [-t [TAGS [TAGS ...]]] [-e [EXTAGS [EXTAGS ...]]] [-o [OTAGS [OTAGS ...]]]
|
||||
```
|
||||
|
||||
### Arguments
|
||||
|
||||
- `-p`, `--print`: YAML file to print.
|
||||
- `-q`, `--question`: Question to ask.
|
||||
- `-D`, `--chat-dump`: Print chat as a Python structure.
|
||||
- `-d`, `--chat`: Print chat as readable text.
|
||||
- `-c`, `--config`: Config file name (defaults to `.config.yaml`).
|
||||
- `-m`, `--max-tokens`: Max tokens to use.
|
||||
- `-T`, `--temperature`: Temperature to use.
|
||||
- `-M`, `--model`: Model to use.
|
||||
- `-n`, `--number`: Number of answers to produce (default is 3).
|
||||
- `-t`, `--tags`: List of tag names.
|
||||
- `-e`, `--extags`: List of tag names to exclude.
|
||||
- `-o`, `--output-tags`: List of output tag names (default is the input tags).
|
||||
|
||||
### Examples
|
||||
|
||||
1. Print the contents of a YAML file:
|
||||
|
||||
```
|
||||
python main.py -p example.yaml
|
||||
```
|
||||
|
||||
2. Ask a question:
|
||||
|
||||
```
|
||||
python main.py -q "What is the meaning of life?" -t philosophy -e religion
|
||||
```
|
||||
|
||||
3. Display the chat history as a Python structure:
|
||||
|
||||
```
|
||||
python main.py -D
|
||||
```
|
||||
|
||||
4. Display the chat history as readable text:
|
||||
|
||||
```
|
||||
python main.py -d
|
||||
```
|
||||
|
||||
5. Filter chat history by tags:
|
||||
|
||||
```
|
||||
python main.py -d -t tag1 tag2
|
||||
```
|
||||
|
||||
6. Exclude chat history by tags:
|
||||
|
||||
```
|
||||
python main.py -d -e tag3 tag4
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
The configuration file (`.config.yaml`) should contain the following fields:
|
||||
|
||||
- `openai`:
|
||||
- `api_key`: Your OpenAI API key.
|
||||
- `model`: The name of the OpenAI model to use (e.g. "text-davinci-002").
|
||||
- `temperature`: The temperature value for the model.
|
||||
- `max_tokens`: The maximum number of tokens for the model.
|
||||
- `top_p`: The top P value for the model.
|
||||
- `frequency_penalty`: The frequency penalty value.
|
||||
- `presence_penalty`: The presence penalty value.
|
||||
- `system`: The system message used to set the behavior of the AI.
|
||||
- `db`: The directory where the question-answer pairs are stored in YAML files.
|
||||
|
||||
## Autocompletion
|
||||
|
||||
To activate autocompletion for tags, add the following line to your shell's configuration file (e.g., `.bashrc`, `.zshrc`, or `.profile`):
|
||||
|
||||
```
|
||||
eval "$(register-python-argcomplete chatmastermind)"
|
||||
```
|
||||
|
||||
After adding this line, restart your shell or run `source <your-shell-config-file>` to enable autocompletion for the `chatmastermind` script.
|
||||
|
||||
## License
|
||||
|
||||
This project is licensed under the terms of the WTFPL License.
|
||||
|
||||
|
||||
0
chatmastermind/__init__.py
Normal file
0
chatmastermind/__init__.py
Normal file
245
chatmastermind/main.py
Executable file
245
chatmastermind/main.py
Executable file
@ -0,0 +1,245 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# vim: set fileencoding=utf-8 :
|
||||
|
||||
import yaml
|
||||
import os
|
||||
import io
|
||||
import sys
|
||||
import shutil
|
||||
import openai
|
||||
import pathlib
|
||||
import argcomplete
|
||||
import argparse
|
||||
from pprint import PrettyPrinter
|
||||
from typing import List, Dict, Any, Optional
|
||||
|
||||
terminal_size = shutil.get_terminal_size()
|
||||
terminal_width = terminal_size.columns
|
||||
pp = PrettyPrinter(width=terminal_width).pprint
|
||||
|
||||
|
||||
def run_print_command(args: argparse.Namespace, config: dict) -> None:
|
||||
with open(args.print, 'r') as f:
|
||||
data = yaml.load(f, Loader=yaml.FullLoader)
|
||||
pp(data)
|
||||
|
||||
|
||||
def process_tags(config: dict, tags: list, extags: list) -> None:
|
||||
print(f"Tags: {', '.join(tags)}")
|
||||
if len(extags) > 0:
|
||||
print(f"Excluding tags: {', '.join(extags)}")
|
||||
print()
|
||||
|
||||
|
||||
def append_message(chat: List[Dict[str, str]],
|
||||
role: str,
|
||||
content: str
|
||||
) -> None:
|
||||
chat.append({'role': role, 'content': content.replace("''", "'")})
|
||||
|
||||
|
||||
def message_to_chat(message: Dict[str, str],
|
||||
chat: List[Dict[str, str]]
|
||||
) -> None:
|
||||
append_message(chat, 'user', message['question'])
|
||||
append_message(chat, 'assistant', message['answer'])
|
||||
|
||||
|
||||
def create_chat(question: Optional[str],
|
||||
tags: Optional[List[str]],
|
||||
extags: Optional[List[str]],
|
||||
config: Dict[str, Any]
|
||||
) -> List[Dict[str, str]]:
|
||||
chat = []
|
||||
append_message(chat, 'system', config['system'].strip())
|
||||
for file in sorted(pathlib.Path(config['db']).iterdir()):
|
||||
if file.suffix == '.yaml':
|
||||
with open(file, 'r') as f:
|
||||
data = yaml.load(f, Loader=yaml.FullLoader)
|
||||
data_tags = set(data.get('tags', []))
|
||||
tags_match = \
|
||||
not tags or data_tags.intersection(tags)
|
||||
extags_do_not_match = \
|
||||
not extags or not data_tags.intersection(extags)
|
||||
if tags_match and extags_do_not_match:
|
||||
message_to_chat(data, chat)
|
||||
if question:
|
||||
append_message(chat, 'user', question)
|
||||
return chat
|
||||
|
||||
|
||||
def ai(chat: list[dict[str, str]],
|
||||
config: dict,
|
||||
number: int
|
||||
) -> tuple[list[str], dict[str, int]]:
|
||||
response = openai.ChatCompletion.create(
|
||||
model=config['openai']['model'],
|
||||
messages=chat,
|
||||
temperature=config['openai']['temperature'],
|
||||
max_tokens=config['openai']['max_tokens'],
|
||||
top_p=config['openai']['top_p'],
|
||||
n=number,
|
||||
frequency_penalty=config['openai']['frequency_penalty'],
|
||||
presence_penalty=config['openai']['presence_penalty'])
|
||||
result = []
|
||||
for choice in response['choices']: # type: ignore
|
||||
result.append(choice['message']['content'].strip())
|
||||
return result, dict(response['usage']) # type: ignore
|
||||
|
||||
|
||||
def process_and_display_chat(args: argparse.Namespace,
|
||||
config: dict,
|
||||
dump: bool = False
|
||||
) -> tuple[list[dict[str, str]], list[str]]:
|
||||
tags = args.tags or []
|
||||
extags = args.extags or []
|
||||
process_tags(config, tags, extags)
|
||||
chat = create_chat(args.question, tags, extags, config)
|
||||
display_chat(chat, dump)
|
||||
return chat, tags
|
||||
|
||||
|
||||
def display_chat(chat, dump=False) -> None:
|
||||
if dump:
|
||||
pp(chat)
|
||||
return
|
||||
for message in chat:
|
||||
if message['role'] == 'user':
|
||||
print('-' * terminal_width)
|
||||
if len(message['content']) > terminal_width-len(message['role'])-2:
|
||||
print(f"{message['role'].upper()}:")
|
||||
print(message['content'])
|
||||
else:
|
||||
print(f"{message['role'].upper()}: {message['content']}")
|
||||
|
||||
|
||||
def handle_question(args: argparse.Namespace,
|
||||
config: dict,
|
||||
dump: bool = False
|
||||
) -> None:
|
||||
chat, tags = process_and_display_chat(args, config, dump)
|
||||
otags = args.output_tags or []
|
||||
answers, usage = ai(chat, config, args.number)
|
||||
save_answers(args.question, answers, tags, otags)
|
||||
print("-" * terminal_width)
|
||||
print(f"Usage: {usage}")
|
||||
|
||||
|
||||
def save_answers(question: str,
|
||||
answers: list[str],
|
||||
tags: list[str],
|
||||
otags: Optional[list[str]]
|
||||
) -> None:
|
||||
wtags = otags or tags
|
||||
for num, answer in enumerate(answers, start=1):
|
||||
title = f'-- ANSWER {num} '
|
||||
title_end = '-' * (terminal_width - len(title))
|
||||
print(f'{title}{title_end}')
|
||||
print(answer)
|
||||
with open(f"{num:02d}.yaml", "w") as fd:
|
||||
with io.StringIO() as f:
|
||||
yaml.dump({'question': question},
|
||||
f,
|
||||
default_style="|",
|
||||
default_flow_style=False)
|
||||
fd.write(f.getvalue().replace('"question":', "question:", 1))
|
||||
with io.StringIO() as f:
|
||||
yaml.dump({'answer': answer},
|
||||
f,
|
||||
default_style="|",
|
||||
default_flow_style=False)
|
||||
fd.write(f.getvalue().replace('"answer":', "answer:", 1))
|
||||
yaml.dump({'tags': wtags},
|
||||
fd,
|
||||
default_flow_style=False)
|
||||
|
||||
|
||||
def main(args: argparse.Namespace) -> int:
|
||||
with open(args.config, 'r') as f:
|
||||
config = yaml.load(f, Loader=yaml.FullLoader)
|
||||
openai.api_key = config['openai']['api_key']
|
||||
if args.max_tokens:
|
||||
config['openai']['max_tokens'] = args.max_tokens
|
||||
if args.temperature:
|
||||
config['openai']['temperature'] = args.temperature
|
||||
if args.model:
|
||||
config['openai']['model'] = args.model
|
||||
if args.print:
|
||||
run_print_command(args, config)
|
||||
elif args.question:
|
||||
handle_question(args, config)
|
||||
elif args.chat_dump:
|
||||
process_and_display_chat(args, config, dump=True)
|
||||
elif args.chat:
|
||||
process_and_display_chat(args, config)
|
||||
return 0
|
||||
|
||||
|
||||
def tags_completer(prefix, parsed_args, **kwargs):
|
||||
with open(parsed_args.config, 'r') as f:
|
||||
config = yaml.load(f, Loader=yaml.FullLoader)
|
||||
result = []
|
||||
for file in sorted(pathlib.Path(config['db']).iterdir()):
|
||||
if file.suffix == '.yaml':
|
||||
with open(file, 'r') as f:
|
||||
data = yaml.load(f, Loader=yaml.FullLoader)
|
||||
for tag in data.get('tags', []):
|
||||
if prefix and len(prefix) > 0:
|
||||
if tag.startswith(prefix):
|
||||
result.append(tag)
|
||||
else:
|
||||
result.append(tag)
|
||||
return list(set(result))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
args_parser = argparse.ArgumentParser(description="Handle chats")
|
||||
default_config = '.config.yaml'
|
||||
group = args_parser.add_mutually_exclusive_group(required=True)
|
||||
group.add_argument('-p', '--print',
|
||||
help='YAML file to print')
|
||||
group.add_argument('-q', '--question',
|
||||
help='Question to ask')
|
||||
group.add_argument('-D', '--chat-dump',
|
||||
help="Print chat as Python structure",
|
||||
action='store_true')
|
||||
group.add_argument('-d', '--chat',
|
||||
help="Print chat as readable text",
|
||||
action='store_true')
|
||||
args_parser.add_argument('-c', '--config',
|
||||
help='Config file name.',
|
||||
default=default_config)
|
||||
args_parser.add_argument('-m', '--max-tokens',
|
||||
help='Max tokens to use',
|
||||
type=int)
|
||||
args_parser.add_argument('-T', '--temperature',
|
||||
help='Temperature to use',
|
||||
type=float)
|
||||
args_parser.add_argument('-M', '--model',
|
||||
help='Model to use')
|
||||
args_parser.add_argument('-n', '--number',
|
||||
help='Number of answers to produce',
|
||||
type=int,
|
||||
default=3)
|
||||
tags_arg = args_parser.add_argument(
|
||||
'-t', '--tags',
|
||||
nargs='*',
|
||||
help='List of tag names',
|
||||
metavar='TAGS')
|
||||
tags_arg.completer = tags_completer # type: ignore
|
||||
extags_arg = args_parser.add_argument(
|
||||
'-e', '--extags',
|
||||
nargs='*',
|
||||
help='List of tag names to exclude',
|
||||
metavar='EXTAGS')
|
||||
extags_arg.completer = tags_completer # type: ignore
|
||||
otags_arg = args_parser.add_argument(
|
||||
'-o', '--output-tags',
|
||||
nargs='*',
|
||||
help='List of output tag names, default is input',
|
||||
metavar='OTAGS')
|
||||
otags_arg.completer = tags_completer # type: ignore
|
||||
argcomplete.autocomplete(args_parser)
|
||||
args = args_parser.parse_args()
|
||||
sys.exit(main(args))
|
||||
11
config.yaml
Normal file
11
config.yaml
Normal file
@ -0,0 +1,11 @@
|
||||
system: |
|
||||
You are an coding assistent, you develop an application in Python, which automates conversation with AI and helps to store question+answer pairs with tags, and to compose relevant chat history for the next question.
|
||||
db: ./db/
|
||||
openai:
|
||||
api_key: sk-XXX
|
||||
model: gpt-4
|
||||
temperature: 0.8
|
||||
max_tokens: 2900
|
||||
top_p: 1
|
||||
frequency_penalty: 0
|
||||
presence_penalty: 0
|
||||
62
db/001_pkg_struct.yaml
Normal file
62
db/001_pkg_struct.yaml
Normal file
@ -0,0 +1,62 @@
|
||||
question: |-
|
||||
I want to start a Python project on github with the name ChatMastermind. Give me the full package structure with all the files required.
|
||||
answer: |-
|
||||
To create a Python project on GitHub with the name "ChatMastermind", follow these steps:
|
||||
|
||||
1. Create a new repository on GitHub named "ChatMastermind".
|
||||
|
||||
2. Clone the repository to your local machine.
|
||||
|
||||
3. Now, let's create the package structure for the project. Here's a suggested package structure:
|
||||
|
||||
```
|
||||
ChatMastermind/
|
||||
|-- .gitignore
|
||||
|-- README.md
|
||||
|-- LICENSE
|
||||
|-- setup.py
|
||||
|-- chatmastermind/
|
||||
|-- __init__.py
|
||||
|-- main.py
|
||||
|-- api_client.py
|
||||
|-- storage.py
|
||||
|-- utils.py
|
||||
|-- tests/
|
||||
|-- __init__.py
|
||||
|-- test_main.py
|
||||
|-- test_api_client.py
|
||||
|-- test_storage.py
|
||||
|-- test_utils.py
|
||||
|-- examples/
|
||||
|-- example.py
|
||||
|-- requirements.txt
|
||||
```
|
||||
|
||||
- `.gitignore`: Contains a list of files and directories that should be ignored by git.
|
||||
- `README.md`: Documentation for your project.
|
||||
- `LICENSE`: The license for your project.
|
||||
- `setup.py`: Python package setup script.
|
||||
- `chatmastermind`: The main package directory.
|
||||
- `__init__.py`: Initialize the package.
|
||||
- `main.py`: The main script to run the application.
|
||||
- `api_client.py`: Handles conversation with the AI.
|
||||
- `storage.py`: Manages storing question+answer pairs with tags.
|
||||
- `utils.py`: Contains utility/helper functions.
|
||||
- `tests`: Directory containing tests.
|
||||
- `test_main.py`: Tests for the main script.
|
||||
- `test_api_client.py`: Tests for the API client.
|
||||
- `test_storage.py`: Tests for storage functions.
|
||||
- `test_utils.py`: Tests for utility functions.
|
||||
- `examples`: Directory containing example scripts.
|
||||
- `example.py`: An example of how to use the package.
|
||||
- `requirements.txt`: Lists the required packages for your project.
|
||||
|
||||
4. Create these files and directories in your local repository.
|
||||
|
||||
5. Add and commit the changes to the repository.
|
||||
|
||||
6. Push the changes to the remote repository on GitHub.
|
||||
|
||||
Now you have your Python project called "ChatMastermind" on GitHub with the package structure ready for development.
|
||||
tags:
|
||||
- PackageStructure
|
||||
47
db/002_file_setup_py.yaml
Normal file
47
db/002_file_setup_py.yaml
Normal file
@ -0,0 +1,47 @@
|
||||
question: |-
|
||||
Give me the content of the setup.py file.
|
||||
answer: |-
|
||||
To create a `setup.py` file for the ChatMastermind project, use the following content:
|
||||
|
||||
```python
|
||||
from setuptools import setup, find_packages
|
||||
|
||||
with open("README.md", "r", encoding="utf-8") as fh:
|
||||
long_description = fh.read()
|
||||
|
||||
setup(
|
||||
name="ChatMastermind",
|
||||
version="0.1.0",
|
||||
author="Your Name",
|
||||
author_email="your.email@example.com",
|
||||
description="A Python application to automate conversation with AI, store question+answer pairs, and compose chat history.",
|
||||
long_description=long_description,
|
||||
long_description_content_type="text/markdown",
|
||||
url="https://github.com/yourusername/ChatMastermind",
|
||||
packages=find_packages(),
|
||||
classifiers=[
|
||||
"Development Status :: 3 - Alpha",
|
||||
"Intended Audience :: Developers",
|
||||
"License :: OSI Approved :: MIT License",
|
||||
"Operating System :: OS Independent",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.6",
|
||||
"Programming Language :: Python :: 3.7",
|
||||
"Programming Language :: Python :: 3.8",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
],
|
||||
install_requires=[
|
||||
# Add your project's dependencies here
|
||||
],
|
||||
python_requires=">=3.6",
|
||||
entry_points={
|
||||
"console_scripts": [
|
||||
"chatmastermind=chatmastermind.main:main",
|
||||
],
|
||||
},
|
||||
)
|
||||
```
|
||||
|
||||
Replace "Your Name", "your.email@example.com", and "yourusername" with your own information. Make sure to also update the `install_requires` list with any dependencies your project needs. This `setup.py` file uses the setuptools package to define the project's metadata, dependencies, and entry points for console scripts.
|
||||
tags:
|
||||
- FileSetupPy
|
||||
58
db/003_requirements.yaml
Normal file
58
db/003_requirements.yaml
Normal file
@ -0,0 +1,58 @@
|
||||
question: |-
|
||||
Give me the content of requirements.txt file and change the setup.py file to contain following requirements: openai, PyYAML, argcomplete.
|
||||
answer: |-
|
||||
For the `requirements.txt` file, add the following content:
|
||||
|
||||
```
|
||||
openai
|
||||
PyYAML
|
||||
argcomplete
|
||||
```
|
||||
|
||||
Now, update the `setup.py` file to include these requirements:
|
||||
|
||||
```python
|
||||
from setuptools import setup, find_packages
|
||||
|
||||
with open("README.md", "r", encoding="utf-8") as fh:
|
||||
long_description = fh.read()
|
||||
|
||||
setup(
|
||||
name="ChatMastermind",
|
||||
version="0.1.0",
|
||||
author="Your Name",
|
||||
author_email="your.email@example.com",
|
||||
description="A Python application to automate conversation with AI, store question+answer pairs, and compose chat history.",
|
||||
long_description=long_description,
|
||||
long_description_content_type="text/markdown",
|
||||
url="https://github.com/yourusername/ChatMastermind",
|
||||
packages=find_packages(),
|
||||
classifiers=[
|
||||
"Development Status :: 3 - Alpha",
|
||||
"Intended Audience :: Developers",
|
||||
"License :: OSI Approved :: MIT License",
|
||||
"Operating System :: OS Independent",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.6",
|
||||
"Programming Language :: Python :: 3.7",
|
||||
"Programming Language :: Python :: 3.8",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
],
|
||||
install_requires=[
|
||||
"openai",
|
||||
"PyYAML",
|
||||
"argcomplete",
|
||||
],
|
||||
python_requires=">=3.6",
|
||||
entry_points={
|
||||
"console_scripts": [
|
||||
"chatmastermind=chatmastermind.main:main",
|
||||
],
|
||||
},
|
||||
)
|
||||
```
|
||||
|
||||
This will include the "openai", "PyYAML", and "argcomplete" packages as dependencies for your project.
|
||||
tags:
|
||||
- FileRequirementsTxt
|
||||
- Requirements
|
||||
115
db/004_readme.yaml
Normal file
115
db/004_readme.yaml
Normal file
@ -0,0 +1,115 @@
|
||||
question: |-
|
||||
Give me the README.md file content.
|
||||
answer: |-
|
||||
Here's the content of the `README.md` file, describing the ChatMastermind project and the provided script:
|
||||
|
||||
```
|
||||
# ChatMastermind
|
||||
|
||||
ChatMastermind is a Python application that automates conversation with AI, stores question-answer pairs with tags, and composes a relevant chat history for the next question.
|
||||
|
||||
The project uses the OpenAI API to generate responses, and stores the data in YAML files. It also allows you to filter the chat history based on tags, and supports autocompletion for tags.
|
||||
|
||||
## Requirements
|
||||
|
||||
- Python 3.6 or higher
|
||||
- openai
|
||||
- PyYAML
|
||||
- argcomplete
|
||||
|
||||
You can install these requirements using `pip`:
|
||||
|
||||
```
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
python main.py [-h] [-p PRINT | -q QUESTION | -D | -d] [-c CONFIG] [-m MAX_TOKENS] [-T TEMPERATURE] [-M MODEL] [-n NUMBER] [-t [TAGS [TAGS ...]]] [-e [EXTAGS [EXTAGS ...]]] [-o [OTAGS [OTAGS ...]]]
|
||||
```
|
||||
|
||||
### Arguments
|
||||
|
||||
- `-p`, `--print`: YAML file to print.
|
||||
- `-q`, `--question`: Question to ask.
|
||||
- `-D`, `--chat-dump`: Print chat as a Python structure.
|
||||
- `-d`, `--chat`: Print chat as readable text.
|
||||
- `-c`, `--config`: Config file name (defaults to `.config.yaml`).
|
||||
- `-m`, `--max-tokens`: Max tokens to use.
|
||||
- `-T`, `--temperature`: Temperature to use.
|
||||
- `-M`, `--model`: Model to use.
|
||||
- `-n`, `--number`: Number of answers to produce (default is 3).
|
||||
- `-t`, `--tags`: List of tag names.
|
||||
- `-e`, `--extags`: List of tag names to exclude.
|
||||
- `-o`, `--output-tags`: List of output tag names (default is the input tags).
|
||||
|
||||
### Examples
|
||||
|
||||
1. Print the contents of a YAML file:
|
||||
|
||||
```
|
||||
python main.py -p example.yaml
|
||||
```
|
||||
|
||||
2. Ask a question:
|
||||
|
||||
```
|
||||
python main.py -q "What is the meaning of life?" -t philosophy -e religion
|
||||
```
|
||||
|
||||
3. Display the chat history as a Python structure:
|
||||
|
||||
```
|
||||
python main.py -D
|
||||
```
|
||||
|
||||
4. Display the chat history as readable text:
|
||||
|
||||
```
|
||||
python main.py -d
|
||||
```
|
||||
|
||||
5. Filter chat history by tags:
|
||||
|
||||
```
|
||||
python main.py -d -t tag1 tag2
|
||||
```
|
||||
|
||||
6. Exclude chat history by tags:
|
||||
|
||||
```
|
||||
python main.py -d -e tag3 tag4
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
The configuration file (`.config.yaml`) should contain the following fields:
|
||||
|
||||
- `openai`:
|
||||
- `api_key`: Your OpenAI API key.
|
||||
- `model`: The name of the OpenAI model to use (e.g. "text-davinci-002").
|
||||
- `temperature`: The temperature value for the model.
|
||||
- `max_tokens`: The maximum number of tokens for the model.
|
||||
- `top_p`: The top P value for the model.
|
||||
- `frequency_penalty`: The frequency penalty value.
|
||||
- `presence_penalty`: The presence penalty value.
|
||||
- `system`: The system message used to set the behavior of the AI.
|
||||
- `db`: The directory where the question-answer pairs are stored in YAML files.
|
||||
|
||||
## Autocompletion
|
||||
|
||||
To activate autocompletion for tags, add the following line to your shell's configuration file (e.g., `.bashrc`, `.zshrc`, or `.profile`):
|
||||
|
||||
```
|
||||
eval "$(register-python-argcomplete chatmastermind)"
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
This project is licensed under the terms of the WTFPL License.
|
||||
```
|
||||
|
||||
You can modify the `README.md` file as needed to provide more information or examples.
|
||||
tags:
|
||||
- FileReadmeMd
|
||||
4
requirements.txt
Normal file
4
requirements.txt
Normal file
@ -0,0 +1,4 @@
|
||||
openai
|
||||
PyYAML
|
||||
argcomplete
|
||||
pytest
|
||||
38
setup.py
Normal file
38
setup.py
Normal file
@ -0,0 +1,38 @@
|
||||
from setuptools import setup, find_packages
|
||||
|
||||
with open("README.md", "r", encoding="utf-8") as fh:
|
||||
long_description = fh.read()
|
||||
|
||||
setup(
|
||||
name="ChatMastermind",
|
||||
version="0.1.0",
|
||||
author="Oleksandr Kozachuk",
|
||||
author_email="ddeus.lp@mailnull.com",
|
||||
description="A Python application to automate conversation with AI, store question+answer pairs, and compose chat history.",
|
||||
long_description=long_description,
|
||||
long_description_content_type="text/markdown",
|
||||
url="https://github.com/ok2/ChatMastermind",
|
||||
packages=find_packages(),
|
||||
classifiers=[
|
||||
"Development Status :: 3 - Alpha",
|
||||
"Intended Audience :: Developers",
|
||||
"License :: OSI Approved :: MIT License",
|
||||
"Operating System :: OS Independent",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
],
|
||||
install_requires=[
|
||||
"openai",
|
||||
"PyYAML",
|
||||
"argcomplete",
|
||||
"pytest"
|
||||
],
|
||||
python_requires=">=3.10",
|
||||
test_suite="tests",
|
||||
entry_points={
|
||||
"console_scripts": [
|
||||
"chatmastermind=chatmastermind.main:main",
|
||||
],
|
||||
},
|
||||
)
|
||||
0
tests/__init__.py
Normal file
0
tests/__init__.py
Normal file
200
tests/test_main.py
Normal file
200
tests/test_main.py
Normal file
@ -0,0 +1,200 @@
|
||||
import unittest
|
||||
import io
|
||||
import os
|
||||
import yaml
|
||||
import argparse
|
||||
import chatmastermind.main
|
||||
from chatmastermind.main import create_chat, ai, handle_question, save_answers
|
||||
from unittest import mock
|
||||
from unittest.mock import patch, MagicMock
|
||||
|
||||
|
||||
class TestCreateChat(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.config = {
|
||||
'system': 'System text',
|
||||
'db': 'test_files'
|
||||
}
|
||||
self.question = "test question"
|
||||
self.tags = ['test_tag']
|
||||
|
||||
@patch('os.listdir')
|
||||
@patch('builtins.open')
|
||||
def test_create_chat_with_tags(self, open_mock, listdir_mock):
|
||||
listdir_mock.return_value = ['testfile.yaml']
|
||||
open_mock.return_value.__enter__.return_value = io.StringIO(yaml.dump(
|
||||
{'question': 'test_content', 'answer': 'some answer',
|
||||
'tags': ['test_tag']}))
|
||||
|
||||
test_chat = create_chat(self.question, self.tags, None, self.config)
|
||||
|
||||
self.assertEqual(len(test_chat), 4)
|
||||
self.assertEqual(test_chat[0],
|
||||
{'role': 'system', 'content': self.config['system']})
|
||||
self.assertEqual(test_chat[1],
|
||||
{'role': 'user', 'content': 'test_content'})
|
||||
self.assertEqual(test_chat[2],
|
||||
{'role': 'assistant', 'content': 'some answer'})
|
||||
self.assertEqual(test_chat[3],
|
||||
{'role': 'user', 'content': self.question})
|
||||
|
||||
@patch('os.listdir')
|
||||
@patch('builtins.open')
|
||||
def test_create_chat_with_other_tags(self, open_mock, listdir_mock):
|
||||
listdir_mock.return_value = ['testfile.yaml']
|
||||
open_mock.return_value.__enter__.return_value = io.StringIO(yaml.dump(
|
||||
{'question': 'test_content', 'answer': 'some answer',
|
||||
'tags': ['other_tag']}))
|
||||
|
||||
test_chat = create_chat(self.question, self.tags, None, self.config)
|
||||
|
||||
self.assertEqual(len(test_chat), 2)
|
||||
self.assertEqual(test_chat[0],
|
||||
{'role': 'system', 'content': self.config['system']})
|
||||
self.assertEqual(test_chat[1],
|
||||
{'role': 'user', 'content': self.question})
|
||||
|
||||
@patch('os.listdir')
|
||||
@patch('builtins.open')
|
||||
def test_create_chat_without_tags(self, open_mock, listdir_mock):
|
||||
listdir_mock.return_value = ['testfile.yaml', 'testfile2.yaml']
|
||||
open_mock.side_effect = (
|
||||
io.StringIO(yaml.dump({'question': 'test_content',
|
||||
'answer': 'some answer',
|
||||
'tags': ['test_tag']})),
|
||||
io.StringIO(yaml.dump({'question': 'test_content2',
|
||||
'answer': 'some answer2',
|
||||
'tags': ['test_tag2']})),
|
||||
)
|
||||
|
||||
test_chat = create_chat(self.question, [], None, self.config)
|
||||
|
||||
self.assertEqual(len(test_chat), 6)
|
||||
self.assertEqual(test_chat[0],
|
||||
{'role': 'system', 'content': self.config['system']})
|
||||
self.assertEqual(test_chat[1],
|
||||
{'role': 'user', 'content': 'test_content'})
|
||||
self.assertEqual(test_chat[2],
|
||||
{'role': 'assistant', 'content': 'some answer'})
|
||||
self.assertEqual(test_chat[3],
|
||||
{'role': 'user', 'content': 'test_content2'})
|
||||
self.assertEqual(test_chat[4],
|
||||
{'role': 'assistant', 'content': 'some answer2'})
|
||||
|
||||
|
||||
class TestHandleQuestion(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.args = argparse.Namespace(
|
||||
tags=['tag1'],
|
||||
extags=['extag1'],
|
||||
output_tags=None,
|
||||
question='test question',
|
||||
number=3
|
||||
)
|
||||
self.config = {
|
||||
'setting1': 'value1',
|
||||
'setting2': 'value2'
|
||||
}
|
||||
|
||||
@patch("chatmastermind.main.create_chat", return_value="test_chat")
|
||||
@patch("chatmastermind.main.process_tags")
|
||||
@patch("chatmastermind.main.ai", return_value=(["answer1", "answer2", "answer3"],
|
||||
"test_usage"))
|
||||
@patch("chatmastermind.main.pp")
|
||||
@patch("chatmastermind.main.print")
|
||||
@patch("chatmastermind.main.yaml.dump")
|
||||
def test_handle_question(self, _, mock_print, mock_pp, mock_ai,
|
||||
mock_process_tags, mock_create_chat):
|
||||
open_mock = MagicMock()
|
||||
with patch("chatmastermind.main.open", open_mock):
|
||||
handle_question(self.args, self.config, True)
|
||||
mock_process_tags.assert_called_once_with(self.config,
|
||||
self.args.tags,
|
||||
self.args.extags)
|
||||
mock_create_chat.assert_called_once_with(self.args.question,
|
||||
self.args.tags,
|
||||
self.args.extags,
|
||||
self.config)
|
||||
mock_pp.assert_called_once_with("test_chat")
|
||||
mock_ai.assert_called_with("test_chat",
|
||||
self.config,
|
||||
self.args.number)
|
||||
expected_calls = []
|
||||
for num, answer in enumerate(mock_ai.return_value[0], start=1):
|
||||
title = f'-- ANSWER {num} '
|
||||
title_end = '-' * (chatmastermind.main.terminal_width - len(title))
|
||||
expected_calls.append(((f'{title}{title_end}',),))
|
||||
expected_calls.append(((answer,),))
|
||||
expected_calls.append((("-" * chatmastermind.main.terminal_width,),))
|
||||
expected_calls.append(((f"Usage: {mock_ai.return_value[1]}",),))
|
||||
open_mock.assert_has_calls([
|
||||
mock.call(f"{num:02d}.yaml", "w") for num in range(1, 4)
|
||||
] + [mock.call().__enter__(),
|
||||
mock.call().__exit__(None, None, None)] * 3,
|
||||
any_order=True)
|
||||
self.assertEqual(mock_print.call_args_list, expected_calls)
|
||||
|
||||
|
||||
class TestSaveAnswers(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.question = "What is AI?"
|
||||
self.answers = ["AI is Artificial Intelligence",
|
||||
"AI is a simulation of human intelligence"]
|
||||
self.tags = ["ai", "definition"]
|
||||
|
||||
@patch('sys.stdout', new_callable=io.StringIO)
|
||||
def assert_stdout(self, expected_output: str, mock_stdout: io.StringIO):
|
||||
save_answers(self.question, self.answers, self.tags, None)
|
||||
self.assertEqual(mock_stdout.getvalue(), expected_output)
|
||||
|
||||
def test_save_answers(self):
|
||||
try:
|
||||
self.assert_stdout(f"-- ANSWER 1 {'-'*(chatmastermind.main.terminal_width-12)}\n"
|
||||
"AI is Artificial Intelligence\n"
|
||||
f"-- ANSWER 2 {'-'*(chatmastermind.main.terminal_width-12)}\n"
|
||||
"AI is a simulation of human intelligence\n")
|
||||
for idx, answer in enumerate(self.answers, start=1):
|
||||
with open(f"{idx:02d}.yaml", "r") as file:
|
||||
data = yaml.safe_load(file)
|
||||
self.assertEqual(data["question"], self.question)
|
||||
self.assertEqual(data["answer"], answer)
|
||||
self.assertEqual(data["tags"], self.tags)
|
||||
finally:
|
||||
for idx in range(1, len(self.answers) + 1):
|
||||
if os.path.exists(f"{idx:02d}.yaml"):
|
||||
os.remove(f"{idx:02d}.yaml")
|
||||
|
||||
|
||||
class TestAI(unittest.TestCase):
|
||||
|
||||
@patch("openai.ChatCompletion.create")
|
||||
def test_ai(self, mock_create: MagicMock):
|
||||
mock_create.return_value = {
|
||||
'choices': [
|
||||
{'message': {'content': 'response_text_1'}},
|
||||
{'message': {'content': 'response_text_2'}}
|
||||
],
|
||||
'usage': {'tokens': 10}
|
||||
}
|
||||
|
||||
number = 2
|
||||
chat = [{"role": "system", "content": "hello ai"}]
|
||||
config = {
|
||||
"openai": {
|
||||
"model": "text-davinci-002",
|
||||
"temperature": 0.5,
|
||||
"max_tokens": 150,
|
||||
"top_p": 1,
|
||||
"n": number,
|
||||
"frequency_penalty": 0,
|
||||
"presence_penalty": 0
|
||||
}
|
||||
}
|
||||
|
||||
result = ai(chat, config, number)
|
||||
expected_result = (['response_text_1', 'response_text_2'],
|
||||
{'tokens': 10})
|
||||
self.assertEqual(result, expected_result)
|
||||
Loading…
x
Reference in New Issue
Block a user