import time
import json

from openai import OpenAI

import chat_tools
from bot_util import get_api_details
# --- Configuration and shared state ---

MODEL = "deepseek-chat"

# NOTE: this deliberately rebinds the imported `chat_tools` module name to the
# tool-registry object returned by get_chat_tools(); all later uses
# (`chat_tools.get_tools()`, `chat_tools.util.write_file(...)`) go through
# that object, not the module.
chat_tools = chat_tools.get_chat_tools()

# Running conversation history shared with chat_with_deepseek().
history = []

# Fetch API credentials once instead of calling get_api_details() twice.
_api_details = get_api_details()

# OpenAI-compatible client pointed at the DeepSeek endpoint.
client = OpenAI(
    api_key=_api_details.api_key,
    base_url=_api_details.api_url,
)

MAX_RETRIES = 3
RETRY_DELAY = 2  # seconds between retry attempts
# NOTE(review): `count` is never read at module level (chat_with_deepseek
# tracks attempts itself); kept only in case an external caller imports it.
count = 0
# Instructions for the model: convert Q&A-style plain-text notes to markdown.
# (Typos in the original prompt fixed: "Sometoimees" -> "Sometimes",
# "fomrat ansers" -> "format answers".)
system_prompt = (
    "You are a helpful assistant. You can convert text notes to markdown format. "
    "These notes are in Question and Answer format. for example: Create two namespaces "
    "and name them ns1 and ns2 \n k create ns ns1 \n k create ns ns2. "
    "where k create ns ns1 and k create ns ns2 are answers. "
    "Sometimes questions can be more than one line, "
    "and each Q&A is separated by an empty line. "
    "Please make the question a header and format answers in correct markdown format. "
    "Please write the markdown to the user specified file."
)

# Load the raw notes to convert. Read the whole file in one call instead of
# concatenating line by line (the += loop is quadratic in the worst case).
with open("test_data/k8s_day10.txt", "r") as f:
    content = f.read()
def chat_with_deepseek(max_retries: int = MAX_RETRIES, retry_delay: int = RETRY_DELAY):
    """Send the loaded notes to the model and handle one optional tool call.

    Seeds the shared `history` with the system prompt and the notes, asks the
    model to convert them to markdown, and — if the model requests the
    `write_file` tool — executes it and asks for a final confirmation reply.

    Args:
        max_retries: how many times to attempt the whole exchange.
        retry_delay: seconds to sleep between attempts.

    Returns:
        The assistant's final text reply.

    Raises:
        Exception: re-raises the last error once the retry budget is exhausted.
    """
    # NOTE(review): `history` is module-level, so a second call to this
    # function would append a second system prompt — confirm single-use.
    history.append({"role": "system", "content": system_prompt})
    history.append({
        "role": "user",
        "content": f"please convert notes below to markdown format and write to k8s_day12.md: \n {content} ",
    })

    # Single retry loop; `attempt` replaces the redundant manual `count`.
    for attempt in range(1, max_retries + 1):
        try:
            response = client.chat.completions.create(
                model=MODEL,
                messages=history,
                tools=chat_tools.get_tools(),
            )
            message = response.choices[0].message

            if not message.tool_calls:
                # Plain answer with no tool use — return it directly.
                return message.content

            # Handle the first requested tool call only.
            tool_call = message.tool_calls[0]
            if tool_call.function.name != "write_file":
                # Unknown tool: nothing we can execute; return the text (the
                # original code would have hit an undefined-name error here).
                return message.content

            params = json.loads(tool_call.function.arguments)
            # Use a local name for the payload: the original declared
            # `global content` and clobbered the module-level notes here,
            # which would corrupt the prompt source on a retry or re-call.
            file_text = params["content"]
            result = chat_tools.util.write_file(params["file_path"], file_text)

            # Feed the tool result back and ask for a final reply.
            history.append(message)
            history.append({
                "role": "tool",
                "tool_call_id": tool_call.id,
                "content": result,
            })
            response = client.chat.completions.create(
                model=MODEL,
                messages=history,
            )
            return response.choices[0].message.content

        except Exception as e:
            # Typo fixed: "occurre" -> "occurred".
            print(f"An error occurred: {e}.")
            if attempt < max_retries:
                print(f"Retrying in {retry_delay} seconds...")
                time.sleep(retry_delay)
            else:
                print("Max retries reached. Exiting.")
                raise
# Guard the entry point so importing this module does not trigger an API call.
if __name__ == "__main__":
    print(chat_with_deepseek(max_retries=1))