add feature to format notes and save to local directory

This commit is contained in:
2025-04-26 12:16:44 +08:00
parent edfb1416ab
commit 594af0ade7
9 changed files with 237 additions and 35 deletions

.gitignore vendored

@@ -1 +1,8 @@
.env
.env
# Ignore all __pycache__ directories
__pycache__/
# Ignore Python compiled cache files
*.py[cod]
*$py.class

bot_util/__init__.py Normal file

@@ -0,0 +1,2 @@
from .file_util import write_file, write_file_batch
__all__ = ['write_file', 'write_file_batch']
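Since `__init__.py` re-exports the helpers, callers can import them straight from the package; `__all__` also limits what a wildcard import exposes. A minimal usage sketch (the path and content are illustrative):
```python
# Hypothetical usage; the relative path is resolved against the project root by write_file.
from bot_util import write_file

print(write_file("test_data/output/example.md", "# example note\n"))
```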

bot_util/file_util.py Normal file

@@ -0,0 +1,48 @@
import os
from pathlib import Path


def write_file(file_path: str, content: str) -> str:
    """
    Write content to a file, creating directories if they do not exist.
    If the file already exists, it will be overwritten.

    :param file_path: Path to the file to write to.
    :param content: Content to write to the file.
    :return: A status message describing success or the error encountered.
    """
    # If the path is not absolute, resolve it against the project root
    if not os.path.isabs(file_path):
        file_path = os.path.join(Path(__file__).resolve().parent.parent, file_path)
    dir_path = os.path.dirname(file_path)
    print(f"Target directory: {dir_path}")
    # Create the directory if it doesn't exist
    if not os.path.exists(dir_path):
        try:
            os.makedirs(dir_path)
        except OSError as e:
            print(f"Error creating directory {dir_path}: {e}")
            return "Error creating directory, file not written"
    try:
        with open(file_path, 'w') as f:
            f.write(content)
        return "file written successfully"
    except IOError as e:
        print(f"Error writing to file {file_path}: {e}")
        return f"Error writing to file {file_path}: {e}"


def write_file_batch(file_paths: list[str], contents: list[str]) -> list[str]:
    """
    Write multiple files in batch.

    :param file_paths: List of file paths to write to.
    :param contents: List of contents to write to the files.
    :return: List of status messages, one per file.
    """
    results = []
    for file_path, content in zip(file_paths, contents):
        result = write_file(file_path, content)
        results.append(result)
    return results


if __name__ == "__main__":
    write_file("test_data/output/test.txt", "test")
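Relative paths are resolved against the project root (the parent of `bot_util/`), so callers can pass paths like `test_data/output/...` from anywhere. A minimal sketch of the batch helper, assuming illustrative file names:
```python
# Hypothetical batch write; each directory is created on demand and a status string is returned per file.
from bot_util.file_util import write_file_batch

results = write_file_batch(
    ["test_data/output/day10.md", "test_data/output/day12.md"],
    ["# Day 10 notes\n", "# Day 12 notes\n"],
)
print(results)  # e.g. ['file written successfully', 'file written successfully']
```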


@@ -1,2 +0,0 @@
from dotenv import load_dotenv
import os

chat_tools.py Normal file

@@ -0,0 +1,39 @@
import bot_util

tools = [
    {
        "type": "function",
        "function": {
            "name": "write_file",
            "description": "Write content to a file, creating directories if they do not exist. If the file already exists, it will be overwritten.",
            "parameters": {
                "type": "object",
                "properties": {
                    "file_path": {
                        "type": "string",
                        "description": "The path (including file name) to write to; it can be a relative path, e.g. 'test_data/output/test.md'.",
                    },
                    "content": {
                        "type": "string",
                        "description": "The content to write to the file.",
                    },
                },
                "required": ["file_path", "content"],
            },
        }
    },
]


class ChatTools:
    def __init__(self, tools, util):
        self.tools = tools
        self.names = util.__all__
        self.util = util

    def get_tools(self):
        return self.tools


def get_chat_tools():
    chat_tools = ChatTools(tools, bot_util)
    return chat_tools
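For reference, the arguments of a `write_file` tool call arrive from the model as a JSON string; the caller parses them and dispatches to the matching helper exposed by `bot_util`, which is what the chat script below does. A minimal dispatch sketch with illustrative argument values:
```python
# Hypothetical tool-call arguments as the model would return them; values are illustrative.
import json

import chat_tools

ct = chat_tools.get_chat_tools()
raw_arguments = '{"file_path": "test_data/output/test.md", "content": "# converted note"}'
params = json.loads(raw_arguments)

# Only dispatch to functions the bot_util package actually exports.
if "write_file" in ct.names:
    print(ct.util.write_file(params["file_path"], params["content"]))
```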

k8s_day12.md Normal file

@@ -0,0 +1,28 @@
## Create a Daemonset as per the demo shown in the video
```yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nginx-ds
  labels:
    env: demo
spec:
  template:
    metadata:
      labels:
        env: demo
    spec:
      containers:
      - image: nginx
        name: nginx
        ports:
        - containerPort: 80
  selector:
    matchLabels:
      env: demo
```
## Understand the cron syntax and create a cronjob object in kubernetes that prints "40daysofkubernetes" after every 5 minutes and use busybox image
```bash
kubectl create cronjob hello --image=busybox:1.28 --schedule="*/5 * * * *" -- echo "40daysofkubernetes"
```


@@ -1,15 +1,19 @@
from dotenv import load_dotenv
import os, sys
import os, time
import json
from openai import OpenAI
import time
import chat_tools
load_dotenv()
# initialize then client
# initialize parameters
DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")
DEEPSEEK_API_URL = os.getenv("DEEPSEEK_API_URL")
MODEL = "deepseek-chat"
chat_tools = chat_tools.get_chat_tools()
history = []
# Initialize the OpenAI client with the API key and base URL
client = OpenAI(
api_key=DEEPSEEK_API_KEY,
base_url=DEEPSEEK_API_URL,
@@ -23,38 +27,63 @@ system_prompt = "You are a helpful assistant. You can convert text notes to markdown \
in Question and Answer format. For example: Create two namespaces and name them ns1 and ns2 \n k create ns ns1 \n \
k create ns ns2, where k create ns ns1 and k create ns ns2 are answers. Sometimes questions can be more than one line, \
and each Q&A pair is separated by an empty line. Please make the question a header and format answers in correct markdown format. \
Please only return the markdown content. "
Please write the markdown to the user-specified file."
content = ""
with open("k8s_day10.txt", "r") as f:
with open("test_data/k8s_day10.txt", "r") as f:
    for line in f:
        content += line
def chat_with_deepseek(max_retries: int = MAX_RETRIES, retry_delay: int = RETRY_DELAY):
    # Initialize the chat history with the system prompt
    global content
    history.append({"role": "system", "content": system_prompt})
    history.append({"role": "user", "content": f"please convert the notes below to markdown format and write to k8s_day12.md: \n {content} "})
    count = 0
    for _ in range(max_retries):
        try:
            response = client.chat.completions.create(
                model=MODEL,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": f"please convert the notes below to markdown format: \n {content} "},
                ],
                stream=True,
                messages=history,
                tools=chat_tools.get_tools(),
            )
            for chunk in response:
                text = chunk.choices[0].delta.content
                if text:
                    sys.stdout.write(text)
                    sys.stdout.flush()
            if response.choices[0].message.tool_calls:
                tool_call = response.choices[0].message.tool_calls[0]
                tool_name = tool_call.function.name
                tool_args = tool_call.function.arguments
                tool_id = tool_call.id
                # Call the matching local function with the model-supplied arguments
                if tool_name == "write_file":
                    params = json.loads(tool_args)
                    file_path = params["file_path"]
                    content = params["content"]
                    result = chat_tools.util.write_file(file_path, content)
                    # Report the tool result back to the model and request the final reply
                    history.append(response.choices[0].message)
                    history.append({"role": "tool", "tool_call_id": tool_id, "content": result})
                    response = client.chat.completions.create(
                        model=MODEL,
                        messages=history,
                    )
                return response.choices[0].message.content
            else:
                # for chunk in response:
                #     text = chunk.choices[0].delta.content
                #     if text:
                #         sys.stdout.write(text)
                #         sys.stdout.flush()
                return response.choices[0].message.content
            break
        except Exception as e:
            count += 1
            print(f"An error occurred: {e}.")
            if count < max_retries:
                print(f"An error occurred: {e}. Retrying in {retry_delay} seconds...")
                print(f"Retrying in {retry_delay} seconds...")
                time.sleep(retry_delay)
            else:
                print("Max retries reached. Exiting.")
                break
                raise e
chat_with_deepseek()
print(chat_with_deepseek(max_retries=1))


@@ -1,20 +1,29 @@
Create two namespaces and name them ns1 and ns2
# Create two namespaces and name them ns1 and ns2
```bash
k create ns ns1
k create ns ns2
```
Create a deployment with a single replica in each of these namespaces with the image as nginx and name as deploy-ns1 and deploy-ns2, respectively
# Create a deployment with a single replica in each of these namespaces with the image as nginx and name as deploy-ns1 and deploy-ns2, respectively
```bash
k create deployment deploy-ns1 --image=nginx --replicas=1 -n ns1
k create deployment deploy-ns2 --image=nginx --replicas=1 -n ns2
```
Get the IP address of each of the pods (Remember the kubectl command for that?)
# Get the IP address of each of the pods (Remember the kubectl command for that?)
```bash
k get po -n ns1 -o wide
k get po -n ns2 -o wide
```
Exec into the pod of deploy-ns1 and try to curl the IP address of the pod running on deploy-ns2
# Exec into the pod of deploy-ns1 and try to curl the IP address of the pod running on deploy-ns2
```bash
k exec -it -n ns1 deploy-ns1-56c4d4b7-xzvl2 -- sh
curl 10.244.2.4
```
Your pod-to-pod connection should work, and you should be able to get a successful response back.
# Your pod-to-pod connection should work, and you should be able to get a successful response back.
```html
<!DOCTYPE html>
<html>
<head>
@@ -38,40 +47,57 @@ Commercial support is available at
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
```
Now scale both of your deployments from 1 to 3 replicas.
# Now scale both of your deployments from 1 to 3 replicas.
```bash
k scale --replicas=3 -n ns1 deploy/deploy-ns1
k scale --replicas=3 -n ns2 deploy/deploy-ns2
```
```
deploy-ns1-56c4d4b7-xzvl2 10.244.2.3 kind-worker2
deploy-ns2-846f6b4c9b-r85f7 10.244.2.4 kind-worker2
```
Create two services to expose both of your deployments and name them svc-ns1 and svc-ns2
# Create two services to expose both of your deployments and name them svc-ns1 and svc-ns2
```bash
k expose deployment deploy-ns1 -n ns1 --port=80 --target-port=80
k expose deployment deploy-ns2 -n ns2 --port=80 --target-port=80
```
exec into each pod and try to curl the IP address of the service running on the other namespace.
# exec into each pod and try to curl the IP address of the service running on the other namespace.
```bash
k exec -it deploy-ns1-56c4d4b7-4tptj -n ns1 -- sh
curl 10.244.1.6
k exec -it deploy-ns2-846f6b4c9b-7dqwf -n ns2 -- sh
curl 10.244.1.4
```
This curl should work.
# This curl should work.
```
Yes
```
Now try curling the service name instead of IP. You will notice that you are getting an error and cannot resolve the host.
# Now try curling the service name instead of IP. You will notice that you are getting an error and cannot resolve the host.
```bash
in deploy-ns2-846f6b4c9b-7dqwf:
curl deploy-ns1-56c4d4b7-4tptj does not work
curl deploy-ns2-846f6b4c9b-7dqwf works
```
Now use the FQDN of the service and try to curl again, this should work.
# Now use the FQDN of the service and try to curl again, this should work.
```bash
curl deploy-ns1.ns1.svc.cluster.local
works
In the end, delete both the namespaces, which should delete the services and deployments underneath them.
```
# In the end, delete both the namespaces, which should delete the services and deployments underneath them.
```bash
k delete svc deploy-ns1 -n ns1
k delete svc deploy-ns2 -n ns2
k delete deployment deploy-ns1 -n ns1
k delete deployment deploy-ns2 -n ns2
k delete ns ns1 ns2
k delete ns ns1 ns2
```

test_data/k8s_day10.txt Normal file

@@ -0,0 +1,25 @@
Create a Daemonset as per the demo shown in the video
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nginx-ds
  labels:
    env: demo
spec:
  template:
    metadata:
      labels:
        env: demo
    spec:
      containers:
      - image: nginx
        name: nginx
        ports:
        - containerPort: 80
  selector:
    matchLabels:
      env: demo
Understand the cron syntax and create a cronjob object in kubernetes that prints "40daysofkubernetes" after every 5 minutes and use busybox image
kubectl create cronjob hello --image=busybox:1.28 --schedule="*/5 * * * *" -- echo "40daysofkubernetes"