Load the system prompt on-the-fly instead of once at startup.

This lets us modify it [for new conversations] on disk while the LLM server
is running.
This commit is contained in:
Charlotte Som 2025-02-26 10:41:39 +00:00
parent d657937ff4
commit 710a6de7bc

View file

@ -4,7 +4,6 @@ from .tid import tid_now
from json import dumps as json
db = sqlite_utils.Database(llm.cli.logs_db_path())
girlypop_prompt = llm.cli.load_template("girlypop").system
async def list_conversations(request: Request):
@ -38,7 +37,7 @@ async def connect_to_conversation(ws: WebSocket):
await ws.accept()
# only send the system prompt at the start of a conversation
system_prompt = girlypop_prompt
system_prompt = llm.cli.load_template("girlypop").system
if not continuing:
for response in conversation.responses: