load the system prompt on-the-fly instead of once at startup
this lets us modify it [for new conversations] on disk while the llm server is running
This commit is contained in:
parent
d657937ff4
commit
710a6de7bc
1 changed file with 1 addition and 2 deletions
|
@@ -4,7 +4,6 @@ from .tid import tid_now
 from json import dumps as json


 db = sqlite_utils.Database(llm.cli.logs_db_path())
-girlypop_prompt = llm.cli.load_template("girlypop").system


 async def list_conversations(request: Request):
@@ -38,7 +37,7 @@ async def connect_to_conversation(ws: WebSocket):
     await ws.accept()

     # only send the system prompt at the start of a conversation
-    system_prompt = girlypop_prompt
+    system_prompt = llm.cli.load_template("girlypop").system

     if not continuing:
         for response in conversation.responses:
|
Loading…
Reference in a new issue