import os

LLM_PROMPT_TUNE = os.environ.get('LLM_PROMPT_TUNE') # oai, vicuna, supercot

# optional hint appended to prompts so the model emits an explicit terminator
STOP_TOKEN_HINT = "" # "\nWrite \"END\" afterwards."

# tunes that keep STOP_TOKEN_HINT; it is stripped for every other tune
USE_STOP_HINT = [ "llama" ]

PROMPTS = {
    "entity_from_observation": {
        "system": (
            "What is the observed entity in the following observation?"
            " ONLY report one object and write one sentence."
            f'{STOP_TOKEN_HINT}'
        ),
        "user": (
            "Observation: {observation}"
        ),
        "assistant": "Entity=",
    },
    "entity_action": {
        "system": (
            "What is the following entity doing in the following observation?"
            " ONLY write one sentence."
            f'{STOP_TOKEN_HINT}'
        ),
        "user": (
            "Entity: {entity}"
            "\nObservation: {observation}"
        ),
        "assistant": "`{entity}` is ",
    },
    "summarize_related_memories": {
        "system": (
            "Given the following context, answer the following question."
            f'{STOP_TOKEN_HINT}'
        ),
        "user": (
            "Context: {relevant_memories_simple}"
            "\nQuestion: {q1}?"
        ),
        "assistant": "Summary of relevant context: ",
    },
    "compute_agent_summary": {
        "system": (
            "Given the following previous summary and the following statements, how would you summarize {name}'s core characteristics?"
            " Do not embellish under any circumstances."
            f'{STOP_TOKEN_HINT}'
        ),
        "user": (
            "Previous summary: {summary}\n"
            "Statements: {relevant_memories_simple}"
        ),
        "assistant": "Summary: ",
    },
    "topic_of_reflection": {
        "system": (
            "Given only the following information, what are the 3 most salient"
            " high-level questions we can answer about the subjects in the statements?"
            " Provide each question on a new line."
            f'{STOP_TOKEN_HINT}'
        ),
        "user": (
            "Information: {observations}"
        ),
        "assistant": "",
    },
    "insights_on_topic": {
        "system": (
            "Given the following statements about {topic},"
            " what 5 high-level insights can you infer?"
            " (example format: insight (because of 1, 5, 3))"
            f'{STOP_TOKEN_HINT}'
        ),
        "user": (
            "Statements: {related_statements}"
        ),
        "assistant": "",
    },
    "memory_importance": {
        "system": (
            "On the scale of 1 to 10, where 1 is purely mundane"
            " (e.g., brushing teeth, making bed) and 10 is extremely poignant"
            " (e.g., a break up, college acceptance),"
            " rate the likely poignancy of the following event."
            "\nRespond with only a single integer."
            f"{STOP_TOKEN_HINT}"
        ),
        "user": (
            "Event: {memory_content}"
        ),
        "assistant": "Rating: ",
    },
    "generate_reaction": {
        "system": (
            "It is {current_time}."
            "\n{summary}"
            "\n{memory}"
            "\n\n{suffix}"
            f"{STOP_TOKEN_HINT}"
        ),
        "user": (
            "Observation: {observation}"
        ),
        "assistant": "",
    },
    # non-chat entries: a shared context preamble and the reaction suffixes
    "context": ( # insert your jailbreak / persona preamble here
        ""
    ),
"suffix_generate_reaction": (
|
|
"Given the following observation, how would {name} appropriately react?"
|
|
"\nIf the action is to engage in dialogue, only write `SAY: \"what to say\"`."
|
|
"\nOr otherwise, only write `REACT: how to react`."
|
|
"\nWrite ONLY one line, one sentence."
|
|
#"\nBe proactive, creative, and drive the plot and conversation forward."
|
|
),
|
|
"suffix_generate_dialogue_response": (
|
|
"Given the following observation, what would {name} say?"
|
|
"\nTo continue the conversation, only write: `SAY: \"what to say\"`."
|
|
"\nOr otherwise, to end the conversation, only write: `GOODBYE: \"what to say\"`."
|
|
"\nWrite ONLY one line, one sentence."
|
|
#"\nBe proactive, creative, and drive the plot and conversation forward."
|
|
),
|
|
}
|
|
|
|
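
# NOTE: the {placeholder} fields above (e.g. {observation}, {name}) are left
# un-interpolated by get_prompt() below; the caller is presumably expected to
# fill them in (e.g. via str.format) after assembling the prompt.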

PROMPT_TUNES = {
    "default": "{query}",
    "vicuna": "{role}: {query}",
    "supercot": "{role}:\n{query}",
}

PROMPT_ROLES = {
    "vicuna": {
        "system": "SYSTEM",
        "user": "USER",
        "assistant": "ASSISTANT",
    },
    "supercot": {
        "system": "### Instruction",
        "user": "### Input",
        "assistant": "### Response",
    },
}
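
# For example, with tune="vicuna", get_prompt("entity_from_observation") renders as:
#   SYSTEM: What is the observed entity in the following observation? ONLY report one object and write one sentence.
#   USER: Observation: {observation}
#   ASSISTANT: Entity=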

ROLES = [ "system", "user", "assistant" ]

def get_roles( tune=LLM_PROMPT_TUNE, special=True ):
    # return the role labels used by the given tune; tunes without special
    # labels yield [] (special=True) or the generic ROLES (special=False)
    if tune in PROMPT_ROLES:
        return list(PROMPT_ROLES[tune].values())
    if special:
        return []
    return ROLES
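
# e.g. get_roles("vicuna") == ["SYSTEM", "USER", "ASSISTANT"]
# and  get_roles("oai", special=False) == ["system", "user", "assistant"]
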
def get_prompt( key, tune=LLM_PROMPT_TUNE ):
    prompt = PROMPTS[key]

    # suffix entries are plain strings, not per-role dicts
    if not isinstance( prompt, dict ):
        return prompt

    # Vicuna is finetuned for `USER: [query]\nASSISTANT:`
    if tune not in PROMPT_TUNES:
        tune = "default"

    # prepend the shared context to the system prompt
    # (the containment check keeps this idempotent across calls)
    context = PROMPTS["context"]
    if context:
        if "system" in prompt:
            if context not in prompt["system"]:
                prompt["system"] = f'{context}\n{prompt["system"]}'
        else:
            prompt["system"] = f'{context}'

    outputs = []
    for role in ROLES: # strings are immutable, no copy needed
        if role not in prompt:
            continue
        query = prompt[role]

        # map the generic role name to the tune's label, if it has one
        if tune in PROMPT_ROLES:
            roles = PROMPT_ROLES[tune]
            if role in roles:
                role = roles[role]

        # strip the stop token hint for tunes that don't use it (e.g. OAI,
        # since there's no control over early terminating there)
        if STOP_TOKEN_HINT and tune not in USE_STOP_HINT and STOP_TOKEN_HINT in query:
            query = query.replace(STOP_TOKEN_HINT, "")

        output = PROMPT_TUNES[tune]
        output = output.replace("{role}", role)
        output = output.replace("{query}", query)
        outputs.append(output)

    return "\n".join(outputs)