Swapped to a much simpler way of formatting prompts for a given finetune: prompts are recorded as system/user/assistant dicts, then combined according to the provided finetune
parent 0964f48fc0
commit e9abd9e73f
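In short, each prompt is now stored as a system/user/assistant dict, and get_prompt() flattens it with the line template registered for the active finetune (LLM_PROMPT_TUNE). A minimal sketch of that assembly, simplified from the new prompts.py further down in this diff (the render name is illustrative only, not part of the commit):

# Sketch only: mirrors the new get_prompt() below in simplified form.
PROMPT_TUNES = {
    "default": "{query}",
    "vicuna": "{ROLE}: {query}",
}

def render(prompt: dict, tune: str = "vicuna") -> str:
    """Flatten a {system, user, assistant} prompt record into one string for the given finetune."""
    template = PROMPT_TUNES.get(tune, PROMPT_TUNES["default"])
    lines = []
    for role in ("system", "user", "assistant"):
        if role in prompt:
            lines.append(template.replace("{ROLE}", role.upper()).replace("{query}", prompt[role]))
    return "\n".join(lines)

# render({"system": "...", "user": "Observation: {observation}", "assistant": "Entity="})
# -> "SYSTEM: ...\nUSER: Observation: {observation}\nASSISTANT: Entity="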
@@ -34,7 +34,7 @@ from langchain.prompts import PromptTemplate
 from langchain.schema import BaseLanguageModel
 
 from .memory import GenerativeAgentMemory
-from .prompts import PROMPTS
+from .prompts import get_prompt
 
 class GenerativeAgent(BaseModel):
     """A character with memory and innate characteristics."""
@@ -52,7 +52,7 @@ class GenerativeAgent(BaseModel):
     """The memory object that combines relevance, recency, and 'importance'."""
     llm: BaseLanguageModel
     """The underlying language model."""
-    verbose: bool = False
+    verbose: bool = True
     summary: str = "" #: :meta private:
     """Stateful self-summary generated via reflection on the character's memory."""
 
@@ -83,43 +83,58 @@ class GenerativeAgent(BaseModel):
         )
 
     def _get_entity_from_observation(self, observation: str) -> str:
-        prompt = PromptTemplate.from_template(PROMPTS['entity_from_observation'])
+        if self.verbose:
+            print("_get_entity_from_observation")
+
+        prompt = PromptTemplate.from_template(get_prompt('entity_from_observation'))
         return self.chain(prompt).run(observation=observation).strip()
 
     def _get_entity_action(self, observation: str, entity_name: str) -> str:
-        prompt = PromptTemplate.from_template(PROMPTS['entity_action'])
-        return (
-            self.chain(prompt).run(entity=entity_name, observation=observation).strip()
-        )
+        if self.verbose:
+            print("_get_entity_action")
+
+        prompt = PromptTemplate.from_template(get_prompt('entity_action'))
+        return self.chain(prompt).run(entity=entity_name, observation=observation).strip()
 
     def summarize_related_memories(self, observation: str) -> str:
+        if self.verbose:
+            print("summarize_related_memories")
+
         """Summarize memories that are most relevant to an observation."""
-        prompt = PromptTemplate.from_template(PROMPTS['summarize_related_memories'])
+        prompt = PromptTemplate.from_template(get_prompt('summarize_related_memories'))
         entity_name = self._get_entity_from_observation(observation)
         entity_action = self._get_entity_action(observation, entity_name)
         q1 = f"What is the relationship between {self.name} and {entity_name}"
         q2 = f"{entity_name} is {entity_action}"
-        return self.chain(prompt=prompt).run(q1=q1, queries=[q1, q2]).strip()
+        summary = self.chain(prompt=prompt).run(q1=q1, queries=[q1, q2]).strip()
+        return summary
+
+        #return self.chain(prompt=prompt).run(q1=q1, q2=q2).strip()
 
     def _generate_reaction(self, observation: str, suffix: str) -> str:
+        if self.verbose:
+            print("_generate_reaction")
+
         """React to a given observation or dialogue act."""
-        prompt = PromptTemplate.from_template(PROMPTS['generate_reaction_template'])
+        prompt = PromptTemplate.from_template(
+            get_prompt('generate_reaction').replace("{suffix}", suffix)
+        )
         agent_summary_description = self.get_summary()
         relevant_memories_str = self.summarize_related_memories(observation)
 
         current_time_str = datetime.now().strftime("%B %d, %Y, %I:%M %p")
         kwargs: Dict[str, Any] = dict(
-            context=PROMPTS["context"],
             agent_summary_description=agent_summary_description,
             current_time=current_time_str,
             relevant_memories=relevant_memories_str,
             agent_name=self.name,
             observation=observation,
             agent_status=self.status,
-            suffix=suffix,
-        )
-        consumed_tokens = self.llm.get_num_tokens(
-            prompt.format(most_recent_memories="", **kwargs)
         )
+        formatted_prompt = prompt.format(most_recent_memories="", **kwargs)
+        consumed_tokens = self.llm.get_num_tokens(formatted_prompt)
 
         kwargs[self.memory.most_recent_memories_token_key] = consumed_tokens
         return self.chain(prompt=prompt).run(**kwargs).strip()
 
@@ -127,15 +142,22 @@ class GenerativeAgent(BaseModel):
         return re.sub(f"^{self.name} ", "", text.strip()).strip()
 
     def generate_reaction(self, observation: str) -> Tuple[bool, str]:
+        if self.verbose:
+            print("generate_reaction")
+
         """React to a given observation."""
-        full_result = self._generate_reaction(observation, PROMPTS['generate_reaction'])
+        full_result = self._generate_reaction(observation, get_prompt('suffix_generate_reaction'))
         result = full_result.strip().split("\n")[0]
 
+        response = f"reacted by {result}".strip()
+        if response == "reacted by":
+            response = f"did not react"
+
         # AAA
         self.memory.save_context(
             {},
             {
-                self.memory.add_memory_key: f"{self.name} observed "
-                f"{observation} and reacted by {result}"
+                self.memory.add_memory_key: f"{self.name} observed {observation} and {response}"
             },
         )
         if "REACT:" in result:
@@ -148,8 +170,11 @@ class GenerativeAgent(BaseModel):
         return False, result
 
     def generate_dialogue_response(self, observation: str) -> Tuple[bool, str]:
+        if self.verbose:
+            print("generate_dialogue_response")
+
         """React to a given observation."""
-        call_to_action_template = (PROMPTS['generate_dialogue_response'])
+        call_to_action_template = (get_prompt('suffix_generate_dialogue_response'))
         full_result = self._generate_reaction(observation, call_to_action_template)
         result = full_result.strip().split("\n")[0]
         if "GOODBYE:" in result:
@@ -182,16 +207,19 @@ class GenerativeAgent(BaseModel):
     # updated periodically through probing its memories #
     ######################################################
     def _compute_agent_summary(self) -> str:
+        if self.verbose:
+            print("_compute_agent_summary")
+
         """"""
-        prompt = PromptTemplate.from_template(PROMPTS['compute_agent_summary'])
         # The agent seeks to think about their core characteristics.
-        return (
-            self.chain(prompt)
-            .run(name=self.name, queries=[f"{self.name}'s core characteristics"])
-            .strip()
-        )
+        prompt = PromptTemplate.from_template(get_prompt('compute_agent_summary'))
+        summary = self.chain(prompt).run(name=self.name, queries=[f"{self.name}'s core characteristics"]).strip()
+        return summary
 
     def get_summary(self, force_refresh: bool = False) -> str:
+        if self.verbose:
+            print("get_summary")
+
         """Return a descriptive summary of the agent."""
         current_time = datetime.now()
         since_refresh = (current_time - self.last_refreshed).seconds
@@ -34,7 +34,7 @@ from langchain.schema import BaseLanguageModel, BaseMemory, Document
 
 logger = logging.getLogger(__name__)
 
-from .prompts import PROMPTS
+from .prompts import get_prompt
 
 class GenerativeAgentMemory(BaseMemory):
     llm: BaseLanguageModel
@@ -42,7 +42,7 @@ class GenerativeAgentMemory(BaseMemory):
 
     memory_retriever: TimeWeightedVectorStoreRetriever
     """The retriever to fetch related memories."""
-    verbose: bool = False
+    verbose: bool = True
 
     reflection_threshold: Optional[float] = None
    """When aggregate_importance exceeds reflection_threshold, stop to reflect."""
@@ -80,16 +80,22 @@ class GenerativeAgentMemory(BaseMemory):
         return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines]
 
     def _get_topics_of_reflection(self, last_k: int = 50) -> List[str]:
+        if self.verbose:
+            print("_get_topics_of_reflection")
+
         """Return the 3 most salient high-level questions about recent observations."""
-        prompt = PromptTemplate.from_template(PROMPTS["topic_of_reflection"])
+        prompt = PromptTemplate.from_template(get_prompt("topic_of_reflection"))
         observations = self.memory_retriever.memory_stream[-last_k:]
         observation_str = "\n".join([o.page_content for o in observations])
         result = self.chain(prompt).run(observations=observation_str)
         return self._parse_list(result)
 
     def _get_insights_on_topic(self, topic: str) -> List[str]:
+        if self.verbose:
+            print("_get_insights_on_topic")
+
         """Generate 'insights' on a topic of reflection, based on pertinent memories."""
-        prompt = PromptTemplate.from_template(PROMPTS["insights_on_topic"])
+        prompt = PromptTemplate.from_template(get_prompt("insights_on_topic"))
         related_memories = self.fetch_memories(topic)
         related_statements = "\n".join(
             [
@@ -104,6 +110,9 @@ class GenerativeAgentMemory(BaseMemory):
         return self._parse_list(result)
 
     def pause_to_reflect(self) -> List[str]:
+        if self.verbose:
+            print("pause_to_reflect")
+
         """Reflect on recent observations and generate 'insights'."""
         if self.verbose:
             logger.info("Character is reflecting")
@@ -117,8 +126,11 @@ class GenerativeAgentMemory(BaseMemory):
         return new_insights
 
     def _score_memory_importance(self, memory_content: str) -> float:
+        if self.verbose:
+            print("_score_memory_importance")
+
         """Score the absolute importance of the given memory."""
-        prompt = PromptTemplate.from_template(PROMPTS["memory_importance"])
+        prompt = PromptTemplate.from_template(get_prompt("memory_importance"))
         score = self.chain(prompt).run(memory_content=memory_content).strip()
         if self.verbose:
             logger.info(f"Importance score: {score}")
@@ -131,6 +143,9 @@ class GenerativeAgentMemory(BaseMemory):
         return 0.0
 
     def add_memory(self, memory_content: str) -> List[str]:
+        if self.verbose:
+            print("add_memory")
+
         """Add an observation or memory to the agent's memory."""
         importance_score = self._score_memory_importance(memory_content)
         self.aggregate_importance += importance_score
@@ -2,147 +2,152 @@ import os
 
 LLM_PROMPT_TUNE = os.environ.get('LLM_PROMPT_TUNE', "vicuna") # oai, vicuna
 
-if LLM_PROMPT_TUNE == "vicuna":
-    PROMPTS = {
-        "context": (
-            "" # insert your JB here
-        ),
-        "entity_from_observation": (
-            "USER: What is the observed entity in the following observation (Write 'END' when you are done.)? {observation}"
-            "\nASSISTANT: Entity="
-        ),
-        "entity_action": (
-            "USER: What is the {entity} doing in the following observation (Write 'END' when you are done.)? {observation}"
-            "\nASSISTANT: The {entity} is"
-        ),
-        "summarize_related_memories": (
-            "USER: {q1}? Write 'END' when you are done."
-            "\nContext from memory:"
-            "\n{relevant_memories}"
-            "\nASSISTANT:"
-            "\nRelevant context: "
-        ),
-        "generate_reaction_template": (
-            "{context}"
-            "\nUSER: {agent_summary_description}"
-            "\nIt is {current_time}."
-            "\n{agent_name}'s status: {agent_status}"
-            "\nSummary of relevant context from {agent_name}'s memory:"
-            "\n{relevant_memories}"
-            "\nMost recent observations: {most_recent_memories}"
-            "\nObservation: {observation}"
-            "\n{suffix}"
-            "\nASSISTANT: "
-        ),
-        "generate_reaction": (
-            "Should {agent_name} react to the observation, and if so,"
-            " what would be an appropriate reaction? Respond in one line."
-            ' If the action is to engage in dialogue, write:\nSAY: "what to say"'
-            "\notherwise, write:\nREACT: {agent_name}'s reaction (if anything)."
-            "\nEither do nothing, react, or say something but not both. Write 'END' when you are done."
-        ),
-        "generate_dialogue_response": (
-            "What would {agent_name} say? To end the conversation, write:"
-            ' GOODBYE: "what to say". Otherwise to continue the conversation,'
-            ' write: SAY: "what to say next". Write "END" when you are done.'
-        ),
-        "compute_agent_summary": (
-            "USER: How would you summarize {name}'s core characteristics given the following statements (Do not embellish under any circumstances. Say 'END' when you are done):\n"
-            "{relevant_memories}"
-            "\nASSISTANT: Summary: "
-        ),
-        "topic_of_reflection": (
-            "USER: {observations}\n\n"
-            "Given only the information above, what are the 3 most salient"
-            " high-level questions we can answer about the subjects in"
-            " the statements? Provide each question on a new line.\n"
-            "\nASSISTANT: "
-        ),
-        "insights_on_topic": (
-            "USER: Statements about {topic}\n"
-            "{related_statements}\n"
-            "What 5 high-level insights can you infer from the above statements?"
-            " (example format: insight (because of 1, 5, 3))"
-            "\nASSISTANT: "
-        ),
-        "memory_importance": (
-            "USER: On the scale of 1 to 10, where 1 is purely mundane"
-            " (e.g., brushing teeth, making bed) and 10 is"
-            " extremely poignant (e.g., a break up, college"
-            " acceptance), rate the likely poignancy of the"
-            " following piece of memory. Respond with only a single integer followed by 'END'."
-            "\nMemory: {memory_content}"
-            "\nASSISTANT: Rating: "
-        ),
-    }
-else:
-    PROMPTS = {
-        "context": (
-            "" # insert your JB here
-        ),
-        "entity_from_observation": (
-            "What is the observed entity in the following observation? {observation}"
-            "\nEntity="
-        ),
-        "entity_action": (
-            "What is the {entity} doing in the following observation? {observation}"
-            "\nThe {entity} is"
-        ),
-        "summarize_related_memories": """
-{q1}?
-Context from memory:
-{relevant_memories}
-Relevant context:
-""",
-        "generate_reaction_template": (
-            "{context}"
-            "\n{agent_summary_description}"
-            "\nIt is {current_time}."
-            "\n{agent_name}'s status: {agent_status}"
-            "\nSummary of relevant context from {agent_name}'s memory:"
-            "\n{relevant_memories}"
-            "\nMost recent observations: {most_recent_memories}"
-            "\nObservation: {observation}"
-            "\n\n{suffix}"
-        ),
-        "generate_reaction": (
-            "Should {agent_name} react to the observation, and if so,"
-            " what would be an appropriate reaction? Respond in one line."
-            ' If the action is to engage in dialogue, write:\nSAY: "what to say"'
-            "\notherwise, write:\nREACT: {agent_name}'s reaction (if anything)."
-            "\nEither do nothing, react, or say something but not both.\n\n"
-        ),
-        "generate_dialogue_response": (
-            "What would {agent_name} say? To end the conversation, write:"
-            ' GOODBYE: "what to say". Otherwise to continue the conversation,'
-            ' write: SAY: "what to say next"\n\n'
-        ),
-        "compute_agent_summary": (
-            "How would you summarize {name}'s core characteristics given the"
-            " following statements:\n"
-            "{relevant_memories}"
-            "\nDo not embellish under any circumstances."
-            "\n\nSummary: "
-        ),
-        "topic_of_reflection": (
-            "{observations}\n\n"
-            "Given only the information above, what are the 3 most salient"
-            " high-level questions we can answer about the subjects in"
-            " the statements? Provide each question on a new line.\n\n"
-        ),
-        "insights_on_topic": (
-            "Statements about {topic}\n"
-            "{related_statements}\n\n"
-            "What 5 high-level insights can you infer from the above statements?"
-            " (example format: insight (because of 1, 5, 3))"
-        ),
-        "memory_importance": (
-            "On the scale of 1 to 10, where 1 is purely mundane"
-            " (e.g., brushing teeth, making bed) and 10 is"
-            " extremely poignant (e.g., a break up, college"
-            " acceptance), rate the likely poignancy of the"
-            " following piece of memory. Respond with a single integer."
-            "\nMemory: {memory_content}"
-            "\nRating: "
-        ),
-    }
+PROMPTS = {
+    "entity_from_observation": {
+        "system": (
+            "What is the observed entity in the following observation?"
+            " ONLY report one object."
+            " Write `END` when you are done."
+        ),
+        "user": (
+            "Observation: {observation}"
+        ),
+        "assistant": "Entity=",
+    },
+    "entity_action": {
+        "system": (
+            "What is the {entity} doing in the following observation?"
+            " ONLY report one object."
+            " Write `END` when you are done."
+        ),
+        "user": (
+            "Observation: {observation}"
+        ),
+        "assistant": "The {entity} is ",
+    },
+    "summarize_related_memories": {
+        "system": (
+            "Given the following context, {q1}?"
+            "Write `END` when you are done."
+        ),
+        "user": (
+            "Context: {relevant_memories}"
+        ),
+        "assistant": "Relevant context: ",
+    },
+    "compute_agent_summary": {
+        "system": (
+            "Given the following statements, how would you summarize {name}'s core characteristics?"
+            " (Do not embellish under any circumstances. Say 'END' when you are done):"
+        ),
+        "user": (
+            "Statements: {relevant_memories}"
+        ),
+        "assistant": "Summary: ",
+    },
+    "topic_of_reflection": {
+        "system": (
+            "Given only the following information, what are the 3 most salient"
+            " high-level questions we can answer about the subjects in the statements?"
+            " Provide each question on a new line."
+        ),
+        "user": (
+            "Information: {observations}"
+        ),
+        "assistant": "",
+    },
+    "insights_on_topic": {
+        "system": (
+            "Given the following statements about {topic},"
+            " what 5 high-level insights can you infer?"
+            " (example format: insight (because of 1, 5, 3))"
+        ),
+        "user": (
+            "Statements: {related_statements}"
+        ),
+        "assistant": "",
+    },
+    "memory_importance": {
+        "system": (
+            "On the scale of 1 to 10, where 1 is purely mundane"
+            " (e.g., brushing teeth, making bed) and 10 is extremely poignant"
+            " (e.g., a break up, college acceptance),"
+            " rate the likely poignancy of the following piece of memory."
+            " Respond with only a single integer followed by 'END'."
+        ),
+        "user": (
+            "Memory: {memory_content}"
+        ),
+        "assistant": "Rating: ",
+    },
+    "generate_reaction": {
+        "system": (
+            "{agent_summary_description}"
+            "\nIt is {current_time}."
+            "\n{agent_name}'s status: {agent_status}"
+            "\nSummary of relevant context from {agent_name}'s memory: {relevant_memories}"
+            "\nMost recent observations: {most_recent_memories}"
+            "\n{suffix}"
+        ),
+        "user": (
+            "\nObservation: {observation}"
+        ),
+        "assistant": ""
+    },
+
+    #
+    "context": (
+        "" # insert your JB here
+    ),
+    "suffix_generate_reaction": (
+        "Given the following observation, how would {agent_name} appropriately react?"
+        "\nRespond in one line. If the action is to engage in dialogue, write `SAY: \"what to say\"`."
+        "\nOtherwise, write `REACT: {agent_name}'s reaction`."
+        "\nEither react or say something, but not both. Write 'END' when you are done."
+        " (To reiterate, either start with \"SAY:\", or \"REACT:\", and end with \"END\")"
+    ),
+    "suffix_generate_dialogue_response": (
+        "Given the following observation, what would {agent_name} say?"
+        "\nTo continue the conversation, write: `SAY: \"what to say\"`."
+        "\nOtherwise, to end the conversation, write: `GOODBYE: \"what to say\"`."
+        "\nWrite \"END\" when you are done."
+        " (To reiterate, either start with \"SAY:\", or \"GOODBYE:\", and end with \"END\")"
+    ),
+}
+
+PROMPT_TUNES = {
+    "default": "{query}",
+    "vicuna": "{ROLE}: {query}"
+}
+
+ROLES = [ "system", "user", "assistant" ]
+
+def get_prompt( key, tune=LLM_PROMPT_TUNE ):
+    prompt = PROMPTS[key]
+
+    # is a suffix
+    if not isinstance( prompt, dict ):
+        return prompt
+
+    # Vicuna is finetuned for `USER: [query]\nASSISTANT:`
+    if tune not in PROMPT_TUNES:
+        tune = "default"
+
+    outputs = []
+    for role in ROLES:
+        if role not in prompt:
+            # implicitly add in our context as a system message
+            if role == "system" and PROMPTS["context"]:
+                query = PROMPTS["context"]
+            else:
+                continue
+        else:
+            query = prompt[role]
+
+        output = f'{PROMPT_TUNES[tune]}'
+        output = output.replace("{role}", role.lower())
+        output = output.replace("{ROLE}", role.upper())
+        output = output.replace("{query}", query)
+        outputs.append(output)
+
+    return "\n".join(outputs)
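For illustration, here is roughly what the new get_prompt yields (the import path and call site are assumed for this example; the vicuna tune prefixes each part with its role, while suffix_* entries are plain strings and pass through unchanged):

from prompts import get_prompt   # import path assumed for this example

print(get_prompt("entity_from_observation", tune="vicuna"))
# SYSTEM: What is the observed entity in the following observation? ONLY report one object. Write `END` when you are done.
# USER: Observation: {observation}
# ASSISTANT: Entity=

print(get_prompt("suffix_generate_reaction"))  # not a dict, so returned as-is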
@@ -41,7 +41,7 @@ def agent_observes_proxy( agents, observations ):
         agent = AGENTS[agent]
         observations = observations.split("\n")
         results = agent_observes( agent, observations )
-        messages.append(f"[{agent.name} Observation noted. Importance score: {[ result[-1] for result in results ]}")
+        messages.append(f"[{agent.name}] Observation noted. Importance score: {[ result[0] for result in results ]}")
     return "\n".join(messages)
 
 def interview_agent_proxy( agents, message ):
@@ -123,7 +123,7 @@ def _create_new_memories():
         memory_retriever=_create_new_memory_retriever(),
         reflection_threshold=8,
         verbose=True,
-        max_tokens_limit=LLM_CONTEXT/2
+        max_tokens_limit=LLM_CONTEXT/4
     )
 
 def create_agent(**kwargs):
@@ -184,7 +184,7 @@ def interview_agent(agent: GenerativeAgent, message: str, username: str = "Perso
 def run_conversation(agents: List[GenerativeAgent], initial_observation: str, limit: int = 0, p_reaction: float = 0.7 ) -> None:
     """Runs a conversation between agents."""
     print(colored("[Conversation]", "magenta"), initial_observation)
-    _, observation = agents[1].generate_reaction(initial_observation)
+    _, observation = agents[0].generate_reaction(initial_observation)
     print(colored("[Conversation]", "magenta"), observation)
 
     dialogue = []