more tuning

parent e9abd9e73f
commit 089b7043b9

README.md | 11

@@ -2,7 +2,10 @@
 This serves as yet-another cobbled together application of [generative agents](https://arxiv.org/pdf/2304.03442.pdf) utilizing [LangChain](https://github.com/hwchase17/langchain/tree/master/langchain) as the core dependency and subjugating a "proxy" for GPT4.
 
-In short, by utilizing a language model to summarize, rank, and query against information, immersive agents can be attained.
+In short, by utilizing a language model to summarize, rank, and query against information using NLP queries/instructions, immersive agents can be attained.
 
 ## Features

@@ -38,3 +41,9 @@ python .\src\main.py
 ## Plans
 
 I ***do not*** plan on making this uber-user friendly like [mrq/ai-voice-cloning](https://git.ecker.tech/mrq/ai-voice-cloning), as this is just a stepping stone for a bigger project integrating generative agents.
+
+## Caveats
+
+A local LM is quite slow. Even using one that's more instruction-tuned like Vicuna (with a `SYSTEM:\nUSER:\nASSISTANT:` structure of prompts) is still inconsistent.
+
+GPT4 seems to Just Work, unfortunately.
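For reference, the Vicuna structure mentioned in the added Caveats amounts to concatenating three role-tagged parts into one string. A minimal sketch, assuming a plain `SYSTEM:/USER:/ASSISTANT:` layout (the helper below is illustrative, not this repo's code; exact spacing varies per checkpoint):

```python
# Hypothetical helper: one way to lay out a Vicuna-style request.
def build_vicuna_prompt(system: str, user: str, assistant_prefix: str = "") -> str:
    # Seeding the assistant turn lets the model "continue" an answer,
    # which is what the "assistant" entries in the prompts file below do.
    return f"SYSTEM: {system}\nUSER: {user}\nASSISTANT: {assistant_prefix}"

print(build_vicuna_prompt(
    "What is the observed entity in the following observation?",
    "Observation: Sam is walking his dog.",
))
```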
@@ -83,39 +83,31 @@ class GenerativeAgent(BaseModel):
         )
 
     def _get_entity_from_observation(self, observation: str) -> str:
-        if self.verbose:
-            print("_get_entity_from_observation")
-
         prompt = PromptTemplate.from_template(get_prompt('entity_from_observation'))
         return self.chain(prompt).run(observation=observation).strip()
 
     def _get_entity_action(self, observation: str, entity_name: str) -> str:
-        if self.verbose:
-            print("_get_entity_action")
-
         prompt = PromptTemplate.from_template(get_prompt('entity_action'))
         return self.chain(prompt).run(entity=entity_name, observation=observation).strip()
 
     def summarize_related_memories(self, observation: str) -> str:
-        if self.verbose:
-            print("summarize_related_memories")
-
         """Summarize memories that are most relevant to an observation."""
         prompt = PromptTemplate.from_template(get_prompt('summarize_related_memories'))
-        entity_name = self._get_entity_from_observation(observation)
-        entity_action = self._get_entity_action(observation, entity_name)
+        entity_name = self._get_entity_from_observation(observation).split("\n")[0]
         q1 = f"What is the relationship between {self.name} and {entity_name}"
-        q2 = f"{entity_name} is {entity_action}"
+
+        # this is unused, so ignore for now
+        """
+        entity_action = self._get_entity_action(observation, entity_name)
+        q2 = f"{entity_name} is {entity_action}"
         summary = self.chain(prompt=prompt).run(q1=q1, queries=[q1, q2]).strip()
+        """
+        summary = self.chain(prompt=prompt).run(q1=q1, queries=[q1]).strip()
         return summary
 
         #return self.chain(prompt=prompt).run(q1=q1, q2=q2).strip()
 
     def _generate_reaction(self, observation: str, suffix: str) -> str:
-        if self.verbose:
-            print("_generate_reaction")
-
         """React to a given observation or dialogue act."""
         prompt = PromptTemplate.from_template(
             get_prompt('generate_reaction').replace("{suffix}", suffix)
@@ -142,15 +134,21 @@ class GenerativeAgent(BaseModel):
         return re.sub(f"^{self.name} ", "", text.strip()).strip()
 
     def generate_reaction(self, observation: str) -> Tuple[bool, str]:
-        if self.verbose:
-            print("generate_reaction")
-
         """React to a given observation."""
         full_result = self._generate_reaction(observation, get_prompt('suffix_generate_reaction'))
-        result = full_result.strip().split("\n")[0]
+        candidates = full_result.replace(u"\u200B", "").strip().split("\n")
 
-        response = f"reacted by {result}".strip()
-        if response == "reacted by":
+        result = ""
+        results = []
+
+        for candidate in candidates:
+            if "REACT:" in candidate or "SAY:" in candidate:
+                candidate = candidate.strip()
+                results.append(f'reacted by {candidate}'.replace("SAY:", "saying").replace("reacted by REACT: ", ""))
+        if len(results) > 0:
+            result = "and".join(results)
+            response = f"reacted by {result}"
+        else:
             response = f"did not react"
 
         # AAA
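Isolated from the class above, the new multi-line parsing in `generate_reaction` behaves roughly like this (a standalone sketch; `full_result` stands in for whatever text the LM returned):

```python
def parse_reaction(full_result: str) -> str:
    # Strip zero-width spaces some local models emit, then scan every
    # line for a REACT:/SAY: marker instead of trusting only line one.
    candidates = full_result.replace("\u200B", "").strip().split("\n")
    results = []
    for candidate in candidates:
        if "REACT:" in candidate or "SAY:" in candidate:
            candidate = candidate.strip()
            results.append(f"reacted by {candidate}".replace("SAY:", "saying").replace("reacted by REACT: ", ""))
    if len(results) > 0:
        # per the diff, multiple candidates are joined with a bare "and"
        return f"reacted by {'and'.join(results)}"
    return "did not react"

print(parse_reaction("REACT: Sam waves.\nEND"))  # -> reacted by Sam waves.
```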
@@ -167,12 +165,9 @@ class GenerativeAgent(BaseModel):
             said_value = self._clean_response(result.split("SAY:")[-1])
             return True, f"{self.name} said {said_value}"
         else:
-            return False, result
+            return False, f"{self.name} did not react in a relevant way"
 
     def generate_dialogue_response(self, observation: str) -> Tuple[bool, str]:
-        if self.verbose:
-            print("generate_dialogue_response")
-
         """React to a given observation."""
         call_to_action_template = (get_prompt('suffix_generate_dialogue_response'))
         full_result = self._generate_reaction(observation, call_to_action_template)
@@ -207,9 +202,6 @@ class GenerativeAgent(BaseModel):
     # updated periodically through probing its memories #
     ######################################################
     def _compute_agent_summary(self) -> str:
-        if self.verbose:
-            print("_compute_agent_summary")
-
         """"""
         # The agent seeks to think about their core characteristics.
         prompt = PromptTemplate.from_template(get_prompt('compute_agent_summary'))
@@ -217,9 +209,6 @@ class GenerativeAgent(BaseModel):
         return summary
 
     def get_summary(self, force_refresh: bool = False) -> str:
-        if self.verbose:
-            print("get_summary")
-
         """Return a descriptive summary of the agent."""
         current_time = datetime.now()
         since_refresh = (current_time - self.last_refreshed).seconds
@@ -234,7 +223,7 @@ class GenerativeAgent(BaseModel):
         return (
             f"Name: {self.name} (age: {age})"
             + f"\nInnate traits: {self.traits}"
-            + f"\n{self.summary}"
+            + f"\n{self.summary.strip()}"
         )
 
     def get_full_header(self, force_refresh: bool = False) -> str:
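The `get_summary` hunk above is a time-based cache: recompute the summary only when stale or forced. Reduced to the bare pattern (a sketch; the actual refresh window is outside the lines shown, so the 3600s below is an assumption):

```python
from datetime import datetime

class SummaryCacheSketch:
    def __init__(self, refresh_seconds: int = 3600):  # assumed window
        self.summary = ""
        self.last_refreshed = datetime.now()
        self.refresh_seconds = refresh_seconds

    def get_summary(self, compute_summary, force_refresh: bool = False) -> str:
        # same .seconds arithmetic as the diff
        since_refresh = (datetime.now() - self.last_refreshed).seconds
        if not self.summary or since_refresh >= self.refresh_seconds or force_refresh:
            self.summary = compute_summary()  # stands in for _compute_agent_summary()
            self.last_refreshed = datetime.now()
        return self.summary
```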
@@ -80,9 +80,6 @@ class GenerativeAgentMemory(BaseMemory):
         return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines]
 
     def _get_topics_of_reflection(self, last_k: int = 50) -> List[str]:
-        if self.verbose:
-            print("_get_topics_of_reflection")
-
         """Return the 3 most salient high-level questions about recent observations."""
         prompt = PromptTemplate.from_template(get_prompt("topic_of_reflection"))
         observations = self.memory_retriever.memory_stream[-last_k:]
@@ -91,9 +88,6 @@ class GenerativeAgentMemory(BaseMemory):
         return self._parse_list(result)
 
     def _get_insights_on_topic(self, topic: str) -> List[str]:
-        if self.verbose:
-            print("_get_insights_on_topic")
-
         """Generate 'insights' on a topic of reflection, based on pertinent memories."""
         prompt = PromptTemplate.from_template(get_prompt("insights_on_topic"))
         related_memories = self.fetch_memories(topic)
@@ -110,9 +104,6 @@ class GenerativeAgentMemory(BaseMemory):
         return self._parse_list(result)
 
     def pause_to_reflect(self) -> List[str]:
-        if self.verbose:
-            print("pause_to_reflect")
-
         """Reflect on recent observations and generate 'insights'."""
         if self.verbose:
             logger.info("Character is reflecting")
@@ -126,9 +117,6 @@ class GenerativeAgentMemory(BaseMemory):
         return new_insights
 
     def _score_memory_importance(self, memory_content: str) -> float:
-        if self.verbose:
-            print("_score_memory_importance")
-
         """Score the absolute importance of the given memory."""
         prompt = PromptTemplate.from_template(get_prompt("memory_importance"))
         score = self.chain(prompt).run(memory_content=memory_content).strip()
@@ -143,9 +131,6 @@ class GenerativeAgentMemory(BaseMemory):
         return 0.0
 
     def add_memory(self, memory_content: str) -> List[str]:
-        if self.verbose:
-            print("add_memory")
-
         """Add an observation or memory to the agent's memory."""
         importance_score = self._score_memory_importance(memory_content)
         self.aggregate_importance += importance_score
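Taken together, the memory methods above form one loop: topics of reflection are asked for, insights are generated per topic, and those insights are stored back as new memories. A rough sketch of the control flow (method names mirror the hunks; the exact body of `pause_to_reflect` is outside the lines shown, and the real code presumably guards against recursive reflection):

```python
def pause_to_reflect_sketch(memory):
    # 1) Ask the LM for salient high-level questions about recent
    #    observations, 2) generate insights for each, 3) store insights
    #    back so future retrieval can surface them.
    new_insights = []
    for topic in memory._get_topics_of_reflection():
        for insight in memory._get_insights_on_topic(topic):
            memory.add_memory(insight)
            new_insights.append(insight)
    return new_insights
```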
@@ -6,8 +6,8 @@ PROMPTS = {
     "entity_from_observation": {
         "system": (
             "What is the observed entity in the following observation?"
-            " ONLY report one object."
+            " ONLY report one object and write one sentence."
-            " Write `END` when you are done."
+            " Write `END` afterwards."
         ),
         "user": (
             "Observation: {observation}"
@@ -16,32 +16,34 @@ PROMPTS = {
     },
     "entity_action": {
         "system": (
-            "What is the {entity} doing in the following observation?"
+            "What is `{entity}` doing in the following observation?"
-            " ONLY report one object."
+            " ONLY report one object and write one sentence."
-            " Write `END` when you are done."
+            " Write `END` afterwards."
         ),
         "user": (
             "Observation: {observation}"
         ),
-        "assistant": "The {entity} is ",
+        "assistant": "`{entity}` is ",
     },
     "summarize_related_memories": {
         "system": (
-            "Given the following context, {q1}?"
+            "Given the following context, answer the following question in four sentences or less."
-            "Write `END` when you are done."
+            " Write `END` afterwards."
         ),
         "user": (
-            "Context: {relevant_memories}"
+            "{q1}?"
+            "\nContext: {relevant_memories_simple}"
         ),
         "assistant": "Relevant context: ",
     },
     "compute_agent_summary": {
         "system": (
             "Given the following statements, how would you summarize {name}'s core characteristics?"
-            " (Do not embellish under any circumstances. Say 'END' when you are done):"
+            " Do not embellish under any circumstances."
+            " Write `END` afterwards."
         ),
         "user": (
-            "Statements: {relevant_memories}"
+            "Statements: {relevant_memories_simple}"
         ),
         "assistant": "Summary: ",
     },
@@ -82,15 +84,16 @@ PROMPTS = {
     },
     "generate_reaction": {
         "system": (
-            "{agent_summary_description}"
-            "\nIt is {current_time}."
+            "It is {current_time}."
+            " The following is a description of {agent_name}:"
+            "\n{agent_summary_description}"
             "\n{agent_name}'s status: {agent_status}"
             "\nSummary of relevant context from {agent_name}'s memory: {relevant_memories}"
             "\nMost recent observations: {most_recent_memories}"
-            "\n{suffix}"
+            "\n\n{suffix}"
         ),
         "user": (
-            "\nObservation: {observation}"
+            "Observation: {observation}"
         ),
         "assistant": ""
     },
@@ -100,18 +103,16 @@ PROMPTS = {
         "" # insert your JB here
     ),
     "suffix_generate_reaction": (
-        "Given the following observation, how would {agent_name} appropriately react?"
+        "Given the following observation, in one sentence, how would {agent_name} appropriately react?"
-        "\nRespond in one line. If the action is to engage in dialogue, write `SAY: \"what to say\"`."
+        "\nIf the action is to engage in dialogue, write `SAY: \"what to say\"`."
         "\nOtherwise, write `REACT: {agent_name}'s reaction`."
-        "\nEither react or say something, but not both. Write 'END' when you are done."
+        "\nWrite 'END' afterwards."
-        " (To reiterate, either start with \"SAY:\", or \"REACT:\", and end with \"END\")"
     ),
     "suffix_generate_dialogue_response": (
-        "Given the following observation, what would {agent_name} say?"
+        "Given the following observation, in one sentence, what would {agent_name} say?"
         "\nTo continue the conversation, write: `SAY: \"what to say\"`."
         "\nOtherwise, to end the conversation, write: `GOODBYE: \"what to say\"`."
-        "\nWrite \"END\" when you are done."
+        "\nWrite \"END\" afterwards."
-        " (To reiterate, either start with \"SAY:\", or \"GOODBYE:\", and end with \"END\")"
     ),
 }
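These entries are consumed via LangChain's `PromptTemplate`, with `get_prompt` assembling the role parts for the active model. Filling the reworked `summarize_related_memories` template would look like this (a sketch; it assumes `{relevant_memories_simple}` is plain newline-joined text, which the name suggests but the diff does not show):

```python
from langchain.prompts import PromptTemplate

template = (
    "Given the following context, answer the following question in four sentences or less."
    " Write `END` afterwards."
    "\n{q1}?"
    "\nContext: {relevant_memories_simple}"
)
prompt = PromptTemplate.from_template(template)
print(prompt.format(
    q1="What is the relationship between Sam and his dog",
    relevant_memories_simple="Sam walked his dog.\nSam fed his dog.",
))
```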
src/main.py | 38

@@ -69,7 +69,7 @@ def run_conversation_proxy( agents, message ):
     messages = run_conversation( agents, message, limit=len(agents)*3 )
     return "\n".join(messages)
 
-def agent_view_memories( agents, last_k = 50 ):
+def view_agent( agents, last_k = 50 ):
     if not isinstance( agents, list ):
         agents = [ agents ]
 
@@ -77,7 +77,10 @@ def agent_view_memories( agents, last_k = 50 ):
     for agent in agents:
         agent = AGENTS[agent]
         memories = agent.memory.memory_retriever.memory_stream[-last_k:]
-        messages.append("\n".join([ document.page_content for document in memories]))
+        memories = "\n".join([ document.page_content for document in memories])
+        message = f"{agent.name}'s summary:\n{agent.summary}\n{agent.name}'s memories:\n{memories}"
+
+        messages.append( message )
     return "\n".join(messages)
 
 def get_agents_list():
@@ -130,6 +133,7 @@ def setup_webui(share=False):
     AGENT_SETTINGS = {}
     OBSERVE_SETTINGS = {}
     SAVELOAD_SETTINGS = {}
+    CONSOLE_OUTPUTS = {}
 
     ACTIONS = {}
@@ -149,11 +153,11 @@ def setup_webui(share=False):
                 ACTIONS["add_agent"] = gr.Button(value="Add Agent")
                 ACTIONS["edit_agent"] = gr.Button(value="Edit Agent")
             with gr.Column():
-                console_output = gr.Textbox(lines=8, label="Console Output")
+                CONSOLE_OUTPUTS["create_agent"] = gr.Textbox(lines=8, label="Console Output")
 
     ACTIONS["edit_agent"].click(edit_agent,
         inputs=list(AGENT_SETTINGS.values()),
-        outputs=console_output
+        outputs=CONSOLE_OUTPUTS["create_agent"]
     )
     with gr.Tab("Save/Load"):
         with gr.Row():
@@ -165,9 +169,12 @@ def setup_webui(share=False):
             ACTIONS["load"] = gr.Button(value="Load")
             ACTIONS["refresh_agents_list"] = gr.Button(value="Refresh Agents List")
 
-    ACTIONS["save"].click(save_agent_proxy,
-        inputs=SAVELOAD_SETTINGS["agent"],
-    )
+            with gr.Column():
+                CONSOLE_OUTPUTS["save_load_agent"] = gr.Textbox(lines=8, label="Console Output")
+
+    ACTIONS["save"].click(save_agent_proxy,
+        inputs=SAVELOAD_SETTINGS["agent"],
+    )
     with gr.Tab("Agent Actions"):
         with gr.Row():
             with gr.Column():
@@ -181,34 +188,33 @@ def setup_webui(share=False):
                 ACTIONS["interview"] = gr.Button(value="Interview")
                 ACTIONS["converse"] = gr.Button(value="Converse")
             with gr.Column():
-                console_output = gr.Textbox(lines=8, label="Console Output")
+                CONSOLE_OUTPUTS["agent_actions"] = gr.Textbox(lines=8, label="Console Output")
 
     ACTIONS["act"].click(agent_observes_proxy,
         inputs=list(OBSERVE_SETTINGS.values()),
-        outputs=console_output
+        outputs=CONSOLE_OUTPUTS["agent_actions"]
     )
-    ACTIONS["view"].click(agent_view_memories,
+    ACTIONS["view"].click(view_agent,
         inputs=OBSERVE_SETTINGS["agent"],
-        outputs=console_output
+        outputs=CONSOLE_OUTPUTS["agent_actions"]
     )
     ACTIONS["summarize"].click(get_summary_proxy,
         inputs=OBSERVE_SETTINGS["agent"],
-        outputs=console_output
+        outputs=CONSOLE_OUTPUTS["agent_actions"]
     )
     ACTIONS["interview"].click(interview_agent_proxy,
         inputs=list(OBSERVE_SETTINGS.values()),
-        outputs=console_output
+        outputs=CONSOLE_OUTPUTS["agent_actions"]
     )
     ACTIONS["converse"].click(run_conversation_proxy,
        inputs=list(OBSERVE_SETTINGS.values()),
-        outputs=console_output
+        outputs=CONSOLE_OUTPUTS["agent_actions"]
     )
 
-
     ACTIONS["add_agent"].click(create_agent_proxy,
         inputs=list(AGENT_SETTINGS.values()),
         outputs=[
-            console_output,
+            CONSOLE_OUTPUTS["create_agent"],
             OBSERVE_SETTINGS["agent"],
         ]
     )
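The thrust of the main.py changes: the single shared `console_output` becomes a `CONSOLE_OUTPUTS` registry so each tab owns its console textbox. The pattern, reduced to a self-contained sketch (Gradio 3.x-style Blocks API; button labels and lambdas are placeholders):

```python
import gradio as gr

CONSOLE_OUTPUTS = {}

with gr.Blocks() as demo:
    with gr.Tab("Agent Actions"):
        summarize = gr.Button(value="Summarize")
        # each tab registers its own console instead of sharing one
        CONSOLE_OUTPUTS["agent_actions"] = gr.Textbox(lines=8, label="Console Output")
    with gr.Tab("Save/Load"):
        save = gr.Button(value="Save")
        CONSOLE_OUTPUTS["save_load_agent"] = gr.Textbox(lines=8, label="Console Output")

    # events route to the console of the tab they live in
    summarize.click(lambda: "summary text", outputs=CONSOLE_OUTPUTS["agent_actions"])
    save.click(lambda: "saved", outputs=CONSOLE_OUTPUTS["save_load_agent"])
```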
src/utils.py | 15

@@ -31,13 +31,22 @@ callback_manager = CallbackManager([StreamingStdOutCallbackHandler()]) # unncess
 if LLM_TYPE=="llamacpp":
     from langchain.llms import LlamaCpp
 
+    STOP_TOKENS = ["END"]
+
+    if os.environ.get('LLM_PROMPT_TUNE', "vicuna") == "vicuna":
+        STOP_TOKENS.append("SYSTEM:")
+        STOP_TOKENS.append("USER:")
+        STOP_TOKENS.append("ASSISTANT:")
+
     LLM = LlamaCpp(
         model_path=LLM_LOCAL_MODEL,
         callback_manager=callback_manager,
         verbose=True,
         n_ctx=LLM_CONTEXT,
         n_threads=LLM_THREADS,
-        stop=["\n\n", "END"]
+        use_mlock=True,
+        use_mmap=True,
+        stop=STOP_TOKENS
     )
 elif LLM_TYPE=="oai":
     from langchain.chat_models import ChatOpenAI
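This pairs with the prompt edits above: every prompt now asks for a literal END, and for Vicuna-tuned models the role headers also become stop strings, so generation is cut before the model starts writing a fake next turn. Observable behavior amounts to earliest-match truncation (an illustrative sketch of what a stop string does, not llama.cpp's code):

```python
STOP_TOKENS = ["END", "SYSTEM:", "USER:", "ASSISTANT:"]

def truncate_at_stop(text: str, stops=STOP_TOKENS) -> str:
    # cut at the earliest occurrence of any stop string
    cut = len(text)
    for stop in stops:
        index = text.find(stop)
        if index != -1:
            cut = min(cut, index)
    return text[:cut].strip()

print(truncate_at_stop("Sam is walking his dog. END\nUSER: next?"))
# -> Sam is walking his dog.
```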
@@ -88,6 +97,8 @@ elif EMBEDDING_TYPE == "llamacpp":
         model_path=LLM_LOCAL_MODEL,
         n_ctx=LLM_CONTEXT,
         n_threads=LLM_THREADS,
+        use_mlock=True,
+        use_mmap=True,
     )
     EMBEDDINGS_SIZE = 5120
 else:
@@ -123,7 +134,7 @@ def _create_new_memories():
         memory_retriever=_create_new_memory_retriever(),
         reflection_threshold=8,
         verbose=True,
-        max_tokens_limit=LLM_CONTEXT/4
+        max_tokens_limit=256 # LLM_CONTEXT/4
     )
 
 def create_agent(**kwargs):