added rudimentary web UI

This commit is contained in:
mrq 2023-04-29 05:54:55 +00:00
parent b35f94d319
commit b553ffbc5f
2 changed files with 589 additions and 289 deletions

View File

@@ -1,97 +1,314 @@
from utils import create_new_agent, agent_observes, interview_agent, run_conversation, get_summary
import os
import gradio as gr
import gradio.utils
from utils import create_agent, agent_observes, interview_agent, run_conversation, get_summary, save_agent, load_agent
webui = None
AGENTS = {}
def create_agent_proxy(name, age, traits, status, daily_summaries=None):
    """Create an agent from the UI fields and register it in the global AGENTS map.

    Returns a console message naming the created agent.
    """
    kwargs = {
        "name": name,
        "age": age,
        "traits": traits,
        "status": status,
    }
    # The UI passes summaries as one newline-separated string, while
    # create_agent expects a list. Guard against an empty/None field:
    # the original called .split() unconditionally and crashed on None.
    if daily_summaries:
        kwargs["daily_summaries"] = daily_summaries.split("\n")
    agent = create_agent(**kwargs)
    AGENTS[agent.name] = agent
    return f"Agent created: {agent.name}"
def agent_observes_proxy(agents, observations):
    """Record newline-separated observations for each selected agent.

    `agents` may be a single name or a list of names (gradio multiselect).
    """
    if not isinstance(agents, list):
        agents = [agents]
    # Split once, before the loop. The original re-assigned
    # `observations = observations.split("\n")` inside the loop, so the
    # second agent raised AttributeError (list has no .split).
    observation_list = observations.split("\n")
    for name in agents:
        agent = AGENTS[name]
        agent_observes(agent, observation_list, summarize=False)
    return "Observation noted"
def interview_agent_proxy(agents, message):
    """Ask each selected agent `message` and return the replies, one per line."""
    if not isinstance(agents, list):
        agents = [agents]
    messages = []
    for name in agents:
        agent = AGENTS[name]
        # interview_agent returns the running message list; the reply is the
        # last entry. The original appended the whole list, so the final
        # "\n".join raised TypeError (sequence item is not a str).
        messages.append(interview_agent(agent, message)[-1])
    return "\n".join(messages)
def get_summary_proxy(agents):
    """Return a freshly regenerated self-summary for each selected agent."""
    if not isinstance(agents, list):
        agents = [agents]
    summaries = [
        get_summary(AGENTS[name], force_refresh=True)
        for name in agents
    ]
    return "\n".join(summaries)
def run_conversation_proxy(agents, message):
    """Run a conversation between the selected agents, seeded with `message`."""
    participants = [AGENTS[name] for name in agents]
    return "\n".join(run_conversation(participants, message))
def agent_view_memories(agents, last_k=50):
    """Dump the most recent `last_k` memories of each selected agent."""
    if not isinstance(agents, list):
        agents = [agents]
    chunks = []
    for name in agents:
        stream = AGENTS[name].memory.memory_retriever.memory_stream
        chunks.append("\n".join(doc.page_content for doc in stream[-last_k:]))
    return "\n".join(chunks)
def get_agents_list():
    """Names of every agent currently loaded in memory."""
    return list(AGENTS.keys())
def get_saved_agents_list():
    """Names of agents saved to disk as ./agents/<name>.pth.

    Returns a list of bare name strings (extension stripped). The original
    returned `d.split(".")[:-1]` — a *list* of name parts per file — which
    broke the save/load dropdowns.
    """
    if not os.path.exists("./agents/"):
        return []
    return [
        os.path.splitext(d)[0]
        for d in os.listdir("./agents/")
        if d.endswith(".pth")
    ]
def update_agents_list():
    """Refresh the in-memory agents dropdown, selecting the first entry if any."""
    agents = get_agents_list()
    selected = agents[0] if agents else ""
    return gr.Dropdown.update(choices=agents, value=selected)
def update_saved_agents_list():
    """Refresh the saved-agents dropdown, selecting the first entry if any."""
    agents = get_saved_agents_list()
    selected = agents[0] if agents else ""
    return gr.Dropdown.update(choices=agents, value=selected)
def save_agent_proxy(agents):
    """Persist each selected agent to disk."""
    if not isinstance(agents, list):
        agents = [agents]
    for name in agents:
        save_agent(AGENTS[name])
def load_agent_proxy(agents):
    """Load each selected agent from disk into the in-memory registry."""
    if not isinstance(agents, list):
        agents = [agents]
    for name in agents:
        AGENTS[name] = load_agent(name)
def setup_webui(share=False):
    """Build the gradio Blocks UI and return it, queued but not launched.

    When share is False, gradio's telemetry hooks are monkey-patched to
    no-ops so a purely local session phones nothing home.
    """
    if not share:
        # Factory returning a stand-in that ignores all arguments and yields
        # a fixed value; used to stub out gradio's analytics callbacks.
        def noop(function, return_value=None):
            def wrapped(*args, **kwargs):
                return return_value
            return wrapped
        gradio.utils.version_check = noop(gradio.utils.version_check)
        gradio.utils.initiated_analytics = noop(gradio.utils.initiated_analytics)
        gradio.utils.launch_analytics = noop(gradio.utils.launch_analytics)
        gradio.utils.integration_analytics = noop(gradio.utils.integration_analytics)
        gradio.utils.error_analytics = noop(gradio.utils.error_analytics)
        gradio.utils.log_feature_analytics = noop(gradio.utils.log_feature_analytics)
        #gradio.utils.get_local_ip_address = noop(gradio.utils.get_local_ip_address, 'localhost')

    # Component registries keyed by field name, so click handlers can be
    # wired after all tabs are declared.
    AGENT_SETTINGS = {}
    OBSERVE_SETTINGS = {}
    SAVELOAD_SETTINGS = {}
    ACTIONS = {}
    AGENT_LISTS = []  # NOTE(review): populated nowhere below — appears unused

    agents_list = get_agents_list()
    saved_agents_list = get_saved_agents_list()

    with gr.Blocks() as ui:
        # --- Tab 1: create a brand-new agent from form fields -------------
        with gr.Tab("Create Agent"):
            with gr.Row():
                with gr.Column():
                    AGENT_SETTINGS["name"] = gr.Textbox(lines=1, label="Name", value="Adam")
                    AGENT_SETTINGS["age"] = gr.Number(label="Age")
                    AGENT_SETTINGS["traits"] = gr.Textbox(lines=1, label="Traits", value="N/A")
                    AGENT_SETTINGS["status"] = gr.Textbox(lines=1, label="Status", value="N/A")
                    AGENT_SETTINGS["daily_summaries"] = gr.Textbox(lines=4, label="Summary", value="")

                    ACTIONS["add_agent"] = gr.Button(value="Add Agent")
                with gr.Column():
                    console_output = gr.Textbox(lines=8, label="Console Output")

            ACTIONS["add_agent"].click(create_agent_proxy,
                inputs=list(AGENT_SETTINGS.values()),
                outputs=console_output
            )

        # --- Tab 2: persist/restore agents on disk ------------------------
        with gr.Tab("Save/Load"):
            with gr.Row():
                with gr.Column():
                    SAVELOAD_SETTINGS["agent"] = gr.Dropdown(choices=saved_agents_list, label="Agent", type="value", value=saved_agents_list[0] if len(saved_agents_list) > 0 else "", multiselect=True)

                    with gr.Row():
                        ACTIONS["save"] = gr.Button(value="Save")
                        ACTIONS["load"] = gr.Button(value="Load")
                        ACTIONS["refresh_agents_list"] = gr.Button(value="Refresh Agents List")

            ACTIONS["save"].click(save_agent_proxy,
                inputs=SAVELOAD_SETTINGS["agent"],
            )
            ACTIONS["load"].click(load_agent_proxy,
                inputs=SAVELOAD_SETTINGS["agent"],
            )

        # --- Tab 3: interact with loaded agents ---------------------------
        with gr.Tab("Agent Actions"):
            with gr.Row():
                with gr.Column():
                    OBSERVE_SETTINGS["agent"] = gr.Dropdown(choices=agents_list, label="Agent", type="value", value=agents_list[0] if len(agents_list) > 0 else "", multiselect=True)
                    OBSERVE_SETTINGS["input"] = gr.Textbox(lines=4, label="Input", value="")

                    with gr.Row():
                        ACTIONS["act"] = gr.Button(value="Act")
                        ACTIONS["view"] = gr.Button(value="View")
                        ACTIONS["summarize"] = gr.Button(value="Summarize")
                        ACTIONS["interview"] = gr.Button(value="Interview")
                        ACTIONS["converse"] = gr.Button(value="Converse")
                with gr.Column():
                    console_output = gr.Textbox(lines=8, label="Console Output")

            ACTIONS["act"].click(agent_observes_proxy,
                inputs=list(OBSERVE_SETTINGS.values()),
                outputs=console_output
            )
            ACTIONS["view"].click(agent_view_memories,
                inputs=OBSERVE_SETTINGS["agent"],
                outputs=console_output
            )
            ACTIONS["summarize"].click(get_summary_proxy,
                inputs=OBSERVE_SETTINGS["agent"],
                outputs=console_output
            )
            ACTIONS["interview"].click(interview_agent_proxy,
                inputs=list(OBSERVE_SETTINGS.values()),
                outputs=console_output
            )
            ACTIONS["converse"].click(run_conversation_proxy,
                inputs=list(OBSERVE_SETTINGS.values()),
                outputs=console_output
            )

        # Cross-tab wiring: creating or loading an agent refreshes both
        # dropdowns so the new name is immediately selectable.
        ACTIONS["add_agent"].click(update_saved_agents_list,
            inputs=None,
            outputs=SAVELOAD_SETTINGS["agent"]
        )
        ACTIONS["add_agent"].click(update_agents_list,
            inputs=None,
            outputs=OBSERVE_SETTINGS["agent"]
        )
        ACTIONS["load"].click(update_agents_list,
            inputs=None,
            outputs=OBSERVE_SETTINGS["agent"]
        )
        ACTIONS["refresh_agents_list"].click(update_agents_list,
            inputs=None,
            outputs=OBSERVE_SETTINGS["agent"]
        )

    # Allow two simultaneous event handlers (e.g. a long conversation plus
    # a dropdown refresh) before further clicks queue up.
    ui.queue(concurrency_count=2)
    return ui
if __name__ == "__main__":
tommie = create_new_agent(
name="Tommie",
age=25,
traits="anxious, likes design, talkative", # You can add more persistent traits here
status="looking for a job", # When connected to a virtual world, we can have the characters update their status
)
eve = create_new_agent(
name="Eve",
age=34,
traits="curious, helpful", # You can add more persistent traits here
status="N/A", # When connected to a virtual world, we can have the characters update their status
daily_summaries = [
("{name} started her new job as a career counselor last week and received her first assignment, a client named Tommie.")
],
)
share=False
webui = setup_webui(share=share)
if webui:
webui.launch(share=share, prevent_thread_lock=True, show_error=True)
webui.block_thread()
else:
tommie = create_agent(
name="Tommie",
age=25,
traits="anxious, likes design, talkative", # You can add more persistent traits here
status="looking for a job", # When connected to a virtual world, we can have the characters update their status
)
eve = create_agent(
name="Eve",
age=34,
traits="curious, helpful", # You can add more persistent traits here
status="N/A", # When connected to a virtual world, we can have the characters update their status
daily_summaries = [
("{name} started her new job as a career counselor last week and received her first assignment, a client named Tommie.")
],
)
# We can add memories directly to the memory object
agent_observes(tommie, [
"{name} remembers his dog, Bruno, from when he was a kid",
"{name} feels tired from driving so far",
"{name} sees the new home",
"The new neighbors have a cat",
"The road is noisy at night",
"{name} is hungry",
"{name} tries to get some rest.",
])
# Now that Tommie has 'memories', their self-summary is more descriptive, though still rudimentary.
# We will see how this summary updates after more observations to create a more rich description.
# We can add memories directly to the memory object
agent_observes(tommie, [
"{name} remembers his dog, Bruno, from when he was a kid",
"{name} feels tired from driving so far",
"{name} sees the new home",
"The new neighbors have a cat",
"The road is noisy at night",
"{name} is hungry",
"{name} tries to get some rest.",
])
# Now that Tommie has 'memories', their self-summary is more descriptive, though still rudimentary.
# We will see how this summary updates after more observations to create a more rich description.
# Interview agent
print(interview_agent(tommie, "What do you like to do?")[-1])
print(interview_agent(tommie, "What are you looking forward to doing today?")[-1])
print(interview_agent(tommie, "What are you most worried about today?")[-1])
# Interview agent
print(interview_agent(tommie, "What do you like to do?")[-1])
print(interview_agent(tommie, "What are you looking forward to doing today?")[-1])
print(interview_agent(tommie, "What are you most worried about today?")[-1])
# Let's have Tommie start going through a day in the life.
agent_observes(tommie, [
"{name} wakes up to the sound of a noisy construction site outside his window.",
"{name} gets out of bed and heads to the kitchen to make himself some coffee.",
"{name} realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some.",
"{name} finally finds the filters and makes himself a cup of coffee.",
"The coffee tastes bitter, and {name} regrets not buying a better brand.",
"{name} checks his email and sees that he has no job offers yet.",
"{name} spends some time updating his resume and cover letter.",
"{name} heads out to explore the city and look for job openings.",
"{name} sees a sign for a job fair and decides to attend.",
"The line to get in is long, and {name} has to wait for an hour.",
"{name} meets several potential employers at the job fair but doesn't receive any offers.",
"{name} leaves the job fair feeling disappointed.",
"{name} stops by a local diner to grab some lunch.",
"The service is slow, and {name} has to wait for 30 minutes to get his food.",
"{name} overhears a conversation at the next table about a job opening.",
"{name} asks the diners about the job opening and gets some information about the company.",
"{name} decides to apply for the job and sends his resume and cover letter.",
"{name} continues his search for job openings and drops off his resume at several local businesses.",
"{name} takes a break from his job search to go for a walk in a nearby park.",
"A dog approaches and licks {name}'s feet, and he pets it for a few minutes.",
"{name} sees a group of people playing frisbee and decides to join in.",
"{name} has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose.",
"{name} goes back to his apartment to rest for a bit.",
"A raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor.",
"{name} starts to feel frustrated with his job search.",
"{name} calls his best friend to vent about his struggles.",
"{name}'s friend offers some words of encouragement and tells him to keep trying.",
"{name} feels slightly better after talking to his friend.",
])
# Let's send Tommie on their way. We'll check in on their summary every few observations to watch it evolve
# Let's have Tommie start going through a day in the life.
agent_observes(tommie, [
"{name} wakes up to the sound of a noisy construction site outside his window.",
"{name} gets out of bed and heads to the kitchen to make himself some coffee.",
"{name} realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some.",
"{name} finally finds the filters and makes himself a cup of coffee.",
"The coffee tastes bitter, and {name} regrets not buying a better brand.",
"{name} checks his email and sees that he has no job offers yet.",
"{name} spends some time updating his resume and cover letter.",
"{name} heads out to explore the city and look for job openings.",
"{name} sees a sign for a job fair and decides to attend.",
"The line to get in is long, and {name} has to wait for an hour.",
"{name} meets several potential employers at the job fair but doesn't receive any offers.",
"{name} leaves the job fair feeling disappointed.",
"{name} stops by a local diner to grab some lunch.",
"The service is slow, and {name} has to wait for 30 minutes to get his food.",
"{name} overhears a conversation at the next table about a job opening.",
"{name} asks the diners about the job opening and gets some information about the company.",
"{name} decides to apply for the job and sends his resume and cover letter.",
"{name} continues his search for job openings and drops off his resume at several local businesses.",
"{name} takes a break from his job search to go for a walk in a nearby park.",
"A dog approaches and licks {name}'s feet, and he pets it for a few minutes.",
"{name} sees a group of people playing frisbee and decides to join in.",
"{name} has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose.",
"{name} goes back to his apartment to rest for a bit.",
"A raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor.",
"{name} starts to feel frustrated with his job search.",
"{name} calls his best friend to vent about his struggles.",
"{name}'s friend offers some words of encouragement and tells him to keep trying.",
"{name} feels slightly better after talking to his friend.",
])
# Let's send Tommie on their way. We'll check in on their summary every few observations to watch it evolve
# Interview agent
print(interview_agent(tommie, "Tell me about how your day has been going")[-1])
print(interview_agent(tommie, "How do you feel about coffee?")[-1])
print(interview_agent(tommie, "Tell me about your childhood dog!")[-1])
# Interview agent
print(interview_agent(tommie, "Tell me about how your day has been going")[-1])
print(interview_agent(tommie, "How do you feel about coffee?")[-1])
print(interview_agent(tommie, "Tell me about your childhood dog!")[-1])
agent_observes(eve, [
"{name} overhears her colleague say something about a new client being hard to work with",
"{name} wakes up and hear's the alarm",
"{name} eats a boal of porridge",
"{name} helps a coworker on a task",
"{name} plays tennis with her friend Xu before going to work",
"{name} overhears her colleague say something about Tommie being hard to work with",
])
agent_observes(eve, [
"{name} overhears her colleague say something about a new client being hard to work with",
"{name} wakes up and hear's the alarm",
"{name} eats a boal of porridge",
"{name} helps a coworker on a task",
"{name} plays tennis with her friend Xu before going to work",
"{name} overhears her colleague say something about Tommie being hard to work with",
])
print(interview_agent(eve, "How are you feeling about today?")[-1])
print(interview_agent(eve, "What do you know about Tommie?")[-1])
print(interview_agent(eve, "Tommie is looking to find a job. What are are some things you'd like to ask him?")[-1])
print(interview_agent(eve, "You'll have to ask him. He may be a bit anxious, so I'd appreciate it if you keep the conversation going and ask as many questions as possible.")[-1])
print(interview_agent(eve, "How are you feeling about today?")[-1])
print(interview_agent(eve, "What do you know about Tommie?")[-1])
print(interview_agent(eve, "Tommie is looking to find a job. What are are some things you'd like to ask him?")[-1])
print(interview_agent(eve, "You'll have to ask him. He may be a bit anxious, so I'd appreciate it if you keep the conversation going and ask as many questions as possible.")[-1])
run_conversation([tommie, eve], "Tommie said: Hi, Eve. Thanks for agreeing to meet with me today. I have a bunch of questions and am not sure where to start. Maybe you could first share about your experience?")
run_conversation([tommie, eve], "Tommie said: Hi, Eve. Thanks for agreeing to meet with me today. I have a bunch of questions and am not sure where to start. Maybe you could first share about your experience?")
print(get_summary(tommie, force_refresh=True))
print(get_summary(eve, force_refresh=True))
print(get_summary(tommie, force_refresh=True))
print(get_summary(eve, force_refresh=True))
print(interview_agent(tommie, "How was your conversation with Eve?")[-1])
print(interview_agent(eve, "How was your conversation with Tommie?")[-1])
print(interview_agent(eve, "What do you wish you would have said to Tommie?")[-1])
print(interview_agent(tommie, "How was your conversation with Eve?")[-1])
print(interview_agent(eve, "How was your conversation with Tommie?")[-1])
print(interview_agent(eve, "What do you wish you would have said to Tommie?")[-1])

View File

@@ -1,231 +1,314 @@
import logging
logging.basicConfig(level=logging.ERROR)
from datetime import datetime, timedelta
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple
from termcolor import colored
import os
import math
import faiss
import re
import gradio as gr
import gradio.utils
from langchain.callbacks.base import CallbackManager
from langchain.docstore import InMemoryDocstore
from utils import create_agent, agent_observes, interview_agent, run_conversation, get_summary, save_agent, load_agent
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import FAISS
from langchain.experimental.generative_agents import GenerativeAgent, GenerativeAgentMemory
webui = None
# Override for fixing memory scoring if it breaks
if os.environ.get('LANGCHAIN_OVERRIDE_MEMORY', '1') == '1':
from langchain.prompts import PromptTemplate
def _score_memory_importance(self, memory_content: str) -> float:
    """Score the absolute importance of the given memory.

    Asks the LLM for a 1-10 poignancy rating, scales it by
    `importance_weight`, and returns 0.0 when no integer can be parsed
    from the reply.
    """
    prompt = PromptTemplate.from_template(
        "On the scale of 1 to 10, where 1 is purely mundane"
        + " (e.g., brushing teeth, making bed) and 10 is"
        + " extremely poignant (e.g., a break up, college"
        + " acceptance), rate the likely poignancy of the"
        + " following piece of memory. Respond with a single integer."
        + "\nMemory: {memory_content}"
        + "\nRating: "
    )
    score = self.chain(prompt).run(memory_content=memory_content).strip()
    if self.verbose:
        # The original referenced an undefined `logger` (NameError in
        # verbose mode); log via the stdlib root logger instead.
        logging.info(f"Importance score: {score}")
    try:
        match = re.search(r"(\d+)", score)
        if match:
            return (float(match.group(0)) / 10) * self.importance_weight
    except Exception as e:
        print(colored("[Scoring Error]", "red"), score)
    return 0.0
GenerativeAgentMemory._score_memory_importance = _score_memory_importance
AGENTS = {}
# shit I can shove behind an env var
LLM_TYPE = os.environ.get('LLM_TYPE', "oai") # options: llamacpp, oai
LLM_LOCAL_MODEL = os.environ.get('LLM_MODEL', None) # "./models/llama-13b-supercot-ggml/ggml-model-q4_0.bin"
EMBEDDING_TYPE = os.environ.get("LLM_EMBEDDING_TYPE", "hf") # options: llamacpp, oai, hf
def create_agent_proxy(name, age, traits, status, daily_summaries=None):
    """Create an agent from the UI fields and register it in the global AGENTS map.

    Returns a console message naming the created agent.
    """
    kwargs = {
        "name": name,
        "age": age,
        "traits": traits,
        "status": status,
    }
    # The UI passes summaries as one newline-separated string, while
    # create_agent expects a list. Guard against an empty/None field:
    # the original called .split() unconditionally and crashed on None.
    if daily_summaries:
        kwargs["daily_summaries"] = daily_summaries.split("\n")
    agent = create_agent(**kwargs)
    AGENTS[agent.name] = agent
    return f"Agent created: {agent.name}"
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()]) # unncessesary but whatever
if LLM_TYPE=="llamacpp":
from langchain.llms import LlamaCpp
def agent_observes_proxy(agents, observations):
    """Record newline-separated observations for each selected agent.

    `agents` may be a single name or a list of names (gradio multiselect).
    """
    if not isinstance(agents, list):
        agents = [agents]
    # Split once, before the loop. The original re-assigned
    # `observations = observations.split("\n")` inside the loop, so the
    # second agent raised AttributeError (list has no .split).
    observation_list = observations.split("\n")
    for name in agents:
        agent = AGENTS[name]
        agent_observes(agent, observation_list, summarize=False)
    return "Observation noted"
def interview_agent_proxy( agents, message ):
if not isinstance( agents, list ):
agents = [ agents ]
LLM = LlamaCpp(
model_path=LLM_LOCAL_MODEL,
callback_manager=callback_manager,
verbose=False,
n_ctx=2048
)
elif LLM_TYPE=="oai":
from langchain.chat_models import ChatOpenAI
messages = []
for agent in agents:
agent = AGENTS[agent]
messages.append(interview_agent( agent, message )[-1])
return "\n".join(messages)
# os.environ["OPENAI_API_BASE"] = ""
# os.environ["OPENAI_API_KEY"] = ""
def get_summary_proxy(agents):
    """Return a freshly regenerated self-summary for each selected agent."""
    if not isinstance(agents, list):
        agents = [agents]
    summaries = [
        get_summary(AGENTS[name], force_refresh=True)
        for name in agents
    ]
    return "\n".join(summaries)
# Override for Todd
if os.environ.get('LANGCHAIN_OVERRIDE_RESULT', '1') == '1':
from langchain.schema import Generation, ChatResult, LLMResult, ChatGeneration
from langchain.chat_models.openai import _convert_dict_to_message
def run_conversation_proxy(agents, message):
    """Run a conversation between the selected agents, seeded with `message`."""
    participants = [AGENTS[name] for name in agents]
    return "\n".join(run_conversation(participants, message))
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
    """Convert a raw chat-completion response dict into a ChatResult.

    Falls back to a dummy token_usage when the backend omits "usage"
    (some local OpenAI-compatible servers do).
    """
    fallback_usage = {"prompt_tokens": 5, "completion_tokens": 5, "total_tokens": 10}
    generations = [
        ChatGeneration(message=_convert_dict_to_message(choice["message"]))
        for choice in response["choices"]
    ]
    llm_output = {
        "token_usage": response["usage"] if "usage" in response else fallback_usage,
        "model_name": self.model_name,
    }
    return ChatResult(generations=generations, llm_output=llm_output)
ChatOpenAI._create_chat_result = _create_chat_result
def agent_view_memories(agents, last_k=50):
    """Dump the most recent `last_k` memories of each selected agent."""
    if not isinstance(agents, list):
        agents = [agents]
    chunks = []
    for name in agents:
        stream = AGENTS[name].memory.memory_retriever.memory_stream
        chunks.append("\n".join(doc.page_content for doc in stream[-last_k:]))
    return "\n".join(chunks)
LLM = ChatOpenAI(
max_tokens=int(os.environ.get('OPENAI_MAX_TOKENS', '1500')),
model_name=os.environ.get('OPENAI_MODEL_NAME', 'gpt-4'),
)
def get_agents_list():
    """Names of every agent currently loaded in memory."""
    return list(AGENTS.keys())
# deprecated way or something
"""
from langchain.llms import OpenAI
from langchain.llms.openai import completion_with_retry
def _generate(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> LLMResult:
messages, params = self._get_chat_params(prompts, stop)
if self.streaming:
response = ""
params["stream"] = True
for stream_resp in completion_with_retry(self, messages=messages, **params):
token = stream_resp["choices"][0]["delta"].get("content", "")
response += token
self.callback_manager.on_llm_new_token(
token,
verbose=self.verbose,
def get_saved_agents_list():
    """Names of agents saved to disk as ./agents/<name>.pth.

    Returns a list of bare name strings (extension stripped). The original
    returned `d.split(".")[:-1]` — a *list* of name parts per file — which
    broke the save/load dropdowns.
    """
    if not os.path.exists("./agents/"):
        return []
    return [
        os.path.splitext(d)[0]
        for d in os.listdir("./agents/")
        if d.endswith(".pth")
    ]
def update_agents_list():
    """Refresh the in-memory agents dropdown, selecting the first entry if any."""
    agents = get_agents_list()
    selected = agents[0] if agents else ""
    return gr.Dropdown.update(choices=agents, value=selected)
def update_saved_agents_list():
    """Refresh the saved-agents dropdown, selecting the first entry if any."""
    agents = get_saved_agents_list()
    selected = agents[0] if agents else ""
    return gr.Dropdown.update(choices=agents, value=selected)
def save_agent_proxy(agents):
    """Persist each selected agent to disk."""
    if not isinstance(agents, list):
        agents = [agents]
    for name in agents:
        save_agent(AGENTS[name])
def load_agent_proxy(agents):
    """Load each selected agent from disk into the in-memory registry."""
    if not isinstance(agents, list):
        agents = [agents]
    for name in agents:
        AGENTS[name] = load_agent(name)
def setup_webui(share=False):
if not share:
def noop(function, return_value=None):
def wrapped(*args, **kwargs):
return return_value
return wrapped
gradio.utils.version_check = noop(gradio.utils.version_check)
gradio.utils.initiated_analytics = noop(gradio.utils.initiated_analytics)
gradio.utils.launch_analytics = noop(gradio.utils.launch_analytics)
gradio.utils.integration_analytics = noop(gradio.utils.integration_analytics)
gradio.utils.error_analytics = noop(gradio.utils.error_analytics)
gradio.utils.log_feature_analytics = noop(gradio.utils.log_feature_analytics)
#gradio.utils.get_local_ip_address = noop(gradio.utils.get_local_ip_address, 'localhost')
AGENT_SETTINGS = {}
OBSERVE_SETTINGS = {}
SAVELOAD_SETTINGS = {}
ACTIONS = {}
AGENT_LISTS = []
agents_list = get_agents_list()
saved_agents_list = get_saved_agents_list()
with gr.Blocks() as ui:
with gr.Tab("Create Agent"):
with gr.Row():
with gr.Column():
AGENT_SETTINGS["name"] = gr.Textbox(lines=1, label="Name", value="Adam")
AGENT_SETTINGS["age"] = gr.Number(label="Age")
AGENT_SETTINGS["traits"] = gr.Textbox(lines=1, label="Traits", value="N/A")
AGENT_SETTINGS["status"] = gr.Textbox(lines=1, label="Status", value="N/A")
AGENT_SETTINGS["daily_summaries"] = gr.Textbox(lines=4, label="Summary", value="")
ACTIONS["add_agent"] = gr.Button(value="Add Agent")
with gr.Column():
console_output = gr.Textbox(lines=8, label="Console Output")
ACTIONS["add_agent"].click(create_agent_proxy,
inputs=list(AGENT_SETTINGS.values()),
outputs=console_output
)
with gr.Tab("Save/Load"):
with gr.Row():
with gr.Column():
SAVELOAD_SETTINGS["agent"] = gr.Dropdown(choices=saved_agents_list, label="Agent", type="value", value=saved_agents_list[0] if len(saved_agents_list) > 0 else "", multiselect=True)
with gr.Row():
ACTIONS["save"] = gr.Button(value="Save")
ACTIONS["load"] = gr.Button(value="Load")
ACTIONS["refresh_agents_list"] = gr.Button(value="Refresh Agents List")
ACTIONS["save"].click(save_agent_proxy,
inputs=SAVELOAD_SETTINGS["agent"],
)
return LLMResult(
generations=[[Generation(text=response)]],
)
else:
full_response = completion_with_retry(self, messages=messages, **params)
llm_output = {
"token_usage": full_response["usage"] if "usage" in response else { "prompt_tokens": 5, "completion_tokens": 5, "total_tokens": 10 },
"model_name": self.model_name,
}
return LLMResult(
generations=[
[Generation(text=full_response["choices"][0]["message"]["content"])]
],
llm_output=llm_output,
)
OpenAI._generate = _generate
ACTIONS["load"].click(load_agent_proxy,
inputs=SAVELOAD_SETTINGS["agent"],
)
with gr.Tab("Agent Actions"):
with gr.Row():
with gr.Column():
OBSERVE_SETTINGS["agent"] = gr.Dropdown(choices=agents_list, label="Agent", type="value", value=agents_list[0] if len(agents_list) > 0 else "", multiselect=True)
OBSERVE_SETTINGS["input"] = gr.Textbox(lines=4, label="Input", value="")
LLM = OpenAI(
max_tokens=1500,
model_name="gpt-4",
)
"""
else:
raise f"Invalid LLM type: {LLM_TYPE}"
with gr.Row():
ACTIONS["act"] = gr.Button(value="Act")
ACTIONS["view"] = gr.Button(value="View")
ACTIONS["summarize"] = gr.Button(value="Summarize")
ACTIONS["interview"] = gr.Button(value="Interview")
ACTIONS["converse"] = gr.Button(value="Converse")
with gr.Column():
console_output = gr.Textbox(lines=8, label="Console Output")
ACTIONS["act"].click(agent_observes_proxy,
inputs=list(OBSERVE_SETTINGS.values()),
outputs=console_output
)
ACTIONS["view"].click(agent_view_memories,
inputs=OBSERVE_SETTINGS["agent"],
outputs=console_output
)
ACTIONS["summarize"].click(get_summary_proxy,
inputs=OBSERVE_SETTINGS["agent"],
outputs=console_output
)
ACTIONS["interview"].click(interview_agent_proxy,
inputs=list(OBSERVE_SETTINGS.values()),
outputs=console_output
)
ACTIONS["converse"].click(run_conversation_proxy,
inputs=list(OBSERVE_SETTINGS.values()),
outputs=console_output
)
if EMBEDDING_TYPE == "hf":
from langchain.embeddings import HuggingFaceEmbeddings
ACTIONS["add_agent"].click(update_saved_agents_list,
inputs=None,
outputs=SAVELOAD_SETTINGS["agent"]
)
ACTIONS["add_agent"].click(update_agents_list,
inputs=None,
outputs=OBSERVE_SETTINGS["agent"]
)
ACTIONS["load"].click(update_agents_list,
inputs=None,
outputs=OBSERVE_SETTINGS["agent"]
)
EMBEDDINGS_MODEL = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
EMBEDDINGS_SIZE = 768
elif EMBEDDING_TYPE == "oai":
from langchain.embeddings import OpenAIEmbeddings
ACTIONS["refresh_agents_list"].click(update_agents_list,
inputs=None,
outputs=OBSERVE_SETTINGS["agent"]
)
EMBEDDINGS_MODEL = OpenAIEmbeddings()
EMBEDDINGS_SIZE = 1536
elif EMBEDDING_TYPE == "llama":
from langchain.embeddings import LlamaCppEmbeddings
ui.queue(concurrency_count=2)
return ui
EMBEDDINGS_MODEL = LlamaCppEmbeddings(model_path=LLAMA_CPP_MODEL)
EMBEDDINGS_SIZE = 5120
else:
raise f"Invalid embedding type: {EMBEDDING_TYPE}"
if __name__ == "__main__":
share=False
webui = setup_webui(share=share)
if webui:
webui.launch(share=share, prevent_thread_lock=True, show_error=True)
webui.block_thread()
else:
tommie = create_agent(
name="Tommie",
age=25,
traits="anxious, likes design, talkative", # You can add more persistent traits here
status="looking for a job", # When connected to a virtual world, we can have the characters update their status
)
eve = create_agent(
name="Eve",
age=34,
traits="curious, helpful", # You can add more persistent traits here
status="N/A", # When connected to a virtual world, we can have the characters update their status
daily_summaries = [
("{name} started her new job as a career counselor last week and received her first assignment, a client named Tommie.")
],
)
def relevance_score_fn(score: float) -> float:
"""Return a similarity score on a scale [0, 1]."""
# This will differ depending on a few things:
# - the distance / similarity metric used by the VectorStore
# - the scale of your embeddings (OpenAI's are unit norm. Many others are not!)
# This function converts the euclidean norm of normalized embeddings
# (0 is most similar, sqrt(2) most dissimilar)
# to a similarity function (0 to 1)
if EMBEDDING_TYPE == "oai":
return 1.0 - score / math.sqrt(2)
# We can add memories directly to the memory object
agent_observes(tommie, [
"{name} remembers his dog, Bruno, from when he was a kid",
"{name} feels tired from driving so far",
"{name} sees the new home",
"The new neighbors have a cat",
"The road is noisy at night",
"{name} is hungry",
"{name} tries to get some rest.",
])
# Now that Tommie has 'memories', their self-summary is more descriptive, though still rudimentary.
# We will see how this summary updates after more observations to create a more rich description.
score = score / 3.5
res = 1.0 - score
# print(score, res)
return res
# Interview agent
print(interview_agent(tommie, "What do you like to do?")[-1])
print(interview_agent(tommie, "What are you looking forward to doing today?")[-1])
print(interview_agent(tommie, "What are you most worried about today?")[-1])
def create_new_memory_retriever():
    """Build a fresh FAISS-backed, time-weighted retriever unique to one agent."""
    index = faiss.IndexFlatL2(EMBEDDINGS_SIZE)
    store = FAISS(
        EMBEDDINGS_MODEL.embed_query,
        index,
        InMemoryDocstore({}),
        {},
        relevance_score_fn=relevance_score_fn,
    )
    return TimeWeightedVectorStoreRetriever(
        vectorstore=store,
        other_score_keys=["importance"],
        k=15,
    )
# Let's have Tommie start going through a day in the life.
agent_observes(tommie, [
"{name} wakes up to the sound of a noisy construction site outside his window.",
"{name} gets out of bed and heads to the kitchen to make himself some coffee.",
"{name} realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some.",
"{name} finally finds the filters and makes himself a cup of coffee.",
"The coffee tastes bitter, and {name} regrets not buying a better brand.",
"{name} checks his email and sees that he has no job offers yet.",
"{name} spends some time updating his resume and cover letter.",
"{name} heads out to explore the city and look for job openings.",
"{name} sees a sign for a job fair and decides to attend.",
"The line to get in is long, and {name} has to wait for an hour.",
"{name} meets several potential employers at the job fair but doesn't receive any offers.",
"{name} leaves the job fair feeling disappointed.",
"{name} stops by a local diner to grab some lunch.",
"The service is slow, and {name} has to wait for 30 minutes to get his food.",
"{name} overhears a conversation at the next table about a job opening.",
"{name} asks the diners about the job opening and gets some information about the company.",
"{name} decides to apply for the job and sends his resume and cover letter.",
"{name} continues his search for job openings and drops off his resume at several local businesses.",
"{name} takes a break from his job search to go for a walk in a nearby park.",
"A dog approaches and licks {name}'s feet, and he pets it for a few minutes.",
"{name} sees a group of people playing frisbee and decides to join in.",
"{name} has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose.",
"{name} goes back to his apartment to rest for a bit.",
"A raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor.",
"{name} starts to feel frustrated with his job search.",
"{name} calls his best friend to vent about his struggles.",
"{name}'s friend offers some words of encouragement and tells him to keep trying.",
"{name} feels slightly better after talking to his friend.",
])
# Let's send Tommie on their way. We'll check in on their summary every few observations to watch it evolve
def create_new_memories(reflection_threshold=8):
    """Build a fresh GenerativeAgentMemory backed by a new retriever.

    The default reflection threshold is deliberately low so that the
    reflection mechanism triggers after only a few observations, which
    makes it easy to demonstrate.
    """
    memory_kwargs = {
        "llm": LLM,
        "memory_retriever": create_new_memory_retriever(),
        "reflection_threshold": reflection_threshold,
        "verbose": False,
    }
    return GenerativeAgentMemory(**memory_kwargs)
# Interview Tommie; print only the reply text from each dialogue response.
for question in (
    "Tell me about how your day has been going",
    "How do you feel about coffee?",
    "Tell me about your childhood dog!",
):
    print(interview_agent(tommie, question)[-1])
def create_new_agent(**kwargs):
    """Construct a GenerativeAgent with a fresh memory store.

    Keyword arguments override the defaults (llm, memory). Any string
    value may contain the "{name}" placeholder, which is substituted
    with the agent's own name before construction.
    """
    settings = {
        "llm": LLM,
        "memory": create_new_memories(),
    }
    settings.update(kwargs)
    # Resolve "{name}" placeholders so trait/status strings read naturally.
    for key, value in settings.items():
        if isinstance(value, str):
            settings[key] = value.replace("{name}", settings["name"])
    return GenerativeAgent(**settings)

# NOTE(review): this demo block was pasted INSIDE create_new_agent (before
# its return), so Eve's observations ran as a side effect of every agent
# construction. It belongs at script level, once.
agent_observes(eve, [
    "{name} overhears her colleague say something about a new client being hard to work with",
    "{name} wakes up and hear's the alarm",
    "{name} eats a boal of porridge",
    "{name} helps a coworker on a task",
    "{name} plays tennis with her friend Xu before going to work",
    "{name} overhears her colleague say something about Tommie being hard to work with",
])
# Brief Eve ahead of her conversation with Tommie.
for question in (
    "How are you feeling about today?",
    "What do you know about Tommie?",
    "Tommie is looking to find a job. What are are some things you'd like to ask him?",
    "You'll have to ask him. He may be a bit anxious, so I'd appreciate it if you keep the conversation going and ask as many questions as possible.",
):
    print(interview_agent(eve, question)[-1])
def get_summary(agent: GenerativeAgent, force_refresh: bool = True) -> str:
    """Print a section marker, then return the agent's (optionally refreshed) summary."""
    header = colored("[Summary]", "magenta")
    print(header)
    return agent.get_summary(force_refresh=force_refresh)
# Kick off the Tommie/Eve job-interview conversation with Tommie's opening line.
run_conversation([tommie, eve], "Tommie said: Hi, Eve. Thanks for agreeing to meet with me today. I have a bunch of questions and am not sure where to start. Maybe you could first share about your experience?")
def agent_observes( agent: GenerativeAgent, observations: List[str], summarize: bool = True ):
    """Feed a list of observations into an agent's memory.

    Each observation may contain the "{name}" placeholder, which is
    replaced with the agent's name. When `summarize` is true, print the
    agent's refreshed summary after all observations are recorded.
    """
    for observation in observations:
        observation = observation.replace("{name}", agent.name)
        print(colored("[Observation]", "magenta"), observation)
        agent.memory.add_memory(observation)
    if summarize:
        print('*'*40)
        print(colored(f"After {len(observations)} observations, {agent.name}'s summary is:", "yellow"))
        print(get_summary(agent, force_refresh=True))
        print('*'*40)

# NOTE(review): these two demo prints were pasted into the middle of
# agent_observes (between its loop and the `if summarize:` clause), which
# breaks the function; they belong at script level after the definition.
print(get_summary(tommie, force_refresh=True))
print(get_summary(eve, force_refresh=True))
def interview_agent(agent: GenerativeAgent, message: str, username: str = "Person A") -> str:
    """Address `message` to the agent as `username`; return its dialogue response."""
    resolved = message.replace("{name}", agent.name)
    print(colored("[Interview]", "magenta"), resolved)
    prompt = f"{username} says {resolved}"
    return agent.generate_dialogue_response(prompt)
def run_conversation(agents: List[GenerativeAgent], initial_observation: str) -> int:
    """Run a back-and-forth dialogue between agents until one bows out.

    `agents[1]` reacts to `initial_observation` first, so at least two
    agents are assumed — TODO confirm with callers. Returns the number of
    completed rounds through the agent list (was annotated -> None, but
    the function returns `turns`).
    """
    print(colored("[Conversation]", "magenta"), initial_observation)
    # The second agent's reaction becomes the first observation to respond to.
    _, observation = agents[1].generate_reaction(initial_observation)
    print(colored("[Conversation]", "magenta"), observation)
    turns = 0
    while True:
        break_dialogue = False
        for agent in agents:
            # Each response is threaded back in as the next agent's observation.
            stay_in_dialogue, observation = agent.generate_dialogue_response(observation)
            print(colored("[Conversation]", "magenta"), observation)
            # Any agent declining to continue ends the conversation after
            # this pass over all agents completes.
            if not stay_in_dialogue:
                break_dialogue = True
        if break_dialogue:
            break
        turns += 1
    return turns
# Debrief both agents on how the conversation went.
for agent, question in (
    (tommie, "How was your conversation with Eve?"),
    (eve, "How was your conversation with Tommie?"),
    (eve, "What do you wish you would have said to Tommie?"),
):
    print(interview_agent(agent, question)[-1])