diff --git a/samples/apps/fireworks-studio/firestudio/chatmanager.py b/samples/apps/fireworks-studio/firestudio/chatmanager.py index 60ab37990528..c263fd719978 100644 --- a/samples/apps/fireworks-studio/firestudio/chatmanager.py +++ b/samples/apps/fireworks-studio/firestudio/chatmanager.py @@ -1,6 +1,7 @@ +from copy import deepcopy import json import time from typing import List from .datamodel import AgentWorkFlowConfig, Message from .utils import ( extract_successful_code_blocks, @@ -18,63 +19,89 @@ def __init__(self) -> None: def chat( self, message: Message, - history: List, - flow_config: AgentWorkFlowConfig = None, + history_list: List[Message], + agent_flow_config: AgentWorkFlowConfig = None, **kwargs, - ) -> None: - work_dir = kwargs.get("work_dir", None) - scratch_dir = os.path.join(work_dir, "scratch") + ) -> List[Message]: + _work_dir_prefix = kwargs.get("work_dir", None) + output_message: List[Message] = [] - # if no flow config is provided, use the default - if flow_config is None: - flow_config = get_default_agent_config(scratch_dir) + # if no flow config is provided, use the default + if agent_flow_config is None: + agent_flow_config = get_default_agent_config( + os.path.join(_work_dir_prefix, "scratch") + ) + + parent_flow_config = deepcopy(agent_flow_config) + if not isinstance(parent_flow_config.receiver, list): + parent_flow_config.receiver = [parent_flow_config.receiver] - flow = AutoGenWorkFlowManager( - config=flow_config, history=history, work_dir=scratch_dir - ) - message_text = message.content.strip() + # run the workflow once per receiver, each in its own work dir + for idx in range(len(parent_flow_config.receiver)): + flow_config = deepcopy(parent_flow_config) + flow_config.receiver = flow_config.receiver[idx] - output = "" - start_time = time.time() + work_dir = os.path.join(_work_dir_prefix, str(idx)) + scratch_dir = os.path.join(work_dir, "scratch") + os.makedirs(scratch_dir, exist_ok=True) - metadata = {} - flow.run(message=f"{message_text}", clear_history=False) + # replay only the history addressed to this receiver + receiver_name = flow_config.receiver.config.name + history = [ + message + for message in history_list + if message.receiver_name == receiver_name + ] - metadata["messages"] = flow.agent_history - - output = "" - - if flow_config.summary_method == "last": - successful_code_blocks = extract_successful_code_blocks(flow.agent_history) - last_message = flow.agent_history[-1]["message"]["content"] - successful_code_blocks = "\n\n".join(successful_code_blocks) - output = ( - (last_message + "\n" + successful_code_blocks) - if successful_code_blocks - else last_message + flow = AutoGenWorkFlowManager( + config=flow_config, history=history, work_dir=scratch_dir ) - elif flow_config.summary_method == "llm": + message_text = message.content.strip() + output = "" - elif flow_config.summary_method == "none": + start_time = time.time() + + metadata = {} + flow.run(message=f"{message_text}", clear_history=False) + + metadata["messages"] = flow.agent_history + output = "" - metadata["code"] = "" - end_time = time.time() - metadata["time"] = end_time - start_time - modified_files = get_modified_files( - start_time, end_time, scratch_dir, dest_dir=work_dir - ) - metadata["files"] = modified_files - - print("Modified files: ", len(modified_files)) - - output_message = Message( - user_id=message.user_id, - root_msg_id=message.root_msg_id, - role="assistant", - content=output, - metadata=json.dumps(metadata), - session_id=message.session_id, - ) + if flow_config.summary_method == "last": + successful_code_blocks = 
extract_successful_code_blocks( + flow.agent_history + ) + last_message = flow.agent_history[-1]["message"]["content"] + successful_code_blocks = "\n\n".join(successful_code_blocks) + output = ( + (last_message + "\n" + successful_code_blocks) + if successful_code_blocks + else last_message + ) + elif flow_config.summary_method == "llm": + output = "" + elif flow_config.summary_method == "none": + output = "" + + metadata["code"] = "" + end_time = time.time() + metadata["time"] = end_time - start_time + modified_files = get_modified_files( + start_time, end_time, scratch_dir, dest_dir=work_dir + ) + metadata["files"] = modified_files + + print("Modified files: ", len(modified_files)) + + output_message.append( + Message( + user_id=message.user_id, + root_msg_id=message.root_msg_id, + role="assistant", + content=output, + receiver_name=receiver_name, + metadata=json.dumps(metadata), + session_id=message.session_id, + ) + ) return output_message diff --git a/samples/apps/fireworks-studio/firestudio/datamodel.py b/samples/apps/fireworks-studio/firestudio/datamodel.py index 84987809ac9a..dbd9d37c49c2 100644 --- a/samples/apps/fireworks-studio/firestudio/datamodel.py +++ b/samples/apps/fireworks-studio/firestudio/datamodel.py @@ -10,6 +10,7 @@ class Message(object): user_id: str role: str content: str + receiver_name: str root_msg_id: Optional[str] = None msg_id: Optional[str] = None timestamp: Optional[str] = None diff --git a/samples/apps/fireworks-studio/firestudio/utils/dbdefaults.json b/samples/apps/fireworks-studio/firestudio/utils/dbdefaults.json index de61b743926f..da288dd7b184 100644 --- a/samples/apps/fireworks-studio/firestudio/utils/dbdefaults.json +++ b/samples/apps/fireworks-studio/firestudio/utils/dbdefaults.json @@ -102,6 +102,115 @@ } } }, + "receiver": [ + { + "type": "assistant", + "description": "Default assistant to generate plans and write code to solve tasks.", + "skills": [ + { + "title": "generate_and_save_images", + "file_name": "generate_and_save_images.py", + "content": "\nfrom typing import List\nimport uuid\nimport requests # to perform HTTP requests\nfrom pathlib import Path\nfrom typing_extensions import Annotated\nfrom openai import OpenAI\nfrom firestudio.utils.utils import schema_recorder\n\n@schema_recorder(description=\"Function to paint, draw or illustrate images based on the users query or request. Generates images from a given query using OpenAI's DALL-E model and saves them to disk. Use the code below anytime there is a request to create an image\")\ndef generate_and_save_images(query: Annotated[str, \"A natural language description of the image to be generated.\"], image_size: Annotated[str, \"The size of the image to be generated. 
default is '1024x1024'\"] = \"1024x1024\") -> List[str]:\n client = OpenAI() # Initialize the OpenAI client\n response = client.images.generate(model=\"dall-e-3\", prompt=query, n=1, size=image_size) # Generate images\n\n # List to store the file names of saved images\n saved_files = []\n\n # Check if the response is successful\n if response.data:\n for image_data in response.data:\n # Generate a random UUID as the file name\n file_name = \"/tmp/\" + str(uuid.uuid4()) + \".png\" # Assuming the image is a PNG\n file_path = Path(file_name)\n\n img_url = image_data.url\n img_response = requests.get(img_url)\n if img_response.status_code == 200:\n # Write the binary content to a file\n with open(file_path, \"wb\") as img_file:\n img_file.write(img_response.content)\n print(f\"Image saved to {file_path}\")\n saved_files.append(str(file_path))\n else:\n print(f\"Failed to download the image from {img_url}\")\n else:\n print(\"No image data found in the response!\")\n\n # Return the list of saved files\n return saved_files\n\n\n# Example usage of the function:\n# generate_and_save_images(\"A cute baby sea otter\")\n" + }, + { + "title": "show_image", + "file_name": "show_image.py", + "content": "\nfrom typing import List\nimport uuid\nimport requests # to perform HTTP requests\nfrom pathlib import Path\nfrom typing_extensions import Annotated\nfrom firestudio.utils.utils import schema_recorder\nimport cv2\nfrom matplotlib import pyplot as plt\n\n@schema_recorder(description=\"A function that is capable for displaying an image given path to a image file in png or jpg or jpeg.\")\ndef show_image(path: Annotated[str, \"The path to the image file that needs to be displayed\"]) -> str:\n img = cv2.imread(path,-1)\n plt.imshow(img)\n plt.axis(\"off\")\n plt.show()\n return \"\"\n" + }, + { + "title": "find_papers_arxiv", + "description": "Function ability to find the papers on arxiv", + "file_name": "find_papers_arxiv.py", + "content": "\nimport os\nimport re\nimport json\nimport hashlib\nimport arxiv\nfrom typing import Any, Dict, List, Optional\nfrom firestudio.utils.utils import schema_recorder\nfrom typing_extensions import Annotated\n\n\n@schema_recorder(description=\"Searches arXiv for the given query using the arXiv API, then returns the search results. This is a helper function. In most cases, callers will want to use 'find_relevant_papers( query, max_results )' instead.\")\ndef search_arxiv(query: Annotated[str, \"The search query\"], max_results: Annotated[Optional[int], \"The maximum number of search results to return. Defaults to 10\"]=10) -> Annotated[List[Dict[str, Any]], \"A list of dictionaries. 
Each dictionary contains fields such as 'title', 'authors', 'summary', and 'pdf_url'\"]:\n # Example:\n # >>> results = search_arxiv(\"attention is all you need\")\n # >>> print(results)\n\n key = hashlib.md5((\"search_arxiv(\" + str(max_results) + \")\" + query).encode(\"utf-8\")).hexdigest()\n # Create the cache if it doesn't exist\n cache_dir = \".cache\"\n if not os.path.isdir(cache_dir):\n os.mkdir(cache_dir)\n\n fname = os.path.join(cache_dir, key + \".cache\")\n\n # Cache hit\n if os.path.isfile(fname):\n fh = open(fname, \"r\", encoding=\"utf-8\")\n data = json.loads(fh.read())\n fh.close()\n return data\n\n # Normalize the query, removing operator keywords\n query = re.sub(r\"[^\\s\\w]\", \" \", query.lower())\n query = re.sub(r\"\\s(and|or|not)\\s\", \" \", \" \" + query + \" \")\n query = re.sub(r\"[^\\s\\w]\", \" \", query.lower())\n query = re.sub(r\"\\s+\", \" \", query).strip()\n\n search = arxiv.Search(query=query, max_results=max_results, sort_by=arxiv.SortCriterion.Relevance)\n\n jresults = list()\n for result in search.results():\n r = dict()\n r[\"entry_id\"] = result.entry_id\n r[\"updated\"] = str(result.updated)\n r[\"published\"] = str(result.published)\n r[\"title\"] = result.title\n r[\"authors\"] = [str(a) for a in result.authors]\n r[\"summary\"] = result.summary\n r[\"comment\"] = result.comment\n r[\"journal_ref\"] = result.journal_ref\n r[\"doi\"] = result.doi\n r[\"primary_category\"] = result.primary_category\n r[\"categories\"] = result.categories\n r[\"links\"] = [str(link) for link in result.links]\n r[\"pdf_url\"] = result.pdf_url\n jresults.append(r)\n\n if len(jresults) > max_results:\n jresults = jresults[0:max_results]\n\n # Save to cache\n fh = open(fname, \"w\")\n fh.write(json.dumps(jresults))\n fh.close()\n return jresults\n" + }, + { + "title": "get_price", + "description": "Get price history for a stock ticker", + "file_name": "get_price.py", + "content": "\nimport yfinance as yf\nfrom firestudio.utils.utils import schema_recorder\nfrom typing_extensions import Annotated\nimport uuid\nfrom pathlib import Path\n\n@schema_recorder(description=\"Helper function to obtain stock price history of a company over specified period. The price information is written to a file and the path of the file is returned. The file is csv and contains following columns - Date,Open,High,Low,Close,Volume,Dividends,Stock Splits\")\ndef get_prices(ticker: Annotated[str, \"Stock ticker for a company\"], period: Annotated[str, \"data period to download (Either Use period parameter or use start and end) Valid periods are: 1d, 5d, 1mo, 3mo, 6mo, 1y, 2y, 5y, 10y, ytd, max\"]) -> Annotated[str, \"File which contains the price of the a ticker, each price in a new line\"]:\n # Generate a random UUID as the file name\n file_name = \"/tmp/\" + str(uuid.uuid4()) + \".csv\" \n file_path = Path(file_name)\n\n tk = yf.Ticker(ticker=ticker)\n prices = tk.history(period=period)\n\n with open(file_path, \"w\") as f:\n prices.to_csv(f) \n\n return file_name\n" + } + ], + "config": { + "name": "primary_assistant", + "llm_config": { + "config_list": [ + { + "model": "gpt-4-1106-preview" + } + ], + "temperature": 0.1, + "timeout": 600, + "cache_seed": null + }, + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 15, + "system_message": "You are a helpful assistant that can use available functions when needed to solve problems. At each point, do your best to determine if the user's request has been addressed. 
IF THE REQUEST HAS NOT BEEN ADDRESSED, RESPOND WITH CODE TO ADDRESS IT. IF A FAILURE OCCURRED (e.g., due to a missing library) AND SOME ADDITIONAL CODE WAS WRITTEN (e.g. code to install the library), ENSURE THAT THE ORIGINAL CODE TO ADDRESS THE TASK STILL GETS EXECUTED. If the request HAS been addressed, respond with a summary of the result. The summary must be written as a coherent helpful response to the user request e.g. 'Sure, here is result to your request ' or 'The tallest mountain in Africa is ..' etc. The summary MUST end with the word TERMINATE. If the user request is pleasantry or greeting, you should respond with a pleasantry or greeting and TERMINATE." + } + }, + { + "type": "assistant", + "description": "Default assistant to generate plans and write code to solve tasks.", + "skills": [ + { + "title": "generate_and_save_images", + "file_name": "generate_and_save_images.py", + "content": "\nfrom typing import List\nimport uuid\nimport requests # to perform HTTP requests\nfrom pathlib import Path\nfrom typing_extensions import Annotated\nfrom openai import OpenAI\nfrom firestudio.utils.utils import schema_recorder\n\n@schema_recorder(description=\"Function to paint, draw or illustrate images based on the users query or request. Generates images from a given query using OpenAI's DALL-E model and saves them to disk. Use the code below anytime there is a request to create an image\")\ndef generate_and_save_images(query: Annotated[str, \"A natural language description of the image to be generated.\"], image_size: Annotated[str, \"The size of the image to be generated. default is '1024x1024'\"] = \"1024x1024\") -> List[str]:\n client = OpenAI() # Initialize the OpenAI client\n response = client.images.generate(model=\"dall-e-3\", prompt=query, n=1, size=image_size) # Generate images\n\n # List to store the file names of saved images\n saved_files = []\n\n # Check if the response is successful\n if response.data:\n for image_data in response.data:\n # Generate a random UUID as the file name\n file_name = \"/tmp/\" + str(uuid.uuid4()) + \".png\" # Assuming the image is a PNG\n file_path = Path(file_name)\n\n img_url = image_data.url\n img_response = requests.get(img_url)\n if img_response.status_code == 200:\n # Write the binary content to a file\n with open(file_path, \"wb\") as img_file:\n img_file.write(img_response.content)\n print(f\"Image saved to {file_path}\")\n saved_files.append(str(file_path))\n else:\n print(f\"Failed to download the image from {img_url}\")\n else:\n print(\"No image data found in the response!\")\n\n # Return the list of saved files\n return saved_files\n\n\n# Example usage of the function:\n# generate_and_save_images(\"A cute baby sea otter\")\n" + }, + { + "title": "show_image", + "file_name": "show_image.py", + "content": "\nfrom typing import List\nimport uuid\nimport requests # to perform HTTP requests\nfrom pathlib import Path\nfrom typing_extensions import Annotated\nfrom firestudio.utils.utils import schema_recorder\nimport cv2\nfrom matplotlib import pyplot as plt\n\n@schema_recorder(description=\"A function that is capable for displaying an image given path to a image file in png or jpg or jpeg.\")\ndef show_image(path: Annotated[str, \"The path to the image file that needs to be displayed\"]) -> str:\n img = cv2.imread(path,-1)\n plt.imshow(img)\n plt.axis(\"off\")\n plt.show()\n return \"\"\n" + }, + { + "title": "find_papers_arxiv", + "description": "Function ability to find the papers on arxiv", + "file_name": "find_papers_arxiv.py", + 
"content": "\nimport os\nimport re\nimport json\nimport hashlib\nimport arxiv\nfrom firestudio.utils.utils import schema_recorder\nfrom typing_extensions import Annotated\n\n\n@schema_recorder(description=\"Searches arXiv for the given query using the arXiv API, then returns the search results. This is a helper function. In most cases, callers will want to use 'find_relevant_papers( query, max_results )' instead.\")\ndef search_arxiv(query: Annotated[str, \"The search query\"], max_results: Annotated[Optional[int], \"The maximum number of search results to return. Defaults to 10\"]=10) -> Annotated[List[Dict[str, Any]], \"A list of dictionaries. Each dictionary contains fields such as 'title', 'authors', 'summary', and 'pdf_url'\"]:\n # Example:\n # >>> results = search_arxiv(\"attention is all you need\")\n # >>> print(results)\n\n key = hashlib.md5((\"search_arxiv(\" + str(max_results) + \")\" + query).encode(\"utf-8\")).hexdigest()\n # Create the cache if it doesn't exist\n cache_dir = \".cache\"\n if not os.path.isdir(cache_dir):\n os.mkdir(cache_dir)\n\n fname = os.path.join(cache_dir, key + \".cache\")\n\n # Cache hit\n if os.path.isfile(fname):\n fh = open(fname, \"r\", encoding=\"utf-8\")\n data = json.loads(fh.read())\n fh.close()\n return data\n\n # Normalize the query, removing operator keywords\n query = re.sub(r\"[^\\s\\w]\", \" \", query.lower())\n query = re.sub(r\"\\s(and|or|not)\\s\", \" \", \" \" + query + \" \")\n query = re.sub(r\"[^\\s\\w]\", \" \", query.lower())\n query = re.sub(r\"\\s+\", \" \", query).strip()\n\n search = arxiv.Search(query=query, max_results=max_results, sort_by=arxiv.SortCriterion.Relevance)\n\n jresults = list()\n for result in search.results():\n r = dict()\n r[\"entry_id\"] = result.entry_id\n r[\"updated\"] = str(result.updated)\n r[\"published\"] = str(result.published)\n r[\"title\"] = result.title\n r[\"authors\"] = [str(a) for a in result.authors]\n r[\"summary\"] = result.summary\n r[\"comment\"] = result.comment\n r[\"journal_ref\"] = result.journal_ref\n r[\"doi\"] = result.doi\n r[\"primary_category\"] = result.primary_category\n r[\"categories\"] = result.categories\n r[\"links\"] = [str(link) for link in result.links]\n r[\"pdf_url\"] = result.pdf_url\n jresults.append(r)\n\n if len(jresults) > max_results:\n jresults = jresults[0:max_results]\n\n # Save to cache\n fh = open(fname, \"w\")\n fh.write(json.dumps(jresults))\n fh.close()\n return jresults\n" + }, + { + "title": "get_price", + "description": "Get price history for a stock ticker", + "file_name": "get_price.py", + "content": "\nimport yfinance as yf\nfrom firestudio.utils.utils import schema_recorder\nfrom typing_extensions import Annotated\nimport uuid\nfrom pathlib import Path\n\n@schema_recorder(description=\"Helper function to obtain stock price history of a company over specified period. The price information is written to a file and the path of the file is returned. 
The file is csv and contains following columns - Date,Open,High,Low,Close,Volume,Dividends,Stock Splits\")\ndef get_prices(ticker: Annotated[str, \"Stock ticker for a company\"], period: Annotated[str, \"data period to download (Either Use period parameter or use start and end) Valid periods are: 1d, 5d, 1mo, 3mo, 6mo, 1y, 2y, 5y, 10y, ytd, max\"]) -> Annotated[str, \"File which contains the price of the a ticker, each price in a new line\"]:\n # Generate a random UUID as the file name\n file_name = \"/tmp/\" + str(uuid.uuid4()) + \".csv\" \n file_path = Path(file_name)\n\n tk = yf.Ticker(ticker=ticker)\n prices = tk.history(period=period)\n\n with open(file_path, \"w\") as f:\n prices.to_csv(f) \n\n return file_name\n" + } + ], + "config": { + "name": "secondary_assistant", + "llm_config": { + "config_list": [ + { + "model": "gpt-4-1106-preview" + } + ], + "temperature": 0.1, + "timeout": 600, + "cache_seed": null + }, + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 15, + "system_message": "You are a helpful assistant that can use available functions when needed to solve problems. At each point, do your best to determine if the user's request has been addressed. IF THE REQUEST HAS NOT BEEN ADDRESSED, RESPOND WITH CODE TO ADDRESS IT. IF A FAILURE OCCURRED (e.g., due to a missing library) AND SOME ADDITIONAL CODE WAS WRITTEN (e.g. code to install the library), ENSURE THAT THE ORIGINAL CODE TO ADDRESS THE TASK STILL GETS EXECUTED. If the request HAS been addressed, respond with a summary of the result. The summary must be written as a coherent helpful response to the user request e.g. 'Sure, here is result to your request ' or 'The tallest mountain in Africa is ..' etc. The summary MUST end with the word TERMINATE. If the user request is pleasantry or greeting, you should respond with a pleasantry or greeting and TERMINATE." 
+ } + } + ], + "type": "default" + }, + { + "name": "FW General Agent Workflow", + "description": "This workflow is used for general purpose tasks.", + "sender": { + "type": "userproxy", + "config": { + "name": "userproxy", + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 10, + "system_message": "", + "llm_config": false, + "code_execution_config": { + "work_dir": null, + "use_docker": false + } + } + }, "receiver": { "type": "assistant", "description": "Default assistant to generate plans and write code to solve tasks.", @@ -134,7 +243,9 @@ "llm_config": { "config_list": [ { - "model": "gpt-4-1106-preview" + "model": "accounts/fireworks/models/firefunction-v1", + "api_key": "YOUR_FIREWORKS_API_KEY", + "base_url": "https://api.fireworks.ai/inference/v1" } ], "temperature": 0.1, diff --git a/samples/apps/fireworks-studio/firestudio/utils/dbutils.py b/samples/apps/fireworks-studio/firestudio/utils/dbutils.py index 17d05f73744a..50df22e5e9f2 100644 --- a/samples/apps/fireworks-studio/firestudio/utils/dbutils.py +++ b/samples/apps/fireworks-studio/firestudio/utils/dbutils.py @@ -4,7 +4,14 @@ import threading import os from typing import Any, List, Dict, Optional, Tuple -from ..datamodel import AgentFlowSpec, AgentWorkFlowConfig, Gallery, Message, Session, Skill +from ..datamodel import ( + AgentFlowSpec, + AgentWorkFlowConfig, + Gallery, + Message, + Session, + Skill, +) MESSAGES_TABLE_SQL = """ @@ -15,6 +22,7 @@ msg_id TEXT, role TEXT NOT NULL, content TEXT NOT NULL, + receiver_name TEXT NOT NULL, metadata TEXT, timestamp DATETIME, UNIQUE (user_id, root_msg_id, msg_id) @@ -156,7 +164,9 @@ def init_db(self, path: str = "database.sqlite", **kwargs: Any) -> None: # init skills table with content of defaultskills.json in current directory current_dir = os.path.dirname(os.path.realpath(__file__)) - with open(os.path.join(current_dir, "dbdefaults.json"), "r", encoding="utf-8") as json_file: + with open( + os.path.join(current_dir, "dbdefaults.json"), "r", encoding="utf-8" + ) as json_file: data = json.load(json_file) skills = data["skills"] agents = data["agents"] @@ -165,11 +175,20 @@ def init_db(self, path: str = "database.sqlite", **kwargs: Any) -> None: self.cursor.execute( "INSERT INTO skills (id, user_id, timestamp, content, title, file_name) VALUES (?, ?, ?, ?, ?, ?)", - (skill.id, "default", skill.timestamp, skill.content, skill.title, skill.file_name), + ( + skill.id, + "default", + skill.timestamp, + skill.content, + skill.title, + skill.file_name, + ), ) for agent in agents: agent = AgentFlowSpec(**agent) - agent.skills = [skill.dict() for skill in agent.skills] if agent.skills else None + agent.skills = ( + [skill.dict() for skill in agent.skills] if agent.skills else None + ) self.cursor.execute( "INSERT INTO agents (id, user_id, timestamp, config, type, skills, description) VALUES (?, ?, ?, ?, ?, ?, ?)", ( @@ -192,7 +211,11 @@ def init_db(self, path: str = "database.sqlite", **kwargs: Any) -> None: "default", workflow.timestamp, json.dumps(workflow.sender.dict()), - json.dumps(workflow.receiver.dict()), + json.dumps(workflow.receiver.dict()) + if not isinstance(workflow.receiver, list) + else json.dumps( + [receiver.dict() for receiver in workflow.receiver] + ), workflow.type, workflow.name, workflow.description, @@ -203,7 +226,9 @@ def init_db(self, path: str = "database.sqlite", **kwargs: Any) -> None: # Commit the changes and close the connection self.conn.commit() - def query(self, query: str, args: Tuple = (), return_json: bool = False) -> 
List[Dict[str, Any]]: + def query( + self, query: str, args: Tuple = (), return_json: bool = False + ) -> List[Dict[str, Any]]: """ Executes a given SQL query and returns the results. @@ -221,10 +246,15 @@ def query(self, query: str, args: Tuple = (), return_json: bool = False) -> List result = self.cursor.fetchall() self.commit() if return_json: - result = [dict(zip([key[0] for key in self.cursor.description], row)) for row in result] + result = [ + dict(zip([key[0] for key in self.cursor.description], row)) + for row in result + ] return result except Exception as e: - logger.error("Error running query with query %s and args %s: %s", query, args, e) + logger.error( + "Error running query with query %s and args %s: %s", query, args, e + ) raise e def commit(self) -> None: @@ -305,8 +335,15 @@ def create_session(user_id: str, session: Session, dbmanager: DBManager) -> List :param dbmanager: The DBManager instance to interact with the database :return: A list of dictionaries, each representing a session """ - query = "INSERT INTO sessions (user_id, id, timestamp, flow_config) VALUES (?, ?, ?,?)" - args = (session.user_id, session.id, session.timestamp, json.dumps(session.flow_config.dict())) + query = ( + "INSERT INTO sessions (user_id, id, timestamp, flow_config) VALUES (?, ?, ?,?)" + ) + args = ( + session.user_id, + session.id, + session.timestamp, + json.dumps(session.flow_config.dict()), + ) dbmanager.query(query=query, args=args) sessions = get_sessions(user_id=user_id, dbmanager=dbmanager) @@ -333,7 +370,9 @@ def delete_session(session: Session, dbmanager: DBManager) -> List[dict]: return get_sessions(user_id=session.user_id, dbmanager=dbmanager) -def create_gallery(session: Session, dbmanager: DBManager, tags: List[str] = []) -> Gallery: +def create_gallery( + session: Session, dbmanager: DBManager, tags: List[str] = [] +) -> Gallery: """ Publish a session to the gallery table in the database. Fetches the session messages first, then saves session and messages object to the gallery database table. 
:param session: The Session object containing session data @@ -342,7 +381,9 @@ def create_gallery(session: Session, dbmanager: DBManager, tags: List[str] = []) :return: A gallery object containing the session and messages objects """ - messages = get_messages(user_id=session.user_id, session_id=session.id, dbmanager=dbmanager) + messages = get_messages( + user_id=session.user_id, session_id=session.id, dbmanager=dbmanager + ) gallery_item = Gallery(session=session, messages=messages, tags=tags) query = "INSERT INTO gallery (id, session, messages, tags, timestamp) VALUES (?, ?, ?, ?,?)" args = ( @@ -433,7 +474,14 @@ def upsert_skill(skill: Skill, dbmanager: DBManager) -> List[Skill]: update_item("skills", skill.id, updated_data, dbmanager) else: query = "INSERT INTO skills (id, user_id, timestamp, content, title, file_name) VALUES (?, ?, ?, ?, ?, ?)" - args = (skill.id, skill.user_id, skill.timestamp, skill.content, skill.title, skill.file_name) + args = ( + skill.id, + skill.user_id, + skill.timestamp, + skill.content, + skill.title, + skill.file_name, + ) dbmanager.query(query=query, args=args) skills = get_skills(user_id=skill.user_id, dbmanager=dbmanager) @@ -458,7 +506,11 @@ def delete_skill(skill: Skill, dbmanager: DBManager) -> List[Skill]: def delete_message( - user_id: str, msg_id: str, session_id: str, dbmanager: DBManager, delete_all: bool = False + user_id: str, + msg_id: str, + session_id: str, + dbmanager: DBManager, + delete_all: bool = False, ) -> List[dict]: """ Delete a specific message or all messages for a user and session from the database. @@ -477,10 +529,14 @@ def delete_message( dbmanager.query(query=query, args=args) return [] else: - query = "DELETE FROM messages WHERE user_id = ? AND msg_id = ? AND session_id = ?" + query = ( + "DELETE FROM messages WHERE user_id = ? AND msg_id = ? AND session_id = ?" + ) args = (user_id, msg_id, session_id) dbmanager.query(query=query, args=args) - messages = get_messages(user_id=user_id, session_id=session_id, dbmanager=dbmanager) + messages = get_messages( + user_id=user_id, session_id=session_id, dbmanager=dbmanager + ) return messages @@ -507,7 +563,9 @@ def get_agents(user_id: str, dbmanager: DBManager) -> List[AgentFlowSpec]: return agents -def upsert_agent(agent_flow_spec: AgentFlowSpec, dbmanager: DBManager) -> List[Dict[str, Any]]: +def upsert_agent( + agent_flow_spec: AgentFlowSpec, dbmanager: DBManager +) -> List[Dict[str, Any]]: """ Insert or update an agent for a specific user in the database. 
@@ -528,7 +586,11 @@ def upsert_agent(agent_flow_spec: AgentFlowSpec, dbmanager: DBManager) -> List[D "config": json.dumps(agent_flow_spec.config.dict()), "type": agent_flow_spec.type, "description": agent_flow_spec.description, - "skills": json.dumps([x.dict() for x in agent_flow_spec.skills] if agent_flow_spec.skills else []), + "skills": json.dumps( + [x.dict() for x in agent_flow_spec.skills] + if agent_flow_spec.skills + else [] + ), } update_item("agents", agent_flow_spec.id, updated_data, dbmanager) else: @@ -541,7 +603,11 @@ def upsert_agent(agent_flow_spec: AgentFlowSpec, dbmanager: DBManager) -> List[D config_json, agent_flow_spec.type, agent_flow_spec.description, - json.dumps([x.dict() for x in agent_flow_spec.skills] if agent_flow_spec.skills else []), + json.dumps( + [x.dict() for x in agent_flow_spec.skills] + if agent_flow_spec.skills + else [] + ), ) dbmanager.query(query=query, args=args) @@ -566,14 +632,18 @@ def delete_agent(agent: AgentFlowSpec, dbmanager: DBManager) -> List[Dict[str, A return get_agents(user_id=agent.user_id, dbmanager=dbmanager) -def get_item_by_field(table: str, field: str, value: Any, dbmanager: DBManager) -> Optional[Dict[str, Any]]: +def get_item_by_field( + table: str, field: str, value: Any, dbmanager: DBManager +) -> Optional[Dict[str, Any]]: query = f"SELECT * FROM {table} WHERE {field} = ?" args = (value,) result = dbmanager.query(query=query, args=args) return result[0] if result else None -def update_item(table: str, item_id: str, updated_data: Dict[str, Any], dbmanager: DBManager) -> None: +def update_item( + table: str, item_id: str, updated_data: Dict[str, Any], dbmanager: DBManager +) -> None: set_clause = ", ".join([f"{key} = ?" for key in updated_data.keys()]) query = f"UPDATE {table} SET {set_clause} WHERE id = ?" args = (*updated_data.values(), item_id) @@ -602,7 +672,9 @@ def get_workflows(user_id: str, dbmanager: DBManager) -> List[Dict[str, Any]]: return workflows -def upsert_workflow(workflow: AgentWorkFlowConfig, dbmanager: DBManager) -> List[Dict[str, Any]]: +def upsert_workflow( + workflow: AgentWorkFlowConfig, dbmanager: DBManager +) -> List[Dict[str, Any]]: """ Insert or update a workflow for a specific user in the database. @@ -653,7 +725,9 @@ def upsert_workflow(workflow: AgentWorkFlowConfig, dbmanager: DBManager) -> List return get_workflows(user_id=workflow.user_id, dbmanager=dbmanager) -def delete_workflow(workflow: AgentWorkFlowConfig, dbmanager: DBManager) -> List[Dict[str, Any]]: +def delete_workflow( + workflow: AgentWorkFlowConfig, dbmanager: DBManager +) -> List[Dict[str, Any]]: """ Delete a workflow for a specific user from the database. If the workflow does not exist, do nothing. 
diff --git a/samples/apps/fireworks-studio/firestudio/web/app.py b/samples/apps/fireworks-studio/firestudio/web/app.py index 642c775eee94..56614e95c8b4 100644 --- a/samples/apps/fireworks-studio/firestudio/web/app.py +++ b/samples/apps/fireworks-studio/firestudio/web/app.py @@ -1,6 +1,7 @@ import json import os import traceback +from typing import Any, Dict, List from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware from fastapi.staticfiles import StaticFiles @@ -64,7 +65,7 @@ @api.post("/messages") async def add_message(req: ChatWebRequestModel): message = Message(**req.message.dict()) - user_history = dbutils.get_messages( + user_history: List[Message] = dbutils.get_messages( user_id=message.user_id, session_id=req.message.session_id, dbmanager=dbmanager ) @@ -76,19 +77,26 @@ async def add_message(req: ChatWebRequestModel): os.makedirs(user_dir, exist_ok=True) try: - response_message: Message = chatmanager.chat( + response_messages: List[Message] = chatmanager.chat( message=message, - history=user_history, + history_list=user_history, + agent_flow_config=req.flow_config, work_dir=user_dir, - flow_config=req.flow_config, ) # save assistant response to db - dbutils.create_message(message=response_message, dbmanager=dbmanager) + for response_message in response_messages: + dbutils.create_message(message=response_message, dbmanager=dbmanager) response = { "status": True, - "message": response_message.content, - "metadata": json.loads(response_message.metadata), + "message": { + response_message.receiver_name: response_message.content + for response_message in response_messages + }, + "metadata": { + response_message.receiver_name: json.loads(response_message.metadata) + for response_message in response_messages + }, } return response except Exception as ex_error: diff --git a/samples/apps/fireworks-studio/frontend/src/components/types.ts b/samples/apps/fireworks-studio/frontend/src/components/types.ts index 4d22ddfef235..433bf98f7f07 100644 --- a/samples/apps/fireworks-studio/frontend/src/components/types.ts +++ b/samples/apps/fireworks-studio/frontend/src/components/types.ts @@ -56,7 +56,7 @@ export interface IFlowConfig { name: string; description: string; sender: IAgentFlowSpec; - receiver: IAgentFlowSpec; + receiver: IAgentFlowSpec | Array<IAgentFlowSpec>; type: "default" | "groupchat"; timestamp?: string; summary_method?: "none" | "last" | "llm"; diff --git a/samples/apps/fireworks-studio/frontend/src/components/views/builder/workflow.tsx b/samples/apps/fireworks-studio/frontend/src/components/views/builder/workflow.tsx index 52eda8d551af..29b159e7d7da 100644 --- a/samples/apps/fireworks-studio/frontend/src/components/views/builder/workflow.tsx +++ b/samples/apps/fireworks-studio/frontend/src/components/views/builder/workflow.tsx @@ -21,7 +21,7 @@ import { LoadingOverlay, } from "../../atoms"; -const WorkflowView = ({}: any) => { +const WorkflowView = ({ }: any) => { const [loading, setLoading] = React.useState(false); const [error, setError] = React.useState({ status: true, diff --git a/samples/apps/fireworks-studio/notebooks/multi_agent_spec.json b/samples/apps/fireworks-studio/notebooks/multi_agent_spec.json new file mode 100644 index 000000000000..30d6739809c3 --- /dev/null +++ b/samples/apps/fireworks-studio/notebooks/multi_agent_spec.json @@ -0,0 +1,109 @@ +{ + "name": "Test Multi Agent Workflow", + "description": "A workflow to quickly test agents", + "sender": { + "type": "userproxy", + "config": { + "name": "userproxy", + "human_input_mode": "NEVER", +
"max_consecutive_auto_reply": 5, + "system_message": "", + "llm_config": false, + "code_execution_config": { + "work_dir": null, + "use_docker": false + } + } + }, + "receiver": [ + { + "type": "assistant", + "config": { + "name": "gpt_assistant", + "llm_config": { + "config_list": [ + { + "model": "gpt-4-1106-preview" + } + ], + "temperature": 0.1, + "timeout": 600, + "cache_seed": null + }, + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 8, + "system_message": "You are a helpful assistant that can use available functions when needed to solve problems. At each point, do your best to determine if the user's request has been addressed. IF THE REQUEST HAS NOT BEEN ADDRESSED, RESPOND WITH CODE TO ADDRESS IT. IF A FAILURE OCCURRED (e.g., due to a missing library) AND SOME ADDITIONAL CODE WAS WRITTEN (e.g. code to install the library), ENSURE THAT THE ORIGINAL CODE TO ADDRESS THE TASK STILL GETS EXECUTED. If the request HAS been addressed, respond with a summary of the result. The summary must be written as a coherent helpful response to the user request e.g. 'Sure, here is result to your request ' or 'The tallest mountain in Africa is ..' etc. The summary MUST end with the word TERMINATE. If the user request is pleasantry or greeting, you should respond with a pleasantry or greeting and TERMINATE." + }, + "skills": [ + { + "title": "generate_and_save_images", + "file_name": "generate_and_save_images.py", + "content": "\nfrom typing import List\nimport uuid\nimport requests # to perform HTTP requests\nfrom pathlib import Path\nfrom typing_extensions import Annotated\nfrom openai import OpenAI\nfrom firestudio.utils.utils import schema_recorder\n\n@schema_recorder(description=\"Function to paint, draw or illustrate images based on the users query or request. Generates images from a given query using OpenAI's DALL-E model and saves them to disk. Use the code below anytime there is a request to create an image\")\ndef generate_and_save_images(query: Annotated[str, \"A natural language description of the image to be generated.\"], image_size: Annotated[str, \"The size of the image to be generated. 
default is '1024x1024'\"] = \"1024x1024\") -> List[str]:\n client = OpenAI() # Initialize the OpenAI client\n response = client.images.generate(model=\"dall-e-3\", prompt=query, n=1, size=image_size) # Generate images\n\n # List to store the file names of saved images\n saved_files = []\n\n # Check if the response is successful\n if response.data:\n for image_data in response.data:\n # Generate a random UUID as the file name\n file_name = \"/tmp/\" + str(uuid.uuid4()) + \".png\" # Assuming the image is a PNG\n file_path = Path(file_name)\n\n img_url = image_data.url\n img_response = requests.get(img_url)\n if img_response.status_code == 200:\n # Write the binary content to a file\n with open(file_path, \"wb\") as img_file:\n img_file.write(img_response.content)\n print(f\"Image saved to {file_path}\")\n saved_files.append(str(file_path))\n else:\n print(f\"Failed to download the image from {img_url}\")\n else:\n print(\"No image data found in the response!\")\n\n # Return the list of saved files\n return saved_files\n\n\n# Example usage of the function:\n# generate_and_save_images(\"A cute baby sea otter\")\n" + }, + { + "title": "show_image", + "file_name": "show_image.py", + "content": "\nfrom typing import List\nimport uuid\nimport requests # to perform HTTP requests\nfrom pathlib import Path\nfrom typing_extensions import Annotated\nfrom firestudio.utils.utils import schema_recorder\nimport cv2\nfrom matplotlib import pyplot as plt\n\n@schema_recorder(description=\"A function that is capable for displaying an image given path to a image file in png or jpg or jpeg.\")\ndef show_image(path: Annotated[str, \"The path to the image file that needs to be displayed\"]) -> str:\n img = cv2.imread(path,-1)\n plt.imshow(img)\n plt.axis(\"off\")\n plt.show()\n return \"\"\n" + }, + { + "title": "find_papers_arxiv", + "description": "Function ability to find the papers on arxiv", + "file_name": "find_papers_arxiv.py", + "content": "\nimport os\nimport re\nimport json\nimport hashlib\nimport arxiv\nfrom typing import Any, Dict, List, Optional\nfrom firestudio.utils.utils import schema_recorder\nfrom typing_extensions import Annotated\n\n\n@schema_recorder(description=\"Searches arXiv for the given query using the arXiv API, then returns the search results. This is a helper function. In most cases, callers will want to use 'find_relevant_papers( query, max_results )' instead.\")\ndef search_arxiv(query: Annotated[str, \"The search query\"], max_results: Annotated[Optional[int], \"The maximum number of search results to return. Defaults to 10\"]=10) -> Annotated[List[Dict[str, Any]], \"A list of dictionaries. 
Each dictionary contains fields such as 'title', 'authors', 'summary', and 'pdf_url'\"]:\n # Example:\n # >>> results = search_arxiv(\"attention is all you need\")\n # >>> print(results)\n\n key = hashlib.md5((\"search_arxiv(\" + str(max_results) + \")\" + query).encode(\"utf-8\")).hexdigest()\n # Create the cache if it doesn't exist\n cache_dir = \".cache\"\n if not os.path.isdir(cache_dir):\n os.mkdir(cache_dir)\n\n fname = os.path.join(cache_dir, key + \".cache\")\n\n # Cache hit\n if os.path.isfile(fname):\n fh = open(fname, \"r\", encoding=\"utf-8\")\n data = json.loads(fh.read())\n fh.close()\n return data\n\n # Normalize the query, removing operator keywords\n query = re.sub(r\"[^\\s\\w]\", \" \", query.lower())\n query = re.sub(r\"\\s(and|or|not)\\s\", \" \", \" \" + query + \" \")\n query = re.sub(r\"[^\\s\\w]\", \" \", query.lower())\n query = re.sub(r\"\\s+\", \" \", query).strip()\n\n search = arxiv.Search(query=query, max_results=max_results, sort_by=arxiv.SortCriterion.Relevance)\n\n jresults = list()\n for result in search.results():\n r = dict()\n r[\"entry_id\"] = result.entry_id\n r[\"updated\"] = str(result.updated)\n r[\"published\"] = str(result.published)\n r[\"title\"] = result.title\n r[\"authors\"] = [str(a) for a in result.authors]\n r[\"summary\"] = result.summary\n r[\"comment\"] = result.comment\n r[\"journal_ref\"] = result.journal_ref\n r[\"doi\"] = result.doi\n r[\"primary_category\"] = result.primary_category\n r[\"categories\"] = result.categories\n r[\"links\"] = [str(link) for link in result.links]\n r[\"pdf_url\"] = result.pdf_url\n jresults.append(r)\n\n if len(jresults) > max_results:\n jresults = jresults[0:max_results]\n\n # Save to cache\n fh = open(fname, \"w\")\n fh.write(json.dumps(jresults))\n fh.close()\n return jresults\n" + }, + { + "title": "get_price", + "description": "Get price history for a stock ticker", + "file_name": "get_price.py", + "content": "\nimport yfinance as yf\nfrom firestudio.utils.utils import schema_recorder\nfrom typing_extensions import Annotated\nimport uuid\nfrom pathlib import Path\n\n@schema_recorder(description=\"Helper function to obtain stock price history of a company over specified period. The price information is written to a file and the path of the file is returned. 
The file is csv and contains following columns - Date,Open,High,Low,Close,Volume,Dividends,Stock Splits\")\ndef get_prices(ticker: Annotated[str, \"Stock ticker for a company\"], period: Annotated[str, \"data period to download (Either Use period parameter or use start and end) Valid periods are: 1d, 5d, 1mo, 3mo, 6mo, 1y, 2y, 5y, 10y, ytd, max\"]) -> Annotated[str, \"File which contains the price of the a ticker, each price in a new line\"]:\n # Generate a random UUID as the file name\n file_name = \"/tmp/\" + str(uuid.uuid4()) + \".csv\" \n file_path = Path(file_name)\n\n tk = yf.Ticker(ticker=ticker)\n prices = tk.history(period=period)\n\n with open(file_path, \"w\") as f:\n prices.to_csv(f) \n\n return file_name\n" + } + ] + }, + { + "type": "assistant", + "config": { + "name": "fw_assistant", + "llm_config": { + "config_list": [ + { + "model": "accounts/fireworks/models/firefunction-v1", + "api_key": "YOUR_FIREWORKS_API_KEY", + "base_url": "https://api.fireworks.ai/inference/v1" + } + ], + "temperature": 0.1, + "timeout": 600, + "cache_seed": null + }, + "human_input_mode": "NEVER", + "max_consecutive_auto_reply": 8, + "system_message": "You are a helpful assistant that can use available functions when needed to solve problems. At each point, do your best to determine if the user's request has been addressed. IF THE REQUEST HAS NOT BEEN ADDRESSED, RESPOND WITH CODE TO ADDRESS IT. IF A FAILURE OCCURRED (e.g., due to a missing library) AND SOME ADDITIONAL CODE WAS WRITTEN (e.g. code to install the library), ENSURE THAT THE ORIGINAL CODE TO ADDRESS THE TASK STILL GETS EXECUTED. If the request HAS been addressed, respond with a summary of the result. The summary must be written as a coherent helpful response to the user request e.g. 'Sure, here is result to your request ' or 'The tallest mountain in Africa is ..' etc. The summary MUST end with the word TERMINATE. If the user request is pleasantry or greeting, you should respond with a pleasantry or greeting and TERMINATE." + }, + "skills": [ + { + "title": "generate_and_save_images", + "file_name": "generate_and_save_images.py", + "content": "\nfrom typing import List\nimport uuid\nimport requests # to perform HTTP requests\nfrom pathlib import Path\nfrom typing_extensions import Annotated\nfrom openai import OpenAI\nfrom firestudio.utils.utils import schema_recorder\n\n@schema_recorder(description=\"Function to paint, draw or illustrate images based on the users query or request. Generates images from a given query using OpenAI's DALL-E model and saves them to disk. Use the code below anytime there is a request to create an image\")\ndef generate_and_save_images(query: Annotated[str, \"A natural language description of the image to be generated.\"], image_size: Annotated[str, \"The size of the image to be generated. 
default is '1024x1024'\"] = \"1024x1024\") -> List[str]:\n client = OpenAI() # Initialize the OpenAI client\n response = client.images.generate(model=\"dall-e-3\", prompt=query, n=1, size=image_size) # Generate images\n\n # List to store the file names of saved images\n saved_files = []\n\n # Check if the response is successful\n if response.data:\n for image_data in response.data:\n # Generate a random UUID as the file name\n file_name = \"/tmp/\" + str(uuid.uuid4()) + \".png\" # Assuming the image is a PNG\n file_path = Path(file_name)\n\n img_url = image_data.url\n img_response = requests.get(img_url)\n if img_response.status_code == 200:\n # Write the binary content to a file\n with open(file_path, \"wb\") as img_file:\n img_file.write(img_response.content)\n print(f\"Image saved to {file_path}\")\n saved_files.append(str(file_path))\n else:\n print(f\"Failed to download the image from {img_url}\")\n else:\n print(\"No image data found in the response!\")\n\n # Return the list of saved files\n return saved_files\n\n\n# Example usage of the function:\n# generate_and_save_images(\"A cute baby sea otter\")\n" + }, + { + "title": "show_image", + "file_name": "show_image.py", + "content": "\nfrom typing import List\nimport uuid\nimport requests # to perform HTTP requests\nfrom pathlib import Path\nfrom typing_extensions import Annotated\nfrom firestudio.utils.utils import schema_recorder\nimport cv2\nfrom matplotlib import pyplot as plt\n\n@schema_recorder(description=\"A function that is capable for displaying an image given path to a image file in png or jpg or jpeg.\")\ndef show_image(path: Annotated[str, \"The path to the image file that needs to be displayed\"]) -> str:\n img = cv2.imread(path,-1)\n plt.imshow(img)\n plt.axis(\"off\")\n plt.show()\n return \"\"\n" + }, + { + "title": "find_papers_arxiv", + "description": "Function ability to find the papers on arxiv", + "file_name": "find_papers_arxiv.py", + "content": "\nimport os\nimport re\nimport json\nimport hashlib\nimport arxiv\nfrom typing import Any, Dict, List, Optional\nfrom firestudio.utils.utils import schema_recorder\nfrom typing_extensions import Annotated\n\n\n@schema_recorder(description=\"Searches arXiv for the given query using the arXiv API, then returns the search results. This is a helper function. In most cases, callers will want to use 'find_relevant_papers( query, max_results )' instead.\")\ndef search_arxiv(query: Annotated[str, \"The search query\"], max_results: Annotated[Optional[int], \"The maximum number of search results to return. Defaults to 10\"]=10) -> Annotated[List[Dict[str, Any]], \"A list of dictionaries. 
Each dictionary contains fields such as 'title', 'authors', 'summary', and 'pdf_url'\"]:\n # Example:\n # >>> results = search_arxiv(\"attention is all you need\")\n # >>> print(results)\n\n key = hashlib.md5((\"search_arxiv(\" + str(max_results) + \")\" + query).encode(\"utf-8\")).hexdigest()\n # Create the cache if it doesn't exist\n cache_dir = \".cache\"\n if not os.path.isdir(cache_dir):\n os.mkdir(cache_dir)\n\n fname = os.path.join(cache_dir, key + \".cache\")\n\n # Cache hit\n if os.path.isfile(fname):\n fh = open(fname, \"r\", encoding=\"utf-8\")\n data = json.loads(fh.read())\n fh.close()\n return data\n\n # Normalize the query, removing operator keywords\n query = re.sub(r\"[^\\s\\w]\", \" \", query.lower())\n query = re.sub(r\"\\s(and|or|not)\\s\", \" \", \" \" + query + \" \")\n query = re.sub(r\"[^\\s\\w]\", \" \", query.lower())\n query = re.sub(r\"\\s+\", \" \", query).strip()\n\n search = arxiv.Search(query=query, max_results=max_results, sort_by=arxiv.SortCriterion.Relevance)\n\n jresults = list()\n for result in search.results():\n r = dict()\n r[\"entry_id\"] = result.entry_id\n r[\"updated\"] = str(result.updated)\n r[\"published\"] = str(result.published)\n r[\"title\"] = result.title\n r[\"authors\"] = [str(a) for a in result.authors]\n r[\"summary\"] = result.summary\n r[\"comment\"] = result.comment\n r[\"journal_ref\"] = result.journal_ref\n r[\"doi\"] = result.doi\n r[\"primary_category\"] = result.primary_category\n r[\"categories\"] = result.categories\n r[\"links\"] = [str(link) for link in result.links]\n r[\"pdf_url\"] = result.pdf_url\n jresults.append(r)\n\n if len(jresults) > max_results:\n jresults = jresults[0:max_results]\n\n # Save to cache\n fh = open(fname, \"w\")\n fh.write(json.dumps(jresults))\n fh.close()\n return jresults\n" + }, + { + "title": "get_price", + "description": "Get price history for a stock ticker", + "file_name": "get_price.py", + "content": "\nimport yfinance as yf\nfrom firestudio.utils.utils import schema_recorder\nfrom typing_extensions import Annotated\nimport uuid\nfrom pathlib import Path\n\n@schema_recorder(description=\"Helper function to obtain stock price history of a company over specified period. The price information is written to a file and the path of the file is returned. 
The file is csv and contains following columns - Date,Open,High,Low,Close,Volume,Dividends,Stock Splits\")\ndef get_prices(ticker: Annotated[str, \"Stock ticker for a company\"], period: Annotated[str, \"data period to download (Either Use period parameter or use start and end) Valid periods are: 1d, 5d, 1mo, 3mo, 6mo, 1y, 2y, 5y, 10y, ytd, max\"]) -> Annotated[str, \"File which contains the price of the a ticker, each price in a new line\"]:\n # Generate a random UUID as the file name\n file_name = \"/tmp/\" + str(uuid.uuid4()) + \".csv\" \n file_path = Path(file_name)\n\n tk = yf.Ticker(ticker=ticker)\n prices = tk.history(period=period)\n\n with open(file_path, \"w\") as f:\n prices.to_csv(f) \n\n return file_name\n" + } + ] + } + ], + "type": "default" +} \ No newline at end of file diff --git a/samples/apps/fireworks-studio/notebooks/multi_agent_tutorial.ipynb b/samples/apps/fireworks-studio/notebooks/multi_agent_tutorial.ipynb new file mode 100644 index 000000000000..c36bdf74f2e9 --- /dev/null +++ b/samples/apps/fireworks-studio/notebooks/multi_agent_tutorial.ipynb @@ -0,0 +1,111 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## AutoGen Studio Agent Workflow API Example\n", + "\n", + "This notebook demonstrates the capabilities of the AutoGen Studio workflow Python API.\n", + "\n", + "- Declarative Specification of an Agent Workflow \n", + "- Loading the specification and running the resulting agent\n", + "\n", + "\n", + "> Note: The notebook currently demonstrates support for a two-agent setup. Support for GroupChat is currently in development." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "\n", + "from firestudio import AgentWorkFlowConfig, AutoGenWorkFlowManager\n", + "import uuid\n", + "import os\n", + "\n", + "work_dir = f\"/tmp/{uuid.uuid4()}\"\n", + "if not os.path.exists(work_dir):\n", + " os.makedirs(work_dir)\n", + "\n", + "# load an agent specification in JSON\n", + "agent_spec = json.load(open(\"multi_agent_spec.json\"))" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'type': 'userproxy',\n", + " 'config': {'name': 'userproxy',\n", + " 'human_input_mode': 'NEVER',\n", + " 'max_consecutive_auto_reply': 5,\n", + " 'system_message': '',\n", + " 'llm_config': False,\n", + " 'code_execution_config': {'work_dir': None, 'use_docker': False}}}" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "agent_spec['sender']" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# inspect the receivers declared in the spec\n", + "[receiver['config']['name'] for receiver in agent_spec['receiver']]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Sketch: load the spec and run it, assuming AgentWorkFlowConfig accepts this\n", + "# dict form. The workflow manager drives one receiver at a time, so pick the\n", + "# first receiver from the list (the chat manager does the same per receiver).\n", + "flow_config = AgentWorkFlowConfig(**agent_spec)\n", + "flow_config.receiver = flow_config.receiver[0]\n", + "flow = AutoGenWorkFlowManager(config=flow_config, history=[], work_dir=work_dir)\n", + "flow.run(message=\"What is the height of the Eiffel Tower?\", clear_history=False)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.7" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +}