From 0c5978666135964cbd89126272049eab8afb46b2 Mon Sep 17 00:00:00 2001 From: ma3oun Date: Thu, 5 Dec 2024 17:00:45 +0100 Subject: [PATCH 1/6] Moved to latest openai api version --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 829ee93d7..54c52788c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ Flask==2.3.2 Flask-SocketIO==5.3.4 importlib-metadata==6.8.0 numpy==1.24.3 -openai==1.44.1 +openai==1.56.2 regex==2023.6.3 requests==2.31.0 tenacity==8.2.2 @@ -17,6 +17,6 @@ beautifulsoup4==4.12.2 faiss-cpu==1.7.4 pyyaml==6.0 easydict==1.10 -rich==9.13.0 +rich==13.9.0 graphviz==0.20.1 imgcat==0.5.0 From b1bc24be1ef9cff42e91d403efea65d59d90502b Mon Sep 17 00:00:00 2001 From: ma3oun Date: Thu, 5 Dec 2024 17:01:51 +0100 Subject: [PATCH 2/6] support for custom yaml file for configuration --- generate_graph.py | 87 +++++++--- graph.py | 393 ++++++++++++++++++++++++++++++++-------------- 2 files changed, 339 insertions(+), 141 deletions(-) diff --git a/generate_graph.py b/generate_graph.py index 9784668d1..5c4a9b3b8 100644 --- a/generate_graph.py +++ b/generate_graph.py @@ -4,26 +4,33 @@ from graphviz import Digraph import subprocess + class Edge: """Represents an edge in a graph with a source and target node.""" + def __init__(self, source: int, target: int): self.source = source self.target = target + class Graph: """Represents a directed graph with various methods to generate and analyze graph structures.""" - def __init__(self, node_num: int, topo: str): + + def __init__(self, node_num: int, topo: str, config: str = "config.yaml"): self.name = topo self.node_num = node_num self.edges = [] - + self.config = config + def display_image_with_imgcat(self, image_path): """Display the image with imgcat""" subprocess.run(["imgcat", image_path]) def exists_edge(self, source: int, target: int) -> bool: """Checks if an edge exists between the source and target nodes.""" - return any(edge.source == source and edge.target == target for edge in self.edges) + return any( + edge.source == source and edge.target == target for edge in self.edges + ) def generate_chain(self): """Generates a chain graph with the specified number of nodes.""" @@ -83,8 +90,12 @@ def generate_mlp(self): def generate_random(self): """Generates a random graph with the specified number of nodes.""" self.name = "random" - edge_num = random.randint(self.node_num-1, self.node_num*(self.node_num-1)/2) - edges_space = [(u, v) for u in range(self.node_num) for v in range(self.node_num) if u < v] + edge_num = random.randint( + self.node_num - 1, self.node_num * (self.node_num - 1) / 2 + ) + edges_space = [ + (u, v) for u in range(self.node_num) for v in range(self.node_num) if u < v + ] random.shuffle(edges_space) for i in range(edge_num): @@ -95,7 +106,10 @@ def generate_random(self): def get_list(self, reverse=False): """Returns a list of edges in the graph, optionally reversed.""" - return [(edge.target, edge.source) if reverse else (edge.source, edge.target) for edge in self.edges] + return [ + (edge.target, edge.source) if reverse else (edge.source, edge.target) + for edge in self.edges + ] def reverse(self): """Reverses the direction of all edges in the graph.""" @@ -104,13 +118,25 @@ def reverse(self): def view(self, reverse=False): """Visualizes the graph using Graphviz and saves it to a file.""" - graph_viz = Digraph(format="png", node_attr={"shape": "circle"}, edge_attr={"arrowhead": "normal"}) + graph_viz = Digraph( + format="png", + 
node_attr={"shape": "circle"}, + edge_attr={"arrowhead": "normal"}, + ) llist = self.get_list(reverse) - for (u, v) in llist: + for u, v in llist: graph_viz.edge(str(u), str(v)) - timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime()) - graph_viz.render(directory="./tmp/generated_graphs", filename="graph_{}_{}_{}".format(self.name, self.node_num, timestamp)) - self.display_image_with_imgcat(f"./tmp/generated_graphs/graph_{self.name}_{self.node_num}_{timestamp}.png") + graph_viz.render( + directory="./tmp/generated_graphs", + filename="graph_{}_{}_{}".format( + self.name, + self.node_num, + time.strftime("%Y%m%d%H%M%S", time.localtime()), + ), + ) + self.display_image_with_imgcat( + f"./tmp/generated_graphs/graph_{self.name}_{self.node_num}_{time.strftime('%Y%m%d%H%M%S', time.localtime())}.png" + ) return self def generate_graph(self, reverse=False): @@ -133,30 +159,49 @@ def generate_graph(self, reverse=False): # Generate graph structure for config.yaml edges = self.get_list(reverse) graph_structure = [f"{edge[0]}->{edge[1]}" for edge in edges] - + # Read existing config.yaml and update the graph field - with open("config.yaml", "r") as config_file: + with open(self.config, "r") as config_file: config_data = config_file.readlines() - with open("config.yaml", "w") as config_file: + with open(self.config, "w") as config_file: for line in config_data: if line.startswith("graph:"): - config_file.write(f'graph: {graph_structure}\n') + config_file.write(f"graph: {graph_structure}\n") else: config_file.write(line) - + return graph_structure + if __name__ == "__main__": import argparse - parser = argparse.ArgumentParser(description="Generate a graph based on specified parameters.") - parser.add_argument("--node_num", type=int, required=True, help="Number of nodes in the graph.") - parser.add_argument("--topology", type=str, required=True, choices=["chain", "star", "tree", "net", "mlp", "random"], help="Type of graph topology to generate.") - parser.add_argument("--reverse", action='store_true', required=False, help="Whether or not reverse the graph.") + parser = argparse.ArgumentParser( + description="Generate a graph based on specified parameters." + ) + parser.add_argument( + "--config", type=str, default="config.yaml", help="Configuration file" + ) + parser.add_argument( + "--node_num", type=int, required=True, help="Number of nodes in the graph." 
+ ) + parser.add_argument( + "--topology", + type=str, + required=True, + choices=["chain", "star", "tree", "net", "mlp", "random"], + help="Type of graph topology to generate.", + ) + parser.add_argument( + "--reverse", + action="store_true", + required=False, + help="Whether or not reverse the graph.", + ) args = parser.parse_args() - graph = Graph(node_num=args.node_num, topo=args.topology) + graph = Graph(config=args.config, node_num=args.node_num, topo=args.topology) graph_structure = graph.generate_graph(args.reverse) print("graph:", graph_structure) # This will replace the graph field in config.yaml graph.view(args.reverse) diff --git a/graph.py b/graph.py index 76fbecad2..8005e7ee5 100644 --- a/graph.py +++ b/graph.py @@ -5,7 +5,7 @@ import signal import subprocess import time -from typing import Tuple +from typing import Tuple, List import yaml from camel.agents.chat_agent import ChatAgent @@ -25,7 +25,9 @@ os.makedirs("./WareHouse", exist_ok=True) os.makedirs(f"./MacNetLog/{now}", exist_ok=True) -formatter = logging.Formatter("[%(asctime)s\t%(filename)s]\n%(message)s\n---\n", "%Y-%m-%d %H:%M:%S") +formatter = logging.Formatter( + "[%(asctime)s\t%(filename)s]\n%(message)s\n---\n", "%Y-%m-%d %H:%M:%S" +) logger = logging.getLogger() logger.setLevel(logging.INFO) @@ -49,7 +51,13 @@ def log_info(message: str, to_console: bool = True) -> None: class Node: - def __init__(self, node_id: int, temperature: float = 0.2, model: str = "GPT_4O_MINI") -> None: + def __init__( + self, + node_id: int, + config: dict, + temperature: float = 0.2, + model: ModelType = ModelType("stub", 100000), + ) -> None: """Initialize a Node.""" self.id: int = node_id self.predecessors: list[Node] = [] @@ -62,20 +70,15 @@ def __init__(self, node_id: int, temperature: float = 0.2, model: str = "GPT_4O_ self.system_message: str = " " self.pool = None self.depth: int = 0 - - args2type = { - 'GPT_3_5_TURBO': ModelType.GPT_3_5_TURBO_NEW, - 'GPT_4': ModelType.GPT_4, - 'GPT_4_TURBO': ModelType.GPT_4_TURBO, - 'GPT_4O': ModelType.GPT_4O, - 'GPT_4O_MINI': ModelType.GPT_4O_MINI, - } - self.model = args2type[model] + self.model = model + self.config = config def create_agent(self, content: str, role_name: str) -> ChatAgent: """Create a chat agent.""" agent = ChatAgent( - system_message=SystemMessage(content=content, role_name=role_name, role_type=RoleType.ASSISTANT), + system_message=SystemMessage( + content=content, role_name=role_name, role_type=RoleType.ASSISTANT + ), model=self.model, temperature=self.temperature, ) @@ -85,23 +88,24 @@ def exist_bugs(self, directory: str) -> Tuple[bool, str]: """Check if there are bugs in the software.""" success_info = "The software run successfully without errors." 
try: - if os.name == 'nt': + if os.name == "nt": command = f"cd {directory} && dir && python main.py" process = subprocess.Popen( command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, - creationflags=subprocess.CREATE_NEW_PROCESS_GROUP + creationflags=subprocess.CREATE_NEW_PROCESS_GROUP, ) else: command = f"cd {directory} && python3 main.py;" - process = subprocess.Popen(command, - shell=True, - preexec_fn=os.setsid, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) + process = subprocess.Popen( + command, + shell=True, + preexec_fn=os.setsid, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) time.sleep(3) return_code = process.returncode if process.poll() is None: @@ -114,7 +118,7 @@ def exist_bugs(self, directory: str) -> Tuple[bool, str]: if return_code == 0: return False, success_info else: - error_output = process.stderr.read().decode('utf-8') + error_output = process.stderr.read().decode("utf-8") if error_output: if "Traceback".lower() in error_output.lower(): errs = error_output.replace(directory + "/", "") @@ -125,10 +129,12 @@ def exist_bugs(self, directory: str) -> Tuple[bool, str]: return True, f"Error: {e}" except Exception as ex: return True, f"An error occurred: {ex}" - + return False, success_info - def optimize(self, task_prompt: str, pre_solution: str, config: dict, name: str) -> Tuple[str, Codes, str]: + def optimize( + self, task_prompt: str, pre_solution: str, name: str + ) -> Tuple[str, Codes, str]: """Optimize a single solution.""" logging.info(f"Node {self.id} is optimizing") success_info = "The software run successfully without errors." @@ -136,13 +142,24 @@ def optimize(self, task_prompt: str, pre_solution: str, config: dict, name: str) self.suggestions = "None." if pre_solution != "": - instructor_prompt = config.get("Agent").get("instructor_prompt").format(task_prompt, pre_solution) - message = ChatMessage(content=instructor_prompt, - role_name="User", - role_type=RoleType.USER, - meta_dict=dict(), - role="user") - self.suggestions = self.create_agent(self.system_message, "assistant").step(message).msgs[0].content + instructor_prompt = ( + self.config.get("Agent") + .get("instructor_prompt") + .format(task_prompt, pre_solution) + ) + message = ChatMessage( + content=instructor_prompt, + role_name="User", + role_type=RoleType.USER, + meta_dict=dict(), + role="user", + ) + self.suggestions = ( + self.create_agent(self.system_message, "assistant") + .step(message) + .msgs[0] + .content + ) if self.suggestions.startswith("compile()"): pre_codes = Codes(pre_solution) @@ -153,25 +170,54 @@ def optimize(self, task_prompt: str, pre_solution: str, config: dict, name: str) if not compiler_flag: self.suggestions = success_info + "\n" + self.suggestions else: - self.suggestions = error_info + "\n" + compile_info + "\n" + self.suggestions - instructor_prompt = "Compiler's feedback: " + error_info + "\n" + compile_info + \ - "pre_comments:" + self.suggestions + "\n" + instructor_prompt - message = ChatMessage(content=instructor_prompt, - role_name="User", - role_type=RoleType.USER, - meta_dict=dict(), - role="user") - self.suggestions = self.create_agent(self.system_message, "assistant").step(message).msgs[0].content - self.suggestions = error_info + "\n" + compile_info + "\n" + self.suggestions - - assistant_prompt = config.get("Agent").get("assistant_prompt").format(task_prompt, pre_solution, - self.suggestions) - message = ChatMessage(content=assistant_prompt, - role_name="User", - role_type=RoleType.USER, - meta_dict=dict(), - role="user") - 
response = self.create_agent(self.system_message, "assistant").step(message).msgs[0].content + self.suggestions = ( + error_info + "\n" + compile_info + "\n" + self.suggestions + ) + instructor_prompt = ( + "Compiler's feedback: " + + error_info + + "\n" + + compile_info + + "pre_comments:" + + self.suggestions + + "\n" + + instructor_prompt + ) + message = ChatMessage( + content=instructor_prompt, + role_name="User", + role_type=RoleType.USER, + meta_dict=dict(), + role="user", + ) + self.suggestions = ( + self.create_agent(self.system_message, "assistant") + .step(message) + .msgs[0] + .content + ) + self.suggestions = ( + error_info + "\n" + compile_info + "\n" + self.suggestions + ) + + assistant_prompt = ( + self.config.get("Agent") + .get("assistant_prompt") + .format(task_prompt, pre_solution, self.suggestions) + ) + message = ChatMessage( + content=assistant_prompt, + role_name="User", + role_type=RoleType.USER, + meta_dict=dict(), + role="user", + ) + response = ( + self.create_agent(self.system_message, "assistant") + .step(message) + .msgs[0] + .content + ) response = "" if response is None else response response = response.replace("```", "\n```").replace("'''", "\n'''") try: @@ -182,27 +228,46 @@ def optimize(self, task_prompt: str, pre_solution: str, config: dict, name: str) return response, codes, self.suggestions - def aggregate(self, prompt: str, retry_limit: int, unit_num: int, layer_directory: str, graph_depth: int, - store_dir: str) -> int: + def aggregate( + self, + prompt: str, + retry_limit: int, + unit_num: int, + layer_directory: str, + graph_depth: int, + store_dir: str, + ) -> int: """Aggregate solutions from predecessors.""" - logging.info(f"Node {self.id} is aggregating with {len(self.pre_solutions)} solutions") + logging.info( + f"Node {self.id} is aggregating with {len(self.pre_solutions)} solutions" + ) - with open("config.yaml", "r", encoding="utf-8") as f: - cc_prompt = "\n\n".join(yaml.load(f.read(), Loader=yaml.FullLoader).get("Agent").get("cc_prompt")) - cc_prompt = self.system_message + cc_prompt + cc_prompt = self.config.get("Agent").get("cc_prompt") + cc_prompt = self.system_message + cc_prompt for file in self.pre_solutions: with open(layer_directory + "/solution_{}.txt".format(file), "w") as f: for key in self.pre_solutions[file].codebooks.keys(): - f.write(str(key) + '\n\n' + self.pre_solutions[file].codebooks[key] + '\n\n') - - self.pool = Pool(len(self.pre_solutions), unit_num, layer_directory, self.model) + f.write( + str(key) + + "\n\n" + + self.pre_solutions[file].codebooks[key] + + "\n\n" + ) + + self.pool = Pool( + len(self.pre_solutions), unit_num, layer_directory, self.model.name + ) for i in range(retry_limit): - new_codes = self.pool.state_pool_add(layer_directory, cc_prompt, 6000000, prompt, - Codes(), - store_dir, - temperature=1 - self.depth / graph_depth, - ) + new_codes = self.pool.state_pool_add( + layer_directory, + cc_prompt, + 6000000, + prompt, + Codes(), + store_dir, + temperature=1 - self.depth / graph_depth, + ) if new_codes is None: logging.info(f"Retry Aggregation at round {i}!") else: @@ -212,11 +277,11 @@ def aggregate(self, prompt: str, retry_limit: int, unit_num: int, layer_director print(f"ERROR: Node {self.id} has reached the retry limit!\n") return 1 - def add_successor(self, node: 'Node') -> None: + def add_successor(self, node: "Node") -> None: """Add a successor node.""" self.successors.append(node) - def add_predecessor(self, node: 'Node') -> None: + def add_predecessor(self, node: "Node") -> None: """Add a 
predecessor node.""" self.predecessors.append(node) @@ -226,9 +291,9 @@ def parse_string(s: str) -> list[list[Tuple[int, int]]]: def parse_part(part: str) -> list[Tuple[int, int]]: return_list = [] - for sub_part in part.split(','): - if '-' in sub_part: - start, end = map(int, sub_part.split('-')) + for sub_part in part.split(","): + if "-" in sub_part: + start, end = map(int, sub_part.split("-")) return_list.append((start, end)) else: num = int(sub_part) @@ -241,21 +306,27 @@ def parse_part(part: str) -> list[Tuple[int, int]]: class Graph: """Represents a directed graph with various methods to generate and analyze graph structures.""" - def __init__(self, config: dict) -> None: + def __init__(self, config: dict, model: dict) -> None: """Initialize the Graph.""" self.config = config + self.model = model + self.now = time.strftime("%Y%m%d%H%M%S", time.localtime()) - self.node_in = Node(node_id=config.get("Node_in_id"), model=config.get("Model")) - self.node_out = Node(node_id=config.get("Node_out_id"), model=config.get("Model")) - self.nodes = {config.get("Node_in_id"): self.node_in, config.get("Node_out_id"): self.node_out} + self.node_in = Node(node_id=config.get("Node_in_id"), model=model,config=config) + self.node_out = Node(node_id=config.get("Node_out_id"), model=model,config=config) + self.nodes = { + config.get("Node_in_id"): self.node_in, + config.get("Node_out_id"): self.node_out, + } self.height = 0 self.input_layer = None self.output_layer = None self.aggregate_retry_limit = config.get("Aggregate_retry_limit") self.aggregate_unit_num = config.get("Aggregate_unit_num") - self.directory = f"./MacNetLog/{time.strftime('%Y%m%d%H%M%S', time.localtime())}" + self.directory = ( + f"./MacNetLog/{time.strftime('%Y%m%d%H%M%S', time.localtime())}" + ) self.depth = 0 - self.model_types = None def build_graph(self, type_: str) -> None: """Build the graph from the configuration.""" @@ -264,28 +335,37 @@ def build_graph(self, type_: str) -> None: if len(line) == 1: # In case there is only one layer for node_id in range(line[0][0][0], line[0][0][1] + 1): if node_id not in self.nodes: - self.add_node(Node(node_id, model=self.config.get("Model"))) + self.add_node(Node(node_id, model=self.model,config=self.config)) for i in range(len(line) - 1): from_node_list = line[i] to_node_list = line[i + 1] for from_node_tuple in from_node_list: - for from_node_id in range(from_node_tuple[0], from_node_tuple[1] + 1): + for from_node_id in range( + from_node_tuple[0], from_node_tuple[1] + 1 + ): for to_node_tuple in to_node_list: - for to_node_id in range(to_node_tuple[0], to_node_tuple[1] + 1): + for to_node_id in range( + to_node_tuple[0], to_node_tuple[1] + 1 + ): if from_node_id not in self.nodes: - self.add_node(Node(from_node_id, model=self.config.get("Model"))) + self.add_node(Node(from_node_id, model=self.model)) if to_node_id not in self.nodes: - self.add_node(Node(to_node_id, model=self.config.get("Model"))) + self.add_node(Node(to_node_id, model=self.model)) self.add_edge(from_node_id, to_node_id) self.input_layer = self.get_input_layer() self.output_layer = self.get_output_layer() for input_nodes in self.input_layer: - if (input_nodes.id != self.node_in.id) and (input_nodes.id != self.node_out.id): + if (input_nodes.id != self.node_in.id) and ( + input_nodes.id != self.node_out.id + ): self.add_edge(self.node_in.id, input_nodes.id) for output_nodes in self.output_layer: - if output_nodes.id != self.node_out.id and output_nodes.id != self.node_in.id: + if ( + output_nodes.id != self.node_out.id + 
and output_nodes.id != self.node_in.id + ): self.add_edge(output_nodes.id, self.node_out.id) if self.circular_check(): @@ -295,20 +375,32 @@ def build_graph(self, type_: str) -> None: else: self.view() self.depth = self.agent_deployment(type_) - + def display_image_with_imgcat(self, image_path): """Display the image with imgcat""" - subprocess.run(["imgcat", image_path]) + try: + subprocess.run(["imgcat", image_path],check=True, capture_output=True,text=True) + except: + print("...graph display failed. Moving on...") + pass def view(self, view: bool = False) -> None: """Visualize the graph using Graphviz and save it to a file.""" - graph_viz = Digraph(format="png", node_attr={"shape": "circle"}, edge_attr={"arrowhead": "normal"}) + graph_viz = Digraph( + format="png", + node_attr={"shape": "circle"}, + edge_attr={"arrowhead": "normal"}, + ) for node in self.nodes.values(): for successor in node.successors: graph_viz.edge(str(node.id), str(successor.id)) if view: - graph_viz.view(directory=f"./MacNetLog/{self.now}", filename=f"graph_{self.now}") - graph_viz.render(directory=f"./MacNetLog/{self.now}", filename=f"graph_{self.now}") + graph_viz.view( + directory=f"./MacNetLog/{self.now}", filename=f"graph_{self.now}" + ) + graph_viz.render( + directory=f"./MacNetLog/{self.now}", filename=f"graph_{self.now}" + ) print("MacNet starts running based on the following graph:") self.display_image_with_imgcat(f"./MacNetLog/{self.now}/graph_{self.now}.png") @@ -330,29 +422,63 @@ def execute(self, prompt: str, name: str) -> None: os.makedirs(cur_layer_dir, exist_ok=True) if layer == 0 and not os.path.exists(cur_layer_dir + "/Node -1"): os.makedirs(cur_layer_dir + "/Node -1") - with open(cur_layer_dir + "/Node -1/solution.txt", "w", encoding="utf-8") as f: + with open( + cur_layer_dir + "/Node -1/solution.txt", "w", encoding="utf-8" + ) as f: f.write(prompt) visited_edges, next_nodes = set(), set() for cur_node in input_nodes: - with open(cur_layer_dir + f"/Node {cur_node.id}/profile.txt", "w", encoding="utf-8") as f: + with open( + cur_layer_dir + f"/Node {cur_node.id}/profile.txt", + "w", + encoding="utf-8", + ) as f: f.write(cur_node.system_message) for next_node in cur_node.successors: - response, optimized_solution, suggestion = next_node.optimize(task_prompt=prompt, - pre_solution=cur_node.solution._get_codes(), - config=self.config, - name=name) + response, optimized_solution, suggestion = next_node.optimize( + task_prompt=prompt, + pre_solution=cur_node.solution._get_codes(), + name=name, + ) next_node.pre_solutions[cur_node.id] = optimized_solution - print("----------------------------------------------Complete!----------------------------------------------------------------------") - print(f"(Original Solution on Node {cur_node.id}) ---(Suggestions from Node {next_node.id} on Node {cur_node.id})---> (Optimized Solution on Node {next_node.id}, before Aggregation)") - justify_in_box(text=suggestion, title=f"Suggestions on Node {cur_node.id}'s solution:") - color_code_diff(cur_node.solution._get_codes(), response, cur_node.id, next_node.id) - log_info(f"Original Solution on Node {cur_node.id}:\n{cur_node.solution._get_codes()}", to_console=False) - log_info(f"Suggestions from Node {next_node.id} on Node {cur_node.id}:\n{suggestion}", to_console=False) - log_info(f"Optimized Solution on Node {next_node.id}:\n{response}", to_console=False) - with open(cur_layer_dir + f"/Node {cur_node.id}/suggestions.txt", "a", encoding="utf-8") as f: - f.write(f"\n\n{next_node.id}'s suggestion on {cur_node.id}'s 
solution:\n{suggestion}\n\n") + print( + "----------------------------------------------Complete!----------------------------------------------------------------------" + ) + print( + f"(Original Solution on Node {cur_node.id}) ---(Suggestions from Node {next_node.id} on Node {cur_node.id})---> (Optimized Solution on Node {next_node.id}, before Aggregation)" + ) + justify_in_box( + text=suggestion, + title=f"Suggestions on Node {cur_node.id}'s solution:", + ) + color_code_diff( + cur_node.solution._get_codes(), + response, + cur_node.id, + next_node.id, + ) + log_info( + f"Original Solution on Node {cur_node.id}:\n{cur_node.solution._get_codes()}", + to_console=False, + ) + log_info( + f"Suggestions from Node {next_node.id} on Node {cur_node.id}:\n{suggestion}", + to_console=False, + ) + log_info( + f"Optimized Solution on Node {next_node.id}:\n{response}", + to_console=False, + ) + with open( + cur_layer_dir + f"/Node {cur_node.id}/suggestions.txt", + "a", + encoding="utf-8", + ) as f: + f.write( + f"\n\n{next_node.id}'s suggestion on {cur_node.id}'s solution:\n{suggestion}\n\n" + ) visited_edges.add((cur_node.id, next_node.id)) next_nodes.add(next_node.id) @@ -362,31 +488,56 @@ def execute(self, prompt: str, name: str) -> None: os.makedirs(node_directory, exist_ok=True) os.makedirs(node_directory + "/pre_solutions", exist_ok=True) for prev_node in node.pre_solutions.keys(): - with open(node_directory + f"/pre_solutions/solution_{prev_node}.txt", "w") as f: + with open( + node_directory + f"/pre_solutions/solution_{prev_node}.txt", "w" + ) as f: for key in node.pre_solutions[prev_node].codebooks.keys(): - f.write(f"{key}\n\n{node.pre_solutions[prev_node].codebooks[key]}\n\n") - - if len(os.listdir(node_directory + "/pre_solutions")) != len(node.pre_solutions): - print("Error: the number of solutions is not equal to the number of files!") + f.write( + f"{key}\n\n{node.pre_solutions[prev_node].codebooks[key]}\n\n" + ) + + if len(os.listdir(node_directory + "/pre_solutions")) != len( + node.pre_solutions + ): + print( + "Error: the number of solutions is not equal to the number of files!" + ) exit(1) - if len(node.pre_solutions) == len(node.predecessors) and len(node.pre_solutions) >= self.aggregate_unit_num: + if ( + len(node.pre_solutions) == len(node.predecessors) + and len(node.pre_solutions) >= self.aggregate_unit_num + ): logging.info(f"Node {node.id} is aggregating") agg_layer_dir = node_directory + "/pre_solutions" - error_flag = node.aggregate(prompt, self.aggregate_retry_limit, self.aggregate_unit_num, - agg_layer_dir, self.depth, node_directory + "/solution.txt") + error_flag = node.aggregate( + prompt, + self.aggregate_retry_limit, + self.aggregate_unit_num, + agg_layer_dir, + self.depth, + node_directory + "/solution.txt", + ) if error_flag: - node.solution = node.pre_solutions[list(node.pre_solutions.keys())[0]] + node.solution = node.pre_solutions[ + list(node.pre_solutions.keys())[0] + ] with open(node_directory + "/solution.txt", "w") as f: for key in node.solution.codebooks.keys(): f.write(f"{key}\n\n{node.solution.codebooks[key]}\n\n") - logging.info(f"Node {node.id} failed aggregating pre_solutions.") + logging.info( + f"Node {node.id} failed aggregating pre_solutions." 
+ ) else: - node.solution = node.pre_solutions[list(node.pre_solutions.keys())[0]] + node.solution = node.pre_solutions[ + list(node.pre_solutions.keys())[0] + ] with open(node_directory + "/solution.txt", "w") as f: for key in node.solution.codebooks.keys(): f.write(f"{key}\n\n{node.solution.codebooks[key]}\n\n") - logging.info(f"Node {node.id} has insufficient predecessors, uses pre_solution.") + logging.info( + f"Node {node.id} has insufficient predecessors, uses pre_solution." + ) for edge in visited_edges: self.delete_edge(edge[0], edge[1]) @@ -414,10 +565,16 @@ def agent_deployment(self, _type): self.nodes[node.id].temperature = 1 - cur_depth / self.depth if _type == "None": - self.nodes[node.id].system_message = "You are an experienced programmer." + self.nodes[node.id].system_message = ( + "You are an experienced programmer." + ) else: profile_num = random.randint(1, 99) - with open(f"./SRDD_Profile/{_type}/{profile_num}.txt", "r", encoding="utf-8") as f: + with open( + f"./SRDD_Profile/{_type}/{profile_num}.txt", + "r", + encoding="utf-8", + ) as f: self.nodes[node.id].system_message = f.read() cur_depth += 1 break @@ -494,7 +651,3 @@ def dfs(_node): if node not in visited and dfs(node): return True return False - - - - From a82dee80a4155c8da53114534c4afb632b4432b5 Mon Sep 17 00:00:00 2001 From: ma3oun Date: Thu, 5 Dec 2024 17:11:13 +0100 Subject: [PATCH 3/6] Support for local models through a models_registry.json file (can also be specified). This provides support for Ollama. --- camel/agents/chat_agent.py | 151 ++++++++++++++--------- camel/agents/role_playing.py | 209 +++++++++++++++++++++----------- camel/messages/base.py | 58 +++++---- camel/messages/chat_messages.py | 4 + camel/model_backend.py | 113 ++++++----------- camel/typing.py | 33 +++-- camel/utils.py | 87 ++++--------- models_registry.json | 56 +++++++++ run.py | 55 +++++++-- 9 files changed, 446 insertions(+), 320 deletions(-) create mode 100644 models_registry.json diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index 908c89615..9667319c3 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -24,13 +24,13 @@ from camel.model_backend import ModelBackend, ModelFactory from camel.typing import ModelType, RoleType from camel.utils import ( - get_model_token_limit, num_tokens_from_messages, openai_api_key_required, ) from openai.types.chat import ChatCompletion from chatdev.utils import log_macnet + @dataclass(frozen=True) class ChatAgentResponse: r"""Response of a ChatAgent. @@ -44,6 +44,7 @@ class ChatAgentResponse: to terminate the chat session. info (Dict[str, Any]): Extra information about the chat message. 
""" + msgs: List[ChatMessage] terminated: bool info: Dict[str, Any] @@ -51,12 +52,18 @@ class ChatAgentResponse: @property def msg(self): if self.terminated: - raise RuntimeError("error in ChatAgentResponse, info:{}".format(str(self.info))) + raise RuntimeError( + "error in ChatAgentResponse, info:{}".format(str(self.info)) + ) if len(self.msgs) > 1: - raise RuntimeError("Property msg is only available for a single message in msgs") + raise RuntimeError( + "Property msg is only available for a single message in msgs" + ) elif len(self.msgs) == 0: if len(self.info) > 0: - raise RuntimeError("Empty msgs in ChatAgentResponse, info:{}".format(str(self.info))) + raise RuntimeError( + "Empty msgs in ChatAgentResponse, info:{}".format(str(self.info)) + ) else: # raise RuntimeError("Known issue that msgs is empty and there is no error info, to be fix") return None @@ -79,35 +86,41 @@ class ChatAgent(BaseAgent): """ def __init__( - self, - system_message: SystemMessage, - memory = None, - model: Optional[ModelType] = None, - model_config: Optional[Any] = None, - message_window_size: Optional[int] = None, - temperature: float = 0.2 + self, + system_message: SystemMessage, + memory=None, + model: Optional[ModelType] = None, + model_config: Optional[Any] = None, + message_window_size: Optional[int] = None, + temperature: float = 0.2, ) -> None: self.system_message: SystemMessage = system_message self.role_name: str = system_message.role_name self.role_type: RoleType = system_message.role_type - self.model: ModelType = (model if model is not None else ModelType.GPT_3_5_TURBO) + self.model: ModelType = ( + model if model is not None else ModelType("stub", 100000) + ) self.model_config: ChatGPTConfig = model_config or ChatGPTConfig(temperature) - self.model_token_limit: int = get_model_token_limit(self.model) + self.model_token_limit: int = self.model.num_tokens self.message_window_size: Optional[int] = message_window_size - self.model_backend: ModelBackend = ModelFactory.create(self.model, self.model_config.__dict__) + self.model_backend: ModelBackend = ModelFactory.create( + self.model, self.model_config.__dict__ + ) self.terminated: bool = False self.info: bool = False self.init_messages() - - if memory !=None and self.role_name in["Code Reviewer","Programmer","Software Test Engineer"]: + if memory != None and self.role_name in [ + "Code Reviewer", + "Programmer", + "Software Test Engineer", + ]: self.memory = memory.memory_data.get("All") else: self.memory = None def reset(self) -> List[MessageType]: - r"""Resets the :obj:`ChatAgent` to its initial state and returns the stored messages. @@ -119,13 +132,12 @@ def reset(self) -> List[MessageType]: return self.stored_messages def get_info( - self, - id: Optional[str], - usage: Optional[Dict[str, int]], - termination_reasons: List[str], - num_tokens: int, + self, + id: Optional[str], + usage: Optional[Dict[str, int]], + termination_reasons: List[str], + num_tokens: int, ) -> Dict[str, Any]: - r"""Returns a dictionary containing information about the chat session. 
Args: @@ -145,7 +157,7 @@ def get_info( "usage": usage, "termination_reasons": termination_reasons, "num_tokens": num_tokens, - } + } def init_messages(self) -> None: r"""Initializes the stored messages list with the initial system @@ -166,53 +178,65 @@ def update_messages(self, message: ChatMessage) -> List[MessageType]: self.stored_messages.append(message) return self.stored_messages - def use_memory(self,input_message) -> List[MessageType]: - if self.memory is None : + def use_memory(self, input_message) -> List[MessageType]: + if self.memory is None: return None else: if self.role_name == "Programmer": - result = self.memory.memory_retrieval(input_message,"code") + result = self.memory.memory_retrieval(input_message, "code") if result != None: - target_memory,distances, mids,task_list,task_dir_list = result + target_memory, distances, mids, task_list, task_dir_list = result if target_memory != None and len(target_memory) != 0: - target_memory="".join(target_memory) - log_macnet(self.role_name, - "thinking back and found some related code: \n--------------------------\n" - + target_memory + "\n--------------------------\n" - +"And the similarity is "+distances - +", the target code MIDs is "+";".join(mids) - +"\nThe task is " + ";".join(task_list).replace('\n', '') - + "\nThe task dir is "+ ";".join(task_dir_list).replace('\n', '')) + target_memory = "".join(target_memory) + log_macnet( + self.role_name, + "thinking back and found some related code: \n--------------------------\n" + + target_memory + + "\n--------------------------\n" + + "And the similarity is " + + distances + + ", the target code MIDs is " + + ";".join(mids) + + "\nThe task is " + + ";".join(task_list).replace("\n", "") + + "\nThe task dir is " + + ";".join(task_dir_list).replace("\n", ""), + ) else: target_memory = None - log_macnet(self.role_name, - "thinking back but find nothing useful") + log_macnet(self.role_name, "thinking back but find nothing useful") else: result = self.memory.memory_retrieval(input_message, "text") if result != None: target_memory, distances, mids, task_list, task_dir_list = result if target_memory != None and len(target_memory) != 0: - target_memory=";".join(target_memory) - log_macnet(self.role_name, - "thinking back and found some related text: \n--------------------------\n" - + target_memory + "\n--------------------------\n" - +"And the similarity is "+distances - +", the source code MIDs is "+";".join(mids) - +"\nThe task is " + ";".join(task_list).replace('\n', '') - + "\nThe task dir is "+ ";".join(task_dir_list).replace('\n', '')) + target_memory = ";".join(target_memory) + log_macnet( + self.role_name, + "thinking back and found some related text: \n--------------------------\n" + + target_memory + + "\n--------------------------\n" + + "And the similarity is " + + distances + + ", the source code MIDs is " + + ";".join(mids) + + "\nThe task is " + + ";".join(task_list).replace("\n", "") + + "\nThe task dir is " + + ";".join(task_dir_list).replace("\n", ""), + ) else: target_memory = None - log_macnet(self.role_name, - "thinking back but find nothing useful") + log_macnet(self.role_name, "thinking back but find nothing useful") return target_memory @retry(wait=wait_exponential(min=5, max=60), stop=stop_after_attempt(5)) @openai_api_key_required def step( - self, - input_message: ChatMessage, + self, + input_message: ChatMessage, ) -> ChatAgentResponse: r"""Performs a single step in the chat session by generating a response to the input message. 
@@ -227,10 +251,11 @@ def step( session. """ messages = self.update_messages(input_message) - if self.message_window_size is not None and len( - messages) > self.message_window_size: - messages = [self.system_message - ] + messages[-self.message_window_size:] + if ( + self.message_window_size is not None + and len(messages) > self.message_window_size + ): + messages = [self.system_message] + messages[-self.message_window_size :] openai_messages = [message.to_openai_message() for message in messages] num_tokens = num_tokens_from_messages(openai_messages, self.model) @@ -241,11 +266,19 @@ def step( response = self.model_backend.run(messages=openai_messages) if not isinstance(response, ChatCompletion): raise RuntimeError("OpenAI returned unexpected struct") - output_messages = [ - ChatMessage(role_name=self.role_name, role_type=self.role_type, - meta_dict=dict(), **dict(choice.message)) - for choice in response.choices - ] + output_messages = [] + + for choice in response.choices: + try: + msg = ChatMessage( + role_name=self.role_name, + role_type=self.role_type, + meta_dict=dict(), + **dict(choice.message), + ) + except TypeError: + print(f"...caused by:\n{choice.message}\n") + output_messages.append(msg) info = self.get_info( response.id, response.usage, diff --git a/camel/agents/role_playing.py b/camel/agents/role_playing.py index 7242b9d35..3ca3c9ffc 100644 --- a/camel/agents/role_playing.py +++ b/camel/agents/role_playing.py @@ -63,31 +63,31 @@ class RolePlaying: """ def __init__( - self, - assistant_role_name: str, - user_role_name: str, - critic_role_name: str = "critic", - task_prompt: str = "", - assistant_role_prompt: str = "", - user_role_prompt: str = "", - user_role_type: Optional[RoleType] = None, - assistant_role_type: Optional[RoleType] = None, - with_task_specify: bool = True, - with_task_planner: bool = False, - with_critic_in_the_loop: bool = False, - critic_criteria: Optional[str] = None, - model_type: ModelType = ModelType.GPT_3_5_TURBO, - task_type: TaskType = TaskType.AI_SOCIETY, - assistant_agent_kwargs: Optional[Dict] = None, - user_agent_kwargs: Optional[Dict] = None, - task_specify_agent_kwargs: Optional[Dict] = None, - task_planner_agent_kwargs: Optional[Dict] = None, - critic_kwargs: Optional[Dict] = None, - sys_msg_generator_kwargs: Optional[Dict] = None, - extend_sys_msg_meta_dicts: Optional[List[Dict]] = None, - extend_task_specify_meta_dict: Optional[Dict] = None, - memory = None, - placeholders = None + self, + assistant_role_name: str, + user_role_name: str, + critic_role_name: str = "critic", + task_prompt: str = "", + assistant_role_prompt: str = "", + user_role_prompt: str = "", + user_role_type: Optional[RoleType] = None, + assistant_role_type: Optional[RoleType] = None, + with_task_specify: bool = True, + with_task_planner: bool = False, + with_critic_in_the_loop: bool = False, + critic_criteria: Optional[str] = None, + model_type: ModelType = ModelType("stub", 100000), + task_type: TaskType = TaskType.AI_SOCIETY, + assistant_agent_kwargs: Optional[Dict] = None, + user_agent_kwargs: Optional[Dict] = None, + task_specify_agent_kwargs: Optional[Dict] = None, + task_planner_agent_kwargs: Optional[Dict] = None, + critic_kwargs: Optional[Dict] = None, + sys_msg_generator_kwargs: Optional[Dict] = None, + extend_sys_msg_meta_dicts: Optional[List[Dict]] = None, + extend_task_specify_meta_dict: Optional[Dict] = None, + memory=None, + placeholders=None, ) -> None: self.with_task_specify = with_task_specify self.with_task_planner = with_task_planner @@ -100,8 
+100,8 @@ def __init__( task_specify_meta_dict = dict() if self.task_type in [TaskType.AI_SOCIETY, TaskType.MISALIGNMENT]: task_specify_meta_dict.update( - dict(assistant_role=assistant_role_name, - user_role=user_role_name)) + dict(assistant_role=assistant_role_name, user_role=user_role_name) + ) if extend_task_specify_meta_dict is not None: task_specify_meta_dict.update(extend_task_specify_meta_dict) @@ -130,35 +130,61 @@ def __init__( self.task_prompt = task_prompt - chatdev_prompt_template = "ChatDev is a software company powered by multiple intelligent agents, such as chief executive officer, chief human resources officer, chief product officer, chief technology officer, etc, with a multi-agent organizational structure and the mission of \"changing the digital world through programming\"." + chatdev_prompt_template = 'ChatDev is a software company powered by multiple intelligent agents, such as chief executive officer, chief human resources officer, chief product officer, chief technology officer, etc, with a multi-agent organizational structure and the mission of "changing the digital world through programming".' - sys_msg_meta_dicts = [dict(chatdev_prompt=chatdev_prompt_template, task=task_prompt)] * 2 - if (extend_sys_msg_meta_dicts is None and self.task_type in [TaskType.AI_SOCIETY, TaskType.MISALIGNMENT, - TaskType.CHATDEV]): - extend_sys_msg_meta_dicts = [dict(assistant_role=assistant_role_name, user_role=user_role_name)] * 2 + sys_msg_meta_dicts = [ + dict(chatdev_prompt=chatdev_prompt_template, task=task_prompt) + ] * 2 + if extend_sys_msg_meta_dicts is None and self.task_type in [ + TaskType.AI_SOCIETY, + TaskType.MISALIGNMENT, + TaskType.CHATDEV, + ]: + extend_sys_msg_meta_dicts = [ + dict(assistant_role=assistant_role_name, user_role=user_role_name) + ] * 2 if extend_sys_msg_meta_dicts is not None: - sys_msg_meta_dicts = [{**sys_msg_meta_dict, **extend_sys_msg_meta_dict} for - sys_msg_meta_dict, extend_sys_msg_meta_dict in - zip(sys_msg_meta_dicts, extend_sys_msg_meta_dicts)] + sys_msg_meta_dicts = [ + {**sys_msg_meta_dict, **extend_sys_msg_meta_dict} + for sys_msg_meta_dict, extend_sys_msg_meta_dict in zip( + sys_msg_meta_dicts, extend_sys_msg_meta_dicts + ) + ] - self.assistant_sys_msg = SystemMessage(role_name=assistant_role_name, role_type=RoleType.DEFAULT, - meta_dict=sys_msg_meta_dicts[0], - content=assistant_role_prompt.format(**sys_msg_meta_dicts[0])) - self.user_sys_msg = SystemMessage(role_name=user_role_name, role_type=RoleType.DEFAULT, - meta_dict=sys_msg_meta_dicts[1], - content=user_role_prompt.format(**sys_msg_meta_dicts[1])) + self.assistant_sys_msg = SystemMessage( + role_name=assistant_role_name, + role_type=RoleType.DEFAULT, + meta_dict=sys_msg_meta_dicts[0], + content=assistant_role_prompt.format(**sys_msg_meta_dicts[0]), + ) + self.user_sys_msg = SystemMessage( + role_name=user_role_name, + role_type=RoleType.DEFAULT, + meta_dict=sys_msg_meta_dicts[1], + content=user_role_prompt.format(**sys_msg_meta_dicts[1]), + ) - self.assistant_agent: ChatAgent = ChatAgent(self.assistant_sys_msg, memory, model_type, - **(assistant_agent_kwargs or {}), ) - self.user_agent: ChatAgent = ChatAgent(self.user_sys_msg, memory,model_type,**(user_agent_kwargs or {}), ) + self.assistant_agent: ChatAgent = ChatAgent( + self.assistant_sys_msg, + memory, + model_type, + **(assistant_agent_kwargs or {}), + ) + self.user_agent: ChatAgent = ChatAgent( + self.user_sys_msg, + memory, + model_type, + **(user_agent_kwargs or {}), + ) if with_critic_in_the_loop: raise 
ValueError("with_critic_in_the_loop not available") else: self.critic = None - def init_chat(self, phase_type: PhaseType = None, - placeholders=None, phase_prompt=None): + def init_chat( + self, phase_type: PhaseType = None, placeholders=None, phase_prompt=None + ): r"""Initializes the chat by resetting both the assistant and user agents, and sending the system messages again to the agents using chat messages. Returns the assistant's introductory message and the @@ -173,12 +199,12 @@ def init_chat(self, phase_type: PhaseType = None, placeholders = {} self.assistant_agent.reset() self.user_agent.reset() - placeholders["examples"] = ' ' + placeholders["examples"] = " " content = phase_prompt.format( **({"assistant_role": self.assistant_agent.role_name} | placeholders) ) retrieval_memory = self.assistant_agent.use_memory(content) - if retrieval_memory!= None: + if retrieval_memory != None: placeholders["examples"] = retrieval_memory # refactored ChatDev @@ -188,26 +214,31 @@ def init_chat(self, phase_type: PhaseType = None, user_msg = UserChatMessage( role_name=self.user_sys_msg.role_name, role="user", - content=content + content=content, # content here will be concatenated with assistant role prompt (because we mock user and send msg to assistant) in the ChatAgent.step ) pseudo_msg = copy.deepcopy(user_msg) pseudo_msg.role = "assistant" self.user_agent.update_messages(pseudo_msg) - #user_msg_rst = user_msg.set_user_role_at_backend() + # user_msg_rst = user_msg.set_user_role_at_backend() # user_msg.phase_name = phase_name # here we concatenate to store the real message in the log - log_macnet(self.user_agent.role_name, - "**[Start Chat]**\n\n[" + self.assistant_agent.system_message.content + "]\n\n" + content) + log_macnet( + self.user_agent.role_name, + "**[Start Chat]**\n\n[" + + self.assistant_agent.system_message.content + + "]\n\n" + + content, + ) return None, user_msg def process_messages( - self, - messages: Sequence[ChatMessage], + self, + messages: Sequence[ChatMessage], ) -> ChatMessage: r"""Processes a list of chat messages, returning the processed message. If multiple messages are provided and `with_critic_in_the_loop` @@ -223,8 +254,9 @@ def process_messages( if len(messages) == 0: raise ValueError("No messages to process.") if len(messages) > 1 and not self.with_critic_in_the_loop: - raise ValueError("Got than one message to process. " - f"Num of messages: {len(messages)}.") + raise ValueError( + "Got than one message to process. " f"Num of messages: {len(messages)}." 
+ ) elif self.with_critic_in_the_loop and self.critic is not None: processed_msg = self.critic.step(messages) else: @@ -233,44 +265,79 @@ def process_messages( return processed_msg def step( - self, - user_msg: ChatMessage, - assistant_only: bool, + self, + user_msg: ChatMessage, + assistant_only: bool, ) -> Tuple[ChatAgentResponse, ChatAgentResponse]: - assert isinstance(user_msg, ChatMessage), print("broken user_msg: " + str(user_msg)) + assert isinstance(user_msg, ChatMessage), print( + "broken user_msg: " + str(user_msg) + ) # print("assistant...") user_msg_rst = user_msg.set_user_role_at_backend() assistant_response = self.assistant_agent.step(user_msg_rst) if assistant_response.terminated or assistant_response.msgs is None: return ( - ChatAgentResponse([assistant_response.msgs], assistant_response.terminated, assistant_response.info), - ChatAgentResponse([], False, {})) + ChatAgentResponse( + [assistant_response.msgs], + assistant_response.terminated, + assistant_response.info, + ), + ChatAgentResponse([], False, {}), + ) assistant_msg = self.process_messages(assistant_response.msgs) if self.assistant_agent.info: - return (ChatAgentResponse([assistant_msg], assistant_response.terminated, assistant_response.info), - ChatAgentResponse([], False, {})) + return ( + ChatAgentResponse( + [assistant_msg], + assistant_response.terminated, + assistant_response.info, + ), + ChatAgentResponse([], False, {}), + ) self.assistant_agent.update_messages(assistant_msg) if assistant_only: return ( - ChatAgentResponse([assistant_msg], assistant_response.terminated, assistant_response.info), - ChatAgentResponse([], False, {}) + ChatAgentResponse( + [assistant_msg], + assistant_response.terminated, + assistant_response.info, + ), + ChatAgentResponse([], False, {}), ) # print("user...") assistant_msg_rst = assistant_msg.set_user_role_at_backend() user_response = self.user_agent.step(assistant_msg_rst) if user_response.terminated or user_response.msgs is None: - return (ChatAgentResponse([assistant_msg], assistant_response.terminated, assistant_response.info), - ChatAgentResponse([user_response], user_response.terminated, user_response.info)) + return ( + ChatAgentResponse( + [assistant_msg], + assistant_response.terminated, + assistant_response.info, + ), + ChatAgentResponse( + [user_response], user_response.terminated, user_response.info + ), + ) user_msg = self.process_messages(user_response.msgs) if self.user_agent.info: - return (ChatAgentResponse([assistant_msg], assistant_response.terminated, assistant_response.info), - ChatAgentResponse([user_msg], user_response.terminated, user_response.info)) + return ( + ChatAgentResponse( + [assistant_msg], + assistant_response.terminated, + assistant_response.info, + ), + ChatAgentResponse( + [user_msg], user_response.terminated, user_response.info + ), + ) self.user_agent.update_messages(user_msg) return ( - ChatAgentResponse([assistant_msg], assistant_response.terminated, assistant_response.info), + ChatAgentResponse( + [assistant_msg], assistant_response.terminated, assistant_response.info + ), ChatAgentResponse([user_msg], user_response.terminated, user_response.info), ) diff --git a/camel/messages/base.py b/camel/messages/base.py index f4ca8f8ae..f4ef67baa 100644 --- a/camel/messages/base.py +++ b/camel/messages/base.py @@ -25,9 +25,13 @@ from camel.prompts import CodePrompt, TextPrompt from camel.typing import ModelType, RoleType -from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall +from 
openai.types.chat.chat_completion_message_tool_call import ( + ChatCompletionMessageToolCall, +) from openai.types.chat.chat_completion_message import FunctionCall -from openai.types.chat.chat_completion_content_part_refusal_param import ChatCompletionContentPartRefusalParam +from openai.types.chat.chat_completion_content_part_refusal_param import ( + ChatCompletionContentPartRefusalParam, +) @dataclass @@ -44,6 +48,7 @@ class BaseMessage: :obj:`"system"`, :obj:`"user"`, or :obj:`"assistant"`. content (str): The content of the message. """ + role_name: str role_type: RoleType meta_dict: Optional[Dict[str, str]] @@ -66,11 +71,9 @@ def __getattribute__(self, name: str) -> Any: Returns: Any: The attribute value. """ - delegate_methods = [ - method for method in dir(str) if not method.startswith('_') - ] + delegate_methods = [method for method in dir(str) if not method.startswith("_")] if name in delegate_methods: - content = super().__getattribute__('content') + content = super().__getattribute__("content") if isinstance(content, str): content_method = getattr(content, name, None) if callable(content_method): @@ -102,14 +105,13 @@ def wrapper(*args: Any, **kwargs: Any) -> Any: Any: The result of the delegate method. """ modified_args = [modify_arg(arg) for arg in args] - modified_kwargs = { - k: modify_arg(v) - for k, v in kwargs.items() - } - output = content_method(*modified_args, - **modified_kwargs) - return self._create_new_instance(output) if isinstance( - output, str) else output + modified_kwargs = {k: modify_arg(v) for k, v in kwargs.items()} + output = content_method(*modified_args, **modified_kwargs) + return ( + self._create_new_instance(output) + if isinstance(output, str) + else output + ) return wrapper return super().__getattribute__(name) @@ -124,10 +126,13 @@ def _create_new_instance(self, content: str) -> "BaseMessage": Returns: BaseMessage: The new instance of :obj:`BaseMessage`. """ - return self.__class__(role_name=self.role_name, - role_type=self.role_type, - meta_dict=self.meta_dict, role=self.role, - content=content) + return self.__class__( + role_name=self.role_name, + role_type=self.role_type, + meta_dict=self.meta_dict, + role=self.role, + content=content, + ) def __add__(self, other: Any) -> Union["BaseMessage", Any]: r"""Addition operator override for :obj:`BaseMessage`. @@ -145,7 +150,8 @@ def __add__(self, other: Any) -> Union["BaseMessage", Any]: else: raise TypeError( f"Unsupported operand type(s) for +: '{type(self)}' and " - f"'{type(other)}'") + f"'{type(other)}'" + ) return self._create_new_instance(combined_content) def __mul__(self, other: Any) -> Union["BaseMessage", Any]: @@ -163,7 +169,8 @@ def __mul__(self, other: Any) -> Union["BaseMessage", Any]: else: raise TypeError( f"Unsupported operand type(s) for *: '{type(self)}' and " - f"'{type(other)}'") + f"'{type(other)}'" + ) def __len__(self) -> int: r"""Length operator override for :obj:`BaseMessage`. @@ -185,21 +192,23 @@ def __contains__(self, item: str) -> bool: """ return item in self.content - def token_len(self, model: ModelType = ModelType.GPT_3_5_TURBO) -> int: + def token_len(self, model: ModelType = ModelType("stub", 100000)) -> int: r"""Calculate the token length of the message for the specified model. Args: model (ModelType, optional): The model type to calculate the token - length. (default: :obj:`ModelType.GPT_3_5_TURBO`) + length. (default: :obj:`ModelType.Stub`) Returns: int: The token length of the message. 
""" from camel.utils import num_tokens_from_messages + return num_tokens_from_messages([self.to_openai_chat_message()], model) def extract_text_and_code_prompts( - self) -> Tuple[List[TextPrompt], List[CodePrompt]]: + self, + ) -> Tuple[List[TextPrompt], List[CodePrompt]]: r"""Extract text and code prompts from the message content. Returns: @@ -214,8 +223,7 @@ def extract_text_and_code_prompts( idx = 0 start_idx = 0 while idx < len(lines): - while idx < len(lines) and ( - not lines[idx].lstrip().startswith("```")): + while idx < len(lines) and (not lines[idx].lstrip().startswith("```")): idx += 1 text = "\n".join(lines[start_idx:idx]).strip() text_prompts.append(TextPrompt(text)) diff --git a/camel/messages/chat_messages.py b/camel/messages/chat_messages.py index cae3d5dfd..72069fd55 100644 --- a/camel/messages/chat_messages.py +++ b/camel/messages/chat_messages.py @@ -31,12 +31,14 @@ class ChatMessage(BaseMessage): role (str): The role of the message in OpenAI chat system. content (str): The content of the message. (default: :obj:`""`) """ + role_name: str role_type: RoleType meta_dict: Optional[Dict[str, str]] role: str content: str = "" phase_name: str = "" + audio: str = "" def set_user_role_at_backend(self: BaseMessage): return self.__class__( @@ -64,6 +66,7 @@ class AssistantChatMessage(ChatMessage): (default: :obj:`"assistant"`) content (str): The content of the message. (default: :obj:`""`) """ + role_name: str role_type: RoleType = RoleType.ASSISTANT meta_dict: Optional[Dict[str, str]] = None @@ -84,6 +87,7 @@ class UserChatMessage(ChatMessage): (default: :obj:`"user"`) content (str): The content of the message. (default: :obj:`""`) """ + role_name: str role_type: RoleType = RoleType.USER meta_dict: Optional[Dict[str, str]] = None diff --git a/camel/model_backend.py b/camel/model_backend.py index dd95ec4fe..79a7a740b 100644 --- a/camel/model_backend.py +++ b/camel/model_backend.py @@ -30,9 +30,9 @@ import os -OPENAI_API_KEY = os.environ['OPENAI_API_KEY'] -if 'BASE_URL' in os.environ: - BASE_URL = os.environ['BASE_URL'] +OPENAI_API_KEY = os.environ["OPENAI_API_KEY"] +if "BASE_URL" in os.environ: + BASE_URL = os.environ["BASE_URL"] else: BASE_URL = None @@ -65,10 +65,13 @@ def __init__(self, model_type: ModelType, model_config_dict: Dict) -> None: def run(self, *args, **kwargs): string = "\n".join([message["content"] for message in kwargs["messages"]]) - encoding = tiktoken.encoding_for_model(self.model_type.value) - num_prompt_tokens = len(encoding.encode(string)) - gap_between_send_receive = 15 * len(kwargs["messages"]) - num_prompt_tokens += gap_between_send_receive + try: + encoding = tiktoken.encoding_for_model(self.model_type.name) + num_prompt_tokens = len(encoding.encode(string)) + gap_between_send_receive = 15 * len(kwargs["messages"]) + num_prompt_tokens += gap_between_send_receive + except: + num_prompt_tokens = 0 if openai_new_api: # Experimental, add base_url @@ -78,34 +81,20 @@ def run(self, *args, **kwargs): base_url=BASE_URL, ) else: - client = openai.OpenAI( - api_key=OPENAI_API_KEY - ) + client = openai.OpenAI(api_key=OPENAI_API_KEY) - num_max_token_map = { - "gpt-3.5-turbo": 4096, - "gpt-3.5-turbo-16k": 16384, - "gpt-3.5-turbo-0613": 4096, - "gpt-3.5-turbo-16k-0613": 16384, - "gpt-4": 8192, - "gpt-4-0613": 8192, - "gpt-4-32k": 32768, - "gpt-4-turbo": 100000, - "gpt-4o": 4096, #100000 - "gpt-4o-mini": 16384, #100000 - } - num_max_token = num_max_token_map[self.model_type.value] + num_max_token = self.model_type.num_tokens num_max_completion_tokens = num_max_token - 
num_prompt_tokens - self.model_config_dict['max_tokens'] = num_max_completion_tokens + self.model_config_dict["max_tokens"] = num_max_completion_tokens - response = client.chat.completions.create(*args, **kwargs, model=self.model_type.value, - **self.model_config_dict) - - cost = prompt_cost( - self.model_type.value, - num_prompt_tokens=response.usage.prompt_tokens, - num_completion_tokens=response.usage.completion_tokens + response = client.chat.completions.create( + *args, **kwargs, model=self.model_type.name, **self.model_config_dict ) + # cost = prompt_cost( + # self.model_type.name, + # num_prompt_tokens=response.usage.prompt_tokens, + # num_completion_tokens=response.usage.completion_tokens, + # ) # log_macnet( # "\n**[OpenAI_Usage_Info Receive]**\nprompt_tokens: {}\ncompletion_tokens: {}\ntotal_tokens: {}\ncost: ${:.6f}\n".format( @@ -115,31 +104,20 @@ def run(self, *args, **kwargs): raise RuntimeError("Unexpected return from OpenAI API") return response else: - num_max_token_map = { - "gpt-3.5-turbo": 4096, - "gpt-3.5-turbo-16k": 16384, - "gpt-3.5-turbo-0613": 4096, - "gpt-3.5-turbo-16k-0613": 16384, - "gpt-4": 8192, - "gpt-4-0613": 8192, - "gpt-4-32k": 32768, - "gpt-4-turbo": 100000, - "gpt-4o": 4096, #100000 - "gpt-4o-mini": 16384, #100000 - } - num_max_token = num_max_token_map[self.model_type.value] + num_max_token = self.model_type.num_tokens num_max_completion_tokens = num_max_token - num_prompt_tokens - self.model_config_dict['max_tokens'] = num_max_completion_tokens - - response = openai.ChatCompletion.create(*args, **kwargs, model=self.model_type.value, - **self.model_config_dict) + self.model_config_dict["max_tokens"] = num_max_completion_tokens - cost = prompt_cost( - self.model_type.value, - num_prompt_tokens=response["usage"]["prompt_tokens"], - num_completion_tokens=response["usage"]["completion_tokens"] + response = openai.ChatCompletion.create( + *args, **kwargs, model=self.model_type.value, **self.model_config_dict ) + # cost = prompt_cost( + # self.model_type.name, + # num_prompt_tokens=response["usage"]["prompt_tokens"], + # num_completion_tokens=response["usage"]["completion_tokens"], + # ) + # log_macnet( # "\n**[OpenAI_Usage_Info Receive]**\nprompt_tokens: {}\ncompletion_tokens: {}\ntotal_tokens: {}\ncost: ${:.6f}\n".format( # response["usage"]["prompt_tokens"], response["usage"]["completion_tokens"], @@ -162,8 +140,10 @@ def run(self, *args, **kwargs) -> Dict[str, Any]: id="stub_model_id", usage=dict(), choices=[ - dict(finish_reason="stop", - message=dict(content=ARBITRARY_STRING, role="assistant")) + dict( + finish_reason="stop", + message=dict(content=ARBITRARY_STRING, role="assistant"), + ) ], ) @@ -177,28 +157,11 @@ class ModelFactory: @staticmethod def create(model_type: ModelType, model_config_dict: Dict) -> ModelBackend: - default_model_type = ModelType.GPT_3_5_TURBO - - if model_type in { - ModelType.GPT_3_5_TURBO, - ModelType.GPT_3_5_TURBO_NEW, - ModelType.GPT_4, - ModelType.GPT_4_32k, - ModelType.GPT_4_TURBO, - ModelType.GPT_4_TURBO_V, - ModelType.GPT_4O, - ModelType.GPT_4O_MINI, - None - }: - model_class = OpenAIModel - elif model_type == ModelType.STUB: - model_class = StubModel - else: - raise ValueError("Unknown model") - if model_type is None: - model_type = default_model_type + model_class = ( + OpenAIModel if model_type and model_type.name != "stub" else StubModel + ) # log_visualize("Model Type: {}".format(model_type)) inst = model_class(model_type, model_config_dict) - return inst \ No newline at end of file + return inst diff --git 
a/camel/typing.py b/camel/typing.py index 9690f643c..024d6977d 100644 --- a/camel/typing.py +++ b/camel/typing.py @@ -12,6 +12,7 @@ # limitations under the License. # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== from enum import Enum +import json class TaskType(Enum): @@ -43,21 +44,27 @@ class RoleType(Enum): CHATDEV_CCO = "chief creative officer (CCO)" -class ModelType(Enum): - GPT_3_5_TURBO = "gpt-3.5-turbo-16k-0613" - GPT_3_5_TURBO_NEW = "gpt-3.5-turbo-16k" - GPT_4 = "gpt-4" - GPT_4_32k = "gpt-4-32k" - GPT_4_TURBO = "gpt-4-turbo" - GPT_4_TURBO_V = "gpt-4-turbo" - GPT_4O = "gpt-4o" - GPT_4O_MINI = "gpt-4o-mini" +class ModelType: - STUB = "stub" + def __init__(self, name: str, num_tokens: int): + self.name = name + self.num_tokens = num_tokens + return - @property - def value_for_tiktoken(self): - return self.value if self.name != "STUB" else "gpt-4o-mini" + # GPT_3_5_TURBO = "gpt-3.5-turbo-16k-0613" + # GPT_3_5_TURBO_NEW = "gpt-3.5-turbo-16k" + # GPT_4 = "gpt-4" + # GPT_4_32k = "gpt-4-32k" + # GPT_4_TURBO = "gpt-4-turbo" + # GPT_4_TURBO_V = "gpt-4-turbo" + # GPT_4O = "gpt-4o" + # GPT_4O_MINI = "gpt-4o-mini" + # CUSTOM = "custom" + # STUB = "stub" + + # @property + # def value_for_tiktoken(self): + # return self.value["name"] if self.name != "STUB" else "gpt-4o-mini" class PhaseType(Enum): diff --git a/camel/utils.py b/camel/utils.py index beecc3b82..246e47283 100644 --- a/camel/utils.py +++ b/camel/utils.py @@ -23,14 +23,14 @@ from camel.messages import OpenAIMessage from camel.typing import ModelType, TaskType -F = TypeVar('F', bound=Callable[..., Any]) +F = TypeVar("F", bound=Callable[..., Any]) import time def count_tokens_openai_chat_models( - messages: List[OpenAIMessage], - encoding: Any, + messages: List[OpenAIMessage], + encoding: Any, ) -> int: r"""Counts the number of tokens required to generate an OpenAI chat based on a given list of messages. @@ -55,8 +55,8 @@ def count_tokens_openai_chat_models( def num_tokens_from_messages( - messages: List[OpenAIMessage], - model: ModelType, + messages: List[OpenAIMessage], + model: ModelType, ) -> int: r"""Returns the number of tokens used by a list of messages. @@ -77,61 +77,13 @@ def num_tokens_from_messages( - https://platform.openai.com/docs/models/gpt-3-5 """ try: - value_for_tiktoken = model.value_for_tiktoken - encoding = tiktoken.encoding_for_model(value_for_tiktoken) + encoding = tiktoken.encoding_for_model(model.name) except KeyError: encoding = tiktoken.get_encoding("cl100k_base") + except: + encoding = tiktoken.get_encoding("cl100k_base") - if model in { - ModelType.GPT_3_5_TURBO, - ModelType.GPT_3_5_TURBO_NEW, - ModelType.GPT_4, - ModelType.GPT_4_32k, - ModelType.GPT_4_TURBO, - ModelType.GPT_4_TURBO_V, - ModelType.GPT_4O, - ModelType.GPT_4O_MINI, - ModelType.STUB - }: - return count_tokens_openai_chat_models(messages, encoding) - else: - raise NotImplementedError( - f"`num_tokens_from_messages`` is not presently implemented " - f"for model {model}. " - f"See https://github.com/openai/openai-python/blob/main/chatml.md " - f"for information on how messages are converted to tokens. " - f"See https://platform.openai.com/docs/models/gpt-4" - f"or https://platform.openai.com/docs/models/gpt-3-5" - f"for information about openai chat models.") - - -def get_model_token_limit(model: ModelType) -> int: - r"""Returns the maximum token limit for a given model. - - Args: - model (ModelType): The type of the model. - - Returns: - int: The maximum token limit for the given model. 
- """ - if model == ModelType.GPT_3_5_TURBO: - return 16384 - elif model == ModelType.GPT_3_5_TURBO_NEW: - return 16384 - elif model == ModelType.GPT_4: - return 8192 - elif model == ModelType.GPT_4_32k: - return 32768 - elif model == ModelType.GPT_4_TURBO: - return 128000 - elif model == ModelType.STUB: - return 4096 - elif model == ModelType.GPT_4O: - return 128000 - elif model == ModelType.GPT_4O_MINI: - return 128000 - else: - raise ValueError("Unknown model type") + return count_tokens_openai_chat_models(messages, encoding) def openai_api_key_required(func: F) -> F: @@ -152,14 +104,15 @@ def openai_api_key_required(func: F) -> F: @wraps(func) def wrapper(self, *args, **kwargs): from camel.agents.chat_agent import ChatAgent + if not isinstance(self, ChatAgent): raise ValueError("Expected ChatAgent") - if self.model == ModelType.STUB: + if self.model.name == "stub": return func(self, *args, **kwargs) - elif 'OPENAI_API_KEY' in os.environ: + elif "OPENAI_API_KEY" in os.environ: return func(self, *args, **kwargs) else: - raise ValueError('OpenAI API key not found.') + raise ValueError("OpenAI API key not found.") return wrapper @@ -177,7 +130,7 @@ def print_text_animated(text, delay: float = 0.005, end: str = ""): for char in text: print(char, end=end, flush=True) time.sleep(delay) - print('\n') + print("\n") def get_prompt_template_key_words(template: str) -> Set[str]: @@ -194,7 +147,7 @@ def get_prompt_template_key_words(template: str) -> Set[str]: >>> get_prompt_template_key_words('Hi, {name}! How are you {status}?') {'name', 'status'} """ - return set(re.findall(r'{([^}]*)}', template)) + return set(re.findall(r"{([^}]*)}", template)) def get_first_int(string: str) -> Optional[int]: @@ -209,7 +162,7 @@ def get_first_int(string: str) -> Optional[int]: int or None: The first integer number found in the string, or None if no integer number is found. 
""" - match = re.search(r'\d+', string) + match = re.search(r"\d+", string) if match: return int(match.group()) else: @@ -221,8 +174,10 @@ def download_tasks(task: TaskType, folder_path: str) -> None: zip_file_path = os.path.join(folder_path, "tasks.zip") # Download the zip file from the Google Drive link - response = requests.get("https://huggingface.co/datasets/camel-ai/" - f"metadata/resolve/main/{task.value}_tasks.zip") + response = requests.get( + "https://huggingface.co/datasets/camel-ai/" + f"metadata/resolve/main/{task.value}_tasks.zip" + ) # Save the zip file with open(zip_file_path, "wb") as f: @@ -232,4 +187,4 @@ def download_tasks(task: TaskType, folder_path: str) -> None: zip_ref.extractall(folder_path) # Delete the zip file - os.remove(zip_file_path) \ No newline at end of file + os.remove(zip_file_path) diff --git a/models_registry.json b/models_registry.json new file mode 100644 index 000000000..982e6602b --- /dev/null +++ b/models_registry.json @@ -0,0 +1,56 @@ +{ + "models": [ + { + "name": "gpt-3.5-turbo", + "num_tokens": 4096 + }, + { + "name": "gpt-3.5-turbo-16k", + "num_tokens": 16384 + }, + { + "name": "gpt-3.5-turbo-0613", + "num_tokens": 4096 + }, + { + "name": "gpt-3.5-turbo-16k-0613", + "num_tokens": 16384 + }, + { + "name": "gpt-4", + "num_tokens": 8192 + }, + { + "name": "gpt-4-0613", + "num_tokens": 8192 + }, + { + "name": "gpt-4-32k", + "num_tokens": 32768 + }, + { + "name": "gpt-4-turbo", + "num_tokens": 100000 + }, + { + "name": "stub", + "num_tokens": 16384 + }, + { + "name":"llama3.1:8b", + "num_tokens":100000 + }, + { + "name":"llama3.2:3b", + "num_tokens":100000 + }, + { + "name":"nemotron-mini:latest", + "num_tokens":16384 + }, + { + "name":"qwen2.5-coder:32b", + "num_tokens":16384 + } + ] + } \ No newline at end of file diff --git a/run.py b/run.py index f0a19ab81..ee196ac72 100644 --- a/run.py +++ b/run.py @@ -1,35 +1,68 @@ import argparse from graph import * import yaml +import json -def load_config(): + +def load_config(config_file: str = "config.yaml"): """Load configuration from YAML file.""" - with open("config.yaml", "r", encoding="utf-8") as f: + with open(config_file, "r", encoding="utf-8") as f: return yaml.load(f.read(), Loader=yaml.FullLoader) + def parse_arguments(): """Parse command line arguments.""" - parser = argparse.ArgumentParser(description='argparse') - parser.add_argument('--task', type=str, default="Develop a basic Gomoku game.", help="Prompt of software") - parser.add_argument('--name', type=str, default="Gomoku") - parser.add_argument('--type', type=str, default="None") + parser = argparse.ArgumentParser(description="argparse") + parser.add_argument( + "--config", type=str, default="config.yaml", help="Configuration file" + ) + parser.add_argument( + "--task", + type=str, + default="Develop a basic Gomoku game.", + help="Prompt of software", + ) + parser.add_argument("--name", type=str, default="Gomoku") + parser.add_argument("--type", type=str, default="None") + parser.add_argument( + "--registry", + type=str, + default="models_registry.json", + help="JSON file listing allowed models and tokens window size", + ) return parser.parse_args() + def main(): """Main execution function.""" - config = load_config() args = parse_arguments() + config = load_config(args.config) + models_registry_file = args.registry + model_name = config.get("Model") + with open(models_registry_file, "r") as f: + models_registry = json.load(f)["models"] + + model = ModelType("stub", 100000) + model_known = False + for m in models_registry: + if 
m["name"] == model_name: + model = ModelType(**m) + model_known = True + break + if not model_known: + raise ValueError(f"Unknown model: {model_name}") # Graph construction and agents deployment - graph = Graph(config) + graph = Graph(config, model) graph.build_graph(args.type) graph.agent_deployment(args.type) graph.execute(args.task, args.name) - with open(graph.directory + "/config.yaml", "w", encoding="utf-8") as f: + with open(graph.directory + "/" + args.config, "w", encoding="utf-8") as f: yaml.dump(config, f) - + print("MacNet completes!") + if __name__ == "__main__": - main() \ No newline at end of file + main() From ef8b85affed7c1a10bac6548b491ce06de64567c Mon Sep 17 00:00:00 2001 From: ma3oun Date: Thu, 5 Dec 2024 17:12:37 +0100 Subject: [PATCH 4/6] Added custom files --- .gitignore | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.gitignore b/.gitignore index f851dbab6..9ea418ef9 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,12 @@ .venv/ env/ venv/ +env.sh + +# project +MacNetLog/* +custom.yaml +tmp/* +WareHouse/* +CompanyConfig/* + From 88e7042b41131d4f811c5b9dcf341bf9e6fdb474 Mon Sep 17 00:00:00 2001 From: ma3oun Date: Thu, 5 Dec 2024 17:15:16 +0100 Subject: [PATCH 5/6] Updated README with support for Ollama --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 0ebe7af14..84a5af1f2 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ ## 📖 Overview -MacNet is a specialized branch of [ChatDev](https://github.com/OpenBMB/ChatDev) that focuses on supporting the research of agent scaling on arbitrary topologies using the Multi-Agent Collaboration Networks (MacNet) framework. MacNet utilizes directed acyclic graphs to facilitate effective task-oriented collaboration among agents through linguistic interactions. It supports cooperation across various topologies and among a large number of agents without exceeding context limits. +MacNet is a specialized branch of [ChatDev](https://github.com/OpenBMB/ChatDev) that focuses on supporting the research of agent scaling on arbitrary topologies using the Multi-Agent Collaboration Networks (MacNet) framework. MacNet utilizes directed acyclic graphs to facilitate effective task-oriented collaboration among agents through linguistic interactions. It supports cooperation across various topologies and among a large number of agents without exceeding context limits. MacNet can now be used with ollama and open-webui. ## ⚡️ Quickstart From 59b72d9156319853f344247fc828c4a42f13540f Mon Sep 17 00:00:00 2001 From: ma3oun Date: Thu, 5 Dec 2024 17:31:54 +0100 Subject: [PATCH 6/6] bug fix --- graph.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/graph.py b/graph.py index 8005e7ee5..70aad0ae2 100644 --- a/graph.py +++ b/graph.py @@ -348,9 +348,9 @@ def build_graph(self, type_: str) -> None: to_node_tuple[0], to_node_tuple[1] + 1 ): if from_node_id not in self.nodes: - self.add_node(Node(from_node_id, model=self.model)) + self.add_node(Node(from_node_id, model=self.model,config=self.config)) if to_node_id not in self.nodes: - self.add_node(Node(to_node_id, model=self.model)) + self.add_node(Node(to_node_id, model=self.model,config=self.config)) self.add_edge(from_node_id, to_node_id) self.input_layer = self.get_input_layer() self.output_layer = self.get_output_layer()