diff --git a/agents/code_review.py b/agents/code_review.py new file mode 100644 index 0000000..5114f4e --- /dev/null +++ b/agents/code_review.py @@ -0,0 +1,108 @@ +from praisonaiagents import Agent, Task, PraisonAIAgents +import time +from typing import List, Dict + +def analyze_code_changes(): + """Simulates code analysis""" + issues = [ + {"type": "style", "severity": "low", "file": "main.py"}, + {"type": "security", "severity": "high", "file": "auth.py"}, + {"type": "performance", "severity": "medium", "file": "data.py"} + ] + return issues[int(time.time()) % 3] + +def suggest_fixes(issue: Dict): + """Simulates fix suggestions""" + fixes = { + "style": "Apply PEP 8 formatting", + "security": "Implement input validation", + "performance": "Use list comprehension" + } + return fixes.get(issue["type"], "Review manually") + +def apply_automated_fix(fix: str): + """Simulates applying automated fixes""" + success = int(time.time()) % 2 == 0 + return "fixed" if success else "manual_review" + +# Create specialized agents +analyzer = Agent( + name="Code Analyzer", + role="Code analysis", + goal="Analyze code changes and identify issues", + instructions="Review code changes and report issues", + tools=[analyze_code_changes] +) + +fix_suggester = Agent( + name="Fix Suggester", + role="Solution provider", + goal="Suggest fixes for identified issues", + instructions="Provide appropriate fix suggestions", + tools=[suggest_fixes] +) + +fix_applier = Agent( + name="Fix Applier", + role="Fix implementation", + goal="Apply suggested fixes automatically when possible", + instructions="Implement suggested fixes and report results", + tools=[apply_automated_fix] +) + +# Create workflow tasks +analysis_task = Task( + name="analyze_code", + description="Analyze code changes for issues", + expected_output="Identified code issues", + agent=analyzer, + is_start=True, + next_tasks=["suggest_fixes"] +) + +suggestion_task = Task( + name="suggest_fixes", + description="Suggest fixes for identified issues", + expected_output="Fix suggestions", + agent=fix_suggester, + next_tasks=["apply_fixes"] +) + +fix_task = Task( + name="apply_fixes", + description="Apply suggested fixes", + expected_output="Fix application status", + agent=fix_applier, + task_type="decision", + condition={ + "fixed": "", + "manual_review": ["suggest_fixes"] # Loop back for manual review + } +) + +# Create workflow +workflow = PraisonAIAgents( + agents=[analyzer, fix_suggester, fix_applier], + tasks=[analysis_task, suggestion_task, fix_task], + process="workflow", + verbose=True +) + +def main(): + print("\nStarting Code Review Workflow...") + print("=" * 50) + + # Run workflow + results = workflow.start() + + # Print results + print("\nCode Review Results:") + print("=" * 50) + for task_id, result in results["task_results"].items(): + if result: + print(f"\nTask: {task_id}") + print(f"Result: {result.raw}") + print("-" * 50) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/agents/llamaindex_example.py b/agents/llamaindex_example.py new file mode 100644 index 0000000..e69de29 diff --git a/agents/multimodal.py b/agents/multimodal.py index 68dde97..0362abf 100644 --- a/agents/multimodal.py +++ b/agents/multimodal.py @@ -51,9 +51,4 @@ ) # Run all tasks -result = agents.start() - -# Print results -for task_id, task_result in result["task_results"].items(): - print(f"\nTask {task_id} Result:") - print(task_result.raw) \ No newline at end of file +agents.start() \ No newline at end of file diff --git 
a/agents/pdf_agents_example.py b/agents/pdf_agents_example.py new file mode 100644 index 0000000..326c775 --- /dev/null +++ b/agents/pdf_agents_example.py @@ -0,0 +1,41 @@ +from praisonaiagents import Agent, Task, PraisonAIAgents + +# Create PDF Analysis Agent +pdf_agent = Agent( + name="PDFAnalyst", + role="PDF Document Specialist", + goal="Analyze PDF documents to extract meaningful information", + backstory="""You are an expert in PDF document analysis and text extraction. + You excel at understanding document structure, extracting content, and analyzing textual information.""", + llm="gpt-4o-mini", + self_reflect=False +) + +# 1. Task with PDF URL +task1 = Task( + name="analyze_pdf_url", + description="Extract and analyze content from this PDF document.", + expected_output="Detailed analysis of the PDF content and structure", + agent=pdf_agent, + input=["https://example.com/document.pdf"] +) + +# 2. Task with Local PDF File +task2 = Task( + name="analyze_local_pdf", + description="What information can you extract from this PDF? Analyze its content.", + expected_output="Detailed analysis of the PDF content and structure", + agent=pdf_agent, + input=["document.pdf"] +) + +# Create PraisonAIAgents instance +agents = PraisonAIAgents( + agents=[pdf_agent], + tasks=[task1, task2], + process="sequential", + verbose=1 +) + +# Run all tasks +agents.start() \ No newline at end of file diff --git a/cookbooks/usecases/adaptive-learning.py b/cookbooks/usecases/adaptive-learning.py new file mode 100644 index 0000000..23e09d4 --- /dev/null +++ b/cookbooks/usecases/adaptive-learning.py @@ -0,0 +1,132 @@ +from praisonaiagents import Agent, Task, PraisonAIAgents +import time +from typing import Dict + +def assess_student_level(): + """Simulates student assessment""" + levels = ["beginner", "intermediate", "advanced"] + current_time = int(time.time()) + return levels[current_time % 3] + +def generate_content(level: str): + """Simulates content generation""" + content_types = { + "beginner": "basic concepts and examples", + "intermediate": "practice problems and applications", + "advanced": "complex scenarios and projects" + } + return content_types.get(level, "basic concepts") + +def evaluate_performance(): + """Simulates performance evaluation""" + scores = ["low", "medium", "high"] + current_time = int(time.time()) + return scores[current_time % 3] + +def adapt_difficulty(performance: str): + """Simulates difficulty adaptation""" + adaptations = { + "low": "decrease", + "medium": "maintain", + "high": "increase" + } + return adaptations.get(performance, "maintain") + +# Create specialized agents +assessor = Agent( + name="Student Assessor", + role="Level Assessment", + goal="Assess student's current level", + instructions="Evaluate student's knowledge and skills", + tools=[assess_student_level] +) + +generator = Agent( + name="Content Generator", + role="Content Creation", + goal="Generate appropriate learning content", + instructions="Create content based on student's level", + tools=[generate_content] +) + +evaluator = Agent( + name="Performance Evaluator", + role="Performance Assessment", + goal="Evaluate student's performance", + instructions="Assess learning outcomes", + tools=[evaluate_performance] +) + +adapter = Agent( + name="Difficulty Adapter", + role="Content Adaptation", + goal="Adapt content difficulty", + instructions="Adjust difficulty based on performance", + tools=[adapt_difficulty] +) + +# Create workflow tasks +assessment_task = Task( + name="assess_level", + description="Assess 
student's current level", + expected_output="Student's proficiency level", + agent=assessor, + is_start=True, + next_tasks=["generate_content"] +) + +generation_task = Task( + name="generate_content", + description="Generate appropriate content", + expected_output="Learning content", + agent=generator, + next_tasks=["evaluate_performance"] +) + +evaluation_task = Task( + name="evaluate_performance", + description="Evaluate student's performance", + expected_output="Performance assessment", + agent=evaluator, + next_tasks=["adapt_difficulty"] +) + +adaptation_task = Task( + name="adapt_difficulty", + description="Adapt content difficulty", + expected_output="Difficulty adjustment", + agent=adapter, + task_type="decision", + condition={ + "decrease": ["generate_content"], + "maintain": "", + "increase": ["generate_content"] + } +) + +# Create workflow +workflow = PraisonAIAgents( + agents=[assessor, generator, evaluator, adapter], + tasks=[assessment_task, generation_task, evaluation_task, adaptation_task], + process="workflow", + verbose=True +) + +def main(): + print("\nStarting Adaptive Learning Workflow...") + print("=" * 50) + + # Run workflow + results = workflow.start() + + # Print results + print("\nAdaptive Learning Results:") + print("=" * 50) + for task_id, result in results["task_results"].items(): + if result: + print(f"\nTask: {task_id}") + print(f"Result: {result.raw}") + print("-" * 50) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/cookbooks/usecases/code-review.py b/cookbooks/usecases/code-review.py new file mode 100644 index 0000000..5114f4e --- /dev/null +++ b/cookbooks/usecases/code-review.py @@ -0,0 +1,108 @@ +from praisonaiagents import Agent, Task, PraisonAIAgents +import time +from typing import List, Dict + +def analyze_code_changes(): + """Simulates code analysis""" + issues = [ + {"type": "style", "severity": "low", "file": "main.py"}, + {"type": "security", "severity": "high", "file": "auth.py"}, + {"type": "performance", "severity": "medium", "file": "data.py"} + ] + return issues[int(time.time()) % 3] + +def suggest_fixes(issue: Dict): + """Simulates fix suggestions""" + fixes = { + "style": "Apply PEP 8 formatting", + "security": "Implement input validation", + "performance": "Use list comprehension" + } + return fixes.get(issue["type"], "Review manually") + +def apply_automated_fix(fix: str): + """Simulates applying automated fixes""" + success = int(time.time()) % 2 == 0 + return "fixed" if success else "manual_review" + +# Create specialized agents +analyzer = Agent( + name="Code Analyzer", + role="Code analysis", + goal="Analyze code changes and identify issues", + instructions="Review code changes and report issues", + tools=[analyze_code_changes] +) + +fix_suggester = Agent( + name="Fix Suggester", + role="Solution provider", + goal="Suggest fixes for identified issues", + instructions="Provide appropriate fix suggestions", + tools=[suggest_fixes] +) + +fix_applier = Agent( + name="Fix Applier", + role="Fix implementation", + goal="Apply suggested fixes automatically when possible", + instructions="Implement suggested fixes and report results", + tools=[apply_automated_fix] +) + +# Create workflow tasks +analysis_task = Task( + name="analyze_code", + description="Analyze code changes for issues", + expected_output="Identified code issues", + agent=analyzer, + is_start=True, + next_tasks=["suggest_fixes"] +) + +suggestion_task = Task( + name="suggest_fixes", + description="Suggest fixes for identified issues", + 
expected_output="Fix suggestions", + agent=fix_suggester, + next_tasks=["apply_fixes"] +) + +fix_task = Task( + name="apply_fixes", + description="Apply suggested fixes", + expected_output="Fix application status", + agent=fix_applier, + task_type="decision", + condition={ + "fixed": "", + "manual_review": ["suggest_fixes"] # Loop back for manual review + } +) + +# Create workflow +workflow = PraisonAIAgents( + agents=[analyzer, fix_suggester, fix_applier], + tasks=[analysis_task, suggestion_task, fix_task], + process="workflow", + verbose=True +) + +def main(): + print("\nStarting Code Review Workflow...") + print("=" * 50) + + # Run workflow + results = workflow.start() + + # Print results + print("\nCode Review Results:") + print("=" * 50) + for task_id, result in results["task_results"].items(): + if result: + print(f"\nTask: {task_id}") + print(f"Result: {result.raw}") + print("-" * 50) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/cookbooks/usecases/customer-service.py b/cookbooks/usecases/customer-service.py new file mode 100644 index 0000000..8adbe92 --- /dev/null +++ b/cookbooks/usecases/customer-service.py @@ -0,0 +1,139 @@ +from praisonaiagents import Agent, Task, PraisonAIAgents +import time +from typing import Dict, List + +def classify_query(): + """Simulates query classification""" + query_types = [ + {"type": "technical", "priority": "high", "complexity": "complex"}, + {"type": "billing", "priority": "medium", "complexity": "simple"}, + {"type": "general", "priority": "low", "complexity": "simple"} + ] + return query_types[int(time.time()) % 3] + +def handle_query(query: Dict): + """Simulates query handling""" + responses = { + "technical": "Technical support solution provided", + "billing": "Billing inquiry resolved", + "general": "General information provided" + } + return responses.get(query["type"], "Query forwarded to specialist") + +def evaluate_satisfaction(): + """Simulates satisfaction evaluation""" + scores = ["satisfied", "neutral", "unsatisfied"] + return scores[int(time.time()) % 3] + +def optimize_response(satisfaction: str): + """Simulates response optimization""" + optimizations = { + "satisfied": "maintain_approach", + "neutral": "minor_adjustments", + "unsatisfied": "major_revision" + } + return optimizations.get(satisfaction, "review_process") + +# Create specialized agents +classifier = Agent( + name="Query Classifier", + role="Query Classification", + goal="Classify incoming customer queries", + instructions="Analyze and categorize customer queries", + tools=[classify_query] +) + +handler = Agent( + name="Query Handler", + role="Query Resolution", + goal="Handle customer queries appropriately", + instructions="Provide appropriate responses to queries", + tools=[handle_query] +) + +evaluator = Agent( + name="Satisfaction Evaluator", + role="Satisfaction Assessment", + goal="Evaluate customer satisfaction", + instructions="Assess response effectiveness", + tools=[evaluate_satisfaction] +) + +optimizer = Agent( + name="Response Optimizer", + role="Service Optimization", + goal="Optimize service based on feedback", + instructions="Improve response strategies", + tools=[optimize_response] +) + +# Create workflow tasks +classification_task = Task( + name="classify_query", + description="Classify customer query", + expected_output="Query classification", + agent=classifier, + is_start=True, + task_type="decision", + condition={ + "high": ["handle_query", "evaluate_satisfaction"], + "medium": ["handle_query", 
"evaluate_satisfaction"], + "low": ["handle_query"] + } +) + +handling_task = Task( + name="handle_query", + description="Handle customer query", + expected_output="Query response", + agent=handler, + next_tasks=["evaluate_satisfaction"] +) + +evaluation_task = Task( + name="evaluate_satisfaction", + description="Evaluate customer satisfaction", + expected_output="Satisfaction level", + agent=evaluator, + next_tasks=["optimize_response"] +) + +optimization_task = Task( + name="optimize_response", + description="Optimize response strategy", + expected_output="Optimization recommendations", + agent=optimizer, + task_type="decision", + condition={ + "major_revision": ["classify_query"], + "minor_adjustments": "", + "maintain_approach": "" + } +) + +# Create workflow +workflow = PraisonAIAgents( + agents=[classifier, handler, evaluator, optimizer], + tasks=[classification_task, handling_task, evaluation_task, optimization_task], + process="workflow", + verbose=True +) + +def main(): + print("\nStarting Customer Service Optimization Workflow...") + print("=" * 50) + + # Run workflow + results = workflow.start() + + # Print results + print("\nCustomer Service Results:") + print("=" * 50) + for task_id, result in results["task_results"].items(): + if result: + print(f"\nTask: {task_id}") + print(f"Result: {result.raw}") + print("-" * 50) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/cookbooks/usecases/emergency-response.py b/cookbooks/usecases/emergency-response.py new file mode 100644 index 0000000..c76b187 --- /dev/null +++ b/cookbooks/usecases/emergency-response.py @@ -0,0 +1,139 @@ +from praisonaiagents import Agent, Task, PraisonAIAgents +import time +from typing import Dict, List + +def assess_emergency(incident: Dict): + """Simulates emergency assessment""" + severity_levels = ["low", "medium", "high", "critical"] + current_time = int(time.time()) + severity = severity_levels[current_time % 4] + print(f"Incident assessed with {severity} severity") + return severity + +def dispatch_resources(severity: str): + """Simulates resource dispatch""" + resources = { + "low": ["local_police"], + "medium": ["local_police", "ambulance"], + "high": ["local_police", "ambulance", "fire"], + "critical": ["local_police", "ambulance", "fire", "special_units"] + } + dispatched = resources.get(severity, ["local_police"]) + print(f"Dispatching resources: {dispatched}") + return dispatched + +def monitor_response(): + """Simulates response monitoring""" + current_time = int(time.time()) + status = "completed" if current_time % 3 == 0 else "ongoing" + return status + +# Create specialized agents +router = Agent( + name="Emergency Router", + role="Emergency Assessment", + goal="Evaluate emergency severity and type", + instructions="Assess incident and determine required response", + tools=[assess_emergency] +) + +dispatcher = Agent( + name="Resource Dispatcher", + role="Resource Management", + goal="Coordinate and dispatch appropriate resources", + instructions="Deploy resources based on emergency assessment", + tools=[dispatch_resources] +) + +monitor = Agent( + name="Response Monitor", + role="Response Tracking", + goal="Track response progress and effectiveness", + instructions="Monitor ongoing response and provide status updates", + tools=[monitor_response] +) + +synthesizer = Agent( + name="Response Coordinator", + role="Response Synthesis", + goal="Coordinate multi-agency response", + instructions="Synthesize information and coordinate overall response" +) + +# Create 
workflow tasks +assessment_task = Task( + name="assess_emergency", + description="Evaluate emergency severity and type", + expected_output="Emergency severity level", + agent=router, + is_start=True, + task_type="decision", + condition={ + "critical": ["dispatch_resources", "monitor_response"], + "high": ["dispatch_resources", "monitor_response"], + "medium": ["dispatch_resources"], + "low": ["dispatch_resources"] + } +) + +dispatch_task = Task( + name="dispatch_resources", + description="Deploy appropriate emergency resources", + expected_output="List of dispatched resources", + agent=dispatcher, + next_tasks=["monitor_response"] +) + +monitor_task = Task( + name="monitor_response", + description="Track response progress", + expected_output="Response status", + agent=monitor, + task_type="decision", + condition={ + "ongoing": ["coordinate_response"], + "completed": "" + } +) + +coordinate_task = Task( + name="coordinate_response", + description="Coordinate overall emergency response", + expected_output="Coordinated response plan", + agent=synthesizer, + context=[assessment_task, dispatch_task, monitor_task] +) + +# Create workflow +workflow = PraisonAIAgents( + agents=[router, dispatcher, monitor, synthesizer], + tasks=[assessment_task, dispatch_task, monitor_task, coordinate_task], + process="workflow", + verbose=True +) + +def main(): + # Simulate emergency incident + incident = { + "type": "fire", + "location": "123 Main St", + "reported_time": time.time() + } + + print("\nStarting Emergency Response Workflow...") + print("=" * 50) + + # Run workflow + results = workflow.start(initial_input=incident) + + # Print results + print("\nEmergency Response Results:") + print("=" * 50) + for task_id, result in results["task_results"].items(): + if result: + print(f"\nTask: {task_id}") + print(f"Result: {result.raw}") + print("-" * 50) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/cookbooks/usecases/fraud-detection.py b/cookbooks/usecases/fraud-detection.py new file mode 100644 index 0000000..7ea6fba --- /dev/null +++ b/cookbooks/usecases/fraud-detection.py @@ -0,0 +1,136 @@ +from praisonaiagents import Agent, Task, PraisonAIAgents +import time +from typing import Dict, List +import asyncio + +def analyze_transaction(): + """Simulates transaction analysis""" + transactions = [ + {"type": "credit_card", "amount": 5000, "location": "foreign", "risk": "high"}, + {"type": "wire", "amount": 2000, "location": "domestic", "risk": "medium"}, + {"type": "online", "amount": 500, "location": "domestic", "risk": "low"} + ] + return transactions[int(time.time()) % 3] + +def check_patterns(transaction: Dict): + """Simulates pattern checking""" + patterns = { + "high": ["unusual_location", "large_amount"], + "medium": ["frequency_anomaly"], + "low": ["within_normal_limits"] + } + return patterns.get(transaction["risk"], ["unknown"]) + +def verify_identity(): + """Simulates identity verification""" + results = ["verified", "suspicious", "failed"] + return results[int(time.time()) % 3] + +def generate_alert(verification: str, patterns: List[str]): + """Simulates alert generation""" + if verification == "failed" or "unusual_location" in patterns: + return "high_priority_alert" + elif verification == "suspicious": + return "medium_priority_alert" + return "low_priority_alert" + +# Create specialized agents +transaction_analyzer = Agent( + name="Transaction Analyzer", + role="Transaction Analysis", + goal="Analyze transactions for suspicious patterns", + instructions="Monitor and 
analyze financial transactions", + tools=[analyze_transaction] +) + +pattern_checker = Agent( + name="Pattern Checker", + role="Pattern Detection", + goal="Identify suspicious patterns", + instructions="Check for known fraud patterns", + tools=[check_patterns] +) + +identity_verifier = Agent( + name="Identity Verifier", + role="Identity Verification", + goal="Verify transaction identities", + instructions="Perform identity verification checks", + tools=[verify_identity] +) + +alert_generator = Agent( + name="Alert Generator", + role="Alert Management", + goal="Generate appropriate alerts", + instructions="Create and prioritize alerts", + tools=[generate_alert] +) + +# Create workflow tasks +analysis_task = Task( + name="analyze_transaction", + description="Analyze transaction details", + expected_output="Transaction analysis", + agent=transaction_analyzer, + is_start=True, + task_type="decision", + condition={ + "high": ["check_patterns", "verify_identity"], + "medium": ["check_patterns"], + "low": ["check_patterns"] + } +) + +pattern_task = Task( + name="check_patterns", + description="Check for suspicious patterns", + expected_output="Identified patterns", + agent=pattern_checker, + next_tasks=["generate_alert"], + async_execution=True +) + +verification_task = Task( + name="verify_identity", + description="Verify transaction identity", + expected_output="Verification result", + agent=identity_verifier, + next_tasks=["generate_alert"], + async_execution=True +) + +alert_task = Task( + name="generate_alert", + description="Generate fraud alert", + expected_output="Alert priority", + agent=alert_generator, + context=[pattern_task, verification_task] +) + +# Create workflow +workflow = PraisonAIAgents( + agents=[transaction_analyzer, pattern_checker, identity_verifier, alert_generator], + tasks=[analysis_task, pattern_task, verification_task, alert_task], + process="workflow", + verbose=True +) + +async def main(): + print("\nStarting Fraud Detection Workflow...") + print("=" * 50) + + # Run workflow + results = await workflow.astart() + + # Print results + print("\nFraud Detection Results:") + print("=" * 50) + for task_id, result in results["task_results"].items(): + if result: + print(f"\nTask: {task_id}") + print(f"Result: {result.raw}") + print("-" * 50) + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/cookbooks/usecases/healthcare-diagnosis.py b/cookbooks/usecases/healthcare-diagnosis.py new file mode 100644 index 0000000..2f6749e --- /dev/null +++ b/cookbooks/usecases/healthcare-diagnosis.py @@ -0,0 +1,164 @@ +from praisonaiagents import Agent, Task, PraisonAIAgents +import time +from typing import Dict, List + +def analyze_symptoms(): + """Simulates symptom analysis""" + cases = [ + {"symptoms": ["fever", "cough", "fatigue"], "severity": "high", "duration": "5_days"}, + {"symptoms": ["headache", "nausea"], "severity": "medium", "duration": "2_days"}, + {"symptoms": ["rash", "itching"], "severity": "low", "duration": "1_week"} + ] + return cases[int(time.time()) % 3] + +def process_lab_results(): + """Simulates lab result processing""" + results = [ + {"blood_count": "abnormal", "inflammation": "high", "markers": "elevated"}, + {"blood_count": "normal", "inflammation": "low", "markers": "normal"}, + {"blood_count": "normal", "inflammation": "medium", "markers": "elevated"} + ] + return results[int(time.time()) % 3] + +def analyze_medical_history(): + """Simulates medical history analysis""" + histories = [ + {"chronic_conditions": True, 
"allergies": True, "risk_factors": "high"}, + {"chronic_conditions": False, "allergies": True, "risk_factors": "medium"}, + {"chronic_conditions": False, "allergies": False, "risk_factors": "low"} + ] + return histories[int(time.time()) % 3] + +def generate_diagnosis(symptoms: Dict, lab_results: Dict, history: Dict): + """Simulates diagnosis generation""" + if symptoms["severity"] == "high" and lab_results["markers"] == "elevated": + return {"diagnosis": "serious_condition", "confidence": "high"} + elif symptoms["severity"] == "medium" or lab_results["inflammation"] == "medium": + return {"diagnosis": "moderate_condition", "confidence": "medium"} + return {"diagnosis": "mild_condition", "confidence": "high"} + +def recommend_treatment(diagnosis: Dict): + """Simulates treatment recommendation""" + treatments = { + "serious_condition": ["immediate_intervention", "specialist_referral"], + "moderate_condition": ["medication", "follow_up"], + "mild_condition": ["rest", "observation"] + } + return treatments.get(diagnosis["diagnosis"], ["general_care"]) + +# Create specialized agents +symptom_analyzer = Agent( + name="Symptom Analyzer", + role="Symptom Analysis", + goal="Analyze patient symptoms", + instructions="Evaluate reported symptoms and their severity", + tools=[analyze_symptoms] +) + +lab_processor = Agent( + name="Lab Processor", + role="Lab Analysis", + goal="Process laboratory results", + instructions="Analyze and interpret lab test results", + tools=[process_lab_results] +) + +history_analyzer = Agent( + name="History Analyzer", + role="Medical History Analysis", + goal="Analyze patient medical history", + instructions="Review and assess patient medical history", + tools=[analyze_medical_history] +) + +diagnosis_generator = Agent( + name="Diagnosis Generator", + role="Diagnosis Generation", + goal="Generate comprehensive diagnosis", + instructions="Combine all inputs to generate diagnosis", + tools=[generate_diagnosis] +) + +treatment_recommender = Agent( + name="Treatment Recommender", + role="Treatment Planning", + goal="Recommend appropriate treatment", + instructions="Suggest treatment based on diagnosis", + tools=[recommend_treatment] +) + +# Create workflow tasks +symptom_task = Task( + name="analyze_symptoms", + description="Analyze patient symptoms", + expected_output="Symptom analysis", + agent=symptom_analyzer, + is_start=True, + task_type="decision", + condition={ + "high": ["process_labs", "analyze_history"], + "medium": ["process_labs", "analyze_history"], + "low": ["process_labs"] + } +) + +lab_task = Task( + name="process_labs", + description="Process lab results", + expected_output="Lab analysis", + agent=lab_processor, + next_tasks=["generate_diagnosis"] +) + +history_task = Task( + name="analyze_history", + description="Analyze medical history", + expected_output="History analysis", + agent=history_analyzer, + next_tasks=["generate_diagnosis"] +) + +diagnosis_task = Task( + name="generate_diagnosis", + description="Generate diagnosis", + expected_output="Diagnosis and confidence level", + agent=diagnosis_generator, + next_tasks=["recommend_treatment"], + context=[symptom_task, lab_task, history_task] +) + +treatment_task = Task( + name="recommend_treatment", + description="Recommend treatment", + expected_output="Treatment recommendations", + agent=treatment_recommender, + context=[diagnosis_task] +) + +# Create workflow +workflow = PraisonAIAgents( + agents=[symptom_analyzer, lab_processor, history_analyzer, + diagnosis_generator, treatment_recommender], + 
tasks=[symptom_task, lab_task, history_task, diagnosis_task, treatment_task], + process="workflow", + verbose=True +) + +def main(): + print("\nStarting Healthcare Diagnosis Workflow...") + print("=" * 50) + + # Run workflow + results = workflow.start() + + # Print results + print("\nDiagnosis Results:") + print("=" * 50) + for task_id, result in results["task_results"].items(): + if result: + print(f"\nTask: {task_id}") + print(f"Result: {result.raw}") + print("-" * 50) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/cookbooks/usecases/multilingual-content.py b/cookbooks/usecases/multilingual-content.py new file mode 100644 index 0000000..d73b5cc --- /dev/null +++ b/cookbooks/usecases/multilingual-content.py @@ -0,0 +1,161 @@ +from praisonaiagents import Agent, Task, PraisonAIAgents +import time +from typing import Dict, List + +def generate_base_content(): + """Simulates base content generation""" + content_types = [ + {"type": "marketing", "tone": "professional", "length": "medium"}, + {"type": "technical", "tone": "formal", "length": "long"}, + {"type": "social", "tone": "casual", "length": "short"} + ] + return content_types[int(time.time()) % 3] + +def translate_content(content: Dict): + """Simulates content translation""" + languages = ["spanish", "french", "german", "japanese", "chinese"] + translations = {lang: f"Translated content in {lang}" for lang in languages} + return translations + +def check_cultural_context(translations: Dict): + """Simulates cultural context verification""" + cultural_issues = { + "spanish": [], + "french": ["idiom_mismatch"], + "german": [], + "japanese": ["formality_level"], + "chinese": ["cultural_reference"] + } + return cultural_issues + +def adapt_content(issues: Dict): + """Simulates content adaptation""" + adaptations = { + "idiom_mismatch": "localized_expression", + "formality_level": "adjusted_tone", + "cultural_reference": "localized_reference" + } + return {lang: [adaptations[issue] for issue in issues] + for lang, issues in issues.items() if issues} + +def quality_check(): + """Simulates quality assessment""" + quality_levels = ["high", "medium", "needs_revision"] + return quality_levels[int(time.time()) % 3] + +# Create specialized agents +content_generator = Agent( + name="Content Generator", + role="Base Content Creation", + goal="Generate high-quality base content", + instructions="Create engaging base content", + tools=[generate_base_content] +) + +translator = Agent( + name="Content Translator", + role="Translation", + goal="Translate content accurately", + instructions="Translate content while maintaining meaning", + tools=[translate_content] +) + +cultural_checker = Agent( + name="Cultural Checker", + role="Cultural Verification", + goal="Verify cultural appropriateness", + instructions="Check for cultural sensitivities", + tools=[check_cultural_context] +) + +content_adapter = Agent( + name="Content Adapter", + role="Content Adaptation", + goal="Adapt content for cultural fit", + instructions="Modify content based on cultural context", + tools=[adapt_content] +) + +quality_assessor = Agent( + name="Quality Assessor", + role="Quality Assessment", + goal="Ensure content quality", + instructions="Assess overall content quality", + tools=[quality_check] +) + +# Create workflow tasks +generation_task = Task( + name="generate_content", + description="Generate base content", + expected_output="Base content for translation", + agent=content_generator, + is_start=True, + next_tasks=["translate_content"] +) + 
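+# Fan the generated base content out to every target language before the cultural check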
+translation_task = Task( + name="translate_content", + description="Translate content to target languages", + expected_output="Translated content", + agent=translator, + next_tasks=["check_cultural"] +) + +cultural_task = Task( + name="check_cultural", + description="Check cultural appropriateness", + expected_output="Cultural context issues", + agent=cultural_checker, + next_tasks=["adapt_content"] +) + +adaptation_task = Task( + name="adapt_content", + description="Adapt content for cultural fit", + expected_output="Culturally adapted content", + agent=content_adapter, + next_tasks=["assess_quality"] +) + +quality_task = Task( + name="assess_quality", + description="Assess content quality", + expected_output="Quality assessment", + agent=quality_assessor, + task_type="decision", + condition={ + "high": "", # Complete workflow + "medium": ["adapt_content"], # Minor revisions needed + "needs_revision": ["translate_content"] # Major revisions needed + } +) + +# Create workflow +workflow = PraisonAIAgents( + agents=[content_generator, translator, cultural_checker, + content_adapter, quality_assessor], + tasks=[generation_task, translation_task, cultural_task, + adaptation_task, quality_task], + process="workflow", + verbose=True +) + +def main(): + print("\nStarting Multilingual Content Generation Workflow...") + print("=" * 50) + + # Run workflow + results = workflow.start() + + # Print results + print("\nContent Generation Results:") + print("=" * 50) + for task_id, result in results["task_results"].items(): + if result: + print(f"\nTask: {task_id}") + print(f"Result: {result.raw}") + print("-" * 50) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/cookbooks/usecases/predictive-maintenance.py b/cookbooks/usecases/predictive-maintenance.py new file mode 100644 index 0000000..7eabc5c --- /dev/null +++ b/cookbooks/usecases/predictive-maintenance.py @@ -0,0 +1,180 @@ +from praisonaiagents import Agent, Task, PraisonAIAgents +import time +from typing import Dict, List +import asyncio + +def collect_sensor_data(): + """Simulates sensor data collection""" + sensor_readings = { + "temperature": 75 + (int(time.time()) % 20), + "vibration": 0.5 + (int(time.time()) % 10) / 10, + "pressure": 100 + (int(time.time()) % 50), + "noise_level": 60 + (int(time.time()) % 30) + } + return sensor_readings + +def analyze_performance(): + """Simulates performance analysis""" + metrics = { + "efficiency": 0.8 + (int(time.time()) % 20) / 100, + "uptime": 0.95 + (int(time.time()) % 5) / 100, + "output_quality": 0.9 + (int(time.time()) % 10) / 100 + } + return metrics + +def detect_anomalies(sensor_data: Dict, performance: Dict): + """Simulates anomaly detection""" + anomalies = [] + if sensor_data["temperature"] > 90: + anomalies.append({"type": "temperature_high", "severity": "critical"}) + if sensor_data["vibration"] > 1.2: + anomalies.append({"type": "vibration_excess", "severity": "warning"}) + if performance["efficiency"] < 0.85: + anomalies.append({"type": "efficiency_low", "severity": "warning"}) + return anomalies + +def predict_failures(anomalies: List[Dict]): + """Simulates failure prediction""" + predictions = [] + severity_scores = {"critical": 0.9, "warning": 0.6} + + for anomaly in anomalies: + predictions.append({ + "component": anomaly["type"].split("_")[0], + "probability": severity_scores[anomaly["severity"]], + "timeframe": "24_hours" if anomaly["severity"] == "critical" else "7_days" + }) + return predictions + +def schedule_maintenance(predictions: List[Dict]): + 
"""Simulates maintenance scheduling""" + schedule = [] + for pred in predictions: + schedule.append({ + "component": pred["component"], + "priority": "immediate" if pred["timeframe"] == "24_hours" else "planned", + "estimated_duration": "2_hours", + "required_parts": ["replacement_" + pred["component"]] + }) + return schedule + +# Create specialized agents +sensor_monitor = Agent( + name="Sensor Monitor", + role="Data Collection", + goal="Collect sensor data", + instructions="Monitor and collect sensor readings", + tools=[collect_sensor_data] +) + +performance_analyzer = Agent( + name="Performance Analyzer", + role="Performance Analysis", + goal="Analyze equipment performance", + instructions="Analyze operational metrics", + tools=[analyze_performance] +) + +anomaly_detector = Agent( + name="Anomaly Detector", + role="Anomaly Detection", + goal="Detect operational anomalies", + instructions="Identify abnormal patterns", + tools=[detect_anomalies] +) + +failure_predictor = Agent( + name="Failure Predictor", + role="Failure Prediction", + goal="Predict potential failures", + instructions="Predict equipment failures", + tools=[predict_failures] +) + +maintenance_scheduler = Agent( + name="Maintenance Scheduler", + role="Maintenance Planning", + goal="Schedule maintenance activities", + instructions="Plan and schedule maintenance", + tools=[schedule_maintenance] +) + +# Create workflow tasks +sensor_task = Task( + name="collect_data", + description="Collect sensor data", + expected_output="Sensor readings", + agent=sensor_monitor, + is_start=True, + next_tasks=["analyze_performance"], + async_execution=True +) + +performance_task = Task( + name="analyze_performance", + description="Analyze performance metrics", + expected_output="Performance analysis", + agent=performance_analyzer, + next_tasks=["detect_anomalies"], + async_execution=True +) + +anomaly_task = Task( + name="detect_anomalies", + description="Detect operational anomalies", + expected_output="Detected anomalies", + agent=anomaly_detector, + next_tasks=["predict_failures"], + context=[sensor_task, performance_task] +) + +prediction_task = Task( + name="predict_failures", + description="Predict potential failures", + expected_output="Failure predictions", + agent=failure_predictor, + next_tasks=["schedule_maintenance"], + task_type="decision", + condition={ + "critical": ["schedule_maintenance"], + "warning": ["schedule_maintenance"], + "normal": "" + } +) + +scheduling_task = Task( + name="schedule_maintenance", + description="Schedule maintenance activities", + expected_output="Maintenance schedule", + agent=maintenance_scheduler, + context=[prediction_task] +) + +# Create workflow +workflow = PraisonAIAgents( + agents=[sensor_monitor, performance_analyzer, anomaly_detector, + failure_predictor, maintenance_scheduler], + tasks=[sensor_task, performance_task, anomaly_task, + prediction_task, scheduling_task], + process="workflow", + verbose=True +) + +async def main(): + print("\nStarting Predictive Maintenance Workflow...") + print("=" * 50) + + # Run workflow + results = await workflow.astart() + + # Print results + print("\nMaintenance Planning Results:") + print("=" * 50) + for task_id, result in results["task_results"].items(): + if result: + print(f"\nTask: {task_id}") + print(f"Result: {result.raw}") + print("-" * 50) + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/cookbooks/usecases/smart-city.py b/cookbooks/usecases/smart-city.py new file mode 100644 index 0000000..1322b96 --- 
/dev/null +++ b/cookbooks/usecases/smart-city.py @@ -0,0 +1,186 @@ +from praisonaiagents import Agent, Task, PraisonAIAgents +import time +from typing import Dict, List + +def monitor_utilities(): + """Simulates utility usage monitoring""" + readings = { + "power": { + "consumption": int(time.time()) % 1000, + "peak_hours": ["morning", "evening"], + "grid_load": "medium" + }, + "water": { + "consumption": int(time.time()) % 500, + "pressure": "normal", + "quality": "good" + }, + "traffic": { + "congestion": "high", + "peak_zones": ["downtown", "industrial"], + "incidents": 2 + } + } + return readings + +def analyze_patterns(): + """Simulates usage pattern analysis""" + patterns = [ + {"type": "daily_cycle", "confidence": 0.85, "trend": "increasing"}, + {"type": "weekly_cycle", "confidence": 0.92, "trend": "stable"}, + {"type": "seasonal", "confidence": 0.78, "trend": "decreasing"} + ] + return patterns[int(time.time()) % 3] + +def optimize_resources(readings: Dict, patterns: Dict): + """Simulates resource optimization""" + optimizations = { + "power": { + "action": "load_balancing", + "target_zones": ["residential", "commercial"], + "expected_savings": "15%" + }, + "water": { + "action": "pressure_adjustment", + "target_zones": ["industrial"], + "expected_savings": "8%" + }, + "traffic": { + "action": "signal_timing", + "target_zones": ["downtown"], + "expected_impact": "20% reduction" + } + } + return optimizations + +def implement_changes(optimizations: Dict): + """Simulates implementation of optimization changes""" + success_rates = { + "load_balancing": 0.95, + "pressure_adjustment": 0.88, + "signal_timing": 0.85 + } + return {"status": "implemented", "success_rate": success_rates[optimizations["power"]["action"]]} + +def monitor_feedback(): + """Simulates monitoring of optimization feedback""" + feedbacks = ["positive", "neutral", "negative"] + return feedbacks[int(time.time()) % 3] + +# Create specialized agents +utility_monitor = Agent( + name="Utility Monitor", + role="Resource Monitoring", + goal="Monitor city utility usage", + instructions="Track and report utility consumption patterns", + tools=[monitor_utilities] +) + +pattern_analyzer = Agent( + name="Pattern Analyzer", + role="Pattern Analysis", + goal="Analyze usage patterns", + instructions="Identify and analyze resource usage patterns", + tools=[analyze_patterns] +) + +resource_optimizer = Agent( + name="Resource Optimizer", + role="Resource Optimization", + goal="Optimize resource allocation", + instructions="Generate resource optimization strategies", + tools=[optimize_resources] +) + +implementation_agent = Agent( + name="Implementation Agent", + role="Change Implementation", + goal="Implement optimization changes", + instructions="Execute optimization strategies", + tools=[implement_changes] +) + +feedback_monitor = Agent( + name="Feedback Monitor", + role="Feedback Monitoring", + goal="Monitor optimization results", + instructions="Track and analyze optimization feedback", + tools=[monitor_feedback] +) + +# Create workflow tasks +monitoring_task = Task( + name="monitor_utilities", + description="Monitor utility usage", + expected_output="Current utility readings", + agent=utility_monitor, + is_start=True, + next_tasks=["analyze_patterns"] +) + +pattern_task = Task( + name="analyze_patterns", + description="Analyze usage patterns", + expected_output="Usage patterns analysis", + agent=pattern_analyzer, + next_tasks=["optimize_resources"] +) + +optimization_task = Task( + name="optimize_resources", + 
description="Generate optimization strategies", + expected_output="Resource optimization plans", + agent=resource_optimizer, + next_tasks=["implement_changes"], + context=[monitoring_task, pattern_task] +) + +implementation_task = Task( + name="implement_changes", + description="Implement optimization changes", + expected_output="Implementation status", + agent=implementation_agent, + next_tasks=["monitor_feedback"] +) + +feedback_task = Task( + name="monitor_feedback", + description="Monitor optimization feedback", + expected_output="Optimization feedback", + agent=feedback_monitor, + task_type="decision", + condition={ + "negative": ["monitor_utilities"], # Start over if negative feedback + "neutral": ["optimize_resources"], # Adjust optimization if neutral + "positive": "" # End workflow if positive + } +) + +# Create workflow +workflow = PraisonAIAgents( + agents=[utility_monitor, pattern_analyzer, resource_optimizer, + implementation_agent, feedback_monitor], + tasks=[monitoring_task, pattern_task, optimization_task, + implementation_task, feedback_task], + process="workflow", + verbose=True +) + +def main(): + print("\nStarting Smart City Resource Optimization Workflow...") + print("=" * 50) + + # Run workflow + results = workflow.start() + + # Print results + print("\nOptimization Results:") + print("=" * 50) + for task_id, result in results["task_results"].items(): + if result: + print(f"\nTask: {task_id}") + print(f"Result: {result.raw}") + print("-" * 50) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/cookbooks/usecases/supply-chain.py b/cookbooks/usecases/supply-chain.py new file mode 100644 index 0000000..14cba9b --- /dev/null +++ b/cookbooks/usecases/supply-chain.py @@ -0,0 +1,113 @@ +from praisonaiagents import Agent, Task, PraisonAIAgents +import time +from typing import Dict, List + +def monitor_global_events(): + """Simulates monitoring of global events""" + events = [ + {"type": "natural_disaster", "severity": "high", "region": "Asia"}, + {"type": "political_unrest", "severity": "medium", "region": "Europe"}, + {"type": "economic_crisis", "severity": "critical", "region": "Americas"} + ] + return events[int(time.time()) % 3] + +def analyze_supply_impact(event: Dict): + """Simulates impact analysis on supply chain""" + impact_matrix = { + "natural_disaster": {"delay": "severe", "cost": "high", "risk_level": 9}, + "political_unrest": {"delay": "moderate", "cost": "medium", "risk_level": 6}, + "economic_crisis": {"delay": "significant", "cost": "extreme", "risk_level": 8} + } + return impact_matrix.get(event["type"]) + +def generate_mitigation_strategies(impact: Dict): + """Simulates generation of mitigation strategies""" + strategies = { + "severe": ["activate_backup_suppliers", "emergency_logistics_routing"], + "moderate": ["increase_buffer_stock", "alternative_transport"], + "significant": ["diversify_suppliers", "hedge_currency_risks"] + } + return strategies.get(impact["delay"], ["review_supply_chain"]) + +# Create specialized agents +monitor_agent = Agent( + name="Global Monitor", + role="Event Monitoring", + goal="Monitor and identify global events affecting supply chain", + instructions="Track and report significant global events", + tools=[monitor_global_events] +) + +impact_analyzer = Agent( + name="Impact Analyzer", + role="Impact Assessment", + goal="Analyze event impact on supply chain", + instructions="Assess potential disruptions and risks", + tools=[analyze_supply_impact] +) + +strategy_generator = Agent( + name="Strategy 
Generator", + role="Strategy Development", + goal="Generate mitigation strategies", + instructions="Develop strategies to address identified risks", + tools=[generate_mitigation_strategies] +) + +# Create workflow tasks +monitoring_task = Task( + name="monitor_events", + description="Monitor global events affecting supply chain", + expected_output="Identified global events", + agent=monitor_agent, + is_start=True, + task_type="decision", + condition={ + "high": ["analyze_impact"], + "medium": ["analyze_impact"], + "critical": ["analyze_impact"] + } +) + +impact_task = Task( + name="analyze_impact", + description="Analyze impact on supply chain", + expected_output="Impact assessment", + agent=impact_analyzer, + next_tasks=["generate_strategies"] +) + +strategy_task = Task( + name="generate_strategies", + description="Generate mitigation strategies", + expected_output="List of mitigation strategies", + agent=strategy_generator, + context=[monitoring_task, impact_task] +) + +# Create workflow +workflow = PraisonAIAgents( + agents=[monitor_agent, impact_analyzer, strategy_generator], + tasks=[monitoring_task, impact_task, strategy_task], + process="workflow", + verbose=True +) + +def main(): + print("\nStarting Supply Chain Risk Management Workflow...") + print("=" * 50) + + # Run workflow + results = workflow.start() + + # Print results + print("\nRisk Management Results:") + print("=" * 50) + for task_id, result in results["task_results"].items(): + if result: + print(f"\nTask: {task_id}") + print(f"Result: {result.raw}") + print("-" * 50) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/docs/examples.mdx b/docs/examples.mdx new file mode 100644 index 0000000..e0b9396 --- /dev/null +++ b/docs/examples.mdx @@ -0,0 +1,109 @@ +--- +title: "Examples" +sidebarTitle: "Examples" +description: "Explore real-world examples and use cases built with PraisonAI Agents." +icon: "code" +--- + +## Use Cases + + + + Learn how to create AI agents for predictive maintenance and equipment monitoring. + + + + Learn how to create AI agents for coordinated emergency response and resource management. + + + + Learn how to create AI agents for automated code review and issue resolution. + + + + Learn how to create AI agents for personalized adaptive learning experiences. + + + + Learn how to create AI agents for supply chain risk management and mitigation. + + + + Learn how to create AI agents for automated customer service and support. + + + + Learn how to create AI agents for real-time fraud detection and alert management. + + + + Learn how to create AI agents for medical diagnosis and treatment recommendations. + + + + Learn how to create AI agents for smart city resource optimization and management. + + + + Learn how to create AI agents for multilingual content generation and cultural adaptation. + + + +## What's Next? 
+ + + + Explore advanced features like prompt chaining and parallel execution + + + View the complete API reference documentation + + diff --git a/docs/mint.json b/docs/mint.json index c18c870..e723e85 100644 --- a/docs/mint.json +++ b/docs/mint.json @@ -54,6 +54,18 @@ "destination": "/" } ], + "tabs": [ + { + "name": "Documentation", + "url": "/index" + }, + { + "name": "Use Cases", + "url": "/usecases" + } + + ] + , "navigation": [ { "group": "", @@ -119,6 +131,21 @@ "models/other" ] }, + { + "group": "Usecases", + "pages": [ + "usecases/predictive-maintenance", + "usecases/emergency-response", + "usecases/code-review", + "usecases/fraud-detection", + "usecases/supply-chain", + "usecases/healthcare-diagnosis", + "usecases/customer-service", + "usecases/smart-city", + "usecases/multilingual-content", + "usecases/adaptive-learning" + ] + }, { "group": "Tools", "pages": [ @@ -182,6 +209,7 @@ { "group": "Developers", "pages": [ + "examples", "developers/test", "developers/agents-playbook", "developers/wrapper", diff --git a/docs/usecases/adaptive-learning.mdx b/docs/usecases/adaptive-learning.mdx new file mode 100644 index 0000000..849f3d6 --- /dev/null +++ b/docs/usecases/adaptive-learning.mdx @@ -0,0 +1,244 @@ +--- +title: "Adaptive Learning" +description: "Learn how to create AI agents for personalized adaptive learning experiences." +icon: "graduation-cap" +--- + +```mermaid +flowchart LR + In[In] --> Assessor[Student Assessor] + Assessor --> Generator[Content Generator] + Generator --> Evaluator[Performance Evaluator] + Evaluator --> Adapter[Difficulty Adapter] + Adapter --> |decrease/increase| Generator + Adapter --> |maintain| Out[Out] + + style In fill:#8B0000,color:#fff + style Assessor fill:#2E8B57,color:#fff + style Generator fill:#2E8B57,color:#fff + style Evaluator fill:#2E8B57,color:#fff + style Adapter fill:#2E8B57,color:#fff + style Out fill:#8B0000,color:#fff +``` + +Learn how to implement an adaptive learning system using AI agents for personalized education and dynamic content adjustment. 
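+The loop in the diagram above hinges on a single decision task: its `condition` map routes `decrease` and `increase` back to content generation, while `maintain` (an empty value) ends the run. The sketch below distills just that routing from the full example in the Quick Start; it uses the same stub tools and, like the full example, assumes `praisonaiagents` is installed and `OPENAI_API_KEY` is set.
+
+```python
+from praisonaiagents import Agent, Task, PraisonAIAgents
+import time
+
+def generate_content(level: str = "beginner"):
+    """Stub content generator."""
+    return f"practice material for a {level} student"
+
+def adapt_difficulty():
+    """Stub decision tool; rotates through the three outcomes."""
+    return ["decrease", "maintain", "increase"][int(time.time()) % 3]
+
+generator = Agent(
+    name="Content Generator",
+    role="Content Creation",
+    goal="Generate appropriate learning content",
+    instructions="Create content for the student's level",
+    tools=[generate_content]
+)
+
+adapter = Agent(
+    name="Difficulty Adapter",
+    role="Content Adaptation",
+    goal="Decide the next difficulty step",
+    instructions="Answer with exactly one of: decrease, maintain, increase",
+    tools=[adapt_difficulty]
+)
+
+generation_task = Task(
+    name="generate_content",
+    description="Generate appropriate content",
+    expected_output="Learning content",
+    agent=generator,
+    is_start=True,
+    next_tasks=["adapt_difficulty"]
+)
+
+adaptation_task = Task(
+    name="adapt_difficulty",
+    description="Adapt content difficulty",
+    expected_output="decrease, maintain, or increase",
+    agent=adapter,
+    task_type="decision",
+    condition={
+        "decrease": ["generate_content"],  # loop back with easier material
+        "increase": ["generate_content"],  # loop back with harder material
+        "maintain": ""                     # empty value ends the workflow
+    }
+)
+
+workflow = PraisonAIAgents(
+    agents=[generator, adapter],
+    tasks=[generation_task, adaptation_task],
+    process="workflow",
+    verbose=True
+)
+workflow.start()
+```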
+ +## Quick Start + + + + First, install the PraisonAI Agents package: + ```bash + pip install praisonaiagents + ``` + + + + Set your OpenAI API key as an environment variable in your terminal: + ```bash + export OPENAI_API_KEY=your_api_key_here + ``` + + + + Create a new file `app.py` with the basic setup: +```python +from praisonaiagents import Agent, Task, PraisonAIAgents +import time +from typing import Dict + +def assess_student_level(): + """Simulates student assessment""" + levels = ["beginner", "intermediate", "advanced"] + current_time = int(time.time()) + return levels[current_time % 3] + +def generate_content(level: str): + """Simulates content generation""" + content_types = { + "beginner": "basic concepts and examples", + "intermediate": "practice problems and applications", + "advanced": "complex scenarios and projects" + } + return content_types.get(level, "basic concepts") + +def evaluate_performance(): + """Simulates performance evaluation""" + scores = ["low", "medium", "high"] + current_time = int(time.time()) + return scores[current_time % 3] + +def adapt_difficulty(performance: str): + """Simulates difficulty adaptation""" + adaptations = { + "low": "decrease", + "medium": "maintain", + "high": "increase" + } + return adaptations.get(performance, "maintain") + +# Create specialized agents +assessor = Agent( + name="Student Assessor", + role="Level Assessment", + goal="Assess student's current level", + instructions="Evaluate student's knowledge and skills", + tools=[assess_student_level] +) + +generator = Agent( + name="Content Generator", + role="Content Creation", + goal="Generate appropriate learning content", + instructions="Create content based on student's level", + tools=[generate_content] +) + +evaluator = Agent( + name="Performance Evaluator", + role="Performance Assessment", + goal="Evaluate student's performance", + instructions="Assess learning outcomes", + tools=[evaluate_performance] +) + +adapter = Agent( + name="Difficulty Adapter", + role="Content Adaptation", + goal="Adapt content difficulty", + instructions="Adjust difficulty based on performance", + tools=[adapt_difficulty] +) + +# Create workflow tasks +assessment_task = Task( + name="assess_level", + description="Assess student's current level", + expected_output="Student's proficiency level", + agent=assessor, + is_start=True, + next_tasks=["generate_content"] +) + +generation_task = Task( + name="generate_content", + description="Generate appropriate content", + expected_output="Learning content", + agent=generator, + next_tasks=["evaluate_performance"] +) + +evaluation_task = Task( + name="evaluate_performance", + description="Evaluate student's performance", + expected_output="Performance assessment", + agent=evaluator, + next_tasks=["adapt_difficulty"] +) + +adaptation_task = Task( + name="adapt_difficulty", + description="Adapt content difficulty", + expected_output="Difficulty adjustment", + agent=adapter, + task_type="decision", + condition={ + "decrease": ["generate_content"], + "maintain": "", + "increase": ["generate_content"] + } +) + +# Create workflow +workflow = PraisonAIAgents( + agents=[assessor, generator, evaluator, adapter], + tasks=[assessment_task, generation_task, evaluation_task, adaptation_task], + process="workflow", + verbose=True +) + +def main(): + print("\nStarting Adaptive Learning Workflow...") + print("=" * 50) + + # Run workflow + results = workflow.start() + + # Print results + print("\nAdaptive Learning Results:") + print("=" * 50) + for task_id, result in 
results["task_results"].items(): + if result: + print(f"\nTask: {task_id}") + print(f"Result: {result.raw}") + print("-" * 50) + +if __name__ == "__main__": + main() +``` + + + + Run your adaptive learning system: + ```bash + python app.py + ``` + + + + + **Requirements** + - Python 3.10 or higher + - OpenAI API key. Generate OpenAI API key [here](https://platform.openai.com/api-keys). Use Other models using [this guide](/models). + + +## Understanding Adaptive Learning + + + Adaptive learning enables: + - Personalized learning experiences + - Dynamic content adjustment + - Performance-based progression + - Continuous skill assessment + - Intelligent difficulty scaling + + +## Features + + + + Evaluate student proficiency: + - Knowledge level assessment + - Skill gap identification + - Learning style analysis + + + Create personalized content: + - Level-appropriate materials + - Custom learning paths + - Interactive exercises + + + Monitor learning progress: + - Real-time evaluation + - Progress tracking + - Achievement metrics + + + Adjust learning experience: + - Difficulty scaling + - Content optimization + - Pace adjustment + + + +## Next Steps + + + + Learn about chaining prompts for complex workflows + + + Explore how to optimize and evaluate solutions + + \ No newline at end of file diff --git a/docs/usecases/code-review.mdx b/docs/usecases/code-review.mdx new file mode 100644 index 0000000..fb6c450 --- /dev/null +++ b/docs/usecases/code-review.mdx @@ -0,0 +1,207 @@ +--- +title: "Code Review" +sidebarTitle: "Code Review" +description: "Learn how to create AI agents for automated code review and issue resolution." +icon: "code" +--- + +```mermaid +flowchart LR + In[In] --> Analyzer[Code Analyzer] + Analyzer --> Suggester[Fix Suggester] + Suggester --> Applier[Fix Applier] + Applier -->|fixed| Out[Out] + Applier -->|manual_review| Suggester + + style In fill:#8B0000,color:#fff + style Analyzer fill:#2E8B57,color:#fff + style Suggester fill:#2E8B57,color:#fff + style Applier fill:#2E8B57,color:#fff + style Out fill:#8B0000,color:#fff +``` + +A workflow demonstrating how AI agents can automate code review, from analysis through fix suggestion and application. 
+ +## Quick Start + + + + First, install the PraisonAI Agents package: + ```bash + pip install praisonaiagents + ``` + + + + Set your OpenAI API key as an environment variable in your terminal: + ```bash + export OPENAI_API_KEY=your_api_key_here + ``` + + + + Create a new file `app.py` with the basic setup: + ```python + from praisonaiagents import Agent, Task, PraisonAIAgents + import time + from typing import List, Dict + + def analyze_code_changes(): + """Simulates code analysis""" + issues = [ + {"type": "style", "severity": "low", "file": "main.py"}, + {"type": "security", "severity": "high", "file": "auth.py"}, + {"type": "performance", "severity": "medium", "file": "data.py"} + ] + return issues[int(time.time()) % 3] + + def suggest_fixes(issue: Dict): + """Simulates fix suggestions""" + fixes = { + "style": "Apply PEP 8 formatting", + "security": "Implement input validation", + "performance": "Use list comprehension" + } + return fixes.get(issue["type"], "Review manually") + + def apply_automated_fix(fix: str): + """Simulates applying automated fixes""" + success = int(time.time()) % 2 == 0 + return "fixed" if success else "manual_review" + + # Create specialized agents + analyzer = Agent( + name="Code Analyzer", + role="Code analysis", + goal="Analyze code changes and identify issues", + instructions="Review code changes and report issues", + tools=[analyze_code_changes] + ) + + fix_suggester = Agent( + name="Fix Suggester", + role="Solution provider", + goal="Suggest fixes for identified issues", + instructions="Provide appropriate fix suggestions", + tools=[suggest_fixes] + ) + + fix_applier = Agent( + name="Fix Applier", + role="Fix implementation", + goal="Apply suggested fixes automatically when possible", + instructions="Implement suggested fixes and report results", + tools=[apply_automated_fix] + ) + + # Create workflow tasks + analysis_task = Task( + name="analyze_code", + description="Analyze code changes for issues", + expected_output="Identified code issues", + agent=analyzer, + is_start=True, + next_tasks=["suggest_fixes"] + ) + + suggestion_task = Task( + name="suggest_fixes", + description="Suggest fixes for identified issues", + expected_output="Fix suggestions", + agent=fix_suggester, + next_tasks=["apply_fixes"] + ) + + fix_task = Task( + name="apply_fixes", + description="Apply suggested fixes", + expected_output="Fix application status", + agent=fix_applier, + task_type="decision", + condition={ + "fixed": "", + "manual_review": ["suggest_fixes"] # Loop back for manual review + } + ) + + # Create workflow + workflow = PraisonAIAgents( + agents=[analyzer, fix_suggester, fix_applier], + tasks=[analysis_task, suggestion_task, fix_task], + process="workflow", + verbose=True + ) + + def main(): + print("\nStarting Code Review Workflow...") + print("=" * 50) + + # Run workflow + results = workflow.start() + + # Print results + print("\nCode Review Results:") + print("=" * 50) + for task_id, result in results["task_results"].items(): + if result: + print(f"\nTask: {task_id}") + print(f"Result: {result.raw}") + print("-" * 50) + + if __name__ == "__main__": + main() + ``` + + + + Type this in your terminal to run your agents: + ```bash + python app.py + ``` + + + + + **Requirements** + - Python 3.10 or higher + - OpenAI API key. Generate OpenAI API key [here](https://platform.openai.com/api-keys). Use Other models using [this guide](/models). 
+ - Basic understanding of Python + + +## Understanding Code Review + + + Automated code review workflow enables: + - Automated issue detection + - Intelligent fix suggestions + - Automated fix application + - Manual review routing when needed + + +## Features + + + + Automatically identify code issues and their severity. + + + Generate appropriate fix suggestions based on issue type. + + + Apply fixes automatically when possible. + + + Route complex issues for manual review. + + + +## Next Steps + + + + Learn about sequential prompt execution + + + Explore optimization techniques + + \ No newline at end of file diff --git a/docs/usecases/customer-service.mdx b/docs/usecases/customer-service.mdx new file mode 100644 index 0000000..9bab99d --- /dev/null +++ b/docs/usecases/customer-service.mdx @@ -0,0 +1,206 @@ +--- +title: Customer Service +sidebar_title: Customer Service +icon: "user-group" +--- + +```mermaid +flowchart LR + In[In] --> Classifier[Query Classifier] + Classifier --> Handler[Query Handler] + Handler --> Evaluator[Satisfaction Evaluator] + Evaluator --> Optimizer[Response Optimizer] + Optimizer --> Out[Out] + Optimizer --> Classifier + + style In fill:#8B0000,color:#fff + style Out fill:#8B0000,color:#fff + style Classifier fill:#2E8B57,color:#fff + style Handler fill:#2E8B57,color:#fff + style Evaluator fill:#2E8B57,color:#fff + style Optimizer fill:#2E8B57,color:#fff +``` + +Learn how to create AI agents for automated customer service and response optimization. + +## Quick Start + +1. Install the PraisonAI Agents package: +```bash +pip install praisonaiagents +``` + +2. Set your OpenAI API key: +```bash +export OPENAI_API_KEY=your_api_key_here +``` + +3. Create a file named `app.py`: +```python +from praisonaiagents import Agent, Task, PraisonAIAgents +import time +from typing import Dict, List + +def classify_query(): + """Simulates query classification""" + query_types = [ + {"type": "technical", "priority": "high", "complexity": "complex"}, + {"type": "billing", "priority": "medium", "complexity": "simple"}, + {"type": "general", "priority": "low", "complexity": "simple"} + ] + return query_types[int(time.time()) % 3] + +def handle_query(query: Dict): + """Simulates query handling""" + responses = { + "technical": "Technical support solution provided", + "billing": "Billing inquiry resolved", + "general": "General information provided" + } + return responses.get(query["type"], "Query forwarded to specialist") + +def evaluate_satisfaction(): + """Simulates satisfaction evaluation""" + scores = ["satisfied", "neutral", "unsatisfied"] + return scores[int(time.time()) % 3] + +def optimize_response(satisfaction: str): + """Simulates response optimization""" + optimizations = { + "satisfied": "maintain_approach", + "neutral": "minor_adjustments", + "unsatisfied": "major_revision" + } + return optimizations.get(satisfaction, "review_process") + +# Create specialized agents +classifier = Agent( + name="Query Classifier", + role="Query Classification", + goal="Classify incoming customer queries", + instructions="Analyze and categorize customer queries", + tools=[classify_query] +) + +handler = Agent( + name="Query Handler", + role="Query Resolution", + goal="Handle customer queries appropriately", + instructions="Provide appropriate responses to queries", + tools=[handle_query] +) + +evaluator = Agent( + name="Satisfaction Evaluator", + role="Satisfaction Assessment", + goal="Evaluate customer satisfaction", + instructions="Assess response effectiveness", + 
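+    # evaluate_satisfaction returns "satisfied", "neutral" or "unsatisfied";
+    # the Response Optimizer later maps that value onto the routing keys of the final decision task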
tools=[evaluate_satisfaction] +) + +optimizer = Agent( + name="Response Optimizer", + role="Service Optimization", + goal="Optimize service based on feedback", + instructions="Improve response strategies", + tools=[optimize_response] +) + +# Create workflow tasks +classification_task = Task( + name="classify_query", + description="Classify customer query", + expected_output="Query classification", + agent=classifier, + is_start=True, + task_type="decision", + condition={ + "high": ["handle_query", "evaluate_satisfaction"], + "medium": ["handle_query", "evaluate_satisfaction"], + "low": ["handle_query"] + } +) + +handling_task = Task( + name="handle_query", + description="Handle customer query", + expected_output="Query response", + agent=handler, + next_tasks=["evaluate_satisfaction"] +) + +evaluation_task = Task( + name="evaluate_satisfaction", + description="Evaluate customer satisfaction", + expected_output="Satisfaction level", + agent=evaluator, + next_tasks=["optimize_response"] +) + +optimization_task = Task( + name="optimize_response", + description="Optimize response strategy", + expected_output="Optimization recommendations", + agent=optimizer, + task_type="decision", + condition={ + "major_revision": ["classify_query"], + "minor_adjustments": "", + "maintain_approach": "" + } +) + +# Create workflow +workflow = PraisonAIAgents( + agents=[classifier, handler, evaluator, optimizer], + tasks=[classification_task, handling_task, evaluation_task, optimization_task], + process="workflow", + verbose=True +) + +def main(): + print("\nStarting Customer Service Optimization Workflow...") + print("=" * 50) + + # Run workflow + results = workflow.start() + + # Print results + print("\nCustomer Service Results:") + print("=" * 50) + for task_id, result in results["task_results"].items(): + if result: + print(f"\nTask: {task_id}") + print(f"Result: {result.raw}") + print("-" * 50) + +if __name__ == "__main__": + main() +``` + +4. Run the system: +```bash +python app.py +``` + + +**Requirements** +- Python 3.10 or higher +- OpenAI API key + + +## Understanding Customer Service + +The customer service workflow demonstrates how AI agents can automate and optimize customer support operations. The system uses specialized agents to classify queries, handle responses, evaluate satisfaction, and continuously improve service quality through feedback-driven optimization. + +## Features + +- **Query Classification**: Automatically categorizes customer inquiries based on type, priority, and complexity +- **Intelligent Response Handling**: Provides appropriate responses based on query classification +- **Satisfaction Evaluation**: Assesses customer satisfaction with provided solutions +- **Response Optimization**: Continuously improves service quality through feedback analysis + +## Next Steps + +- Learn about [Prompt Chaining](/features/promptchaining) +- Explore [Evaluator Optimizer](/features/evaluator-optimiser) \ No newline at end of file diff --git a/docs/usecases/emergency-response.mdx b/docs/usecases/emergency-response.mdx new file mode 100644 index 0000000..0031665 --- /dev/null +++ b/docs/usecases/emergency-response.mdx @@ -0,0 +1,241 @@ +--- +title: "Emergency Response" +sidebarTitle: "Emergency Response" +description: "Learn how to create AI agents for coordinated emergency response and resource management." 
+icon: "truck-medical" +--- + +```mermaid +flowchart LR + In[In] --> Router[Emergency Router] + Router -->|critical/high| Dispatcher[Resource Dispatcher] + Router -->|medium/low| Dispatcher + Dispatcher --> Monitor[Response Monitor] + Monitor -->|ongoing| Coordinator[Response Coordinator] + Monitor -->|completed| Out[Out] + Coordinator --> Out + + style In fill:#8B0000,color:#fff + style Router fill:#2E8B57,color:#fff + style Dispatcher fill:#2E8B57,color:#fff + style Monitor fill:#2E8B57,color:#fff + style Coordinator fill:#2E8B57,color:#fff + style Out fill:#8B0000,color:#fff +``` + +A workflow demonstrating how AI agents can coordinate emergency response, from initial assessment through resource dispatch and response monitoring. + +## Quick Start + + + + First, install the PraisonAI Agents package: + ```bash + pip install praisonaiagents + ``` + + + + Set your OpenAI API key as an environment variable in your terminal: + ```bash + export OPENAI_API_KEY=your_api_key_here + ``` + + + + Create a new file `app.py` with the basic setup: + ```python + from praisonaiagents import Agent, Task, PraisonAIAgents + import time + from typing import Dict, List + + def assess_emergency(incident: Dict): + """Simulates emergency assessment""" + severity_levels = ["low", "medium", "high", "critical"] + current_time = int(time.time()) + severity = severity_levels[current_time % 4] + print(f"Incident assessed with {severity} severity") + return severity + + def dispatch_resources(severity: str): + """Simulates resource dispatch""" + resources = { + "low": ["local_police"], + "medium": ["local_police", "ambulance"], + "high": ["local_police", "ambulance", "fire"], + "critical": ["local_police", "ambulance", "fire", "special_units"] + } + dispatched = resources.get(severity, ["local_police"]) + print(f"Dispatching resources: {dispatched}") + return dispatched + + def monitor_response(): + """Simulates response monitoring""" + current_time = int(time.time()) + status = "completed" if current_time % 3 == 0 else "ongoing" + return status + + # Create specialized agents + router = Agent( + name="Emergency Router", + role="Emergency Assessment", + goal="Evaluate emergency severity and type", + instructions="Assess incident and determine required response", + tools=[assess_emergency] + ) + + dispatcher = Agent( + name="Resource Dispatcher", + role="Resource Management", + goal="Coordinate and dispatch appropriate resources", + instructions="Deploy resources based on emergency assessment", + tools=[dispatch_resources] + ) + + monitor = Agent( + name="Response Monitor", + role="Response Tracking", + goal="Track response progress and effectiveness", + instructions="Monitor ongoing response and provide status updates", + tools=[monitor_response] + ) + + synthesizer = Agent( + name="Response Coordinator", + role="Response Synthesis", + goal="Coordinate multi-agency response", + instructions="Synthesize information and coordinate overall response" + ) + + # Create workflow tasks + assessment_task = Task( + name="assess_emergency", + description="Evaluate emergency severity and type", + expected_output="Emergency severity level", + agent=router, + is_start=True, + task_type="decision", + condition={ + "critical": ["dispatch_resources", "monitor_response"], + "high": ["dispatch_resources", "monitor_response"], + "medium": ["dispatch_resources"], + "low": ["dispatch_resources"] + } + ) + + dispatch_task = Task( + name="dispatch_resources", + description="Deploy appropriate emergency resources", + expected_output="List of 
dispatched resources", + agent=dispatcher, + next_tasks=["monitor_response"] + ) + + monitor_task = Task( + name="monitor_response", + description="Track response progress", + expected_output="Response status", + agent=monitor, + task_type="decision", + condition={ + "ongoing": ["coordinate_response"], + "completed": "" + } + ) + + coordinate_task = Task( + name="coordinate_response", + description="Coordinate overall emergency response", + expected_output="Coordinated response plan", + agent=synthesizer, + context=[assessment_task, dispatch_task, monitor_task] + ) + + # Create workflow + workflow = PraisonAIAgents( + agents=[router, dispatcher, monitor, synthesizer], + tasks=[assessment_task, dispatch_task, monitor_task, coordinate_task], + process="workflow", + verbose=True + ) + + def main(): + # Simulate emergency incident + incident = { + "type": "fire", + "location": "123 Main St", + "reported_time": time.time() + } + + print("\nStarting Emergency Response Workflow...") + print("=" * 50) + + # Run workflow + results = workflow.start(initial_input=incident) + + # Print results + print("\nEmergency Response Results:") + print("=" * 50) + for task_id, result in results["task_results"].items(): + if result: + print(f"\nTask: {task_id}") + print(f"Result: {result.raw}") + print("-" * 50) + + if __name__ == "__main__": + main() + ``` + + + + Type this in your terminal to run your agents: + ```bash + python app.py + ``` + + + + + **Requirements** + - Python 3.10 or higher + - OpenAI API key. Generate OpenAI API key [here](https://platform.openai.com/api-keys). Use Other models using [this guide](/models). + - Basic understanding of Python + + +## Understanding Emergency Response + + + Emergency response workflow enables: + - Dynamic severity assessment + - Automated resource dispatch + - Real-time response monitoring + - Multi-agency coordination + + +## Features + + + + Automatically evaluate incident severity and type. + + + Dispatch appropriate resources based on severity. + + + Track response progress and effectiveness. + + + Coordinate multi-agency response efforts. + + + +## Next Steps + + + + Learn about sequential prompt execution + + + Explore optimization techniques + + \ No newline at end of file diff --git a/docs/usecases/fraud-detection.mdx b/docs/usecases/fraud-detection.mdx new file mode 100644 index 0000000..c77dc25 --- /dev/null +++ b/docs/usecases/fraud-detection.mdx @@ -0,0 +1,237 @@ +--- +title: "Fraud Detection" +sidebarTitle: "Fraud Detection" +description: "Learn how to create AI agents for real-time fraud detection and alert management." +icon: "shield-check" +--- + +```mermaid +flowchart LR + In[In] --> Analyzer[Transaction Analyzer] + Analyzer -->|high risk| Verifier[Identity Verifier] + Analyzer -->|all risks| Checker[Pattern Checker] + Checker --> Generator[Alert Generator] + Verifier --> Generator + Generator --> Out[Out] + + style In fill:#8B0000,color:#fff + style Analyzer fill:#2E8B57,color:#fff + style Verifier fill:#2E8B57,color:#fff + style Checker fill:#2E8B57,color:#fff + style Generator fill:#2E8B57,color:#fff + style Out fill:#8B0000,color:#fff +``` + +A workflow demonstrating how AI agents can detect fraud in real-time, from transaction analysis through alert generation. 
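+Pattern checking and identity verification run as parallel branches: both tasks set `async_execution=True` and fan back in at the alert task through its `context`, which is why the Quick Start below starts the workflow with `await workflow.astart()` inside an asyncio entry point. The excerpt below, taken from that example, shows just the fan-out/fan-in wiring.
+
+```python
+pattern_task = Task(
+    name="check_patterns",
+    description="Check for suspicious patterns",
+    expected_output="Identified patterns",
+    agent=pattern_checker,
+    next_tasks=["generate_alert"],
+    async_execution=True                       # runs concurrently with verify_identity
+)
+
+verification_task = Task(
+    name="verify_identity",
+    description="Verify transaction identity",
+    expected_output="Verification result",
+    agent=identity_verifier,
+    next_tasks=["generate_alert"],
+    async_execution=True
+)
+
+alert_task = Task(
+    name="generate_alert",
+    description="Generate fraud alert",
+    expected_output="Alert priority",
+    agent=alert_generator,
+    context=[pattern_task, verification_task]  # fan-in: waits on both branches
+)
+```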
+ +## Quick Start + + + + First, install the PraisonAI Agents package: + ```bash + pip install praisonaiagents + ``` + + + + Set your OpenAI API key as an environment variable in your terminal: + ```bash + export OPENAI_API_KEY=your_api_key_here + ``` + + + + Create a new file `app.py` with the basic setup: + ```python + from praisonaiagents import Agent, Task, PraisonAIAgents + import time + from typing import Dict, List + import asyncio + + def analyze_transaction(): + """Simulates transaction analysis""" + transactions = [ + {"type": "credit_card", "amount": 5000, "location": "foreign", "risk": "high"}, + {"type": "wire", "amount": 2000, "location": "domestic", "risk": "medium"}, + {"type": "online", "amount": 500, "location": "domestic", "risk": "low"} + ] + return transactions[int(time.time()) % 3] + + def check_patterns(transaction: Dict): + """Simulates pattern checking""" + patterns = { + "high": ["unusual_location", "large_amount"], + "medium": ["frequency_anomaly"], + "low": ["within_normal_limits"] + } + return patterns.get(transaction["risk"], ["unknown"]) + + def verify_identity(): + """Simulates identity verification""" + results = ["verified", "suspicious", "failed"] + return results[int(time.time()) % 3] + + def generate_alert(verification: str, patterns: List[str]): + """Simulates alert generation""" + if verification == "failed" or "unusual_location" in patterns: + return "high_priority_alert" + elif verification == "suspicious": + return "medium_priority_alert" + return "low_priority_alert" + + # Create specialized agents + transaction_analyzer = Agent( + name="Transaction Analyzer", + role="Transaction Analysis", + goal="Analyze transactions for suspicious patterns", + instructions="Monitor and analyze financial transactions", + tools=[analyze_transaction] + ) + + pattern_checker = Agent( + name="Pattern Checker", + role="Pattern Detection", + goal="Identify suspicious patterns", + instructions="Check for known fraud patterns", + tools=[check_patterns] + ) + + identity_verifier = Agent( + name="Identity Verifier", + role="Identity Verification", + goal="Verify transaction identities", + instructions="Perform identity verification checks", + tools=[verify_identity] + ) + + alert_generator = Agent( + name="Alert Generator", + role="Alert Management", + goal="Generate appropriate alerts", + instructions="Create and prioritize alerts", + tools=[generate_alert] + ) + + # Create workflow tasks + analysis_task = Task( + name="analyze_transaction", + description="Analyze transaction details", + expected_output="Transaction analysis", + agent=transaction_analyzer, + is_start=True, + task_type="decision", + condition={ + "high": ["check_patterns", "verify_identity"], + "medium": ["check_patterns"], + "low": ["check_patterns"] + } + ) + + pattern_task = Task( + name="check_patterns", + description="Check for suspicious patterns", + expected_output="Identified patterns", + agent=pattern_checker, + next_tasks=["generate_alert"], + async_execution=True + ) + + verification_task = Task( + name="verify_identity", + description="Verify transaction identity", + expected_output="Verification result", + agent=identity_verifier, + next_tasks=["generate_alert"], + async_execution=True + ) + + alert_task = Task( + name="generate_alert", + description="Generate fraud alert", + expected_output="Alert priority", + agent=alert_generator, + context=[pattern_task, verification_task] + ) + + # Create workflow + workflow = PraisonAIAgents( + agents=[transaction_analyzer, pattern_checker, 
identity_verifier, alert_generator], + tasks=[analysis_task, pattern_task, verification_task, alert_task], + process="workflow", + verbose=True + ) + + async def main(): + print("\nStarting Fraud Detection Workflow...") + print("=" * 50) + + # Run workflow + results = await workflow.astart() + + # Print results + print("\nFraud Detection Results:") + print("=" * 50) + for task_id, result in results["task_results"].items(): + if result: + print(f"\nTask: {task_id}") + print(f"Result: {result.raw}") + print("-" * 50) + + if __name__ == "__main__": + asyncio.run(main()) + ``` + + + + Type this in your terminal to run your agents: + ```bash + python app.py + ``` + + + + + **Requirements** + - Python 3.10 or higher + - OpenAI API key. Generate OpenAI API key [here](https://platform.openai.com/api-keys). Use Other models using [this guide](/models). + - Basic understanding of Python + + +## Understanding Fraud Detection + + + Automated fraud detection workflow enables: + - Real-time transaction monitoring + - Pattern-based detection + - Identity verification + - Prioritized alert generation + + +## Features + + + + Analyze transactions for risk levels and suspicious patterns. + + + Identify known fraud patterns and anomalies. + + + Verify transaction identities for high-risk cases. + + + Generate and prioritize fraud alerts. + + + +## Next Steps + + + + Learn about sequential prompt execution + + + Explore optimization techniques + + \ No newline at end of file diff --git a/docs/usecases/healthcare-diagnosis.mdx b/docs/usecases/healthcare-diagnosis.mdx new file mode 100644 index 0000000..001b861 --- /dev/null +++ b/docs/usecases/healthcare-diagnosis.mdx @@ -0,0 +1,278 @@ +--- +title: "Healthcare Diagnosis" +description: "Learn how to create AI agents for automated medical diagnosis and treatment recommendations." +icon: "stethoscope" +--- + +```mermaid +flowchart LR + In[In] --> Symptoms[Symptom Analyzer] + Symptoms --> Labs[Lab Processor] + Symptoms --> History[History Analyzer] + Labs --> Diagnosis[Diagnosis Generator] + History --> Diagnosis + Diagnosis --> Treatment[Treatment Recommender] + Treatment --> Out[Out] + + style In fill:#8B0000,color:#fff + style Symptoms fill:#2E8B57,color:#fff + style Labs fill:#2E8B57,color:#fff + style History fill:#2E8B57,color:#fff + style Diagnosis fill:#2E8B57,color:#fff + style Treatment fill:#2E8B57,color:#fff + style Out fill:#8B0000,color:#fff +``` + +Learn how to implement an automated healthcare diagnosis system using AI agents for symptom analysis, lab processing, and treatment recommendations. 
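+Routing in this workflow depends on symptom severity: high and medium severity cases trigger both lab processing and history analysis, while low severity cases go to the labs only, and the diagnosis task then merges whichever branches ran through its `context`. The snippet below is that routing step, lifted from the full example in the Quick Start.
+
+```python
+symptom_task = Task(
+    name="analyze_symptoms",
+    description="Analyze patient symptoms",
+    expected_output="Symptom analysis",
+    agent=symptom_analyzer,
+    is_start=True,
+    task_type="decision",
+    condition={
+        "high": ["process_labs", "analyze_history"],    # full work-up
+        "medium": ["process_labs", "analyze_history"],
+        "low": ["process_labs"]                         # labs only, skip the history review
+    }
+)
+```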
+ +## Quick Start + + + + First, install the PraisonAI Agents package: + ```bash + pip install praisonaiagents + ``` + + + + Set your OpenAI API key as an environment variable in your terminal: + ```bash + export OPENAI_API_KEY=your_api_key_here + ``` + + + + Create a new file `app.py` with the basic setup: +```python +from praisonaiagents import Agent, Task, PraisonAIAgents +import time +from typing import Dict, List + +def analyze_symptoms(): + """Simulates symptom analysis""" + cases = [ + {"symptoms": ["fever", "cough", "fatigue"], "severity": "high", "duration": "5_days"}, + {"symptoms": ["headache", "nausea"], "severity": "medium", "duration": "2_days"}, + {"symptoms": ["rash", "itching"], "severity": "low", "duration": "1_week"} + ] + return cases[int(time.time()) % 3] + +def process_lab_results(): + """Simulates lab result processing""" + results = [ + {"blood_count": "abnormal", "inflammation": "high", "markers": "elevated"}, + {"blood_count": "normal", "inflammation": "low", "markers": "normal"}, + {"blood_count": "normal", "inflammation": "medium", "markers": "elevated"} + ] + return results[int(time.time()) % 3] + +def analyze_medical_history(): + """Simulates medical history analysis""" + histories = [ + {"chronic_conditions": True, "allergies": True, "risk_factors": "high"}, + {"chronic_conditions": False, "allergies": True, "risk_factors": "medium"}, + {"chronic_conditions": False, "allergies": False, "risk_factors": "low"} + ] + return histories[int(time.time()) % 3] + +def generate_diagnosis(symptoms: Dict, lab_results: Dict, history: Dict): + """Simulates diagnosis generation""" + if symptoms["severity"] == "high" and lab_results["markers"] == "elevated": + return {"diagnosis": "serious_condition", "confidence": "high"} + elif symptoms["severity"] == "medium" or lab_results["inflammation"] == "medium": + return {"diagnosis": "moderate_condition", "confidence": "medium"} + return {"diagnosis": "mild_condition", "confidence": "high"} + +def recommend_treatment(diagnosis: Dict): + """Simulates treatment recommendation""" + treatments = { + "serious_condition": ["immediate_intervention", "specialist_referral"], + "moderate_condition": ["medication", "follow_up"], + "mild_condition": ["rest", "observation"] + } + return treatments.get(diagnosis["diagnosis"], ["general_care"]) + +# Create specialized agents +symptom_analyzer = Agent( + name="Symptom Analyzer", + role="Symptom Analysis", + goal="Analyze patient symptoms", + instructions="Evaluate reported symptoms and their severity", + tools=[analyze_symptoms] +) + +lab_processor = Agent( + name="Lab Processor", + role="Lab Analysis", + goal="Process laboratory results", + instructions="Analyze and interpret lab test results", + tools=[process_lab_results] +) + +history_analyzer = Agent( + name="History Analyzer", + role="Medical History Analysis", + goal="Analyze patient medical history", + instructions="Review and assess patient medical history", + tools=[analyze_medical_history] +) + +diagnosis_generator = Agent( + name="Diagnosis Generator", + role="Diagnosis Generation", + goal="Generate comprehensive diagnosis", + instructions="Combine all inputs to generate diagnosis", + tools=[generate_diagnosis] +) + +treatment_recommender = Agent( + name="Treatment Recommender", + role="Treatment Planning", + goal="Recommend appropriate treatment", + instructions="Suggest treatment based on diagnosis", + tools=[recommend_treatment] +) + +# Create workflow tasks +symptom_task = Task( + name="analyze_symptoms", + description="Analyze 
patient symptoms", + expected_output="Symptom analysis", + agent=symptom_analyzer, + is_start=True, + task_type="decision", + condition={ + "high": ["process_labs", "analyze_history"], + "medium": ["process_labs", "analyze_history"], + "low": ["process_labs"] + } +) + +lab_task = Task( + name="process_labs", + description="Process lab results", + expected_output="Lab analysis", + agent=lab_processor, + next_tasks=["generate_diagnosis"] +) + +history_task = Task( + name="analyze_history", + description="Analyze medical history", + expected_output="History analysis", + agent=history_analyzer, + next_tasks=["generate_diagnosis"] +) + +diagnosis_task = Task( + name="generate_diagnosis", + description="Generate diagnosis", + expected_output="Diagnosis and confidence level", + agent=diagnosis_generator, + next_tasks=["recommend_treatment"], + context=[symptom_task, lab_task, history_task] +) + +treatment_task = Task( + name="recommend_treatment", + description="Recommend treatment", + expected_output="Treatment recommendations", + agent=treatment_recommender, + context=[diagnosis_task] +) + +# Create workflow +workflow = PraisonAIAgents( + agents=[symptom_analyzer, lab_processor, history_analyzer, + diagnosis_generator, treatment_recommender], + tasks=[symptom_task, lab_task, history_task, diagnosis_task, treatment_task], + process="workflow", + verbose=True +) + +def main(): + print("\nStarting Healthcare Diagnosis Workflow...") + print("=" * 50) + + # Run workflow + results = workflow.start() + + # Print results + print("\nDiagnosis Results:") + print("=" * 50) + for task_id, result in results["task_results"].items(): + if result: + print(f"\nTask: {task_id}") + print(f"Result: {result.raw}") + print("-" * 50) + +if __name__ == "__main__": + main() +``` + + + + Run your healthcare diagnosis system: + ```bash + python app.py + ``` + + + + + **Requirements** + - Python 3.10 or higher + - OpenAI API key. Generate OpenAI API key [here](https://platform.openai.com/api-keys). Use Other models using [this guide](/models). + + +## Understanding Healthcare Diagnosis + + + Healthcare diagnosis enables: + - Automated symptom analysis + - Lab result processing + - Medical history review + - Diagnosis generation + - Treatment recommendations + + +## Features + + + + Analyze patient symptoms: + - Severity assessment + - Duration tracking + - Pattern recognition + + + Process medical tests: + - Blood count analysis + - Inflammation markers + - Test result interpretation + + + Review medical history: + - Chronic conditions + - Allergies + - Risk factors + + + Generate treatment plans: + - Immediate interventions + - Medication recommendations + - Follow-up scheduling + + + +## Next Steps + + + + Learn about chaining prompts for complex workflows + + + Explore how to optimize and evaluate solutions + + \ No newline at end of file diff --git a/docs/usecases/multilingual-content.mdx b/docs/usecases/multilingual-content.mdx new file mode 100644 index 0000000..da82fa3 --- /dev/null +++ b/docs/usecases/multilingual-content.mdx @@ -0,0 +1,276 @@ +--- +title: "Multilingual Content" +description: "Learn how to create AI agents for multilingual content generation and cultural adaptation." 
+icon: "language" +--- + +```mermaid +flowchart LR + In[In] --> Generator[Content Generator] + Generator --> Translator[Content Translator] + Translator --> Cultural[Cultural Checker] + Cultural --> Adapter[Content Adapter] + Adapter --> Quality[Quality Assessor] + Quality --> Out[Out] + Quality --> Adapter + Quality --> Translator + + style In fill:#8B0000,color:#fff + style Generator fill:#2E8B57,color:#fff + style Translator fill:#2E8B57,color:#fff + style Cultural fill:#2E8B57,color:#fff + style Adapter fill:#2E8B57,color:#fff + style Quality fill:#2E8B57,color:#fff + style Out fill:#8B0000,color:#fff +``` + +Learn how to implement a multilingual content generation system using AI agents for content creation, translation, cultural adaptation, and quality assurance. + +## Quick Start + + + + First, install the PraisonAI Agents package: + ```bash + pip install praisonaiagents + ``` + + + + Set your OpenAI API key as an environment variable in your terminal: + ```bash + export OPENAI_API_KEY=your_api_key_here + ``` + + + + Create a new file `app.py` with the basic setup: +```python +from praisonaiagents import Agent, Task, PraisonAIAgents +import time +from typing import Dict, List + +def generate_base_content(): + """Simulates base content generation""" + content_types = [ + {"type": "marketing", "tone": "professional", "length": "medium"}, + {"type": "technical", "tone": "formal", "length": "long"}, + {"type": "social", "tone": "casual", "length": "short"} + ] + return content_types[int(time.time()) % 3] + +def translate_content(content: Dict): + """Simulates content translation""" + languages = ["spanish", "french", "german", "japanese", "chinese"] + translations = {lang: f"Translated content in {lang}" for lang in languages} + return translations + +def check_cultural_context(translations: Dict): + """Simulates cultural context verification""" + cultural_issues = { + "spanish": [], + "french": ["idiom_mismatch"], + "german": [], + "japanese": ["formality_level"], + "chinese": ["cultural_reference"] + } + return cultural_issues + +def adapt_content(issues: Dict): + """Simulates content adaptation""" + adaptations = { + "idiom_mismatch": "localized_expression", + "formality_level": "adjusted_tone", + "cultural_reference": "localized_reference" + } + return {lang: [adaptations[issue] for issue in issues] + for lang, issues in issues.items() if issues} + +def quality_check(): + """Simulates quality assessment""" + quality_levels = ["high", "medium", "needs_revision"] + return quality_levels[int(time.time()) % 3] + +# Create specialized agents +content_generator = Agent( + name="Content Generator", + role="Base Content Creation", + goal="Generate high-quality base content", + instructions="Create engaging base content", + tools=[generate_base_content] +) + +translator = Agent( + name="Content Translator", + role="Translation", + goal="Translate content accurately", + instructions="Translate content while maintaining meaning", + tools=[translate_content] +) + +cultural_checker = Agent( + name="Cultural Checker", + role="Cultural Verification", + goal="Verify cultural appropriateness", + instructions="Check for cultural sensitivities", + tools=[check_cultural_context] +) + +content_adapter = Agent( + name="Content Adapter", + role="Content Adaptation", + goal="Adapt content for cultural fit", + instructions="Modify content based on cultural context", + tools=[adapt_content] +) + +quality_assessor = Agent( + name="Quality Assessor", + role="Quality Assessment", + goal="Ensure content 
quality", + instructions="Assess overall content quality", + tools=[quality_check] +) + +# Create workflow tasks +generation_task = Task( + name="generate_content", + description="Generate base content", + expected_output="Base content for translation", + agent=content_generator, + is_start=True, + next_tasks=["translate_content"] +) + +translation_task = Task( + name="translate_content", + description="Translate content to target languages", + expected_output="Translated content", + agent=translator, + next_tasks=["check_cultural"] +) + +cultural_task = Task( + name="check_cultural", + description="Check cultural appropriateness", + expected_output="Cultural context issues", + agent=cultural_checker, + next_tasks=["adapt_content"] +) + +adaptation_task = Task( + name="adapt_content", + description="Adapt content for cultural fit", + expected_output="Culturally adapted content", + agent=content_adapter, + next_tasks=["assess_quality"] +) + +quality_task = Task( + name="assess_quality", + description="Assess content quality", + expected_output="Quality assessment", + agent=quality_assessor, + task_type="decision", + condition={ + "high": "", # Complete workflow + "medium": ["adapt_content"], # Minor revisions needed + "needs_revision": ["translate_content"] # Major revisions needed + } +) + +# Create workflow +workflow = PraisonAIAgents( + agents=[content_generator, translator, cultural_checker, + content_adapter, quality_assessor], + tasks=[generation_task, translation_task, cultural_task, + adaptation_task, quality_task], + process="workflow", + verbose=True +) + +def main(): + print("\nStarting Multilingual Content Generation Workflow...") + print("=" * 50) + + # Run workflow + results = workflow.start() + + # Print results + print("\nContent Generation Results:") + print("=" * 50) + for task_id, result in results["task_results"].items(): + if result: + print(f"\nTask: {task_id}") + print(f"Result: {result.raw}") + print("-" * 50) + +if __name__ == "__main__": + main() +``` + + + + Run your multilingual content generation system: + ```bash + python app.py + ``` + + + + + **Requirements** + - Python 3.10 or higher + - OpenAI API key. Generate OpenAI API key [here](https://platform.openai.com/api-keys). Use Other models using [this guide](/models). + + +## Understanding Multilingual Content Generation + + + Multilingual content generation enables: + - Base content creation + - Accurate translation + - Cultural context verification + - Content adaptation + - Quality assurance + + +## Features + + + + Generate base content: + - Marketing content + - Technical documentation + - Social media posts + + + Translate content: + - Multiple languages + - Meaning preservation + - Context awareness + + + Ensure cultural fit: + - Cultural sensitivity + - Idiom localization + - Reference adaptation + + + Quality assurance: + - Content assessment + - Revision workflow + - Continuous improvement + + + +## Next Steps + + + + Learn about chaining prompts for complex workflows + + + Explore how to optimize and evaluate solutions + + \ No newline at end of file diff --git a/docs/usecases/predictive-maintenance.mdx b/docs/usecases/predictive-maintenance.mdx new file mode 100644 index 0000000..2ce0ea1 --- /dev/null +++ b/docs/usecases/predictive-maintenance.mdx @@ -0,0 +1,287 @@ +--- +title: "Predictive Maintenance" +description: "Learn how to create AI agents for predictive maintenance and equipment monitoring." 
+icon: "gear" +--- + +```mermaid +flowchart LR + In[In] --> Monitor[Sensor Monitor] + Monitor --> Analyzer[Performance Analyzer] + Analyzer --> Detector[Anomaly Detector] + Detector --> Predictor[Failure Predictor] + Predictor -->|critical| Scheduler[Maintenance Scheduler] + Predictor -->|warning| Scheduler + Predictor -->|normal| Out[Out] + Scheduler --> Out + + style In fill:#8B0000,color:#fff + style Out fill:#8B0000,color:#fff + style Monitor fill:#2E8B57,color:#fff + style Analyzer fill:#2E8B57,color:#fff + style Detector fill:#2E8B57,color:#fff + style Predictor fill:#2E8B57,color:#fff + style Scheduler fill:#2E8B57,color:#fff +``` + +Learn how to implement a predictive maintenance system using AI agents for real-time equipment monitoring and maintenance scheduling. + +## Quick Start + + + + First, install the PraisonAI Agents package: + ```bash + pip install praisonaiagents + ``` + + + + Set your OpenAI API key as an environment variable in your terminal: + ```bash + export OPENAI_API_KEY=your_api_key_here + ``` + + + + Create a new file `predictive_maintenance.py` with the basic setup: +```python +from praisonaiagents import Agent, Task, PraisonAIAgents +import time +from typing import Dict, List +import asyncio + +def collect_sensor_data(): + """Simulates sensor data collection""" + sensor_readings = { + "temperature": 75 + (int(time.time()) % 20), + "vibration": 0.5 + (int(time.time()) % 10) / 10, + "pressure": 100 + (int(time.time()) % 50), + "noise_level": 60 + (int(time.time()) % 30) + } + return sensor_readings + +def analyze_performance(): + """Simulates performance analysis""" + metrics = { + "efficiency": 0.8 + (int(time.time()) % 20) / 100, + "uptime": 0.95 + (int(time.time()) % 5) / 100, + "output_quality": 0.9 + (int(time.time()) % 10) / 100 + } + return metrics + +def detect_anomalies(sensor_data: Dict, performance: Dict): + """Simulates anomaly detection""" + anomalies = [] + if sensor_data["temperature"] > 90: + anomalies.append({"type": "temperature_high", "severity": "critical"}) + if sensor_data["vibration"] > 1.2: + anomalies.append({"type": "vibration_excess", "severity": "warning"}) + if performance["efficiency"] < 0.85: + anomalies.append({"type": "efficiency_low", "severity": "warning"}) + return anomalies + +def predict_failures(anomalies: List[Dict]): + """Simulates failure prediction""" + predictions = [] + severity_scores = {"critical": 0.9, "warning": 0.6} + + for anomaly in anomalies: + predictions.append({ + "component": anomaly["type"].split("_")[0], + "probability": severity_scores[anomaly["severity"]], + "timeframe": "24_hours" if anomaly["severity"] == "critical" else "7_days" + }) + return predictions + +def schedule_maintenance(predictions: List[Dict]): + """Simulates maintenance scheduling""" + schedule = [] + for pred in predictions: + schedule.append({ + "component": pred["component"], + "priority": "immediate" if pred["timeframe"] == "24_hours" else "planned", + "estimated_duration": "2_hours", + "required_parts": ["replacement_" + pred["component"]] + }) + return schedule + +# Create specialized agents +sensor_monitor = Agent( + name="Sensor Monitor", + role="Data Collection", + goal="Collect sensor data", + instructions="Monitor and collect sensor readings", + tools=[collect_sensor_data] +) + +performance_analyzer = Agent( + name="Performance Analyzer", + role="Performance Analysis", + goal="Analyze equipment performance", + instructions="Analyze operational metrics", + tools=[analyze_performance] +) + +anomaly_detector = Agent( + 
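+    # detect_anomalies needs both the sensor readings and the performance metrics;
+    # the anomaly task below supplies them via context=[sensor_task, performance_task]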
name="Anomaly Detector", + role="Anomaly Detection", + goal="Detect operational anomalies", + instructions="Identify abnormal patterns", + tools=[detect_anomalies] +) + +failure_predictor = Agent( + name="Failure Predictor", + role="Failure Prediction", + goal="Predict potential failures", + instructions="Predict equipment failures", + tools=[predict_failures] +) + +maintenance_scheduler = Agent( + name="Maintenance Scheduler", + role="Maintenance Planning", + goal="Schedule maintenance activities", + instructions="Plan and schedule maintenance", + tools=[schedule_maintenance] +) + +# Create workflow tasks +sensor_task = Task( + name="collect_data", + description="Collect sensor data", + expected_output="Sensor readings", + agent=sensor_monitor, + is_start=True, + next_tasks=["analyze_performance"], + async_execution=True +) + +performance_task = Task( + name="analyze_performance", + description="Analyze performance metrics", + expected_output="Performance analysis", + agent=performance_analyzer, + next_tasks=["detect_anomalies"], + async_execution=True +) + +anomaly_task = Task( + name="detect_anomalies", + description="Detect operational anomalies", + expected_output="Detected anomalies", + agent=anomaly_detector, + next_tasks=["predict_failures"], + context=[sensor_task, performance_task] +) + +prediction_task = Task( + name="predict_failures", + description="Predict potential failures", + expected_output="Failure predictions", + agent=failure_predictor, + next_tasks=["schedule_maintenance"], + task_type="decision", + condition={ + "critical": ["schedule_maintenance"], + "warning": ["schedule_maintenance"], + "normal": "" + } +) + +scheduling_task = Task( + name="schedule_maintenance", + description="Schedule maintenance activities", + expected_output="Maintenance schedule", + agent=maintenance_scheduler, + context=[prediction_task] +) + +# Create workflow +workflow = PraisonAIAgents( + agents=[sensor_monitor, performance_analyzer, anomaly_detector, + failure_predictor, maintenance_scheduler], + tasks=[sensor_task, performance_task, anomaly_task, + prediction_task, scheduling_task], + process="workflow", + verbose=True +) + +async def main(): + print("\nStarting Predictive Maintenance Workflow...") + print("=" * 50) + + # Run workflow + results = await workflow.astart() + + # Print results + print("\nMaintenance Planning Results:") + print("=" * 50) + for task_id, result in results["task_results"].items(): + if result: + print(f"\nTask: {task_id}") + print(f"Result: {result.raw}") + print("-" * 50) + +if __name__ == "__main__": + asyncio.run(main()) +``` + + + + Run your predictive maintenance system: + ```bash + python predictive_maintenance.py + ``` + + + + + **Requirements** + - Python 3.10 or higher + - OpenAI API key. Generate OpenAI API key [here](https://platform.openai.com/api-keys). Use Other models using [this guide](/models). + + +## Understanding Predictive Maintenance + + + Predictive maintenance using AI agents enables: + - Real-time equipment monitoring + - Performance analysis and anomaly detection + - Failure prediction and prevention + - Automated maintenance scheduling + - Optimized resource allocation + + +## Features + + + + Continuous monitoring of equipment sensors and performance metrics. + + + Automated detection of abnormal patterns and potential issues. + + + Advanced analytics to predict potential equipment failures. + + + Automated scheduling of maintenance activities based on predictions. 
+ + + +## Next Steps + + + + Learn about sequential prompt chaining for complex workflows + + + Explore how to optimize agent outputs through evaluation + + + + + For optimal results, ensure your sensor data collection and analysis parameters are properly configured for your specific equipment. + \ No newline at end of file diff --git a/docs/usecases/smart-city.mdx b/docs/usecases/smart-city.mdx new file mode 100644 index 0000000..c68531c --- /dev/null +++ b/docs/usecases/smart-city.mdx @@ -0,0 +1,301 @@ +--- +title: "Smart City" +description: "Learn how to create AI agents for smart city resource management and optimization." +icon: "city" +--- + +```mermaid +flowchart LR + In[In] --> Monitor[Utility Monitor] + Monitor --> Analyzer[Pattern Analyzer] + Analyzer --> Optimizer[Resource Optimizer] + Optimizer --> Implementation[Implementation Agent] + Implementation --> Feedback[Feedback Monitor] + Feedback --> Monitor + Feedback --> Optimizer + Feedback --> Out[Out] + + style In fill:#8B0000,color:#fff + style Monitor fill:#2E8B57,color:#fff + style Analyzer fill:#2E8B57,color:#fff + style Optimizer fill:#2E8B57,color:#fff + style Implementation fill:#2E8B57,color:#fff + style Feedback fill:#2E8B57,color:#fff + style Out fill:#8B0000,color:#fff +``` + +Learn how to implement a smart city resource management system using AI agents for monitoring, analysis, and optimization of city utilities. + +## Quick Start + + + + First, install the PraisonAI Agents package: + ```bash + pip install praisonaiagents + ``` + + + + Set your OpenAI API key as an environment variable in your terminal: + ```bash + export OPENAI_API_KEY=your_api_key_here + ``` + + + + Create a new file `app.py` with the basic setup: +```python +from praisonaiagents import Agent, Task, PraisonAIAgents +import time +from typing import Dict, List + +def monitor_utilities(): + """Simulates utility usage monitoring""" + readings = { + "power": { + "consumption": int(time.time()) % 1000, + "peak_hours": ["morning", "evening"], + "grid_load": "medium" + }, + "water": { + "consumption": int(time.time()) % 500, + "pressure": "normal", + "quality": "good" + }, + "traffic": { + "congestion": "high", + "peak_zones": ["downtown", "industrial"], + "incidents": 2 + } + } + return readings + +def analyze_patterns(): + """Simulates usage pattern analysis""" + patterns = [ + {"type": "daily_cycle", "confidence": 0.85, "trend": "increasing"}, + {"type": "weekly_cycle", "confidence": 0.92, "trend": "stable"}, + {"type": "seasonal", "confidence": 0.78, "trend": "decreasing"} + ] + return patterns[int(time.time()) % 3] + +def optimize_resources(readings: Dict, patterns: Dict): + """Simulates resource optimization""" + optimizations = { + "power": { + "action": "load_balancing", + "target_zones": ["residential", "commercial"], + "expected_savings": "15%" + }, + "water": { + "action": "pressure_adjustment", + "target_zones": ["industrial"], + "expected_savings": "8%" + }, + "traffic": { + "action": "signal_timing", + "target_zones": ["downtown"], + "expected_impact": "20% reduction" + } + } + return optimizations + +def implement_changes(optimizations: Dict): + """Simulates implementation of optimization changes""" + success_rates = { + "load_balancing": 0.95, + "pressure_adjustment": 0.88, + "signal_timing": 0.85 + } + return {"status": "implemented", "success_rate": success_rates[optimizations["power"]["action"]]} + +def monitor_feedback(): + """Simulates monitoring of optimization feedback""" + feedbacks = ["positive", "neutral", "negative"] + return 
feedbacks[int(time.time()) % 3] + +# Create specialized agents +utility_monitor = Agent( + name="Utility Monitor", + role="Resource Monitoring", + goal="Monitor city utility usage", + instructions="Track and report utility consumption patterns", + tools=[monitor_utilities] +) + +pattern_analyzer = Agent( + name="Pattern Analyzer", + role="Pattern Analysis", + goal="Analyze usage patterns", + instructions="Identify and analyze resource usage patterns", + tools=[analyze_patterns] +) + +resource_optimizer = Agent( + name="Resource Optimizer", + role="Resource Optimization", + goal="Optimize resource allocation", + instructions="Generate resource optimization strategies", + tools=[optimize_resources] +) + +implementation_agent = Agent( + name="Implementation Agent", + role="Change Implementation", + goal="Implement optimization changes", + instructions="Execute optimization strategies", + tools=[implement_changes] +) + +feedback_monitor = Agent( + name="Feedback Monitor", + role="Feedback Monitoring", + goal="Monitor optimization results", + instructions="Track and analyze optimization feedback", + tools=[monitor_feedback] +) + +# Create workflow tasks +monitoring_task = Task( + name="monitor_utilities", + description="Monitor utility usage", + expected_output="Current utility readings", + agent=utility_monitor, + is_start=True, + next_tasks=["analyze_patterns"] +) + +pattern_task = Task( + name="analyze_patterns", + description="Analyze usage patterns", + expected_output="Usage patterns analysis", + agent=pattern_analyzer, + next_tasks=["optimize_resources"] +) + +optimization_task = Task( + name="optimize_resources", + description="Generate optimization strategies", + expected_output="Resource optimization plans", + agent=resource_optimizer, + next_tasks=["implement_changes"], + context=[monitoring_task, pattern_task] +) + +implementation_task = Task( + name="implement_changes", + description="Implement optimization changes", + expected_output="Implementation status", + agent=implementation_agent, + next_tasks=["monitor_feedback"] +) + +feedback_task = Task( + name="monitor_feedback", + description="Monitor optimization feedback", + expected_output="Optimization feedback", + agent=feedback_monitor, + task_type="decision", + condition={ + "negative": ["monitor_utilities"], # Start over if negative feedback + "neutral": ["optimize_resources"], # Adjust optimization if neutral + "positive": "" # End workflow if positive + } +) + +# Create workflow +workflow = PraisonAIAgents( + agents=[utility_monitor, pattern_analyzer, resource_optimizer, + implementation_agent, feedback_monitor], + tasks=[monitoring_task, pattern_task, optimization_task, + implementation_task, feedback_task], + process="workflow", + verbose=True +) + +def main(): + print("\nStarting Smart City Resource Optimization Workflow...") + print("=" * 50) + + # Run workflow + results = workflow.start() + + # Print results + print("\nOptimization Results:") + print("=" * 50) + for task_id, result in results["task_results"].items(): + if result: + print(f"\nTask: {task_id}") + print(f"Result: {result.raw}") + print("-" * 50) + +if __name__ == "__main__": + main() +``` + + + + Run your smart city management system: + ```bash + python app.py + ``` + + + + + **Requirements** + - Python 3.10 or higher + - OpenAI API key. Generate OpenAI API key [here](https://platform.openai.com/api-keys). Use Other models using [this guide](/models). 
+ + +## Understanding Smart City Management + + + Smart city management enables: + - Real-time utility monitoring + - Pattern analysis and prediction + - Resource optimization + - Automated implementation + - Continuous feedback monitoring + + +## Features + + + + Monitor city utilities: + - Power consumption + - Water usage + - Traffic patterns + + + Analyze usage patterns: + - Daily cycles + - Weekly trends + - Seasonal variations + + + Optimize resource allocation: + - Load balancing + - Pressure adjustment + - Traffic signal timing + + + Continuous improvement: + - Implementation tracking + - Success rate monitoring + - Adaptive optimization + + + +## Next Steps + + + + Learn about chaining prompts for complex workflows + + + Explore how to optimize and evaluate solutions + + \ No newline at end of file diff --git a/docs/usecases/supply-chain.mdx b/docs/usecases/supply-chain.mdx new file mode 100644 index 0000000..d91f5d8 --- /dev/null +++ b/docs/usecases/supply-chain.mdx @@ -0,0 +1,211 @@ +--- +title: "Supply Chain" +sidebarTitle: "Supply Chain" +description: "Learn how to create AI agents for supply chain risk management and mitigation." +icon: "truck" +--- + +```mermaid +flowchart LR + In[In] --> Monitor[Global Monitor] + Monitor -->|high/medium/critical| Analyzer[Impact Analyzer] + Analyzer --> Generator[Strategy Generator] + Generator --> Out[Out] + + style In fill:#8B0000,color:#fff + style Monitor fill:#2E8B57,color:#fff + style Analyzer fill:#2E8B57,color:#fff + style Generator fill:#2E8B57,color:#fff + style Out fill:#8B0000,color:#fff +``` + +A workflow demonstrating how AI agents can monitor global events, analyze supply chain impacts, and generate mitigation strategies. + +## Quick Start + + + + First, install the PraisonAI Agents package: + ```bash + pip install praisonaiagents + ``` + + + + Set your OpenAI API key as an environment variable in your terminal: + ```bash + export OPENAI_API_KEY=your_api_key_here + ``` + + + + Create a new file `app.py` with the basic setup: + ```python + from praisonaiagents import Agent, Task, PraisonAIAgents + import time + from typing import Dict, List + + def monitor_global_events(): + """Simulates monitoring of global events""" + events = [ + {"type": "natural_disaster", "severity": "high", "region": "Asia"}, + {"type": "political_unrest", "severity": "medium", "region": "Europe"}, + {"type": "economic_crisis", "severity": "critical", "region": "Americas"} + ] + return events[int(time.time()) % 3] + + def analyze_supply_impact(event: Dict): + """Simulates impact analysis on supply chain""" + impact_matrix = { + "natural_disaster": {"delay": "severe", "cost": "high", "risk_level": 9}, + "political_unrest": {"delay": "moderate", "cost": "medium", "risk_level": 6}, + "economic_crisis": {"delay": "significant", "cost": "extreme", "risk_level": 8} + } + return impact_matrix.get(event["type"]) + + def generate_mitigation_strategies(impact: Dict): + """Simulates generation of mitigation strategies""" + strategies = { + "severe": ["activate_backup_suppliers", "emergency_logistics_routing"], + "moderate": ["increase_buffer_stock", "alternative_transport"], + "significant": ["diversify_suppliers", "hedge_currency_risks"] + } + return strategies.get(impact["delay"], ["review_supply_chain"]) + + # Create specialized agents + monitor_agent = Agent( + name="Global Monitor", + role="Event Monitoring", + goal="Monitor and identify global events affecting supply chain", + instructions="Track and report significant global events", + 
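+        # monitor_global_events returns a simulated event whose severity ("high", "medium" or "critical")
+        # drives the decision routing in the monitor_events task defined below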
tools=[monitor_global_events] + ) + + impact_analyzer = Agent( + name="Impact Analyzer", + role="Impact Assessment", + goal="Analyze event impact on supply chain", + instructions="Assess potential disruptions and risks", + tools=[analyze_supply_impact] + ) + + strategy_generator = Agent( + name="Strategy Generator", + role="Strategy Development", + goal="Generate mitigation strategies", + instructions="Develop strategies to address identified risks", + tools=[generate_mitigation_strategies] + ) + + # Create workflow tasks + monitoring_task = Task( + name="monitor_events", + description="Monitor global events affecting supply chain", + expected_output="Identified global events", + agent=monitor_agent, + is_start=True, + task_type="decision", + condition={ + "high": ["analyze_impact"], + "medium": ["analyze_impact"], + "critical": ["analyze_impact"] + } + ) + + impact_task = Task( + name="analyze_impact", + description="Analyze impact on supply chain", + expected_output="Impact assessment", + agent=impact_analyzer, + next_tasks=["generate_strategies"] + ) + + strategy_task = Task( + name="generate_strategies", + description="Generate mitigation strategies", + expected_output="List of mitigation strategies", + agent=strategy_generator, + context=[monitoring_task, impact_task] + ) + + # Create workflow + workflow = PraisonAIAgents( + agents=[monitor_agent, impact_analyzer, strategy_generator], + tasks=[monitoring_task, impact_task, strategy_task], + process="workflow", + verbose=True + ) + + def main(): + print("\nStarting Supply Chain Risk Management Workflow...") + print("=" * 50) + + # Run workflow + results = workflow.start() + + # Print results + print("\nRisk Management Results:") + print("=" * 50) + for task_id, result in results["task_results"].items(): + if result: + print(f"\nTask: {task_id}") + print(f"Result: {result.raw}") + print("-" * 50) + + if __name__ == "__main__": + main() + ``` + + + + Type this in your terminal to run your agents: + ```bash + python app.py + ``` + + + + + **Requirements** + - Python 3.10 or higher + - OpenAI API key. Generate OpenAI API key [here](https://platform.openai.com/api-keys). Use Other models using [this guide](/models). + - Basic understanding of Python + + +## Understanding Supply Chain Risk Management + + + Automated supply chain risk management workflow enables: + - Real-time global event monitoring + - Impact assessment and risk analysis + - Strategy generation for risk mitigation + - Proactive supply chain optimization + + +## Features + + + + Monitor global events that could affect supply chain operations. + + + Assess potential disruptions and quantify risks to the supply chain. + + + Generate mitigation strategies based on event impact. + + + Proactively manage and mitigate supply chain risks. + + + +## Next Steps + + + + Learn about sequential prompt execution + + + Explore optimization techniques + + \ No newline at end of file