Loading...
# autogen_actors.py - AutoGen v0.4 Actor Model
from autogen_agentchat.agents import AssistantAgent, UserProxyAgent
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_ext.models import OpenAIChatCompletionClient
from autogen_core.application import SingleThreadedAgentRuntime
from autogen_core.base import MessageContext
import asyncio
class ConversationOrchestrator:
    """Orchestrates a research -> analysis -> synthesis agent team.

    Uses the AutoGen v0.4 actor-model runtime. The agents converse in a
    round-robin group chat until the task completes or the turn budget
    is exhausted.
    """

    def __init__(self):
        import os  # local import keeps the module import block untouched

        self.runtime = SingleThreadedAgentRuntime()
        # SECURITY: never hard-code an API key. Read it from the
        # environment; the original placeholder remains only as a
        # backward-compatible fallback.
        self.model_client = OpenAIChatCompletionClient(
            model="gpt-4",
            api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"),
        )

    async def create_research_team(self):
        """Create a team of specialized agents.

        Returns a RoundRobinGroupChat of four participants; only the
        researcher carries tools.
        """
        # Research Agent - information gathering
        researcher = AssistantAgent(
            name="Researcher",
            model_client=self.model_client,
            system_message="""You are a research specialist who gathers
comprehensive information on technical topics. You provide detailed,
accurate information with citations.""",
            tools=[
                self._create_web_search_tool(),
                self._create_documentation_tool(),
            ],
        )
        # Analyst Agent - critical analysis
        analyst = AssistantAgent(
            name="Analyst",
            model_client=self.model_client,
            system_message="""You are a critical analyst who evaluates
information for accuracy, completeness, and practical applicability.
You identify gaps and inconsistencies.""",
        )
        # Synthesizer Agent - creates actionable output
        synthesizer = AssistantAgent(
            name="Synthesizer",
            model_client=self.model_client,
            system_message="""You are a synthesis expert who combines
research and analysis into clear, actionable recommendations.
You create structured, practical outputs.""",
        )
        # User Proxy - represents the user; code execution disabled
        user_proxy = UserProxyAgent(
            name="User",
            code_execution_config=False,
        )
        # Round-robin pattern: participants speak in fixed rotation.
        team = RoundRobinGroupChat(
            participants=[researcher, analyst, synthesizer, user_proxy]
        )
        return team

    def _create_web_search_tool(self):
        """Return an async web-search tool callable (stub implementation)."""

        async def web_search(query: str) -> str:
            """Search the web for information"""
            # Implementation using search API
            return f"Search results for: {query}"

        return web_search

    def _create_documentation_tool(self):
        """Return an async documentation-lookup tool callable (stub)."""

        async def lookup_docs(topic: str, framework: str) -> str:
            """Look up official documentation"""
            # Implementation using docs API
            return f"Documentation for {topic} in {framework}"

        return lookup_docs

    async def run_conversation(self, task: str):
        """Build the team and run *task* through it (at most 10 turns)."""
        team = await self.create_research_team()
        # NOTE(review): depending on the exact autogen-agentchat release,
        # run() may expect a termination condition instead of max_turns —
        # confirm against the installed version.
        result = await team.run(
            task=task,
            max_turns=10,
        )
        return result
# Usage
async def main():
    """Demo entry point: run a single research task and print the result."""
    orchestrator = ConversationOrchestrator()
    task = """Research and analyze the best practices for implementing
microservices architecture with Node.js. Provide actionable
recommendations for a team of 10 developers."""
    result = await orchestrator.run_conversation(task)
    print(f"Result: {result}")


# Guard the entry point so importing this module has no side effects.
if __name__ == "__main__":
    asyncio.run(main())

# python_agent.py - Python Agent in AutoGen v0.4
from autogen_core.application import SingleThreadedAgentRuntime
from autogen_core.base import MessageContext, TopicId
from autogen_core.components import DefaultTopicId, TypeSubscription
from dataclasses import dataclass
@dataclass
class AnalysisRequest:
    """Message type for analysis requests"""
    # Source code to be analyzed
    code: str
    # Language identifier of the code, e.g. "python" or "csharp"
    language: str
    # Kind of analysis requested, e.g. "security"
    analysis_type: str
@dataclass
class AnalysisResponse:
    """Message type for analysis responses"""
    # Findings produced by the analyzer (dicts with type/severity/line/message)
    issues: list
    # Suggested fixes derived from the issues
    recommendations: list
    # Overall quality score; presumably 0.0-10.0 with 10.0 = clean — confirm
    score: float
class PythonAnalyzerAgent:
    """Actor that analyzes source code and publishes the results.

    Subscribes to AnalysisRequest messages on the "analysis" topic and
    replies with an AnalysisResponse on "analysis_results", addressed to
    the requesting sender.
    """

    def __init__(self, runtime: "SingleThreadedAgentRuntime"):
        # String forward-ref annotations keep this class importable even
        # when the runtime types are not resolvable at import time.
        self.runtime = runtime
        # Subscribe to analysis requests
        self.runtime.subscribe(
            type_subscription=TypeSubscription(
                topic_type="analysis",
                agent_type="PythonAnalyzer",
            ),
            message_type=AnalysisRequest,
            handler=self.handle_analysis_request,
        )

    async def handle_analysis_request(
        self,
        message: "AnalysisRequest",
        ctx: "MessageContext",
    ) -> None:
        """Analyze the requested code and publish an AnalysisResponse."""
        # Perform analysis
        issues = await self._analyze_code(
            message.code,
            message.language,
        )
        recommendations = await self._generate_recommendations(issues)
        score = self._calculate_quality_score(issues)
        # Send response addressed back to the original sender.
        response = AnalysisResponse(
            issues=issues,
            recommendations=recommendations,
            score=score,
        )
        await self.runtime.publish_message(
            message=response,
            topic_id=TopicId("analysis_results", ctx.sender),
        )

    async def _analyze_code(self, code: str, language: str) -> list:
        """Analyze code for issues (stub returning fixed findings)."""
        # Use AST parsing, linting tools, etc.
        return [
            {"type": "security", "severity": "high", "line": 42,
             "message": "SQL injection vulnerability"},
            {"type": "performance", "severity": "medium", "line": 15,
             "message": "Inefficient loop detected"},
        ]

    async def _generate_recommendations(self, issues: list) -> list:
        """Generate fix recommendations (covers security issues only)."""
        recommendations = []
        for issue in issues:
            if issue["type"] == "security":
                recommendations.append({
                    "issue": issue["message"],
                    "fix": "Use parameterized queries",
                    "code_example": "db.execute('SELECT * FROM users WHERE id = ?', [user_id])",
                })
        return recommendations

    def _calculate_quality_score(self, issues: list) -> float:
        """Score from 10.0 (clean) down to 0.0, subtracting severity weights.

        Unknown severities fall back to weight 1 (same as "medium").
        """
        if not issues:
            return 10.0
        severity_weights = {"critical": 3, "high": 2, "medium": 1, "low": 0.5}
        penalty = sum(severity_weights.get(i["severity"], 1) for i in issues)
        return max(0.0, 10.0 - penalty)

# CSharpAgent.cs - .NET Agent in AutoGen v0.4
using AutoGen.Core;
using AutoGen.Messages;
using System.Threading.Tasks;
/// <summary>Request asking a reviewer agent to review the given code.</summary>
public record CodeReviewRequest(
    string Code,
    string Author,
    string PullRequestId
);

/// <summary>Review outcome: approval flag, inline comments, reviewer name.</summary>
public record CodeReviewResponse(
    bool Approved,
    List<ReviewComment> Comments,
    string Reviewer
);
public class DotNetReviewerAgent : IAgent
{
private readonly IAgentRuntime _runtime;
/// <summary>
/// Wires this agent into the runtime: CodeReviewRequest messages published
/// on the "code_review" topic are dispatched to HandleReviewRequest.
/// </summary>
public DotNetReviewerAgent(IAgentRuntime runtime)
{
    _runtime = runtime;
    // Subscribe to review requests
    _runtime.Subscribe<CodeReviewRequest>(
        topic: "code_review",
        handler: HandleReviewRequest
    );
}
/// <summary>
/// Reviews a pull request: combines local analysis with a security
/// analysis requested from the Python analyzer agent (cross-language),
/// then publishes a CodeReviewResponse back to the sender.
/// </summary>
private async Task HandleReviewRequest(
    CodeReviewRequest message,
    MessageContext context)
{
    // Perform code review
    var comments = await AnalyzeCode(message.Code);
    // Request analysis from Python agent (cross-language!)
    var analysisRequest = new AnalysisRequest(
        Code: message.Code,
        Language: "csharp",
        AnalysisType: "security"
    );
    await _runtime.PublishAsync(
        message: analysisRequest,
        topicId: new TopicId("analysis", "PythonAnalyzer")
    );
    // Wait for Python agent response
    // NOTE(review): blocking request/response over pub-sub; presumably a
    // timeout after 30s throws and aborts the whole review — confirm that
    // this is the intended failure mode.
    var analysisResult = await _runtime.ReceiveAsync<AnalysisResponse>(
        topicId: new TopicId("analysis_results", this.Name),
        timeout: TimeSpan.FromSeconds(30)
    );
    // Combine local and Python analysis
    comments.AddRange(ConvertToComments(analysisResult.Issues));
    // Send review response: approve only when the remote quality score is
    // at least 7.0 and no combined comment is critical.
    var response = new CodeReviewResponse(
        Approved: analysisResult.Score >= 7.0 && comments.Count(c => c.Severity == "critical") == 0,
        Comments: comments,
        Reviewer: this.Name
    );
    await _runtime.PublishAsync(
        message: response,
        topicId: new TopicId("review_results", context.Sender)
    );
}
/// <summary>
/// Runs the local .NET-side analysis over the given code and returns the
/// resulting review comments (stub: always one Roslyn-style finding).
/// </summary>
private Task<List<ReviewComment>> AnalyzeCode(string code)
{
    // .NET-specific code analysis; use Roslyn analyzers
    var findings = new List<ReviewComment>
    {
        new ReviewComment
        {
            Line = 10,
            Severity = "medium",
            Message = "Consider using async/await pattern",
            Suggestion = "Make this method async for better scalability"
        }
    };
    return Task.FromResult(findings);
}
}# autogen_studio_config.py
from autogen_studio import Studio, AgentConfig, WorkflowConfig
class AutoGenStudioWorkflow:
    """Builds and monitors a customer-support workflow in AutoGen Studio."""

    def __init__(self):
        self.studio = Studio()

    def create_customer_support_workflow(self):
        """Create customer support workflow in AutoGen Studio"""
        # Front-door agent: categorizes each request for routing.
        triage = AgentConfig(
            name="TriageAgent",
            type="assistant",
            llm_config={
                "model": "gpt-4",
                "temperature": 0.3,
            },
            system_message="""You are a customer support triage specialist.
Categorize incoming requests as: technical, billing, or general inquiry.""",
        )
        # Specialist for technical issues.
        technical = AgentConfig(
            name="TechnicalSupportAgent",
            type="assistant",
            llm_config={"model": "gpt-4", "temperature": 0.2},
            system_message="You are a technical support expert.",
            tools=["search_knowledge_base", "create_ticket", "escalate_to_engineer"],
        )
        # Specialist for billing issues.
        billing = AgentConfig(
            name="BillingAgent",
            type="assistant",
            llm_config={"model": "gpt-4", "temperature": 0.1},
            system_message="You are a billing specialist.",
            tools=["check_invoice", "process_refund", "update_subscription"],
        )
        # Workflow definition: triage is both entry point and the
        # fallback handler for general inquiries.
        workflow = WorkflowConfig(
            name="CustomerSupportWorkflow",
            description="Automated customer support with specialized agents",
            entry_point=triage,
            routing_logic={
                "technical": technical,
                "billing": billing,
                "general": triage,
            },
            max_turns=15,
            human_in_loop=True,  # Require human approval for refunds
            termination_condition="user_satisfied or max_turns_reached",
        )
        # Deploy to Studio
        self.studio.deploy_workflow(workflow)
        return workflow

    def monitor_workflow_performance(self, workflow_id: str):
        """Monitor workflow metrics in real-time"""
        metrics = self.studio.get_metrics(workflow_id)
        return dict(
            total_conversations=metrics.conversation_count,
            average_resolution_time=metrics.avg_resolution_time,
            satisfaction_score=metrics.csat_score,
            escalation_rate=metrics.escalation_rate,
            cost_per_conversation=metrics.avg_cost,
        )

# group_chat_patterns.py
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.teams import SelectorGroupChat
from autogen_agentchat.base import TerminationCondition
from autogen_ext.models import OpenAIChatCompletionClient
class CollaborativeAgentTeam:
    """Collaborative code-review team built on a selector group chat."""

    def __init__(self):
        import os  # local import keeps the module import block untouched

        # SECURITY: never hard-code an API key. Read it from the
        # environment; the original placeholder remains only as a
        # backward-compatible fallback.
        self.model_client = OpenAIChatCompletionClient(
            model="gpt-4",
            api_key=os.environ.get("OPENAI_API_KEY", "your-key"),
        )

    async def create_code_review_team(self):
        """Create collaborative code review team of three domain experts."""
        # Security Expert
        security_expert = AssistantAgent(
            name="SecurityExpert",
            model_client=self.model_client,
            system_message="""You are a security expert. Review code for
vulnerabilities: SQL injection, XSS, CSRF, insecure dependencies.""",
        )
        # Performance Expert
        performance_expert = AssistantAgent(
            name="PerformanceExpert",
            model_client=self.model_client,
            system_message="""You are a performance optimization expert.
Identify bottlenecks, inefficient algorithms, memory leaks.""",
        )
        # Architecture Expert
        architecture_expert = AssistantAgent(
            name="ArchitectureExpert",
            model_client=self.model_client,
            system_message="""You are a software architect. Review for
SOLID principles, design patterns, maintainability.""",
        )
        # Selector group chat: the model picks the most relevant speaker.
        # NOTE(review): recent autogen-agentchat releases expose
        # MaxMessageTermination(20) rather than
        # TerminationCondition.max_messages — confirm against the installed
        # version before shipping.
        team = SelectorGroupChat(
            participants=[
                security_expert,
                performance_expert,
                architecture_expert,
            ],
            model_client=self.model_client,
            termination_condition=TerminationCondition.max_messages(20),
        )
        return team

    async def review_pull_request(self, pr_code: str):
        """Review PR using collaborative team"""
        team = await self.create_code_review_team()
        task = f"""
Review this pull request code:
{pr_code}
Each expert should:
1. Analyze from your domain perspective
2. Identify specific issues with line numbers
3. Provide actionable recommendations
4. Rate severity (critical/high/medium/low)
Collaborate to produce comprehensive review.
"""
        result = await team.run(task=task)
        return result
"maxTokens": 4000,
"temperature": 0.3,
"systemPrompt": "You are an AutoGen v0.4 specialist focused on building conversation-based multi-agent systems with actor model architecture"
}
AutoGen v0.4 agents not receiving messages in cross-language setup
Verify runtime.subscribe() includes correct topic_type and agent_type. Check message serialization matches schema between Python and .NET. Enable OpenTelemetry tracing to debug message flow. Ensure SingleThreadedAgentRuntime is properly initialized.
Group chat conversation terminating prematurely before task completion
Increase max_turns parameter in team.run() configuration. Review TerminationCondition logic for premature exits. Add explicit task completion signals in agent responses. Monitor conversation state with logging to identify early termination triggers.
AssistantAgent function calling not invoking registered tools correctly
Verify tool function signatures match AutoGen expected format with async def. Check model_client supports function calling with tools parameter. Add proper docstrings for tool discovery. Test tools independently before integration.
RoundRobinGroupChat agents speaking out of turn causing conversation chaos
Switch to SelectorGroupChat for dynamic speaker selection based on relevance. Implement custom speaker_selection_method with turn-taking logic. Add conversation state management to track previous speakers. Configure max_consecutive_auto_reply limits.
Agent responses contain hallucinated information not grounded in context
Lower temperature to 0.2-0.3 in model_client configuration. Add explicit context retrieval tools for fact-checking. Implement RAG pattern with vector database for grounded responses. Use system_message to emphasize factual accuracy requirements.
Loading reviews...
Multi-agent orchestration specialist using LangGraph and CrewAI for complex, stateful workflows with graph-driven reasoning and role-based agent coordination
Microsoft Semantic Kernel enterprise agent specialist for building Azure-native AI applications with multi-language SDK support, plugin governance, and enterprise-grade deployment
Expert code reviewer that provides thorough, constructive feedback on code quality, security, performance, and best practices
Growing community of AI engineers actively building with Claude
Live in 5 minutes • Growing community