Examples¶
This section provides practical examples of using HAP in various scenarios.
Basic Examples¶
Single Agent Workflow¶
The simplest HAP workflow with one agent:
basic_workflow.py¶
"""Basic AGP workflow example."""
import asyncio
from haive.agp.models.context import AgentContext
from haive.agp.models.graph import AGPGraph
from haive.agp.server.runtime import AGPRuntime
from haive.agents.simple.agent import SimpleAgent
from haive.core.engine.aug_llm import AugLLMConfig
async def main():
"""Run a basic AGP workflow."""
print("🚀 Basic AGP Workflow Example\n")
# Step 1: Create an agent
print("1️⃣ Creating SimpleAgent...")
agent = SimpleAgent(
name="assistant",
engine=AugLLMConfig(
temperature=0.7,
system_message="You are a helpful AI assistant."
)
)
# Step 2: Create AGP graph
print("2️⃣ Building AGP graph...")
graph = AGPGraph()
# Add the agent as a node
node = graph.add_agent_node(
Multi-Agent Workflow¶
Orchestrating multiple agents with tools:
multi_agent_flow.py¶
"""Multi-agent orchestration example with AGP."""
import asyncio
from haive.agp.models.context import AgentContext
from haive.agp.models.graph import AGPGraph
from haive.agp.server.runtime import AGPRuntime
from haive.agents.simple.agent import SimpleAgent
from haive.agents.react.agent import ReactAgent
from haive.core.engine.aug_llm import AugLLMConfig
from langchain_core.tools import tool
# Define some tools for the React agent
@tool
def analyze_sentiment(text: str) -> str:
"""Analyze the sentiment of text."""
# Simple mock sentiment analysis
positive_words = ["good", "great", "excellent", "happy", "wonderful"]
negative_words = ["bad", "terrible", "awful", "sad", "horrible"]
text_lower = text.lower()
positive_count = sum(1 for word in positive_words if word in text_lower)
negative_count = sum(1 for word in negative_words if word in text_lower)
if positive_count > negative_count:
return "Positive sentiment"
elif negative_count > positive_count:
return "Negative sentiment"
else:
return "Neutral sentiment"
@tool
def extract_keywords(text: str) -> str:
"""Extract key words from text."""
# Simple keyword extraction
words = text.split()
# Filter out common words
common_words = {"the", "a", "an", "is", "are", "was", "were", "in", "on", "at", "to", "for"}
keywords = [w for w in words if w.lower() not in common_words and len(w) > 3]
return f"Keywords: {', '.join(keywords[:5])}"
async def main():
"""Run a multi-agent workflow."""
print("🚀 Multi-Agent AGP Workflow Example\n")
# Step 1: Create multiple agents with different capabilities
print("1️⃣ Creating specialized agents...")
Advanced Examples¶
Research Assistant¶
A comprehensive research assistant using multiple specialized agents:
import asyncio

from haive.hap.models import HAPGraph
from haive.hap.server.runtime import HAPRuntime
from haive.agents.react.agent import ReactAgent
from haive.agents.simple.agent import SimpleAgent
from haive.core.engine.aug_llm import AugLLMConfig
from langchain_core.tools import tool


@tool
def web_search(query: str) -> str:
    """Search the web for information."""
    # Simulate web search
    return f"Search results for: {query}"


@tool
def extract_facts(text: str) -> str:
    """Extract key facts from text."""
    # Simulate fact extraction
    return "Key facts: [1, 2, 3]"


async def research_assistant():
    # Create specialized agents
    searcher = ReactAgent(
        name="searcher",
        engine=AugLLMConfig(
            temperature=0.3,
            system_message="You are a research specialist. Find relevant information."
        ),
        tools=[web_search]
    )
    analyzer = ReactAgent(
        name="analyzer",
        engine=AugLLMConfig(
            temperature=0.4,
            system_message="You analyze and extract insights from research."
        ),
        tools=[extract_facts]
    )
    writer = SimpleAgent(
        name="writer",
        engine=AugLLMConfig(
            temperature=0.7,
            system_message="You write comprehensive research reports."
        )
    )

    # Build research workflow
    graph = HAPGraph()
    graph.add_agent_node("search", searcher, next_nodes=["analyze"])
    graph.add_agent_node("analyze", analyzer, next_nodes=["write"])
    graph.add_agent_node("write", writer)
    graph.entry_node = "search"

    # Execute research
    runtime = HAPRuntime(graph)
    result = await runtime.run({
        "topic": "Recent advances in quantum computing",
        "requirements": "Focus on practical applications"
    })
    return result
Content Processing Pipeline¶
Process content through multiple transformation stages:
async def content_pipeline():
    # Content processing agents
    extractor = SimpleAgent(
        name="extractor",
        engine=AugLLMConfig(
            system_message="Extract main content from documents."
        )
    )
    enhancer = SimpleAgent(
        name="enhancer",
        engine=AugLLMConfig(
            system_message="Enhance content with additional context."
        )
    )
    formatter = SimpleAgent(
        name="formatter",
        engine=AugLLMConfig(
            system_message="Format content for publication."
        )
    )

    # Build pipeline
    graph = HAPGraph()
    graph.add_agent_node("extract", extractor, ["enhance"])
    graph.add_agent_node("enhance", enhancer, ["format"])
    graph.add_agent_node("format", formatter)
    graph.entry_node = "extract"

    # Process document
    runtime = HAPRuntime(graph)
    result = await runtime.run({
        "document": "Raw document text...",
        "format": "markdown",
        "style": "professional"
    })
    return result
Customer Service System¶
Multi-agent customer service with routing:
@tool
def classify_intent(inquiry: str) -> str:
    """Classify a customer inquiry as technical, billing, or general."""
    # Simple mock classifier
    inquiry_lower = inquiry.lower()
    if any(word in inquiry_lower for word in ["internet", "connection", "error", "broken"]):
        return "technical"
    if any(word in inquiry_lower for word in ["bill", "charge", "payment", "refund"]):
        return "billing"
    return "general"


async def customer_service():
    # Specialized service agents
    classifier = ReactAgent(
        name="classifier",
        engine=AugLLMConfig(
            temperature=0.2,
            system_message="Classify customer inquiries by type."
        ),
        tools=[classify_intent]
    )
    technical_support = SimpleAgent(
        name="tech_support",
        engine=AugLLMConfig(
            system_message="Provide technical support and solutions."
        )
    )
    billing_support = SimpleAgent(
        name="billing",
        engine=AugLLMConfig(
            system_message="Handle billing and account inquiries."
        )
    )
    general_support = SimpleAgent(
        name="general",
        engine=AugLLMConfig(
            system_message="Handle general customer inquiries."
        )
    )

    # Build routing workflow: the classifier can hand off to any support agent
    graph = HAPGraph()
    graph.add_agent_node(
        "classify",
        classifier,
        ["tech", "billing", "general"]
    )
    graph.add_agent_node("tech", technical_support)
    graph.add_agent_node("billing", billing_support)
    graph.add_agent_node("general", general_support)
    graph.entry_node = "classify"

    # Handle customer inquiry
    runtime = HAPRuntime(graph)
    result = await runtime.run({
        "customer_id": "12345",
        "inquiry": "My internet connection is slow",
        "history": ["previous interactions..."]
    })
    return result
Testing Patterns¶
Unit Testing Agents¶
import pytest

from haive.hap.models import HAPGraph, HAPContext
from haive.hap.server.runtime import HAPRuntime
from haive.agents.simple.agent import SimpleAgent
from haive.core.engine.aug_llm import AugLLMConfig


@pytest.mark.asyncio
async def test_single_agent_workflow():
    # Create test agent
    agent = SimpleAgent(
        name="test_agent",
        engine=AugLLMConfig(temperature=0.1)
    )

    # Build graph
    graph = HAPGraph()
    graph.add_agent_node("test", agent)
    graph.entry_node = "test"

    # Execute
    runtime = HAPRuntime(graph)
    result = await runtime.run({"test": "input"})

    # Verify
    assert isinstance(result, HAPContext)
    assert "test" in result.execution_path
    assert len(result.agent_metadata) == 1
Integration Testing¶
@pytest.mark.asyncio
async def test_multi_agent_integration():
    # Create two simple agents for the pipeline
    agent_a = SimpleAgent(name="a", engine=AugLLMConfig(temperature=0.1))
    agent_b = SimpleAgent(name="b", engine=AugLLMConfig(temperature=0.1))

    # Create workflow: a -> b
    graph = HAPGraph()
    graph.add_agent_node("a", agent_a, ["b"])
    graph.add_agent_node("b", agent_b)
    graph.entry_node = "a"

    # Test data flow
    runtime = HAPRuntime(graph)
    initial_data = {"value": 100}
    result = await runtime.run(initial_data)

    # Verify execution order
    assert result.execution_path == ["a", "b"]

    # Verify data transformation
    assert "value" in result.inputs
    assert result.outputs is not None
Performance Patterns¶
Caching Agent Results¶
class CachedRuntime(HAPRuntime):
    """Cache node results by node id and input hash."""
    def __init__(self, graph):
        super().__init__(graph)
        self._cache = {}

    async def execute_cached(self, node_id, input_hash):
        # functools.lru_cache cannot wrap coroutines, so cache awaited results manually
        key = (node_id, input_hash)
        if key not in self._cache:
            self._cache[key] = await self.execute_node(node_id, input_hash)
        return self._cache[key]
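A minimal usage sketch, assuming node inputs are JSON-serializable dictionaries; the make_input_hash helper is hypothetical and only exists to turn an input dict into a stable, hashable key:

import hashlib
import json


def make_input_hash(inputs: dict) -> str:
    """Produce a stable key for a JSON-serializable input dict (hypothetical helper)."""
    return hashlib.sha256(json.dumps(inputs, sort_keys=True).encode()).hexdigest()


# Inside an async workflow, assuming `graph` was built as in the examples above:
# cached_runtime = CachedRuntime(graph)
# result = await cached_runtime.execute_cached("analyze", make_input_hash({"text": "..."}))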
Parallel Node Execution¶
async def execute_parallel_nodes(runtime, nodes, context):
    # Execute independent nodes in parallel, each on its own copy of the context
    tasks = []
    for node in nodes:
        task = runtime.execute_node(node, context.copy())
        tasks.append(task)
    results = await asyncio.gather(*tasks)

    # Merge results
    merged_context = merge_contexts(results)
    return merged_context
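merge_contexts is not a HAP API; a minimal sketch, under the assumption that each branch result exposes the outputs dict and execution_path list used in the testing examples, might look like this:

def merge_contexts(results):
    """Combine parallel branch results (illustrative sketch, not a HAP API)."""
    merged = results[0]
    merged.outputs = merged.outputs or {}
    for ctx in results[1:]:
        # Assumes each context exposes an `outputs` dict and an `execution_path` list
        merged.outputs.update(ctx.outputs or {})
        merged.execution_path.extend(ctx.execution_path)
    return merged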
Best Practices¶
Start Simple: Begin with single-agent workflows
Test Incrementally: Test each agent before combining
Use Real Components: No mocks in testing
Track Metadata: Store debugging information
Handle Errors: Implement proper error handling (see the error-handling and timing sketch after this list)
Monitor Performance: Track execution times
Document Workflows: Clear documentation for complex graphs
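A minimal sketch of the error-handling and performance practices, wrapping a run in try/except with timing; the logger name and helper function are illustrative, not part of HAP:

import logging
import time

logger = logging.getLogger("hap.workflows")


async def run_with_monitoring(runtime, inputs):
    """Run a workflow with basic error handling and timing (illustrative helper)."""
    start = time.perf_counter()
    try:
        result = await runtime.run(inputs)
    except Exception:
        logger.exception("Workflow failed for inputs: %s", inputs)
        raise
    finally:
        logger.info("Workflow finished in %.2fs", time.perf_counter() - start)
    return result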