Source code for haive.hap.server.runtime

import importlib
from typing import Dict, Any
from haive.agents.base import Agent
from haive.hap.models.context import HAPContext
from haive.hap.models.graph import HAPGraph

[docs] class HAPRuntime:
    """Execute an HAP graph with proper error handling and Haive integration."""
[docs]     def __init__(self, graph: HAPGraph):
        self.graph = graph
[docs]     def _load_agent(self, entrypoint: str) -> Agent:
        """Load agent from entrypoint with proper error handling."""
        if ":" not in entrypoint:
            raise ValueError(f"Invalid entrypoint format: {entrypoint}. Expected 'module:Class'")
        module_name, class_name = entrypoint.split(":", 1)

        try:
            module = importlib.import_module(module_name)
        except ImportError as e:
            raise ImportError(f"Could not import module '{module_name}': {e}")

        try:
            agent_class = getattr(module, class_name)
        except AttributeError as e:
            raise AttributeError(f"Module '{module_name}' has no class '{class_name}': {e}")

        if not issubclass(agent_class, Agent):
            raise TypeError(f"{agent_class} is not a subclass of Agent")

        # Try to instantiate with minimal config
        from haive.core.engine.aug_llm import AugLLMConfig

        try:
            # First try with no args (for agents with defaults)
            return agent_class()
        except Exception:
            try:
                # Try with name only
                return agent_class(name=class_name.lower())
            except Exception:
                try:
                    # Try with name and engine
                    return agent_class(
                        name=class_name.lower(),
                        engine=AugLLMConfig()
                    )
                except Exception as e:
                    raise RuntimeError(f"Could not instantiate {agent_class}: {e}")
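    # Illustrative resolution of an entrypoint (the module path below is hypothetical,
    # chosen only to show the "module:Class" convention):
    #   "haive.agents.examples:EchoAgent"
    #     -> importlib.import_module("haive.agents.examples")
    #     -> getattr(module, "EchoAgent")
    #     -> EchoAgent(), then EchoAgent(name="echoagent"),
    #        then EchoAgent(name="echoagent", engine=AugLLMConfig()) as fallbacks.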
[docs]     async def run(self, initial_context: Dict[str, Any]) -> HAPContext:
        """Execute the graph asynchronously."""
        return await self.graph.execute(initial_context)
[docs]     def run_sync(self, initial_context: Dict[str, Any]) -> HAPContext:
        """Execute the graph synchronously (for backward compatibility)."""
        import asyncio

        # Create context if needed
        if not isinstance(initial_context, HAPContext):
            context = HAPContext()
            context.update(initial_context)
        else:
            context = initial_context

        # Simple synchronous execution for backward compatibility
        execution_order = self.graph.topological_order()
        for node_id in execution_order:
            node = self.graph.nodes[node_id]
            agent = node.load_agent()

            # Update execution path
            context.execution_path.append(node_id)

            # Execute agent synchronously
            result = agent.run(context.model_dump())

            # Update context
            if isinstance(result, dict):
                context.update(result)
            else:
                context.outputs[node_id] = result

        return context
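A minimal usage sketch, not part of the module above: it assumes HAPGraph can be constructed with a default constructor and populated elsewhere, and that the initial context is a plain dict whose keys depend on the agents in the graph.

    import asyncio

    from haive.hap.models.graph import HAPGraph
    from haive.hap.server.runtime import HAPRuntime

    # Hypothetical graph; how nodes and entrypoints are registered is defined
    # in haive.hap.models.graph, not in this module.
    graph = HAPGraph()
    runtime = HAPRuntime(graph)

    # Asynchronous execution delegates to HAPGraph.execute().
    context = asyncio.run(runtime.run({"question": "hello"}))

    # Synchronous execution walks the topological order node by node.
    context = runtime.run_sync({"question": "hello"})
    print(context.execution_path)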