from typing import Any

from agent_control_models import EvaluatorResult
from agent_control_evaluators import (
    Evaluator,
    EvaluatorConfig,
    EvaluatorMetadata,
    register_evaluator,
)


class MyEvaluatorConfig(EvaluatorConfig):
    """Configuration schema for your evaluator."""

    threshold: float = 0.5
    custom_option: str = "default"


@register_evaluator
class MyEvaluator(Evaluator[MyEvaluatorConfig]):
"""Your custom evaluator."""
metadata = EvaluatorMetadata(
name="my-evaluator",
version="1.0.0",
description="Detects custom patterns using proprietary logic",
requires_api_key=True, # Set to True if you need credentials
timeout_ms=5000,
)
config_model = MyEvaluatorConfig
def __init__(self, config: MyEvaluatorConfig) -> None:
"""Initialize with validated configuration."""
super().__init__(config)
# Set up any clients, load models, etc.
async def evaluate(self, data: Any) -> EvaluatorResult:
"""
Evaluate the input data.
Args:
data: The content to evaluate (string, dict, etc.)
Returns:
EvaluatorResult with:
- matched: bool — Did this trigger the control?
- confidence: float — How confident (0.0-1.0)?
- message: str — Human-readable explanation
- metadata: dict — Additional context for logging
"""
# Your detection logic here
score = await self._analyze(data)
return EvaluatorResult(
matched=score > self.config.threshold,
confidence=score,
message=f"Custom analysis score: {score:.2f}",
metadata={
"score": score,
"threshold": self.config.threshold,
}
)
async def _analyze(self, data: Any) -> float:
"""Your proprietary analysis logic."""
# Call your API, run your model, etc.
return 0.0
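

# --- Usage sketch (illustrative, not part of the template) ---
# A minimal example of exercising the evaluator directly. This assumes the
# evaluator can be constructed with a validated config instance, as the
# __init__ signature above suggests; in a real deployment the framework may
# construct registered evaluators for you via @register_evaluator and
# config_model. `asyncio` is stdlib and is only needed here to drive the
# async evaluate() call.

import asyncio


async def _demo() -> None:
    # Assumes EvaluatorConfig accepts field overrides as keyword arguments.
    config = MyEvaluatorConfig(threshold=0.7)
    evaluator = MyEvaluator(config)
    result = await evaluator.evaluate("some content to check")
    # Fields per the evaluate() docstring: matched, confidence, message.
    print(result.matched, result.confidence, result.message)


if __name__ == "__main__":
    asyncio.run(_demo())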