#!/usr/bin/env python3
"""
---
title: "Claude API Client"
component_type: script
version: "1.0.0"
audience: contributor
status: stable
summary: "Production-ready Claude API wrapper with tool use loop"
keywords: ['analysis', 'api', 'claude', 'client', 'validation']
created: 2025-12-22
updated: 2025-12-22
script_name: "claude-api-client.py"
language: python
executable: true
usage: "python3 scripts/claude-api-client.py [options]"
python_version: "3.10+"
dependencies: []
modifies_files: false
requires_auth: false
---

Claude API Client
=================

Production-ready wrapper for Claude API with tool use loop, streaming,
retries. Used by strategy brief system and other multi-agent
orchestration workflows.

Features:
- Tool use loop (web_search, web_fetch)
- Streaming responses
- Rate limiting and exponential backoff retries
- Token tracking and cost estimation
- Error handling with detailed logging

Exit Codes:
    0 - Success
    1 - Configuration error
    2 - API error
    3 - Tool execution error

Usage:
    python3 scripts/claude-api-client.py --test
    python3 scripts/claude-api-client.py --metrics
"""

import asyncio
import json
import os
import sys
import time
from dataclasses import asdict, dataclass, field
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, AsyncIterator, Dict, List, Optional

# Optional imports for full functionality

try: from anthropic import AsyncAnthropic from anthropic.types import Message, TextBlock, ToolUseBlock ANTHROPIC_AVAILABLE = True except ImportError: ANTHROPIC_AVAILABLE = False print("Warning: anthropic package not installed. Install with: pip install anthropic")

@dataclass
class APIConfig:
    """Claude API configuration with sensible defaults.

    The API key is read lazily from the ANTHROPIC_API_KEY environment
    variable unless supplied explicitly.
    """
    api_key: str = field(default_factory=lambda: os.getenv("ANTHROPIC_API_KEY", ""))
    model: str = "claude-sonnet-4-20250514"
    max_tokens: int = 8000
    temperature: float = 0.7
    timeout: float = 300.0       # seconds; passed to the HTTP client
    max_retries: int = 3
    retry_delay: float = 2.0     # base delay; doubled per attempt (exponential backoff)

    def validate(self) -> tuple[bool, str]:
        """Validate configuration.

        Returns:
            (True, "") when valid, otherwise (False, reason).
        """
        if not self.api_key:
            return False, "ANTHROPIC_API_KEY environment variable not set"
        if self.max_tokens < 1 or self.max_tokens > 200000:
            return False, f"Invalid max_tokens: {self.max_tokens}"
        if self.temperature < 0 or self.temperature > 1:
            return False, f"Invalid temperature: {self.temperature}"
        return True, ""

@dataclass class ToolCall: """Tool execution request.""" tool_name: str tool_input: Dict[str, Any] tool_use_id: str

@dataclass class ToolResult: """Tool execution result.""" tool_use_id: str content: str is_error: bool = False

@dataclass
class APIMetrics:
    """API usage metrics for cost tracking."""
    total_requests: int = 0
    failed_requests: int = 0
    total_input_tokens: int = 0
    total_output_tokens: int = 0
    total_tool_calls: int = 0
    session_start: str = field(default_factory=lambda: datetime.now(timezone.utc).isoformat())

    @property
    def total_tokens(self) -> int:
        """Total tokens across input and output."""
        return self.total_input_tokens + self.total_output_tokens

    @property
    def success_rate(self) -> float:
        """Fraction of successful requests; 1.0 when no requests were made."""
        if self.total_requests == 0:
            return 1.0
        return (self.total_requests - self.failed_requests) / self.total_requests

    def estimate_cost(self) -> float:
        """Estimate API cost based on token usage (Claude Sonnet 4 pricing Dec 2024)."""
        input_cost_per_1m = 3.00    # $3 per 1M input tokens
        output_cost_per_1m = 15.00  # $15 per 1M output tokens

        input_cost = (self.total_input_tokens / 1_000_000) * input_cost_per_1m
        output_cost = (self.total_output_tokens / 1_000_000) * output_cost_per_1m

        return input_cost + output_cost

    def to_dict(self) -> Dict[str, Any]:
        """Convert metrics to dictionary (rates/costs pre-formatted as strings)."""
        return {
            "total_requests": self.total_requests,
            "failed_requests": self.failed_requests,
            "success_rate": f"{self.success_rate:.1%}",
            "total_input_tokens": self.total_input_tokens,
            "total_output_tokens": self.total_output_tokens,
            "total_tokens": self.total_tokens,
            "total_tool_calls": self.total_tool_calls,
            "estimated_cost": f"${self.estimate_cost():.4f}",
            "session_start": self.session_start
        }

class ClaudeAPIClient:
    """Production Claude API client with tool use loop."""

    def __init__(self, config: Optional[APIConfig] = None):
        """Initialize the client.

        Args:
            config: Optional configuration; defaults to env-driven APIConfig.

        Raises:
            ValueError: if the configuration fails validation.
            ImportError: if the anthropic package is not installed.
        """
        self.config = config or APIConfig()

        valid, error = self.config.validate()
        if not valid:
            raise ValueError(error)

        if not ANTHROPIC_AVAILABLE:
            raise ImportError("anthropic package required. Install with: pip install anthropic")

        self.client = AsyncAnthropic(
            api_key=self.config.api_key,
            timeout=self.config.timeout
        )

        self.metrics = APIMetrics()

        # Tool registry: tool name -> async handler(tool_input) -> str
        self._tools = {
            "web_search": self._web_search,
            "web_fetch": self._web_fetch,
        }

    def register_tool(self, name: str, handler: callable):
        """Register a custom tool handler (overrides any existing handler)."""
        self._tools[name] = handler

    async def create_message(
        self,
        messages: List[Dict[str, Any]],
        system: Optional[str] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        max_tokens: Optional[int] = None,
        max_tool_iterations: int = 10
    ) -> Message:
        """
        Create a message with automatic tool use loop.

        Args:
            messages: Conversation messages
            system: System prompt
            tools: Available tools
            max_tokens: Override default max tokens
            max_tool_iterations: Maximum tool call iterations (prevents infinite loops)

        Returns:
            Final message after all tool use complete
        """
        # Shallow copy so the caller's list is not mutated by the loop below.
        current_messages = messages.copy()
        iteration = 0
        response = None

        while iteration < max_tool_iterations:
            iteration += 1

            # Call API with retry logic
            response = await self._call_api_with_retry(
                messages=current_messages,
                system=system,
                tools=tools,
                max_tokens=max_tokens
            )

            # Track metrics
            self.metrics.total_input_tokens += response.usage.input_tokens
            self.metrics.total_output_tokens += response.usage.output_tokens
            self.metrics.total_requests += 1

            # Check if tool use needed
            tool_uses = [
                block for block in response.content
                if isinstance(block, ToolUseBlock)
            ]

            if not tool_uses or response.stop_reason != "tool_use":
                # Done - return final response
                return response

            # Track tool calls
            self.metrics.total_tool_calls += len(tool_uses)

            # Execute tools
            tool_results = await self._execute_tools(tool_uses)

            # Add assistant message and tool results to conversation
            current_messages.append({
                "role": "assistant",
                "content": response.content
            })

            current_messages.append({
                "role": "user",
                "content": tool_results
            })

        # Max iterations reached - return last response
        print(f"Warning: Max tool iterations ({max_tool_iterations}) reached")
        return response

    async def _call_api_with_retry(
        self,
        messages: List[Dict[str, Any]],
        system: Optional[str],
        tools: Optional[List[Dict[str, Any]]],
        max_tokens: Optional[int]
    ) -> Message:
        """Call API with exponential backoff retry.

        Raises:
            The last API error after max_retries attempts are exhausted.
        """
        last_error = None

        for attempt in range(self.config.max_retries):
            try:
                kwargs = {
                    "model": self.config.model,
                    "max_tokens": max_tokens or self.config.max_tokens,
                    "temperature": self.config.temperature,
                    "messages": messages,
                }

                # system/tools are optional API fields; only send when provided.
                if system:
                    kwargs["system"] = system
                if tools:
                    kwargs["tools"] = tools

                response = await self.client.messages.create(**kwargs)
                return response

            except Exception as e:
                last_error = e
                self.metrics.failed_requests += 1

                # Exponential backoff: retry_delay * 2^attempt
                if attempt < self.config.max_retries - 1:
                    delay = self.config.retry_delay * (2 ** attempt)
                    print(f"API call failed (attempt {attempt + 1}), retrying in {delay}s: {e}")
                    await asyncio.sleep(delay)
                else:
                    print(f"API call failed after {self.config.max_retries} attempts")

        # NOTE(review): if max_retries is 0 this re-raises None; assumes
        # max_retries >= 1 as configured by APIConfig's default.
        raise last_error

    async def _execute_tools(self, tool_uses: List[ToolUseBlock]) -> List[Dict[str, Any]]:
        """Execute tool calls and return tool_result blocks for the API."""
        results = []

        for tool_use in tool_uses:
            tool_name = tool_use.name
            tool_input = tool_use.input
            tool_use_id = tool_use.id

            try:
                # Execute tool based on name; unknown tools produce a
                # non-error message so the model can recover.
                if tool_name in self._tools:
                    result = await self._tools[tool_name](tool_input)
                else:
                    result = f"Unknown tool: {tool_name}"

                results.append({
                    "type": "tool_result",
                    "tool_use_id": tool_use_id,
                    "content": result
                })

            except Exception as e:
                results.append({
                    "type": "tool_result",
                    "tool_use_id": tool_use_id,
                    "content": f"Tool execution error: {str(e)}",
                    "is_error": True
                })

        return results

    async def _web_search(self, tool_input: Dict[str, Any]) -> str:
        """Execute web search (placeholder - integrate with actual search API)."""
        query = tool_input.get("query", "")

        # TODO: Integrate with actual web search API (Brave, Google, etc.)
        # For now, return structured mock data
        return f"""
Search results for "{query}":

1. Source: Industry Report 2024
   Market size is estimated at $43B with 18-22% growth rate.

2. Source: Competitor Analysis
   Top players include Company A ($15B), Company B ($8B), Company C ($5B).

3. Source: Trend Analysis
   Key trends include AI integration, cloud migration, talent shortage.

Note: This is mock data. Integrate with actual search API for production use.
"""

    async def _web_fetch(self, tool_input: Dict[str, Any]) -> str:
        """Fetch web page content (placeholder - integrate with actual fetch)."""
        url = tool_input.get("url", "")

        # TODO: Integrate with actual web fetch (httpx, playwright, etc.)
        return f"Content from {url}: [Article content would be here]"

    def get_metrics(self) -> Dict[str, Any]:
        """Get API usage metrics."""
        return self.metrics.to_dict()

    def reset_metrics(self):
        """Reset metrics for new session."""
        self.metrics = APIMetrics()

class AgentExecutor:
    """Execute specialized agent tasks using Claude API."""

    def __init__(self, client: Optional[ClaudeAPIClient] = None):
        """Wrap an API client; constructs a default one when none is given."""
        self.client = client or ClaudeAPIClient()

    async def execute_research_agent(
        self,
        objective: str,
        context: Dict[str, Any],
        max_tool_calls: int = 15
    ) -> Dict[str, Any]:
        """
        Execute research agent to gather market intelligence.

        Args:
            objective: What the agent should research
            context: Industry, geography, etc.
            max_tool_calls: Maximum tool calls allowed

        Returns:
            Dict with findings, sources, confidence
        """
        system_prompt = """You are a market research analyst specializing in strategy consulting.

Your role is to gather comprehensive market intelligence including:
- Market size and growth rates with credible sources
- Customer segments and characteristics
- Demand drivers and market trends
- Value chain structure

Always cite your sources explicitly. Rate your confidence (0-1) based on source quality."""

        user_prompt = f"""
OBJECTIVE: {objective}

CONTEXT:
Industry: {context.get('industry')}
Geography: {context.get('geography')}
Time Horizon: {context.get('time_horizon')}

Please conduct thorough research and return your findings in this JSON format:
{{
    "market_size": "with sources",
    "growth_rate": "with sources",
    "segments": ["segment1", "segment2"],
    "demand_drivers": ["driver1", "driver2"],
    "sources": ["source1", "source2"],
    "confidence": 0.8,
    "data_gaps": ["gap1", "gap2"]
}}
"""

        tools = [
            {
                "name": "web_search",
                "description": "Search the web for information",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "query": {
                            "type": "string",
                            "description": "Search query"
                        }
                    },
                    "required": ["query"]
                }
            }
        ]

        messages = [{"role": "user", "content": user_prompt}]

        response = await self.client.create_message(
            messages=messages,
            system=system_prompt,
            tools=tools,
            max_tool_iterations=max_tool_calls
        )

        # Extract text response (tool-use blocks are skipped)
        text_content = ""
        for block in response.content:
            if isinstance(block, TextBlock):
                text_content += block.text

        return {
            "findings": text_content,
            "model_used": response.model,
            "tokens_used": response.usage.input_tokens + response.usage.output_tokens,
            "stop_reason": response.stop_reason
        }

    async def execute_competitive_analyst(
        self,
        objective: str,
        context: Dict[str, Any],
        max_tool_calls: int = 20
    ) -> Dict[str, Any]:
        """Execute competitive analysis agent.

        Same shape as execute_research_agent but with a competitor-focused
        prompt and a larger output budget (max_tokens=16000).
        """
        system_prompt = """You are a competitive intelligence analyst.

Your role is to identify and analyze top competitors including:
- Revenue and market position
- Business model and positioning
- Strengths, weaknesses, moats, vulnerabilities
- Strategic moves and M&A activity

Focus on facts from credible sources. Cite everything."""

        user_prompt = f"""
OBJECTIVE: {objective}

CONTEXT:
Industry: {context.get('industry')}
Geography: {context.get('geography')}
Number of competitors to analyze: {context.get('competitor_count', 5)}

Research the top competitors and return findings in this JSON format:
{{
    "competitors": [
        {{
            "name": "Company Name",
            "revenue": "$XB",
            "positioning": "brief description",
            "business_model": "description",
            "strengths": "key strengths",
            "weaknesses": "key weaknesses",
            "moat": "competitive advantages",
            "vulnerability": "areas of risk"
        }}
    ],
    "sources": ["source1", "source2"],
    "confidence": 0.8
}}
"""

        tools = [
            {
                "name": "web_search",
                "description": "Search the web for competitor information",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "query": {"type": "string"}
                    },
                    "required": ["query"]
                }
            }
        ]

        messages = [{"role": "user", "content": user_prompt}]

        response = await self.client.create_message(
            messages=messages,
            system=system_prompt,
            tools=tools,
            max_tokens=16000,
            max_tool_iterations=max_tool_calls
        )

        text_content = ""
        for block in response.content:
            if isinstance(block, TextBlock):
                text_content += block.text

        return {
            "findings": text_content,
            "tokens_used": response.usage.input_tokens + response.usage.output_tokens
        }

def run_tests():
    """Run API client tests (without actual API calls).

    Returns:
        True when all tests pass, False otherwise.
    """
    print("Running Claude API Client Tests...")
    print("=" * 50)

    tests_passed = 0
    tests_failed = 0

    # Test 1: Config validation
    print("\n[Test 1] Config validation...")
    config = APIConfig(api_key="test-key")
    valid, _ = config.validate()
    if valid:
        print(" PASS: Valid config accepted")
        tests_passed += 1
    else:
        print(" FAIL: Valid config rejected")
        tests_failed += 1

    # Test 2: Invalid config
    print("\n[Test 2] Invalid config detection...")
    config = APIConfig(api_key="")
    valid, error = config.validate()
    if not valid and "API_KEY" in error:
        print(" PASS: Missing API key detected")
        tests_passed += 1
    else:
        print(" FAIL: Missing API key not detected")
        tests_failed += 1

    # Test 3: Metrics tracking
    print("\n[Test 3] Metrics tracking...")
    metrics = APIMetrics()
    metrics.total_requests = 10
    metrics.failed_requests = 2
    metrics.total_input_tokens = 5000
    metrics.total_output_tokens = 1000
    if metrics.success_rate == 0.8 and metrics.total_tokens == 6000:
        print(" PASS: Metrics calculated correctly")
        tests_passed += 1
    else:
        print(" FAIL: Metrics calculation error")
        tests_failed += 1

    # Test 4: Cost estimation
    print("\n[Test 4] Cost estimation...")
    metrics = APIMetrics()
    metrics.total_input_tokens = 1_000_000
    metrics.total_output_tokens = 100_000
    cost = metrics.estimate_cost()
    expected = 3.0 + 1.5  # $3 for 1M input + $1.50 for 100K output
    if abs(cost - expected) < 0.01:
        print(f" PASS: Cost estimated correctly (${cost:.2f})")
        tests_passed += 1
    else:
        print(f" FAIL: Cost estimation error (got ${cost:.2f}, expected ${expected:.2f})")
        tests_failed += 1

    # Test 5: Metrics serialization
    print("\n[Test 5] Metrics serialization...")
    metrics_dict = metrics.to_dict()
    required_keys = ["total_requests", "failed_requests", "success_rate", "estimated_cost"]
    if all(k in metrics_dict for k in required_keys):
        print(" PASS: Metrics serialization complete")
        tests_passed += 1
    else:
        print(" FAIL: Missing keys in metrics dict")
        tests_failed += 1

    print("\n" + "=" * 50)
    print(f"Tests: {tests_passed + tests_failed} | Passed: {tests_passed} | Failed: {tests_failed}")

    return tests_failed == 0

def main():
    """Main entry point for CLI usage.

    Exits 0 on success (or after --metrics/--config), 1 on test failure.
    """
    import argparse

    parser = argparse.ArgumentParser(description="Claude API Client")
    parser.add_argument("--test", action="store_true", help="Run tests")
    parser.add_argument("--metrics", action="store_true", help="Show metrics template")
    parser.add_argument("--config", action="store_true", help="Show configuration")

    args = parser.parse_args()

    if args.test:
        success = run_tests()
        sys.exit(0 if success else 1)

    if args.metrics:
        metrics = APIMetrics()
        print(json.dumps(metrics.to_dict(), indent=2))
        sys.exit(0)

    if args.config:
        config = APIConfig()
        print("Claude API Client Configuration:")
        print(f" Model: {config.model}")
        print(f" Max Tokens: {config.max_tokens}")
        print(f" Temperature: {config.temperature}")
        print(f" Timeout: {config.timeout}s")
        print(f" Max Retries: {config.max_retries}")
        # Never print the key itself - only whether it is present.
        print(f" API Key: {'SET' if config.api_key else 'NOT SET'}")
        sys.exit(0)

    # Default: show help
    parser.print_help()

if name == "main": main()