""" Tests for Self-Improving Eval Loop (H.3.6.7).
Tests cover:
- EvalCase and EvalResult dataclasses
- F1 score computation
- Eval runner with mocked API calls
- Critic agent response parsing
- Improvement applier validation
- Eval loop convergence and termination """
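
# Apart from the skill scripts under test (eval_runner, critic_agent,
# improvement_applier), these tests rely only on the standard library; the file
# can be run directly (unittest.main() is invoked at the bottom) or through
# `python -m unittest`.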
import json
import tempfile
import unittest
from datetime import datetime, timezone
from pathlib import Path
from unittest.mock import MagicMock, AsyncMock, patch

import sys

# Make the skill's scripts (eval_runner, critic_agent, improvement_applier)
# importable regardless of where the test suite is invoked from.
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "skills" / "self-improving-eval" / "scripts"))

from eval_runner import (
    EvalCase,
    EvalResult,
    EvalRoundResult,
    EvalRunner,
    load_eval_cases,
    load_prompt_template,
    compute_f1_scores,
    get_worst_cases,
)
from critic_agent import (
    CriticAgent,
    CriticAnalysis,
    PromptSuggestion,
    EvalSuggestion,
    propose_prompt_changes,
    propose_eval_changes,
)
from improvement_applier import (
    ImprovementApplier,
    ApplyResult,
    validate_changes,
)


class TestEvalCase(unittest.TestCase):
    """Tests for EvalCase dataclass."""

def test_create_eval_case(self):
"""Test creating an EvalCase."""
case = EvalCase(
input={"text": "Hello world"},
gold={"classification": "greeting"},
id="test_1",
)
self.assertEqual(case.id, "test_1")
self.assertEqual(case.input["text"], "Hello world")
self.assertEqual(case.gold["classification"], "greeting")
def test_eval_case_with_metadata(self):
"""Test EvalCase with metadata."""
case = EvalCase(
input={"text": "Test"},
gold={"label": "test"},
id="test_2",
metadata={"source": "manual", "annotator": "user1"},
)
self.assertEqual(case.metadata["source"], "manual")


class TestEvalResult(unittest.TestCase):
    """Tests for EvalResult dataclass."""

def test_create_eval_result(self):
"""Test creating an EvalResult."""
case = EvalCase(
input={"text": "Hello"},
gold={"classification": "greeting"},
id="test_1",
)
result = EvalResult(
case=case,
pred={"classification": "greeting"},
correct=True,
score=1.0,
model_used="claude-sonnet-4",
latency_ms=500.0,
token_usage=100,
)
self.assertTrue(result.correct)
self.assertEqual(result.score, 1.0)
self.assertEqual(result.model_used, "claude-sonnet-4")
def test_eval_result_to_dict(self):
"""Test EvalResult serialization."""
case = EvalCase(
input={"text": "Hello"},
gold={"classification": "greeting"},
id="test_1",
)
result = EvalResult(
case=case,
pred={"classification": "farewell"},
correct=False,
score=0.0,
model_used="gpt-4o",
latency_ms=300.0,
token_usage=50,
)
d = result.to_dict()
self.assertEqual(d["case_id"], "test_1")
self.assertFalse(d["correct"])
self.assertEqual(d["model_used"], "gpt-4o")


class TestF1Computation(unittest.TestCase):
    """Tests for F1 score computation."""

def test_perfect_f1(self):
"""Test F1 with all correct predictions."""
y_true = ["a", "b", "a", "b"]
y_pred = ["a", "b", "a", "b"]
micro, macro = compute_f1_scores(y_true, y_pred)
self.assertEqual(micro, 1.0)
self.assertEqual(macro, 1.0)
def test_all_wrong_f1(self):
"""Test F1 with all incorrect predictions."""
y_true = ["a", "a", "a"]
y_pred = ["b", "b", "b"]
micro, macro = compute_f1_scores(y_true, y_pred, labels=["a", "b"])
self.assertEqual(micro, 0.0)
self.assertEqual(macro, 0.0)
def test_partial_f1(self):
"""Test F1 with partial correctness."""
y_true = ["a", "a", "b", "b"]
y_pred = ["a", "b", "b", "b"] # 3/4 correct
micro, macro = compute_f1_scores(y_true, y_pred, labels=["a", "b"])
# micro F1 should be > 0.5
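# For single-label multiclass predictions, micro-averaged F1 reduces to plain
# accuracy, so (assuming compute_f1_scores follows the standard definition) the
# expected value here is 3/4 = 0.75.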
self.assertGreater(micro, 0.5)
self.assertLess(micro, 1.0)
def test_empty_inputs(self):
"""Test F1 with empty inputs."""
micro, macro = compute_f1_scores([], [])
self.assertEqual(micro, 0.0)
self.assertEqual(macro, 0.0)


class TestLoadEvalCases(unittest.TestCase):
    """Tests for loading eval cases from JSONL."""

def test_load_valid_jsonl(self):
"""Test loading valid JSONL file."""
with tempfile.NamedTemporaryFile(mode='w', suffix='.jsonl', delete=False) as f:
f.write('{"input": {"text": "Hello"}, "gold": {"label": "greeting"}}\n')
f.write('{"input": {"text": "Bye"}, "gold": {"label": "farewell"}}\n')
f.flush()
cases = load_eval_cases(f.name)
self.assertEqual(len(cases), 2)
self.assertEqual(cases[0].gold["label"], "greeting")
self.assertEqual(cases[1].gold["label"], "farewell")
def test_load_with_ids(self):
"""Test loading JSONL with explicit IDs."""
with tempfile.NamedTemporaryFile(mode='w', suffix='.jsonl', delete=False) as f:
f.write('{"id": "custom_1", "input": {"x": 1}, "gold": {"y": 2}}\n')
f.flush()
cases = load_eval_cases(f.name)
self.assertEqual(cases[0].id, "custom_1")
def test_load_auto_assigns_ids(self):
"""Test auto-assignment of IDs when not provided."""
with tempfile.NamedTemporaryFile(mode='w', suffix='.jsonl', delete=False) as f:
f.write('{"input": {"x": 1}, "gold": {"y": 2}}\n')
f.flush()
cases = load_eval_cases(f.name)
self.assertEqual(cases[0].id, "case_0")


class TestLoadPromptTemplate(unittest.TestCase):
    """Tests for loading prompt templates."""

def test_load_prompt(self):
"""Test loading prompt template."""
with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as f:
f.write("Classify the following: {text}")
f.flush()
prompt = load_prompt_template(f.name)
self.assertIn("{text}", prompt)


class TestGetWorstCases(unittest.TestCase):
    """Tests for getting worst cases."""

def test_get_worst_k(self):
"""Test getting K worst cases."""
results = []
for i, score in enumerate([0.8, 0.2, 0.5, 0.1, 0.9]):
case = EvalCase(input={}, gold={}, id=f"case_{i}")
results.append(EvalResult(
case=case,
pred={},
correct=score > 0.5,
score=score,
model_used="test",
latency_ms=100,
token_usage=50,
))
worst = get_worst_cases(results, k=3)
self.assertEqual(len(worst), 3)
# Should be sorted by score ascending
self.assertEqual(worst[0].case.id, "case_3") # score 0.1
self.assertEqual(worst[1].case.id, "case_1") # score 0.2
self.assertEqual(worst[2].case.id, "case_2") # score 0.5


class TestCriticAnalysis(unittest.TestCase):
    """Tests for CriticAnalysis dataclass."""

def test_create_analysis(self):
"""Test creating CriticAnalysis."""
analysis = CriticAnalysis(
failure_patterns=["Missing examples", "Unclear instructions"],
prompt_suggestion=PromptSuggestion(
new_prompt="Improved prompt",
rationale="Added clarity",
changes_summary=["Added examples"],
),
eval_suggestion=None,
confidence=0.85,
model_used="claude-sonnet-4",
)
self.assertEqual(len(analysis.failure_patterns), 2)
self.assertEqual(analysis.confidence, 0.85)
self.assertIsNotNone(analysis.prompt_suggestion)
def test_analysis_to_dict(self):
"""Test CriticAnalysis serialization."""
analysis = CriticAnalysis(
failure_patterns=["Pattern 1"],
prompt_suggestion=None,
eval_suggestion=None,
confidence=0.5,
model_used="gpt-4o",
)
d = analysis.to_dict()
self.assertEqual(d["failure_patterns"], ["Pattern 1"])
self.assertEqual(d["confidence"], 0.5)
self.assertIsNone(d["prompt_suggestion"])


class TestProposeChanges(unittest.TestCase):
    """Tests for propose_prompt_changes and propose_eval_changes."""

def test_propose_prompt_changes(self):
"""Test extracting prompt changes from analysis."""
analysis = CriticAnalysis(
failure_patterns=[],
prompt_suggestion=PromptSuggestion(
new_prompt="New prompt text",
rationale="Better",
changes_summary=[],
),
eval_suggestion=None,
confidence=0.8,
model_used="test",
)
new_prompt = propose_prompt_changes("old prompt", analysis)
self.assertEqual(new_prompt, "New prompt text")
def test_propose_prompt_changes_none(self):
"""Test when no prompt suggestion."""
analysis = CriticAnalysis(
failure_patterns=[],
prompt_suggestion=None,
eval_suggestion=None,
confidence=0.5,
model_used="test",
)
new_prompt = propose_prompt_changes("old prompt", analysis)
self.assertIsNone(new_prompt)
def test_propose_eval_changes_add_cases(self):
"""Test adding new eval cases."""
current_cases = [
EvalCase(input={"x": 1}, gold={"y": 2}, id="existing_1"),
]
analysis = CriticAnalysis(
failure_patterns=[],
prompt_suggestion=None,
eval_suggestion=EvalSuggestion(
new_cases=[
EvalCase(input={"x": 2}, gold={"y": 3}, id="new_1"),
],
fixed_cases=[],
removed_case_ids=[],
rationale="Added case",
),
confidence=0.8,
model_used="test",
)
updated = propose_eval_changes(current_cases, analysis)
self.assertEqual(len(updated), 2)


class TestImprovementApplier(unittest.TestCase):
    """Tests for ImprovementApplier."""

def test_validate_prompt_change_valid(self):
"""Test validating a valid prompt change."""
applier = ImprovementApplier()
warnings = applier.validate_prompt_change(
current_prompt="Classify: {text}",
new_prompt="Please classify the following text: {text}",
)
self.assertEqual(warnings, [])
def test_validate_prompt_change_missing_placeholder(self):
"""Test validating prompt with missing placeholder."""
applier = ImprovementApplier()
warnings = applier.validate_prompt_change(
current_prompt="Classify: {text}",
new_prompt="Please classify the input", # Missing {text}
)
self.assertTrue(any("placeholder" in w.lower() for w in warnings))
def test_validate_prompt_change_empty(self):
"""Test validating empty prompt."""
applier = ImprovementApplier()
warnings = applier.validate_prompt_change(
current_prompt="Old prompt",
new_prompt="",
)
self.assertTrue(any("empty" in w.lower() for w in warnings))
def test_apply_prompt_change(self):
"""Test applying prompt change to file."""
with tempfile.TemporaryDirectory() as tmpdir:
prompt_path = Path(tmpdir) / "prompt.txt"
prompt_path.write_text("Old prompt: {text}")
applier = ImprovementApplier(backup_dir=Path(tmpdir) / "backups")
success, backup, warnings = applier.apply_prompt_change(
prompt_path,
"New prompt: {text}",
)
self.assertTrue(success)
self.assertIsNotNone(backup)
self.assertEqual(prompt_path.read_text(), "New prompt: {text}")
def test_hold_out_protection(self):
"""Test that hold-out cases cannot be modified."""
applier = ImprovementApplier(hold_out_ids={"protected_1"})
warnings = applier.validate_eval_change(
EvalCase(input={}, gold={}, id="protected_1")
)
self.assertTrue(any("hold-out" in w.lower() for w in warnings))


class TestValidateChanges(unittest.TestCase):
    """Tests for validate_changes function."""

def test_validate_all_changes(self):
"""Test validating all changes without applying."""
analysis = CriticAnalysis(
failure_patterns=[],
prompt_suggestion=PromptSuggestion(
new_prompt="", # Invalid - empty
rationale="",
changes_summary=[],
),
eval_suggestion=EvalSuggestion(
new_cases=[
EvalCase(input={}, gold={}, id="new_1"), # Invalid - empty
],
fixed_cases=[],
removed_case_ids=[],
rationale="",
),
confidence=0.5,
model_used="test",
)
warnings = validate_changes(
analysis,
current_prompt="Old prompt",
current_cases=[],
)
self.assertGreater(len(warnings), 0)


class TestEvalRoundResult(unittest.TestCase):
    """Tests for EvalRoundResult aggregation."""

def test_create_round_result(self):
"""Test creating EvalRoundResult."""
results = []
for i in range(5):
case = EvalCase(input={}, gold={}, id=f"case_{i}")
results.append(EvalResult(
case=case,
pred={},
correct=i < 3, # 3 correct
score=1.0 if i < 3 else 0.0,
model_used="test",
latency_ms=100,
token_usage=50,
))
round_result = EvalRoundResult(
results=results,
total_cases=5,
correct_count=3,
accuracy=0.6,
micro_f1=0.6,
macro_f1=0.6,
total_tokens=250,
total_latency_ms=500,
model_used="test",
)
self.assertEqual(round_result.total_cases, 5)
self.assertEqual(round_result.correct_count, 3)
self.assertEqual(round_result.accuracy, 0.6)
def test_round_result_to_dict(self):
"""Test EvalRoundResult serialization."""
round_result = EvalRoundResult(
results=[],
total_cases=10,
correct_count=8,
accuracy=0.8,
micro_f1=0.75,
macro_f1=0.78,
total_tokens=1000,
total_latency_ms=5000,
model_used="claude-sonnet-4",
)
d = round_result.to_dict()
self.assertEqual(d["total_cases"], 10)
self.assertEqual(d["micro_f1"], 0.75)
self.assertIn("timestamp", d)


class TestApplyResult(unittest.TestCase):
    """Tests for ApplyResult dataclass."""

def test_create_apply_result(self):
"""Test creating ApplyResult."""
result = ApplyResult(
prompt_updated=True,
prompt_backup_path="/backup/prompt.txt",
cases_added=3,
cases_fixed=1,
cases_removed=0,
eval_backup_path="/backup/evals.jsonl",
validation_warnings=["Warning 1"],
)
self.assertTrue(result.prompt_updated)
self.assertEqual(result.cases_added, 3)
self.assertEqual(len(result.validation_warnings), 1)
def test_apply_result_to_dict(self):
"""Test ApplyResult serialization."""
result = ApplyResult(
prompt_updated=False,
prompt_backup_path=None,
cases_added=0,
cases_fixed=0,
cases_removed=0,
eval_backup_path=None,
validation_warnings=[],
)
d = result.to_dict()
self.assertFalse(d["prompt_updated"])
self.assertIn("timestamp", d)


if __name__ == '__main__':
    unittest.main()