#!/usr/bin/env python3
"""CODITECT QA Browser Verification (Tier 3).

Browser-based component verification using QAAgentBrowserTools.

Integrates with /qa review to add live browser checks:
- Page load verification for components with URLs/routes
- Element existence checking for expected DOM structure
- Visual regression comparison against baselines
- Console error detection

Usage:
    python3 scripts/qa/browser_verify.py --url URL [--selectors SEL1,SEL2]
    python3 scripts/qa/browser_verify.py --url URL --baseline-id ID
    python3 scripts/qa/browser_verify.py --url URL --capture-baseline NAME

Task: H.8.2.6
ADR Reference: ADR-109-qa-agent-browser-automation.md
"""

import argparse
import asyncio
import json
import sys
from pathlib import Path

# Resolve repository paths so project-local packages are importable when this
# script is run directly (it lives two levels below the core root).
# BUG FIX: original used Path(file) — a NameError — instead of Path(__file__).
_CORE_ROOT = Path(__file__).resolve().parents[2]
_SCRIPTS_CORE = _CORE_ROOT / "scripts" / "core"
sys.path.insert(0, str(_CORE_ROOT))
sys.path.insert(0, str(_SCRIPTS_CORE))

# Import must come after the sys.path manipulation above.
from ralph_wiggum.browser_automation import (  # noqa: E402
    BrowserAutomationConfig,
    QAAgentBrowserTools,
)
async def run_browser_verification(
    url: str,
    selectors: list[str] | None = None,
    expected_title: str | None = None,
    baseline_id: str | None = None,
    capture_baseline: str | None = None,
    threshold: float | None = None,
    check_console: bool = True,
    bridge: object | None = None,
) -> dict:
    """Run browser verification checks against a URL.

    Args:
        url: URL to verify.
        selectors: CSS selectors to check for element existence.
        expected_title: Expected page title.
        baseline_id: Visual baseline ID for regression comparison.
        capture_baseline: Name for new baseline capture.
        threshold: Visual diff threshold (0.0-1.0).
        check_console: Whether to check for console errors.
        bridge: Optional PlaywrightMCPBridge instance.

    Returns:
        dict with verification results, score, and grade.
    """
    tools = QAAgentBrowserTools(bridge=bridge)
    results = {
        "url": url,
        "checks": [],
        "passed": 0,
        "failed": 0,
        "total": 0,
        "score": 0,
        "grade": "F",
    }

    # --- Check 1: Page loads ---
    page_result = await tools.verify_page_loads(
        url=url,
        expected_title=expected_title,
    )
    check = {
        "name": "page_load",
        "passed": page_result.success,
        "details": {
            "title": page_result.title,
            "load_time_ms": page_result.load_time_ms,
        },
    }
    if not page_result.success:
        check["error"] = page_result.error_message
    results["checks"].append(check)

    # --- Check 2: Element existence (if selectors provided) ---
    if selectors:
        for selector in selectors:
            elem_result = await tools.verify_element_exists(
                url=url,
                selector=selector,
            )
            check = {
                "name": f"element_{selector}",
                "passed": elem_result.exists,
                "details": {
                    "selector": selector,
                    "exists": elem_result.exists,
                    "visible": elem_result.visible,
                },
            }
            if not elem_result.exists:
                check["error"] = elem_result.error_message or f"Element '{selector}' not found"
            results["checks"].append(check)

    # --- Check 3: Console errors ---
    if check_console:
        console_result = await tools.analyze_console_errors(url=url)
        check = {
            "name": "console_errors",
            "passed": not console_result["has_errors"],
            "details": {
                "error_count": len(console_result["errors"]),
                "warning_count": len(console_result["warnings"]),
            },
        }
        if console_result["has_errors"]:
            check["error"] = f"{len(console_result['errors'])} JS error(s) detected"
            # Include the raw errors so the report/JSON output can show them.
            check["details"]["errors"] = console_result["errors"]
        results["checks"].append(check)

    # --- Check 4: Visual regression (if baseline_id provided) ---
    if baseline_id:
        compare_result = await tools.compare_visual_regression(
            url=url,
            baseline_id=baseline_id,
            threshold=threshold,
        )
        check = {
            "name": "visual_regression",
            "passed": compare_result.passed,
            "details": {
                "diff_percentage": compare_result.diff_percentage,
                "threshold": compare_result.threshold,
                "baseline_id": baseline_id,
            },
        }
        if not compare_result.passed:
            if compare_result.error_message:
                check["error"] = compare_result.error_message
            else:
                check["error"] = (
                    f"Visual diff {compare_result.diff_percentage:.2%} "
                    f"exceeds threshold {compare_result.threshold:.2%}"
                )
        results["checks"].append(check)

    # --- Optional: Capture new baseline (recorded separately, not a check) ---
    if capture_baseline:
        baseline = await tools.capture_visual_baseline(
            url=url,
            name=capture_baseline,
            selectors=selectors,
        )
        results["baseline_captured"] = {
            "id": baseline.baseline_id,
            "name": baseline.name,
            "screenshots": len(baseline.screenshots),
        }

    # --- Score calculation: percentage of passed checks mapped to a grade ---
    results["total"] = len(results["checks"])
    results["passed"] = sum(1 for c in results["checks"] if c["passed"])
    results["failed"] = results["total"] - results["passed"]
    if results["total"] > 0:
        pct = (results["passed"] / results["total"]) * 100
        results["score"] = round(pct, 1)
        if pct >= 90:
            results["grade"] = "A"
        elif pct >= 80:
            results["grade"] = "B"
        elif pct >= 70:
            results["grade"] = "C"
        elif pct >= 60:
            results["grade"] = "D"
        else:
            results["grade"] = "F"

    return results
def format_report(results: dict) -> str:
    """Format browser verification results as a text report.

    Args:
        results: Result dict produced by run_browser_verification
            (keys: url, score, grade, passed, total, checks, and
            optionally baseline_captured).

    Returns:
        Multi-line, human-readable report string.
    """
    lines = [
        f"BROWSER VERIFICATION: {results['url']}",
        "=" * 50,
        f"Score: {results['score']}% ({results['grade']})",
        f"Checks: {results['passed']}/{results['total']} passed",
        "",
    ]
    for check in results["checks"]:
        icon = "PASS" if check["passed"] else "FAIL"
        lines.append(f" [{icon}] {check['name']}")
        # Show the failure reason indented under the failed check, if present.
        if not check["passed"] and "error" in check:
            lines.append(f" {check['error']}")
    if "baseline_captured" in results:
        bc = results["baseline_captured"]
        lines.append("")
        lines.append(f" Baseline captured: {bc['name']} ({bc['screenshots']} screenshots)")
        lines.append(f" Baseline ID: {bc['id']}")
    return "\n".join(lines)
def main() -> None:
    """CLI entry point: parse arguments, run verification, emit report.

    Exits with status 0 when every check passed, 1 otherwise, so CI can
    gate on this script.
    """
    parser = argparse.ArgumentParser(
        description="CODITECT QA Browser Verification (Tier 3)"
    )
    parser.add_argument("--url", required=True, help="URL to verify")
    parser.add_argument(
        "--selectors",
        help="Comma-separated CSS selectors to check",
    )
    parser.add_argument("--expected-title", help="Expected page title")
    parser.add_argument(
        "--baseline-id", help="Visual baseline ID for regression check"
    )
    parser.add_argument(
        "--capture-baseline",
        help="Capture new baseline with this name",
    )
    parser.add_argument(
        "--threshold",
        type=float,
        help="Visual diff threshold (0.0-1.0)",
    )
    parser.add_argument(
        "--no-console",
        action="store_true",
        help="Skip console error check",
    )
    parser.add_argument("--json", dest="json_output", help="Output JSON to file")
    parser.add_argument("--verbose", action="store_true", help="Verbose output")

    args = parser.parse_args()

    # --selectors is a single comma-separated string; None means "no checks".
    selectors = args.selectors.split(",") if args.selectors else None

    results = asyncio.run(
        run_browser_verification(
            url=args.url,
            selectors=selectors,
            expected_title=args.expected_title,
            baseline_id=args.baseline_id,
            capture_baseline=args.capture_baseline,
            threshold=args.threshold,
            check_console=not args.no_console,
        )
    )

    # Output: JSON file when --json was given, otherwise a text report.
    if args.json_output:
        Path(args.json_output).write_text(json.dumps(results, indent=2))
        print(f"Results written to {args.json_output}")
    else:
        print(format_report(results))

    # Exit code reflects overall pass/fail.
    sys.exit(0 if results["failed"] == 0 else 1)
# BUG FIX: original guard read `if name == "main"`, which raises NameError;
# the standard dunder guard is required for script execution.
if __name__ == "__main__":
    main()