#!/usr/bin/env python3
"""
CODITECT Complete Test Suite

Master test runner that executes all component validation tests and
generates a comprehensive gap analysis report.

Run:
    python3 scripts/tests/test_all.py
    python3 scripts/tests/test_all.py -v        # Verbose
    python3 scripts/tests/test_all.py --report  # Generate markdown report
    python3 scripts/tests/test_all.py --fix     # Attempt auto-fixes

Author: CODITECT Team
Version: 1.0.0
Created: 2025-12-22
"""

import argparse
import json
import os
import sys
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional

# Shared Colors module (consolidates 36 duplicate definitions)

_script_dir = Path(file).parent.parent # tests/ -> scripts/ sys.path.insert(0, str(_script_dir / "core")) from colors import Colors

class TestSuiteResult: name: str total: int passed: int failed: int tests_total: int tests_passed: int failures: List[Dict]

def get_framework_root() -> Path: """Get framework root directory""" script_path = Path(file).resolve() return script_path.parent.parent.parent

def run_agent_tests(framework_root: Path, verbose: bool = False) -> TestSuiteResult:
    """Run agent validation tests and aggregate the results.

    Args:
        framework_root: Repository root containing scripts/tests/.
        verbose: Forwarded to the underlying suite for detailed output.

    Returns:
        A TestSuiteResult summarizing pass/fail counts and failure details.
    """
    sys.path.insert(0, str(framework_root / "scripts" / "tests"))

    # Imported lazily so the suite module is only required when this runner executes.
    from test_agents import AgentTestSuite
    suite = AgentTestSuite(framework_root, verbose=verbose)
    suite.run_all()

    failures = [
        {'name': r.name, 'errors': r.errors}
        for r in suite.results if not r.passed
    ]

    return TestSuiteResult(
        name="Agents",
        total=len(suite.results),
        passed=sum(1 for r in suite.results if r.passed),
        failed=sum(1 for r in suite.results if not r.passed),
        tests_total=sum(len(r.tests) for r in suite.results),
        tests_passed=sum(1 for r in suite.results for t in r.tests if t.passed),
        failures=failures
    )

def run_command_tests(framework_root: Path, verbose: bool = False) -> TestSuiteResult:
    """Run command validation tests and aggregate the results.

    Args:
        framework_root: Repository root containing scripts/tests/.
        verbose: Forwarded to the underlying suite for detailed output.

    Returns:
        A TestSuiteResult summarizing pass/fail counts and failure details.
    """
    sys.path.insert(0, str(framework_root / "scripts" / "tests"))

    # Imported lazily so the suite module is only required when this runner executes.
    from test_commands import CommandTestSuite
    suite = CommandTestSuite(framework_root, verbose=verbose)
    suite.run_all()

    failures = [
        {'name': r.name, 'errors': r.errors}
        for r in suite.results if not r.passed
    ]

    return TestSuiteResult(
        name="Commands",
        total=len(suite.results),
        passed=sum(1 for r in suite.results if r.passed),
        failed=sum(1 for r in suite.results if not r.passed),
        tests_total=sum(len(r.tests) for r in suite.results),
        tests_passed=sum(1 for r in suite.results for t in r.tests if t.passed),
        failures=failures
    )

def run_skill_tests(framework_root: Path, verbose: bool = False) -> TestSuiteResult:
    """Run skill validation tests and aggregate the results.

    Args:
        framework_root: Repository root containing scripts/tests/.
        verbose: Forwarded to the underlying suite for detailed output.

    Returns:
        A TestSuiteResult summarizing pass/fail counts and failure details.
    """
    sys.path.insert(0, str(framework_root / "scripts" / "tests"))

    # Imported lazily so the suite module is only required when this runner executes.
    from test_skills import SkillTestSuite
    suite = SkillTestSuite(framework_root, verbose=verbose)
    suite.run_all()

    failures = [
        {'name': r.name, 'errors': r.errors}
        for r in suite.results if not r.passed
    ]

    return TestSuiteResult(
        name="Skills",
        total=len(suite.results),
        passed=sum(1 for r in suite.results if r.passed),
        failed=sum(1 for r in suite.results if not r.passed),
        tests_total=sum(len(r.tests) for r in suite.results),
        tests_passed=sum(1 for r in suite.results for t in r.tests if t.passed),
        failures=failures
    )

def run_script_tests(framework_root: Path, verbose: bool = False) -> TestSuiteResult:
    """Run script validation tests and aggregate the results.

    Args:
        framework_root: Repository root containing scripts/tests/.
        verbose: Forwarded to the underlying suite for detailed output.

    Returns:
        A TestSuiteResult summarizing pass/fail counts and failure details.
    """
    sys.path.insert(0, str(framework_root / "scripts" / "tests"))

    # Imported lazily so the suite module is only required when this runner executes.
    from test_scripts import ScriptTestSuite
    suite = ScriptTestSuite(framework_root, verbose=verbose)
    suite.run_all()

    failures = [
        {'name': r.name, 'errors': r.errors}
        for r in suite.results if not r.passed
    ]

    return TestSuiteResult(
        name="Scripts",
        total=len(suite.results),
        passed=sum(1 for r in suite.results if r.passed),
        failed=sum(1 for r in suite.results if not r.passed),
        tests_total=sum(len(r.tests) for r in suite.results),
        tests_passed=sum(1 for r in suite.results for t in r.tests if t.passed),
        failures=failures
    )

def run_hook_tests(framework_root: Path, verbose: bool = False) -> TestSuiteResult:
    """Run hook validation tests and aggregate the results.

    Args:
        framework_root: Repository root containing scripts/tests/.
        verbose: Forwarded to the underlying suite for detailed output.

    Returns:
        A TestSuiteResult summarizing pass/fail counts and failure details.
    """
    sys.path.insert(0, str(framework_root / "scripts" / "tests"))

    # Imported lazily so the suite module is only required when this runner executes.
    from test_hooks import HookTestSuite
    suite = HookTestSuite(framework_root, verbose=verbose)
    suite.run_all()

    failures = [
        {'name': r.name, 'errors': r.errors}
        for r in suite.results if not r.passed
    ]

    return TestSuiteResult(
        name="Hooks",
        total=len(suite.results),
        passed=sum(1 for r in suite.results if r.passed),
        failed=sum(1 for r in suite.results if not r.passed),
        tests_total=sum(len(r.tests) for r in suite.results),
        tests_passed=sum(1 for r in suite.results for t in r.tests if t.passed),
        failures=failures
    )

def generate_gap_analysis(results: List["TestSuiteResult"]) -> str:
    """Generate a gap-analysis markdown report from suite results.

    Args:
        results: Aggregated results, one entry per component test suite.

    Returns:
        The complete markdown report as a single string.

    NOTE(review): the table layout below was reconstructed from a garbled
    template; confirm section headings against the original report format.
    """
    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    total_components = sum(r.total for r in results)
    total_passed = sum(r.passed for r in results)
    total_failed = sum(r.failed for r in results)
    total_tests = sum(r.tests_total for r in results)
    tests_passed = sum(r.tests_passed for r in results)

    # Guard against empty suites to avoid ZeroDivisionError.
    overall_pass_rate = (total_passed / total_components * 100) if total_components > 0 else 0
    test_pass_rate = (tests_passed / total_tests * 100) if total_tests > 0 else 0

    report = f"""# CODITECT Component Validation Gap Analysis

Generated: {now}
Framework Version: 1.7.2

## Executive Summary

| Metric | Value |
|--------|-------|
| Total Components | {total_components} |
| Components Passing | {total_passed} ({overall_pass_rate:.1f}%) |
| Components Failing | {total_failed} |
| Total Tests Run | {total_tests} |
| Tests Passing | {tests_passed} ({test_pass_rate:.1f}%) |

## Results by Component Type

| Type | Total | Passed | Failed | Pass Rate |
|------|-------|--------|--------|-----------|
"""
    for r in results:
        pass_rate = (r.passed / r.total * 100) if r.total > 0 else 0
        status = "✅" if r.failed == 0 else "❌"
        report += f"| {r.name} | {r.total} | {r.passed} | {r.failed} | {pass_rate:.1f}% {status} |\n"

    report += "\n## Detailed Failures\n\n"

    has_failures = False
    for r in results:
        if r.failures:
            has_failures = True
            report += f"### {r.name} Failures ({len(r.failures)})\n\n"
            for failure in r.failures:
                report += f"**{failure['name']}**\n"
                for error in failure['errors']:
                    report += f"- {error}\n"
                report += "\n"

    if not has_failures:
        report += "_No failures detected._\n"

    report += "\n## Recommendations\n\n"

    if total_failed == 0:
        report += """✅ **All components passing validation!**

No immediate action required. Continue monitoring with regular test runs.
"""
    else:
        report += f"""### Priority Fixes Required

1. {total_failed} components need attention
2. Focus on required frontmatter and documentation first
3. Run individual test suites for detailed error messages:
   - python3 scripts/tests/test_agents.py -v
   - python3 scripts/tests/test_commands.py -v
   - python3 scripts/tests/test_skills.py -v
   - python3 scripts/tests/test_scripts.py -v
   - python3 scripts/tests/test_hooks.py -v

### Common Issues to Address

"""
        # Tally error messages by their leading "category" (text before the
        # first colon) so the most frequent problems surface first.
        all_errors = []
        for r in results:
            for f in r.failures:
                all_errors.extend(f['errors'])

        error_patterns = {}
        for error in all_errors:
            key = error.split(':')[0] if ':' in error else error[:30]
            error_patterns[key] = error_patterns.get(key, 0) + 1

        # Top 5 patterns, most frequent first.
        for pattern, count in sorted(error_patterns.items(), key=lambda x: -x[1])[:5]:
            report += f"- **{pattern}**: {count} occurrences\n"

    report += """
## Test Coverage Summary

| Test Suite | Tests | Passed | Coverage |
|------------|-------|--------|----------|
"""
    for r in results:
        coverage = (r.tests_passed / r.tests_total * 100) if r.tests_total > 0 else 0
        report += f"| {r.name} | {r.tests_total} | {r.tests_passed} | {coverage:.1f}% |\n"

    report += f"""| **Total** | **{total_tests}** | **{tests_passed}** | **{test_pass_rate:.1f}%** |

---
Report generated by CODITECT Test Suite v1.0.0
"""

    return report

def main():
    """Entry point: parse args, run the selected suites, print a summary.

    Exits with status 0 when every component passes, 1 otherwise.
    """
    parser = argparse.ArgumentParser(description='CODITECT Complete Test Suite')
    parser.add_argument('-v', '--verbose', action='store_true', help='Verbose output')
    parser.add_argument('--report', action='store_true', help='Generate markdown report')
    parser.add_argument('--json', action='store_true', help='Output as JSON')
    parser.add_argument('--suite', type=str,
                        choices=['agents', 'commands', 'skills', 'scripts', 'hooks'],
                        help='Run specific test suite only')
    args = parser.parse_args()

    framework_root = get_framework_root()

    print(f"{Colors.BOLD}{Colors.CYAN}")
    print("=" * 60)
    print(" CODITECT COMPLETE COMPONENT VALIDATION TEST SUITE")
    print("=" * 60)
    print(f"{Colors.RESET}\n")

    results = []

    # Run selected or all test suites
    suites_to_run = {
        'agents': ("Agents", run_agent_tests),
        'commands': ("Commands", run_command_tests),
        'skills': ("Skills", run_skill_tests),
        'scripts': ("Scripts", run_script_tests),
        'hooks': ("Hooks", run_hook_tests),
    }

    if args.suite:
        suites_to_run = {args.suite: suites_to_run[args.suite]}

    for key, (name, runner) in suites_to_run.items():
        print(f"\n{Colors.BLUE}{'=' * 60}{Colors.RESET}")
        print(f"{Colors.BOLD}Running {name} Tests...{Colors.RESET}")
        print(f"{Colors.BLUE}{'=' * 60}{Colors.RESET}\n")

        try:
            result = runner(framework_root, verbose=args.verbose)
            results.append(result)
        except Exception as e:
            # A crashing suite must not abort the whole run; record an
            # empty result and continue with the remaining suites.
            print(f"{Colors.RED}Error running {name} tests: {e}{Colors.RESET}")
            results.append(TestSuiteResult(
                name=name, total=0, passed=0, failed=0,
                tests_total=0, tests_passed=0, failures=[]
            ))

    # Final Summary
    print(f"\n{Colors.CYAN}{'=' * 60}{Colors.RESET}")
    print(f"{Colors.BOLD}FINAL SUMMARY{Colors.RESET}")
    print(f"{Colors.CYAN}{'=' * 60}{Colors.RESET}\n")

    total_components = sum(r.total for r in results)
    total_passed = sum(r.passed for r in results)
    total_failed = sum(r.failed for r in results)
    total_tests = sum(r.tests_total for r in results)
    tests_passed = sum(r.tests_passed for r in results)

    print(f"{'Component Type':<15} {'Total':>8} {'Passed':>8} {'Failed':>8} {'Status':>10}")
    print("-" * 55)

    for r in results:
        status = f"{Colors.GREEN}✓ PASS{Colors.RESET}" if r.failed == 0 else f"{Colors.RED}✗ FAIL{Colors.RESET}"
        print(f"{r.name:<15} {r.total:>8} {r.passed:>8} {r.failed:>8} {status:>10}")

    print("-" * 55)
    overall_status = f"{Colors.GREEN}✓ ALL PASS{Colors.RESET}" if total_failed == 0 else f"{Colors.RED}✗ FAILURES{Colors.RESET}"
    print(f"{'TOTAL':<15} {total_components:>8} {total_passed:>8} {total_failed:>8} {overall_status:>10}")

    print(f"\n{Colors.BOLD}Test Statistics:{Colors.RESET}")
    print(f" Total Tests: {total_tests}")
    print(f" Passed: {tests_passed}")
    print(f" Failed: {total_tests - tests_passed}")
    # Avoid division by zero when no tests were discovered.
    if total_tests > 0:
        print(f" Pass Rate: {(tests_passed / total_tests * 100):.1f}%")
    else:
        print(" Pass Rate: N/A")

    # Generate report if requested
    if args.report:
        report = generate_gap_analysis(results)
        report_path = framework_root / "GAP-ANALYSIS-REPORT.md"
        report_path.write_text(report)
        print(f"\n{Colors.GREEN}Report saved to: {report_path}{Colors.RESET}")

    # JSON output
    if args.json:
        output = {
            'timestamp': datetime.now().isoformat(),
            'summary': {
                'total_components': total_components,
                'passed': total_passed,
                'failed': total_failed,
                'total_tests': total_tests,
                'tests_passed': tests_passed
            },
            'suites': [
                {
                    'name': r.name,
                    'total': r.total,
                    'passed': r.passed,
                    'failed': r.failed,
                    'failures': r.failures
                }
                for r in results
            ]
        }
        print(json.dumps(output, indent=2))

    # Exit code: non-zero when any component failed, for CI integration.
    sys.exit(0 if total_failed == 0 else 1)

if name == 'main': main()