#!/usr/bin/env python3
"""Production Readiness Check

Comprehensive production readiness audit against CODITECT standards.
Calculates score, identifies blocking issues, and provides remediation.

Usage:
    python production-readiness-check.py [path] [--report] [--json]

Examples:
    python production-readiness-check.py            # Audit current directory
    python production-readiness-check.py --report   # Generate detailed report
    python production-readiness-check.py --json     # Output as JSON
"""

import argparse
import json
import os
import re
import sys
from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Tuple

# ANSI colors:
# Shared Colors module (consolidates 36 duplicate definitions).
# The module lives in a "core" subdirectory next to this script, so we
# extend sys.path before importing it.
_script_dir = Path(__file__).parent  # was Path(file) — NameError at import time
sys.path.insert(0, str(_script_dir / "core"))
from colors import Colors

@dataclass
class CheckResult:
    """Result of a single check.

    Instantiated with keyword arguments throughout the auditor, so the
    @dataclass decorator is required to generate the __init__.
    """
    name: str          # human-readable check name
    category: str      # scoring category (structure, documentation, ...)
    passed: bool       # whether the check passed
    points: int        # points awarded (may be partial)
    max_points: int    # maximum points available for this check
    message: str       # one-line result description
    severity: str = 'info'  # critical, high, medium, low, info
    fix: Optional[str] = None  # remediation hint, if any

@dataclass
class AuditResult:
    """Complete audit result."""
    score: int = 0        # normalized score, 0-100
    max_score: int = 100
    grade: str = 'F'      # letter grade derived from score
    checks: List[CheckResult] = field(default_factory=list)
    blocking_issues: List[CheckResult] = field(default_factory=list)   # failed critical checks
    high_priority: List[CheckResult] = field(default_factory=list)     # failed high-severity checks

    @property
    def is_production_ready(self) -> bool:
        """Ready only with a score of at least 80 and no blocking issues."""
        return self.score >= 80 and len(self.blocking_issues) == 0

def calculate_grade(score: int) -> str:
    """Calculate letter grade from a 0-100 score.

    Thresholds descend in 5-point steps from A+ (95) down to D (60);
    anything below 60 is an F.
    """
    if score >= 95:
        return 'A+'
    if score >= 90:
        return 'A'
    if score >= 85:
        return 'A-'
    if score >= 80:
        return 'B+'
    if score >= 75:
        return 'B'
    if score >= 70:
        return 'C+'
    if score >= 65:
        return 'C'
    if score >= 60:
        return 'D'
    return 'F'

class ProductionAuditor:
    """Production readiness auditor.

    Each check_* method inspects one aspect of the repository at
    ``self.path`` and returns a CheckResult; run_audit() aggregates
    them into a normalized 0-100 AuditResult.
    """

    def __init__(self, path: Path):
        self.path = path
        self.checks: List[CheckResult] = []

    def add_check(self, result: CheckResult):
        """Add a check result."""
        self.checks.append(result)

    # Repository Structure Checks (15 points)
    def check_readme(self) -> CheckResult:
        """Check for README.md at root."""
        exists = (self.path / 'README.md').exists()
        return CheckResult(
            name='Root README.md',
            category='structure',
            passed=exists,
            points=5 if exists else 0,
            max_points=5,
            message='README.md present' if exists else 'Missing README.md',
            severity='high' if not exists else 'info',
            fix='Create README.md with project overview'
        )

    def check_license(self) -> CheckResult:
        """Check for LICENSE file."""
        exists = (self.path / 'LICENSE').exists() or (self.path / 'LICENSE.md').exists()
        return CheckResult(
            name='LICENSE file',
            category='structure',
            passed=exists,
            points=3 if exists else 0,
            max_points=3,
            message='LICENSE present' if exists else 'Missing LICENSE',
            severity='medium' if not exists else 'info',
            fix='Add LICENSE file'
        )

    def check_gitignore(self) -> CheckResult:
        """Check for .gitignore."""
        exists = (self.path / '.gitignore').exists()
        return CheckResult(
            name='.gitignore',
            category='structure',
            passed=exists,
            points=2 if exists else 0,
            max_points=2,
            message='.gitignore present' if exists else 'Missing .gitignore',
            severity='medium' if not exists else 'info',
            fix='Create .gitignore with appropriate patterns'
        )

    def check_claude_md(self) -> CheckResult:
        """Check for CLAUDE.md (CODITECT requirement)."""
        exists = (self.path / 'CLAUDE.md').exists()
        return CheckResult(
            name='CLAUDE.md',
            category='structure',
            passed=exists,
            points=5 if exists else 0,
            max_points=5,
            message='CLAUDE.md present' if exists else 'Missing CLAUDE.md',
            severity='critical' if not exists else 'info',
            fix='Create CLAUDE.md following CODITECT standards'
        )

    # Documentation Checks (20 points)
    def check_readme_coverage(self) -> CheckResult:
        """Check README coverage in directories."""
        dirs_checked = 0
        dirs_with_readme = 0

        skip_dirs = {'.git', 'node_modules', 'venv', '.venv', '__pycache__'}

        for item in self.path.rglob('*'):
            if item.is_dir():
                # Compare whole path components, not substrings: a naive
                # "'.git' in str(item)" also skipped '.github', and any
                # absolute path that happened to contain a skip name.
                rel_parts = item.relative_to(self.path).parts
                if skip_dirs.intersection(rel_parts):
                    continue
                dirs_checked += 1
                if (item / 'README.md').exists():
                    dirs_with_readme += 1

        coverage = dirs_with_readme / dirs_checked if dirs_checked > 0 else 0
        points = int(coverage * 10)

        return CheckResult(
            name='README coverage',
            category='documentation',
            passed=coverage >= 0.8,
            points=points,
            max_points=10,
            message=f'{coverage*100:.0f}% directories have README',
            severity='medium' if coverage < 0.8 else 'info',
            fix='Generate missing READMEs with /readme-gen'
        )

    def check_docs_directory(self) -> CheckResult:
        """Check for docs/ directory."""
        exists = (self.path / 'docs').is_dir()
        return CheckResult(
            name='docs/ directory',
            category='documentation',
            passed=exists,
            points=5 if exists else 0,
            max_points=5,
            message='docs/ directory present' if exists else 'Missing docs/',
            severity='medium' if not exists else 'info',
            fix='Create docs/ directory with documentation'
        )

    def check_claude_md_length(self) -> CheckResult:
        """Check CLAUDE.md is under limit."""
        claude_path = self.path / 'CLAUDE.md'
        if not claude_path.exists():
            # Absence is already penalized by check_claude_md; score 0 here
            # without flagging it a second time.
            return CheckResult(
                name='CLAUDE.md length',
                category='documentation',
                passed=False,
                points=0,
                max_points=5,
                message='CLAUDE.md not found',
                severity='info'
            )

        # Explicit encoding avoids UnicodeDecodeError on platforms where the
        # locale default is not UTF-8 (e.g. Windows cp1252).
        lines = len(claude_path.read_text(encoding='utf-8').split('\n'))
        passed = lines <= 200

        return CheckResult(
            name='CLAUDE.md length',
            category='documentation',
            passed=passed,
            points=5 if passed else 2,  # partial credit when over the limit
            max_points=5,
            message=f'CLAUDE.md is {lines} lines' + (' (over 200 limit)' if not passed else ''),
            severity='low' if not passed else 'info',
            fix='Reduce CLAUDE.md to under 200 lines'
        )

    # File Organization Checks (15 points)
    def check_stray_files(self) -> CheckResult:
        """Check for stray files at root."""
        stray_patterns = [r'.*\.log$', r'.*\.tmp$', r'.*\.bak$', r'CHECKPOINT-.*\.md$']
        stray_files = []

        for item in self.path.iterdir():
            if item.is_file():
                for pattern in stray_patterns:
                    if re.match(pattern, item.name):
                        stray_files.append(item.name)
                        break

        passed = len(stray_files) == 0

        return CheckResult(
            name='No stray files at root',
            category='organization',
            passed=passed,
            points=8 if passed else max(0, 8 - len(stray_files)),  # -1 per stray file
            max_points=8,
            message=f'{len(stray_files)} stray files found' if not passed else 'No stray files',
            severity='medium' if not passed else 'info',
            fix='Move stray files to appropriate directories'
        )

    def check_naming_conventions(self) -> CheckResult:
        """Quick check of naming conventions."""
        violations = 0
        checked = 0

        # Top-level directories must be kebab-case (lowercase, digits, hyphens).
        dir_pattern = re.compile(r'^[a-z][a-z0-9-]*$')

        for item in self.path.iterdir():
            if item.name.startswith('.'):
                continue  # dotfiles/dotdirs are exempt
            checked += 1
            if item.is_dir() and not dir_pattern.match(item.name):
                violations += 1

        compliance = 1 - (violations / checked) if checked > 0 else 1
        passed = compliance >= 0.9

        return CheckResult(
            name='Naming conventions',
            category='organization',
            passed=passed,
            points=int(compliance * 7),
            max_points=7,
            message=f'{compliance*100:.0f}% naming compliance',
            severity='low' if not passed else 'info',
            fix='Fix naming violations with /naming-check --fix'
        )

    # Git Checks (10 points)
    def check_git_repo(self) -> CheckResult:
        """Check if path is a git repository."""
        is_git = (self.path / '.git').is_dir()
        return CheckResult(
            name='Git repository',
            category='git',
            passed=is_git,
            points=5 if is_git else 0,
            max_points=5,
            message='Git repository initialized' if is_git else 'Not a git repository',
            severity='high' if not is_git else 'info',
            fix='Initialize git repository with git init'
        )

    def check_gitignore_content(self) -> CheckResult:
        """Check .gitignore has common patterns."""
        gitignore_path = self.path / '.gitignore'
        if not gitignore_path.exists():
            return CheckResult(
                name='.gitignore content',
                category='git',
                passed=False,
                points=0,
                max_points=5,
                message='.gitignore not found',
                severity='medium'
            )

        content = gitignore_path.read_text(encoding='utf-8')
        essential_patterns = ['node_modules', '.env', '__pycache__', '.venv', '*.log']
        found = sum(1 for p in essential_patterns if p in content)

        score = int((found / len(essential_patterns)) * 5)
        passed = found >= 3

        return CheckResult(
            name='.gitignore content',
            category='git',
            passed=passed,
            points=score,
            max_points=5,
            message=f'{found}/{len(essential_patterns)} essential patterns found',
            severity='low' if not passed else 'info',
            fix='Add missing patterns to .gitignore'
        )

    # Code Quality Checks (15 points)
    def check_tests_directory(self) -> CheckResult:
        """Check for tests directory."""
        has_tests = (self.path / 'tests').is_dir() or (self.path / 'test').is_dir()
        return CheckResult(
            name='Tests directory',
            category='quality',
            passed=has_tests,
            points=8 if has_tests else 0,
            max_points=8,
            message='Tests directory present' if has_tests else 'No tests directory',
            severity='high' if not has_tests else 'info',
            fix='Create tests/ directory with test files'
        )

    def check_linter_config(self) -> CheckResult:
        """Check for linter configuration."""
        linter_files = [
            '.eslintrc', '.eslintrc.js', '.eslintrc.json',
            'pyproject.toml', 'setup.cfg', '.flake8',
            '.prettierrc', 'rustfmt.toml'
        ]
        has_linter = any((self.path / f).exists() for f in linter_files)

        return CheckResult(
            name='Linter configuration',
            category='quality',
            passed=has_linter,
            points=7 if has_linter else 0,
            max_points=7,
            message='Linter configured' if has_linter else 'No linter configuration',
            severity='medium' if not has_linter else 'info',
            fix='Add linter configuration file'
        )

    # CI/CD Checks (10 points)
    def check_github_workflows(self) -> CheckResult:
        """Check for GitHub Actions workflows."""
        workflows_dir = self.path / '.github' / 'workflows'
        # Directory must exist AND contain at least one entry.
        has_workflows = workflows_dir.is_dir() and any(workflows_dir.iterdir())

        return CheckResult(
            name='CI/CD workflows',
            category='cicd',
            passed=has_workflows,
            points=10 if has_workflows else 0,
            max_points=10,
            message='GitHub Actions configured' if has_workflows else 'No CI/CD configuration',
            severity='high' if not has_workflows else 'info',
            fix='Add .github/workflows/ with CI/CD pipeline'
        )

    # Security Checks (10 points)
    def check_no_secrets(self) -> CheckResult:
        """Quick check for exposed secrets."""
        suspicious_files = ['.env', 'credentials.json', 'secrets.json', '.aws/credentials']
        found_secrets = []

        for f in suspicious_files:
            if (self.path / f).exists():
                found_secrets.append(f)

        passed = len(found_secrets) == 0

        return CheckResult(
            name='No exposed secrets',
            category='security',
            passed=passed,
            points=10 if passed else 0,
            max_points=10,
            message='No secrets files found' if passed else f'Found: {", ".join(found_secrets)}',
            severity='critical' if not passed else 'info',
            fix='Remove secrets files and use environment variables'
        )

    # Metadata Checks (5 points)
    def check_version_info(self) -> CheckResult:
        """Check for version information."""
        version_files = ['package.json', 'pyproject.toml', 'Cargo.toml', 'VERSION']
        has_version = any((self.path / f).exists() for f in version_files)

        return CheckResult(
            name='Version information',
            category='metadata',
            passed=has_version,
            points=5 if has_version else 0,
            max_points=5,
            message='Version info present' if has_version else 'No version file',
            severity='low' if not has_version else 'info',
            fix='Add version information to project'
        )

    def run_audit(self) -> AuditResult:
        """Run complete audit and return the aggregated result."""
        checks = [
            # Structure (15)
            self.check_readme(),
            self.check_license(),
            self.check_gitignore(),
            self.check_claude_md(),

            # Documentation (20)
            self.check_readme_coverage(),
            self.check_docs_directory(),
            self.check_claude_md_length(),

            # Organization (15)
            self.check_stray_files(),
            self.check_naming_conventions(),

            # Git (10)
            self.check_git_repo(),
            self.check_gitignore_content(),

            # Quality (15)
            self.check_tests_directory(),
            self.check_linter_config(),

            # CI/CD (10)
            self.check_github_workflows(),

            # Security (10)
            self.check_no_secrets(),

            # Metadata (5)
            self.check_version_info(),
        ]

        total_score = sum(c.points for c in checks)
        max_score = sum(c.max_points for c in checks)

        # Normalize to 100
        score = int((total_score / max_score) * 100) if max_score > 0 else 0

        blocking = [c for c in checks if c.severity == 'critical' and not c.passed]
        high_priority = [c for c in checks if c.severity == 'high' and not c.passed]

        return AuditResult(
            score=score,
            max_score=100,
            grade=calculate_grade(score),
            checks=checks,
            blocking_issues=blocking,
            high_priority=high_priority
        )

def print_report(result: AuditResult, verbose: bool = False):
    """Print a human-readable audit report to stdout.

    Shows overall score/grade/status, per-category scores, blocking and
    high-priority issues, and (with verbose=True) every individual check.
    """
    print(f"\n{Colors.BOLD}{'='*60}{Colors.RESET}")
    print(f"{Colors.BOLD} PRODUCTION READINESS AUDIT{Colors.RESET}")
    print(f"{Colors.BOLD}{'='*60}{Colors.RESET}\n")

    # Score: green >= 80, yellow >= 60, red below.
    score_color = Colors.GREEN if result.score >= 80 else (Colors.YELLOW if result.score >= 60 else Colors.RED)
    status = "✓ PRODUCTION READY" if result.is_production_ready else "✗ NOT READY"
    status_color = Colors.GREEN if result.is_production_ready else Colors.RED

    print(f" {Colors.BOLD}Score:{Colors.RESET} {score_color}{result.score}/100{Colors.RESET}")
    print(f" {Colors.BOLD}Grade:{Colors.RESET} {score_color}{result.grade}{Colors.RESET}")
    print(f" {Colors.BOLD}Status:{Colors.RESET} {status_color}{status}{Colors.RESET}")
    print()

    # Aggregate points per category for the breakdown table.
    categories = {}
    for check in result.checks:
        if check.category not in categories:
            categories[check.category] = {'points': 0, 'max': 0}
        categories[check.category]['points'] += check.points
        categories[check.category]['max'] += check.max_points

    print(f" {Colors.BOLD}Scores by Category:{Colors.RESET}")
    for cat, scores in categories.items():
        pct = (scores['points'] / scores['max'] * 100) if scores['max'] > 0 else 0
        color = Colors.GREEN if pct >= 80 else (Colors.YELLOW if pct >= 60 else Colors.RED)
        print(f" {cat.capitalize():15} {color}{scores['points']:2}/{scores['max']:2} ({pct:.0f}%){Colors.RESET}")
    print()

    # Blocking issues (failed critical checks) always shown, with fixes.
    if result.blocking_issues:
        print(f" {Colors.RED}{Colors.BOLD}BLOCKING ISSUES (must fix):{Colors.RESET}")
        for issue in result.blocking_issues:
            print(f" • {issue.name}: {issue.message}")
            if issue.fix:
                print(f" Fix: {issue.fix}")
        print()

    # High-priority (failed high-severity checks).
    if result.high_priority:
        print(f" {Colors.YELLOW}{Colors.BOLD}HIGH PRIORITY:{Colors.RESET}")
        for issue in result.high_priority:
            print(f" • {issue.name}: {issue.message}")
        print()

    if verbose:
        # Full per-check listing.
        print(f" {Colors.BOLD}All Checks:{Colors.RESET}")
        for check in result.checks:
            status = '✓' if check.passed else '✗'
            color = Colors.GREEN if check.passed else Colors.RED
            print(f" {color}{status}{Colors.RESET} {check.name}: {check.points}/{check.max_points}")
        print()

    print(f"{Colors.BOLD}{'='*60}{Colors.RESET}\n")

def main():
    """CLI entry point: parse arguments, run the audit, emit the result.

    Exits 1 on a nonexistent path, or when --check is given and the
    repository is not production ready; exits 0 otherwise.
    """
    parser = argparse.ArgumentParser(
        description='Production readiness audit'
    )
    parser.add_argument('path', nargs='?', default='.',
                        help='Path to audit')
    parser.add_argument('--report', '-r', action='store_true',
                        help='Generate detailed report')
    parser.add_argument('--json', action='store_true',
                        help='Output as JSON')
    parser.add_argument('--check', action='store_true',
                        help='Exit non-zero if not ready')

    args = parser.parse_args()

    path = Path(args.path).resolve()
    if not path.exists():
        print(f"Error: Path does not exist: {path}", file=sys.stderr)
        sys.exit(1)

    auditor = ProductionAuditor(path)
    result = auditor.run_audit()

    if args.json:
        # Machine-readable output: summary plus every individual check.
        output = {
            'score': result.score,
            'grade': result.grade,
            'production_ready': result.is_production_ready,
            'blocking_issues': len(result.blocking_issues),
            'high_priority': len(result.high_priority),
            'checks': [
                {
                    'name': c.name,
                    'category': c.category,
                    'passed': c.passed,
                    'points': c.points,
                    'max_points': c.max_points,
                    'message': c.message,
                    'severity': c.severity,
                    'fix': c.fix
                }
                for c in result.checks
            ]
        }
        print(json.dumps(output, indent=2))
    else:
        print_report(result, verbose=args.report)

    if args.check and not result.is_production_ready:
        sys.exit(1)

    sys.exit(0)

# Script entry guard; was mangled to `if name == 'main'` which never runs.
if __name__ == '__main__':
    main()