#!/usr/bin/env python3
# scripts-orchestrate-submodule
"""
title: "CODITECT Standards"
component_type: script
version: "1.0.0"
audience: contributor
status: stable
summary: "CODITECT Submodule Orchestrator - Zero Human-in-Loop Automation"
keywords: ['analysis', 'automation', 'generation', 'git', 'orchestrate']
tokens: ~500
created: 2025-12-22
updated: 2025-12-22
script_name: "orchestrate-submodule.py"
language: python
executable: true
usage: "python3 scripts/orchestrate-submodule.py [options]"
python_version: "3.10+"
dependencies: []
modifies_files: false
network_access: false
requires_auth: false

CODITECT Submodule Orchestrator - Zero Human-in-Loop Automation

Automatically brings a submodule to CODITECT compliance standards using
specialized AI agents. Designed for both internal CODITECT use and
customer/contributor projects.

Usage:
    # Single submodule
    python3 orchestrate-submodule.py --path /path/to/submodule

    # With specific phase target
    python3 orchestrate-submodule.py --path /path/to/submodule --target-score 95

    # Dry run (analysis only)
    python3 orchestrate-submodule.py --path /path/to/submodule --dry-run

    # Full automation
    python3 orchestrate-submodule.py --path /path/to/submodule --auto-commit --auto-push

Agent Invocations:
    This script orchestrates the following agents:
    - codebase-analyzer: Structure analysis
    - project-organizer: Directory standardization
    - codi-documentation-writer: CLAUDE.md generation
    - compliance-checker-agent: Score validation
    - git-workflow-orchestrator: Sync operations

Author: CODITECT Team
License: MIT
"""
import argparse
import json
import os
import subprocess
import sys
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Tuple
# CODITECT Standards: thresholds and required artifacts for compliance scoring.
CODITECT_STANDARDS = {
    "min_compliance_score": 90,   # passing score out of 100 (overridable via --target-score)
    "max_claude_md_lines": 150,   # CLAUDE.md must stay at or under this line count
    "required_directories": ["docs"],
    "required_files": ["CLAUDE.md", "README.md", ".gitignore"],
    "required_symlinks": [".coditect", ".claude"],
}
# Agent definitions for agentic orchestration.
# Each entry maps an agent name to its purpose and the invocation template
# used when emitting Task() calls (or a slash-command for git sync).
AGENTS = {
    "codebase-analyzer": {
        "purpose": "Analyze directory structure and identify gaps",
        "invocation": 'Task(subagent_type="codebase-analyzer", prompt="{prompt}")',
    },
    "project-organizer": {
        "purpose": "Standardize directory structure to CODITECT standards",
        "invocation": 'Task(subagent_type="project-organizer", prompt="{prompt}")',
    },
    "codi-documentation-writer": {
        "purpose": "Generate or refactor CLAUDE.md files",
        "invocation": 'Task(subagent_type="codi-documentation-writer", prompt="{prompt}")',
    },
    "compliance-checker-agent": {
        "purpose": "Validate compliance score",
        "invocation": 'Task(subagent_type="compliance-checker-agent", prompt="{prompt}")',
    },
    "git-workflow-orchestrator": {
        "purpose": "Commit and sync changes",
        "invocation": '/git-sync --target {target}',
    },
}
def run_command(cmd: str, cwd: Optional[Path] = None) -> Tuple[int, str, str]:
    """Run *cmd* through the shell and return (exit_code, stdout, stderr).

    NOTE(review): ``shell=True`` executes the raw string; callers must never
    pass untrusted input here.
    """
    completed = subprocess.run(
        cmd,
        shell=True,  # callers supply full shell command strings
        cwd=cwd,
        capture_output=True,
        text=True,
    )
    return completed.returncode, completed.stdout, completed.stderr
def analyze_submodule(path: Path) -> Dict:
    """Analyze submodule structure and calculate its compliance score.

    Runs seven weighted checks (totalling 100 points) against CODITECT
    standards and, for each failing check, records a human-readable
    recommendation plus a machine-actionable agent task.

    Args:
        path: Root directory of the submodule to analyze.

    Returns:
        Analysis dict with keys: path, name, timestamp, score, max_score,
        checks, recommendations, agent_tasks, compliant.
    """
    analysis: Dict = {
        "path": str(path),
        "name": path.name,
        "timestamp": datetime.now().isoformat(),
        "score": 0,
        "max_score": 100,
        "checks": {},
        "recommendations": [],
        "agent_tasks": [],
    }
    score = 0
    # Use the configured limit instead of hard-coding 150 in the messages
    # below; output is unchanged at the default setting.
    max_lines = CODITECT_STANDARDS["max_claude_md_lines"]

    # Check 1: CLAUDE.md exists and is under the line limit (25 points;
    # 15 partial points when present but too long).
    claude_md = path / "CLAUDE.md"
    if claude_md.exists():
        # errors="replace" keeps a stray non-UTF-8 byte from aborting the scan.
        lines = len(claude_md.read_text(encoding="utf-8", errors="replace").splitlines())
        if lines <= max_lines:
            score += 25
            analysis["checks"]["claude_md"] = {"status": "pass", "lines": lines}
        else:
            score += 15
            analysis["checks"]["claude_md"] = {"status": "partial", "lines": lines}
            analysis["recommendations"].append(
                f"Refactor CLAUDE.md from {lines} to <{max_lines} lines"
            )
            analysis["agent_tasks"].append({
                "agent": "codi-documentation-writer",
                "task": f"Refactor CLAUDE.md in {path.name} from {lines} lines to <{max_lines} lines. Move detailed content to docs/.",
                "priority": "high"
            })
    else:
        analysis["checks"]["claude_md"] = {"status": "missing"}
        analysis["recommendations"].append("Create CLAUDE.md from CODITECT template")
        analysis["agent_tasks"].append({
            "agent": "codi-documentation-writer",
            "task": f"Create CLAUDE.md for {path.name} following CODITECT standards (<{max_lines} lines)",
            "priority": "high"
        })

    # Check 2: .coditect and .claude symlinks exist (10 points each).
    coditect_link = path / ".coditect"
    claude_link = path / ".claude"
    symlink_score = 0
    if coditect_link.is_symlink():
        symlink_score += 10
        analysis["checks"]["coditect_symlink"] = {"status": "pass"}
    else:
        analysis["checks"]["coditect_symlink"] = {"status": "missing"}
        analysis["recommendations"].append("Create .coditect symlink")
        analysis["agent_tasks"].append({
            "agent": "project-organizer",
            "task": f"Create .coditect symlink in {path.name} pointing to distributed intelligence",
            "priority": "high"
        })
    if claude_link.is_symlink():
        symlink_score += 10
        analysis["checks"]["claude_symlink"] = {"status": "pass"}
    else:
        analysis["checks"]["claude_symlink"] = {"status": "missing"}
        analysis["recommendations"].append("Create .claude symlink → .coditect")
        # Consistency fix: previously only the .coditect branch queued an
        # agent task; a missing .claude symlink now queues one as well.
        analysis["agent_tasks"].append({
            "agent": "project-organizer",
            "task": f"Create .claude symlink in {path.name} pointing to .coditect",
            "priority": "high"
        })
    score += symlink_score

    # Check 3: docs/ directory exists (15 points).
    docs_dir = path / "docs"
    if docs_dir.is_dir():
        score += 15
        analysis["checks"]["docs_directory"] = {"status": "pass"}
    else:
        analysis["checks"]["docs_directory"] = {"status": "missing"}
        analysis["recommendations"].append("Create docs/ directory structure")
        analysis["agent_tasks"].append({
            "agent": "project-organizer",
            "task": f"Create docs/ directory in {path.name} with appropriate subdirectories",
            "priority": "medium"
        })

    # Check 4: README.md exists (10 points).
    readme = path / "README.md"
    if readme.exists():
        score += 10
        analysis["checks"]["readme"] = {"status": "pass"}
    else:
        analysis["checks"]["readme"] = {"status": "missing"}
        analysis["recommendations"].append("Create README.md")

    # Check 5: .gitignore exists (10 points).
    gitignore = path / ".gitignore"
    if gitignore.exists():
        score += 10
        analysis["checks"]["gitignore"] = {"status": "pass"}
    else:
        analysis["checks"]["gitignore"] = {"status": "missing"}
        analysis["recommendations"].append("Create .gitignore")

    # Check 6: PROJECT-PLAN.md in docs/project-management/ (10 points).
    project_plan = path / "docs" / "project-management" / "PROJECT-PLAN.md"
    if project_plan.exists():
        score += 10
        analysis["checks"]["project_plan"] = {"status": "pass"}
    else:
        analysis["checks"]["project_plan"] = {"status": "missing"}
        analysis["recommendations"].append("Add PROJECT-PLAN.md to docs/project-management/")

    # Check 7: Clean root directory (10 points; 5 partial). Dotfiles and a
    # whitelist of common project files are always allowed; up to two other
    # stray files are tolerated.
    root_files = [f for f in path.iterdir() if f.is_file()]
    expected_root_files = {"CLAUDE.md", "README.md", ".gitignore", "LICENSE", "Makefile",
                           "package.json", "Cargo.toml", "pyproject.toml", "setup.py"}
    unexpected = [f.name for f in root_files if f.name not in expected_root_files
                  and not f.name.startswith(".")]
    if len(unexpected) <= 2:
        score += 10
        analysis["checks"]["clean_root"] = {"status": "pass", "unexpected": unexpected}
    else:
        score += 5
        analysis["checks"]["clean_root"] = {"status": "partial", "unexpected": unexpected}
        analysis["recommendations"].append(f"Move {len(unexpected)} files from root to appropriate directories")
        analysis["agent_tasks"].append({
            "agent": "project-organizer",
            "task": f"Organize root directory of {path.name}, move misplaced files to appropriate locations",
            "priority": "medium"
        })

    analysis["score"] = score
    analysis["compliant"] = score >= CODITECT_STANDARDS["min_compliance_score"]
    return analysis
def generate_agent_script(analysis: Dict) -> str:
    """Render an executable Python snippet listing the pending agent Task() calls.

    Returns a single compliant-message comment when no tasks are pending.
    """
    pending = analysis.get("agent_tasks", [])
    if not pending:
        return "# No agent tasks required - submodule is compliant!"

    header = [
        "#!/usr/bin/env python3",
        '"""',
        f"Auto-generated agent tasks for {analysis['name']}",
        f"Generated: {analysis['timestamp']}",
        f"Current Score: {analysis['score']}/100",
        f"Target Score: {CODITECT_STANDARDS['min_compliance_score']}/100",
        '"""',
        "",
        "# Agent Task Invocations",
        "# Execute these in Claude Code or via Task tool",
        "",
    ]

    # One commented invocation stanza per pending task, in priority order
    # as recorded by the analyzer.
    body = []
    for index, item in enumerate(pending, start=1):
        body += [
            f"# Task {index}: [{item['priority'].upper()}] {item['agent']}",
            f"# {item['task']}",
            f'Task(subagent_type="{item["agent"]}", prompt="{item["task"]}")',
            "",
        ]

    footer = [
        "# Final: Verify compliance",
        f'Task(subagent_type="compliance-checker-agent", prompt="Verify {analysis["name"]} compliance score is now 90+")',
        "",
        "# Sync changes",
        f'# /git-sync --target {analysis["path"]}',
    ]
    return "\n".join(header + body + footer)
def execute_agents(analysis: Dict, auto_commit: bool = False, auto_push: bool = False) -> Dict:
    """Print the agent execution plan and record each task as pending.

    Placeholder: no agents are actually invoked here; invocations are emitted
    for manual execution or integration with Claude Code's Task tool.
    """
    results: Dict = {
        "executed_tasks": [],
        "success": True,
        "final_score": None,
    }

    banner = "=" * 60
    print("\n" + banner)
    print("AGENT EXECUTION PLAN")
    print(banner)

    for task_id, item in enumerate(analysis.get("agent_tasks", []), start=1):
        agent_name = item["agent"]
        task_prompt = item["task"]
        level = item["priority"]
        print(f"\n[Task {task_id}] {agent_name} ({level})")
        print(f" Prompt: {task_prompt}")
        print(f" Invocation: {AGENTS[agent_name]['invocation'].format(prompt=task_prompt)}")
        results["executed_tasks"].append({
            "task_id": task_id,
            "agent": agent_name,
            "prompt": task_prompt,
            "status": "pending_manual_execution",
        })

    if auto_commit:
        print("\n[Auto-Commit] Would commit changes with conventional message")
        results["auto_commit"] = "pending"
    if auto_push:
        print("[Auto-Push] Would push to remote")
        results["auto_push"] = "pending"

    return results
def main():
    """CLI entry point: analyze a submodule and report/plan compliance work."""
    parser = argparse.ArgumentParser(
        description="CODITECT Submodule Orchestrator - Zero Human-in-Loop Automation"
    )
    parser.add_argument("--path", "-p", type=Path, required=True,
                        help="Path to submodule directory")
    parser.add_argument("--target-score", type=int, default=90,
                        help="Target compliance score (default: 90)")
    parser.add_argument("--dry-run", action="store_true",
                        help="Analysis only, don't execute agents")
    parser.add_argument("--auto-commit", action="store_true",
                        help="Automatically commit changes after agent execution")
    parser.add_argument("--auto-push", action="store_true",
                        help="Automatically push after commit")
    parser.add_argument("--output", "-o", type=Path,
                        help="Output analysis to JSON file")
    parser.add_argument("--generate-script", action="store_true",
                        help="Generate executable agent script")
    args = parser.parse_args()

    # Bail out early on a nonexistent path.
    if not args.path.exists():
        print(f"Error: Path does not exist: {args.path}")
        sys.exit(1)

    # NOTE(review): mutates the module-level standards dict so downstream
    # checks honor the requested target score.
    CODITECT_STANDARDS["min_compliance_score"] = args.target_score

    divider = "=" * 60
    print(divider)
    print("CODITECT SUBMODULE ORCHESTRATOR")
    print(divider)
    print(f"Submodule: {args.path.name}")
    print(f"Path: {args.path}")
    print(f"Target Score: {args.target_score}/100")
    print(f"Mode: {'Dry Run (Analysis Only)' if args.dry_run else 'Full Automation'}")

    # Phase 1: structural analysis and scoring.
    print("\n[Phase 1] Analyzing submodule structure...")
    analysis = analyze_submodule(args.path)

    print(f"\n{divider}")
    print("ANALYSIS RESULTS")
    print(divider)
    print(f"Current Score: {analysis['score']}/100")
    print(f"Compliant: {'✅ YES' if analysis['compliant'] else '❌ NO'}")

    print("\nChecks:")
    for check_name, outcome in analysis["checks"].items():
        state = outcome.get("status", "unknown")
        marker = "✅" if state == "pass" else "⚠️" if state == "partial" else "❌"
        print(f" {marker} {check_name}: {state}")

    if analysis["recommendations"]:
        print("\nRecommendations:")
        for suggestion in analysis["recommendations"]:
            print(f" → {suggestion}")

    if analysis["agent_tasks"]:
        print(f"\nAgent Tasks Required: {len(analysis['agent_tasks'])}")
        for pending in analysis["agent_tasks"]:
            print(f" [{pending['priority'].upper()}] {pending['agent']}: {pending['task'][:60]}...")

    # Optionally write the agent-invocation script into the submodule.
    if args.generate_script:
        script_text = generate_agent_script(analysis)
        script_path = args.path / "agent_tasks.py"
        script_path.write_text(script_text)
        print(f"\nGenerated agent script: {script_path}")

    # Optionally persist the full analysis as JSON.
    if args.output:
        args.output.write_text(json.dumps(analysis, indent=2))
        print(f"\nAnalysis saved to: {args.output}")

    # Phase 2: prepare agent execution (unless --dry-run or nothing to do).
    if not args.dry_run and analysis["agent_tasks"]:
        summary = execute_agents(
            analysis,
            auto_commit=args.auto_commit,
            auto_push=args.auto_push,
        )
        print("\n" + divider)
        print("EXECUTION SUMMARY")
        print(divider)
        print(f"Tasks Prepared: {len(summary['executed_tasks'])}")
        print("\nTo execute these tasks, run the generated commands in Claude Code")
        print("or use the Task tool with the provided invocations.")

    # Exit 0 only when the submodule meets the target score.
    sys.exit(0 if analysis["compliant"] else 1)
if __name__ == "__main__":
    # Bug fix: was `if name == "main"`, which raises NameError (no `name`
    # binding exists) and would never match anyway. The standard guard runs
    # the CLI only when the script is executed directly, not on import.
    main()