#!/usr/bin/env python3
"""
CODITECT Interactive Project Builder
Copyright © 2025 AZ1.AI INC - All Rights Reserved

Main orchestration script for the interactive project builder workflow.
Coordinates discovery, inventory, analysis, and deliverable generation.

Usage:
    python3 interactive-project-builder.py [options]
    python3 interactive-project-builder.py --analysis-type markdown-quality --scope docs/
    python3 interactive-project-builder.py --interactive

Options:
    --interactive     Run in interactive mode (default)
    --analysis-type   Pre-select analysis type
    --scope           Target directory or pattern
    --deliverables    Output type (project-plan-tasklist, full-infrastructure, etc.)
    --dry-run         Preview without creating files
    --output-dir      Directory for deliverables (default: current)
"""

import argparse
import json
import logging
import re
import subprocess
import sys
from datetime import datetime
from pathlib import Path
from typing import Any, Optional

# Configure logging for the whole script: timestamped INFO-level messages.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)
# Module-level logger (was `getLogger(name)` — a NameError; must be __name__).
logger = logging.getLogger(__name__)

# Analysis type configurations.
# Each entry: display name, description, tools invoked, glob patterns of the
# files it targets, and the shell command template ("{scope}" is substituted).
# NOTE: glob asterisks restored — extraction had stripped them (".md" → "*.md").
ANALYSIS_TYPES = {
    "markdown-quality": {
        "name": "Markdown Quality",
        "description": "Lint errors, formatting, documentation quality",
        "tools": ["markdownlint-cli2"],
        "file_patterns": ["*.md"],
        "command": 'markdownlint-cli2 "{scope}/**/*.md" 2>&1',
    },
    "code-quality": {
        "name": "Code Quality",
        "description": "Python/JS lint, type coverage, test coverage",
        "tools": ["ruff", "eslint", "pylint"],
        "file_patterns": ["*.py", "*.js", "*.ts"],
        "command": 'ruff check "{scope}" --statistics 2>&1',
    },
    "security-audit": {
        "name": "Security Audit",
        "description": "Vulnerability scan, dependency audit, secrets detection",
        "tools": ["pip-audit", "npm audit", "trufflehog"],
        "file_patterns": ["*"],
        "command": 'pip-audit 2>&1 || echo "pip-audit not found"',
    },
    "documentation-coverage": {
        "name": "Documentation Coverage",
        "description": "Missing docs, outdated content, broken links",
        "tools": ["markdown-link-check"],
        "file_patterns": ["*.md", "README*"],
        "command": 'find "{scope}" -name "README.md" | wc -l',
    },
    "performance-analysis": {
        "name": "Performance Analysis",
        "description": "Bottlenecks, optimization opportunities",
        "tools": ["py-spy", "perf"],
        "file_patterns": ["*.py", "*.js"],
        "command": 'echo "Performance analysis requires manual profiling"',
    },
    "architecture-review": {
        "name": "Architecture Review",
        "description": "Patterns, dependencies, technical debt",
        "tools": ["dependency-cruiser", "import-linter"],
        "file_patterns": ["*"],
        "command": 'find "{scope}" -type f -name "*.py" | head -20',
    },
}

DELIVERABLE_TYPES = { "project-plan-tasklist": { "name": "PROJECT-PLAN + TASKLIST", "description": "Standard planning documents with checkboxes", "files": ["PROJECT-PLAN.md", "TASKLIST-WITH-CHECKBOXES.md"] }, "full-infrastructure": { "name": "Full Infrastructure", "description": "Also create agents, commands, scripts, skills", "files": ["PROJECT-PLAN.md", "TASKLIST-WITH-CHECKBOXES.md", "agents/{name}-specialist.md", "commands/{name}.md", "skills/{name}/SKILL.md", "scripts/{name}-runner.py"] }, "automation-only": { "name": "Automation Only", "description": "Scripts to fix issues automatically", "files": ["scripts/{name}-fixer.py", "scripts/{name}-runner.py"] }, "report-only": { "name": "Report Only", "description": "Analysis report without action plan", "files": ["{name}-ANALYSIS-REPORT.md"] } }

def get_script_dir() -> Path: """Get the directory containing this script.""" return Path(file).parent.resolve()

def get_template_dir() -> Path: """Get the CODITECT-CORE-STANDARDS/TEMPLATES directory.""" script_dir = get_script_dir() template_dir = script_dir.parent / "CODITECT-CORE-STANDARDS" / "TEMPLATES" if template_dir.exists(): return template_dir # Fallback to current directory return Path.cwd()

def run_command(command: str, dry_run: bool = False) -> tuple[int, str]:
    """Run a shell command and return (exit_code, combined stdout+stderr).

    Args:
        command: Shell command line to execute (may contain pipes/redirects,
            hence shell=True).
        dry_run: When True, log the command and skip execution.

    Returns:
        Tuple of (exit code, captured output). Timeouts and unexpected
        errors are reported as exit code 1 with a descriptive message
        rather than raising.
    """
    if dry_run:
        logger.info(f"[DRY RUN] Would run: {command}")
        return 0, "[DRY RUN] Command not executed"

    try:
        result = subprocess.run(
            command,
            shell=True,  # command templates rely on shell features (2>&1, |)
            capture_output=True,
            text=True,
            timeout=300,
        )
        return result.returncode, result.stdout + result.stderr
    except subprocess.TimeoutExpired:
        return 1, "Command timed out after 300 seconds"
    except Exception as e:
        # Best-effort: surface the failure as output instead of crashing.
        return 1, f"Error running command: {e}"

def count_files(scope: Path, patterns: list[str]) -> dict[str, int]: """Count files matching patterns in scope.""" counts = {} for pattern in patterns: try: files = list(scope.rglob(pattern)) counts[pattern] = len(files) except Exception as e: logger.warning(f"Error counting {pattern}: {e}") counts[pattern] = 0 return counts

def generate_inventory(scope: Path) -> dict[str, Any]:
    """Generate a folder and file inventory for the scope.

    Returns a dict with: scope, generation timestamp, per-top-level-folder
    stats, total file count, and file counts by extension. If the scope
    does not exist the (empty) inventory is returned after logging an error.

    NOTE: total_files sums only files inside top-level subfolders; files
    sitting directly in scope are counted in file_types but not total_files.
    """
    inventory: dict[str, Any] = {
        "scope": str(scope),
        "generated_at": datetime.now().isoformat(),
        "folders": [],
        "total_files": 0,
        "file_types": {},
    }

    if not scope.exists():
        logger.error(f"Scope directory not found: {scope}")
        return inventory

    # Enumerate top-level folders, skipping hidden ones (.git, .venv, ...).
    for folder in sorted(scope.iterdir()):
        if folder.is_dir() and not folder.name.startswith('.'):
            folder_info = {
                "path": str(folder.relative_to(scope)),
                "file_count": sum(1 for p in folder.rglob("*") if p.is_file()),
                "subdirs": sum(1 for p in folder.iterdir() if p.is_dir()),
            }
            inventory["folders"].append(folder_info)
            inventory["total_files"] += folder_info["file_count"]

    # Count files by extension across the whole scope.
    for ext in [".md", ".py", ".js", ".ts", ".json", ".yaml", ".yml"]:
        count = len(list(scope.rglob(f"*{ext}")))
        if count > 0:
            inventory["file_types"][ext] = count

    return inventory

def run_analysis(
    analysis_type: str,
    scope: Path,
    dry_run: bool = False,
) -> dict[str, Any]:
    """Run the configured analysis command and return structured results.

    Args:
        analysis_type: Key into ANALYSIS_TYPES.
        scope: Directory substituted into the command template.
        dry_run: Passed through to run_command (skips execution).

    Returns:
        Dict with analysis_type, scope, timestamp, tool_results and (for
        markdown-quality) a summary of lint-error counts per MDxxx rule.
        Unknown analysis types return {"error": ...}.
    """
    config = ANALYSIS_TYPES.get(analysis_type, {})
    if not config:
        logger.error(f"Unknown analysis type: {analysis_type}")
        return {"error": f"Unknown analysis type: {analysis_type}"}

    results: dict[str, Any] = {
        "analysis_type": analysis_type,
        "scope": str(scope),
        "timestamp": datetime.now().isoformat(),
        "tool_results": [],
        "summary": {},
    }

    # Run the analysis command (if the config defines one).
    command = config.get("command", "").format(scope=scope)
    if command:
        exit_code, output = run_command(command, dry_run)
        results["tool_results"].append({
            "tool": config.get("tools", ["unknown"])[0],
            "command": command,
            "exit_code": exit_code,
            "output": output[:5000],  # cap stored output size
        })

        # Parse output for a per-rule summary (markdownlint emits MDxxx ids).
        if analysis_type == "markdown-quality":
            error_counts: dict[str, int] = {}
            for line in output.split("\n"):
                match = re.search(r"(MD\d{3})", line)
                if match:
                    rule = match.group(1)
                    error_counts[rule] = error_counts.get(rule, 0) + 1
            results["summary"]["error_counts"] = error_counts
            results["summary"]["total_errors"] = sum(error_counts.values())

    return results

def generate_project_plan(
    analysis_type: str,
    inventory: dict[str, Any],
    analysis_results: dict[str, Any],
    output_dir: Path,
    dry_run: bool = False,
) -> Optional[Path]:
    """Generate a PROJECT-PLAN.md deliverable from analysis results.

    Writes "<NAME>-PROJECT-PLAN.md" into output_dir (or only logs the
    path when dry_run is True) and returns the output path.
    """
    config = ANALYSIS_TYPES.get(analysis_type, {})
    name = config.get("name", analysis_type).upper().replace(" ", "-")

    template = f"""# {name} - Project Plan

**Product:** {name} Analysis Project
**Repository:** {Path.cwd().name}
**Status:** Planning Phase
**Last Updated:** {datetime.now().strftime("%Y-%m-%d")}

---

## Executive Summary

This project addresses {config.get('description', 'analysis')} across the target scope. Analysis identified areas for improvement that are documented below.

## Analysis Results

### Inventory Summary

| Folder | Files | Purpose |
|--------|-------|---------|
"""
    # Add folder inventory (capped at 20 rows to keep the plan readable).
    for folder in inventory.get("folders", [])[:20]:
        template += f"| {folder['path']} | {folder['file_count']} | TBD |\n"

    template += f"""
**Total Files:** {inventory.get('total_files', 0)}

### Issue Distribution

"""

    # Add error summary if available (top 10 rules by count).
    if "summary" in analysis_results:
        summary = analysis_results["summary"]
        if "error_counts" in summary:
            template += "| Rule | Count | Description |\n|------|-------|-------------|\n"
            for rule, count in sorted(
                summary.get("error_counts", {}).items(),
                key=lambda x: x[1],
                reverse=True,
            )[:10]:
                template += f"| {rule} | {count} | See markdownlint documentation |\n"
            template += f"\n**Total Issues:** {summary.get('total_errors', 0)}\n"

    template += """
## Implementation Roadmap

### Phase 1: Critical Issues

- [ ] Address highest-priority issues
- [ ] Verify fixes with analysis tools

### Phase 2: Medium Priority

- [ ] Fix remaining issues by severity
- [ ] Update documentation

### Phase 3: Validation

- [ ] Run full analysis to verify 0 errors
- [ ] Document lessons learned

## Success Metrics

- All critical issues resolved
- Analysis passes with 0 errors
- Documentation updated

---

**Generated by:** CODITECT Interactive Project Builder
**Timestamp:** """ + datetime.now().isoformat()

    output_path = output_dir / f"{name}-PROJECT-PLAN.md"

    if dry_run:
        logger.info(f"[DRY RUN] Would create: {output_path}")
        return output_path

    output_path.write_text(template)
    logger.info(f"Created: {output_path}")
    return output_path

def generate_tasklist(
    analysis_type: str,
    inventory: dict[str, Any],
    analysis_results: dict[str, Any],
    output_dir: Path,
    dry_run: bool = False,
) -> Optional[Path]:
    """Generate a TASKLIST-WITH-CHECKBOXES.md deliverable from analysis results.

    Writes "<NAME>-TASKLIST-WITH-CHECKBOXES.md" into output_dir (or only
    logs the path when dry_run is True) and returns the output path.
    """
    config = ANALYSIS_TYPES.get(analysis_type, {})
    name = config.get("name", analysis_type).upper().replace(" ", "-")

    template = f"""# {name} - Task List with Checkboxes

**Product:** {name} Analysis Project
**Repository:** {Path.cwd().name}
**Status:** In Progress
**Last Updated:** {datetime.now().strftime("%Y-%m-%d")}

---

## Legend

- `[ ]` Pending
- `[x]` Completed
- `[>]` In Progress
- `[!]` Blocked

## Phase 0: Discovery (Complete)

- [x] Analysis type selected: {analysis_type}
- [x] Scope defined: {inventory.get('scope', 'N/A')}
- [x] Inventory generated: {inventory.get('total_files', 0)} files
- [x] Analysis executed

## Phase 1: Analysis Review

- [ ] Review analysis results
- [ ] Prioritize issues by severity
- [ ] Identify quick wins

## Phase 2: Implementation

"""

    # Generate one task per lint rule (top 15 rules by occurrence count).
    if "summary" in analysis_results:
        summary = analysis_results["summary"]
        if "error_counts" in summary:
            for rule, count in sorted(
                summary.get("error_counts", {}).items(),
                key=lambda x: x[1],
                reverse=True,
            )[:15]:
                template += f"- [ ] Fix {rule} issues ({count} occurrences)\n"

    template += """
## Phase 3: Validation

- [ ] Run analysis to verify fixes
- [ ] Document remaining issues
- [ ] Update PROJECT-PLAN.md with results

## Phase 4: Documentation

- [ ] Update relevant documentation
- [ ] Create remediation notes
- [ ] Archive analysis artifacts

## Statistics

**Total Tasks:** TBD
**Completed:** 4 (Discovery phase)
**In Progress:** 0
**Pending:** TBD

---

**Generated by:** CODITECT Interactive Project Builder
**Timestamp:** """ + datetime.now().isoformat()

    output_path = output_dir / f"{name}-TASKLIST-WITH-CHECKBOXES.md"

    if dry_run:
        logger.info(f"[DRY RUN] Would create: {output_path}")
        return output_path

    output_path.write_text(template)
    logger.info(f"Created: {output_path}")
    return output_path

def interactive_discovery() -> dict[str, str]:
    """Run interactive discovery to gather user requirements.

    Prompts on stdin for analysis type, scope, and deliverables; returns
    a dict with keys "analysis_type", "scope", "deliverables".
    """
    print("\n" + "=" * 60)
    print("CODITECT Interactive Project Builder")
    print("=" * 60)

    # Analysis type selection
    print("\n## What do you want to analyze and build a project for?\n")
    for i, (key, config) in enumerate(ANALYSIS_TYPES.items(), 1):
        print(f" {i}. {config['name']} - {config['description']}")
    print(f" {len(ANALYSIS_TYPES) + 1}. Other - Describe your analysis type")

    while True:
        try:
            choice = input("\nSelect option (1-7): ").strip()
            idx = int(choice) - 1
            if 0 <= idx < len(ANALYSIS_TYPES):
                analysis_type = list(ANALYSIS_TYPES.keys())[idx]
                break
            elif idx == len(ANALYSIS_TYPES):
                # Free-form analysis type (the "Other" option).
                analysis_type = input("Describe your analysis type: ").strip()
                break
        except (ValueError, IndexError):
            print("Invalid selection. Please try again.")

    # Scope selection
    print("\n## What is the scope of analysis?\n")
    print(" 1. Single Folder - Analyze one specific directory")
    print(" 2. Full Repository - Analyze entire codebase")
    print(" 3. Multiple Submodules - Analyze across git submodules")
    print(" 4. Custom Selection - Specify folders")

    scope = "."
    while True:
        try:
            choice = input("\nSelect option (1-4): ").strip()
            if choice == "1":
                scope = input("Enter folder path: ").strip() or "."
                break
            elif choice == "2":
                scope = "."
                break
            elif choice == "3":
                scope = "submodules/"
                break
            elif choice == "4":
                scope = input("Enter custom path: ").strip() or "."
                break
        except ValueError:
            print("Invalid selection. Please try again.")

    # Deliverables selection
    print("\n## What deliverables do you need?\n")
    for i, (key, config) in enumerate(DELIVERABLE_TYPES.items(), 1):
        print(f" {i}. {config['name']} - {config['description']}")
    print(f" {len(DELIVERABLE_TYPES) + 1}. Other - Describe your deliverables")

    deliverables = "project-plan-tasklist"
    while True:
        try:
            choice = input("\nSelect option (1-5): ").strip()
            idx = int(choice) - 1
            if 0 <= idx < len(DELIVERABLE_TYPES):
                deliverables = list(DELIVERABLE_TYPES.keys())[idx]
                break
            elif idx == len(DELIVERABLE_TYPES):
                deliverables = input("Describe your deliverables: ").strip()
                break
        except (ValueError, IndexError):
            print("Invalid selection. Please try again.")

    return {
        "analysis_type": analysis_type,
        "scope": scope,
        "deliverables": deliverables,
    }

def main() -> int:
    """Main entry point: parse args, run discovery, analysis, deliverables.

    Returns a process exit code (0 on success).
    """
    parser = argparse.ArgumentParser(
        description="CODITECT Interactive Project Builder",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__,  # was `doc` — a NameError; module docstring intended
    )
    parser.add_argument(
        "--interactive", "-i",
        action="store_true",
        help="Run in interactive mode",
    )
    parser.add_argument(
        "--analysis-type", "-t",
        choices=list(ANALYSIS_TYPES.keys()),
        help="Pre-select analysis type",
    )
    parser.add_argument(
        "--scope", "-s",
        default=".",
        help="Target directory or pattern",
    )
    parser.add_argument(
        "--deliverables", "-d",
        choices=list(DELIVERABLE_TYPES.keys()),
        default="project-plan-tasklist",
        help="Output type",
    )
    parser.add_argument(
        "--output-dir", "-o",
        default=".",
        help="Directory for deliverables",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Preview without creating files",
    )
    parser.add_argument(
        "--json",
        action="store_true",
        help="Output results as JSON",
    )

    args = parser.parse_args()

    # Determine mode: fall back to interactive when no analysis type given.
    if args.interactive or (not args.analysis_type):
        config = interactive_discovery()
        analysis_type = config["analysis_type"]
        scope = Path(config["scope"])
        deliverables = config["deliverables"]
    else:
        analysis_type = args.analysis_type
        scope = Path(args.scope)
        deliverables = args.deliverables

    output_dir = Path(args.output_dir)

    logger.info(f"Analysis Type: {analysis_type}")
    logger.info(f"Scope: {scope}")
    logger.info(f"Deliverables: {deliverables}")
    logger.info(f"Output Directory: {output_dir}")

    # Phase 2: Generate inventory
    logger.info("Generating inventory...")
    inventory = generate_inventory(scope)

    # Phase 3: Run analysis
    logger.info("Running analysis...")
    results = run_analysis(analysis_type, scope, args.dry_run)

    # Phase 4: Generate deliverables
    logger.info("Generating deliverables...")
    plan_path = generate_project_plan(
        analysis_type, inventory, results, output_dir, args.dry_run
    )
    tasklist_path = generate_tasklist(
        analysis_type, inventory, results, output_dir, args.dry_run
    )

    # Output summary
    summary = {
        "analysis_type": analysis_type,
        "scope": str(scope),
        "inventory": inventory,
        "analysis_results": results,
        "deliverables": {
            "project_plan": str(plan_path) if plan_path else None,
            "tasklist": str(tasklist_path) if tasklist_path else None,
        },
    }

    if args.json:
        print(json.dumps(summary, indent=2))
    else:
        print("\n" + "=" * 60)
        print("Project Build Complete")
        print("=" * 60)
        print(f"\nInventory: {inventory.get('total_files', 0)} files in "
              f"{len(inventory.get('folders', []))} folders")
        if "summary" in results:
            print(f"Issues Found: {results['summary'].get('total_errors', 0)}")
        print("\nDeliverables:")  # was a pointless f-string with no placeholder
        if plan_path:
            print(f" - {plan_path}")
        if tasklist_path:
            print(f" - {tasklist_path}")

    return 0

if name == "main": sys.exit(main())