# scripts/test-interactive-project-builder.py

#!/usr/bin/env python3
"""
CODITECT Interactive Project Builder - Test Suite
Copyright © 2025 AZ1.AI INC - All Rights Reserved

Unit tests for the interactive project builder infrastructure.

Usage:
    python3 test-interactive-project-builder.py
    python3 test-interactive-project-builder.py -v
    python3 -m pytest test-interactive-project-builder.py
"""

import argparse
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from unittest.mock import patch, MagicMock

def parse_args(argv=None):
    """Parse command line arguments.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which
            case argparse reads from sys.argv[1:] (backward compatible).

    Returns:
        argparse.Namespace with ``verbose``, ``quick`` and ``test_class``
        attributes.
    """
    parser = argparse.ArgumentParser(
        description='Test suite for CODITECT Interactive Project Builder infrastructure.',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='''
Examples:
  %(prog)s                                # Run all tests
  %(prog)s -v                             # Run with verbose output
  %(prog)s --quick                        # Run quick tests only
  %(prog)s --class TestCodebaseInventory  # Run specific test class

Test Categories:
  • TestCodebaseInventory: Directory scanning and formatting tests
  • TestAnalysisTypes: Analysis configuration validation
  • TestRunCommand: Command execution tests
  • TestFileComponents: File existence verification
  • TestSkillContent: Skill file content validation
  • TestAgentContent: Agent file content validation
  • TestCommandContent: Command file content validation
  • TestIntegration: End-to-end workflow tests

Also compatible with pytest:
  python3 -m pytest %(prog)s
'''
    )
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Verbose test output')
    parser.add_argument('--quick', action='store_true',
                        help='Run quick tests only (skip integration)')
    # 'class' is a keyword, so the value is stored under dest='test_class'
    parser.add_argument('--class', dest='test_class', type=str, default=None,
                        help='Run specific test class')
    return parser.parse_args(argv)

# Add scripts directory to path
sys.path.insert(0, str(Path(__file__).parent))

# Import modules under test; availability flags gate the dependent tests
# via unittest.skipUnless so the suite still runs without the modules.
try:
    from codebase_inventory import (
        scan_directory, format_table, format_markdown,
        should_exclude, count_lines, get_file_size_str
    )
    INVENTORY_AVAILABLE = True
except ImportError:
    INVENTORY_AVAILABLE = False

try:
    from interactive_project_builder import (
        ANALYSIS_TYPES, DELIVERABLE_TYPES,
        run_command, generate_inventory, run_analysis,
        generate_project_plan, generate_tasklist
    )
    BUILDER_AVAILABLE = True
except ImportError:
    # Module uses hyphens in filename, try alternate import
    BUILDER_AVAILABLE = False

class TestCodebaseInventory(unittest.TestCase):
    """Tests for codebase-inventory.py functions."""

    def setUp(self):
        """Create temporary test directory structure."""
        self.test_dir = tempfile.mkdtemp()
        self.test_path = Path(self.test_dir)

        # Create test structure
        (self.test_path / "src").mkdir()
        (self.test_path / "docs").mkdir()
        (self.test_path / "src" / "main.py").write_text("print('hello')\n")
        (self.test_path / "src" / "utils.py").write_text("def helper():\n pass\n")
        (self.test_path / "docs" / "README.md").write_text("# Test\n\nContent\n")
        (self.test_path / ".hidden").mkdir()
        (self.test_path / ".hidden" / "secret.txt").write_text("hidden")

    def tearDown(self):
        """Clean up test directory."""
        import shutil
        shutil.rmtree(self.test_dir)

    @unittest.skipUnless(INVENTORY_AVAILABLE, "codebase_inventory not available")
    def test_scan_directory_basic(self):
        """Test basic directory scanning."""
        result = scan_directory(self.test_path)

        self.assertEqual(result["type"], "directory")
        self.assertGreater(result["file_count"], 0)
        self.assertGreater(result["dir_count"], 0)
        self.assertIn(".py", result["file_types"])
        self.assertIn(".md", result["file_types"])

    @unittest.skipUnless(INVENTORY_AVAILABLE, "codebase_inventory not available")
    def test_scan_directory_excludes_hidden(self):
        """Test that hidden files are excluded by default."""
        result = scan_directory(self.test_path, include_hidden=False)

        # Should not include hidden directory
        child_names = [c["name"] for c in result["children"]]
        self.assertNotIn(".hidden", child_names)

    @unittest.skipUnless(INVENTORY_AVAILABLE, "codebase_inventory not available")
    def test_scan_directory_includes_hidden(self):
        """Test that hidden files can be included."""
        result = scan_directory(self.test_path, include_hidden=True)

        # Should include hidden directory
        child_names = [c["name"] for c in result["children"]]
        self.assertIn(".hidden", child_names)

    @unittest.skipUnless(INVENTORY_AVAILABLE, "codebase_inventory not available")
    def test_should_exclude(self):
        """Test exclusion pattern matching."""
        self.assertTrue(should_exclude(Path("/test/node_modules/pkg"), ["node_modules"]))
        self.assertTrue(should_exclude(Path("/test/__pycache__/"), ["__pycache__"]))
        self.assertFalse(should_exclude(Path("/test/src/main.py"), ["node_modules"]))

    @unittest.skipUnless(INVENTORY_AVAILABLE, "codebase_inventory not available")
    def test_count_lines(self):
        """Test line counting."""
        test_file = self.test_path / "src" / "main.py"
        lines = count_lines(test_file)
        self.assertEqual(lines, 1)

    @unittest.skipUnless(INVENTORY_AVAILABLE, "codebase_inventory not available")
    def test_get_file_size_str(self):
        """Test file size formatting."""
        self.assertEqual(get_file_size_str(500), "500.0 B")
        self.assertEqual(get_file_size_str(1024), "1.0 KB")
        self.assertEqual(get_file_size_str(1024 * 1024), "1.0 MB")

    @unittest.skipUnless(INVENTORY_AVAILABLE, "codebase_inventory not available")
    def test_format_table(self):
        """Test table format output."""
        result = scan_directory(self.test_path)
        output = format_table(result)

        self.assertIn("CODEBASE INVENTORY", output)
        self.assertIn("Total Files:", output)
        self.assertIn("File Types", output)

    @unittest.skipUnless(INVENTORY_AVAILABLE, "codebase_inventory not available")
    def test_format_markdown(self):
        """Test markdown format output."""
        result = scan_directory(self.test_path)
        output = format_markdown(result)

        self.assertIn("# Codebase Inventory", output)
        self.assertIn("| Metric | Value |", output)
        self.assertIn("## Summary", output)

class TestAnalysisTypes(unittest.TestCase):
    """Tests for analysis type configuration."""

    @unittest.skipUnless(BUILDER_AVAILABLE, "interactive_project_builder not available")
    def test_all_analysis_types_have_required_fields(self):
        """Verify all analysis types have required configuration."""
        required_fields = ["name", "description", "tools", "file_patterns", "command"]

        for type_key, config in ANALYSIS_TYPES.items():
            for field in required_fields:
                self.assertIn(
                    field, config,
                    f"Analysis type '{type_key}' missing field '{field}'"
                )

    @unittest.skipUnless(BUILDER_AVAILABLE, "interactive_project_builder not available")
    def test_deliverable_types_have_required_fields(self):
        """Verify all deliverable types have required configuration."""
        required_fields = ["name", "description", "files"]

        for type_key, config in DELIVERABLE_TYPES.items():
            for field in required_fields:
                self.assertIn(
                    field, config,
                    f"Deliverable type '{type_key}' missing field '{field}'"
                )

class TestRunCommand(unittest.TestCase):
    """Tests for command execution."""

    @unittest.skipUnless(BUILDER_AVAILABLE, "interactive_project_builder not available")
    def test_run_command_success(self):
        """Test successful command execution."""
        exit_code, output = run_command("echo 'test'")
        self.assertEqual(exit_code, 0)
        self.assertIn("test", output)

    @unittest.skipUnless(BUILDER_AVAILABLE, "interactive_project_builder not available")
    def test_run_command_dry_run(self):
        """Test dry run mode."""
        exit_code, output = run_command("echo 'test'", dry_run=True)
        self.assertEqual(exit_code, 0)
        self.assertIn("[DRY RUN]", output)

class TestFileComponents(unittest.TestCase): """Tests verifying all required files exist."""

def setUp(self):
"""Set up paths to components."""
self.base_path = Path(__file__).parent.parent

def test_skill_file_exists(self):
"""Verify SKILL.md exists."""
skill_path = self.base_path / "skills" / "interactive-project-builder" / "SKILL.md"
self.assertTrue(
skill_path.exists(),
f"SKILL.md not found at {skill_path}"
)

def test_agent_file_exists(self):
"""Verify agent file exists."""
agent_path = self.base_path / "agents" / "project-builder-orchestrator.md"
self.assertTrue(
agent_path.exists(),
f"Agent file not found at {agent_path}"
)

def test_command_file_exists(self):
"""Verify command file exists."""
command_path = self.base_path / "commands" / "build-project.md"
self.assertTrue(
command_path.exists(),
f"Command file not found at {command_path}"
)

def test_main_script_exists(self):
"""Verify main script exists."""
script_path = self.base_path / "scripts" / "interactive-project-builder.py"
self.assertTrue(
script_path.exists(),
f"Main script not found at {script_path}"
)

def test_inventory_script_exists(self):
"""Verify inventory script exists."""
script_path = self.base_path / "scripts" / "codebase-inventory.py"
self.assertTrue(
script_path.exists(),
f"Inventory script not found at {script_path}"
)

class TestSkillContent(unittest.TestCase): """Tests for skill file content."""

def setUp(self):
"""Load skill content."""
skill_path = Path(__file__).parent.parent / "skills" / "interactive-project-builder" / "SKILL.md"
if skill_path.exists():
self.content = skill_path.read_text()
else:
self.content = ""

def test_skill_has_yaml_frontmatter(self):
"""Verify skill has YAML frontmatter."""
self.assertTrue(
self.content.startswith("---"),
"SKILL.md must start with YAML frontmatter"
)
self.assertIn("name:", self.content)
self.assertIn("description:", self.content)

def test_skill_has_required_sections(self):
"""Verify skill has required sections."""
required_sections = [
"## Purpose",
"## When to Use",
"## Instructions",
"## Examples"
]
for section in required_sections:
self.assertIn(
section, self.content,
f"SKILL.md missing section: {section}"
)

def test_skill_has_interactive_discovery(self):
"""Verify skill includes interactive discovery phase."""
self.assertIn("Interactive Discovery", self.content)
self.assertIn("Other", self.content) # Must have "Other" option

class TestAgentContent(unittest.TestCase): """Tests for agent file content."""

def setUp(self):
"""Load agent content."""
agent_path = Path(__file__).parent.parent / "agents" / "project-builder-orchestrator.md"
if agent_path.exists():
self.content = agent_path.read_text()
else:
self.content = ""

def test_agent_has_yaml_frontmatter(self):
"""Verify agent has YAML frontmatter."""
self.assertTrue(
self.content.startswith("---"),
"Agent file must start with YAML frontmatter"
)
self.assertIn("name:", self.content)
self.assertIn("description:", self.content)
self.assertIn("tools:", self.content)
self.assertIn("model:", self.content)

def test_agent_has_required_sections(self):
"""Verify agent has required sections."""
required_sections = [
"## Role",
"## Core Responsibilities",
"## Capabilities",
"## Invocation Examples"
]
for section in required_sections:
self.assertIn(
section, self.content,
f"Agent file missing section: {section}"
)

class TestCommandContent(unittest.TestCase): """Tests for command file content."""

def setUp(self):
"""Load command content."""
command_path = Path(__file__).parent.parent / "commands" / "build-project.md"
if command_path.exists():
self.content = command_path.read_text()
else:
self.content = ""

def test_command_has_yaml_frontmatter(self):
"""Verify command has YAML frontmatter."""
self.assertTrue(
self.content.startswith("---"),
"Command file must start with YAML frontmatter"
)
self.assertIn("name:", self.content)
self.assertIn("description:", self.content)

def test_command_has_required_sections(self):
"""Verify command has required sections."""
required_sections = [
"## Usage",
"## Arguments",
"<default_behavior>",
"<verification>"
]
for section in required_sections:
self.assertIn(
section, self.content,
f"Command file missing section: {section}"
)

class TestIntegration(unittest.TestCase):
    """Integration tests for the complete workflow."""

    def setUp(self):
        """Create temporary test directory."""
        self.test_dir = tempfile.mkdtemp()
        self.test_path = Path(self.test_dir)

        # Create realistic test structure
        (self.test_path / "docs").mkdir()
        (self.test_path / "src").mkdir()

        # Create some markdown files
        for i in range(5):
            (self.test_path / "docs" / f"file{i}.md").write_text(f"# File {i}\n\nContent\n")

        # Create some Python files
        for i in range(3):
            (self.test_path / "src" / f"module{i}.py").write_text(f"# Module {i}\n")

    def tearDown(self):
        """Clean up test directory."""
        import shutil
        shutil.rmtree(self.test_dir)

    @unittest.skipUnless(BUILDER_AVAILABLE, "interactive_project_builder not available")
    def test_generate_inventory_integration(self):
        """Test inventory generation on test directory."""
        inventory = generate_inventory(self.test_path)

        self.assertEqual(inventory["scope"], str(self.test_path))
        self.assertGreater(inventory["total_files"], 0)
        self.assertGreater(len(inventory["folders"]), 0)

    @unittest.skipUnless(BUILDER_AVAILABLE, "interactive_project_builder not available")
    def test_run_analysis_markdown_quality(self):
        """Test markdown quality analysis."""
        results = run_analysis("markdown-quality", self.test_path, dry_run=True)

        self.assertEqual(results["analysis_type"], "markdown-quality")
        self.assertEqual(results["scope"], str(self.test_path))
        self.assertIn("tool_results", results)

    @unittest.skipUnless(BUILDER_AVAILABLE, "interactive_project_builder not available")
    def test_generate_project_plan_creates_file(self):
        """Test project plan generation."""
        inventory = {"scope": str(self.test_path), "total_files": 10, "folders": []}
        results = {"summary": {"total_errors": 5, "error_counts": {"MD001": 3, "MD009": 2}}}

        output_path = generate_project_plan(
            "markdown-quality",
            inventory,
            results,
            self.test_path,
            dry_run=False
        )

        self.assertIsNotNone(output_path)
        self.assertTrue(output_path.exists())
        content = output_path.read_text()
        self.assertIn("MARKDOWN-QUALITY", content)
        self.assertIn("Project Plan", content)

    @unittest.skipUnless(BUILDER_AVAILABLE, "interactive_project_builder not available")
    def test_generate_tasklist_creates_file(self):
        """Test tasklist generation."""
        inventory = {"scope": str(self.test_path), "total_files": 10, "folders": []}
        results = {"summary": {"total_errors": 5, "error_counts": {"MD001": 3, "MD009": 2}}}

        output_path = generate_tasklist(
            "markdown-quality",
            inventory,
            results,
            self.test_path,
            dry_run=False
        )

        self.assertIsNotNone(output_path)
        self.assertTrue(output_path.exists())
        content = output_path.read_text()
        self.assertIn("Task List with Checkboxes", content)
        self.assertIn("[ ]", content)  # Must have checkbox items

def run_tests():
    """Build and run the test suite; return a process exit code.

    Honors the command line flags from parse_args(): -v raises verbosity,
    --quick skips the TestIntegration end-to-end tests, and --class
    restricts the run to a single named test class. (Previously the parsed
    arguments were ignored entirely.)

    Returns:
        0 if all tests passed, 1 otherwise.
    """
    args = parse_args()

    # All test classes, in execution order
    test_classes = [
        TestCodebaseInventory,
        TestAnalysisTypes,
        TestRunCommand,
        TestFileComponents,
        TestSkillContent,
        TestAgentContent,
        TestCommandContent,
        TestIntegration,
    ]

    # --quick: drop the slower end-to-end tests
    if args.quick:
        test_classes = [c for c in test_classes if c is not TestIntegration]

    # --class: run only the named test class
    if args.test_class:
        test_classes = [c for c in test_classes if c.__name__ == args.test_class]

    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for test_class in test_classes:
        suite.addTests(loader.loadTestsFromTestCase(test_class))

    # Run with verbosity (detailed per-test output when -v is given)
    runner = unittest.TextTestRunner(verbosity=2 if args.verbose else 1)
    result = runner.run(suite)

    # Print summary
    print("\n" + "=" * 60)
    print("TEST SUMMARY")
    print("=" * 60)
    print(f"Tests run: {result.testsRun}")
    print(f"Failures: {len(result.failures)}")
    print(f"Errors: {len(result.errors)}")
    print(f"Skipped: {len(result.skipped)}")

    return 0 if result.wasSuccessful() else 1

# Script entry point: exit with the suite's pass/fail status
if __name__ == "__main__":
    sys.exit(run_tests())