Agent Skills Framework Extension
Skill Quality Patterns Skill
When to Use This Skill
Use this skill when implementing skill quality patterns in your codebase.
How to Use This Skill
- Review the patterns and examples below
- Apply the relevant patterns to your implementation
- Follow the best practices outlined in this skill
Skill evaluation, optimization techniques, token efficiency analysis, and quality assessment specifically for CODITECT skill files.
Core Capabilities
- Structure Validation - YAML frontmatter, required sections, format compliance
- Token Efficiency - Token budget adherence, content density analysis
- Code Example Quality - Completeness, runability, best practices
- Progressive Disclosure - Information hierarchy, readability
- Composability - Integration points, skill chaining, reusability
Skill Validator
# scripts/skill_validator.py
from typing import List, Dict, Optional
from dataclasses import dataclass
from pathlib import Path
import yaml
import re
@dataclass
class SkillIssue:
    """Issue found in skill file."""
    file: str  # path of the skill file the issue was found in
    severity: str  # 'error', 'warning', 'info'
    category: str  # quality dimension, e.g. 'structure', 'token_efficiency'
    message: str  # human-readable description of the problem
    suggestion: Optional[str] = None  # actionable fix, when one is known
@dataclass
class SkillQualityScore:
    """Quality score for a skill."""
    overall: float  # 0-100, weighted average of the five dimensions
    structure: float  # frontmatter fields + required sections
    token_efficiency: float  # adherence to the token budget
    code_examples: float  # example count, language tags, completeness
    documentation: float  # formatting conventions of the docs sections
    composability: float  # composable_with list + Integration Points
    issues: List[SkillIssue]  # all issues found during validation
class SkillValidator:
    """Validate CODITECT skill quality.

    Parses a SKILL.md file (YAML frontmatter + markdown body) and scores
    it across five weighted dimensions -- structure, token efficiency,
    code examples, documentation, composability -- accumulating
    actionable ``SkillIssue`` records along the way.
    """

    # Frontmatter keys every skill file must declare.
    REQUIRED_FRONTMATTER = [
        'name', 'description', 'allowed-tools', 'metadata', 'priority'
    ]
    # Markdown sections every skill body must contain.
    REQUIRED_SECTIONS = [
        'Core Capabilities',
        'Usage Examples',
        'Integration Points'
    ]
    # Hard ceiling on estimated tokens per skill (1 token ~ 4 chars).
    TOKEN_BUDGET_MAX = 5000

    def __init__(self):
        # Issues collected during the most recent validate() call.
        self.issues: List["SkillIssue"] = []

    def validate(self, skill_path: str) -> "SkillQualityScore":
        """Validate skill and calculate quality score.

        Args:
            skill_path: Path to the SKILL.md file to validate.

        Returns:
            SkillQualityScore with per-dimension scores (0-100), a
            weighted overall score, and all issues found.
        """
        self.issues = []
        # Explicit utf-8: skill files may contain non-ASCII characters.
        with open(skill_path, 'r', encoding='utf-8') as f:
            content = f.read()

        # Parse frontmatter and body
        frontmatter, body = self._parse_skill(content)

        # Validate each quality dimension independently.
        structure_score = self._validate_structure(skill_path, frontmatter, body)
        token_score = self._validate_token_efficiency(skill_path, content)
        examples_score = self._validate_code_examples(skill_path, body)
        docs_score = self._validate_documentation(skill_path, body)
        composability_score = self._validate_composability(skill_path, frontmatter, body)

        # Weighted average; weights sum to 1.0.
        overall = (
            structure_score * 0.25 +
            token_score * 0.20 +
            examples_score * 0.25 +
            docs_score * 0.15 +
            composability_score * 0.15
        )
        return SkillQualityScore(
            overall=overall,
            structure=structure_score,
            token_efficiency=token_score,
            code_examples=examples_score,
            documentation=docs_score,
            composability=composability_score,
            issues=self.issues
        )

    def _parse_skill(self, content: str) -> tuple:
        """Split skill content into (frontmatter dict, markdown body).

        Falls back to ({}, content) when there is no '---' delimited
        frontmatter, when it is not valid YAML, or when it does not
        parse to a mapping.
        """
        parts = content.split('---', 2)
        if len(parts) < 3:
            return {}, content
        try:
            frontmatter = yaml.safe_load(parts[1])
        except yaml.YAMLError:
            # Was a bare `except:` -- catch only YAML parse failures so
            # unrelated errors (e.g. KeyboardInterrupt) still propagate.
            return {}, content
        # safe_load returns None for empty input and may yield scalars;
        # normalize to a dict so `field in frontmatter` checks are safe.
        if not isinstance(frontmatter, dict):
            return {}, content
        return frontmatter, parts[2]

    def _validate_structure(
        self,
        path: str,
        frontmatter: Dict,
        body: str
    ) -> float:
        """Validate frontmatter fields and required body sections."""
        score = 100.0
        # Check required frontmatter fields (-15 each).
        for field in self.REQUIRED_FRONTMATTER:
            if field not in frontmatter:
                self.issues.append(SkillIssue(
                    file=path,
                    severity='error',
                    category='structure',
                    message=f'Missing required frontmatter field: {field}',
                    suggestion=f'Add {field} to YAML frontmatter'
                ))
                score -= 15
        # The Agent Skills Framework extension block is mandatory (-20).
        if 'agent_skills_framework' not in frontmatter:
            self.issues.append(SkillIssue(
                file=path,
                severity='error',
                category='structure',
                message='Missing agent_skills_framework section',
                suggestion='Add agent_skills_framework block to frontmatter'
            ))
            score -= 20
        # Check required markdown sections (-10 each).
        for section in self.REQUIRED_SECTIONS:
            if section not in body:
                self.issues.append(SkillIssue(
                    file=path,
                    severity='warning',
                    category='structure',
                    message=f'Missing section: {section}',
                    suggestion=f'Add ## {section} section to skill'
                ))
                score -= 10
        return max(0, score)

    def _validate_token_efficiency(self, path: str, content: str) -> float:
        """Validate token usage against TOKEN_BUDGET_MAX."""
        # Rough token count heuristic (1 token ~ 4 chars).
        estimated_tokens = len(content) / 4
        score = 100.0
        if estimated_tokens > self.TOKEN_BUDGET_MAX:
            overage = estimated_tokens - self.TOKEN_BUDGET_MAX
            overage_pct = (overage / self.TOKEN_BUDGET_MAX) * 100
            self.issues.append(SkillIssue(
                file=path,
                severity='warning',
                category='token_efficiency',
                message=f'Skill exceeds token budget: {estimated_tokens:.0f} > {self.TOKEN_BUDGET_MAX}',
                suggestion='Condense examples or move content to external files'
            ))
            # Penalty scales with overage, capped at -50.
            score -= min(50, overage_pct)
        return max(0, score)

    def _validate_code_examples(self, path: str, body: str) -> float:
        """Validate code example count, language tags, and completeness."""
        score = 100.0
        # Fenced code blocks as (language, code) pairs.
        code_blocks = re.findall(r'```(\w+)?\n(.*?)\n```', body, re.DOTALL)
        if len(code_blocks) < 3:
            self.issues.append(SkillIssue(
                file=path,
                severity='warning',
                category='code_examples',
                message=f'Only {len(code_blocks)} code examples found (recommended: 3-4)',
                suggestion='Add more code examples to demonstrate skill usage'
            ))
            score -= 20
        # Blocks without a language identifier (-5 total).
        unspecified = sum(1 for lang, _ in code_blocks if not lang)
        if unspecified:
            self.issues.append(SkillIssue(
                file=path,
                severity='info',
                category='code_examples',
                message=f'{unspecified} code blocks missing language specification',
                suggestion='Add language identifier: ```python or ```typescript'
            ))
            score -= 5
        # Completeness heuristic: runnable examples should import what
        # they use. Info-level only; no score penalty.
        for i, (lang, code) in enumerate(code_blocks):
            if lang in ['python', 'typescript', 'javascript']:
                if 'import' not in code and 'from' not in code:
                    self.issues.append(SkillIssue(
                        file=path,
                        severity='info',
                        category='code_examples',
                        message=f'Code block {i+1} may be missing imports',
                        suggestion='Include imports for standalone examples'
                    ))
        return max(0, score)

    def _validate_documentation(self, path: str, body: str) -> float:
        """Validate documentation formatting conventions."""
        score = 100.0
        # Core Capabilities should be a numbered list (-5).
        if '##' in body and 'Core Capabilities' in body:
            capabilities_section = body.split('Core Capabilities')[1].split('##')[0]
            if not re.search(r'^\d+\.', capabilities_section, re.MULTILINE):
                self.issues.append(SkillIssue(
                    file=path,
                    severity='info',
                    category='documentation',
                    message='Core Capabilities should use numbered list',
                    suggestion='Format as: 1. **Capability** - Description'
                ))
                score -= 5
        # Usage examples must follow the canonical phrasing (-10).
        if 'Usage Examples' in body:
            examples_section = body.split('Usage Examples')[1]
            if not re.search(r'Apply .+ skill to', examples_section):
                self.issues.append(SkillIssue(
                    file=path,
                    severity='warning',
                    category='documentation',
                    message='Usage examples should follow "Apply X skill to Y" format',
                    suggestion='Format as: Apply skill-name skill to perform action'
                ))
                score -= 10
        return max(0, score)

    def _validate_composability(
        self,
        path: str,
        frontmatter: Dict,
        body: str
    ) -> float:
        """Validate composability metadata and Integration Points section."""
        score = 100.0
        asf = frontmatter.get('agent_skills_framework', {})
        # Guard against malformed frontmatter where the block is a scalar.
        if not isinstance(asf, dict):
            asf = {}
        composable = asf.get('composable_with', [])
        if not composable:
            self.issues.append(SkillIssue(
                file=path,
                severity='warning',
                category='composability',
                message='No composable skills listed',
                suggestion='Add composable_with list to agent_skills_framework'
            ))
            score -= 15
        # Every listed skill should appear in Integration Points (-5 each).
        if 'Integration Points' in body:
            integration_section = body.split('Integration Points')[1]
            for skill in composable:
                if skill not in integration_section:
                    self.issues.append(SkillIssue(
                        file=path,
                        severity='info',
                        category='composability',
                        message=f'Composable skill "{skill}" not mentioned in Integration Points',
                        suggestion=f'Add bullet point for {skill} integration'
                    ))
                    score -= 5
        return max(0, score)
# Usage
validator = SkillValidator()
score = validator.validate('skills/codebase-analysis-patterns/SKILL.md')

# Overall score first, then the per-dimension breakdown.
print(f"Overall Quality: {score.overall:.1f}/100")
dimensions = [
    (" Structure", score.structure),
    (" Token Efficiency", score.token_efficiency),
    (" Code Examples", score.code_examples),
    (" Documentation", score.documentation),
    (" Composability", score.composability),
]
for label, value in dimensions:
    print(f"{label}: {value:.1f}/100")

# List every issue with its severity tag, if any were found.
issue_count = len(score.issues)
if score.issues:
    print(f"\nIssues ({issue_count}):")
    for issue in score.issues:
        print(f" [{issue.severity}] {issue.message}")
Token Optimization Tool
// tools/skill-optimizer.ts
interface OptimizationSuggestion {
  type: string;
  impact: 'high' | 'medium' | 'low';
  suggestion: string;
  estimated_savings: number; // estimated tokens saved if applied
}

class SkillOptimizer {
  /**
   * Analyze skill content and return token-saving suggestions,
   * sorted by estimated savings (largest first).
   */
  optimize(skillContent: string): OptimizationSuggestion[] {
    const suggestions: OptimizationSuggestion[] = [];

    // Verbose comments waste tokens without adding signal.
    const verboseComments = this.findVerboseComments(skillContent);
    if (verboseComments > 0) {
      suggestions.push({
        type: 'comments',
        impact: 'medium',
        suggestion: 'Condense verbose comments to essential information only',
        estimated_savings: verboseComments * 10
      });
    }

    // More than 4 examples usually means redundant coverage.
    const codeBlocks = this.extractCodeBlocks(skillContent);
    if (codeBlocks.length > 4) {
      suggestions.push({
        type: 'examples',
        impact: 'high',
        suggestion: 'Consolidate or remove redundant code examples',
        estimated_savings: (codeBlocks.length - 4) * 200
      });
    }

    // Duplicated content above 5% is worth factoring out.
    const duplicateRatio = this.calculateDuplication(skillContent);
    if (duplicateRatio > 0.05) {
      suggestions.push({
        type: 'duplication',
        impact: 'high',
        suggestion: 'Extract common patterns into reusable functions',
        // content length / 4 approximates total tokens (1 token ~ 4 chars).
        estimated_savings: Math.floor(skillContent.length / 4 * duplicateRatio)
      });
    }

    return suggestions.sort((a, b) => b.estimated_savings - a.estimated_savings);
  }

  /**
   * Count comment lines (// or #) longer than 80 characters.
   * These were previously undefined helper calls -- implemented here
   * so the class actually compiles and runs.
   */
  private findVerboseComments(content: string): number {
    return content
      .split('\n')
      .filter(line => /^\s*(\/\/|#)/.test(line) && line.trim().length > 80)
      .length;
  }

  /** Extract fenced code blocks (``` ... ```) from markdown content. */
  private extractCodeBlocks(content: string): string[] {
    const matches = content.match(/```[\s\S]*?```/g);
    return matches ?? [];
  }

  /**
   * Fraction of non-empty trimmed lines that exactly duplicate an
   * earlier line; 0 for empty content.
   */
  private calculateDuplication(content: string): number {
    const lines = content
      .split('\n')
      .map(line => line.trim())
      .filter(line => line.length > 0);
    if (lines.length === 0) {
      return 0;
    }
    const seen = new Set<string>();
    let duplicates = 0;
    for (const line of lines) {
      if (seen.has(line)) {
        duplicates += 1;
      } else {
        seen.add(line);
      }
    }
    return duplicates / lines.length;
  }
}
Usage Examples
Validate Skill Quality
Apply skill-quality-patterns skill to validate skill structure, token efficiency, and code examples
Optimize Token Usage
Apply skill-quality-patterns skill to analyze token usage and generate optimization recommendations
Assess Composability
Apply skill-quality-patterns skill to evaluate integration points and composability scores
Success Output
When this skill is successfully applied, you should see:
✅ SKILL COMPLETE: skill-quality-patterns
Completed:
- [x] Skill structure validated (YAML frontmatter, required sections)
- [x] Token efficiency analyzed (<5000 tokens budget)
- [x] Code examples quality checked (3+ examples, language specified, runnable)
- [x] Documentation quality assessed (progressive disclosure, clear formatting)
- [x] Composability validated (integration points documented)
- [x] Overall quality score calculated (Grade A-F)
Outputs:
- SkillQualityScore object with dimension scores
- Issue list with severity (error/warning/info) and suggestions
- Optimization recommendations (estimated token savings)
- Quality grade (A: 90-100%, B: 80-89%, C: 70-79%, D: 60-69%, F: <60%)
- Validation report with actionable fixes
Example Output:
Skill: codebase-analysis-patterns
- Overall Quality: 85.2/100 (Grade B)
- Structure: 95.0/100
- Token Efficiency: 78.0/100 (4,200 tokens, within budget)
- Code Examples: 90.0/100 (4 examples, all with language tags)
- Documentation: 82.0/100
- Composability: 80.0/100 (3 integration points documented)
- Issues: 3 warnings, 2 info
- Token Optimization: Consolidate 2 redundant examples (save ~400 tokens)
Completion Checklist
Before marking this skill as complete, verify:
- YAML frontmatter complete (name, description, allowed-tools, metadata, priority)
- Required sections present (Core Capabilities, Usage Examples, Integration Points)
- Token budget respected (<5000 estimated tokens)
- Code examples complete (3-4 examples minimum)
- Language tags specified for all code blocks (```python, ```typescript)
- Usage examples follow "Apply X skill to Y" format
- Composable skills listed in frontmatter
- Integration points documented and match composable_with list
- Quality score calculated across all 5 dimensions
- Improvement recommendations generated
Failure Indicators
This skill has FAILED if:
- ❌ Missing required frontmatter fields (name, description, etc.)
- ❌ No agent_skills_framework section in frontmatter
- ❌ Missing required sections (Core Capabilities, Usage Examples)
- ❌ Token budget exceeded (>5000 tokens, overage >20%)
- ❌ Fewer than 3 code examples
- ❌ Code blocks missing language specification
- ❌ Code examples not runnable (missing imports, incomplete logic)
- ❌ No composable skills listed (should integrate with at least 1 other skill)
- ❌ Integration points not documented
- ❌ Overall quality score <70% (Grade C or below)
When NOT to Use
Do NOT use this skill when:
- Validating non-skill files - This is for SKILL.md files only, not agents/commands
- Quick prototypes - Validation overhead not worth it for throwaway code
- Simple skills (<1000 tokens) - Overhead of full validation exceeds skill complexity
- Pre-existing validated skills - Don't re-validate skills with quality badges
- Legacy skills being deprecated - Focus on new/active skills
- Non-CODITECT codebases - Frontmatter requirements specific to CODITECT
Use alternatives:
- Agent files → Use agent-specific validation (CODITECT-STANDARD-AGENTS.md)
- Command files → Use command-specific validation
- Documentation → Use documentation quality standards
- Quick checks → Grep for required sections instead of full validation
Anti-Patterns (Avoid)
| Anti-Pattern | Problem | Solution |
|---|---|---|
| No frontmatter | Skill can't be discovered or categorized | Add YAML frontmatter with all required fields |
| Verbose comments | Token waste, poor signal-to-noise ratio | Condense to essential information only |
| Redundant examples | >4 examples with overlapping functionality | Consolidate to 3-4 diverse, focused examples |
| Missing language tags | Code blocks render poorly, no syntax highlighting | Always use ```python, ```typescript, etc. |
| Incomplete examples | Missing imports, won't run standalone | Include all imports and context needed |
| No composability | Skill exists in isolation, low reusability | List composable_with skills in frontmatter |
| Vague usage examples | "Use this skill for X" instead of "Apply X to Y" | Follow "Apply skill-name skill to [specific action]" format |
| Token bloat | Skill >5000 tokens (exceeds budget) | Extract content to external files, condense examples |
| No validation checklist | Unclear when skill is "done" | Add completion criteria in skill itself |
Principles
This skill embodies CODITECT automation principles:
#1 Recycle → Extend → Re-Use → Create
- Recycle validation logic - Reuse SkillValidator across all skill files
- Extend scoring criteria - Add domain-specific quality dimensions as needed
- Re-use optimization patterns - Token reduction techniques apply broadly
- Create new validators - Only when existing patterns don't fit
#2 First Principles Thinking
- Understand token economics - 1 token ≈ 4 characters, budget impacts cost
- Know quality dimensions - Structure, efficiency, examples, docs, composability
- Measure objectively - Use grade scale (A-F) instead of subjective "good/bad"
#5 Eliminate Ambiguity
- Explicit quality scores - 85.2/100 is unambiguous, "high quality" is vague
- Severity classification - error/warning/info clearly indicates urgency
- Token budget threshold - 5000 tokens is hard limit, not suggestion
#6 Clear, Understandable, Explainable
- Dimension breakdown - Show structure=95%, efficiency=78%, etc. instead of just overall score
- Actionable suggestions - "Add language identifier: ```python" is clear fix
- Quality grading - A/B/C/D/F grades universally understood
#8 No Assumptions
- Validate frontmatter - Don't assume required fields exist
- Check composability - Verify listed skills actually exist
- Test examples - Don't assume code blocks are runnable
#10 Automation First
- Auto-validate on commit - Run SkillValidator in pre-commit hook
- Auto-optimize - Apply token reduction recommendations automatically
- Auto-grade - Calculate quality scores without manual review
Integration Points
- qa-review-methodology - Quality scoring frameworks
- documentation-quality - Documentation standards
- code-review-patterns - Review methodology