Skip to main content

Agent Skills Framework Extension

Prompt Analysis Patterns Skill

When to Use This Skill

Use this skill when implementing prompt analysis patterns in your codebase.

How to Use This Skill

  1. Review the patterns and examples below
  2. Apply the relevant patterns to your implementation
  3. Follow the best practices outlined in this skill

Multi-dimensional prompt analysis, intent classification, complexity assessment, and optimization.

Core Capabilities

  1. Intent Classification - Understand user goals
  2. Complexity Assessment - Estimate task difficulty
  3. Context Analysis - Determine information needs
  4. Ambiguity Detection - Find unclear aspects
  5. Optimization Suggestions - Improve prompts
  6. Quality Scoring - Rate prompt effectiveness

Prompt Analyzer

# scripts/prompt-analyzer.py
from dataclasses import dataclass
from typing import List, Dict, Optional
from enum import Enum
import re

class IntentCategory(Enum):
    """The eight primary goals a user prompt can express."""

    CODE_GENERATION = "code_generation"  # write brand-new code
    DEBUGGING = "debugging"              # diagnose and fix broken behavior
    EXPLANATION = "explanation"          # describe how/why something works
    REFACTORING = "refactoring"          # restructure existing code
    RESEARCH = "research"                # investigate and compare options
    DESIGN = "design"                    # shape architecture or structure
    PLANNING = "planning"                # lay out a roadmap or approach
    REVIEW = "review"                    # assess or validate existing work

@dataclass
class PromptAnalysis:
    """Aggregated result of analyzing a single prompt."""

    intent: IntentCategory               # primary classified intent
    complexity: str                      # one of: 'simple', 'moderate', 'complex'
    context_needs: List[str]             # kinds of context the prompt requires
    ambiguities: List[str]               # human-readable ambiguity findings
    optimization_suggestions: List[str]  # concrete ways to improve the prompt
    quality_score: float                 # 0.0 (poor) to 1.0 (excellent)
    estimated_time: int                  # estimated handling time in minutes

class PromptAnalyzer:
    """Analyze prompts across multiple dimensions.

    Produces a PromptAnalysis combining intent classification,
    complexity assessment, context-need detection, ambiguity detection,
    optimization suggestions, a heuristic quality score, and a time
    estimate. All analysis is keyword/regex based — no model calls.
    """

    # Bag-of-words cues per intent; _classify_intent counts how many
    # cues from each list appear in the (lowercased) prompt.
    INTENT_KEYWORDS = {
        IntentCategory.CODE_GENERATION: ['implement', 'create', 'build', 'write code', 'generate'],
        IntentCategory.DEBUGGING: ['fix', 'error', 'bug', 'not working', 'debug'],
        IntentCategory.EXPLANATION: ['explain', 'what is', 'how does', 'why', 'understand'],
        IntentCategory.REFACTORING: ['refactor', 'improve', 'optimize', 'clean up', 'reorganize'],
        IntentCategory.RESEARCH: ['research', 'find', 'compare', 'analyze', 'investigate'],
        IntentCategory.DESIGN: ['design', 'architecture', 'structure', 'plan'],
        IntentCategory.PLANNING: ['plan', 'roadmap', 'strategy', 'approach'],
        IntentCategory.REVIEW: ['review', 'check', 'validate', 'assess'],
    }

    def analyze(self, prompt: str, context: Optional[Dict] = None) -> PromptAnalysis:
        """Perform comprehensive prompt analysis.

        Args:
            prompt: Raw user prompt to analyze.
            context: Reserved for future use (e.g. session history);
                currently ignored — TODO confirm intended wiring.

        Returns:
            A fully populated PromptAnalysis.
        """
        intent = self._classify_intent(prompt)
        complexity = self._assess_complexity(prompt)
        context_needs = self._analyze_context_needs(prompt, intent)
        ambiguities = self._detect_ambiguities(prompt)
        optimizations = self._generate_optimizations(prompt, ambiguities, context_needs)
        quality = self._score_quality(prompt, ambiguities, context_needs)
        time_estimate = self._estimate_time(complexity, intent)

        return PromptAnalysis(
            intent=intent,
            complexity=complexity,
            context_needs=context_needs,
            ambiguities=ambiguities,
            optimization_suggestions=optimizations,
            quality_score=quality,
            estimated_time=time_estimate,
        )

    def _classify_intent(self, prompt: str) -> IntentCategory:
        """Classify the primary intent by counting keyword hits.

        Bug fix: the original took ``max`` over the score dict even when
        every score was 0, so keyword-free prompts were classified as
        whichever category happened to come first in INTENT_KEYWORDS
        (CODE_GENERATION) instead of the documented EXPLANATION default.
        """
        prompt_lower = prompt.lower()
        scores = {
            intent: sum(1 for kw in keywords if kw in prompt_lower)
            for intent, keywords in self.INTENT_KEYWORDS.items()
        }
        best_intent, best_score = max(scores.items(), key=lambda item: item[1])
        if best_score == 0:
            return IntentCategory.EXPLANATION  # default when nothing matches
        return best_intent

    def _assess_complexity(self, prompt: str) -> str:
        """Assess task complexity: 'simple', 'moderate' or 'complex'.

        The score is a weighted blend of prompt length, acronym count
        (a proxy for technical density), requirement words ('should',
        'must') and contrastive words ('but', 'however'). Thresholds
        are heuristic, not calibrated.
        """
        prompt_lower = prompt.lower()
        factors = {
            'length': len(prompt.split()),
            'technical_terms': len(re.findall(r'\b[A-Z]{2,}\b', prompt)),  # acronyms
            'requirements': prompt_lower.count('should') + prompt_lower.count('must'),
            'constraints': prompt_lower.count('but') + prompt_lower.count('however'),
        }

        # Each factor is normalized by a "typical" magnitude, then weighted.
        complexity_score = (
            (factors['length'] / 50) * 0.3 +
            (factors['technical_terms'] / 5) * 0.3 +
            (factors['requirements'] / 3) * 0.2 +
            (factors['constraints'] / 2) * 0.2
        )

        if complexity_score < 0.3:
            return 'simple'
        elif complexity_score < 0.7:
            return 'moderate'
        else:
            return 'complex'

    def _analyze_context_needs(self, prompt: str, intent: IntentCategory) -> List[str]:
        """Determine what kinds of context the prompt needs.

        Returns a subset of: 'code_context', 'project_context',
        'technical_specifications', 'session_history'.
        """
        prompt_lower = prompt.lower()
        needs = []

        # References to existing code imply code context is needed.
        if any(word in prompt_lower for word in ['this code', 'current', 'existing']):
            needs.append('code_context')

        # References to the wider system imply project context.
        if any(word in prompt_lower for word in ['project', 'application', 'system']):
            needs.append('project_context')

        # Generative/design work needs technical specifications.
        if intent in [IntentCategory.CODE_GENERATION, IntentCategory.DESIGN]:
            needs.append('technical_specifications')

        # References to prior turns imply session history.
        if any(word in prompt_lower for word in ['previous', 'earlier', 'before']):
            needs.append('session_history')

        return needs

    def _detect_ambiguities(self, prompt: str) -> List[str]:
        """Detect ambiguous elements; one message per finding."""
        prompt_lower = prompt.lower()
        ambiguities = []

        # Vague pronouns whose referent is not stated in the prompt.
        vague_pronouns = ['it', 'this', 'that', 'these', 'those']
        for pronoun in vague_pronouns:
            if re.search(r'\b' + pronoun + r'\b', prompt, re.IGNORECASE):
                ambiguities.append(f"Vague pronoun: '{pronoun}' - what does this refer to?")

        # Non-specific filler words.
        if any(word in prompt_lower for word in ['something', 'somehow', 'whatever']):
            ambiguities.append("Contains non-specific terms")

        # Many 'or's suggest multiple competing interpretations.
        if prompt_lower.count('or') > 2:
            ambiguities.append("Multiple alternatives mentioned - which is preferred?")

        # Longer prompts with no scoping words likely have unclear scope.
        if not any(word in prompt_lower for word in ['all', 'only', 'just', 'specific']):
            if len(prompt.split()) > 20:
                ambiguities.append("Scope not clearly defined")

        return ambiguities

    def _generate_optimizations(
        self,
        prompt: str,
        ambiguities: List[str],
        context_needs: List[str]
    ) -> List[str]:
        """Generate optimization suggestions tied to detected issues."""
        suggestions = []
        # Assess once; the original recomputed complexity twice.
        complexity = self._assess_complexity(prompt)

        # Address ambiguities first — they block correct interpretation.
        if ambiguities:
            suggestions.append("Clarify ambiguous terms and pronouns")

        # Simple prompts often under-specify requirements.
        if complexity == 'simple':
            suggestions.append("Consider adding more details about requirements")

        # Ask for code snippets when code context was flagged as needed.
        if context_needs and 'code_context' in context_needs:
            suggestions.append("Provide relevant code snippets for context")

        # Complex prompts benefit from decomposition.
        if complexity == 'complex':
            suggestions.append("Consider breaking into sub-tasks")

        # No constraint language at all — prompt likely lacks requirements.
        if not any(word in prompt.lower() for word in ['should', 'must', 'requirement']):
            suggestions.append("Specify any constraints or requirements")

        return suggestions

    def _score_quality(
        self,
        prompt: str,
        ambiguities: List[str],
        context_needs: List[str]
    ) -> float:
        """Score prompt quality, clamped to the 0.0-1.0 range.

        Note: context_needs is accepted for interface symmetry but is
        not currently used in the score.
        """
        score = 1.0

        # Each ambiguity costs a tenth of a point.
        score -= len(ambiguities) * 0.1

        # Very short prompts lack information.
        if len(prompt.split()) < 5:
            score -= 0.2

        # Long, unstructured prompts are hard to parse.
        if len(prompt.split()) > 100 and '\n' not in prompt:
            score -= 0.15

        # Explicit-specificity words earn a small bonus.
        if any(word in prompt.lower() for word in ['specifically', 'exactly', 'precisely']):
            score += 0.1

        # Clamp so callers always see a value in [0.0, 1.0].
        return max(0.0, min(1.0, score))

    def _estimate_time(self, complexity: str, intent: IntentCategory) -> int:
        """Estimate handling time in minutes from intent and complexity."""
        # Baseline minutes per intent category.
        base_times = {
            IntentCategory.CODE_GENERATION: 15,
            IntentCategory.DEBUGGING: 10,
            IntentCategory.EXPLANATION: 5,
            IntentCategory.REFACTORING: 20,
            IntentCategory.RESEARCH: 30,
            IntentCategory.DESIGN: 25,
            IntentCategory.PLANNING: 20,
            IntentCategory.REVIEW: 10,
        }

        # Scale the baseline by assessed complexity.
        multipliers = {
            'simple': 0.5,
            'moderate': 1.0,
            'complex': 2.0,
        }

        base = base_times.get(intent, 15)
        multiplier = multipliers.get(complexity, 1.0)

        # int() truncates; minimum possible result is 2 (5 * 0.5).
        return int(base * multiplier)

# Usage: analyze a short debugging request and print every dimension.
analyzer = PromptAnalyzer()

# 'fix' and 'not working' are DEBUGGING keyword cues; 'this'/'it' style
# pronouns should surface as ambiguities.
prompt = "Help me fix this code that's not working"
analysis = analyzer.analyze(prompt)

# Report each analysis dimension of the returned PromptAnalysis.
print(f"Intent: {analysis.intent.value}")
print(f"Complexity: {analysis.complexity}")
print(f"Quality Score: {analysis.quality_score:.2f}")
print(f"Ambiguities: {len(analysis.ambiguities)}")
for amb in analysis.ambiguities:
    print(f" - {amb}")
print(f"Suggestions:")
for sug in analysis.optimization_suggestions:
    print(f" - {sug}")
print(f"Estimated time: {analysis.estimated_time} minutes")

Prompt Optimizer

# scripts/prompt-optimizer.py
from dataclasses import dataclass
from typing import List

@dataclass
class OptimizedPrompt:
    """Result of one optimization pass over a single prompt."""

    original: str            # the prompt exactly as submitted
    optimized: str           # the rewritten prompt
    improvements: List[str]  # one human-readable note per applied change
    quality_delta: float     # optimized quality minus original quality

class PromptOptimizer:
    """Optimize prompts for clarity and effectiveness."""

    def optimize(self, prompt: str) -> OptimizedPrompt:
        """Run all optimization passes over *prompt*.

        Returns an OptimizedPrompt carrying the original text, the
        rewritten text, one note per applied improvement, and the
        change in the heuristic quality score.
        """
        original_quality = self._calculate_quality(prompt)

        optimized = prompt
        improvements = []

        # Pass 1: add structure to long single-line prompts.
        if '\n' not in optimized and len(optimized.split()) > 30:
            optimized = self._add_structure(optimized)
            improvements.append("Added structure for clarity")

        # Pass 2: replace vague terms with explicit placeholders.
        optimized, replaced = self._replace_vague_terms(optimized)
        if replaced:
            improvements.extend(replaced)

        # Pass 3: append a requirements stub when none are stated.
        optimized, added_specificity = self._add_specificity(optimized)
        if added_specificity:
            improvements.append("Added specific details")

        # Positive delta means the passes measurably improved the prompt.
        optimized_quality = self._calculate_quality(optimized)
        quality_delta = optimized_quality - original_quality

        return OptimizedPrompt(
            original=prompt,
            optimized=optimized,
            improvements=improvements,
            quality_delta=quality_delta,
        )

    def _add_structure(self, prompt: str) -> str:
        """Add lightweight list structure to long run-on prompts."""
        # Simple version: ' and ' becomes a list-item boundary and
        # ' Also, ' becomes a paragraph break.
        structured = prompt.replace(' and ', '.\n- ')
        structured = structured.replace(' Also, ', '\n\n')
        return structured

    def _replace_vague_terms(self, prompt: str) -> tuple[str, List[str]]:
        """Replace standalone vague terms with explicit placeholders.

        Returns the rewritten prompt and a note per term replaced.

        Bug fix: detection now uses the same word-boundary regex as the
        substitution. The original used a plain substring test, so e.g.
        'it' inside 'write' produced a "Replaced 'it' ..." note even
        though the \\b-anchored re.sub changed nothing. The ``import re``
        is also hoisted out of the loop.
        """
        import re

        replacements = {
            'something': '[specify what]',
            'somehow': '[specify how]',
            'it': '[specify what "it" refers to]',
        }

        improved = prompt
        replaced = []

        for vague, specific in replacements.items():
            # Whole-word, case-insensitive match only.
            pattern = re.compile(r'\b' + vague + r'\b', re.IGNORECASE)
            if pattern.search(improved):
                improved = pattern.sub(specific, improved)
                replaced.append(f"Replaced '{vague}' with clearer reference")

        return improved, replaced

    def _add_specificity(self, prompt: str) -> tuple[str, bool]:
        """Append a requirements stub if the prompt states none.

        Returns the (possibly extended) prompt and whether it changed.
        """
        # Only act when neither requirement language nor 'should' appears.
        if 'requirement' not in prompt.lower() and 'should' not in prompt.lower():
            enhanced = prompt + "\n\nRequirements:\n- [Add specific requirements]"
            return enhanced, True

        return prompt, False

    def _calculate_quality(self, prompt: str) -> float:
        """Heuristic quality score, capped at 1.0 (base is 0.5).

        Bug fix: the vague-term penalty is now word-boundary based. The
        original substring test denied the specificity bonus to any
        prompt containing e.g. 'with' or 'position' (both contain 'it').
        """
        import re

        score = 0.5  # base

        # Structure bonus: multi-line prompts read as organized.
        if '\n' in prompt:
            score += 0.2

        # Length bonus: neither a fragment nor a wall of text.
        words = len(prompt.split())
        if 10 <= words <= 100:
            score += 0.2

        # Specificity bonus: no standalone vague terms present.
        vague_terms = ['something', 'somehow', 'it', 'this', 'that']
        if not any(re.search(r'\b' + term + r'\b', prompt, re.IGNORECASE)
                   for term in vague_terms):
            score += 0.1

        return min(score, 1.0)

# Usage: optimize a deliberately vague prompt and show the changes.
optimizer = PromptOptimizer()

# Contains 'somehow', 'it' and 'something' — all replacement targets.
original = "Fix this code somehow so it works with something"
result = optimizer.optimize(original)

# Show before/after, the per-change notes, and the quality delta.
print(f"Original: {result.original}")
print(f"\nOptimized: {result.optimized}")
print(f"\nImprovements:")
for imp in result.improvements:
    print(f" - {imp}")
print(f"\nQuality improvement: {result.quality_delta:+.2f}")

Usage Examples

Prompt Analysis

Apply prompt-analysis-patterns skill to analyze user request for intent, complexity, and ambiguities

Prompt Optimization

Apply prompt-analysis-patterns skill to optimize vague prompt for clarity and effectiveness

Context Need Analysis

Apply prompt-analysis-patterns skill to determine what context information is needed

Integration Points

  • memory-context-patterns - Context injection
  • novelty-detection-patterns - Intent classification
  • uncertainty-quantification-patterns - Quality scoring

Success Output

When successful, this skill MUST output:

✅ SKILL COMPLETE: prompt-analysis-patterns

Completed:
- [x] Intent classified: {intent_category}
- [x] Complexity assessed: {simple|moderate|complex}
- [x] Ambiguities detected: {count}
- [x] Context needs identified: {needs_list}
- [x] Quality scored: {score}/1.0
- [x] Optimization suggestions generated: {count}
- [x] Time estimated: {minutes} minutes

Outputs:
- Intent: {IntentCategory}
- Complexity: {complexity_level}
- Quality Score: {0.0-1.0}
- Ambiguities: {list_of_ambiguities}
- Context Needs: {list_of_needs}
- Optimizations: {list_of_suggestions}
- Estimated Time: {minutes} min

Completion Checklist

Before marking this skill as complete, verify:

  • Intent classified into one of 8 categories (code_generation, debugging, explanation, refactoring, research, design, planning, review)
  • Complexity assessed (simple/moderate/complex) based on length, technical terms, requirements, constraints
  • All ambiguities detected (vague pronouns, non-specific terms, unclear scope)
  • Context needs identified (code_context, project_context, technical_specs, session_history)
  • Quality score calculated (0.0-1.0 range)
  • Optimization suggestions generated for each identified issue
  • Time estimate provided based on intent and complexity
  • PromptAnalysis object returned with all fields populated

Failure Indicators

This skill has FAILED if:

  • ❌ Intent classification returns None or invalid category
  • ❌ Complexity score calculation produces NaN or out-of-range value
  • ❌ Quality score is negative or exceeds 1.0
  • ❌ Time estimate is zero or negative
  • ❌ Ambiguity detection misses obvious vague terms (it, this, something, somehow)
  • ❌ Context needs analysis fails to identify clear context requirements
  • ❌ Optimization suggestions are generic rather than specific to detected issues
  • ❌ Analysis fails on empty or very short prompts (<3 words)

When NOT to Use

Do NOT use this skill when:

  • Prompt is already crystal clear and well-structured (wastes analysis time)
  • Immediate execution needed without optimization (analysis adds latency)
  • Prompt is a simple one-word command (e.g., "help", "list", "status")
  • User explicitly requests no analysis or optimization
  • Batch processing many prompts where uniform structure exists
  • Interactive session where real-time feedback loop handles ambiguities
  • Prompt is code or structured data (not natural language)

Use alternatives:

  • Direct execution: For clear, unambiguous prompts
  • Interactive clarification: For conversational contexts
  • Batch validation: For standardized prompt templates
  • Code linting: For code-based inputs

Anti-Patterns (Avoid)

| Anti-Pattern | Problem | Solution |
| --- | --- | --- |
| Over-analyzing simple prompts | Wastes processing time | Skip analysis for prompts <10 words or known patterns |
| Generic optimization suggestions | Not actionable | Generate specific suggestions tied to detected issues |
| Ignoring domain context | Misclassifies intent | Consider session history and project context |
| Binary ambiguity detection | Misses nuanced issues | Use severity scoring (BLOCKING/HIGH/MEDIUM/LOW) |
| Fixed complexity thresholds | Inaccurate for domain | Adjust thresholds based on technical domain |
| Missing edge cases | Fails on unusual prompts | Handle empty, very long, multi-language prompts |
| No confidence scoring | All outputs treated equal | Add confidence score to intent classification |

Principles

This skill embodies the following CODITECT principles:

  • #5 Eliminate Ambiguity - Explicit detection and reporting of ambiguous elements
  • #6 Clear, Understandable, Explainable - Specific optimization suggestions with rationale
  • #8 No Assumptions - Documents all inferred context needs and missing information
  • Ambiguity Handling - Multi-dimensional ambiguity detection (referential, scope, definitional)
  • Factual Grounding - Analysis based on concrete linguistic features (pronouns, keywords, structure)
  • Trust & Transparency - Quality scoring exposes analysis confidence and limitations

Version: 1.1.0 | Created: 2025-12-22 | Updated: 2026-01-04 | Author: CODITECT Team