#!/usr/bin/env python3
"""
Context Query - ADR-118 Four-Tier Memory System

Queries session and organization knowledge from the ADR-118 compliant databases:

  - projects.db (Tier 4): Registered projects, project embeddings (regenerable)
  - sessions.db (Tier 3): Messages, token economics, tool analytics, activity associations
  - org.db (Tier 2): Decisions, skill learnings, error solutions
  - platform.db (Tier 1): Component registry (read-only)

This replaces context-db.py which used the deprecated context.db.

Usage:
    python3 scripts/context-query.py                                  # Show help
    python3 scripts/context-query.py "search term"                    # Search messages
    python3 scripts/context-query.py "error AND auth"                 # Boolean AND search (J.4.4.1)
    python3 scripts/context-query.py "deploy OR k8s"                  # Boolean OR search
    python3 scripts/context-query.py "security NOT test"              # Boolean NOT (exclude)
    python3 scripts/context-query.py --boolean-help                   # Show Boolean search syntax
    python3 scripts/context-query.py --decisions                      # View decisions
    python3 scripts/context-query.py --errors                         # View error-solutions
    python3 scripts/context-query.py --recent 20                      # Recent messages
    python3 scripts/context-query.py --projects                       # List registered projects (J.16.2.1)
    python3 scripts/context-query.py --project-search coditect-core "auth"   # Search within project (J.16.2.2)
    python3 scripts/context-query.py --all-projects "database migration"     # Cross-project search (J.16.2.3)
    python3 scripts/context-query.py --project-tree rollout-master "API"     # Search project + children (J.16.2.4)
    python3 scripts/context-query.py --tokens                         # View token economics (J.16.1)
    python3 scripts/context-query.py --token-stats                    # Aggregated token statistics
    python3 scripts/context-query.py --tools                          # View tool analytics
    python3 scripts/context-query.py --tool-stats                     # Tool usage by tool name
    python3 scripts/context-query.py --activities                     # View activity associations
    python3 scripts/context-query.py --stats                          # Database statistics
    python3 scripts/context-query.py --graph "task"                   # Build context graph (ADR-151)
    python3 scripts/context-query.py --graph-stats                    # Knowledge graph statistics
    python3 scripts/context-query.py --queries                        # List query templates (ADR-154)
    python3 scripts/context-query.py --template                       # Execute query template
    python3 scripts/context-query.py --traversal bfs --start-node     # Traverse graph (J.4.7.3)
    python3 scripts/context-query.py --traversal shortest --start-node --end-node  # Find path
    python3 scripts/context-query.py --correlate "messages,token_economics" --by session_id  # Cross-table (J.4.7.5)
    python3 scripts/context-query.py --correlate-help                 # Show correlation help
    python3 scripts/context-query.py --create-query                   # Create template interactively (J.4.8.5)
    python3 scripts/context-query.py --create-query quick             # Quick template creation
    python3 scripts/context-query.py --create-query clone             # Clone existing template
    python3 scripts/context-query.py "error" --show-scores            # Search with score breakdown (J.4.5)
    python3 scripts/context-query.py "auth" --rank-weights 0.7,0.2,0.1  # Custom ranking weights
    python3 scripts/context-query.py "deploy" --no-rank               # Disable ranking (chronological order)
    python3 scripts/context-query.py --rank-help                      # Show ranking help
    python3 scripts/context-query.py "error" --by-day                 # Aggregate by day (J.4.6.1)
    python3 scripts/context-query.py --recent 500 --by-week           # Weekly message volume
    python3 scripts/context-query.py --recent 200 --topics            # Find topic clusters (J.4.6.2)
    python3 scripts/context-query.py --recent 500 --trending          # Show trending topics (J.4.6.3)
    python3 scripts/context-query.py --aggregation-help               # Show aggregation options
    python3 scripts/context-query.py --semantic "auth patterns"       # Semantic search (J.16.3.1)
    python3 scripts/context-query.py --hybrid "database migration"    # Hybrid FTS5+vector (J.16.3.1)
    python3 scripts/context-query.py --recall "context architecture"  # RAG recall (J.16.3.2)
    python3 scripts/context-query.py --semantic-help                  # Show semantic search help

    # J.15.5: Project Code/Document Search (uses project_embeddings from project_embedder.py)
    python3 scripts/context-query.py --code-search coditect-core "authentication"  # Single project (J.15.5.1)
    python3 scripts/context-query.py --code-tree rollout-master "API patterns"     # Project + children (J.15.5.3)
    python3 scripts/context-query.py --all-code "database migration"               # All projects (J.15.5.2)
    python3 scripts/context-query.py --code-search coditect-core --code-mode semantic "auth"  # Semantic only
    python3 scripts/context-query.py --code-search coditect-core --code-mode fts "error"      # FTS only
    python3 scripts/context-query.py --create-code-fts                             # Create FTS index (admin)
"""

import sqlite3 import json import sys import os import argparse from pathlib import Path from datetime import datetime, timedelta from typing import Optional, List, Dict, Any

ADR-114: Use centralized path discovery

_script_dir = Path(file).resolve().parent _coditect_root = _script_dir.parent if str(_coditect_root) not in sys.path: sys.path.insert(0, str(_coditect_root))

# Optional feature modules: each import sets an *_AVAILABLE flag so the CLI
# degrades gracefully when a subsystem is not installed.
try:
    from scripts.core.paths import get_context_storage_dir, CONTEXT_STORAGE
    PATHS_AVAILABLE = True
except ImportError:
    PATHS_AVAILABLE = False

# ADR-151: Context Graph Builder
try:
    from scripts.context_graph import ContextGraphBuilder
    CONTEXT_GRAPH_AVAILABLE = True
except ImportError:
    CONTEXT_GRAPH_AVAILABLE = False

# ADR-154: Query Templates
try:
    from scripts.context_graph.query_templates import QueryTemplateRegistry
    QUERY_TEMPLATES_AVAILABLE = True
except ImportError:
    QUERY_TEMPLATES_AVAILABLE = False

# J.4.8.5: Interactive Query Template Creator (ADR-154)
try:
    from scripts.context_graph.query_template_creator import QueryTemplateCreator
    TEMPLATE_CREATOR_AVAILABLE = True
except ImportError:
    TEMPLATE_CREATOR_AVAILABLE = False

# J.4.7.1: WHERE Expression Parser (ADR-149 Phase 1)
try:
    from scripts.context_graph.where_parser import (
        parse_where,
        validate_fields,
        get_allowed_fields,
        ParseResult,
    )
    WHERE_PARSER_AVAILABLE = True
except ImportError:
    WHERE_PARSER_AVAILABLE = False

# J.4.7.2: GROUP BY and SORT Processor (ADR-149 Phase 1)
try:
    from scripts.context_graph.group_sort_processor import (
        parse_group_by,
        parse_sort,
        build_aggregation_select,
        GroupByResult,
        SortResult,
    )
    GROUP_SORT_AVAILABLE = True
except ImportError:
    GROUP_SORT_AVAILABLE = False

# J.4.7.3: Knowledge Graph Traversal (ADR-149 Phase 1)
try:
    from scripts.context_graph.traversal import (
        traverse_graph,
        format_traversal_result,
        validate_traversal_strategy,
        TRAVERSAL_STRATEGIES,
        TraversalResult,
    )
    TRAVERSAL_AVAILABLE = True
except ImportError:
    TRAVERSAL_AVAILABLE = False

# J.4.7.4: Field-Specific Search (ADR-149 Phase 1)
try:
    from scripts.context_graph.field_parser import (
        parse_field_query,
        validate_fields as validate_field_constraints,
        get_valid_fields,
        format_field_help,
        FieldQuery,
    )
    FIELD_PARSER_AVAILABLE = True
except ImportError:
    FIELD_PARSER_AVAILABLE = False

# J.4.7.5: Cross-Table Correlation (ADR-149 Phase 1)
try:
    from scripts.context_graph.correlator import (
        parse_correlation,
        execute_correlation,
        format_correlation_result,
        format_correlation_help,
        CorrelationQuery,
        CorrelationResult,
    )
    CORRELATOR_AVAILABLE = True
except ImportError:
    CORRELATOR_AVAILABLE = False

# J.4.4.1: Boolean Search Parser (ADR-149)
try:
    from scripts.context_graph.boolean_parser import (
        parse_boolean_query,
        to_fts5,
        is_boolean_query,
        get_help as get_boolean_help,
        BOOLEAN_SEARCH_HELP,
        BooleanParseResult,
    )
    BOOLEAN_PARSER_AVAILABLE = True
except ImportError:
    BOOLEAN_PARSER_AVAILABLE = False

# J.4.5: Result Ranking (ADR-149)
try:
    from scripts.context_graph.result_ranker import (
        ResultRanker,
        RankingConfig,
        rank_results,
        get_ranking_help,
    )
    RESULT_RANKER_AVAILABLE = True
except ImportError:
    RESULT_RANKER_AVAILABLE = False

# J.4.6: Aggregations (ADR-149)
try:
    from scripts.context_graph.aggregator import (
        aggregate_by_time,
        cluster_by_topic,
        analyze_trends,
        format_time_aggregation,
        format_topic_clusters,
        format_trends,
        get_aggregation_help,
        VALID_PERIODS,
    )
    AGGREGATOR_AVAILABLE = True
except ImportError:
    AGGREGATOR_AVAILABLE = False

# J.8.3: Session Linker - Cross-Session Context Linking
try:
    from scripts.context_graph.session_linker import (
        SessionLinker,
        SessionLink,
        format_links_output,
        format_chain_output,
        format_link_help,
        VALID_RELATIONSHIPS,
    )
    SESSION_LINKER_AVAILABLE = True
except ImportError:
    SESSION_LINKER_AVAILABLE = False

# J.8.4: Session Resume Optimizer
try:
    from scripts.context_graph.session_resume import (
        SessionResumeOptimizer,
        format_resume_output,
        format_resume_help,
    )
    SESSION_RESUME_AVAILABLE = True
except ImportError:
    SESSION_RESUME_AVAILABLE = False

# J.16.3: Semantic Search (MCP Semantic Search Server).
# Imported from tools/mcp-semantic-search/server.py, which lives outside the
# scripts package, so its directory is added to sys.path first.
_tools_dir = _coditect_root / "tools" / "mcp-semantic-search"
if str(_tools_dir) not in sys.path:
    sys.path.insert(0, str(_tools_dir))
try:
    from server import (
        hybrid_search as mcp_hybrid_search,
        vector_search as mcp_vector_search,
        fts5_search as mcp_fts5_search,
        search_decisions as mcp_search_decisions,
        search_errors as mcp_search_errors,
        get_database_stats as mcp_get_stats,
        EMBEDDINGS_AVAILABLE as MCP_EMBEDDINGS_AVAILABLE,
    )
    SEMANTIC_SEARCH_AVAILABLE = True
except ImportError:
    SEMANTIC_SEARCH_AVAILABLE = False
    MCP_EMBEDDINGS_AVAILABLE = False

# J.15.5: Project Code/Document Search
try:
    from scripts.project_search import (
        search_project_code,
        search_project_tree_code,
        search_all_projects_code,
        format_search_results as format_project_code_results,
        create_fts_index as create_project_fts_index,
        ProjectSearchResults,
    )
    PROJECT_CODE_SEARCH_AVAILABLE = True
except ImportError:
    PROJECT_CODE_SEARCH_AVAILABLE = False

def find_context_storage() -> Path:
    """Find the context storage directory per ADR-114.

    Prefers the centralized path helper when available; otherwise probes
    known fallback locations and finally creates the default ADR-114 path.

    Returns:
        Path to an existing context-storage directory (created if needed).
    """
    if PATHS_AVAILABLE:
        return get_context_storage_dir()

    # Fallback locations per ADR-114, newest layout first
    candidates = [
        Path.home() / "PROJECTS" / ".coditect-data" / "context-storage",
        Path.home() / ".coditect-data" / "context-storage",
        Path.home() / ".coditect" / "context-storage",
    ]
    for candidate in candidates:
        if candidate.exists():
            return candidate

    # Nothing found: create the new ADR-114 default location
    new_location = Path.home() / "PROJECTS" / ".coditect-data" / "context-storage"
    new_location.mkdir(parents=True, exist_ok=True)
    return new_location

# ADR-118 Four-Tier Database Paths
_CONTEXT_STORAGE = find_context_storage()
SESSIONS_DB_PATH = _CONTEXT_STORAGE / "sessions.db"
ORG_DB_PATH = _CONTEXT_STORAGE / "org.db"

# Platform.db (Tier 1) lives in the framework directory, not context-storage
_CODITECT_ROOT = Path.home() / ".coditect"
PLATFORM_DB_PATH = _CODITECT_ROOT / "context-storage" / "platform.db"

# Fall back to context-storage if the framework path doesn't exist
if not PLATFORM_DB_PATH.exists():
    PLATFORM_DB_PATH = _CONTEXT_STORAGE / "platform.db"

# Projects.db (Tier 4) - project registration and embeddings
PROJECTS_DB_PATH = _CONTEXT_STORAGE / "projects.db"

def get_sessions_db() -> sqlite3.Connection:
    """Get a connection to sessions.db (Tier 3).

    Exits the process with status 1 when the database file is absent,
    since every sessions query depends on it.
    """
    if not SESSIONS_DB_PATH.exists():
        print(f"Error: sessions.db not found at {SESSIONS_DB_PATH}")
        print("Run /cx first to extract session data.")
        sys.exit(1)

    conn = sqlite3.connect(str(SESSIONS_DB_PATH), timeout=30)
    conn.row_factory = sqlite3.Row
    # Larger mmap/cache budget than org.db: sessions.db is the biggest tier
    conn.execute("PRAGMA mmap_size = 268435456")
    conn.execute("PRAGMA cache_size = -64000")
    return conn

def get_org_db() -> sqlite3.Connection:
    """Get a connection to org.db (Tier 2).

    Exits the process with status 1 when the database file is absent.
    """
    if not ORG_DB_PATH.exists():
        print(f"Error: org.db not found at {ORG_DB_PATH}")
        print("Run /cx first to extract organization knowledge.")
        sys.exit(1)

    conn = sqlite3.connect(str(ORG_DB_PATH), timeout=30)
    conn.row_factory = sqlite3.Row
    conn.execute("PRAGMA mmap_size = 134217728")
    conn.execute("PRAGMA cache_size = -32000")
    return conn

def search_messages(query: str, limit: int = 20, project_only: bool = False) -> List[Dict]:
    """Search messages in sessions.db using FTS5 with a LIKE fallback.

    Supports Boolean operators (J.4.4.1):
      - AND: both terms required (implicit or explicit)
      - OR: either term matches
      - NOT: exclude term
      - "phrase": exact phrase match
      - prefix*: wildcard prefix match

    Args:
        query: Search query.
        limit: Maximum results.
        project_only: ADR-156 - if True, exclude global-scope data.

    Examples:
        search_messages("error AND authentication")
        search_messages("deploy OR kubernetes")
        search_messages("security NOT test")
    """
    conn = get_sessions_db()

    # J.4.4.1: Convert Boolean operators to FTS5 syntax
    fts_query = query
    if BOOLEAN_PARSER_AVAILABLE and is_boolean_query(query):
        result = parse_boolean_query(query)
        if result.success:
            fts_query = result.fts_query

    # ADR-156: Build project filter
    project_clause, project_params = get_project_filter(project_only)
    project_where = f" AND {project_clause}" if project_clause else ""

    # Try FTS5 first
    try:
        sql = f"""
            SELECT m.*, highlight(messages_fts, 0, '>>>', '<<<') as highlighted
            FROM messages m
            JOIN messages_fts ON m.id = messages_fts.rowid
            WHERE messages_fts MATCH ?{project_where}
            ORDER BY m.timestamp DESC
            LIMIT ?
        """
        params = [fts_query] + project_params + [limit]
        cursor = conn.execute(sql, params)
        results = [dict(row) for row in cursor.fetchall()]
        if results:
            conn.close()
            return results
    except sqlite3.OperationalError:
        pass  # FTS table not available - fall through to LIKE

    # Fallback to LIKE search (basic - doesn't support full Boolean syntax)
    where_clause = "content LIKE ?"
    params = [f'%{query}%']
    if project_clause:
        where_clause += f" AND {project_clause}"
        params.extend(project_params)

    cursor = conn.execute(f"""
        SELECT * FROM messages
        WHERE {where_clause}
        ORDER BY timestamp DESC
        LIMIT ?
    """, params + [limit])
    results = [dict(row) for row in cursor.fetchall()]
    conn.close()
    return results

def get_recent_messages(limit: int = 20, project_only: bool = False) -> List[Dict]:
    """Get the most recent messages from sessions.db.

    Args:
        limit: Maximum results.
        project_only: ADR-156 - if True, exclude global-scope data.
    """
    conn = get_sessions_db()

    # ADR-156: Build project filter
    project_clause, project_params = get_project_filter(project_only)

    if project_clause:
        sql = f"SELECT * FROM messages WHERE {project_clause} ORDER BY timestamp DESC LIMIT ?"
        params = project_params + [limit]
    else:
        sql = "SELECT * FROM messages ORDER BY timestamp DESC LIMIT ?"
        params = [limit]

    cursor = conn.execute(sql, params)
    results = [dict(row) for row in cursor.fetchall()]
    conn.close()
    return results

def get_project_filter(project_only: bool = False, table: str = "messages") -> tuple:
    """Build a project filter clause for queries (ADR-156).

    Args:
        project_only: If True, restrict to the current project only.
        table: Target table name - 'messages' (sessions.db, no scope column)
            or org.db tables like 'decisions' (which have a scope column).

    Returns:
        Tuple of (where_clause, params), or (None, []) when no
        CODITECT_PROJECT environment variable is set.
    """
    project_id = os.environ.get('CODITECT_PROJECT')

    if not project_id:
        return None, []

    if project_only:
        # Project-specific rows only
        return "project_id = ?", [project_id]

    # Project-specific + global scope.
    # Note: the 'scope' column only exists in org.db tables (decisions, etc.);
    # the messages table in sessions.db does NOT have a scope column.
    if table == "messages":
        return "(project_id = ? OR project_id IS NULL)", [project_id]
    return "(project_id = ? OR scope = 'global' OR project_id IS NULL)", [project_id]

def get_decisions(limit: int = 20, decision_type: str = None,
                  where_clause: str = None, where_params: List = None,
                  project_only: bool = False) -> List[Dict]:
    """Get decisions from org.db, newest first.

    Args:
        limit: Maximum results.
        decision_type: Filter by decision type.
        where_clause: J.4.7.1 - additional SQL WHERE clause from --where flag.
        where_params: Parameters for where_clause placeholders.
        project_only: ADR-156 - if True, exclude global-scope data.
    """
    conn = get_org_db()
    params: list = []
    where_parts = []

    if decision_type:
        where_parts.append("decision_type = ?")
        params.append(decision_type)

    # ADR-156: Project filtering (org.db decisions table has a 'scope' column)
    project_clause, project_params = get_project_filter(project_only, table="decisions")
    if project_clause:
        where_parts.append(project_clause)
        params.extend(project_params)

    # J.4.7.1: Add custom WHERE clause
    if where_clause and where_params is not None:
        where_parts.append(f"({where_clause})")
        params.extend(where_params)

    query = "SELECT * FROM decisions"
    if where_parts:
        query += " WHERE " + " AND ".join(where_parts)
    query += " ORDER BY created_at DESC LIMIT ?"
    params.append(limit)

    cursor = conn.execute(query, params)
    results = [dict(row) for row in cursor.fetchall()]
    conn.close()
    return results

def get_error_solutions(query: str = None, limit: int = 20,
                        where_clause: str = None, where_params: List = None,
                        project_only: bool = False) -> List[Dict]:
    """Get error solutions from org.db, most-proven first.

    Args:
        query: Text search over error_type, error_message, and solution.
        limit: Maximum results.
        where_clause: J.4.7.1 - additional SQL WHERE clause from --where flag.
        where_params: Parameters for where_clause placeholders.
        project_only: ADR-156 - if True, exclude global-scope data.
    """
    conn = get_org_db()
    params: list = []
    where_parts = []

    if query:
        like = f'%{query}%'
        where_parts.append("(error_type LIKE ? OR error_message LIKE ? OR solution LIKE ?)")
        params.extend([like, like, like])

    # ADR-156: Project filtering (org.db error_solutions table has a 'scope' column)
    project_clause, project_params = get_project_filter(project_only, table="error_solutions")
    if project_clause:
        where_parts.append(project_clause)
        params.extend(project_params)

    # J.4.7.1: Add custom WHERE clause
    if where_clause and where_params is not None:
        where_parts.append(f"({where_clause})")
        params.extend(where_params)

    sql = "SELECT * FROM error_solutions"
    if where_parts:
        sql += " WHERE " + " AND ".join(where_parts)
    # Solutions with the highest success_count first, then newest
    sql += " ORDER BY success_count DESC, created_at DESC LIMIT ?"
    params.append(limit)

    cursor = conn.execute(sql, params)
    results = [dict(row) for row in cursor.fetchall()]
    conn.close()
    return results

def get_skill_learnings(limit: int = 20, pattern_type: str = None,
                        where_clause: str = None, where_params: List = None,
                        project_only: bool = False) -> List[Dict]:
    """Get skill learnings from org.db, newest first.

    Args:
        limit: Maximum results.
        pattern_type: Filter by pattern type.
        where_clause: J.4.7.1 - additional SQL WHERE clause from --where flag.
        where_params: Parameters for where_clause placeholders.
        project_only: ADR-156 - if True, exclude global-scope data.
    """
    conn = get_org_db()
    params: list = []
    where_parts = []

    if pattern_type:
        where_parts.append("pattern_type = ?")
        params.append(pattern_type)

    # ADR-156: Project filtering (org.db skill_learnings table has a 'scope' column)
    project_clause, project_params = get_project_filter(project_only, table="skill_learnings")
    if project_clause:
        where_parts.append(project_clause)
        params.extend(project_params)

    # J.4.7.1: Add custom WHERE clause
    if where_clause and where_params is not None:
        where_parts.append(f"({where_clause})")
        params.extend(where_params)

    query = "SELECT * FROM skill_learnings"
    if where_parts:
        query += " WHERE " + " AND ".join(where_parts)
    query += " ORDER BY created_at DESC LIMIT ?"
    params.append(limit)

    cursor = conn.execute(query, params)
    results = [dict(row) for row in cursor.fetchall()]
    conn.close()
    return results

def get_stats() -> Dict[str, Any]:
    """Gather statistics from all four ADR-118 databases.

    Returns:
        Dict keyed by tier (tier_1_platform .. tier_4_projects) with row
        counts and database file sizes, plus an 'llm_context' breakdown of
        message volume per LLM derived from source_file path patterns.
    """

    def _count(conn: sqlite3.Connection, sql: str) -> int:
        """Run a COUNT(*) query, returning 0 when the table is missing.

        Catches sqlite3.Error (not a bare except) so real bugs and
        KeyboardInterrupt are not silently swallowed.
        """
        try:
            return conn.execute(sql).fetchone()[0]
        except sqlite3.Error:
            return 0

    stats = {
        'tier_4_projects': {},
        'tier_3_sessions': {},
        'tier_2_org': {},
        'tier_1_platform': {},
    }

    # Tier 3: sessions.db
    if SESSIONS_DB_PATH.exists():
        conn = get_sessions_db()
        stats['tier_3_sessions']['messages'] = _count(conn, "SELECT COUNT(*) FROM messages")
        stats['tier_3_sessions']['token_economics'] = _count(conn, "SELECT COUNT(*) FROM token_economics")
        stats['tier_3_sessions']['tool_analytics'] = _count(conn, "SELECT COUNT(*) FROM tool_analytics")

        # Database file size
        stats['tier_3_sessions']['size_mb'] = round(
            SESSIONS_DB_PATH.stat().st_size / 1024 / 1024, 2
        )

        # LLM context distribution (derived from source_file patterns)
        # J.23.9: Detection priority: ADR-122 folders > native paths > legacy paths
        try:
            llm_results = conn.execute('''
                SELECT
                    CASE
                        WHEN source_file LIKE '%sessions-export-pending-anthropic%' THEN 'claude'
                        WHEN source_file LIKE '%sessions-export-pending-codex%' THEN 'codex'
                        WHEN source_file LIKE '%sessions-export-pending-gemini%' THEN 'gemini'
                        WHEN source_file LIKE '%sessions-export-pending-kimi%' THEN 'kimi'
                        WHEN source_file LIKE '%kimi-sessions-pending%' THEN 'kimi'
                        WHEN source_file LIKE '%gemini-sessions-pending%' THEN 'gemini'
                        WHEN source_file LIKE '%codex-sessions-pending%' THEN 'codex'
                        WHEN source_file LIKE '%/.claude/%' THEN 'claude'
                        WHEN source_file LIKE '%/.codex/%' THEN 'codex'
                        WHEN source_file LIKE '%/.gemini/%' THEN 'gemini'
                        WHEN source_file LIKE '%/.kimi/%' THEN 'kimi'
                        WHEN source_file LIKE '%exports-pending/%' THEN 'claude'
                        WHEN source_file LIKE '%exports-archive/%' THEN 'claude'
                        WHEN source_file LIKE '%.txt' THEN 'claude'
                        WHEN source_file NOT LIKE '/%' THEN 'claude'
                        ELSE 'unknown'
                    END as llm,
                    COUNT(*) as count,
                    SUM(content_length) as total_chars
                FROM messages
                GROUP BY llm
                ORDER BY count DESC
            ''').fetchall()

            total_msgs = sum(r[1] for r in llm_results)
            total_chars = sum(r[2] or 0 for r in llm_results)

            stats['llm_context'] = {}
            for llm, count, chars in llm_results:
                msg_pct = (count / total_msgs * 100) if total_msgs > 0 else 0
                char_pct = (chars / total_chars * 100) if total_chars and total_chars > 0 else 0
                stats['llm_context'][llm] = {
                    'messages': count,
                    'messages_pct': round(msg_pct, 1),
                    'chars': chars or 0,
                    'chars_pct': round(char_pct, 1),
                }
            stats['llm_context']['_total_messages'] = total_msgs
            stats['llm_context']['_total_chars'] = total_chars
        except Exception as e:
            # Best-effort section: report the failure instead of crashing stats
            stats['llm_context'] = {'error': str(e)}

        conn.close()

    # Tier 2: org.db
    if ORG_DB_PATH.exists():
        conn = get_org_db()
        stats['tier_2_org']['decisions'] = _count(conn, "SELECT COUNT(*) FROM decisions")
        stats['tier_2_org']['skill_learnings'] = _count(conn, "SELECT COUNT(*) FROM skill_learnings")
        stats['tier_2_org']['error_solutions'] = _count(conn, "SELECT COUNT(*) FROM error_solutions")
        stats['tier_2_org']['size_mb'] = round(
            ORG_DB_PATH.stat().st_size / 1024 / 1024, 2
        )
        conn.close()

    # Tier 1: platform.db (read-only)
    if PLATFORM_DB_PATH.exists():
        conn = sqlite3.connect(str(PLATFORM_DB_PATH))
        stats['tier_1_platform']['components'] = _count(conn, "SELECT COUNT(*) FROM components")
        stats['tier_1_platform']['size_mb'] = round(
            PLATFORM_DB_PATH.stat().st_size / 1024 / 1024, 2
        )
        conn.close()

    # Tier 4: projects.db (regenerable - project embeddings)
    if PROJECTS_DB_PATH.exists():
        conn = sqlite3.connect(str(PROJECTS_DB_PATH))
        stats['tier_4_projects']['projects'] = _count(conn, "SELECT COUNT(*) FROM projects")
        stats['tier_4_projects']['active'] = _count(conn, "SELECT COUNT(*) FROM projects WHERE status = 'active'")
        stats['tier_4_projects']['indexed'] = _count(conn, "SELECT COUNT(*) FROM projects WHERE last_indexed_at IS NOT NULL")
        stats['tier_4_projects']['embeddings'] = _count(conn, "SELECT COUNT(*) FROM project_embeddings")
        stats['tier_4_projects']['size_mb'] = round(
            PROJECTS_DB_PATH.stat().st_size / 1024 / 1024, 2
        )
        conn.close()

    return stats

# =============================================================================
# J.4.7.4: Field Filter Helper Function
# =============================================================================

def _apply_field_filter(results: List[Dict], field_spec: str, query_type: str) -> List[Dict]:
    """Apply field-specific filtering to query results (J.4.7.4).

    Args:
        results: List of result dictionaries.
        field_spec: Field specification string (e.g. "role:assistant,content:*error*").
        query_type: Type of query, used for field validation.

    Returns:
        Filtered list of results. Results are returned unfiltered when the
        field parser is unavailable or the specification is invalid.
    """
    # Hoisted out of the per-result loop (was re-imported on every iteration)
    import re

    if not FIELD_PARSER_AVAILABLE:
        return results

    try:
        field_query = parse_field_query(field_spec)
        if not field_query.constraints:
            return results
    except ValueError as e:
        print(f"Warning: Invalid field specification: {e}", file=sys.stderr)
        return results

    # Warn about unknown fields but still filter on the rest
    invalid = validate_field_constraints(field_query, query_type)
    if invalid:
        print(f"Warning: Unknown fields for {query_type}: {', '.join(invalid)}", file=sys.stderr)
        print(f"Valid fields: {', '.join(get_valid_fields(query_type))}", file=sys.stderr)

    def _matches(result: Dict) -> bool:
        """True when the result satisfies every parsed constraint."""
        for constraint in field_query.constraints:
            value = result.get(constraint.field)
            if value is None:
                return False

            value_str = str(value)

            if constraint.operator == 'LIKE':
                # Translate the SQL LIKE pattern into a regex. Escape regex
                # metacharacters first so e.g. '.' or '+' in the pattern are
                # matched literally, then map LIKE's '%' to '.*'.
                pattern = re.escape(constraint.like_pattern).replace('%', '.*')
                if not re.fullmatch(pattern, value_str, re.IGNORECASE):
                    return False
            elif isinstance(value, str):
                # Exact match (case-insensitive for strings)
                if value.lower() != constraint.value.lower():
                    return False
            elif value_str != constraint.value:
                return False
        return True

    return [result for result in results if _matches(result)]

def format_message(msg: Dict, show_content: bool = True) -> str:
    """Format a message dict for terminal display.

    Args:
        msg: Message row as a dict (role, timestamp, session_id, content,
            and optionally 'highlighted' from FTS5 search).
        show_content: Include the (possibly truncated) message body.

    Returns:
        Multi-line string ending with a 60-character separator rule.
    """
    role = msg.get('role', 'unknown')
    timestamp = msg.get('timestamp', 'unknown')
    session_id = msg.get('session_id', 'unknown')

    lines = []
    # Header: include an abbreviated session id when one is present
    if session_id and session_id != 'unknown':
        lines.append(f"[{role}] {timestamp} (session: {session_id[:8]}...)")
    else:
        lines.append(f"[{role}] {timestamp}")

    if show_content:
        # Prefer the FTS5-highlighted variant when available
        content = msg.get('highlighted') or msg.get('content', '')
        # Truncate long content for readability
        if len(content) > 500:
            content = content[:500] + "..."
        lines.append(content)

    lines.append("-" * 60)
    return "\n".join(lines)

def get_components(query: str = None, component_type: str = None, limit: int = 20) -> List[Dict]:
    """Get components from platform.db (Tier 1, read-only).

    Args:
        query: Optional LIKE filter on name/description.
        component_type: Optional exact type filter.
        limit: Maximum rows returned.

    Returns:
        List of component dicts; empty when platform.db is missing.
    """
    if not PLATFORM_DB_PATH.exists():
        print(f"Warning: platform.db not found at {PLATFORM_DB_PATH}")
        return []

    conn = sqlite3.connect(str(PLATFORM_DB_PATH))
    conn.row_factory = sqlite3.Row

    # Build a single query from whichever filters were supplied
    # (replaces four near-identical branches).
    where_parts = []
    params: list = []
    if component_type:
        where_parts.append("type = ?")
        params.append(component_type)
    if query:
        where_parts.append("(name LIKE ? OR description LIKE ?)")
        params.extend([f'%{query}%', f'%{query}%'])

    sql = "SELECT * FROM components"
    if where_parts:
        sql += " WHERE " + " AND ".join(where_parts)
        sql += " ORDER BY name"
    else:
        # Unfiltered listing groups by type first
        sql += " ORDER BY type, name"
    sql += " LIMIT ?"

    cursor = conn.execute(sql, params + [limit])
    results = [dict(row) for row in cursor.fetchall()]
    conn.close()
    return results

def find_project_db() -> Optional[Path]:
    """Find project.db starting at the current directory and walking up.

    Checks at most 5 directory levels; in each directory, looks first in
    .coditect/ and then in .coditect-data/.

    Returns:
        Path to the first project.db found, or None.
    """
    cwd = Path.cwd()
    search_paths = [cwd, *cwd.parents]

    for path in search_paths[:5]:  # Limit upward traversal depth
        for subdir in (".coditect", ".coditect-data"):
            project_db = path / subdir / "project.db"
            if project_db.exists():
                return project_db

    return None

def get_project_decisions(limit: int = 20, project_path: str = None) -> List[Dict]:
    """Get project-specific decisions from project.db (Tier 4).

    Args:
        limit: Maximum rows.
        project_path: Currently unused; kept for interface compatibility.

    Returns:
        List of decision dicts; empty when no project.db is found or the
        project_decisions table does not exist.
    """
    db_path = find_project_db()
    if not db_path:
        return []

    conn = sqlite3.connect(str(db_path))
    conn.row_factory = sqlite3.Row
    try:
        cursor = conn.execute(
            "SELECT * FROM project_decisions ORDER BY created_at DESC LIMIT ?",
            (limit,),
        )
        results = [dict(row) for row in cursor.fetchall()]
    except sqlite3.OperationalError:
        # Table missing - treat as empty
        results = []
    finally:
        # Close on every path, including unexpected errors
        conn.close()
    return results

def get_project_context(limit: int = 20) -> List[Dict]:
    """Get project context rows from project.db (Tier 4).

    Args:
        limit: Maximum rows.

    Returns:
        List of context dicts; empty when no project.db is found or the
        project_context table does not exist.
    """
    db_path = find_project_db()
    if not db_path:
        return []

    conn = sqlite3.connect(str(db_path))
    conn.row_factory = sqlite3.Row
    try:
        cursor = conn.execute(
            "SELECT * FROM project_context ORDER BY created_at DESC LIMIT ?",
            (limit,),
        )
        results = [dict(row) for row in cursor.fetchall()]
    except sqlite3.OperationalError:
        # Table missing - treat as empty
        results = []
    finally:
        conn.close()
    return results

def get_registered_projects(query: str = None, limit: int = 20) -> List[Dict]:
    """List registered projects from projects.db (Tier 4 - ADR-118).

    Args:
        query: Optional LIKE filter on name, path, or primary_language.
        limit: Maximum rows.

    Returns:
        List of project dicts; empty when projects.db is missing or has
        no projects table.
    """
    if not PROJECTS_DB_PATH.exists():
        return []

    conn = sqlite3.connect(str(PROJECTS_DB_PATH))
    conn.row_factory = sqlite3.Row

    # Shared column list (was duplicated across both branches)
    columns = ("project_uuid, name, path, project_type, status, "
               "primary_language, framework, github_url, "
               "last_indexed_at, created_at")
    try:
        if query:
            like = f'%{query}%'
            cursor = conn.execute(
                f"""SELECT {columns} FROM projects
                    WHERE name LIKE ? OR path LIKE ? OR primary_language LIKE ?
                    ORDER BY name LIMIT ?""",
                (like, like, like, limit),
            )
        else:
            cursor = conn.execute(
                f"SELECT {columns} FROM projects ORDER BY name LIMIT ?",
                (limit,),
            )
        results = [dict(row) for row in cursor.fetchall()]
    except sqlite3.OperationalError:
        # Schema missing/old - behave as empty registry
        results = []
    finally:
        conn.close()
    return results

def get_project_by_name(name: str) -> Optional[Dict]:
    """Get a single project by name, path substring, or UUID prefix.

    J.16.2.2: Tries an exact name / UUID-prefix match first, then falls
    back to a fuzzy LIKE match on name and path.

    Args:
        name: Project name, path substring, or UUID prefix.

    Returns:
        Project dict, or None when not found / projects.db is absent.
    """
    if not PROJECTS_DB_PATH.exists():
        return None

    # Shared column list (was duplicated across both queries)
    columns = ("project_uuid, id, name, path, project_type, status, "
               "primary_language, framework, github_url, "
               "parent_project_id, last_indexed_at, created_at")

    conn = sqlite3.connect(str(PROJECTS_DB_PATH))
    conn.row_factory = sqlite3.Row
    try:
        # Exact name or UUID-prefix match takes priority
        cursor = conn.execute(
            f"""SELECT {columns} FROM projects
                WHERE name = ? OR project_uuid LIKE ? || '%'
                LIMIT 1""",
            (name, name),
        )
        row = cursor.fetchone()
        if row is None:
            # Fallback: fuzzy match on name or path
            cursor = conn.execute(
                f"""SELECT {columns} FROM projects
                    WHERE name LIKE ? OR path LIKE ?
                    LIMIT 1""",
                (f'%{name}%', f'%{name}%'),
            )
            row = cursor.fetchone()
        return dict(row) if row else None
    except sqlite3.OperationalError:
        # Schema missing/old - behave as "not found"
        return None
    finally:
        # Close on every return path (original leaked on the exception path
        # only by luck of falling through; this makes it explicit)
        conn.close()

def get_project_tree(project_name: str) -> List[Dict]:
    """Get a project and all of its child projects (submodules).

    J.16.2.4: Walks parent_project_id recursively via a SQLite CTE.

    Args:
        project_name: Project name or UUID prefix.

    Returns:
        List of project dicts ordered parent first, then children by
        increasing depth; empty when the project is not found.
    """
    if not PROJECTS_DB_PATH.exists():
        return []

    parent = get_project_by_name(project_name)
    if not parent:
        return []

    parent_id = parent.get('id')
    if not parent_id:
        # Without an integer id we cannot join children; return just the parent
        return [parent]

    conn = sqlite3.connect(str(PROJECTS_DB_PATH))
    conn.row_factory = sqlite3.Row
    try:
        # Recursive CTE: seed with the parent, then repeatedly add any
        # project whose parent_project_id is already in the tree.
        cursor = conn.execute("""
            WITH RECURSIVE project_tree AS (
                -- Base case: the parent project
                SELECT project_uuid, id, name, path, project_type, status,
                       primary_language, framework, github_url,
                       parent_project_id, last_indexed_at, created_at, 0 as depth
                FROM projects
                WHERE id = ?

                UNION ALL

                -- Recursive case: direct children of anything in the tree
                SELECT p.project_uuid, p.id, p.name, p.path, p.project_type, p.status,
                       p.primary_language, p.framework, p.github_url,
                       p.parent_project_id, p.last_indexed_at, p.created_at, pt.depth + 1
                FROM projects p
                INNER JOIN project_tree pt ON p.parent_project_id = pt.id
            )
            SELECT * FROM project_tree
            ORDER BY depth, name
        """, (parent_id,))
        results = [dict(row) for row in cursor.fetchall()]
    except sqlite3.OperationalError:
        # Old schema without parent_project_id - fall back to the parent alone
        results = [parent]
    finally:
        conn.close()
    return results

def search_in_project(
    query: str,
    project_uuid: str,
    limit: int = 20,
    include_children: bool = False,
    child_uuids: List[str] = None,
    project_name: str = None,
    project_path: str = None,
    project_paths: List[str] = None
) -> Dict[str, Any]:
    """Search messages/decisions/learnings within a specific project.

    J.16.2.2/J.16.2.4: Project-scoped search across Tier 3 (sessions.db)
    and Tier 2 (org.db) databases.

    The search uses multiple strategies to match project context:
    1. project_id field (if populated)
    2. project_uuid field
    3. source_file path containing project path
    4. Project name in content/context

    Args:
        query: Search query (optional - if empty, return recent rows)
        project_uuid: Project UUID to filter by
        limit: Maximum results per category
        include_children: If True, include child project UUIDs in search
        child_uuids: List of child project UUIDs (for --project-tree)
        project_name: Project name for path-based matching
        project_path: Project path for source_file matching
        project_paths: List of project paths (for tree search)

    Returns:
        Dict with messages, decisions, learnings keys; the 'errors' list
        collects per-table failure strings instead of raising.
    """
    results = {
        'project_uuid': project_uuid,
        'project_name': project_name,
        'include_children': include_children,
        'messages': [],
        'decisions': [],
        'learnings': [],
        'errors': []
    }

    # Build project filter - multiple strategies for matching.
    # Strategy 1: Exact project_id match (UUID or name)
    # Strategy 2: source_file path contains project path
    filter_conditions = []
    filter_params = []

    if include_children and child_uuids:
        # Tree search: match any UUID in the parent+children set at once.
        placeholders = ','.join(['?' for _ in child_uuids])
        filter_conditions.append(f"project_id IN ({placeholders})")
        filter_params.extend(child_uuids)
    else:
        filter_conditions.append("project_id = ?")
        filter_params.append(project_uuid)

    # Add name-based matching (older rows may store the name in project_id).
    if project_name:
        filter_conditions.append("project_id = ?")
        filter_params.append(project_name)

    # Add path-based matching for source_file.
    if include_children and project_paths:
        for path in project_paths:
            if path:
                filter_conditions.append("source_file LIKE ?")
                filter_params.append(f"%{path}%")
    elif project_path:
        filter_conditions.append("source_file LIKE ?")
        filter_params.append(f"%{project_path}%")

    # Combine with OR - any strategy matching is enough. Only '?' bound
    # params carry user values, so the composed filter stays injection-safe.
    project_filter = "(" + " OR ".join(filter_conditions) + ")"
    project_params = filter_params

    # Search messages in sessions.db (Tier 3)
    if SESSIONS_DB_PATH.exists():
        conn = get_sessions_db()
        try:
            if query:
                # Try FTS first; fall back to LIKE on OperationalError
                # (missing FTS table or invalid MATCH syntax).
                try:
                    fts_query = query
                    if BOOLEAN_PARSER_AVAILABLE and is_boolean_query(query):
                        # J.4.4.1: translate AND/OR/NOT into FTS MATCH form.
                        result = parse_boolean_query(query)
                        if result.success:
                            fts_query = result.fts_query

                    sql = f"""
                        SELECT m.*, highlight(messages_fts, 0, '>>>', '<<<') as highlighted
                        FROM messages m
                        JOIN messages_fts ON m.id = messages_fts.rowid
                        WHERE messages_fts MATCH ? AND {project_filter}
                        ORDER BY m.timestamp DESC
                        LIMIT ?
                    """
                    cursor = conn.execute(sql, [fts_query] + project_params + [limit])
                    results['messages'] = [dict(row) for row in cursor.fetchall()]
                except sqlite3.OperationalError:
                    # Fallback to LIKE
                    sql = f"""
                        SELECT * FROM messages
                        WHERE content LIKE ? AND {project_filter}
                        ORDER BY timestamp DESC
                        LIMIT ?
                    """
                    cursor = conn.execute(sql, [f'%{query}%'] + project_params + [limit])
                    results['messages'] = [dict(row) for row in cursor.fetchall()]
            else:
                # No query - get recent
                sql = f"""
                    SELECT * FROM messages
                    WHERE {project_filter}
                    ORDER BY timestamp DESC
                    LIMIT ?
                """
                cursor = conn.execute(sql, project_params + [limit])
                results['messages'] = [dict(row) for row in cursor.fetchall()]
        except sqlite3.OperationalError as e:
            results['errors'].append(f"Messages search error: {e}")
        finally:
            conn.close()

    # Search decisions in org.db (Tier 2)
    if ORG_DB_PATH.exists():
        conn = get_org_db()
        try:
            if query:
                sql = f"""
                    SELECT * FROM decisions
                    WHERE (decision_type LIKE ? OR context LIKE ? OR rationale LIKE ?)
                    AND {project_filter}
                    ORDER BY created_at DESC
                    LIMIT ?
                """
                cursor = conn.execute(
                    sql,
                    [f'%{query}%', f'%{query}%', f'%{query}%'] + project_params + [limit]
                )
            else:
                sql = f"""
                    SELECT * FROM decisions
                    WHERE {project_filter}
                    ORDER BY created_at DESC
                    LIMIT ?
                """
                cursor = conn.execute(sql, project_params + [limit])
            results['decisions'] = [dict(row) for row in cursor.fetchall()]
        except sqlite3.OperationalError as e:
            # NOTE(review): project_filter may reference source_file, which the
            # decisions table might not have -- such schema errors land here.
            results['errors'].append(f"Decisions search error: {e}")

        # Search skill learnings (same connection; independent try so a
        # decisions failure does not block the learnings query).
        try:
            if query:
                sql = f"""
                    SELECT * FROM skill_learnings
                    WHERE (skill_name LIKE ? OR pattern_type LIKE ? OR description LIKE ?)
                    AND {project_filter}
                    ORDER BY created_at DESC
                    LIMIT ?
                """
                cursor = conn.execute(
                    sql,
                    [f'%{query}%', f'%{query}%', f'%{query}%'] + project_params + [limit]
                )
            else:
                sql = f"""
                    SELECT * FROM skill_learnings
                    WHERE {project_filter}
                    ORDER BY created_at DESC
                    LIMIT ?
                """
                cursor = conn.execute(sql, project_params + [limit])
            results['learnings'] = [dict(row) for row in cursor.fetchall()]
        except sqlite3.OperationalError as e:
            results['errors'].append(f"Learnings search error: {e}")
        finally:
            conn.close()

    return results

def search_all_projects(query: str, limit: int = 20) -> Dict[str, Any]:
    """Search across ALL registered projects.

    J.16.2.3: Runs a project-scoped search per registered project and
    aggregates the hits, keyed by project name, with summary totals.

    Args:
        query: Search query.
        limit: Maximum results per project.

    Returns:
        Dict with project-keyed results (only projects that had hits).
    """
    if not PROJECTS_DB_PATH.exists():
        return {'error': 'projects.db not found', 'projects': {}}

    registered = get_registered_projects(limit=100)  # Get all projects
    if not registered:
        return {'error': 'No registered projects', 'projects': {}}

    aggregated: Dict[str, Any] = {
        'total_projects': len(registered),
        'query': query,
        'projects': {},
        'summary': {
            'total_messages': 0,
            'total_decisions': 0,
            'total_learnings': 0,
        },
    }

    for entry in registered:
        uuid = entry.get('project_uuid')
        if not uuid:
            continue
        name = entry.get('name', 'unknown')

        hits = search_in_project(
            query=query,
            project_uuid=uuid,
            limit=limit,
            project_name=name,
            project_path=entry.get('path'),
        )

        counts = {
            cat: len(hits.get(cat, []))
            for cat in ('messages', 'decisions', 'learnings')
        }

        # Projects with no hits at all are omitted from the report.
        if not any(counts.values()):
            continue

        aggregated['projects'][name] = {
            'project_uuid': uuid,
            'path': entry.get('path'),
            'messages': hits.get('messages', []),
            'decisions': hits.get('decisions', []),
            'learnings': hits.get('learnings', []),
            'counts': counts,
        }
        for cat, n in counts.items():
            aggregated['summary'][f'total_{cat}'] += n

    return aggregated

def format_project_search_results(results: Dict[str, Any], query: str = None) -> str:
    """Format project search results for display.

    Handles both shapes: a single-project result (from search_in_project)
    and a multi-project result (from search_all_projects).

    Args:
        results: Results from search_in_project or search_all_projects.
        query: Optional query string for header.

    Returns:
        Formatted string for terminal display.
    """
    lines = []

    # Single project results
    if 'project_uuid' in results and 'projects' not in results:
        # NULL/None fields come straight from the DB; coalesce before
        # slicing so a missing value cannot raise TypeError.
        project_uuid = results['project_uuid'] or 'unknown'
        include_children = results.get('include_children', False)

        header = f"Project: {project_uuid[:8]}..."
        if include_children:
            header += " (+ children)"
        if query:
            header += f" | Query: \"{query}\""
        lines.append(header)
        lines.append("=" * 70)

        # Messages
        messages = results.get('messages', [])
        if messages:
            lines.append(f"\n📝 Messages ({len(messages)} results):")
            for msg in messages[:10]:  # Limit display
                role = msg.get('role') or 'unknown'
                ts = (msg.get('timestamp') or 'unknown')[:10]
                content = (msg.get('content') or '')[:100]
                lines.append(f" [{role}] {ts}: {content}...")
            if len(messages) > 10:
                lines.append(f" ... and {len(messages) - 10} more")

        # Decisions
        decisions = results.get('decisions', [])
        if decisions:
            lines.append(f"\n📋 Decisions ({len(decisions)} results):")
            for dec in decisions[:5]:
                dec_type = dec.get('decision_type') or 'unknown'
                created = (dec.get('created_at') or 'unknown')[:10]
                context = (dec.get('context') or '')[:80]
                lines.append(f" [{dec_type}] {created}: {context}...")

        # Learnings
        learnings = results.get('learnings', [])
        if learnings:
            lines.append(f"\n📚 Learnings ({len(learnings)} results):")
            for learn in learnings[:5]:
                skill = learn.get('skill_name') or 'unknown'
                pattern = learn.get('pattern_type') or 'unknown'
                lines.append(f" [{skill}] {pattern}")

        if not messages and not decisions and not learnings:
            lines.append("\n No results found for this project.")

    # Multi-project results
    elif 'projects' in results:
        summary = results.get('summary', {})
        total_projs = results.get('total_projects', 0)
        proj_with_results = len(results.get('projects', {}))

        query_str = results.get('query', '')
        header = f"Cross-Project Search: \"{query_str}\""
        lines.append(header)
        lines.append(f"Projects: {proj_with_results}/{total_projs} with results")
        lines.append(f"Totals: {summary.get('total_messages', 0)} messages, "
                     f"{summary.get('total_decisions', 0)} decisions, "
                     f"{summary.get('total_learnings', 0)} learnings")
        lines.append("=" * 70)

        for proj_name, proj_data in results.get('projects', {}).items():
            counts = proj_data.get('counts', {})
            lines.append(f"\n📦 {proj_name}")
            lines.append(f" Path: {proj_data.get('path', 'unknown')}")
            lines.append(f" Messages: {counts.get('messages', 0)}, "
                         f"Decisions: {counts.get('decisions', 0)}, "
                         f"Learnings: {counts.get('learnings', 0)}")

            # Show top 3 messages per project
            messages = proj_data.get('messages', [])[:3]
            for msg in messages:
                content = (msg.get('content') or '')[:60]
                lines.append(f" - {content}...")

    return "\n".join(lines)

def format_project(proj: Dict) -> str:
    """Format a registered project for display.

    Args:
        proj: Project row as a dict; keys may be absent or NULL.

    Returns:
        Multi-line display string ending with a separator rule.
    """
    lines = []

    status_icon = "✅" if proj.get('status') == 'active' else "⏸️"
    indexed = "✓" if proj.get('last_indexed_at') else "✗"

    lines.append(f"{status_icon} {proj.get('name') or 'unnamed'}")
    # Coalesce before slicing: a NULL project_uuid in the DB would
    # otherwise raise TypeError on the [:8] slice.
    lines.append(f" UUID: {(proj.get('project_uuid') or 'unknown')[:8]}...")
    lines.append(f" Path: {proj.get('path') or 'unknown'}")
    lines.append(f" Type: {proj.get('project_type') or 'unknown'}")

    if proj.get('primary_language'):
        lang = proj['primary_language']
        if proj.get('framework'):
            lang += f" ({proj['framework']})"
        lines.append(f" Language: {lang}")

    if proj.get('github_url'):
        lines.append(f" GitHub: {proj['github_url']}")

    lines.append(f" Indexed: {indexed}")
    lines.append("-" * 60)

    return "\n".join(lines)

def format_component(comp: Dict) -> str:
    """Render one platform-registry component as display text."""
    kind = comp.get('type', 'unknown')
    label = comp.get('name', 'unnamed')

    # Truncated description, or a placeholder when absent/empty.
    if comp.get('description'):
        summary = comp.get('description', '')[:80]
    else:
        summary = 'No description'

    out = [f"[{kind}] {label}", f" {summary}"]

    if comp.get('path'):
        out.append(f" Path: {comp['path']}")

    out.append("-" * 60)
    return "\n".join(out)

# =============================================================================
# J.16.1: Tier 3 Analytics Queries (ADR-118)
# =============================================================================

def get_token_economics(
    session_id: str = None,
    limit: int = 20,
    aggregate: bool = False,
    group_by_sql: str = None,
    group_by_select: List[str] = None,
    sort_sql: str = None
) -> List[Dict]:
    """Get token economics data from sessions.db (Tier 3).

    Exactly one query mode runs, chosen in priority order:
    group_by_sql > aggregate > session_id > plain recent listing.

    Args:
        session_id: Filter by specific session
        limit: Maximum results
        aggregate: If True, return aggregated stats instead of raw records
        group_by_sql: J.4.7.2 - SQL GROUP BY clause (without keyword)
        group_by_select: J.4.7.2 - SELECT fields for grouping
        sort_sql: J.4.7.2 - SQL ORDER BY clause (without keyword)

    Returns:
        List of row dicts; empty list on schema errors (a warning is printed).

    NOTE(review): group_by_sql / group_by_select / sort_sql are interpolated
    into the SQL verbatim -- callers must pass trusted, pre-validated
    fragments only (presumably enforced by the CLI layer; confirm).
    """
    conn = get_sessions_db()

    try:
        # J.4.7.2: GROUP BY query mode
        if group_by_sql:
            # Build SELECT with grouping fields and standard aggregations
            select_parts = list(group_by_select) if group_by_select else []
            select_parts.extend([
                'COUNT(*) AS record_count',
                'SUM(token_input) AS total_input',
                'SUM(token_output) AS total_output',
                'SUM(cost_total_usd) AS total_cost',
                'AVG(cost_total_usd) AS avg_cost',
            ])

            select_clause = ", ".join(select_parts)
            order_clause = sort_sql if sort_sql else "total_cost DESC"

            query = f"""
                SELECT {select_clause}
                FROM token_economics
                GROUP BY {group_by_sql}
                ORDER BY {order_clause}
                LIMIT ?
            """
            cursor = conn.execute(query, (limit,))
            results = [dict(row) for row in cursor.fetchall()]

        elif aggregate:
            # Aggregated token statistics (using actual column names).
            # SUM/AVG return NULL (None) when the table is empty.
            cursor = conn.execute("""
                SELECT
                    COUNT(*) as total_records,
                    SUM(token_input) as total_input_tokens,
                    SUM(token_output) as total_output_tokens,
                    SUM(token_input + token_output) as grand_total_tokens,
                    AVG(token_input) as avg_input_tokens,
                    AVG(token_output) as avg_output_tokens,
                    SUM(cost_total_usd) as total_cost_usd,
                    MIN(created_at) as first_record,
                    MAX(created_at) as last_record,
                    COUNT(DISTINCT session_id) as unique_sessions,
                    COUNT(DISTINCT model_name) as unique_models
                FROM token_economics
            """)
            row = cursor.fetchone()
            if row:
                results = [dict(row)]
            else:
                results = []
        elif session_id:
            # Raw records for one session, newest first by default.
            order_clause = sort_sql if sort_sql else "created_at DESC"
            cursor = conn.execute(f"""
                SELECT id, session_id, model_name, model_tier,
                       token_input, token_output, token_cache_read, token_cache_write,
                       cost_input_usd, cost_output_usd, cost_total_usd,
                       task_id, agent_name, operation_type, created_at
                FROM token_economics
                WHERE session_id = ?
                ORDER BY {order_clause}
                LIMIT ?
            """, (session_id, limit))
            results = [dict(row) for row in cursor.fetchall()]
        else:
            # Raw records across all sessions, newest first by default.
            order_clause = sort_sql if sort_sql else "created_at DESC"
            cursor = conn.execute(f"""
                SELECT id, session_id, model_name, model_tier,
                       token_input, token_output, token_cache_read, token_cache_write,
                       cost_input_usd, cost_output_usd, cost_total_usd,
                       task_id, agent_name, operation_type, created_at
                FROM token_economics
                ORDER BY {order_clause}
                LIMIT ?
            """, (limit,))
            results = [dict(row) for row in cursor.fetchall()]
    except sqlite3.OperationalError as e:
        # Table/column missing -- degrade to empty results with a warning.
        print(f"Warning: token_economics query error: {e}")
        results = []

    conn.close()
    return results

def get_tool_analytics(
    tool_name: str = None,
    session_id: str = None,
    limit: int = 20,
    aggregate: bool = False,
    group_by_sql: str = None,
    group_by_select: List[str] = None,
    sort_sql: str = None
) -> List[Dict]:
    """Get tool analytics data from sessions.db (Tier 3).

    Exactly one query mode runs, chosen in priority order:
    group_by_sql > aggregate > (tool_name and session_id) > tool_name >
    session_id > plain recent listing. COALESCE pairs below bridge old and
    new column names (execution_time_ms/duration_ms, *_size_bytes/*_size).

    Args:
        tool_name: Filter by specific tool
        session_id: Filter by specific session
        limit: Maximum results
        aggregate: If True, return aggregated stats instead of raw records
        group_by_sql: J.4.7.2 - SQL GROUP BY clause (without keyword)
        group_by_select: J.4.7.2 - SELECT fields for grouping
        sort_sql: J.4.7.2 - SQL ORDER BY clause (without keyword)

    Returns:
        List of row dicts; empty list on schema errors (a warning is printed).

    NOTE(review): group_by_sql / group_by_select / sort_sql are interpolated
    into the SQL verbatim -- trusted fragments only.
    """
    conn = get_sessions_db()

    try:
        # J.4.7.2: GROUP BY query mode
        if group_by_sql:
            # Build SELECT with grouping fields and standard aggregations
            select_parts = list(group_by_select) if group_by_select else []
            select_parts.extend([
                'COUNT(*) AS invocation_count',
                "SUM(CASE WHEN status = 'success' THEN 1 ELSE 0 END) AS success_count",
                "SUM(CASE WHEN status != 'success' THEN 1 ELSE 0 END) AS failure_count",
                'AVG(COALESCE(execution_time_ms, duration_ms)) AS avg_duration_ms',
            ])

            select_clause = ", ".join(select_parts)
            order_clause = sort_sql if sort_sql else "invocation_count DESC"

            query = f"""
                SELECT {select_clause}
                FROM tool_analytics
                GROUP BY {group_by_sql}
                ORDER BY {order_clause}
                LIMIT ?
            """
            cursor = conn.execute(query, (limit,))
            results = [dict(row) for row in cursor.fetchall()]

        elif aggregate:
            # Aggregated per-tool statistics (using actual column names)
            cursor = conn.execute("""
                SELECT
                    tool_name,
                    tool_category,
                    COUNT(*) as invocation_count,
                    SUM(CASE WHEN status = 'success' THEN 1 ELSE 0 END) as success_count,
                    SUM(CASE WHEN status != 'success' THEN 1 ELSE 0 END) as failure_count,
                    AVG(COALESCE(execution_time_ms, duration_ms)) as avg_duration_ms,
                    MIN(COALESCE(execution_time_ms, duration_ms)) as min_duration_ms,
                    MAX(COALESCE(execution_time_ms, duration_ms)) as max_duration_ms,
                    COUNT(DISTINCT session_id) as unique_sessions
                FROM tool_analytics
                GROUP BY tool_name
                ORDER BY invocation_count DESC
                LIMIT ?
            """, (limit,))
            results = [dict(row) for row in cursor.fetchall()]
        elif tool_name and session_id:
            # Raw invocations for one tool within one session.
            cursor = conn.execute("""
                SELECT id, session_id, tool_name, tool_category, agent_name, task_id,
                       status, error_type, error_message,
                       COALESCE(execution_time_ms, duration_ms) as duration_ms,
                       COALESCE(input_size_bytes, input_size) as input_size,
                       COALESCE(output_size_bytes, output_size) as output_size,
                       retry_count, created_at
                FROM tool_analytics
                WHERE tool_name = ? AND session_id = ?
                ORDER BY created_at DESC
                LIMIT ?
            """, (tool_name, session_id, limit))
            results = [dict(row) for row in cursor.fetchall()]
        elif tool_name:
            # Raw invocations for one tool across all sessions.
            cursor = conn.execute("""
                SELECT id, session_id, tool_name, tool_category, agent_name, task_id,
                       status, error_type, error_message,
                       COALESCE(execution_time_ms, duration_ms) as duration_ms,
                       COALESCE(input_size_bytes, input_size) as input_size,
                       COALESCE(output_size_bytes, output_size) as output_size,
                       retry_count, created_at
                FROM tool_analytics
                WHERE tool_name = ?
                ORDER BY created_at DESC
                LIMIT ?
            """, (tool_name, limit))
            results = [dict(row) for row in cursor.fetchall()]
        elif session_id:
            # Raw invocations for one session, any tool.
            cursor = conn.execute("""
                SELECT id, session_id, tool_name, tool_category, agent_name, task_id,
                       status, error_type, error_message,
                       COALESCE(execution_time_ms, duration_ms) as duration_ms,
                       COALESCE(input_size_bytes, input_size) as input_size,
                       COALESCE(output_size_bytes, output_size) as output_size,
                       retry_count, created_at
                FROM tool_analytics
                WHERE session_id = ?
                ORDER BY created_at DESC
                LIMIT ?
            """, (session_id, limit))
            results = [dict(row) for row in cursor.fetchall()]
        else:
            # No filters: most recent invocations overall.
            cursor = conn.execute("""
                SELECT id, session_id, tool_name, tool_category, agent_name, task_id,
                       status, error_type, error_message,
                       COALESCE(execution_time_ms, duration_ms) as duration_ms,
                       COALESCE(input_size_bytes, input_size) as input_size,
                       COALESCE(output_size_bytes, output_size) as output_size,
                       retry_count, created_at
                FROM tool_analytics
                ORDER BY created_at DESC
                LIMIT ?
            """, (limit,))
            results = [dict(row) for row in cursor.fetchall()]
    except sqlite3.OperationalError as e:
        # Table/column missing -- degrade to empty results with a warning.
        print(f"Warning: tool_analytics query error: {e}")
        results = []

    conn.close()
    return results

def get_activity_associations(
    component_type: str = None,
    session_id: str = None,
    limit: int = 20,
    aggregate: bool = False
) -> List[Dict]:
    """Get activity associations from sessions.db (Tier 3).

    Queries message_component_invocations table which tracks
    which components (skills, agents, commands, hooks) were
    invoked during each session.

    The four raw-record filter combinations (none / component_type /
    session_id / both) are collapsed into one dynamically-built WHERE
    clause instead of four copy-pasted queries.

    Args:
        component_type: Filter by component type (skill, agent, command, hook)
        session_id: Filter by specific session
        limit: Maximum results
        aggregate: If True, return aggregated stats per component_type

    Returns:
        List of row dicts; empty list on schema errors (a warning is printed).
    """
    conn = get_sessions_db()

    try:
        if aggregate:
            # Per-type rollup across all sessions.
            cursor = conn.execute("""
                SELECT
                    component_type,
                    COUNT(*) as invocation_count,
                    COUNT(DISTINCT component_name) as unique_components,
                    COUNT(DISTINCT session_id) as unique_sessions
                FROM message_component_invocations
                GROUP BY component_type
                ORDER BY invocation_count DESC
            """)
        else:
            # Raw records: compose WHERE from whichever filters are set.
            conditions = []
            params = []
            if component_type:
                conditions.append("component_type = ?")
                params.append(component_type)
            if session_id:
                conditions.append("session_id = ?")
                params.append(session_id)
            where_clause = f"WHERE {' AND '.join(conditions)}" if conditions else ""
            params.append(limit)

            cursor = conn.execute(f"""
                SELECT id, message_id, component_name, component_type,
                       session_id, timestamp, created_at
                FROM message_component_invocations
                {where_clause}
                ORDER BY created_at DESC
                LIMIT ?
            """, params)
        results = [dict(row) for row in cursor.fetchall()]
    except sqlite3.OperationalError as e:
        # Table missing -- degrade to empty results with a warning.
        print(f"Warning: message_component_invocations query error: {e}")
        results = []

    conn.close()
    return results

def format_token_economics(te: Dict, aggregate: bool = False) -> str:
    """Format a token_economics record (or the aggregate summary) for display.

    Args:
        te: Row dict from get_token_economics().
        aggregate: True when te is the aggregated-summary row.

    Returns:
        Multi-line display string.
    """
    lines = []

    if aggregate:
        # SQL SUM()/AVG() over an empty table yield NULL (None); coalesce
        # every numeric field so the format specs cannot raise TypeError.
        total_cost = te.get('total_cost_usd') or 0
        lines.append("TOKEN ECONOMICS SUMMARY")
        lines.append("-" * 40)
        lines.append(f" Total Records: {te.get('total_records') or 0:,}")
        lines.append(f" Total Input Tokens: {te.get('total_input_tokens') or 0:,}")
        lines.append(f" Total Output Tokens:{te.get('total_output_tokens') or 0:,}")
        lines.append(f" Grand Total Tokens: {te.get('grand_total_tokens') or 0:,}")
        lines.append(f" Avg Input Tokens: {te.get('avg_input_tokens') or 0:,.1f}")
        lines.append(f" Avg Output Tokens: {te.get('avg_output_tokens') or 0:,.1f}")
        lines.append(f" Total Cost (USD): ${total_cost:,.2f}")
        lines.append(f" Unique Sessions: {te.get('unique_sessions') or 0:,}")
        lines.append(f" Unique Models: {te.get('unique_models') or 0:,}")
        lines.append(f" First Record: {te.get('first_record') or 'N/A'}")
        lines.append(f" Last Record: {te.get('last_record') or 'N/A'}")
    else:
        # Individual record display (using actual column names)
        session = te.get('session_id', 'unknown')[:8] if te.get('session_id') else 'unknown'
        timestamp = te.get('created_at', 'unknown')
        input_tokens = te.get('token_input', 0) or 0
        output_tokens = te.get('token_output', 0) or 0
        cache_read = te.get('token_cache_read', 0) or 0
        cache_write = te.get('token_cache_write', 0) or 0
        total_tokens = input_tokens + output_tokens

        lines.append(f"[{timestamp}] Session: {session}...")
        lines.append(f" Input: {input_tokens:,} tokens")
        lines.append(f" Output: {output_tokens:,} tokens")
        lines.append(f" Total: {total_tokens:,} tokens")

        if cache_read or cache_write:
            lines.append(f" Cache Read: {cache_read:,} tokens")
            lines.append(f" Cache Write: {cache_write:,} tokens")

        if te.get('model_name'):
            model = te['model_name']
            tier = te.get('model_tier', '')
            lines.append(f" Model: {model}" + (f" ({tier})" if tier else ""))

        cost = te.get('cost_total_usd')
        if cost:
            lines.append(f" Cost: ${cost:.4f}")

        if te.get('task_id'):
            lines.append(f" Task: {te['task_id']}")
        if te.get('agent_name'):
            lines.append(f" Agent: {te['agent_name']}")
        if te.get('operation_type'):
            lines.append(f" Operation: {te['operation_type']}")

        lines.append("-" * 60)

    return "\n".join(lines)

def format_tool_analytics(ta: Dict, aggregate: bool = False) -> str:
    """Render a tool_analytics row (raw invocation or per-tool aggregate)."""
    out = []

    tool = ta.get('tool_name', 'unknown')
    category = ta.get('tool_category', '')
    header = f"[{tool}]" + (f" ({category})" if category else "")

    if aggregate:
        # Per-tool rollup line set.
        total = ta.get('invocation_count', 0) or 0
        ok = ta.get('success_count', 0) or 0
        bad = ta.get('failure_count', 0) or 0
        rate = (ok / total * 100) if total > 0 else 0
        latency = ta.get('avg_duration_ms') or 0

        out.append(header)
        out.append(f" Invocations: {total:,}")
        out.append(f" Success: {ok:,} ({rate:.1f}%)")
        out.append(f" Failures: {bad:,}")
        if latency:
            out.append(f" Avg Duration:{latency:,.1f} ms")
        out.append(f" Sessions: {ta.get('unique_sessions', 0):,}")
    else:
        # Single invocation record.
        sid = ta.get('session_id')
        session = sid[:8] if sid else 'unknown'
        status = ta.get('status', 'unknown')
        mark = "✓" if status == 'success' else "✗"
        elapsed = ta.get('duration_ms') or 0

        out.append(f"{header} {mark} {ta.get('created_at', 'unknown')}")
        out.append(f" Session: {session}...")
        out.append(f" Status: {status}")

        if elapsed:
            out.append(f" Duration: {elapsed:,} ms")

        if ta.get('task_id'):
            out.append(f" Task: {ta['task_id']}")
        if ta.get('agent_name'):
            out.append(f" Agent: {ta['agent_name']}")

        if ta.get('error_type'):
            out.append(f" Error: [{ta['error_type']}] {(ta.get('error_message') or '')[:60]}")
        elif ta.get('error_message'):
            out.append(f" Error: {ta['error_message'][:80]}")

        if ta.get('retry_count'):
            out.append(f" Retries: {ta['retry_count']}")

    out.append("-" * 60)
    return "\n".join(out)

def format_activity_association(aa: Dict, aggregate: bool = False) -> str:
    """Render a component-invocation record (or per-type aggregate) as text."""
    out = []

    if aggregate:
        # Rollup per component type.
        out.append(f"[{aa.get('component_type', 'unknown')}]")
        out.append(f" Invocations: {aa.get('invocation_count', 0):,}")
        out.append(f" Unique Components: {aa.get('unique_components', 0):,}")
        out.append(f" Unique Sessions: {aa.get('unique_sessions', 0):,}")
    else:
        # Single invocation record; prefer 'timestamp' over 'created_at'.
        sid = aa.get('session_id')
        out.append(f"[{aa.get('component_type', 'unknown')}] {aa.get('component_name', 'unknown')}")
        out.append(f" Session: {sid[:8] if sid else 'unknown'}...")
        out.append(f" Timestamp: {aa.get('timestamp') or aa.get('created_at', 'unknown')}")

        if aa.get('message_id'):
            out.append(f" Message: #{aa['message_id']}")

    out.append("-" * 60)
    return "\n".join(out)

def format_grouped_result(result: Dict, query_type: str) -> str:
    """Format a grouped query result for display (J.4.7.2).

    Splits the row into grouping keys vs known aggregation columns, then
    renders aggregations with query-type-specific labels.

    Args:
        result: Grouped result dict with aggregation fields.
        query_type: Type of query ('tokens', 'tools', 'decisions', etc.)

    Returns:
        Multi-line display string.
    """
    # Common aggregation field names; anything else is a grouping key.
    AGG_FIELDS = (
        'record_count', 'total_input', 'total_output', 'total_cost',
        'avg_cost', 'invocation_count', 'success_count', 'failure_count',
        'avg_duration_ms', 'decision_count', 'error_count', 'learning_count',
        'activity_count', 'avg_confidence',
    )

    lines = []
    group_keys = []
    agg_values = []

    for key, value in result.items():
        if key in AGG_FIELDS:
            agg_values.append((key, value))
        else:
            group_keys.append((key, value))

    # Format grouping keys
    if group_keys:
        key_parts = [f"{k}={v}" for k, v in group_keys]
        lines.append(f"📊 {' | '.join(key_parts)}")

    # Format aggregations based on query type. SQL SUM()/AVG() can return
    # NULL (None) for empty groups, so coalesce before numeric formatting.
    if query_type == 'tokens':
        for key, value in agg_values:
            value = value or 0
            if key == 'record_count':
                lines.append(f" Records: {value:,}")
            elif key == 'total_input':
                lines.append(f" Input: {value:,.0f} tokens")
            elif key == 'total_output':
                lines.append(f" Output: {value:,.0f} tokens")
            elif key == 'total_cost':
                lines.append(f" Total Cost: ${value:.4f}")
            elif key == 'avg_cost':
                lines.append(f" Avg Cost: ${value:.6f}")
    elif query_type == 'tools':
        for key, value in agg_values:
            if key == 'invocation_count':
                lines.append(f" Invocations: {value or 0:,}")
            elif key == 'success_count':
                lines.append(f" Successes: {value or 0:,}")
            elif key == 'failure_count':
                lines.append(f" Failures: {value or 0:,}")
            elif key == 'avg_duration_ms':
                if value:
                    lines.append(f" Avg Latency: {value:.1f}ms")
    else:
        # Generic format for other types
        for key, value in agg_values:
            if isinstance(value, float):
                lines.append(f" {key}: {value:.4f}")
            else:
                lines.append(f" {key}: {value}")

    lines.append("-" * 50)
    return "\n".join(lines)

def format_decision(dec: Dict) -> str:
    """Render an organizational decision record as display text."""
    out = []

    out.append(f"[{dec.get('decision_type', 'unknown')}] "
               f"{dec.get('decision', 'No decision')[:100]}")
    out.append(f" Created: {dec.get('created_at', 'unknown')}")

    # ADR-156: Show project attribution
    proj = dec.get('project_id', '')
    scope = dec.get('scope', '')
    if proj:
        badge = f" [{scope}]" if scope else ""
        out.append(f" Project: {proj}{badge}")
    elif dec.get('project_path', ''):
        out.append(f" Path: {dec.get('project_path', '')}")

    rationale = dec.get('rationale')
    if rationale:
        shown = rationale[:200] + "..." if len(rationale) > 200 else rationale
        out.append(f" Rationale: {shown}")

    out.append("-" * 60)
    return "\n".join(out)

def format_error_solution(es: Dict) -> str:
    """Render an error-solution record as display text."""
    out = [
        f"[{es.get('error_type', 'unknown')}] "
        f"{es.get('error_message', es.get('error_signature', 'No message'))[:80]}",
        f" Success count: {es.get('success_count', 0)}",
    ]

    solution = es.get('solution')
    if solution:
        shown = solution[:200] + "..." if len(solution) > 200 else solution
        out.append(f" Solution: {shown}")

    out.append("-" * 60)
    return "\n".join(out)

# =============================================================================
# J.32: Session Log Query Functions
# =============================================================================

def _get_sessions_db():
    """Open sessions.db (WAL mode, Row factory); exit(1) if the file is missing."""
    import sqlite3
    location = os.path.expanduser('~/PROJECTS/.coditect-data/context-storage/sessions.db')
    if not os.path.exists(location):
        print(f"Error: sessions.db not found at {location}")
        sys.exit(1)
    connection = sqlite3.connect(location, timeout=10)
    connection.execute("PRAGMA journal_mode=WAL")
    connection.row_factory = sqlite3.Row
    return connection

def _format_session_log_entry(row, show_content: bool = True, highlight: str = None) -> str: """Format a session_log_entries row for display.""" lines = [] heading = row['heading'] or '(no heading)' ts = row['entry_timestamp'] or '' task_ids = row['task_ids'] or '' project = row['project_id'] or '' log_date = row['log_date'] or ''

# Header line
header_parts = []
if ts:
header_parts.append(ts)
if task_ids:
header_parts.append(f"[{task_ids}]")
header_parts.append(heading)
lines.append(" ".join(header_parts))

# Metadata line
meta = []
if project:
meta.append(f"Project: {project}")
if log_date:
meta.append(f"Date: {log_date}")
if row['author']:
meta.append(f"Author: {row['author']}")
if meta:
lines.append(" " + " | ".join(meta))

# Content preview
if show_content and row['content']:
content = row['content']
if highlight:
# Simple highlight: wrap matches in >>..<<
import re as _re
for term in highlight.split():
term_clean = term.strip('"')
if term_clean:
pattern = _re.compile(_re.escape(term_clean), _re.IGNORECASE)
content = pattern.sub(f">>{term_clean}<<", content)
# Show first 300 chars
preview = content[:300].replace('\n', ' ').strip()
if len(content) > 300:
preview += "..."
lines.append(f" {preview}")

lines.append("")
return "\n".join(lines)

def _apply_project_filter(args) -> str: """Build SQL WHERE clause for project filtering.""" project_id = getattr(args, 'filter_project', None) or os.environ.get('CODITECT_PROJECT') if project_id and not getattr(args, 'all_projects', False): if getattr(args, 'project_only', False): return f"AND project_id = '{project_id}'" else: return f"AND (project_id = '{project_id}' OR project_id IS NULL)" return ""

def _session_log_stats(args):
    """Print indexing statistics for the session_log_entries table.

    Emits JSON when args.json is set, otherwise a human-readable report.
    Prints a hint and returns early when the table has not been created.
    """
    conn = _get_sessions_db()
    cur = conn.cursor()

    # Bail out gracefully if the index table was never built.
    cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='session_log_entries'")
    if cur.fetchone() is None:
        print("No session log entries found. Run /cx to index session logs.")
        conn.close()
        return

    def scalar(sql):
        # Run a single-value aggregate query and return the value.
        cur.execute(sql)
        return cur.fetchone()[0]

    if args.json:
        total = scalar("SELECT COUNT(*) FROM session_log_entries")
        cur.execute("SELECT project_id, COUNT(*) as cnt FROM session_log_entries GROUP BY project_id ORDER BY cnt DESC")
        by_project = {r[0]: r[1] for r in cur.fetchall()}
        cur.execute("SELECT MIN(log_date), MAX(log_date) FROM session_log_entries")
        earliest, latest = cur.fetchone()
        stats = {
            'total_entries': total,
            'by_project': by_project,
            'date_range': {'from': earliest, 'to': latest},
            'files_indexed': scalar("SELECT COUNT(DISTINCT source_file) FROM session_log_entries"),
            'with_task_ids': scalar("SELECT COUNT(*) FROM session_log_entries WHERE task_ids IS NOT NULL AND task_ids != ''"),
            'with_timestamps': scalar("SELECT COUNT(*) FROM session_log_entries WHERE entry_timestamp IS NOT NULL"),
        }
        print(json.dumps(stats, indent=2))
    else:
        rule = "=" * 60
        print("\n" + rule)
        print("SESSION LOG INDEX STATISTICS (J.32)")
        print(rule)

        total = scalar("SELECT COUNT(*) FROM session_log_entries")
        print(f"\nTotal entries: {total:,}")

        files = scalar("SELECT COUNT(DISTINCT source_file) FROM session_log_entries")
        print(f"Files indexed: {files}")

        cur.execute("SELECT MIN(log_date), MAX(log_date) FROM session_log_entries")
        earliest, latest = cur.fetchone()
        print(f"Date range: {earliest} to {latest}")

        print("\nBy Project:")
        cur.execute("SELECT project_id, COUNT(*) as cnt FROM session_log_entries GROUP BY project_id ORDER BY cnt DESC")
        for pid, cnt in cur.fetchall():
            print(f" {pid or '(global)'}: {cnt:,} entries")

        def pct(part):
            # Integer percentage, guarding against an empty table.
            return part * 100 // total if total else 0

        with_tasks = scalar("SELECT COUNT(*) FROM session_log_entries WHERE task_ids IS NOT NULL AND task_ids != ''")
        print(f"\nWith task IDs: {with_tasks:,} ({pct(with_tasks)}%)")

        with_ts = scalar("SELECT COUNT(*) FROM session_log_entries WHERE entry_timestamp IS NOT NULL")
        print(f"With timestamps: {with_ts:,} ({pct(with_ts)}%)")

        with_author = scalar("SELECT COUNT(*) FROM session_log_entries WHERE author IS NOT NULL")
        print(f"With author: {with_author:,} ({pct(with_author)}%)")

    conn.close()

def _session_log_search(args):
    """Full-text (FTS5) search over session log entries.

    With a query string, matches against the session_log_fts index; with an
    empty query, lists the most recent entries. Honors project filtering and
    args.limit, and prints JSON or formatted text depending on args.json.
    """
    conn = _get_sessions_db()
    cur = conn.cursor()
    query = getattr(args, 'session_logs', '') or ''
    scope_clause = _apply_project_filter(args)

    if query:
        # Quote bare terms containing hyphens so FTS5 does not treat the
        # hyphen as an operator (e.g. "ADR-173").
        needs_quoting = '-' in query and '"' not in query
        fts_query = f'"{query}"' if needs_quoting else query
        cur.execute(f"""
            SELECT sle.* FROM session_log_entries sle
            WHERE sle.id IN (
                SELECT rowid FROM session_log_fts WHERE session_log_fts MATCH ?
            ) {scope_clause}
            ORDER BY sle.log_date DESC, sle.entry_timestamp DESC
            LIMIT ?
        """, (fts_query, args.limit))
    else:
        # No query — list everything, most recent first.
        cur.execute(f"""
            SELECT * FROM session_log_entries
            WHERE 1=1 {scope_clause}
            ORDER BY log_date DESC, entry_timestamp DESC
            LIMIT ?
        """, (args.limit,))

    rows = cur.fetchall()

    if args.json:
        print(json.dumps([dict(zip(r.keys(), r)) for r in rows], indent=2))
    else:
        rule = '=' * 60
        print(f"\n{rule}")
        if query:
            print(f'Session Log Search: "{query}"')
        else:
            print("Session Log Entries (Recent)")
        print(rule)
        print(f"Results: {len(rows)}\n")
        for r in rows:
            print(_format_session_log_entry(r, highlight=query))

    conn.close()

def _session_log_task_search(args):
    """List session log entries whose task_ids column mentions a task ID.

    Uses a substring LIKE match (e.g. "H.13.9"), newest entries first,
    honoring project filtering and args.limit.
    """
    conn = _get_sessions_db()
    cur = conn.cursor()
    task_id = args.session_log_task
    scope_clause = _apply_project_filter(args)

    cur.execute(f"""
        SELECT * FROM session_log_entries
        WHERE task_ids LIKE ? {scope_clause}
        ORDER BY log_date DESC, entry_timestamp DESC
        LIMIT ?
    """, (f"%{task_id}%", args.limit))

    rows = cur.fetchall()

    if args.json:
        print(json.dumps([dict(zip(r.keys(), r)) for r in rows], indent=2))
    else:
        rule = '=' * 60
        print(f"\n{rule}")
        print(f"Session Log Entries for Task: {task_id}")
        print(rule)
        print(f"Results: {len(rows)}\n")
        for r in rows:
            print(_format_session_log_entry(r, highlight=task_id))

    conn.close()

def _session_log_date_filter(args):
    """List session log entries recorded on a single date (YYYY-MM-DD).

    Entries print in chronological order within the day, honoring project
    filtering and args.limit.
    """
    conn = _get_sessions_db()
    cur = conn.cursor()
    target_date = args.session_log_date
    scope_clause = _apply_project_filter(args)

    cur.execute(f"""
        SELECT * FROM session_log_entries
        WHERE log_date = ? {scope_clause}
        ORDER BY entry_timestamp ASC
        LIMIT ?
    """, (target_date, args.limit))

    rows = cur.fetchall()

    if args.json:
        print(json.dumps([dict(zip(r.keys(), r)) for r in rows], indent=2))
    else:
        rule = '=' * 60
        print(f"\n{rule}")
        print(f"Session Log Entries for Date: {target_date}")
        print(rule)
        print(f"Results: {len(rows)}\n")
        for r in rows:
            print(_format_session_log_entry(r))

    conn.close()

def _session_log_range_filter(args):
    """List session log entries within an inclusive date range.

    args.session_log_range supplies the (FROM, TO) dates; newest entries
    first, honoring project filtering and args.limit.
    """
    conn = _get_sessions_db()
    cur = conn.cursor()
    from_date, to_date = args.session_log_range
    scope_clause = _apply_project_filter(args)

    cur.execute(f"""
        SELECT * FROM session_log_entries
        WHERE log_date BETWEEN ? AND ? {scope_clause}
        ORDER BY log_date DESC, entry_timestamp DESC
        LIMIT ?
    """, (from_date, to_date, args.limit))

    rows = cur.fetchall()

    if args.json:
        print(json.dumps([dict(zip(r.keys(), r)) for r in rows], indent=2))
    else:
        rule = '=' * 60
        print(f"\n{rule}")
        print(f"Session Log Entries: {from_date} to {to_date}")
        print(rule)
        print(f"Results: {len(rows)}\n")
        for r in rows:
            print(_format_session_log_entry(r))

    conn.close()

def _session_log_recent(args):
    """Show the N most recent session log entries (N = args.session_log_recent),
    honoring project filtering."""
    conn = _get_sessions_db()
    cur = conn.cursor()
    count = args.session_log_recent
    scope_clause = _apply_project_filter(args)

    cur.execute(f"""
        SELECT * FROM session_log_entries
        WHERE 1=1 {scope_clause}
        ORDER BY log_date DESC, entry_timestamp DESC
        LIMIT ?
    """, (count,))

    rows = cur.fetchall()

    if args.json:
        print(json.dumps([dict(zip(r.keys(), r)) for r in rows], indent=2))
    else:
        rule = '=' * 60
        print(f"\n{rule}")
        print(f"Most Recent {count} Session Log Entries")
        print(rule)
        print(f"Results: {len(rows)}\n")
        for r in rows:
            print(_format_session_log_entry(r))

    conn.close()

def main(): parser = argparse.ArgumentParser( description='Context Query - ADR-118 Four-Tier Memory System', formatter_class=argparse.RawDescriptionHelpFormatter, epilog=""" DATABASES (ADR-118 Four-Tier Architecture): Tier 4: projects.db - Registered projects, project embeddings (regenerable) Tier 3: sessions.db - Messages, token economics, tool analytics Tier 2: org.db - Decisions, skill learnings, error solutions Tier 1: platform.db - Component registry (read-only)

EXAMPLES: %(prog)s "error" # Search messages %(prog)s --recent 20 # Last 20 messages %(prog)s --decisions # View decisions %(prog)s --errors "TypeError" # Search error solutions %(prog)s --learnings # View skill learnings %(prog)s --tokens # View token economics (Tier 3) %(prog)s --token-stats # Aggregated token statistics %(prog)s --tools # View tool analytics (Tier 3) %(prog)s --tool-stats # Tool usage statistics by tool %(prog)s --activities # View activity associations %(prog)s --stats # Database statistics

CONTEXT GRAPH (ADR-151 Phase 6): %(prog)s --graph "task" # Build context graph for task %(prog)s --graph-stats # Knowledge graph statistics %(prog)s --graph "task" --graph-strategy semantic --graph-budget 8000 %(prog)s --graph "task" --graph-format json --graph-persist

QUERY TEMPLATES (ADR-154): %(prog)s --queries # List all query templates %(prog)s --template # Execute named template (-Q) %(prog)s --query-agents # Templates for agent type %(prog)s --query-category # Templates by category %(prog)s --create-query # Create template interactively (J.4.8.5) %(prog)s --create-query quick # Quick template creation %(prog)s --create-query clone # Clone existing template

GRAPH TRAVERSAL (J.4.7.3): %(prog)s --traversal bfs --start-node # BFS from node %(prog)s --traversal dfs --start-node # DFS from node %(prog)s --traversal semantic --start-node # Relevance-based traversal %(prog)s --traversal policy_first --start-node # Policy/ADR-first traversal %(prog)s --traversal shortest --start-node --end-node # Find path %(prog)s --traversal bfs --start-node --traversal-depth 5 --limit 100 """ )

parser.add_argument('query', nargs='?', help='Search query for messages')
parser.add_argument('--recent', '-r', type=int, metavar='N',
help='Show N most recent messages')
parser.add_argument('--decisions', '-d', action='store_true',
help='Show architectural decisions')
parser.add_argument('--decision-type', metavar='TYPE',
help='Filter decisions by type')
parser.add_argument('--errors', '-e', nargs='?', const='', metavar='QUERY',
help='Show error solutions (optionally filtered)')
parser.add_argument('--learnings', '-l', action='store_true',
help='Show skill learnings')
parser.add_argument('--pattern-type', metavar='TYPE',
help='Filter learnings by pattern type')
parser.add_argument('--components', '-c', nargs='?', const='', metavar='QUERY',
help='Search components in platform.db (optionally filtered)')
parser.add_argument('--component-type', metavar='TYPE',
help='Filter components by type (agent, skill, command, script, hook)')
parser.add_argument('--project', '-p', action='store_true',
help='Show project-specific decisions from project.db')
parser.add_argument('--project-context', action='store_true',
help='Show project context from project.db')
parser.add_argument('--projects', nargs='?', const='', metavar='QUERY',
help='List registered projects from projects.db (Tier 4)')

# ADR-156: Project-scoped context filtering
parser.add_argument('--filter-project', metavar='PROJECT_ID',
help='Filter results by project ID (e.g., CUST-avivatec-fpa). '
'Also reads from CODITECT_PROJECT env var.')
parser.add_argument('--project-only', action='store_true',
help='Exclude global scope data (only show project-specific results)')
parser.add_argument('--all-projects', action='store_true',
help='Show results from all projects (ignore project filter)')
parser.add_argument('--project-stats', action='store_true',
help='Show context statistics for current project (ADR-156)')
# ADR-159: Standard scope flags
parser.add_argument('--team', metavar='TEAM_ID',
help='Filter by team scope (ADR-159)')
parser.add_argument('--tenant', metavar='TENANT_ID',
help='Filter by tenant scope (ADR-159, implies all tenant projects)')

# J.16.2: Tier 4 Project Queries (ADR-118)
parser.add_argument('--project-search', metavar='PROJECT_NAME',
help='Search within a specific registered project (J.16.2.2). '
'Filters messages/decisions by project_id.')
parser.add_argument('--project-tree', metavar='PROJECT_NAME',
help='Search a project and its child projects (J.16.2.4). '
'Uses parent_project_id relationships for monorepos/submodules.')

# J.15.5: Project Code/Document Search (uses project_embeddings)
parser.add_argument('--code-search', metavar='PROJECT_NAME',
help='Search project source code/docs (J.15.5.1). '
'Uses semantic embeddings from project_embedder.py.')
parser.add_argument('--code-tree', metavar='PROJECT_NAME',
help='Search project + children source code (J.15.5.3). '
'Searches across monorepo/submodule tree.')
parser.add_argument('--all-code', action='store_true',
help='Search source code across ALL projects (J.15.5.2). '
'Cross-project code search.')
parser.add_argument('--code-mode', choices=['fts', 'semantic', 'hybrid'],
default='hybrid',
help='Code search mode: fts (text), semantic (vector), hybrid (default)')
parser.add_argument('--create-code-fts', action='store_true',
help='Create FTS5 index for project_embeddings (admin)')

# J.16.1: Tier 3 Analytics Queries
parser.add_argument('--tokens', '-t', nargs='?', const='', metavar='SESSION_ID',
help='Show token economics (Tier 3). Optionally filter by session ID.')
parser.add_argument('--token-stats', action='store_true',
help='Show aggregated token statistics')
parser.add_argument('--tools', nargs='?', const='', metavar='TOOL_NAME',
help='Show tool analytics (Tier 3). Optionally filter by tool name.')
parser.add_argument('--tool-stats', action='store_true',
help='Show tool usage statistics by tool')
parser.add_argument('--activities', '-a', nargs='?', const='', metavar='COMPONENT_TYPE',
help='Show component invocations (Tier 3). Filter by type: skill, agent, command, hook.')
parser.add_argument('--activity-stats', action='store_true',
help='Show component invocation statistics by type')
parser.add_argument('--session-filter', metavar='SESSION_ID',
help='Filter --tokens, --tools, or --activities by session ID')

# J.16.3: Semantic Search Migration (MCP Semantic Search Server)
parser.add_argument('--semantic', metavar='QUERY',
help='Semantic search using vector embeddings (J.16.3.1). '
'Finds conceptually similar messages.')
parser.add_argument('--hybrid', metavar='QUERY',
help='Hybrid search combining FTS5 + vector embeddings with RRF fusion (J.16.3.1). '
'Best for general-purpose search.')
parser.add_argument('--recall', metavar='TOPIC',
help='RAG-style recall (J.16.3.2). Similar to --hybrid but optimized for '
'context injection. Returns structured context for agent prompts.')
parser.add_argument('--fts-weight', type=float, default=0.4,
help='FTS5 weight in hybrid search (default: 0.4)')
parser.add_argument('--vector-weight', type=float, default=0.6,
help='Vector weight in hybrid search (default: 0.6)')
parser.add_argument('--similarity-threshold', type=float, default=0.3,
help='Minimum similarity threshold for semantic search (default: 0.3)')
parser.add_argument('--semantic-help', action='store_true',
help='Show semantic search help')

# J.25.5: ADR-151 Phase 6 - Context Graph Integration (CP-40)
parser.add_argument('--graph', '-g', nargs='?', const='', metavar='TASK',
help='Build context graph for task description (ADR-151). '
'Uses semantic search to find relevant nodes.')
parser.add_argument('--graph-stats', action='store_true',
help='Show knowledge graph statistics (kg_nodes, kg_edges in org.db)')
parser.add_argument('--graph-strategy', choices=['anchor', 'semantic', 'policy_first'],
default='semantic',
help='Context graph seed selection strategy (default: semantic)')
parser.add_argument('--graph-budget', type=int, default=4000,
help='Token budget for context graph (default: 4000)')
parser.add_argument('--graph-depth', type=int, default=3,
help='Maximum BFS depth for graph expansion (default: 3)')
parser.add_argument('--graph-persist', action='store_true',
help='Persist built context graph to sessions.db')
parser.add_argument('--graph-format', choices=['markdown', 'json', 'text'],
default='markdown',
help='Output format for context graph (default: markdown)')

# J.25.6.15: View stored context graph projections
parser.add_argument('--context-graph', nargs='?', const='list', metavar='GRAPH_ID',
help='View stored context graph projections. No arg = list all, '
'with GRAPH_ID = show details for that graph.')

# J.4.8.3: ADR-154 Query Templates
parser.add_argument('--queries', action='store_true',
help='List available query templates (ADR-154)')
parser.add_argument('--template', '-Q', dest='template', metavar='TEMPLATE',
help='Execute a named query template (e.g., architecture-context)')
parser.add_argument('--query-agents', metavar='AGENT_TYPE',
help='List templates for an agent type (e.g., senior-architect)')
parser.add_argument('--query-category', metavar='CATEGORY',
help='List templates by category (e.g., agent-context, workflow)')

# J.4.8.5: Interactive Query Template Creator (ADR-154)
parser.add_argument('--create-query', nargs='?', const='interactive', metavar='MODE',
help='Create new query template. Modes: interactive (default), quick, clone')

# J.4.7.1: ADR-149 Phase 1 Enhanced Flag Syntax
parser.add_argument('--where', '-W', metavar='EXPR',
help='Filter results with expression (e.g., "confidence > 0.8 AND decision_type = \'api\'")')

# J.4.7.2: ADR-149 Phase 1 GROUP BY and SORT
parser.add_argument('--group-by', '-G', metavar='FIELDS',
help='Group results by fields (e.g., "model_name,week" or "session_id,day")')
parser.add_argument('--sort', '-S', metavar='FIELDS',
help='Sort results by fields with direction (e.g., "cost_total_usd_desc,created_at_asc")')

# J.4.7.3: ADR-149 Phase 1 Knowledge Graph Traversal
parser.add_argument('--traversal', '-T', metavar='STRATEGY',
help='Traverse knowledge graph with strategy: bfs, dfs, shortest, policy_first, semantic')
parser.add_argument('--start-node', metavar='NODE',
help='Start node for --traversal (node ID or name)')
parser.add_argument('--end-node', metavar='NODE',
help='End node for --traversal shortest path')
parser.add_argument('--traversal-depth', type=int, default=3,
help='Maximum traversal depth (default: 3)')

# J.4.7.4: ADR-149 Phase 1 Field-Specific Search
parser.add_argument('--field', '-F', metavar='SPEC',
help='Field-specific search (e.g., "role:assistant,content:*error*")')
parser.add_argument('--field-help', action='store_true',
help='Show valid fields for --field option')

# J.4.7.5: ADR-149 Phase 1 Cross-Table Correlation
parser.add_argument('--correlate', '-C', metavar='TABLES',
help='Correlate data across tables (e.g., "messages,token_economics" or shortcut name)')
parser.add_argument('--by', metavar='KEYS',
help='Join key(s) for --correlate (e.g., "session_id" or "session_id,task_id")')
parser.add_argument('--correlate-help', action='store_true',
help='Show valid tables and join keys for --correlate option')

# J.4.4.1: Boolean Search Operators (ADR-149)
parser.add_argument('--boolean-help', action='store_true',
help='Show Boolean search operator syntax (AND, OR, NOT, phrases, wildcards)')

# J.4.5: Result Ranking (ADR-149)
parser.add_argument('--rank', action='store_true', default=True,
help='Enable result ranking (default: enabled for searches)')
parser.add_argument('--no-rank', action='store_true',
help='Disable result ranking (use raw chronological order)')
parser.add_argument('--rank-weights', metavar='R,T,S',
help='Custom ranking weights: relevance,recency,source (must sum to 1.0). '
'Default: 0.5,0.3,0.2')
parser.add_argument('--show-scores', action='store_true',
help='Include scoring breakdown in output (_score, _relevance, _recency)')
parser.add_argument('--rank-help', action='store_true',
help='Show result ranking options and factors')

# J.4.6: Aggregations (ADR-149)
parser.add_argument('--by-hour', action='store_true',
help='Aggregate results by hour (J.4.6.1)')
parser.add_argument('--by-day', action='store_true',
help='Aggregate results by day (J.4.6.1)')
parser.add_argument('--by-week', action='store_true',
help='Aggregate results by ISO week (J.4.6.1)')
parser.add_argument('--by-month', action='store_true',
help='Aggregate results by month (J.4.6.1)')
parser.add_argument('--by-year', action='store_true',
help='Aggregate results by year (J.4.6.1)')
parser.add_argument('--topics', action='store_true',
help='Cluster results by topic keywords (J.4.6.2)')
parser.add_argument('--min-cluster', type=int, default=2,
help='Minimum messages per topic cluster (default: 2)')
parser.add_argument('--max-clusters', type=int, default=20,
help='Maximum topic clusters to return (default: 20)')
parser.add_argument('--trending', action='store_true',
help='Show trending topics (rising/falling) (J.4.6.3)')
parser.add_argument('--window', type=int, default=7,
help='Days in trend comparison window (default: 7)')
parser.add_argument('--growth-threshold', type=float, default=50.0,
help='Minimum %% change to be rising/falling (default: 50)')
parser.add_argument('--aggregation-help', action='store_true',
help='Show aggregation options help')

# J.8.3: Session Linking - Cross-Session Context Continuity
parser.add_argument('--link', nargs=3, metavar=('A', 'B', 'REL'),
help='Link sessions: A --[REL]--> B (relations: continues, references, supersedes, fixes)')
parser.add_argument('--links', metavar='SESSION',
help='Show all sessions linked to SESSION')
parser.add_argument('--chain', metavar='SESSION',
help='Show the full session chain (all continuations)')
parser.add_argument('--auto-link', action='store_true',
help='Auto-detect and create continuation links')
parser.add_argument('--link-stats', action='store_true',
help='Show session link statistics')
parser.add_argument('--min-confidence', type=float, default=0.5,
help='Minimum confidence for auto-link (default: 0.5)')
parser.add_argument('--link-hours', type=int, default=24,
help='Hours to search for continuations (default: 24)')
parser.add_argument('--link-help', action='store_true',
help='Show session linking help')
parser.add_argument('--dry-run', action='store_true',
help='Preview auto-link without creating links')

# J.8.4: Session Resume Optimization
parser.add_argument('--resume', metavar='SESSION',
help='Generate optimized resume context for SESSION')
parser.add_argument('--resume-quick', action='store_true',
help='Generate minimal resume (fewer tokens)')
parser.add_argument('--resume-chain', type=int, default=5,
help='Maximum chain depth for resume (default: 5)')
parser.add_argument('--resume-tokens', type=int, default=4000,
help='Token budget for resume output (default: 4000)')
parser.add_argument('--resume-help', action='store_true',
help='Show session resume help')

# J.32: Session Log Queries
parser.add_argument('--session-logs', nargs='?', const='', metavar='QUERY',
help='Search session log entries (J.32). FTS5 full-text search.')
parser.add_argument('--session-log-task', metavar='TASK_ID',
help='Find session log entries by task ID (e.g., H.13.9)')
parser.add_argument('--session-log-date', metavar='DATE',
help='Filter session log entries by date (YYYY-MM-DD)')
parser.add_argument('--session-log-range', nargs=2, metavar=('FROM', 'TO'),
help='Filter session log entries by date range (YYYY-MM-DD YYYY-MM-DD)')
parser.add_argument('--session-log-recent', type=int, metavar='N',
help='Show N most recent session log entries')
parser.add_argument('--session-log-stats', action='store_true',
help='Show session log indexing statistics')

parser.add_argument('--stats', '-s', action='store_true',
help='Show database statistics')
parser.add_argument('--limit', type=int, default=20,
help='Maximum results to show (default: 20)')
parser.add_argument('--json', action='store_true',
help='Output in JSON format')

args = parser.parse_args()

# ADR-156/159: Resolve project context using scope module
if not args.all_projects:
try:
from scripts.core.scope import resolve_scope
scope = resolve_scope(
project=args.filter_project,
team=getattr(args, 'team', None),
tenant=getattr(args, 'tenant', None),
)
if scope.project:
os.environ['CODITECT_PROJECT'] = scope.project
except ImportError:
# Fallback to legacy resolution
if args.filter_project:
os.environ['CODITECT_PROJECT'] = args.filter_project
elif not os.environ.get('CODITECT_PROJECT'):
try:
from scripts.core.paths import discover_project
detected_project = discover_project()
if detected_project:
os.environ['CODITECT_PROJECT'] = detected_project
except ImportError:
pass

# ==========================================================================
# ADR-156: Project-Scoped Context Statistics
# ==========================================================================
if args.project_stats:
try:
from scripts.core.project_context import get_project_context_stats, get_active_project
project_id = get_active_project()

if args.json:
stats = get_project_context_stats(project_id)
print(json.dumps(stats, indent=2))
else:
print("\n" + "=" * 60)
print("PROJECT CONTEXT STATISTICS (ADR-156)")
print("=" * 60)

stats = get_project_context_stats(project_id)
print(f"\nProject: {stats['project_id'] or '(none - global)'}")
print(f"\nContext Data:")
print(f" Decisions: {stats['decisions']:,}")
print(f" Learnings: {stats['learnings']:,}")
print(f" Error Solutions: {stats['error_solutions']:,}")
print(f" Messages: {stats['messages']:,}")

if not stats['project_id']:
print("\nTip: Set CODITECT_PROJECT or use --filter-project to see project-specific stats")

except ImportError as e:
print(f"Error: project_context module not available: {e}")
print("Check: scripts/core/project_context.py")
sys.exit(0)

# ==========================================================================
# J.32: Session Log Queries
# ==========================================================================
_sl_handled = False
if getattr(args, 'session_log_stats', False):
_sl_handled = True
_session_log_stats(args)
sys.exit(0)

if getattr(args, 'session_logs', None) is not None:
_sl_handled = True
_session_log_search(args)
sys.exit(0)

if getattr(args, 'session_log_task', None):
_sl_handled = True
_session_log_task_search(args)
sys.exit(0)

if getattr(args, 'session_log_date', None):
_sl_handled = True
_session_log_date_filter(args)
sys.exit(0)

if getattr(args, 'session_log_range', None):
_sl_handled = True
_session_log_range_filter(args)
sys.exit(0)

if getattr(args, 'session_log_recent', None):
_sl_handled = True
_session_log_recent(args)
sys.exit(0)

# ==========================================================================
# J.4.8.3: ADR-154 Query Templates
# ==========================================================================

# List all query templates
if args.queries:
if not QUERY_TEMPLATES_AVAILABLE:
print("Error: Query template registry not available.")
print("Check: scripts/context_graph/query_templates.py")
sys.exit(1)

try:
registry = QueryTemplateRegistry()
template_names = registry.list_all()
templates = [registry.get(name) for name in template_names]

if args.json:
print(json.dumps([
{
'name': t.name,
'version': t.version,
'description': t.description,
'category': t.category,
'agent_types': t.agent_types,
'track': t.track,
}
for t in templates if t
], indent=2))
else:
print("\n" + "=" * 60)
print("QUERY TEMPLATES (ADR-154)")
print("=" * 60)
if not templates:
print("\nNo templates found.")
print("Templates should be in: config/query-templates/")
else:
print(f"\nFound {len(templates)} templates:\n")
for t in templates:
if not t:
continue
agents = ', '.join(t.agent_types[:3])
if len(t.agent_types) > 3:
agents += f' (+{len(t.agent_types) - 3} more)'
print(f" {t.name}")
print(f" Category: {t.category}")
print(f" Agents: {agents}")
print(f" Track: {t.track or 'N/A'}")
print()
print("Usage: /cxq --template <template-name> (or -Q)")
print("=" * 60)
sys.exit(0)
except Exception as e:
print(f"Error listing templates: {e}")
sys.exit(1)

# List templates for a specific agent type
if args.query_agents:
if not QUERY_TEMPLATES_AVAILABLE:
print("Error: Query template registry not available.")
sys.exit(1)

try:
registry = QueryTemplateRegistry()
templates = registry.find_for_agent(args.query_agents)

if args.json:
print(json.dumps([t.name for t in templates], indent=2))
else:
print(f"\nTemplates for agent '{args.query_agents}':")
if not templates:
print(" (none found)")
else:
for t in templates:
print(f" - {t.name}: {t.description}")
sys.exit(0)
except Exception as e:
print(f"Error: {e}")
sys.exit(1)

# List templates by category
if args.query_category:
if not QUERY_TEMPLATES_AVAILABLE:
print("Error: Query template registry not available.")
sys.exit(1)

try:
registry = QueryTemplateRegistry()
templates = registry.find_by_category(args.query_category)

if args.json:
print(json.dumps([t.name for t in templates], indent=2))
else:
print(f"\nTemplates in category '{args.query_category}':")
if not templates:
print(" (none found)")
else:
for t in templates:
print(f" - {t.name}: {t.description}")
sys.exit(0)
except Exception as e:
print(f"Error: {e}")
sys.exit(1)

# Execute a named query template
if args.template:
if not QUERY_TEMPLATES_AVAILABLE:
print("Error: Query template registry not available.")
sys.exit(1)

if not CONTEXT_GRAPH_AVAILABLE:
print("Error: Context graph builder required for query templates.")
sys.exit(1)

try:
registry = QueryTemplateRegistry()
template = registry.get(args.template)

if not template:
print(f"Error: Template '{args.template}' not found.")
print("\nAvailable templates:")
for name in registry.list_all():
print(f" - {name}")
sys.exit(1)

print(f"\n[Executing template: {template.name}]")
print(f"Description: {template.description}")
print(f"Strategy: {template.expansion.strategy}")
print(f"Token budget: {template.pruning.token_budget}")
print()

# Build context graph using template parameters
with ContextGraphBuilder() as builder:
# Extract strategy and budget from template
strategy = template.expansion.strategy
budget = template.pruning.token_budget
depth = template.expansion.depth

# Build the context graph
graph = builder.build(
task_description=template.description,
seed_strategy=strategy,
token_budget=budget,
max_depth=depth
)

if args.json:
# Serialize graph to JSON
result = {
'template': template.name,
'description': template.description,
'node_count': len(graph.nodes) if graph else 0,
'edge_count': len(graph.edges) if graph else 0,
'nodes': [
{'id': n.id, 'type': n.node_type, 'name': n.name}
for n in (graph.nodes.values() if graph else [])
]
}
print(json.dumps(result, indent=2, default=str))
else:
# Format as markdown
if graph and graph.nodes:
nodes = list(graph.nodes.values())
print(f"\nFound {len(nodes)} relevant nodes:")
for node in nodes[:15]:
print(f" - [{node.node_type}] {node.name}")
if len(nodes) > 15:
print(f" ... and {len(nodes) - 15} more")
else:
print("\nNo relevant context found.")

sys.exit(0)
except Exception as e:
print(f"Error executing template: {e}")
import traceback
traceback.print_exc()
sys.exit(1)

# ==========================================================================
# J.4.8.5: ADR-154 - Interactive Query Template Creator
# ==========================================================================

if args.create_query:
# J.4.8.5: Query template creation — tail of the --create-query handler.
# NOTE(review): the enclosing `if args.create_query:` line precedes this
# chunk; indentation reconstructed from the mangled paste.
if not TEMPLATE_CREATOR_AVAILABLE:
    print("Error: Query template creator not available.")
    print("Check: scripts/context_graph/query_template_creator.py")
    sys.exit(1)

try:
    creator = QueryTemplateCreator()
    mode = args.create_query

    if mode == 'interactive':
        # Full guided flow; create_interactive() returns the saved path
        # or a falsy value when the user aborts.
        print("\n" + "=" * 60)
        print("INTERACTIVE QUERY TEMPLATE CREATOR (J.4.8.5)")
        print("=" * 60)
        result = creator.create_interactive()
        if result:
            print(f"\n✅ Template saved to: {result}")
        else:
            print("\n❌ Template creation cancelled.")
    elif mode == 'quick':
        # Quick mode - prompt for minimal info
        print("\n" + "=" * 60)
        print("QUICK QUERY TEMPLATE CREATOR")
        print("=" * 60)
        name = input("Template name: ").strip()
        description = input("Description: ").strip()
        track = input("Track (A-N, O-AA, AB-AK) [optional]: ").strip() or None

        if name and description:
            result = creator.create_quick(
                name=name,
                description=description,
                track=track
            )
            print(f"\n✅ Template saved to: {result}")
        else:
            print("\n❌ Name and description required.")
            sys.exit(1)
    elif mode == 'clone':
        # Clone mode - copy existing template
        print("\n" + "=" * 60)
        print("CLONE EXISTING TEMPLATE")
        print("=" * 60)

        # List available templates (best effort — only when the registry
        # module imported successfully).
        if QUERY_TEMPLATES_AVAILABLE:
            registry = QueryTemplateRegistry()
            templates = registry.list_all()
            print("\nAvailable templates:")
            for t in templates[:20]:
                print(f" - {t}")
            if len(templates) > 20:
                print(f" ... and {len(templates) - 20} more")
            print()

        source = input("Source template name: ").strip()
        new_name = input("New template name: ").strip()

        if source and new_name:
            result = creator.create_from_existing(source, new_name)
            if result:
                print(f"\n✅ Template cloned to: {result}")
            else:
                print(f"\n❌ Failed to clone template '{source}'")
                sys.exit(1)
        else:
            print("\n❌ Source and new name required.")
            sys.exit(1)
    else:
        print(f"Error: Unknown mode '{mode}'")
        print("Valid modes: interactive, quick, clone")
        sys.exit(1)

    sys.exit(0)
except KeyboardInterrupt:
    # Ctrl-C at an input() prompt is a normal cancel, not an error.
    print("\n\nTemplate creation cancelled.")
    sys.exit(0)
except Exception as e:
    print(f"Error creating template: {e}")
    import traceback
    traceback.print_exc()
    sys.exit(1)

# ==========================================================================
# J.4.7.3: ADR-149 Phase 1 - Knowledge Graph Traversal
# ==========================================================================

if args.traversal:
if not TRAVERSAL_AVAILABLE:
print("Error: Traversal module not available.")
print("Check: scripts/context_graph/traversal.py")
sys.exit(1)

# Validate strategy
valid, error = validate_traversal_strategy(args.traversal)
if not valid:
print(f"Error: {error}")
sys.exit(1)

# Require start node
if not args.start_node:
print("Error: --traversal requires --start-node")
print("Usage: /cxq --traversal bfs --start-node <node_id_or_name>")
print("\nAvailable strategies: " + ", ".join(TRAVERSAL_STRATEGIES))
sys.exit(1)

try:
conn = get_org_db()

result = traverse_graph(
conn=conn,
start_node=args.start_node,
strategy=args.traversal,
max_depth=args.traversal_depth,
max_nodes=args.limit,
end_node=args.end_node,
)

conn.close()

if args.json:
print(format_traversal_result(result, format='json'))
else:
print(format_traversal_result(result, format='text'))

if result.success:
print(f"\nTraversed {len(result.nodes)} nodes, {len(result.edges)} edges")
print(f"Max depth reached: {result.depth}")

except Exception as e:
print(f"Error during traversal: {e}")
import traceback
traceback.print_exc()
sys.exit(1)
return

# ==========================================================================
# J.4.7.4: ADR-149 Phase 1 - Field-Specific Search Help
# ==========================================================================

if args.field_help:
    if not FIELD_PARSER_AVAILABLE:
        print("Error: Field parser not available.")
        print("Check: scripts/context_graph/field_parser.py")
        sys.exit(1)

    # Determine query type from other flags so the help can be
    # tailored to the table the user is about to query.
    query_type = None
    if args.recent or args.query:
        query_type = 'messages'
    elif args.decisions:
        query_type = 'decisions'
    elif args.errors:
        query_type = 'errors'
    elif args.tokens is not None:
        query_type = 'tokens'
    elif args.tools is not None:
        query_type = 'tools'
    elif args.components is not None:
        query_type = 'components'
    elif args.learnings:
        query_type = 'skills'

    print(format_field_help(query_type))
    sys.exit(0)

# ==========================================================================
# J.4.7.5: ADR-149 Phase 1 - Cross-Table Correlation
# ==========================================================================

if args.correlate_help:
    if not CORRELATOR_AVAILABLE:
        print("Error: Correlator not available.")
        print("Check: scripts/context_graph/correlator.py")
        sys.exit(1)

    print(format_correlation_help())
    sys.exit(0)

# ==========================================================================
# J.4.4.1: Boolean Search Help (ADR-149)
# ==========================================================================

if args.boolean_help:
    if BOOLEAN_PARSER_AVAILABLE:
        print(BOOLEAN_SEARCH_HELP)
    else:
        # Static fallback help when the parser module is missing.
        print("Boolean Search Syntax")
        print("=" * 40)
        print()
        print("OPERATORS:")
        print(" AND Require both terms")
        print(" OR Match either term")
        print(" NOT Exclude term")
        print()
        print("EXAMPLES:")
        print(' /cxq "error AND authentication"')
        print(' /cxq "deploy OR kubernetes"')
        print(' /cxq "security NOT test"')
        print()
        print("Note: Full parser not available.")
        print("Check: scripts/context_graph/boolean_parser.py")
    sys.exit(0)

# ==========================================================================
# J.4.5: Result Ranking Help (ADR-149)
# ==========================================================================

if args.rank_help:
    if RESULT_RANKER_AVAILABLE:
        print(get_ranking_help())
    else:
        # Static fallback help when the ranker module is missing.
        print("Result Ranking (J.4.5)")
        print("=" * 40)
        print()
        print("FACTORS:")
        print(" Relevance (50%) BM25-style term frequency")
        print(" Recency (30%) Recent results boosted")
        print(" Source (20%) Decisions rank higher than messages")
        print()
        print("OPTIONS:")
        print(" --rank Enable ranking (default)")
        print(" --no-rank Disable ranking")
        print(" --rank-weights R,T,S Custom weights")
        print(" --show-scores Show scoring breakdown")
        print()
        print("Note: Full ranker not available.")
        print("Check: scripts/context_graph/result_ranker.py")
    sys.exit(0)

# ==========================================================================
# J.4.6: Aggregation Help (ADR-149)
# ==========================================================================

if args.aggregation_help:
    if AGGREGATOR_AVAILABLE:
        print(get_aggregation_help())
    else:
        # Static fallback help when the aggregator module is missing.
        print("Aggregation Options (J.4.6)")
        print("=" * 40)
        print()
        print("TIME AGGREGATIONS:")
        print(" --by-hour Group by hour")
        print(" --by-day Group by day")
        print(" --by-week Group by ISO week")
        print(" --by-month Group by month")
        print(" --by-year Group by year")
        print()
        print("TOPIC CLUSTERING:")
        print(" --topics Cluster by keywords")
        print(" --min-cluster N Min messages per cluster")
        print(" --max-clusters N Max clusters to show")
        print()
        print("TREND ANALYSIS:")
        print(" --trending Show rising/falling topics")
        print(" --window N Days in comparison window")
        print(" --growth-threshold N Min % change")
        print()
        print("Note: Full aggregator not available.")
        print("Check: scripts/context_graph/aggregator.py")
    sys.exit(0)

# ==========================================================================
# J.8.3: Session Linking - Cross-Session Context Continuity
# ==========================================================================

if args.link_help:
    if SESSION_LINKER_AVAILABLE:
        print(format_link_help())
    else:
        # Static fallback help when the linker module is missing.
        print("Session Linking Help (J.8.3)")
        print("=" * 40)
        print()
        print("COMMANDS:")
        print(" --link A B REL Create link: A --[REL]--> B")
        print(" --links SESSION Show links for session")
        print(" --chain SESSION Show full session chain")
        print(" --auto-link Auto-detect continuations")
        print(" --link-stats Show link statistics")
        print()
        print("RELATIONSHIPS: continues, references, supersedes, fixes")
        print()
        print("Note: Session linker not available.")
        print("Check: scripts/context_graph/session_linker.py")
    sys.exit(0)

# J.8.3 handlers: create/list/chain/auto-detect/stats for session links.
if args.link:
    if not SESSION_LINKER_AVAILABLE:
        print("Error: Session linker not available.")
        print("Check: scripts/context_graph/session_linker.py")
        sys.exit(1)

    # --link takes exactly three values: source, target, relationship.
    session_a, session_b, relationship = args.link
    sessions_db = SESSIONS_DB_PATH
    linker = SessionLinker(sessions_db)
    success, message = linker.create_link(session_a, session_b, relationship)
    print(message)
    sys.exit(0 if success else 1)

if args.links:
    if not SESSION_LINKER_AVAILABLE:
        print("Error: Session linker not available.")
        sys.exit(1)

    sessions_db = SESSIONS_DB_PATH
    linker = SessionLinker(sessions_db)
    links = linker.get_links(args.links)
    print(format_links_output(links, json_output=args.json))
    sys.exit(0)

if args.chain:
    if not SESSION_LINKER_AVAILABLE:
        print("Error: Session linker not available.")
        sys.exit(1)

    sessions_db = SESSIONS_DB_PATH
    linker = SessionLinker(sessions_db)
    chain = linker.get_full_chain(args.chain)
    print(format_chain_output(chain, linker, json_output=args.json))
    sys.exit(0)

if args.auto_link:
    if not SESSION_LINKER_AVAILABLE:
        print("Error: Session linker not available.")
        sys.exit(1)

    sessions_db = SESSIONS_DB_PATH
    linker = SessionLinker(sessions_db)
    # dry_run reports candidate links without writing them.
    results = linker.auto_link(
        min_confidence=args.min_confidence,
        hours=args.link_hours,
        dry_run=args.dry_run
    )

    if args.json:
        print(json.dumps(results, indent=2))
    else:
        if not results:
            print(f"No session continuations detected (threshold: {args.min_confidence})")
        else:
            mode = "Would create" if args.dry_run else "Created"
            print(f"Session Linking Results ({mode}):")
            print("=" * 60)
            for r in results:
                # ✓ created, ○ dry-run candidate, ✗ failed to create
                status = "✓" if r.get('created') else "○" if args.dry_run else "✗"
                print(f"{status} {r['session_a'][:12]}... --[continues]--> {r['session_b'][:12]}...")
                print(f" Confidence: {r['confidence']:.0%}")
                if r.get('message'):
                    print(f" {r['message']}")
                print()
    sys.exit(0)

if args.link_stats:
    if not SESSION_LINKER_AVAILABLE:
        print("Error: Session linker not available.")
        sys.exit(1)

    sessions_db = SESSIONS_DB_PATH
    linker = SessionLinker(sessions_db)
    stats = linker.get_link_stats()

    if args.json:
        print(json.dumps(stats, indent=2))
    else:
        print("Session Link Statistics")
        print("=" * 40)
        print(f"Total Links: {stats['total_links']}")
        print(f"Unique Sources: {stats['unique_source_sessions']}")
        print(f"Unique Targets: {stats['unique_target_sessions']}")
        print()
        print("By Relationship:")
        for rel, count in stats.get('by_relationship', {}).items():
            print(f" {rel}: {count}")
    sys.exit(0)

# J.8.4: Session Resume Optimization handlers
if args.resume_help:
    if SESSION_RESUME_AVAILABLE:
        print(format_resume_help())
    else:
        # Static fallback help when the optimizer module is missing.
        print("Session Resume Help")
        print("=" * 40)
        print("--resume SESSION Generate optimized resume context")
        print("--resume-quick Generate minimal resume")
        print("--resume-chain N Maximum chain depth (default: 5)")
        print("--resume-tokens N Token budget (default: 4000)")
        print()
        print("Note: Full resume features require session_resume.py")
    sys.exit(0)

if args.resume:
    if not SESSION_RESUME_AVAILABLE:
        print("Error: Session resume optimizer not available.")
        print("Check: scripts/context_graph/session_resume.py")
        sys.exit(1)

    optimizer = SessionResumeOptimizer(
        sessions_db_path=SESSIONS_DB_PATH,
        org_db_path=ORG_DB_PATH
    )

    if args.resume_quick:
        # Minimal summary, no chain walking or token budgeting.
        output = optimizer.get_quick_resume(args.resume)
        print(output)
    else:
        resume = optimizer.generate_resume_context(
            session_id=args.resume,
            max_chain_depth=args.resume_chain,
            token_budget=args.resume_tokens
        )

        if args.json:
            print(format_resume_output(resume, "json"))
        else:
            print(format_resume_output(resume, "markdown"))
    sys.exit(0)

# J.4.7.5: Cross-table correlation — joins sessions.db tables (and org.db
# when referenced) on a shared key.
if args.correlate:
    if not CORRELATOR_AVAILABLE:
        print("Error: Correlator not available.")
        print("Check: scripts/context_graph/correlator.py")
        sys.exit(1)

    # Validate --by is provided; a comma means an explicit two-table
    # spec, which always needs a join key.
    join_key = args.by or ""
    if not join_key and "," in args.correlate:
        # No shortcut, must have explicit join key
        print("Error: --by <join_key> required for --correlate")
        print("Example: --correlate 'messages,token_economics' --by session_id")
        print("Run: --correlate-help for valid tables and keys")
        sys.exit(1)

    try:
        correlation = parse_correlation(args.correlate, join_key)
    except ValueError as e:
        print(f"Error parsing correlation: {e}")
        print("Run: --correlate-help for valid tables and keys")
        sys.exit(1)

    # Get database connections; org.db only when the spec needs it.
    sessions_conn = get_sessions_db()
    org_conn = None
    if "org" in correlation.get_databases():
        org_conn = get_org_db()

    try:
        result = execute_correlation(
            correlation,
            sessions_conn,
            org_conn,
            limit=args.limit,
        )

        format_type = "json" if args.json else "text"
        print(format_correlation_result(result, format_type))

    except Exception as e:
        print(f"Error executing correlation: {e}")
        sys.exit(1)
    finally:
        # Connections are closed on every path, including sys.exit above.
        sessions_conn.close()
        if org_conn:
            org_conn.close()

    sys.exit(0)

# ==========================================================================
# J.25.5: ADR-151 Phase 6 - Context Graph (CP-40)
# ==========================================================================

# Graph stats mode - show knowledge graph statistics
if args.graph_stats:
    if not CONTEXT_GRAPH_AVAILABLE:
        print("Error: Context graph builder not available.")
        print("Install with: pip install -e .[context-graph]")
        sys.exit(1)

    try:
        # Builder is a context manager; it owns its DB connections.
        with ContextGraphBuilder() as builder:
            stats = builder.get_stats()

        if args.json:
            print(json.dumps(stats, indent=2))
        else:
            print("\n" + "=" * 60)
            print("ADR-151 KNOWLEDGE GRAPH STATISTICS")
            print("=" * 60)

            print("\nNodes by Type (kg_nodes in org.db):")
            print("-" * 40)
            total_nodes = stats.get('total_nodes', 0)
            for node_type, count in stats.get('nodes_by_type', {}).items():
                # Guard the percentage against an empty graph.
                pct = (count / total_nodes * 100) if total_nodes > 0 else 0
                print(f" {node_type:<25} {count:>8,} ({pct:>5.1f}%)")
            print("-" * 40)
            print(f" {'TOTAL':<25} {total_nodes:>8,}")

            print("\nEdges by Type (kg_edges in org.db):")
            print("-" * 40)
            total_edges = stats.get('total_edges', 0)
            for edge_type, count in stats.get('edges_by_type', {}).items():
                pct = (count / total_edges * 100) if total_edges > 0 else 0
                print(f" {edge_type:<25} {count:>8,} ({pct:>5.1f}%)")
            print("-" * 40)
            print(f" {'TOTAL':<25} {total_edges:>8,}")

            print("\nContext Graph Usage (sessions.db):")
            print("-" * 40)
            print(f" Context Graphs Built: {stats.get('total_context_graphs', 0):,}")
            print(f" Total Graph Usages: {stats.get('total_graph_usages', 0):,}")
            print()
    except Exception as e:
        print(f"Error getting graph stats: {e}")
        sys.exit(1)
    return

# J.25.6.15: View stored context graph projections.
# `--context-graph list` lists all stored graphs; any other value is
# treated as a (possibly partial) graph ID to show in detail.
if args.context_graph is not None:
    conn = get_sessions_db()
    try:
        if args.context_graph == 'list':
            # List all stored context graphs
            rows = conn.execute("""
                SELECT id, name, task_description, seed_strategy,
                       token_budget, node_count, edge_count,
                       tokens_estimated, created_at, build_time_ms
                FROM context_graphs
                ORDER BY created_at DESC
            """).fetchall()

            if not rows:
                print("No stored context graphs found.")
                print("Build one with: /cxq --graph \"task description\" --graph-persist")
                return

            if args.json:
                results = []
                for r in rows:
                    results.append({
                        'id': r[0], 'name': r[1], 'task_description': r[2],
                        'seed_strategy': r[3], 'token_budget': r[4],
                        'node_count': r[5], 'edge_count': r[6],
                        'tokens_estimated': r[7], 'created_at': r[8],
                        'build_time_ms': r[9],
                    })
                print(json.dumps(results, indent=2))
            else:
                print(f"\nStored Context Graphs ({len(rows)} total):")
                print("=" * 80)
                for r in rows:
                    graph_id, name, task_desc, strategy = r[0], r[1], r[2], r[3]
                    budget, nodes, edges, tokens_est = r[4], r[5], r[6], r[7]
                    created, build_ms = r[8], r[9]
                    # Prefer the task description; fall back to the name.
                    label = task_desc or name or '(unnamed)'
                    print(f"\n ID: {graph_id}")
                    print(f" Task: {label}")
                    print(f" Strategy: {strategy} | Nodes: {nodes} | Edges: {edges}")
                    print(f" Tokens: ~{tokens_est:,} / {budget:,} budget")
                    print(f" Built: {created} ({build_ms}ms)")
                print()
                print(f"View details: /cxq --context-graph <GRAPH_ID>")
        else:
            # Show details for a specific graph
            graph_id = args.context_graph

            # Fuzzy match: allow partial ID
            row = conn.execute(
                "SELECT id, name, task_description, seed_strategy, "
                "token_budget, max_depth, max_nodes, relevance_threshold, "
                "node_count, edge_count, tokens_estimated, "
                "created_at, expires_at, build_time_ms, builder_version, "
                "policies_applied, phi_node_count "
                "FROM context_graphs WHERE id = ? OR id LIKE ?",
                (graph_id, f"%{graph_id}%")
            ).fetchone()

            if not row:
                print(f"Error: No context graph found matching '{graph_id}'")
                print("List available graphs: /cxq --context-graph")
                return

            full_id = row[0]

            # Get nodes for this graph (seeds first, then by relevance)
            nodes = conn.execute("""
                SELECT node_id, relevance_score, depth, is_seed, token_estimate
                FROM context_graph_nodes
                WHERE context_graph_id = ?
                ORDER BY is_seed DESC, relevance_score DESC
            """, (full_id,)).fetchall()

            # Get usage records
            usages = conn.execute("""
                SELECT session_id, agent_id, tokens_used, was_helpful, used_at
                FROM context_graph_usage
                WHERE context_graph_id = ?
                ORDER BY used_at DESC
            """, (full_id,)).fetchall()

            # policies_applied is stored as a JSON string (or NULL).
            policies_raw = row[15]
            policies = json.loads(policies_raw) if policies_raw else []
            phi_node_count = row[16] if row[16] is not None else 0

            if args.json:
                result = {
                    'id': row[0], 'name': row[1], 'task_description': row[2],
                    'seed_strategy': row[3], 'token_budget': row[4],
                    'max_depth': row[5], 'max_nodes': row[6],
                    'relevance_threshold': row[7], 'node_count': row[8],
                    'edge_count': row[9], 'tokens_estimated': row[10],
                    'created_at': row[11], 'expires_at': row[12],
                    'build_time_ms': row[13], 'builder_version': row[14],
                    'policies_applied': policies,
                    'phi_node_count': phi_node_count,
                    'nodes': [
                        {'node_id': n[0], 'relevance_score': n[1],
                         'depth': n[2], 'is_seed': bool(n[3]),
                         'token_estimate': n[4]}
                        for n in nodes
                    ],
                    'usages': [
                        {'session_id': u[0], 'agent_id': u[1],
                         'tokens_used': u[2], 'was_helpful': u[3],
                         'used_at': u[4]}
                        for u in usages
                    ],
                }
                print(json.dumps(result, indent=2))
            else:
                label = row[2] or row[1] or '(unnamed)'
                print(f"\nContext Graph: {full_id}")
                print("=" * 70)
                print(f" Task: {label}")
                print(f" Strategy: {row[3]}")
                print(f" Budget: {row[4]:,} tokens | Depth: {row[5]} | Max nodes: {row[6]}")
                print(f" Threshold: {row[7]}")
                print(f" Result: {row[8]} nodes, {row[9]} edges, ~{row[10]:,} tokens")
                print(f" Built: {row[11]} ({row[13]}ms, v{row[14]})")
                if row[12]:
                    print(f" Expires: {row[12]}")
                if policies:
                    print(f" Policies: {len(policies)} applied")
                    for p in policies[:5]:
                        print(f" - {p.get('node_id', 'N/A')} (role={p.get('role', 'N/A')})")
                    if len(policies) > 5:
                        print(f" ... and {len(policies) - 5} more")

                # J.25.4.3: PHI compliance display
                if phi_node_count > 0:
                    phi_pct = (phi_node_count / row[8] * 100) if row[8] > 0 else 0
                    print(f" PHI Nodes: {phi_node_count} ({phi_pct:.1f}% of graph) [HIPAA review recommended]")
                else:
                    print(f" PHI Nodes: 0 (clean)")

                if nodes:
                    # is_seed (n[3]) partitions the node list.
                    seed_nodes = [n for n in nodes if n[3]]
                    expanded = [n for n in nodes if not n[3]]

                    print(f"\nSeed Nodes ({len(seed_nodes)}):")
                    print("-" * 60)
                    for n in seed_nodes:
                        node_short = n[0]
                        if len(node_short) > 55:
                            node_short = '...' + node_short[-52:]
                        print(f" [{n[1]:.2f}] {node_short}")

                    if expanded:
                        print(f"\nExpanded Nodes ({len(expanded)}):")
                        print("-" * 60)
                        for n in expanded[:20]:  # Show top 20
                            node_short = n[0]
                            if len(node_short) > 50:
                                node_short = '...' + node_short[-47:]
                            print(f" [{n[1]:.2f}] d={n[2]} {node_short} (~{n[4]} tok)")
                        if len(expanded) > 20:
                            print(f" ... and {len(expanded) - 20} more nodes")

                if usages:
                    print(f"\nUsage History ({len(usages)}):")
                    print("-" * 60)
                    for u in usages:
                        # was_helpful is tri-state: NULL / truthy / falsy.
                        helpful = '?' if u[3] is None else ('Y' if u[3] else 'N')
                        print(f" {u[4]} agent={u[1] or 'N/A'} tokens={u[2] or 'N/A'} helpful={helpful}")
                else:
                    print(f"\n No usage records yet.")
                print()
    except Exception as e:
        print(f"Error querying context graphs: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
    finally:
        conn.close()
    return

# Graph build mode - build context graph for task
if args.graph is not None:
    if not CONTEXT_GRAPH_AVAILABLE:
        print("Error: Context graph builder not available.")
        print("Ensure scripts/context_graph/ module is present.")
        sys.exit(1)

    # Use query arg if graph arg is empty
    task = args.graph if args.graph else args.query
    if not task:
        print("Error: --graph requires a task description.")
        print("Usage: /cxq --graph \"find decisions about database architecture\"")
        sys.exit(1)

    try:
        with ContextGraphBuilder() as builder:
            graph = builder.build(
                task_description=task,
                seed_strategy=args.graph_strategy,
                token_budget=args.graph_budget,
                max_depth=args.graph_depth,
                max_nodes=args.limit * 3,  # Allow more nodes, pruning will limit
                persist=args.graph_persist,
            )

            if args.json:
                output = builder.serialize_for_context(graph, format="json")
            else:
                output = builder.serialize_for_context(
                    graph,
                    format=args.graph_format,
                    include_edges=True,
                )

            print(output)

            # Print build stats unless JSON mode
            if not args.json:
                print("\n" + "-" * 60)
                print("BUILD STATISTICS:")
                stats = builder.last_build_stats
                print(f" Seeds: {stats.get('seed_count', 0)}")
                print(f" Nodes: {stats.get('node_count', 0)}")
                print(f" Edges: {stats.get('edge_count', 0)}")
                print(f" Tokens: ~{stats.get('tokens_estimated', 0):,} / {stats.get('token_budget', 0):,}")
                print(f" Build: {stats.get('build_time_ms', 0)}ms")
                if args.graph_persist:
                    print(f" Persisted: Yes (sessions.db)")
    except Exception as e:
        print(f"Error building context graph: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
    return

# J.16.3: Semantic search help
if args.semantic_help:
print("\n" + "=" * 60)
print("SEMANTIC SEARCH (J.16.3)")
print("=" * 60)
print("""

Semantic search uses vector embeddings to find conceptually similar content. It requires sentence-transformers and embeddings to be pre-generated.

Options: --semantic QUERY Pure semantic search (vector similarity only) --hybrid QUERY Hybrid search (FTS5 + vector with RRF fusion) --recall TOPIC RAG-style recall for context injection

--fts-weight W FTS5 weight in hybrid search (default: 0.4) --vector-weight W Vector weight in hybrid search (default: 0.6) --similarity-threshold T Min similarity for semantic search (default: 0.3) --limit N Maximum results (default: 20)

Examples: /cxq --semantic "authentication patterns" /cxq --hybrid "database migration errors" /cxq --recall "context extraction architecture" /cxq --hybrid "auth" --fts-weight 0.3 --vector-weight 0.7 /cxq --semantic "deployment" --similarity-threshold 0.5

Algorithm: Hybrid search uses Reciprocal Rank Fusion (RRF): RRF_score = Σ (weight_i / (k + rank_i))

Where k=60 (constant), weights default to 0.4 (FTS5) and 0.6 (vector). Results appearing in both searches get boosted scores.

Requirements:

  • sentence-transformers package

  • Embeddings generated via /cx --with-embeddings

  • sessions.db with embeddings table populated """) if SEMANTIC_SEARCH_AVAILABLE: print("Status: ✅ Semantic search available") print(f" Embeddings: {'✅ Available' if MCP_EMBEDDINGS_AVAILABLE else '❌ Not available'}") else: print("Status: ❌ Semantic search not available") print(" Install: pip install sentence-transformers") return

# J.16.3.1: Semantic search (vector only)
if args.semantic:
    if not SEMANTIC_SEARCH_AVAILABLE:
        print("Error: Semantic search not available.")
        print("Install sentence-transformers: pip install sentence-transformers")
        print("Generate embeddings: /cx --with-embeddings")
        sys.exit(1)

    # Pure vector search also needs the embedding model loaded.
    if not MCP_EMBEDDINGS_AVAILABLE:
        print("Error: Embedding model not loaded.")
        print("Install: pip install sentence-transformers")
        sys.exit(1)

    try:
        results = mcp_vector_search(
            args.semantic,
            limit=args.limit,
            threshold=args.similarity_threshold,
        )

        if args.json:
            print(json.dumps(results, indent=2, default=str))
        else:
            print(f"\n🔮 Semantic Search Results for '{args.semantic}' ({len(results)} results):\n")
            for r in results:
                score = r.get('vector_score', 0)
                # Truncate long message bodies for terminal display.
                content = r.get('content', '')[:200] + '...' if len(r.get('content', '')) > 200 else r.get('content', '')
                print(f"[{score:.3f}] {r.get('role', 'unknown')}: {content}")
                print(f" Session: {r.get('session_id', 'N/A')[:20]}... | {r.get('timestamp', 'N/A')}")
                print()
    except Exception as e:
        print(f"Error: Semantic search failed: {e}")
        sys.exit(1)
    return

# J.16.3.1: Hybrid search (FTS5 + vector)
if args.hybrid:
    if not SEMANTIC_SEARCH_AVAILABLE:
        print("Error: Hybrid search not available.")
        print("Install sentence-transformers: pip install sentence-transformers")
        print("Generate embeddings: /cx --with-embeddings")
        sys.exit(1)

    try:
        results = mcp_hybrid_search(
            args.hybrid,
            limit=args.limit,
            fts_weight=args.fts_weight,
            vector_weight=args.vector_weight,
            vector_threshold=args.similarity_threshold,
        )

        if args.json:
            print(json.dumps(results, indent=2, default=str))
        else:
            print(f"\n🔍🔮 Hybrid Search Results for '{args.hybrid}' ({len(results)} results):\n")
            for r in results:
                score = r.get('hybrid_score', 0)
                # Which searches matched this result (fts, vector, or both).
                methods = '+'.join(r.get('search_methods', ['?']))
                content = r.get('content', '')[:200] + '...' if len(r.get('content', '')) > 200 else r.get('content', '')
                print(f"[{score:.4f}|{methods}] {r.get('role', 'unknown')}: {content}")
                print(f" Session: {r.get('session_id', 'N/A')[:20]}... | {r.get('timestamp', 'N/A')}")
                print()
    except Exception as e:
        print(f"Error: Hybrid search failed: {e}")
        sys.exit(1)
    return

# J.16.3.2: Recall (RAG-style retrieval for context injection)
if args.recall:
    if not SEMANTIC_SEARCH_AVAILABLE:
        print("Error: Recall not available.")
        print("Install sentence-transformers: pip install sentence-transformers")
        sys.exit(1)

    try:
        # Use hybrid search for recall but format output for context injection
        results = mcp_hybrid_search(
            args.recall,
            limit=args.limit,
            fts_weight=args.fts_weight,
            vector_weight=args.vector_weight,
            vector_threshold=args.similarity_threshold,
        )

        if args.json:
            # JSON format for programmatic use
            output = {
                'topic': args.recall,
                'results_count': len(results),
                'messages': results,
            }
            print(json.dumps(output, indent=2, default=str))
        else:
            # Markdown format optimized for LLM context injection
            print(f"## Recalled Context: {args.recall}\n")
            print(f"*{len(results)} relevant messages retrieved*\n")
            print("---\n")
            for i, r in enumerate(results, 1):
                role = r.get('role', 'unknown')
                content = r.get('content', '')
                timestamp = r.get('timestamp', 'N/A')
                session = r.get('session_id', 'N/A')[:16]

                print(f"### [{i}] {role.title()} ({timestamp[:10]})")
                print(f"*Session: {session}...*\n")
                print(content)
                print("\n---\n")
    except Exception as e:
        print(f"Error: Recall failed: {e}")
        sys.exit(1)
    return

# Stats mode — summarize all four ADR-118 database tiers.
if args.stats:
    stats = get_stats()
    if args.json:
        print(json.dumps(stats, indent=2))
    else:
        print("\n" + "=" * 60)
        print("ADR-118 FOUR-TIER DATABASE STATISTICS")
        print("=" * 60)

        print("\nTier 3: sessions.db (Session Data)")
        print("-" * 40)
        for key, value in stats.get('tier_3_sessions', {}).items():
            # Thousands-separate integer counts; print other values as-is.
            print(f" {key}: {value:,}" if isinstance(value, int) else f" {key}: {value}")

        print("\nTier 2: org.db (Organization Knowledge)")
        print("-" * 40)
        for key, value in stats.get('tier_2_org', {}).items():
            print(f" {key}: {value:,}" if isinstance(value, int) else f" {key}: {value}")

        print("\nTier 1: platform.db (Framework Components)")
        print("-" * 40)
        for key, value in stats.get('tier_1_platform', {}).items():
            print(f" {key}: {value:,}" if isinstance(value, int) else f" {key}: {value}")

        if stats.get('tier_4_projects'):
            print("\nTier 4: projects.db (Registered Projects)")
            print("-" * 40)
            for key, value in stats.get('tier_4_projects', {}).items():
                print(f" {key}: {value:,}" if isinstance(value, int) else f" {key}: {value}")

        # LLM Context Distribution
        if stats.get('llm_context') and 'error' not in stats['llm_context']:
            print("\n" + "=" * 60)
            print("CONTEXT USAGE BY LLM")
            print("=" * 60)
            llm_ctx = stats['llm_context']
            total_msgs = llm_ctx.get('_total_messages', 0)
            total_chars = llm_ctx.get('_total_chars', 0)

            print(f"\n{'LLM':<12} {'Messages':>12} {'Pct':>8} {'Chars':>14} {'Pct':>8}")
            print("-" * 56)
            for llm, data in llm_ctx.items():
                # Keys prefixed with '_' are aggregate totals, not LLMs.
                if llm.startswith('_'):
                    continue
                if isinstance(data, dict):
                    print(f"{llm:<12} {data['messages']:>12,} {data['messages_pct']:>7.1f}% {data['chars']:>14,} {data['chars_pct']:>7.1f}%")
            print("-" * 56)
            print(f"{'TOTAL':<12} {total_msgs:>12,} {'100.0':>7}% {total_chars:>14,} {'100.0':>7}%")

    print()
    return

# ==========================================================================
# J.4.7.1: Parse --where clause (ADR-149 Phase 1)
# ==========================================================================

# Parsed results are consumed by the query-mode handlers below.
where_clause = None
where_params = None

if args.where:
    if not WHERE_PARSER_AVAILABLE:
        print("Error: WHERE parser not available.")
        print("Check: scripts/context_graph/where_parser.py")
        sys.exit(1)

    result = parse_where(args.where)
    if not result.success:
        print(f"Error parsing --where expression: {result.error}")
        print(f"Expression: {args.where}")
        sys.exit(1)

    where_clause = result.sql
    where_params = result.params

    # Validate fields based on query type
    query_type = None
    if args.decisions:
        query_type = 'decisions'
    elif args.errors is not None:
        query_type = 'errors'
    elif args.learnings:
        query_type = 'learnings'
    elif args.tokens is not None:
        query_type = 'tokens'
    elif args.tools is not None:
        query_type = 'tools'

    if query_type:
        allowed = get_allowed_fields(query_type)
        if allowed:
            valid, error = validate_fields(where_clause, allowed)
            if not valid:
                print(f"Error: {error}")
                sys.exit(1)

# ==========================================================================
# ADR-156: Project Context Setup
# ==========================================================================

# Resolve project_id: --filter-project > CODITECT_PROJECT env var > None
project_id = getattr(args, 'filter_project', None) or os.environ.get('CODITECT_PROJECT')
project_only = getattr(args, 'project_only', False)
project_all = getattr(args, 'all_projects', False)

# If --all-projects, ignore project filter
if project_all:
    project_id = None
    project_only = False

# Set environment for downstream functions
if project_id and not project_all:
    os.environ['CODITECT_PROJECT'] = project_id

# Show project filter status (ADR-156) — suppressed for JSON output
# and for --stats, which have their own formats.
if project_id and not args.json and not getattr(args, 'stats', False):
    scope_type = "project-only" if project_only else "project + global"
    print(f"📁 Project filter: {project_id} ({scope_type})")

# ==========================================================================
# J.4.7.2: Parse --group-by and --sort (ADR-149 Phase 1)
# ==========================================================================

group_by_sql = None
group_by_select = None
sort_sql = None

# Determine query type for field validation
query_type = None
if args.decisions:
    query_type = 'decisions'
elif args.errors is not None:
    query_type = 'errors'
elif args.learnings:
    query_type = 'learnings'
elif args.tokens is not None:
    query_type = 'tokens'
elif args.tools is not None:
    query_type = 'tools'
elif args.activities is not None:
    query_type = 'activities'

if args.group_by:
    if not GROUP_SORT_AVAILABLE:
        print("Error: GROUP BY processor not available.")
        print("Check: scripts/context_graph/group_sort_processor.py")
        sys.exit(1)

    # Grouping only makes sense against a concrete table.
    if not query_type:
        print("Error: --group-by requires a query type (--tokens, --tools, --decisions, etc.)")
        sys.exit(1)

    result = parse_group_by(args.group_by, query_type)
    if not result.success:
        print(f"Error parsing --group-by: {result.error}")
        sys.exit(1)

    group_by_sql = result.sql
    group_by_select = result.select_fields

if args.sort:
    if not GROUP_SORT_AVAILABLE:
        print("Error: SORT processor not available.")
        print("Check: scripts/context_graph/group_sort_processor.py")
        sys.exit(1)

    if not query_type:
        print("Error: --sort requires a query type (--tokens, --tools, --decisions, etc.)")
        sys.exit(1)

    result = parse_sort(args.sort, query_type)
    if not result.success:
        print(f"Error parsing --sort: {result.error}")
        sys.exit(1)

    sort_sql = result.sql

# Decisions mode (Tier 2 - org.db)
if args.decisions:
    decisions = get_decisions(
        limit=args.limit,
        decision_type=args.decision_type,
        where_clause=where_clause,
        where_params=where_params,
        project_only=getattr(args, 'project_only', False)  # ADR-156
    )

    # J.4.7.4: Apply field filter if specified
    if args.field and FIELD_PARSER_AVAILABLE:
        decisions = _apply_field_filter(decisions, args.field, 'decisions')

    if args.json:
        print(json.dumps(decisions, indent=2, default=str))
    else:
        print(f"\n📋 Decisions ({len(decisions)} results):\n")
        for dec in decisions:
            print(format_decision(dec))
    return

# Error solutions mode
if args.errors is not None:
    # Empty string means "no search term" — list all.
    query = args.errors if args.errors else None
    errors = get_error_solutions(
        query=query,
        limit=args.limit,
        where_clause=where_clause,
        where_params=where_params,
        project_only=project_only  # ADR-156
    )

    # J.4.7.4: Apply field filter if specified
    if args.field and FIELD_PARSER_AVAILABLE:
        errors = _apply_field_filter(errors, args.field, 'errors')

    if args.json:
        print(json.dumps(errors, indent=2, default=str))
    else:
        print(f"\n🔧 Error Solutions ({len(errors)} results):\n")
        for es in errors:
            print(format_error_solution(es))
    return

# Skill learnings mode
if args.learnings:
    learnings = get_skill_learnings(
        limit=args.limit,
        pattern_type=args.pattern_type,
        where_clause=where_clause,
        where_params=where_params,
        project_only=project_only  # ADR-156
    )

    # J.4.7.4: Apply field filter if specified
    if args.field and FIELD_PARSER_AVAILABLE:
        learnings = _apply_field_filter(learnings, args.field, 'skills')

    if args.json:
        print(json.dumps(learnings, indent=2, default=str))
    else:
        print(f"\n📚 Skill Learnings ({len(learnings)} results):\n")
        for learning in learnings:
            skill = learning.get('skill_name', 'unknown')
            pattern = learning.get('pattern_type', 'unknown')
            created = learning.get('created_at', 'unknown')
            print(f"[{pattern}] {skill}")
            print(f" Created: {created}")
            if learning.get('pattern_description'):
                desc = learning['pattern_description'][:200]
                print(f" Description: {desc}...")
            print("-" * 60)
    return

    Components mode (Tier 1 - platform.db)

    if args.components is not None: query = args.components if args.components else None components = get_components(query=query, component_type=args.component_type, limit=args.limit) if args.json: print(json.dumps(components, indent=2, default=str)) else: print(f"\n🧩 Components ({len(components)} results):\n") for comp in components: print(format_component(comp)) return

    Project decisions mode (Tier 4 - project.db)

    if args.project: decisions = get_project_decisions(limit=args.limit) if not decisions: db_path = find_project_db() if not db_path: print("\n⚠️ No project.db found in current directory or parents") print(" Project decisions are stored in {project}/.coditect/project.db") else: print(f"\n📁 Project DB: {db_path}") print(" No project decisions found") else: if args.json: print(json.dumps(decisions, indent=2, default=str)) else: print(f"\n📋 Project Decisions ({len(decisions)} results):\n") for dec in decisions: print(format_decision(dec)) return

    Project context mode (Tier 4 - project.db)

    if args.project_context: contexts = get_project_context(limit=args.limit) if not contexts: db_path = find_project_db() if not db_path: print("\n⚠️ No project.db found in current directory or parents") else: print(f"\n📁 Project DB: {db_path}") print(" No project context found") else: if args.json: print(json.dumps(contexts, indent=2, default=str)) else: print(f"\n📚 Project Context ({len(contexts)} results):\n") for ctx in contexts: print(f"[{ctx.get('context_type', 'unknown')}] {ctx.get('key', 'unnamed')}") if ctx.get('value'): print(f" {ctx['value'][:100]}...") print("-" * 60) return

    Registered projects mode (Tier 4 - projects.db per ADR-118)

    if args.projects is not None: query = args.projects if args.projects else None projects = get_registered_projects(query=query, limit=args.limit) if not projects: if not PROJECTS_DB_PATH.exists(): print("\n⚠️ projects.db not found at", PROJECTS_DB_PATH) print(" Initialize with: python3 scripts/init_projects_db.py") print(" Register project: /cx --register-project ") else: print("\n📁 No registered projects found") print(" Register a project: /cx --register-project ") else: if args.json: print(json.dumps(projects, indent=2, default=str)) else: print(f"\n📦 Registered Projects ({len(projects)} results):\n") for proj in projects: print(format_project(proj)) return

    ==========================================================================

    J.16.2.2: Single Project Search (--project-search)

    ==========================================================================

    if args.project_search: project = get_project_by_name(args.project_search) if not project: print(f"\n⚠️ Project not found: {args.project_search}") print(" List projects: /cxq --projects") print(" Register project: /cx --register-project ") return

    project_uuid = project.get('project_uuid')
    project_name = project.get('name', 'unknown')
    project_path = project.get('path')

    # Search with optional query from positional argument
    search_query = args.query if args.query else None
    results = search_in_project(
    query=search_query,
    project_uuid=project_uuid,
    limit=args.limit,
    project_name=project_name,
    project_path=project_path
    )

    if args.json:
    output = {
    'project': project,
    'query': search_query,
    'results': results
    }
    print(json.dumps(output, indent=2, default=str))
    else:
    print(f"\n🔍 Project Search: {project_name}")
    print(format_project_search_results(results, search_query))
    return

    ==========================================================================

    J.16.2.3: Cross-Project Search (--all-projects with query)

    Note: --all-projects flag already exists for filtering, but with a query

    argument it triggers cross-project search

    ==========================================================================

    if all_projects and args.query: results = search_all_projects( query=args.query, limit=args.limit )

    if args.json:
    print(json.dumps(results, indent=2, default=str))
    else:
    print(f"\n🌐 Cross-Project Search")
    print(format_project_search_results(results, args.query))
    return

    ==========================================================================

    J.16.2.4: Project Tree Search (--project-tree)

    ==========================================================================

    if args.project_tree: projects = get_project_tree(args.project_tree) if not projects: print(f"\n⚠️ Project not found: {args.project_tree}") print(" List projects: /cxq --projects") print(" Register project: /cx --register-project ") return

    # Get all project UUIDs and paths for search
    project_uuids = [p.get('project_uuid') for p in projects if p.get('project_uuid')]
    project_paths = [p.get('path') for p in projects if p.get('path')]
    parent_project = projects[0] if projects else {}
    project_name = parent_project.get('name', 'unknown')
    project_path = parent_project.get('path')

    # Search with optional query
    search_query = args.query if args.query else None

    # If single project (no children), use simple search
    if len(project_uuids) == 1:
    results = search_in_project(
    query=search_query,
    project_uuid=project_uuids[0],
    limit=args.limit,
    project_name=project_name,
    project_path=project_path
    )
    else:
    # Multi-project tree search
    results = search_in_project(
    query=search_query,
    project_uuid=project_uuids[0],
    limit=args.limit,
    include_children=True,
    child_uuids=project_uuids,
    project_name=project_name,
    project_path=project_path,
    project_paths=project_paths
    )

    if args.json:
    output = {
    'parent_project': parent_project,
    'tree_size': len(projects),
    'child_projects': projects[1:] if len(projects) > 1 else [],
    'query': search_query,
    'results': results
    }
    print(json.dumps(output, indent=2, default=str))
    else:
    tree_info = f" ({len(projects)} projects in tree)" if len(projects) > 1 else ""
    print(f"\n🌳 Project Tree Search: {project_name}{tree_info}")
    if len(projects) > 1:
    print(f" Children: {', '.join([p.get('name', '?') for p in projects[1:]])}")
    print(format_project_search_results(results, search_query))
    return

    ==========================================================================

    J.15.5: Project Code/Document Search (project_embeddings)

    ==========================================================================

    J.15.5 Admin: Create FTS index for project embeddings

    if getattr(args, 'create_code_fts', False): if not PROJECT_CODE_SEARCH_AVAILABLE: print("\n⚠️ Project code search not available") print(" Missing: scripts/project_search.py") return success = create_project_fts_index() if success: print("✅ FTS index created for project_embeddings") else: print("❌ Failed to create FTS index") return

    J.15.5.1: Single project code search (--code-search)

    if getattr(args, 'code_search', None): if not PROJECT_CODE_SEARCH_AVAILABLE: print("\n⚠️ Project code search not available") print(" Missing: scripts/project_search.py") return

    if not args.query:
    print("\n⚠️ Query required for code search")
    print(" Usage: /cxq --code-search <project> \"query\"")
    return

    code_mode = getattr(args, 'code_mode', 'hybrid')
    results = search_project_code(
    query=args.query,
    project_name=args.code_search,
    limit=args.limit,
    search_mode=code_mode,
    similarity_threshold=getattr(args, 'similarity_threshold', 0.3),
    fts_weight=getattr(args, 'fts_weight', 0.4),
    semantic_weight=getattr(args, 'vector_weight', 0.6)
    )

    if args.json:
    output = {
    'query': results.query,
    'search_type': results.search_type,
    'total_results': results.total_results,
    'projects_searched': results.projects_searched,
    'errors': results.errors,
    'results': [
    {
    'project_name': r.project_name,
    'file_path': r.file_path,
    'chunk_text': r.chunk_text,
    'start_line': r.start_line,
    'end_line': r.end_line,
    'content_type': r.content_type,
    'language': r.language,
    'score': r.score,
    'score_type': r.score_type
    }
    for r in results.results
    ]
    }
    print(json.dumps(output, indent=2, default=str))
    else:
    print(format_project_code_results(results))
    return

    J.15.5.3: Project tree code search (--code-tree)

    if getattr(args, 'code_tree', None): if not PROJECT_CODE_SEARCH_AVAILABLE: print("\n⚠️ Project code search not available") print(" Missing: scripts/project_search.py") return

    if not args.query:
    print("\n⚠️ Query required for code search")
    print(" Usage: /cxq --code-tree <project> \"query\"")
    return

    code_mode = getattr(args, 'code_mode', 'hybrid')
    results = search_project_tree_code(
    query=args.query,
    project_name=args.code_tree,
    limit=args.limit,
    search_mode=code_mode,
    similarity_threshold=getattr(args, 'similarity_threshold', 0.3),
    fts_weight=getattr(args, 'fts_weight', 0.4),
    semantic_weight=getattr(args, 'vector_weight', 0.6)
    )

    if args.json:
    output = {
    'query': results.query,
    'search_type': results.search_type,
    'total_results': results.total_results,
    'projects_searched': results.projects_searched,
    'errors': results.errors,
    'results': [
    {
    'project_name': r.project_name,
    'file_path': r.file_path,
    'chunk_text': r.chunk_text,
    'start_line': r.start_line,
    'end_line': r.end_line,
    'content_type': r.content_type,
    'language': r.language,
    'score': r.score,
    'score_type': r.score_type
    }
    for r in results.results
    ]
    }
    print(json.dumps(output, indent=2, default=str))
    else:
    print(format_project_code_results(results))
    return

    J.15.5.2: All projects code search (--all-code)

    if getattr(args, 'all_code', False): if not PROJECT_CODE_SEARCH_AVAILABLE: print("\n⚠️ Project code search not available") print(" Missing: scripts/project_search.py") return

    if not args.query:
    print("\n⚠️ Query required for code search")
    print(" Usage: /cxq --all-code \"query\"")
    return

    code_mode = getattr(args, 'code_mode', 'hybrid')
    results = search_all_projects_code(
    query=args.query,
    limit=args.limit,
    search_mode=code_mode,
    similarity_threshold=getattr(args, 'similarity_threshold', 0.3),
    fts_weight=getattr(args, 'fts_weight', 0.4),
    semantic_weight=getattr(args, 'vector_weight', 0.6)
    )

    if args.json:
    output = {
    'query': results.query,
    'search_type': results.search_type,
    'total_results': results.total_results,
    'projects_searched': results.projects_searched,
    'errors': results.errors,
    'results': [
    {
    'project_name': r.project_name,
    'file_path': r.file_path,
    'chunk_text': r.chunk_text,
    'start_line': r.start_line,
    'end_line': r.end_line,
    'content_type': r.content_type,
    'language': r.language,
    'score': r.score,
    'score_type': r.score_type
    }
    for r in results.results
    ]
    }
    print(json.dumps(output, indent=2, default=str))
    else:
    print(format_project_code_results(results))
    return

    ==========================================================================

    J.16.1: Tier 3 Analytics - Token Economics

    ==========================================================================

    if args.token_stats: tokens = get_token_economics(aggregate=True) if args.json: print(json.dumps(tokens, indent=2, default=str)) else: print("\n📊 Token Economics Statistics (Tier 3):\n") if tokens: print(format_token_economics(tokens[0], aggregate=True)) else: print("No token economics data found.") return

    if args.tokens is not None: session_id = args.tokens if args.tokens else args.session_filter tokens = get_token_economics( session_id=session_id, limit=args.limit, group_by_sql=group_by_sql, group_by_select=group_by_select, sort_sql=sort_sql )

    # J.4.7.4: Apply field filter if specified
    if args.field and FIELD_PARSER_AVAILABLE:
    tokens = _apply_field_filter(tokens, args.field, 'tokens')

    if args.json:
    print(json.dumps(tokens, indent=2, default=str))
    else:
    filter_desc = f" (session: {session_id[:8]}...)" if session_id else ""
    group_desc = f" [grouped by: {args.group_by}]" if args.group_by else ""
    sort_desc = f" [sorted by: {args.sort}]" if args.sort else ""
    field_desc = f" [field: {args.field}]" if args.field else ""
    print(f"\n🪙 Token Economics{filter_desc}{group_desc}{sort_desc}{field_desc} ({len(tokens)} results):\n")
    for te in tokens:
    # J.4.7.2: Use grouped format when group_by is active
    if group_by_sql:
    print(format_grouped_result(te, 'tokens'))
    else:
    print(format_token_economics(te))
    return

    ==========================================================================

    J.16.1: Tier 3 Analytics - Tool Analytics

    ==========================================================================

    if args.tool_stats: tools = get_tool_analytics(aggregate=True, limit=args.limit) if args.json: print(json.dumps(tools, indent=2, default=str)) else: print("\n📊 Tool Usage Statistics (Tier 3):\n") if tools: for ta in tools: print(format_tool_analytics(ta, aggregate=True)) else: print("No tool analytics data found.") return

    if args.tools is not None: tool_name = args.tools if args.tools else None session_id = args.session_filter tools = get_tool_analytics( tool_name=tool_name, session_id=session_id, limit=args.limit, group_by_sql=group_by_sql, group_by_select=group_by_select, sort_sql=sort_sql )

    # J.4.7.4: Apply field filter if specified
    if args.field and FIELD_PARSER_AVAILABLE:
    tools = _apply_field_filter(tools, args.field, 'tools')

    if args.json:
    print(json.dumps(tools, indent=2, default=str))
    else:
    filter_parts = []
    if tool_name:
    filter_parts.append(f"tool: {tool_name}")
    if session_id:
    filter_parts.append(f"session: {session_id[:8]}...")
    filter_desc = f" ({', '.join(filter_parts)})" if filter_parts else ""
    group_desc = f" [grouped by: {args.group_by}]" if args.group_by else ""
    sort_desc = f" [sorted by: {args.sort}]" if args.sort else ""
    field_desc = f" [field: {args.field}]" if args.field else ""
    print(f"\n🔧 Tool Analytics{filter_desc}{group_desc}{sort_desc}{field_desc} ({len(tools)} results):\n")
    for ta in tools:
    # J.4.7.2: Use grouped format when group_by is active
    if group_by_sql:
    print(format_grouped_result(ta, 'tools'))
    else:
    print(format_tool_analytics(ta))
    return

    ==========================================================================

    J.16.1: Tier 3 Analytics - Activity Associations (Component Invocations)

    ==========================================================================

    if args.activity_stats: activities = get_activity_associations(aggregate=True) if args.json: print(json.dumps(activities, indent=2, default=str)) else: print("\n📊 Component Invocation Statistics (Tier 3):\n") if activities: for aa in activities: print(format_activity_association(aa, aggregate=True)) else: print("No component invocation data found.") return

    if args.activities is not None: component_type = args.activities if args.activities else None session_id = args.session_filter activities = get_activity_associations( component_type=component_type, session_id=session_id, limit=args.limit ) if args.json: print(json.dumps(activities, indent=2, default=str)) else: filter_parts = [] if component_type: filter_parts.append(f"type: {component_type}") if session_id: filter_parts.append(f"session: {session_id[:8]}...") filter_desc = f" ({', '.join(filter_parts)})" if filter_parts else "" print(f"\n🔗 Component Invocations{filter_desc} ({len(activities)} results):\n") for aa in activities: print(format_activity_association(aa)) return

    Recent messages mode

    if args.recent: messages = get_recent_messages(limit=args.recent, project_only=project_only) # ADR-156

    # J.4.7.4: Apply field filter if specified
    if args.field and FIELD_PARSER_AVAILABLE:
    messages = _apply_field_filter(messages, args.field, 'messages')

    if args.json:
    print(json.dumps(messages, indent=2, default=str))
    else:
    print(f"\n💬 Recent Messages ({len(messages)} results):\n")
    for msg in messages:
    print(format_message(msg))
    return

    Search mode (default)

    if args.query: # Fetch more results than requested to allow ranking to reorder fetch_limit = args.limit * 2 if (args.rank and not args.no_rank and RESULT_RANKER_AVAILABLE) else args.limit messages = search_messages(args.query, limit=fetch_limit, project_only=project_only) # ADR-156

    # J.4.7.4: Apply field filter if specified
    if args.field and FIELD_PARSER_AVAILABLE:
    messages = _apply_field_filter(messages, args.field, 'messages')

    # J.4.5: Apply result ranking
    if args.rank and not args.no_rank and RESULT_RANKER_AVAILABLE and messages:
    # Parse custom weights if provided
    r_weight, t_weight, s_weight = 0.5, 0.3, 0.2
    if args.rank_weights:
    try:
    parts = [float(x.strip()) for x in args.rank_weights.split(',')]
    if len(parts) == 3:
    r_weight, t_weight, s_weight = parts
    except ValueError:
    print(f"⚠️ Invalid --rank-weights format. Using defaults.", file=sys.stderr)

    messages = rank_results(
    messages,
    query=args.query,
    relevance_weight=r_weight,
    recency_weight=t_weight,
    source_weight=s_weight,
    limit=args.limit,
    include_scores=args.show_scores,
    )
    elif len(messages) > args.limit:
    messages = messages[:args.limit]

    # J.4.6: Apply aggregations if requested
    aggregation_applied = False

    # J.4.6.1: Time-based aggregations
    time_period = None
    if args.by_hour:
    time_period = 'hour'
    elif args.by_day:
    time_period = 'day'
    elif args.by_week:
    time_period = 'week'
    elif args.by_month:
    time_period = 'month'
    elif args.by_year:
    time_period = 'year'

    if time_period and AGGREGATOR_AVAILABLE:
    aggregation = aggregate_by_time(messages, period=time_period)
    output_format = 'json' if args.json else 'text'
    print(format_time_aggregation(aggregation, format=output_format))
    aggregation_applied = True

    # J.4.6.2: Topic clustering
    if args.topics and AGGREGATOR_AVAILABLE and not aggregation_applied:
    clusters = cluster_by_topic(
    messages,
    min_cluster_size=args.min_cluster,
    max_clusters=args.max_clusters,
    )
    output_format = 'json' if args.json else 'text'
    print(format_topic_clusters(clusters, format=output_format))
    aggregation_applied = True

    # J.4.6.3: Trend analysis
    if args.trending and AGGREGATOR_AVAILABLE and not aggregation_applied:
    trends = analyze_trends(
    messages,
    window_days=args.window,
    growth_threshold=args.growth_threshold,
    )
    output_format = 'json' if args.json else 'text'
    print(format_trends(trends, format=output_format))
    aggregation_applied = True

    if aggregation_applied:
    return

    if args.json:
    print(json.dumps(messages, indent=2, default=str))
    else:
    ranked_label = " (ranked)" if (args.rank and not args.no_rank and RESULT_RANKER_AVAILABLE) else ""
    print(f"\n🔍 Search Results for '{args.query}' ({len(messages)} results{ranked_label}):\n")
    for msg in messages:
    if args.show_scores and '_score' in msg:
    score_info = f" [score={msg['_score']:.3f}, rel={msg['_relevance']:.2f}, rec={msg['_recency']:.2f}]"
    print(format_message(msg) + score_info)
    else:
    print(format_message(msg))
    return

    No arguments - show help

    parser.print_help()

if name == 'main': main()