#!/usr/bin/env python3
"""Project Code/Document Search (J.15.5)

Searches project source code and documentation using semantic embeddings
generated by project_embedder.py. Integrates with /cxq for unified search.

Tasks:
    - J.15.5.1: /cxq --project "query" (single project search)
    - J.15.5.2: /cxq --all-projects "query" (cross-project search)
    - J.15.5.3: /cxq --project-tree "query" (project + children)
    - J.15.5.4: Search coordinator for projects.db
    - J.15.5.5: RRF merge for multi-database results

Usage:
    python3 scripts/project_search.py --project coditect-core "authentication"
    python3 scripts/project_search.py --all-projects "database migration"
    python3 scripts/project_search.py --project-tree rollout-master "API"

Created: 2026-02-05
Author: Claude (Opus 4.5)
"""

import json
import sqlite3
import sys
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

# Add parent to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent))

try:
    from scripts.core.paths import get_projects_db_path
except ImportError:
    # Fallback when scripts.core.paths is unavailable (e.g. standalone run):
    # probe known install locations, defaulting to the first candidate.
    def get_projects_db_path() -> Path:
        """Return the path to projects.db, preferring existing candidates."""
        home = Path.home()
        candidates = [
            home / "PROJECTS" / ".coditect-data" / "context-storage" / "projects.db",
            home / ".coditect-data" / "context-storage" / "projects.db",
        ]
        for c in candidates:
            if c.exists():
                return c
        return candidates[0]

# =============================================================================
# Configuration
# =============================================================================

# RRF parameters (J.15.5.5)
RRF_K = 60  # RRF constant
DEFAULT_FTS_WEIGHT = 0.4
DEFAULT_SEMANTIC_WEIGHT = 0.6
DEFAULT_SIMILARITY_THRESHOLD = 0.3

# Embedding search config
DEFAULT_MODEL = "all-MiniLM-L6-v2"
DEFAULT_EMBEDDING_DIM = 384

# =============================================================================
# Data Classes
# =============================================================================

@dataclass
class SearchResult:
    """A single search result from project embeddings."""
    project_id: int            # projects.id of the owning project
    project_name: str          # human-readable project name
    file_path: str             # relative path of the source file
    chunk_text: str            # the matched text chunk
    chunk_index: int           # position of the chunk within the file
    start_line: int            # first source line covered by the chunk
    end_line: int              # last source line covered by the chunk
    content_type: str          # e.g. code vs. documentation classification
    language: Optional[str]    # source language, if known
    score: float               # relevance score (semantics depend on score_type)
    score_type: str            # 'fts', 'semantic', 'rrf'

@dataclass
class ProjectSearchResults:
    """Results from searching project content."""
    query: str             # the original search query
    search_type: str       # 'single', 'tree', 'all'
    total_results: int     # number of results found
    results: List[SearchResult] = field(default_factory=list)
    projects_searched: List[str] = field(default_factory=list)
    errors: List[str] = field(default_factory=list)

# =============================================================================
# Embedding Model
# =============================================================================

class EmbeddingModel:
    """Lazy-loaded embedding model for semantic search."""

    def __init__(self, model_name: str = DEFAULT_MODEL):
        self.model_name = model_name
        self.model = None        # loaded on first encode()
        self._available = None   # tri-state: None = not yet checked

    @property
    def available(self) -> bool:
        """Check if sentence-transformers is available (result is cached)."""
        if self._available is None:
            try:
                # Probe import only; presence is all we need here.
                from sentence_transformers import SentenceTransformer
                self._available = True
            except ImportError:
                self._available = False
        return self._available

    def _load_model(self):
        """Lazy-load the embedding model on first use."""
        if self.model is None and self.available:
            from sentence_transformers import SentenceTransformer
            self.model = SentenceTransformer(self.model_name)

    def encode(self, text: str) -> bytes:
        """Encode text to embedding bytes (float32 little-endian buffer).

        Raises:
            RuntimeError: if sentence-transformers is not installed.
        """
        if not self.available:
            raise RuntimeError("sentence-transformers not installed")

        self._load_model()
        import numpy as np

        embedding = self.model.encode(text, convert_to_numpy=True)
        return embedding.astype(np.float32).tobytes()

# Global model instance (lazy-loaded)
_embedding_model: Optional[EmbeddingModel] = None


def get_embedding_model() -> EmbeddingModel:
    """Get or create the process-wide embedding model singleton."""
    global _embedding_model
    if _embedding_model is None:
        _embedding_model = EmbeddingModel()
    return _embedding_model

# =============================================================================
# Database Functions
# =============================================================================

def get_projects_db() -> sqlite3.Connection:
    """Get a connection to projects.db.

    Returns:
        sqlite3.Connection with Row factory enabled.

    Raises:
        FileNotFoundError: if projects.db does not exist on disk.
    """
    db_path = get_projects_db_path()
    if not db_path.exists():
        raise FileNotFoundError(f"projects.db not found at {db_path}")

    conn = sqlite3.connect(str(db_path), timeout=30)
    conn.row_factory = sqlite3.Row  # allow name-based column access
    return conn

def get_project_by_name(name: str) -> Optional[Dict]:
    """Get project by name, UUID, or path suffix.

    Args:
        name: Project name, project UUID, or trailing path fragment.

    Returns:
        Project row as a dict, or None if not found / db missing.
    """
    try:
        conn = get_projects_db()
        cursor = conn.execute("""
            SELECT * FROM projects
            WHERE name = ? OR project_uuid = ? OR path LIKE ?
        """, (name, name, f'%{name}'))
        row = cursor.fetchone()
        conn.close()
        return dict(row) if row else None
    except FileNotFoundError:
        return None

def get_project_tree(name: str) -> List[Dict]:
    """Get project and all children (for monorepos/submodules).

    Args:
        name: Parent project name or UUID.

    Returns:
        List of project dicts ordered by depth then name; empty if the
        parent is not found. Falls back to just the parent if the
        recursive query fails (e.g. missing parent_project_id column).
    """
    parent = get_project_by_name(name)
    if not parent:
        return []

    parent_id = parent.get('id')
    if not parent_id:
        return [parent]

    try:
        conn = get_projects_db()
        cursor = conn.execute("""
            WITH RECURSIVE project_tree AS (
                SELECT id, project_uuid, name, path, 0 as depth
                FROM projects
                WHERE id = ?

                UNION ALL

                SELECT p.id, p.project_uuid, p.name, p.path, pt.depth + 1
                FROM projects p
                INNER JOIN project_tree pt ON p.parent_project_id = pt.id
            )
            SELECT * FROM project_tree
            ORDER BY depth, name
        """, (parent_id,))
        results = [dict(row) for row in cursor.fetchall()]
        conn.close()
        return results
    except sqlite3.OperationalError:
        # Schema may predate parent_project_id; degrade to single project.
        return [parent]

def get_all_projects() -> List[Dict]:
    """Get all registered (active) projects, ordered by name.

    Returns:
        List of project dicts; empty if projects.db is missing.
    """
    try:
        conn = get_projects_db()
        cursor = conn.execute("""
            SELECT id, project_uuid, name, path
            FROM projects
            WHERE status = 'active'
            ORDER BY name
        """)
        results = [dict(row) for row in cursor.fetchall()]
        conn.close()
        return results
    except FileNotFoundError:
        return []

# =============================================================================
# FTS Search (J.15.5.4)
# =============================================================================

def search_project_fts(
    query: str,
    project_ids: List[int],
    limit: int = 20
) -> List[SearchResult]:
    """
    Full-text search in project_embeddings using FTS5.

    Falls back to a LIKE scan when the FTS5 index is absent or the
    MATCH query is invalid.

    Args:
        query: Search query
        project_ids: List of project IDs to search
        limit: Maximum results

    Returns:
        List of SearchResult with FTS scores (score_type 'fts' or 'like')
    """
    if not project_ids:
        return []

    try:
        conn = get_projects_db()
    except FileNotFoundError:
        return []

    results = []

    try:
        # Check if FTS index exists
        try:
            cursor = conn.execute("""
                SELECT name FROM sqlite_master
                WHERE type='table' AND name='project_embeddings_fts'
            """)
            fts_exists = cursor.fetchone() is not None
        except sqlite3.Error:
            fts_exists = False

        placeholders = ','.join(['?' for _ in project_ids])

        if fts_exists:
            # Use FTS5 for efficient search. bm25() returns lower-is-better
            # (negative) scores, hence ORDER BY score and abs() below.
            try:
                cursor = conn.execute(f"""
                    SELECT
                        pe.project_id,
                        p.name as project_name,
                        ch.relative_path as file_path,
                        pe.chunk_text,
                        pe.chunk_index,
                        pe.start_line,
                        pe.end_line,
                        pe.content_type,
                        pe.language,
                        bm25(project_embeddings_fts) as score
                    FROM project_embeddings_fts fts
                    JOIN project_embeddings pe ON pe.id = fts.rowid
                    JOIN content_hashes ch ON pe.content_hash_id = ch.id
                    JOIN projects p ON pe.project_id = p.id
                    WHERE fts.chunk_text MATCH ?
                    AND pe.project_id IN ({placeholders})
                    ORDER BY score
                    LIMIT ?
                """, [query] + project_ids + [limit])

                for row in cursor.fetchall():
                    results.append(SearchResult(
                        project_id=row['project_id'],
                        project_name=row['project_name'],
                        file_path=row['file_path'],
                        chunk_text=row['chunk_text'],
                        chunk_index=row['chunk_index'],
                        start_line=row['start_line'],
                        end_line=row['end_line'],
                        content_type=row['content_type'],
                        language=row['language'],
                        score=abs(row['score']) if row['score'] else 0.0,
                        score_type='fts'
                    ))
            except sqlite3.OperationalError:
                pass  # FTS query failed, fall through to LIKE

        # Fallback to LIKE search
        if not results:
            cursor = conn.execute(f"""
                SELECT
                    pe.project_id,
                    p.name as project_name,
                    ch.relative_path as file_path,
                    pe.chunk_text,
                    pe.chunk_index,
                    pe.start_line,
                    pe.end_line,
                    pe.content_type,
                    pe.language
                FROM project_embeddings pe
                JOIN content_hashes ch ON pe.content_hash_id = ch.id
                JOIN projects p ON pe.project_id = p.id
                WHERE pe.chunk_text LIKE ?
                AND pe.project_id IN ({placeholders})
                ORDER BY pe.id DESC
                LIMIT ?
            """, [f'%{query}%'] + project_ids + [limit])

            for i, row in enumerate(cursor.fetchall()):
                results.append(SearchResult(
                    project_id=row['project_id'],
                    project_name=row['project_name'],
                    file_path=row['file_path'],
                    chunk_text=row['chunk_text'],
                    chunk_index=row['chunk_index'],
                    start_line=row['start_line'],
                    end_line=row['end_line'],
                    content_type=row['content_type'],
                    language=row['language'],
                    score=1.0 / (i + 1),  # Simple rank-based score
                    score_type='like'
                ))
    finally:
        # Ensure the connection is released even if a query raises.
        conn.close()

    return results

# =============================================================================
# Semantic Search (J.15.5.4)
# =============================================================================

def cosine_similarity(a: bytes, b: bytes) -> float:
    """Compute cosine similarity between two embedding byte arrays.

    Args:
        a: First embedding as raw float32 bytes.
        b: Second embedding as raw float32 bytes (same length as a).

    Returns:
        Cosine similarity in [-1, 1]; 0.0 if either vector is zero.
    """
    import numpy as np

    vec_a = np.frombuffer(a, dtype=np.float32)
    vec_b = np.frombuffer(b, dtype=np.float32)

    dot = np.dot(vec_a, vec_b)
    norm_a = np.linalg.norm(vec_a)
    norm_b = np.linalg.norm(vec_b)

    if norm_a == 0 or norm_b == 0:
        return 0.0

    return float(dot / (norm_a * norm_b))

def search_project_semantic(
    query: str,
    project_ids: List[int],
    limit: int = 20,
    similarity_threshold: float = DEFAULT_SIMILARITY_THRESHOLD
) -> List[SearchResult]:
    """
    Semantic search in project_embeddings using vector similarity.

    Args:
        query: Search query
        project_ids: List of project IDs to search
        limit: Maximum results
        similarity_threshold: Minimum similarity score

    Returns:
        List of SearchResult with similarity scores; empty when the
        embedding model or projects.db is unavailable.
    """
    if not project_ids:
        return []

    model = get_embedding_model()
    if not model.available:
        return []

    try:
        conn = get_projects_db()
    except FileNotFoundError:
        return []

    # Encode query
    try:
        query_embedding = model.encode(query)
    except Exception:
        conn.close()  # don't leak the connection on encode failure
        return []

    # Get all embeddings for target projects
    placeholders = ','.join(['?' for _ in project_ids])
    cursor = conn.execute(f"""
        SELECT
            pe.id,
            pe.project_id,
            p.name as project_name,
            ch.relative_path as file_path,
            pe.chunk_text,
            pe.chunk_index,
            pe.start_line,
            pe.end_line,
            pe.content_type,
            pe.language,
            pe.embedding
        FROM project_embeddings pe
        JOIN content_hashes ch ON pe.content_hash_id = ch.id
        JOIN projects p ON pe.project_id = p.id
        WHERE pe.project_id IN ({placeholders})
        AND pe.embedding IS NOT NULL
    """, project_ids)

    # Compute similarities (brute-force scan over stored vectors)
    scored_results = []
    for row in cursor.fetchall():
        if row['embedding']:
            similarity = cosine_similarity(query_embedding, row['embedding'])
            if similarity >= similarity_threshold:
                scored_results.append((similarity, row))

    conn.close()

    # Sort by similarity and return top results
    scored_results.sort(key=lambda x: x[0], reverse=True)

    results = []
    for similarity, row in scored_results[:limit]:
        results.append(SearchResult(
            project_id=row['project_id'],
            project_name=row['project_name'],
            file_path=row['file_path'],
            chunk_text=row['chunk_text'],
            chunk_index=row['chunk_index'],
            start_line=row['start_line'],
            end_line=row['end_line'],
            content_type=row['content_type'],
            language=row['language'],
            score=similarity,
            score_type='semantic'
        ))

    return results

# =============================================================================
# RRF Fusion (J.15.5.5)
# =============================================================================

def rrf_merge(
    fts_results: List[SearchResult],
    semantic_results: List[SearchResult],
    fts_weight: float = DEFAULT_FTS_WEIGHT,
    semantic_weight: float = DEFAULT_SEMANTIC_WEIGHT,
    limit: int = 20
) -> List[SearchResult]:
    """
    Merge FTS and semantic results using Reciprocal Rank Fusion.

    RRF formula: score = Σ (weight_i / (k + rank_i))

    Args:
        fts_results: Results from FTS search
        semantic_results: Results from semantic search
        fts_weight: Weight for FTS results (default 0.4)
        semantic_weight: Weight for semantic results (default 0.6)
        limit: Maximum merged results

    Returns:
        Merged list of SearchResult with RRF scores (score_type 'rrf')
    """
    # Create unique key for each result: a chunk is identified by its
    # project, file and chunk position.
    def result_key(r: SearchResult) -> str:
        return f"{r.project_id}:{r.file_path}:{r.chunk_index}"

    # Calculate RRF scores
    scores: Dict[str, float] = {}
    result_map: Dict[str, SearchResult] = {}

    # Add FTS results
    for rank, result in enumerate(fts_results):
        key = result_key(result)
        rrf_score = fts_weight / (RRF_K + rank)
        scores[key] = scores.get(key, 0) + rrf_score
        result_map[key] = result

    # Add semantic results (keep the first-seen SearchResult per key)
    for rank, result in enumerate(semantic_results):
        key = result_key(result)
        rrf_score = semantic_weight / (RRF_K + rank)
        scores[key] = scores.get(key, 0) + rrf_score
        if key not in result_map:
            result_map[key] = result

    # Sort by RRF score
    sorted_keys = sorted(scores.keys(), key=lambda k: scores[k], reverse=True)

    # Build final results with the fused score attached
    results = []
    for key in sorted_keys[:limit]:
        result = result_map[key]
        results.append(SearchResult(
            project_id=result.project_id,
            project_name=result.project_name,
            file_path=result.file_path,
            chunk_text=result.chunk_text,
            chunk_index=result.chunk_index,
            start_line=result.start_line,
            end_line=result.end_line,
            content_type=result.content_type,
            language=result.language,
            score=scores[key],
            score_type='rrf'
        ))

    return results

# =============================================================================
# Unified Search Functions (J.15.5.1-3)
# =============================================================================

def search_project_code(
    query: str,
    project_name: str,
    limit: int = 20,
    search_mode: str = 'hybrid',
    similarity_threshold: float = DEFAULT_SIMILARITY_THRESHOLD,
    fts_weight: float = DEFAULT_FTS_WEIGHT,
    semantic_weight: float = DEFAULT_SEMANTIC_WEIGHT
) -> ProjectSearchResults:
    """
    J.15.5.1: Search within a single project.

    Args:
        query: Search query
        project_name: Project name or UUID
        limit: Maximum results
        search_mode: 'fts', 'semantic', or 'hybrid' (default)
        similarity_threshold: Minimum similarity for semantic
        fts_weight: Weight for FTS in hybrid mode
        semantic_weight: Weight for semantic in hybrid mode

    Returns:
        ProjectSearchResults
    """
    project = get_project_by_name(project_name)
    if not project:
        return ProjectSearchResults(
            query=query,
            search_type='single',
            total_results=0,
            errors=[f"Project not found: {project_name}"]
        )

    project_ids = [project['id']]
    project_names = [project['name']]

    results = _search_with_mode(
        query=query,
        project_ids=project_ids,
        limit=limit,
        search_mode=search_mode,
        similarity_threshold=similarity_threshold,
        fts_weight=fts_weight,
        semantic_weight=semantic_weight
    )

    return ProjectSearchResults(
        query=query,
        search_type='single',
        total_results=len(results),
        results=results,
        projects_searched=project_names
    )

def search_project_tree_code(
    query: str,
    project_name: str,
    limit: int = 20,
    search_mode: str = 'hybrid',
    similarity_threshold: float = DEFAULT_SIMILARITY_THRESHOLD,
    fts_weight: float = DEFAULT_FTS_WEIGHT,
    semantic_weight: float = DEFAULT_SEMANTIC_WEIGHT
) -> ProjectSearchResults:
    """
    J.15.5.3: Search project and all children (monorepos/submodules).

    Args:
        query: Search query
        project_name: Parent project name or UUID
        limit: Maximum results
        search_mode: 'fts', 'semantic', or 'hybrid' (default)
        similarity_threshold: Minimum similarity for semantic
        fts_weight: Weight for FTS in hybrid mode
        semantic_weight: Weight for semantic in hybrid mode

    Returns:
        ProjectSearchResults
    """
    projects = get_project_tree(project_name)
    if not projects:
        return ProjectSearchResults(
            query=query,
            search_type='tree',
            total_results=0,
            errors=[f"Project not found: {project_name}"]
        )

    project_ids = [p['id'] for p in projects]
    project_names = [p['name'] for p in projects]

    results = _search_with_mode(
        query=query,
        project_ids=project_ids,
        limit=limit,
        search_mode=search_mode,
        similarity_threshold=similarity_threshold,
        fts_weight=fts_weight,
        semantic_weight=semantic_weight
    )

    return ProjectSearchResults(
        query=query,
        search_type='tree',
        total_results=len(results),
        results=results,
        projects_searched=project_names
    )

def search_all_projects_code(
    query: str,
    limit: int = 20,
    search_mode: str = 'hybrid',
    similarity_threshold: float = DEFAULT_SIMILARITY_THRESHOLD,
    fts_weight: float = DEFAULT_FTS_WEIGHT,
    semantic_weight: float = DEFAULT_SEMANTIC_WEIGHT
) -> ProjectSearchResults:
    """
    J.15.5.2: Search across all registered projects.

    Args:
        query: Search query
        limit: Maximum results
        search_mode: 'fts', 'semantic', or 'hybrid' (default)
        similarity_threshold: Minimum similarity for semantic
        fts_weight: Weight for FTS in hybrid mode
        semantic_weight: Weight for semantic in hybrid mode

    Returns:
        ProjectSearchResults
    """
    projects = get_all_projects()
    if not projects:
        return ProjectSearchResults(
            query=query,
            search_type='all',
            total_results=0,
            errors=["No registered projects found"]
        )

    project_ids = [p['id'] for p in projects]
    project_names = [p['name'] for p in projects]

    results = _search_with_mode(
        query=query,
        project_ids=project_ids,
        limit=limit,
        search_mode=search_mode,
        similarity_threshold=similarity_threshold,
        fts_weight=fts_weight,
        semantic_weight=semantic_weight
    )

    return ProjectSearchResults(
        query=query,
        search_type='all',
        total_results=len(results),
        results=results,
        projects_searched=project_names
    )

def _search_with_mode(
    query: str,
    project_ids: List[int],
    limit: int,
    search_mode: str,
    similarity_threshold: float,
    fts_weight: float,
    semantic_weight: float
) -> List[SearchResult]:
    """Execute search with the specified mode.

    'fts' and 'semantic' run a single backend; 'hybrid' (the default)
    over-fetches both backends (2x limit) and fuses them with RRF,
    falling back to FTS alone when semantic search is unavailable.
    """

    if search_mode == 'fts':
        return search_project_fts(query, project_ids, limit)

    elif search_mode == 'semantic':
        return search_project_semantic(
            query, project_ids, limit, similarity_threshold
        )

    else:  # hybrid (default)
        fts_results = search_project_fts(query, project_ids, limit * 2)
        semantic_results = search_project_semantic(
            query, project_ids, limit * 2, similarity_threshold
        )

        if not semantic_results:
            # Fall back to FTS only if semantic unavailable
            return fts_results[:limit]

        return rrf_merge(
            fts_results, semantic_results,
            fts_weight, semantic_weight, limit
        )

# =============================================================================
# Formatting
# =============================================================================

def format_search_results(results: ProjectSearchResults, verbose: bool = False) -> str:
    """Format search results for display.

    Args:
        results: Search results to render.
        verbose: Currently unused; kept for interface stability.

    Returns:
        Multi-line human-readable report (top 20 results shown).
    """
    lines = []

    # Header
    search_type_labels = {
        'single': '🔍 Single Project Search',
        'tree': '🌳 Project Tree Search',
        'all': '🌐 Cross-Project Search'
    }
    lines.append(f"\n{search_type_labels.get(results.search_type, 'Search')}")
    lines.append(f"Query: \"{results.query}\"")
    lines.append(f"Projects: {', '.join(results.projects_searched[:5])}" +
                 (f"... (+{len(results.projects_searched)-5})" if len(results.projects_searched) > 5 else ""))
    lines.append(f"Results: {results.total_results}")
    lines.append("")

    # Errors
    if results.errors:
        for err in results.errors:
            lines.append(f"⚠️ {err}")
        lines.append("")

    # Results (display cap: 20)
    for i, r in enumerate(results.results[:20], 1):
        lines.append(f"{i}. [{r.project_name}] {r.file_path}:{r.start_line}-{r.end_line}")
        lines.append(f" Score: {r.score:.4f} ({r.score_type}) | Type: {r.content_type}")

        # Truncate chunk text for display
        text = r.chunk_text[:200].replace('\n', ' ')
        if len(r.chunk_text) > 200:
            text += '...'
        lines.append(f" {text}")
        lines.append("")

    if results.total_results > 20:
        lines.append(f" ... {results.total_results - 20} more results")

    return '\n'.join(lines)

# =============================================================================
# FTS Index Creation
# =============================================================================

def create_fts_index():
    """Create FTS5 index on project_embeddings for fast text search.

    Creates an external-content FTS5 table plus insert/delete/update
    triggers that keep it in sync with project_embeddings. Idempotent:
    does nothing if the index already exists.

    Returns:
        True on success (or if the index already exists), False on error.
    """
    try:
        conn = get_projects_db()
    except FileNotFoundError:
        print("Error: projects.db not found")
        return False

    try:
        # Check if FTS table exists
        cursor = conn.execute("""
            SELECT name FROM sqlite_master
            WHERE type='table' AND name='project_embeddings_fts'
        """)
        if cursor.fetchone():
            print("FTS index already exists")
            conn.close()
            return True

        # Create FTS5 virtual table (external content: rows live in
        # project_embeddings; the FTS table only stores the index)
        conn.execute("""
            CREATE VIRTUAL TABLE project_embeddings_fts
            USING fts5(chunk_text, content='project_embeddings', content_rowid='id')
        """)

        # Populate FTS index
        conn.execute("""
            INSERT INTO project_embeddings_fts(rowid, chunk_text)
            SELECT id, chunk_text FROM project_embeddings
        """)

        # Create triggers to keep FTS in sync
        conn.execute("""
            CREATE TRIGGER project_embeddings_ai AFTER INSERT ON project_embeddings BEGIN
                INSERT INTO project_embeddings_fts(rowid, chunk_text)
                VALUES (new.id, new.chunk_text);
            END
        """)

        conn.execute("""
            CREATE TRIGGER project_embeddings_ad AFTER DELETE ON project_embeddings BEGIN
                INSERT INTO project_embeddings_fts(project_embeddings_fts, rowid, chunk_text)
                VALUES ('delete', old.id, old.chunk_text);
            END
        """)

        conn.execute("""
            CREATE TRIGGER project_embeddings_au AFTER UPDATE ON project_embeddings BEGIN
                INSERT INTO project_embeddings_fts(project_embeddings_fts, rowid, chunk_text)
                VALUES ('delete', old.id, old.chunk_text);
                INSERT INTO project_embeddings_fts(rowid, chunk_text)
                VALUES (new.id, new.chunk_text);
            END
        """)

        conn.commit()
        print("✅ FTS index created successfully")

        # Get stats
        count = conn.execute("SELECT COUNT(*) FROM project_embeddings_fts").fetchone()[0]
        print(f" Indexed {count:,} chunks")

        conn.close()
        return True

    except Exception as e:
        print(f"Error creating FTS index: {e}")
        conn.close()
        return False

# =============================================================================
# CLI
# =============================================================================

def main():
    """CLI entry point: parse arguments, run the requested search, print."""
    import argparse

    parser = argparse.ArgumentParser(
        description="Search project source code and documentation (J.15.5)",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Single project search (J.15.5.1)
  python3 project_search.py --project coditect-core "authentication"

  # Cross-project search (J.15.5.2)
  python3 project_search.py --all-projects "database migration"

  # Project tree search (J.15.5.3)
  python3 project_search.py --project-tree rollout-master "API"

  # Search modes
  python3 project_search.py --project coditect-core --mode fts "error"
  python3 project_search.py --project coditect-core --mode semantic "auth patterns"
  python3 project_search.py --project coditect-core --mode hybrid "config"  # default

  # Create FTS index
  python3 project_search.py --create-fts-index
"""
    )

    # Search scope (mutually exclusive)
    scope_group = parser.add_mutually_exclusive_group()
    scope_group.add_argument('--project', '-p', metavar='NAME',
                             help='Search within a single project (J.15.5.1)')
    scope_group.add_argument('--project-tree', '-t', metavar='NAME',
                             help='Search project and children (J.15.5.3)')
    scope_group.add_argument('--all-projects', '-a', action='store_true',
                             help='Search all registered projects (J.15.5.2)')

    # Query
    parser.add_argument('query', nargs='?',
                        help='Search query')

    # Search options
    parser.add_argument('--mode', '-m', choices=['fts', 'semantic', 'hybrid'],
                        default='hybrid',
                        help='Search mode (default: hybrid)')
    parser.add_argument('--limit', '-l', type=int, default=20,
                        help='Maximum results (default: 20)')
    parser.add_argument('--threshold', type=float, default=DEFAULT_SIMILARITY_THRESHOLD,
                        help=f'Semantic similarity threshold (default: {DEFAULT_SIMILARITY_THRESHOLD})')
    parser.add_argument('--fts-weight', type=float, default=DEFAULT_FTS_WEIGHT,
                        help=f'FTS weight in hybrid mode (default: {DEFAULT_FTS_WEIGHT})')
    parser.add_argument('--semantic-weight', type=float, default=DEFAULT_SEMANTIC_WEIGHT,
                        help=f'Semantic weight in hybrid mode (default: {DEFAULT_SEMANTIC_WEIGHT})')

    # Output options
    parser.add_argument('--json', action='store_true',
                        help='Output as JSON')
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='Verbose output')

    # Admin commands
    parser.add_argument('--create-fts-index', action='store_true',
                        help='Create FTS5 index for project embeddings')

    args = parser.parse_args()

    # Admin: Create FTS index
    if args.create_fts_index:
        success = create_fts_index()
        sys.exit(0 if success else 1)

    # Require query for search
    if not args.query and (args.project or args.project_tree or args.all_projects):
        parser.error("Query required for search")

    # Execute search
    if args.project:
        results = search_project_code(
            query=args.query,
            project_name=args.project,
            limit=args.limit,
            search_mode=args.mode,
            similarity_threshold=args.threshold,
            fts_weight=args.fts_weight,
            semantic_weight=args.semantic_weight
        )
    elif args.project_tree:
        results = search_project_tree_code(
            query=args.query,
            project_name=args.project_tree,
            limit=args.limit,
            search_mode=args.mode,
            similarity_threshold=args.threshold,
            fts_weight=args.fts_weight,
            semantic_weight=args.semantic_weight
        )
    elif args.all_projects:
        results = search_all_projects_code(
            query=args.query,
            limit=args.limit,
            search_mode=args.mode,
            similarity_threshold=args.threshold,
            fts_weight=args.fts_weight,
            semantic_weight=args.semantic_weight
        )
    else:
        parser.print_help()
        return

    # Output
    if args.json:
        output = {
            'query': results.query,
            'search_type': results.search_type,
            'total_results': results.total_results,
            'projects_searched': results.projects_searched,
            'errors': results.errors,
            'results': [
                {
                    'project_name': r.project_name,
                    'file_path': r.file_path,
                    'chunk_text': r.chunk_text,
                    'chunk_index': r.chunk_index,
                    'start_line': r.start_line,
                    'end_line': r.end_line,
                    'content_type': r.content_type,
                    'language': r.language,
                    'score': r.score,
                    'score_type': r.score_type
                }
                for r in results.results
            ]
        }
        print(json.dumps(output, indent=2))
    else:
        print(format_search_results(results, args.verbose))

    # Exit code: nonzero when any search error occurred
    if results.errors:
        sys.exit(1)

# Fix: mangled guard was `if name == "main"` — must use the module dunders.
if __name__ == "__main__":
    main()