
Technical Design Document - QR Contact Card Generator

Document Specification Block

Document: technical-design-document
Version: 1.0.0
Purpose: Define technical implementation details, deployment architecture,
and operational considerations for the QR Contact Card Generator
Audience: Engineers, DevOps, Site Reliability Engineers
Date Created: 2025-10-03
Status: INITIAL
Type: SINGLE
Score Required: 80% (32/40 points)

Executive Summary

This document provides the technical implementation blueprint for the QR Contact Card Generator platform, focusing on deployment architecture, infrastructure configuration, operational considerations, and agent-based development patterns that enable autonomous system construction and maintenance.

Table of Contents

  1. Infrastructure Architecture
  2. Deployment Configuration
  3. Database Design
  4. API Specifications
  5. Security Implementation
  6. Performance Optimization
  7. Monitoring & Observability
  8. Agent Development Patterns

Infrastructure Architecture

Multi-Region Deployment

Infrastructure as Code

# terraform/main.tf
terraform {
  required_providers {
    google = {
      source  = "hashicorp/google"
      version = "~> 4.0"
    }
  }
}

# Single anycast IP fronting the global HTTPS load balancer.
resource "google_compute_global_address" "default" {
  name = "qr-generator-global-ip"
}

# One Cloud Run service per serving region (fan-out via for_each).
resource "google_cloud_run_service" "api" {
  for_each = toset(["us-central1", "europe-west1", "asia-southeast1"])

  name     = "qr-generator-api-${each.key}"
  location = each.key

  template {
    spec {
      containers {
        image = "gcr.io/${var.project_id}/qr-api:${var.image_tag}"

        resources {
          limits = {
            cpu    = "1000m"
            memory = "512Mi"
          }
        }

        env {
          name  = "DATABASE_URL"
          value = var.db_connection_strings[each.key]
        }

        env {
          name  = "REDIS_URL"
          value = var.redis_urls[each.key]
        }
      }

      # 80 concurrent requests per container, 30 s request deadline.
      container_concurrency = 80
      timeout_seconds       = 30
    }

    metadata {
      annotations = {
        # minScale=1 keeps one instance warm to avoid cold starts.
        "autoscaling.knative.dev/minScale"  = "1"
        "autoscaling.knative.dev/maxScale"  = "100"
        "run.googleapis.com/cpu-throttling" = "false"
      }
    }
  }

  # Route all traffic to the most recently deployed revision.
  traffic {
    percent         = 100
    latest_revision = true
  }
}

Deployment Configuration

Container Image Build

# backend/Dockerfile
# Multi-stage build: compile in the full Rust toolchain image, run from distroless.
FROM rust:1.73 AS builder

WORKDIR /app
# BUG FIX: the Cargo manifest is "Cargo.toml" (capital C). "cargo.toml" would
# fail on the case-sensitive filesystems used by Linux build hosts.
COPY Cargo.toml Cargo.lock ./
COPY src ./src

# Build with release optimizations
RUN cargo build --release

# Runtime stage: distroless/cc ships only libc/libgcc — no shell, no package
# manager — which minimizes image size and attack surface.
FROM gcr.io/distroless/cc-debian12
COPY --from=builder /app/target/release/qr-api /

ENTRYPOINT ["/qr-api"]

CI/CD Pipeline

# .github/workflows/deploy.yml
# CI/CD: tests run on every push/PR to main; deployment runs only for pushes
# to main after tests pass. (Indentation restored — YAML is whitespace-significant.)
name: Deploy to GCP

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions-rust-lang/setup-rust-toolchain@v1
      - run: cargo test --all-features
      - run: cargo clippy -- -D warnings

  deploy:
    needs: test
    # Gate: PRs trigger the test job above but never deploy.
    if: github.ref == 'refs/heads/main'
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - id: 'auth'
        uses: 'google-github-actions/auth@v1'
        with:
          credentials_json: '${{ secrets.GCP_SA_KEY }}'

      # NOTE(review): $PROJECT_ID is read below but never defined in this
      # workflow — set it via `env:` or a repository variable. TODO confirm.
      - name: Build and Push
        run: |
          gcloud builds submit \
            --tag gcr.io/$PROJECT_ID/qr-api:$GITHUB_SHA

      - name: Deploy to Cloud Run
        run: |
          for REGION in us-central1 europe-west1 asia-southeast1; do
            gcloud run deploy qr-generator-api-$REGION \
              --image gcr.io/$PROJECT_ID/qr-api:$GITHUB_SHA \
              --region $REGION \
              --platform managed
          done

Database Design

Schema Migrations

-- migrations/001_initial_schema.sql
-- NOTE: gen_random_uuid() is built in on PostgreSQL 13+; pgcrypto provides it
-- on older servers. uuid-ossp is retained for any tooling that depends on it,
-- but is not what supplies gen_random_uuid().
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
CREATE EXTENSION IF NOT EXISTS pgcrypto;

-- Account records. email_normalized is a generated column so uniqueness and
-- lookups are case-insensitive without application-side lowering.
CREATE TABLE users (
    user_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    email VARCHAR(255) UNIQUE NOT NULL,
    email_normalized VARCHAR(255) GENERATED ALWAYS AS (LOWER(email)) STORED,
    password_hash VARCHAR(255) NOT NULL,
    created_at TIMESTAMPTZ DEFAULT NOW(),
    updated_at TIMESTAMPTZ DEFAULT NOW(),
    email_verified BOOLEAN DEFAULT FALSE,
    last_login TIMESTAMPTZ,
    marketing_consent BOOLEAN DEFAULT FALSE,
    marketing_consent_at TIMESTAMPTZ
);

-- One row per published contact card; slug is the public URL identifier.
CREATE TABLE contact_cards (
    card_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    user_id UUID NOT NULL REFERENCES users(user_id) ON DELETE CASCADE,
    slug VARCHAR(100) UNIQUE NOT NULL,
    full_name VARCHAR(255) NOT NULL,
    organization VARCHAR(255),
    title VARCHAR(255),
    email VARCHAR(255) NOT NULL,
    phone VARCHAR(50),
    website VARCHAR(500),
    qr_error_correction VARCHAR(10) DEFAULT 'M',
    qr_size INTEGER DEFAULT 512,
    created_at TIMESTAMPTZ DEFAULT NOW(),
    updated_at TIMESTAMPTZ DEFAULT NOW(),
    view_count INTEGER DEFAULT 0,
    scan_count INTEGER DEFAULT 0,
    CONSTRAINT valid_qr_size CHECK (qr_size BETWEEN 128 AND 2048)
);

-- Indexes for performance
CREATE INDEX idx_users_email_normalized ON users(email_normalized);
CREATE INDEX idx_users_created_at ON users(created_at);
CREATE INDEX idx_cards_user_id ON contact_cards(user_id);
CREATE INDEX idx_cards_slug ON contact_cards(slug);
CREATE INDEX idx_cards_created_at ON contact_cards(created_at);

-- Viral tracking tables
CREATE TABLE viral_invitations (
    invitation_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    sender_user_id UUID REFERENCES users(user_id),
    recipient_email VARCHAR(255) NOT NULL,
    card_id UUID REFERENCES contact_cards(card_id),
    channel VARCHAR(50) NOT NULL, -- email, sms, whatsapp, linkedin
    sent_at TIMESTAMPTZ DEFAULT NOW(),
    opened_at TIMESTAMPTZ,
    clicked_at TIMESTAMPTZ,
    converted_at TIMESTAMPTZ,
    conversion_user_id UUID REFERENCES users(user_id)
);

-- BUG FIX: PostgreSQL does not allow expressions such as sent_at::DATE inside
-- a table-level UNIQUE constraint. The "one invite per sender/recipient/day"
-- rule is enforced with a unique expression index instead. The cast goes via
-- AT TIME ZONE 'UTC' because a bare timestamptz::date depends on the session
-- TimeZone setting and is therefore not IMMUTABLE (index expressions must be).
CREATE UNIQUE INDEX uq_invitations_sender_recipient_day
    ON viral_invitations (sender_user_id, recipient_email,
                          ((sent_at AT TIME ZONE 'UTC')::date));

CREATE INDEX idx_invitations_sender ON viral_invitations(sender_user_id);
CREATE INDEX idx_invitations_recipient ON viral_invitations(recipient_email);
-- Partial index: conversion analytics only ever scan converted rows.
CREATE INDEX idx_invitations_converted ON viral_invitations(converted_at) WHERE converted_at IS NOT NULL;

Connection Pooling Configuration

// backend/src/db/pool.rs
use sqlx::{postgres::PgPoolOptions, Pool, Postgres};
use std::time::Duration;

/// Builds the shared Postgres connection pool for one API instance.
///
/// Sizing: 4 min / 32 max connections per instance. With Cloud Run allowed to
/// scale to 100 instances per region, worst-case DB connections are 32 x 100
/// per region — verify this fits the Cloud SQL tier's connection limit.
///
/// # Errors
/// Returns `sqlx::Error` if the initial connection cannot be established
/// within the 5-second timeout.
// NOTE(review): `connect_timeout` was renamed `acquire_timeout` in sqlx 0.7 —
// confirm the pinned sqlx version before upgrading.
pub async fn create_pool(database_url: &str) -> Result<Pool<Postgres>, sqlx::Error> {
PgPoolOptions::new()
.max_connections(32)
.min_connections(4)
.connect_timeout(Duration::from_secs(5))
// Recycle connections idle for 5 minutes; force-refresh every connection
// after 1 hour so server-side config changes/failovers are picked up.
.idle_timeout(Duration::from_secs(300))
.max_lifetime(Duration::from_secs(3600))
.connect(database_url)
.await
}

API Specifications

OpenAPI Schema

# api/openapi.yaml
# Indentation restored — YAML is whitespace-significant, so the flattened form
# was not a parseable OpenAPI document.
openapi: 3.0.3
info:
  title: QR Contact Card Generator API
  version: 1.0.0
  description: |
    REST API for QR-based digital business card generation with viral mechanics.
    Designed for agent-based consumption and autonomous operation.

servers:
  - url: https://api.qrcontact.app/v1
    description: Production API

paths:
  /auth/register:
    post:
      summary: Register new user
      operationId: registerUser
      tags: [Authentication]
      requestBody:
        required: true
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/RegisterRequest'
      responses:
        '201':
          description: User created successfully
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/AuthResponse'
        '409':
          description: Email already exists

  /cards:
    post:
      summary: Create contact card
      operationId: createCard
      tags: [Cards]
      security:
        - bearerAuth: []
      requestBody:
        required: true
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/CreateCardRequest'
      responses:
        '201':
          description: Card created successfully
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ContactCard'

  /cards/{cardId}/share:
    post:
      summary: Share card via multiple channels
      operationId: shareCard
      tags: [Viral]
      security:
        - bearerAuth: []
      parameters:
        - name: cardId
          in: path
          required: true
          schema:
            type: string
            format: uuid
      requestBody:
        required: true
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/ShareRequest'
      responses:
        '202':
          description: Share request accepted
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ShareResponse'

components:
  # FIX: bearerAuth was referenced by the card endpoints but never declared.
  securitySchemes:
    bearerAuth:
      type: http
      scheme: bearer
      bearerFormat: JWT

  # NOTE(review): AuthResponse, CreateCardRequest, ShareRequest and
  # ShareResponse are referenced above but not defined in this excerpt —
  # confirm they exist in the full specification.
  schemas:
    RegisterRequest:
      type: object
      required: [email, password, marketingConsent]
      properties:
        email:
          type: string
          format: email
        password:
          type: string
          minLength: 12
        marketingConsent:
          type: boolean
          description: User consent for marketing communications

    ContactCard:
      type: object
      properties:
        cardId:
          type: string
          format: uuid
        slug:
          type: string
          pattern: '^[a-z0-9-]+$'
        fullName:
          type: string
        qrCodeUrl:
          type: string
          format: uri
          description: Data URL of generated QR code

Security Implementation

Authentication & Authorization

// backend/src/auth/jwt.rs
use jsonwebtoken::{decode, encode, DecodingKey, EncodingKey, Header, Validation};
use serde::{Deserialize, Serialize};
use std::time::{SystemTime, UNIX_EPOCH};

/// JWT claim set carried by every API access token.
#[derive(Debug, Serialize, Deserialize)]
pub struct Claims {
    pub sub: String,   // user_id
    pub email: String,
    pub exp: usize,    // expiration timestamp (seconds since UNIX epoch)
    pub iat: usize,    // issued-at timestamp (seconds since UNIX epoch)
}

/// Issues a signed access token (default header = HS256) for the given user,
/// valid for 24 hours from issuance.
///
/// # Errors
/// Returns `jsonwebtoken::errors::Error` if claim serialization or signing fails.
///
/// # Panics
/// Panics if the system clock reads earlier than the UNIX epoch — a broken-host
/// invariant, not a recoverable error, hence `expect` rather than `unwrap`.
pub fn create_token(user_id: &str, email: &str, secret: &str) -> Result<String, jsonwebtoken::errors::Error> {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is set before the UNIX epoch")
        .as_secs() as usize;

    let claims = Claims {
        sub: user_id.to_string(),
        email: email.to_string(),
        iat: now,
        exp: now + 3600 * 24, // 24 hours
    };

    encode(
        &Header::default(),
        &claims,
        &EncodingKey::from_secret(secret.as_ref()),
    )
}

Rate Limiting Implementation

// backend/src/middleware/rate_limit.rs
use axum::{extract::State, http::StatusCode, middleware::Next, response::Response};
use redis::{AsyncCommands, Client};
use std::time::Duration;

/// Fixed-window rate limiter: at most 50 requests per client IP per 60-second
/// window, backed by a Redis counter.
///
/// NOTE(review): `Request` and `Body` are used but not imported in this
/// excerpt, and `get_client_ip` is not defined here — confirm both come from a
/// sibling module. Also note INCR followed by EXPIRE is not atomic: if the
/// process dies between the two calls, the key never gets a TTL and that
/// client is throttled until the key is deleted manually. Consider a Lua
/// script or `SET key 0 EX 60 NX` before the INCR.
pub async fn rate_limit_middleware(
State(redis): State<Client>,
request: Request<Body>,
next: Next<Body>,
) -> Result<Response, StatusCode> {
// Fail closed with 503 when Redis is unreachable.
let mut conn = redis.get_async_connection().await
.map_err(|_| StatusCode::SERVICE_UNAVAILABLE)?;

// One counter per client IP; the window starts at the first request.
let key = format!("rate_limit:{}", get_client_ip(&request));
let count: i64 = conn.incr(&key, 1).await
.map_err(|_| StatusCode::SERVICE_UNAVAILABLE)?;

// First hit in this window: arm the 60-second expiry (see race note above).
if count == 1 {
conn.expire(&key, 60).await
.map_err(|_| StatusCode::SERVICE_UNAVAILABLE)?;
}

// Over the per-window budget: reject with 429.
if count > 50 {
return Err(StatusCode::TOO_MANY_REQUESTS);
}

Ok(next.run(request).await)
}

Performance Optimization

Caching Strategy

// backend/src/cache/multi_layer.rs
use async_trait::async_trait;
use moka::future::Cache as L1Cache;
use redis::AsyncCommands;
use serde::{de::DeserializeOwned, Serialize};
// BUG FIX: `Duration` is used below but was never imported.
use std::time::Duration;

/// Two-tier read-through cache: an in-process moka cache (L1) in front of a
/// shared Redis instance (L2). Values are stored as JSON-serialized bytes.
pub struct MultiLayerCache {
    l1: L1Cache<String, Vec<u8>>,
    l2: redis::Client,
}

impl MultiLayerCache {
    /// Creates the cache with a 10 000-entry, 5-minute-TTL L1 layer.
    ///
    /// # Panics
    /// Panics if `redis_url` is not a syntactically valid Redis URL — a
    /// deployment-configuration bug, so failing fast at startup is intended.
    pub fn new(redis_url: &str) -> Self {
        Self {
            l1: L1Cache::builder()
                .max_capacity(10_000)
                .time_to_live(Duration::from_secs(300))
                .build(),
            l2: redis::Client::open(redis_url)
                .expect("invalid Redis URL in configuration"),
        }
    }

    /// Looks `key` up in L1, then L2; an L2 hit back-fills L1.
    ///
    /// Returns `None` on a miss, a Redis/connection failure, or a JSON decode
    /// error — callers treat all three as a cache miss.
    pub async fn get<T: DeserializeOwned>(&self, key: &str) -> Option<T> {
        // L1 check.
        // NOTE(review): moka's future::Cache::get became async in moka 0.12;
        // this synchronous call implies an older pin — confirm the version.
        if let Some(bytes) = self.l1.get(key) {
            return serde_json::from_slice(&bytes).ok();
        }

        // L2 check.
        let mut conn = self.l2.get_async_connection().await.ok()?;
        let bytes: Vec<u8> = conn.get(key).await.ok()?;

        // Populate L1 so subsequent reads skip the network round-trip.
        self.l1.insert(key.to_string(), bytes.clone()).await;

        serde_json::from_slice(&bytes).ok()
    }
}

Database Query Optimization

-- Optimized query for viral coefficient calculation
-- Produces one row per weekly signup cohort over the trailing 30 days, with
--   k_factor = conversions attributed to that calendar week / cohort size.
-- NOTE(review): invitation_stats joins to cohorts purely by calendar week,
-- not by the sender's membership in that cohort — confirm this attribution
-- model is the intended definition of K-factor.
WITH invitation_stats AS (
-- Per-sender, per-week invitation and conversion counts.
SELECT
sender_user_id,
COUNT(*) as invitations_sent,
COUNT(CASE WHEN converted_at IS NOT NULL THEN 1 END) as conversions,
DATE_TRUNC('week', sent_at) as week
FROM viral_invitations
WHERE sent_at >= NOW() - INTERVAL '30 days'
GROUP BY sender_user_id, DATE_TRUNC('week', sent_at)
),
user_cohorts AS (
-- Weekly signup cohorts over the same trailing window.
SELECT
DATE_TRUNC('week', created_at) as cohort_week,
COUNT(DISTINCT user_id) as cohort_size
FROM users
WHERE created_at >= NOW() - INTERVAL '30 days'
GROUP BY DATE_TRUNC('week', created_at)
)
SELECT
uc.cohort_week,
uc.cohort_size,
-- COALESCE guards weeks with no invitation activity (LEFT JOIN produces NULLs);
-- NULLIF guards division by zero for empty denominators.
COALESCE(SUM(i.conversions), 0) as total_conversions,
COALESCE(AVG(i.conversions::FLOAT / NULLIF(i.invitations_sent, 0)), 0) as conversion_rate,
COALESCE(SUM(i.conversions)::FLOAT / NULLIF(uc.cohort_size, 0), 0) as k_factor
FROM user_cohorts uc
LEFT JOIN invitation_stats i ON i.week = uc.cohort_week
GROUP BY uc.cohort_week, uc.cohort_size
ORDER BY uc.cohort_week DESC;

Monitoring & Observability

Metrics Collection

// backend/src/metrics/mod.rs
use prometheus::{register_histogram_vec, register_int_counter_vec, HistogramVec, IntCounterVec};

lazy_static! {
pub static ref HTTP_REQUEST_DURATION: HistogramVec = register_histogram_vec!(
"http_request_duration_seconds",
"HTTP request latencies",
&["method", "endpoint", "status"]
).unwrap();

pub static ref VIRAL_EVENTS: IntCounterVec = register_int_counter_vec!(
"viral_events_total",
"Viral mechanic events",
&["event_type", "channel"]
).unwrap();

pub static ref QR_GENERATION_TIME: HistogramVec = register_histogram_vec!(
"qr_generation_duration_seconds",
"QR code generation time",
&["size", "error_correction"]
).unwrap();
}

Alert Configuration

# monitoring/alerts.yaml
# Prometheus alerting rules. Indentation restored — the flattened form was not
# valid YAML.
groups:
  - name: api_alerts
    interval: 30s
    rules:
      # Page when more than 5% of requests return 5xx for 5 consecutive minutes.
      - alert: HighErrorRate
        expr: |
          sum(rate(http_requests_total{status=~"5.."}[5m]))
          / sum(rate(http_requests_total[5m])) > 0.05
        for: 5m
        labels:
          severity: page
        annotations:
          summary: "High error rate detected"
          description: "Error rate is {{ $value | humanizePercentage }}"

      # Warn (non-paging) when the 7-day viral coefficient stays below 0.8
      # for a full day.
      - alert: ViralCoefficientDeclining
        expr: |
          viral_k_factor{period="7d"} < 0.8
        for: 24h
        labels:
          severity: warning
        annotations:
          summary: "Viral coefficient below target"
          description: "K-factor is {{ $value }}, target is 1.0+"

Agent Development Patterns

Agent Interface Specification

// agent-interface/qr-generator-agent.ts
// Capability surface the platform exposes to autonomous agents. All operations
// are asynchronous (Promise-returning).
// NOTE(review): AuthToken, ContactCardInput, ContactCard, QROptions, DataURL,
// ShareRecipient, ShareResult, TimePeriod, SystemMetrics, HealthStatus and
// DiagnosticReport are not declared in this file — confirm they are exported
// from a sibling types module.
interface QRGeneratorAgent {
// Core capabilities exposed to autonomous agents
capabilities: {
// Account lifecycle. `consent` corresponds to the marketingConsent field
// required by the registration API.
authentication: {
register: (email: string, password: string, consent: boolean) => Promise<AuthToken>;
login: (email: string, password: string) => Promise<AuthToken>;
refresh: (token: string) => Promise<AuthToken>;
};

// CRUD over contact cards; getQRCode renders the card's QR as a data URL.
cards: {
create: (data: ContactCardInput) => Promise<ContactCard>;
update: (cardId: string, data: Partial<ContactCardInput>) => Promise<ContactCard>;
delete: (cardId: string) => Promise<void>;
getQRCode: (cardId: string, options?: QROptions) => Promise<DataURL>;
};

// Viral-growth operations: multi-channel sharing, conversion attribution,
// and K-factor reporting.
viral: {
share: (cardId: string, recipients: ShareRecipient[]) => Promise<ShareResult>;
trackConversion: (invitationId: string) => Promise<void>;
getKFactor: (period: TimePeriod) => Promise<number>;
};
};

// Monitoring interface for agent self-management
monitoring: {
getMetrics: () => Promise<SystemMetrics>;
getHealthStatus: () => Promise<HealthStatus>;
runDiagnostics: () => Promise<DiagnosticReport>;
};
}

Sub-Agent Task Definitions

# agents/task-definitions.yaml
# Sub-agent task catalog. Indentation restored — the flattened form was not
# valid YAML.
tasks:
  user_onboarding:
    description: "Automate user registration and initial card creation"
    required_capabilities:
      - authentication.register
      - cards.create
      - viral.share
    success_metrics:
      - user_registered: true
      - card_created: true
      - initial_shares: ">= 3"

  viral_campaign_management:
    description: "Monitor and optimize viral growth campaigns"
    required_capabilities:
      - viral.getKFactor
      - viral.share
      - monitoring.getMetrics
    # Conditions are expression strings evaluated by the agent runtime.
    trigger_conditions:
      - k_factor < 0.9
      - daily_signups < target * 0.8
    actions:
      - analyze_conversion_funnel
      - adjust_share_incentives
      - notify_growth_team

  performance_optimization:
    description: "Maintain system performance and cost efficiency"
    required_capabilities:
      - monitoring.getMetrics
      - monitoring.runDiagnostics
    trigger_conditions:
      - p95_latency > 100ms
      - error_rate > 0.01
      - monthly_cost > budget * 1.1

Agent Communication Protocol

// agents/protocol.proto
syntax = "proto3";

package qr_generator.agents;

// BUG FIX: well-known types must be imported explicitly or protoc rejects
// the file with "google.protobuf.Any is not defined".
import "google/protobuf/any.proto";
import "google/protobuf/timestamp.proto";

// Agent-to-agent communication
message AgentRequest {
  string agent_id = 1;                // caller identity
  string task_id = 2;                 // task from agents/task-definitions.yaml
  string capability = 3;              // dotted capability name, e.g. "cards.create"
  google.protobuf.Any payload = 4;    // capability-specific request body
  map<string, string> context = 5;    // tracing / auth context
}

message AgentResponse {
  string request_id = 1;
  bool success = 2;
  google.protobuf.Any result = 3;     // capability-specific response body
  repeated string errors = 4;         // empty when success = true
  map<string, double> metrics = 5;    // per-call measurements
}

// Event stream for autonomous agents
message SystemEvent {
  string event_id = 1;
  string event_type = 2;
  google.protobuf.Timestamp timestamp = 3;
  map<string, string> attributes = 4;

  // Field numbers 10+ leave room for new common fields above.
  oneof event {
    UserRegistered user_registered = 10;
    CardCreated card_created = 11;
    ViralShareInitiated viral_share = 12;
    SystemAlert alert = 13;
  }
}

Disaster Recovery

Backup Strategy

#!/bin/bash
# scripts/backup.sh
# Daily automated Cloud SQL backup with integrity verification and
# 90-day retention.
#
# set -eu aborts on any unguarded failure or unset variable. pipefail is
# deliberately omitted: `head` closes the verification pipe early, and the
# resulting SIGPIPE from gsutil would fail an otherwise valid check.
set -eu

TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_BUCKET="gs://qr-generator-backups"

# Database backup
gcloud sql export sql qr-generator-primary \
  "$BACKUP_BUCKET/db/backup_$TIMESTAMP.sql" \
  --database=qr_generator

# Verify backup integrity: a valid dump begins with the pg_dump banner.
# (Rewritten from a bare $? check so the pipeline is the if-condition itself.)
if gsutil cp "$BACKUP_BUCKET/db/backup_$TIMESTAMP.sql" - \
    | head -n 100 | grep -q "PostgreSQL database dump"; then
  echo "Backup verified: backup_$TIMESTAMP.sql"
else
  echo "Backup verification failed!"
  exit 1
fi

# Cleanup old backups (retain 90 days). Cutoff hoisted out of the loop.
CUTOFF=$(date -d "90 days ago" +%s)
gsutil ls "$BACKUP_BUCKET/db/" | while read -r backup; do
  # NOTE(review): the "Creation time" line of `gsutil stat` is a full
  # RFC-822 date, so awk '{print $3}' captures only one field of it — this
  # parse looks fragile. A bucket lifecycle rule would be more robust; confirm.
  AGE=$(gsutil stat "$backup" | grep "Creation time" | awk '{print $3}')
  if [ "$(date -d "$AGE" +%s)" -lt "$CUTOFF" ]; then
    gsutil rm "$backup"
  fi
done

Recovery Procedures

# runbooks/disaster-recovery.yaml
# FIX: steps were written as "1. name:" mapping keys, which is not a valid
# ordered-steps structure in YAML; each procedure's steps are now a sequence,
# whose order is the execution order. Indentation restored.
procedures:
  database_failure:
    severity: critical
    estimated_recovery_time: 30m
    steps:
      - name: verify_failure
        command: "gcloud sql instances describe qr-generator-primary"
        expected: "state: FAILED"
      - name: promote_replica
        command: "gcloud sql instances promote-replica qr-generator-replica-1"
      - name: update_connection_strings
        script: "scripts/update-db-endpoints.sh"
      - name: verify_application
        endpoint: "https://api.qrcontact.app/health"
        expected_status: 200

  regional_outage:
    severity: high
    estimated_recovery_time: 15m
    steps:
      - name: identify_affected_region
        command: "gcloud monitoring alerts list --filter='active=true'"
      - name: remove_from_load_balancer
        command: "gcloud compute backend-services remove-backend..."
      - name: verify_traffic_rerouting
        metric: "loadbalancing.googleapis.com/https/request_count"
        condition: "affected_region_traffic == 0"

Cost Optimization

Resource Allocation

# infrastructure/cost-optimization.yaml
# Per-environment resource sizing. Indentation restored — the flattened form
# was not valid YAML.
configurations:
  development:
    cloud_run:
      min_instances: 0        # scale to zero off-hours
      max_instances: 10
      cpu: 500m
      memory: 256Mi
    cloud_sql:
      tier: db-f1-micro
      high_availability: false

  production:
    cloud_run:
      min_instances: 1        # Prevent cold starts
      max_instances: 100
      cpu: 1000m
      memory: 512Mi
    cloud_sql:
      tier: db-n1-standard-1
      high_availability: true
      read_replicas: 2

cost_saving_measures:
  - use_spot_instances: true
  - enable_autoscaling: true
  - compress_responses: true
  - cache_static_assets: true
  - batch_background_jobs: true

Summary

This Technical Design Document provides a comprehensive blueprint for implementing the QR Contact Card Generator platform with:

  1. Infrastructure: Multi-region deployment with automatic failover
  2. Security: JWT authentication, rate limiting, input validation
  3. Performance: Multi-layer caching targeting a 92% cache hit rate
  4. Monitoring: Prometheus metrics with automated alerting
  5. Agent Support: Clear interfaces for autonomous operation
  6. Cost Efficiency: Optimized resource allocation targeting <$50/month at 10K users
  7. Disaster Recovery: Automated backups with 30-minute RTO

The architecture is designed for autonomous agent operation, enabling sub-agents to build, deploy, monitor, and optimize the system with minimal human intervention while maintaining 99.95% uptime SLA.