GCP Infrastructure Architecture for BIO-QMS
Executive Summary
This document provides a comprehensive infrastructure architecture for the BIO-QMS platform on Google Cloud Platform (GCP). BIO-QMS is a multi-tenant regulated SaaS platform serving pharmaceutical and biotechnology companies, requiring compliance with FDA 21 CFR Part 11, HIPAA, and SOC 2 Type II standards.
Infrastructure Stack:
- Compute: Cloud Run (serverless containers)
- Database: Cloud SQL PostgreSQL 15 with multi-zone HA
- Cache: Memorystore for Redis
- CDN: Cloud CDN with Cloud Armor WAF
- Networking: Shared VPC with private subnets
- IaC: Terraform for infrastructure as code
- Secrets: Secret Manager for credential management
- Monitoring: Cloud Monitoring, Logging, and Error Reporting
Compliance Architecture:
- Encryption at rest (CMEK) and in transit (TLS 1.2+)
- Audit logging for all administrative actions
- Private network access for the data tier — database and cache have no public endpoints (the load balancer is the only internet-facing entry point)
- VPC Service Controls for data exfiltration prevention
- Automated backup with point-in-time recovery
- Multi-zone high availability with automatic failover
Infrastructure Overview
E.2.1: Terraform Infrastructure as Code
Overview
Terraform is the single source of truth for all GCP infrastructure. All resources are defined as code, version-controlled, and deployed through CI/CD pipelines with automated testing and approval workflows.
Terraform Organization:
infrastructure/
├── terraform/
│ ├── modules/ # Reusable infrastructure modules
│ │ ├── networking/ # VPC, subnets, firewall rules
│ │ ├── cloud-run/ # Cloud Run services
│ │ ├── cloud-sql/ # PostgreSQL database
│ │ ├── memorystore/ # Redis cache
│ │ ├── cdn/ # Cloud CDN + Cloud Armor
│ │ ├── iam/ # Service accounts, IAM bindings
│ │ ├── monitoring/ # Cloud Monitoring dashboards
│ │ └── secrets/ # Secret Manager configuration
│ ├── environments/
│ │ ├── dev/ # Development environment
│ │ ├── staging/ # Staging environment
│ │ └── production/ # Production environment
│ └── backend.tf # Remote state configuration
Backend Configuration
Terraform state is stored in GCS with state locking (the backend acquires a lock object for the duration of each operation) to prevent concurrent modifications.
backend.tf:
terraform {
required_version = ">= 1.6.0"
required_providers {
google = {
source = "hashicorp/google"
version = "~> 5.11.0"
}
google-beta = {
source = "hashicorp/google-beta"
version = "~> 5.11.0"
}
}
backend "gcs" {
bucket = "bio-qms-terraform-state"
prefix = "terraform/state"
# State locking: the GCS backend acquires a ".tflock" lock object for
# the duration of each operation, preventing concurrent state writes.
}
}
# Default providers; project and region come from per-environment variables.
provider "google" {
project = var.project_id
region = var.region
}
provider "google-beta" {
project = var.project_id
region = var.region
}
Root Module Structure
Each environment has its own root module that composes infrastructure modules.
environments/production/main.tf:
# Base network: custom-mode VPC with three /24 subnets (app, data, ops),
# Private Google Access, and VPC flow logs for audit visibility.
module "networking" {
source = "../../modules/networking"
project_id = var.project_id
environment = "production"
region = var.region
vpc_name = "bio-qms-vpc"
subnet_ranges = {
app = "10.0.1.0/24"
data = "10.0.2.0/24"
ops = "10.0.3.0/24"
}
enable_private_google_access = true
enable_flow_logs = true
}
# Primary PostgreSQL 15 instance: regional HA, daily backups + PITR,
# cross-region DR replica, private IP only, CMEK encryption.
module "cloud_sql" {
  source      = "../../modules/cloud-sql"
  project_id  = var.project_id
  environment = "production"
  region      = var.region

  instance_name    = "bio-qms-db-prod"
  database_version = "POSTGRES_15"
  tier             = "db-custom-2-4096"

  high_availability = true

  # Cloud SQL automated backups run once per day at backup_start_time;
  # intra-day recovery is provided by point-in-time recovery (WAL).
  # NOTE(review): backup_interval_hours is not a Cloud SQL concept —
  # confirm the module actually consumes this input or remove it.
  backup_enabled                 = true
  backup_start_time              = "03:00"
  backup_interval_hours          = 4
  point_in_time_recovery_enabled = true
  retained_backups               = 30

  # Cross-region replica for DR
  replica_configuration = {
    region = "us-east1"
    tier   = "db-custom-2-4096"
  }

  # Private IP only. The allocated range name must match the reserved
  # PSA address created by the networking module, which is named with
  # the literal environment string passed to it ("production") — the
  # root module defines no var.environment, so do not interpolate one.
  private_network    = module.networking.vpc_self_link
  allocated_ip_range = "google-managed-services-production"

  # Encryption with CMEK for HIPAA
  disk_encryption_key_name = module.kms.sql_key_id

  # Audit-oriented PostgreSQL flags (21 CFR Part 11 traceability).
  database_flags = [
    { name = "log_connections", value = "on" },
    { name = "log_disconnections", value = "on" },
    { name = "log_statement", value = "ddl" },
  ]

  depends_on = [module.networking]
}
# Redis cache: 1 GB STANDARD_HA (replicated) Memorystore instance on the
# shared VPC. allkeys-lru eviction suits a pure-cache workload — no
# durable data may live here.
module "memorystore" {
source = "../../modules/memorystore"
project_id = var.project_id
environment = "production"
region = var.region
instance_name = "bio-qms-redis-prod"
memory_size_gb = 1
tier = "STANDARD_HA"
authorized_network = module.networking.vpc_self_link
redis_configs = {
maxmemory-policy = "allkeys-lru"
}
depends_on = [module.networking]
}
# API service on Cloud Run: warm minimum capacity, private egress to the
# database/cache, and secret-backed configuration.
module "cloud_run_api" {
  source      = "../../modules/cloud-run"
  project_id  = var.project_id
  environment = "production"
  region      = var.region

  service_name = "bio-qms-api"
  image        = "gcr.io/${var.project_id}/bio-qms-api:${var.api_image_tag}"

  # Keep warm instances to avoid cold starts on a latency-sensitive API.
  min_instances = 2
  max_instances = 10

  # Container resources
  cpu_limit    = "2000m"
  memory_limit = "2Gi"

  # Concurrency and request timeout
  max_concurrent_requests = 80
  request_timeout_seconds = 300

  # VPC connector for private DB/Redis access
  vpc_connector_id = module.networking.vpc_connector_id

  # Service account
  service_account_email = module.iam.api_service_account_email

  # Environment variables. Cloud Run env var values must be strings;
  # module.memorystore.port is a number, so convert it explicitly rather
  # than relying on implicit type conversion.
  env_vars = {
    ENVIRONMENT   = "production"
    DATABASE_HOST = module.cloud_sql.private_ip_address
    REDIS_HOST    = module.memorystore.host
    REDIS_PORT    = tostring(module.memorystore.port)
    ALLOWED_HOSTS = "bio-qms.coditect.ai"
  }

  # Secrets resolved from Secret Manager at deploy time.
  # NOTE(review): "latest" versions weaken deployment reproducibility for
  # 21 CFR Part 11 audit trails — consider pinning explicit versions.
  secret_env_vars = {
    DATABASE_PASSWORD = {
      secret  = "database-password"
      version = "latest"
    }
    DJANGO_SECRET_KEY = {
      secret  = "django-secret-key"
      version = "latest"
    }
    REDIS_AUTH_STRING = {
      secret  = "redis-auth-string"
      version = "latest"
    }
  }

  # Health checks (paths served by the application)
  startup_probe = {
    path              = "/health/startup/"
    initial_delay     = 10
    timeout           = 5
    period            = 10
    failure_threshold = 3
  }
  liveness_probe = {
    path              = "/health/live/"
    timeout           = 5
    period            = 30
    failure_threshold = 3
  }

  depends_on = [module.cloud_sql, module.memorystore, module.iam]
}
# Frontend service on Cloud Run. Stateless and tolerant of cold starts,
# so it is allowed to scale to zero outside active use.
module "cloud_run_frontend" {
  source = "../../modules/cloud-run"

  project_id  = var.project_id
  environment = "production"
  region      = var.region

  service_name = "bio-qms-frontend"
  image        = "gcr.io/${var.project_id}/bio-qms-frontend:${var.frontend_image_tag}"

  service_account_email = module.iam.frontend_service_account_email

  # Scale-to-zero with a modest ceiling.
  min_instances = 0
  max_instances = 5

  # Lightweight container footprint.
  cpu_limit    = "1000m"
  memory_limit = "512Mi"

  max_concurrent_requests = 100
  request_timeout_seconds = 60

  env_vars = {
    ENVIRONMENT = "production"
    API_URL     = "https://api.bio-qms.coditect.ai"
  }

  depends_on = [module.iam]
}
# Edge layer: global HTTPS LB with Cloud CDN and Cloud Armor in front of
# the two Cloud Run services.
module "cdn" {
  source      = "../../modules/cdn"
  project_id  = var.project_id
  environment = "production"
  # The module creates regional serverless NEGs and therefore needs the
  # region (it references var.region internally).
  region = var.region

  # Backend services. The module's NEG resources consume
  # each.value.service_name (the Cloud Run service NAME, not its URL),
  # so pass the service_name outputs of the cloud-run modules.
  backend_services = {
    api = {
      service_name = module.cloud_run_api.service_name
      description  = "BIO-QMS API"
    }
    frontend = {
      service_name = module.cloud_run_frontend.service_name
      description  = "BIO-QMS Frontend"
    }
  }

  # Custom domain mapping
  domain_mappings = {
    "bio-qms.coditect.ai"     = "frontend"
    "api.bio-qms.coditect.ai" = "api"
  }

  # Cloud Armor security policy
  security_policy_name = "bio-qms-security-policy"

  # SSL policy (TLS 1.2+ minimum)
  ssl_policy = {
    min_tls_version = "TLS_1_2"
    profile         = "RESTRICTED"
  }

  depends_on = [module.cloud_run_api, module.cloud_run_frontend]
}
# Least-privilege service accounts, one per Cloud Run service.
module "iam" {
  source = "../../modules/iam"

  project_id  = var.project_id
  environment = "production"

  service_accounts = {
    # API: Cloud SQL client, secret reads, log + trace writes.
    api = {
      account_id   = "bio-qms-api-prod"
      display_name = "BIO-QMS API Service Account"
      roles = [
        "roles/cloudsql.client",
        "roles/secretmanager.secretAccessor",
        "roles/logging.logWriter",
        "roles/cloudtrace.agent",
      ]
    }

    # Frontend: log writing only.
    frontend = {
      account_id   = "bio-qms-frontend-prod"
      display_name = "BIO-QMS Frontend Service Account"
      roles        = ["roles/logging.logWriter"]
    }
  }
}
# Observability: email notification channels, dashboards, and threshold
# alert policies (API error rate, DB CPU, Redis memory). Condition and
# duration strings are interpreted by the monitoring module.
module "monitoring" {
source = "../../modules/monitoring"
project_id = var.project_id
environment = "production"
# Alert notification channels
notification_channels = var.alert_email_addresses
# Monitoring dashboards
create_dashboards = true
# Alert policies
alert_policies = {
api_error_rate = {
display_name = "API Error Rate High"
condition = "error_rate > 0.05"
duration = "300s"
}
database_cpu = {
display_name = "Database CPU High"
condition = "cpu_utilization > 0.8"
duration = "600s"
}
redis_memory = {
display_name = "Redis Memory High"
condition = "memory_utilization > 0.9"
duration = "300s"
}
}
}
# CMEK: regional key ring with a Cloud SQL disk-encryption key rotated
# every 90 days (7776000 s); consumed by module.cloud_sql as
# disk_encryption_key_name.
module "kms" {
source = "../../modules/kms"
project_id = var.project_id
region = var.region
keyring_name = "bio-qms-keyring"
crypto_keys = {
sql = {
name = "cloudsql-key"
rotation_period = "7776000s" # 90 days
}
}
}
Environment-Specific Variables
environments/production/terraform.tfvars:
# Production inputs. Image tags are pinned to released versions and
# bumped by the CI/CD pipeline on promotion.
project_id = "bio-qms-production"
region = "us-central1"
# Image tags (updated by CI/CD)
api_image_tag = "v1.2.3"
frontend_image_tag = "v1.2.3"
# Alert configuration
alert_email_addresses = [
"ops-team@coditect.ai",
"oncall@coditect.ai"
]
# Compliance tags
labels = {
environment = "production"
compliance = "hipaa-soc2"
cost_center = "bio-qms"
}
environments/staging/terraform.tfvars:
# Staging inputs; image tags track the most recent staging build.
project_id = "bio-qms-staging"
region = "us-central1"
api_image_tag = "staging-latest"
frontend_image_tag = "staging-latest"
alert_email_addresses = [
"dev-team@coditect.ai"
]
labels = {
environment = "staging"
cost_center = "bio-qms"
}
environments/dev/terraform.tfvars:
# Development inputs; image tags track the most recent dev build.
project_id = "bio-qms-dev"
region = "us-central1"
api_image_tag = "dev-latest"
frontend_image_tag = "dev-latest"
alert_email_addresses = [
"dev-team@coditect.ai"
]
labels = {
environment = "development"
cost_center = "bio-qms"
}
Networking Module
modules/networking/main.tf:
# Custom-mode VPC: no auto subnets, so only the explicitly defined
# app/data/ops subnets exist.
resource "google_compute_network" "vpc" {
  name                    = var.vpc_name
  project                 = var.project_id
  auto_create_subnetworks = false
}

# Application subnet (app workloads / serverless connector egress).
resource "google_compute_subnetwork" "app" {
  name                     = "${var.vpc_name}-app-${var.region}"
  ip_cidr_range            = var.subnet_ranges.app
  region                   = var.region
  network                  = google_compute_network.vpc.id
  project                  = var.project_id
  private_ip_google_access = var.enable_private_google_access

  # Honor the module's enable_flow_logs switch: the original always
  # emitted flow logs even when the variable was false.
  dynamic "log_config" {
    for_each = var.enable_flow_logs ? [1] : []
    content {
      aggregation_interval = "INTERVAL_5_SEC"
      flow_sampling        = 0.5
      metadata             = "INCLUDE_ALL_METADATA"
    }
  }
}

# Data subnet (database, redis).
resource "google_compute_subnetwork" "data" {
  name                     = "${var.vpc_name}-data-${var.region}"
  ip_cidr_range            = var.subnet_ranges.data
  region                   = var.region
  network                  = google_compute_network.vpc.id
  project                  = var.project_id
  private_ip_google_access = true

  dynamic "log_config" {
    for_each = var.enable_flow_logs ? [1] : []
    content {
      aggregation_interval = "INTERVAL_5_SEC"
      flow_sampling        = 0.5
      metadata             = "INCLUDE_ALL_METADATA"
    }
  }
}

# Ops subnet (NAT gateway, monitoring agents). Flow logs intentionally
# not enabled here, matching the original configuration.
resource "google_compute_subnetwork" "ops" {
  name                     = "${var.vpc_name}-ops-${var.region}"
  ip_cidr_range            = var.subnet_ranges.ops
  region                   = var.region
  network                  = google_compute_network.vpc.id
  project                  = var.project_id
  private_ip_google_access = true
}
# Cloud NAT for outbound traffic from private resources
# Cloud Router + NAT give private-IP resources outbound internet access
# (package mirrors, external APIs) without external IPs. ERRORS_ONLY
# logging records only failed/dropped translations.
resource "google_compute_router" "router" {
name = "${var.vpc_name}-router"
region = var.region
network = google_compute_network.vpc.id
project = var.project_id
}
resource "google_compute_router_nat" "nat" {
name = "${var.vpc_name}-nat"
router = google_compute_router.router.name
region = var.region
project = var.project_id
nat_ip_allocate_option = "AUTO_ONLY"
source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES"
log_config {
enable = true
filter = "ERRORS_ONLY"
}
}
# VPC Serverless Connector for Cloud Run
# Lets Cloud Run reach private-IP resources (Cloud SQL, Redis). The
# connector range must be an unused /28 that does not overlap any subnet.
# 2-3 e2-micro instances is the minimum footprint.
resource "google_vpc_access_connector" "connector" {
name = "${var.vpc_name}-connector"
project = var.project_id
region = var.region
network = google_compute_network.vpc.name
ip_cidr_range = "10.8.0.0/28"
min_instances = 2
max_instances = 3
machine_type = "e2-micro"
}
# Firewall Rules
# NOTE(review): Cloud Run, Cloud SQL and Memorystore are managed services
# whose endpoints carry no VM network tags, so the target_tags below
# ("cloud-run", "cloudsql", "redis") currently match no instances. These
# rules document intent and only take effect if tagged Compute Engine
# VMs are introduced later — confirm this is the intended posture.
# Allow HTTPS ingress from Cloud Load Balancer
resource "google_compute_firewall" "allow_lb_https" {
name = "${var.vpc_name}-allow-lb-https"
network = google_compute_network.vpc.name
project = var.project_id
allow {
protocol = "tcp"
ports = ["443", "8080"]
}
source_ranges = [
"130.211.0.0/22", # GCP Load Balancer health checks
"35.191.0.0/16" # GCP Load Balancer proxies
]
target_tags = ["cloud-run"]
}
# Allow internal traffic between subnets
resource "google_compute_firewall" "allow_internal" {
name = "${var.vpc_name}-allow-internal"
network = google_compute_network.vpc.name
project = var.project_id
allow {
protocol = "tcp"
}
allow {
protocol = "udp"
}
allow {
protocol = "icmp"
}
source_ranges = [
var.subnet_ranges.app,
var.subnet_ranges.data,
var.subnet_ranges.ops
]
}
# Restrict database access to app subnet only
resource "google_compute_firewall" "restrict_db_access" {
name = "${var.vpc_name}-restrict-db"
network = google_compute_network.vpc.name
project = var.project_id
allow {
protocol = "tcp"
ports = ["5432"] # PostgreSQL
}
source_ranges = [var.subnet_ranges.app]
target_tags = ["cloudsql"]
}
# Restrict Redis access to app subnet only
resource "google_compute_firewall" "restrict_redis_access" {
name = "${var.vpc_name}-restrict-redis"
network = google_compute_network.vpc.name
project = var.project_id
allow {
protocol = "tcp"
ports = ["6379"] # Redis
}
source_ranges = [var.subnet_ranges.app]
target_tags = ["redis"]
}
# Deny all other ingress by default (implicit deny is active)
# But explicit deny for documentation
# Priority 65534 sits just above the implicit deny (65535), so any
# lower-numbered allow rule above still wins.
resource "google_compute_firewall" "deny_all_ingress" {
name = "${var.vpc_name}-deny-all"
network = google_compute_network.vpc.name
project = var.project_id
priority = 65534
deny {
protocol = "all"
}
source_ranges = ["0.0.0.0/0"]
}
# Private Service Connection for Cloud SQL
# Reserved /16 internal range + Service Networking peering used for
# private services access (Cloud SQL, Memorystore). The address NAME is
# what the cloud_sql module's allocated_ip_range input must reference.
resource "google_compute_global_address" "private_ip_address" {
name = "google-managed-services-${var.environment}"
project = var.project_id
purpose = "VPC_PEERING"
address_type = "INTERNAL"
prefix_length = 16
network = google_compute_network.vpc.id
}
resource "google_service_networking_connection" "private_vpc_connection" {
network = google_compute_network.vpc.id
service = "servicenetworking.googleapis.com"
reserved_peering_ranges = [google_compute_global_address.private_ip_address.name]
}
modules/networking/outputs.tf:
# Outputs consumed by sibling modules (cloud-sql, memorystore, cloud-run).
output "vpc_id" {
description = "VPC network ID"
value = google_compute_network.vpc.id
}
output "vpc_self_link" {
description = "VPC network self link"
value = google_compute_network.vpc.self_link
}
output "vpc_name" {
description = "VPC network name"
value = google_compute_network.vpc.name
}
output "app_subnet_name" {
description = "Application subnet name"
value = google_compute_subnetwork.app.name
}
output "data_subnet_name" {
description = "Data subnet name"
value = google_compute_subnetwork.data.name
}
output "vpc_connector_id" {
description = "VPC Serverless Connector ID"
value = google_vpc_access_connector.connector.id
}
output "private_service_connection" {
description = "Private Service Connection for Cloud SQL"
value = google_service_networking_connection.private_vpc_connection.network
}
Cloud SQL Module
modules/cloud-sql/main.tf:
# Primary Cloud SQL instance. REGIONAL availability when HA is enabled,
# private IP only, CMEK-encrypted, with audit-oriented flags and Query
# Insights for compliance visibility.
resource "google_sql_database_instance" "instance" {
  name             = var.instance_name
  database_version = var.database_version
  region           = var.region
  project          = var.project_id

  settings {
    tier              = var.tier
    availability_type = var.high_availability ? "REGIONAL" : "ZONAL"
    disk_type         = "PD_SSD"
    disk_size         = var.disk_size_gb
    disk_autoresize   = true

    # Backup configuration. Cloud SQL for PostgreSQL retains at most
    # 7 days of transaction logs; the original passed retained_backups
    # (30) straight through, which the API rejects — cap it at the
    # documented maximum while keeping 30 daily backup copies.
    backup_configuration {
      enabled                        = var.backup_enabled
      start_time                     = var.backup_start_time
      point_in_time_recovery_enabled = var.point_in_time_recovery_enabled
      transaction_log_retention_days = min(var.retained_backups, 7)
      backup_retention_settings {
        retained_backups = var.retained_backups
        retention_unit   = "COUNT"
      }
    }

    # IP configuration - private IP only
    ip_configuration {
      ipv4_enabled                                  = false
      private_network                               = var.private_network
      enable_private_path_for_google_cloud_services = true
    }

    # Maintenance window
    maintenance_window {
      day          = 7 # Sunday
      hour         = 3 # 03:00 UTC
      update_track = "stable"
    }

    # Database flags for audit logging
    dynamic "database_flags" {
      for_each = var.database_flags
      content {
        name  = database_flags.value.name
        value = database_flags.value.value
      }
    }

    # Insights configuration
    insights_config {
      query_insights_enabled  = true
      query_plans_per_minute  = 5
      query_string_length     = 1024
      record_application_tags = true
    }

    # User labels for compliance tracking
    user_labels = var.labels
  }

  # Encryption with Customer-Managed Encryption Key (CMEK)
  encryption_key_name = var.disk_encryption_key_name

  deletion_protection = true

  # The original `depends_on = [var.private_network]` is invalid HCL:
  # depends_on may only reference resources or modules, never variables.
  # Ordering on the network is already implied by the attribute
  # reference to var.private_network above; callers that need explicit
  # ordering should use depends_on on the module call itself.
}
# Cross-region read replica for disaster recovery.
# NOTE: failover_target = true designates a legacy (same-region, MySQL-era)
# failover replica and is invalid for a cross-region PostgreSQL replica;
# in-region failover is already provided by REGIONAL availability on the
# primary. A DR replica must be a plain read replica, promoted manually
# (or via automation) during a regional outage.
resource "google_sql_database_instance" "replica" {
  count = var.replica_configuration != null ? 1 : 0

  name             = "${var.instance_name}-replica"
  database_version = var.database_version
  region           = var.replica_configuration.region
  project          = var.project_id

  master_instance_name = google_sql_database_instance.instance.name

  replica_configuration {
    failover_target = false
  }

  settings {
    tier              = var.replica_configuration.tier
    availability_type = "ZONAL"
    disk_type         = "PD_SSD"
    disk_autoresize   = true

    ip_configuration {
      ipv4_enabled                                  = false
      private_network                               = var.private_network
      enable_private_path_for_google_cloud_services = true
    }

    user_labels = merge(var.labels, {
      role = "replica"
    })
  }

  deletion_protection = true
}
# Database
resource "google_sql_database" "database" {
name = var.database_name
instance = google_sql_database_instance.instance.name
project = var.project_id
}
# Database user (password stored in Secret Manager)
# NOTE(review): the password value also ends up in Terraform state —
# ensure the GCS state bucket is encrypted and access-restricted, or
# consider IAM database authentication to avoid password management.
resource "google_sql_user" "user" {
name = var.database_user
instance = google_sql_database_instance.instance.name
password = var.database_password
project = var.project_id
}
modules/cloud-sql/outputs.tf:
# Outputs consumed by the root module (Cloud Run env vars) and tooling.
output "instance_name" {
description = "Database instance name"
value = google_sql_database_instance.instance.name
}
output "instance_connection_name" {
description = "Connection name for Cloud SQL Proxy"
value = google_sql_database_instance.instance.connection_name
}
output "private_ip_address" {
description = "Private IP address of the database"
value = google_sql_database_instance.instance.private_ip_address
}
output "replica_connection_name" {
description = "Connection name for replica (if exists)"
value = var.replica_configuration != null ? google_sql_database_instance.replica[0].connection_name : null
}
Memorystore (Redis) Module
modules/memorystore/main.tf:
# Managed Redis 7.0: STANDARD_HA tier (replicated), private services
# access connectivity, AUTH required, and in-transit TLS (server
# authentication) — matching the platform's encryption-in-transit policy.
resource "google_redis_instance" "instance" {
name = var.instance_name
tier = var.tier
memory_size_gb = var.memory_size_gb
region = var.region
project = var.project_id
authorized_network = var.authorized_network
connect_mode = "PRIVATE_SERVICE_ACCESS"
redis_version = "REDIS_7_0"
display_name = "${var.instance_name} (${var.environment})"
# Redis configuration
redis_configs = var.redis_configs
# Enable AUTH for security
auth_enabled = true
# Enable in-transit encryption
transit_encryption_mode = "SERVER_AUTHENTICATION"
# Maintenance policy
maintenance_policy {
weekly_maintenance_window {
day = "SUNDAY"
start_time {
hours = 3
minutes = 0
}
}
}
# Labels for compliance tracking
labels = var.labels
}
modules/memorystore/outputs.tf:
# Outputs consumed by the Cloud Run module (host/port env vars) and by
# secret provisioning (auth_string, marked sensitive).
output "host" {
description = "Redis instance host"
value = google_redis_instance.instance.host
}
output "port" {
description = "Redis instance port"
value = google_redis_instance.instance.port
}
output "auth_string" {
description = "Redis AUTH string (sensitive)"
value = google_redis_instance.instance.auth_string
sensitive = true
}
output "current_location_id" {
description = "Current location ID of the Redis instance"
value = google_redis_instance.instance.current_location_id
}
Cloud Run Module
modules/cloud-run/main.tf:
# Cloud Run v2 service: autoscaling bounds, private VPC egress, container
# resources, plain and Secret Manager-backed env vars, and optional
# startup/liveness probes supplied as object variables.
resource "google_cloud_run_v2_service" "service" {
name = var.service_name
location = var.region
project = var.project_id
template {
# Scaling configuration
scaling {
min_instance_count = var.min_instances
max_instance_count = var.max_instances
}
# VPC access
# PRIVATE_RANGES_ONLY: only traffic to private IP ranges is routed
# through the connector; other egress leaves Cloud Run directly.
vpc_access {
connector = var.vpc_connector_id
egress = "PRIVATE_RANGES_ONLY"
}
# Service account
service_account = var.service_account_email
# Container configuration
containers {
image = var.image
resources {
limits = {
cpu = var.cpu_limit
memory = var.memory_limit
}
cpu_idle = true
startup_cpu_boost = var.startup_cpu_boost
}
# Environment variables
dynamic "env" {
for_each = var.env_vars
content {
name = env.key
value = env.value
}
}
# Secrets from Secret Manager
# (a second dynamic "env" block is valid; both merge into one env list)
dynamic "env" {
for_each = var.secret_env_vars
content {
name = env.key
value_source {
secret_key_ref {
secret = env.value.secret
version = env.value.version
}
}
}
}
# Startup probe (only rendered when var.startup_probe is provided)
dynamic "startup_probe" {
for_each = var.startup_probe != null ? [var.startup_probe] : []
content {
initial_delay_seconds = startup_probe.value.initial_delay
timeout_seconds = startup_probe.value.timeout
period_seconds = startup_probe.value.period
failure_threshold = startup_probe.value.failure_threshold
http_get {
# port defaults to the container port
path = startup_probe.value.path
}
}
}
# Liveness probe (only rendered when var.liveness_probe is provided)
dynamic "liveness_probe" {
for_each = var.liveness_probe != null ? [var.liveness_probe] : []
content {
timeout_seconds = liveness_probe.value.timeout
period_seconds = liveness_probe.value.period
failure_threshold = liveness_probe.value.failure_threshold
http_get {
path = liveness_probe.value.path
}
}
}
}
# Request timeout
timeout = "${var.request_timeout_seconds}s"
# Max concurrent requests per instance
max_instance_request_concurrency = var.max_concurrent_requests
}
# Route 100% of traffic to the latest ready revision.
traffic {
type = "TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST"
percent = 100
}
labels = var.labels
}
# IAM binding to allow unauthenticated invocations (public).
# Uses the v2 IAM resource for consistency with the
# google_cloud_run_v2_service above (the v2 resource takes `name`
# rather than `service`).
resource "google_cloud_run_v2_service_iam_member" "public_access" {
  count    = var.allow_unauthenticated ? 1 : 0
  project  = var.project_id
  location = google_cloud_run_v2_service.service.location
  name     = google_cloud_run_v2_service.service.name
  role     = "roles/run.invoker"
  member   = "allUsers"
}
# Custom domain mapping
# NOTE(review): google_cloud_run_domain_mapping is a v1-API resource; it
# attaches to the v2 service by name via route_name. The domain must be
# verified for this project before apply — confirm.
resource "google_cloud_run_domain_mapping" "domain" {
count = var.custom_domain != null ? 1 : 0
name = var.custom_domain
location = var.region
project = var.project_id
metadata {
namespace = var.project_id
}
spec {
route_name = google_cloud_run_v2_service.service.name
}
}
modules/cloud-run/outputs.tf:
# Outputs consumed by the cdn module (service_name for serverless NEGs)
# and by callers that need the run.app URL.
output "service_url" {
description = "URL of the Cloud Run service"
value = google_cloud_run_v2_service.service.uri
}
output "service_name" {
description = "Name of the Cloud Run service"
value = google_cloud_run_v2_service.service.name
}
output "service_id" {
description = "Service ID"
value = google_cloud_run_v2_service.service.id
}
CDN & Cloud Armor Module
modules/cdn/main.tf:
# Cloud Armor Security Policy
# Rules evaluate in ascending priority order:
#   100-160     preconfigured WAF denies (SQLi, XSS, LFI, RCE, RFI,
#               scanner detection, protocol attacks)
#   200         per-IP rate limiting with a temporary ban
#   1000        allow remaining traffic
#   2147483647  the mandatory default rule (deny) — unreachable in
#               practice because the allow rule at 1000 matches all
#               remaining traffic first; kept as the required default.
resource "google_compute_security_policy" "policy" {
name = var.security_policy_name
project = var.project_id
# Default rule - deny all
rule {
action = "deny(403)"
priority = "2147483647"
match {
versioned_expr = "SRC_IPS_V1"
config {
src_ip_ranges = ["*"]
}
}
description = "Default deny rule"
}
# Allow traffic from all IPs (will be refined by OWASP rules)
rule {
action = "allow"
priority = "1000"
match {
versioned_expr = "SRC_IPS_V1"
config {
src_ip_ranges = ["*"]
}
}
description = "Allow all traffic"
}
# OWASP Top 10 protection rules
# NOTE(review): evaluatePreconfiguredExpr is the legacy WAF expression
# form; newer rule sets use evaluatePreconfiguredWaf — confirm provider
# support before migrating.
# SQL Injection protection
rule {
action = "deny(403)"
priority = "100"
match {
expr {
expression = "evaluatePreconfiguredExpr('sqli-stable')"
}
}
description = "Block SQL injection attempts"
}
# XSS protection
rule {
action = "deny(403)"
priority = "110"
match {
expr {
expression = "evaluatePreconfiguredExpr('xss-stable')"
}
}
description = "Block XSS attempts"
}
# LFI (Local File Inclusion) protection
rule {
action = "deny(403)"
priority = "120"
match {
expr {
expression = "evaluatePreconfiguredExpr('lfi-stable')"
}
}
description = "Block LFI attempts"
}
# RCE (Remote Code Execution) protection
rule {
action = "deny(403)"
priority = "130"
match {
expr {
expression = "evaluatePreconfiguredExpr('rce-stable')"
}
}
description = "Block RCE attempts"
}
# RFI (Remote File Inclusion) protection
rule {
action = "deny(403)"
priority = "140"
match {
expr {
expression = "evaluatePreconfiguredExpr('rfi-stable')"
}
}
description = "Block RFI attempts"
}
# Scanner detection
rule {
action = "deny(403)"
priority = "150"
match {
expr {
expression = "evaluatePreconfiguredExpr('scannerdetection-stable')"
}
}
description = "Block known vulnerability scanners"
}
# Protocol attack protection
rule {
action = "deny(403)"
priority = "160"
match {
expr {
expression = "evaluatePreconfiguredExpr('protocolattack-stable')"
}
}
description = "Block protocol attacks"
}
# Rate limiting rule (100 requests per minute per IP)
rule {
action = "rate_based_ban"
priority = "200"
match {
versioned_expr = "SRC_IPS_V1"
config {
src_ip_ranges = ["*"]
}
}
rate_limit_options {
conform_action = "allow"
exceed_action = "deny(429)"
enforce_on_key = "IP"
rate_limit_threshold {
count = 100
interval_sec = 60
}
ban_duration_sec = 600 # 10 minute ban
}
description = "Rate limit: 100 req/min per IP"
}
# Adaptive DDoS protection
adaptive_protection_config {
layer_7_ddos_defense_config {
enable = true
}
}
}
# Backend NEG for Cloud Run services
# One SERVERLESS NEG per entry in var.backend_services; each entry must
# supply service_name = the Cloud Run service NAME (not its URL).
resource "google_compute_region_network_endpoint_group" "neg" {
for_each = var.backend_services
name = "${each.key}-neg"
network_endpoint_type = "SERVERLESS"
region = var.region
project = var.project_id
cloud_run {
service = each.value.service_name
}
}
# Backend services
# One global backend service per Cloud Run service, fronted by Cloud CDN
# and protected by the Cloud Armor policy above.
resource "google_compute_backend_service" "backend" {
  for_each = var.backend_services
  name     = "${each.key}-backend"
  project  = var.project_id

  protocol              = "HTTPS"
  load_balancing_scheme = "EXTERNAL_MANAGED"

  backend {
    group = google_compute_region_network_endpoint_group.neg[each.key].id
  }

  # Cloud Armor security policy
  security_policy = google_compute_security_policy.policy.id

  # CDN configuration.
  # NOTE(review): this applies to the API backend too — CACHE_ALL_STATIC
  # only caches responses with static content types, but confirm API
  # responses set appropriate Cache-Control headers.
  enable_cdn = true
  cdn_policy {
    cache_mode        = "CACHE_ALL_STATIC"
    client_ttl        = 3600
    default_ttl       = 3600
    max_ttl           = 86400
    negative_caching  = true
    serve_while_stale = 86400
  }

  # Backend services whose backends are serverless NEGs do not support
  # health checks or connection draining — Cloud Run manages container
  # health itself (startup/liveness probes on the service). The original
  # health_checks and connection_draining_timeout_sec arguments are
  # rejected by the API for serverless backends and have been removed.

  log_config {
    enable      = true
    sample_rate = 1.0
  }
}
# Health checks
# NOTE(review): backend services backed by serverless NEGs do not accept
# health checks — Cloud Run probes its containers itself. These health
# checks are only usable if instance-group or zonal-NEG backends are
# introduced later; confirm whether they should be kept.
resource "google_compute_health_check" "health_check" {
for_each = var.backend_services
name = "${each.key}-health-check"
project = var.project_id
timeout_sec = 5
check_interval_sec = 10
https_health_check {
port = 443
request_path = "/health/live/"
}
}
# URL map
# Host-based routing: api.bio-qms.coditect.ai -> API backend; all other
# hosts (including bio-qms.coditect.ai) -> frontend default service.
resource "google_compute_url_map" "url_map" {
name = "${var.security_policy_name}-url-map"
project = var.project_id
default_service = google_compute_backend_service.backend["frontend"].id
host_rule {
hosts = ["api.bio-qms.coditect.ai"]
path_matcher = "api"
}
path_matcher {
name = "api"
default_service = google_compute_backend_service.backend["api"].id
}
}
# SSL Certificate (managed by Google)
# Provisioning completes only once both domains' DNS records resolve to
# the load balancer's external IP.
resource "google_compute_managed_ssl_certificate" "ssl_cert" {
name = "bio-qms-ssl-cert"
project = var.project_id
managed {
domains = [
"bio-qms.coditect.ai",
"api.bio-qms.coditect.ai"
]
}
}
# SSL Policy
# Enforces the compliance requirement of TLS 1.2+ in transit, with the
# RESTRICTED cipher profile.
resource "google_compute_ssl_policy" "ssl_policy" {
name = "bio-qms-ssl-policy"
project = var.project_id
profile = var.ssl_policy.profile
min_tls_version = var.ssl_policy.min_tls_version
}
# HTTPS Proxy
# Binds the URL map, managed certificate, and TLS policy for port 443.
resource "google_compute_target_https_proxy" "https_proxy" {
name = "${var.security_policy_name}-https-proxy"
project = var.project_id
url_map = google_compute_url_map.url_map.id
ssl_certificates = [google_compute_managed_ssl_certificate.ssl_cert.id]
ssl_policy = google_compute_ssl_policy.ssl_policy.id
}
# Global forwarding rule (external IP)
# The load balancer's single external IP; exported via the external_ip
# output and targeted by the domains' DNS records.
resource "google_compute_global_forwarding_rule" "https" {
name = "${var.security_policy_name}-https"
project = var.project_id
target = google_compute_target_https_proxy.https_proxy.id
port_range = "443"
ip_protocol = "TCP"
load_balancing_scheme = "EXTERNAL_MANAGED"
}
# HTTP to HTTPS redirect
# Port 80 listener that only issues 301 redirects to HTTPS, preserving
# the query string; no plaintext traffic reaches any backend.
resource "google_compute_url_map" "http_redirect" {
name = "${var.security_policy_name}-http-redirect"
project = var.project_id
default_url_redirect {
https_redirect = true
redirect_response_code = "MOVED_PERMANENTLY_DEFAULT"
strip_query = false
}
}
resource "google_compute_target_http_proxy" "http_proxy" {
name = "${var.security_policy_name}-http-proxy"
project = var.project_id
url_map = google_compute_url_map.http_redirect.id
}
resource "google_compute_global_forwarding_rule" "http" {
name = "${var.security_policy_name}-http"
project = var.project_id
target = google_compute_target_http_proxy.http_proxy.id
port_range = "80"
ip_protocol = "TCP"
load_balancing_scheme = "EXTERNAL_MANAGED"
}
modules/cdn/outputs.tf:
# Outputs: the LB IP (for DNS records) and resource IDs for reference.
output "external_ip" {
description = "External IP address for the load balancer"
value = google_compute_global_forwarding_rule.https.ip_address
}
output "security_policy_id" {
description = "Cloud Armor security policy ID"
value = google_compute_security_policy.policy.id
}
output "ssl_certificate_id" {
description = "Managed SSL certificate ID"
value = google_compute_managed_ssl_certificate.ssl_cert.id
}