- Created SelfFreezoneClient in Self components
- Wraps SDK FreezoneScriptClient for Self-specific operations
- Implements send_verification_email method
- Uses Rhai script template for email verification
- Includes template variable substitution
- Added serde-wasm-bindgen dependency
Usage:
let client = SelfFreezoneClient::builder()
    .supervisor_url("http://localhost:8080")
    .secret("my-secret")
    .build()?;

client.send_verification_email(
    "user@example.com",
    "123456",
    "https://verify.com/abc"
).await?;
Production Deployment Guide
Overview
This guide covers deploying Self in production environments, including infrastructure setup, security hardening, monitoring, and maintenance procedures.
Infrastructure Requirements
Minimum System Requirements
Server Requirements
- CPU: 2 vCPUs (4+ recommended)
- Memory: 2GB RAM (4GB+ recommended)
- Storage: 20GB SSD (50GB+ recommended)
- Network: 1Gbps connection
- OS: Ubuntu 20.04 LTS or newer
Database Requirements (Production)
- PostgreSQL: 12+ or MySQL 8.0+
- Memory: 4GB RAM dedicated
- Storage: 100GB+ SSD with backup
- Connections: 100+ concurrent connections
Load Balancer (High Availability)
- Nginx: 1.18+ or HAProxy 2.0+
- SSL Termination: TLS 1.3 support
- Health Checks: HTTP/HTTPS monitoring
- Rate Limiting: Request throttling
Cloud Deployment Options
AWS Deployment
# docker-compose.aws.yml
version: '3.8'
services:
  self-server:
    image: self-identity:latest
    environment:
      - DATABASE_URL=postgresql://user:pass@rds-endpoint/selfdb
      - JWT_SECRET=${JWT_SECRET}
      - SMTP_HOST=email-smtp.us-east-1.amazonaws.com
    deploy:
      replicas: 3
      resources:
        limits:
          memory: 1G
        reservations:
          memory: 512M
Google Cloud Platform
# gcp-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: self-identity
spec:
  replicas: 3
  selector:
    matchLabels:
      app: self-identity
  template:
    metadata:
      labels:
        app: self-identity
    spec:
      containers:
        - name: self-server
          image: gcr.io/project-id/self-identity:latest
          ports:
            - containerPort: 8080
          env:
            - name: DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: db-secret
                  key: url
Azure Container Instances
# azure-container.yaml
apiVersion: 2019-12-01
location: eastus
name: self-identity
properties:
  containers:
    - name: self-server
      properties:
        image: selfidentity.azurecr.io/self-identity:latest
        resources:
          requests:
            cpu: 1
            memoryInGb: 2
        ports:
          - port: 8080
            protocol: TCP
        environmentVariables:
          - name: DATABASE_URL
            secureValue: postgresql://...
Docker Deployment
Production Dockerfile
# Multi-stage build for optimized production image
FROM rust:1.75 as builder
WORKDIR /app
COPY Cargo.toml Cargo.lock ./
COPY server/ ./server/
COPY components/ ./components/
# Build optimized release binary
RUN cargo build --release --bin server
# Runtime image
FROM debian:bookworm-slim
# curl is required by the HEALTHCHECK below
RUN apt-get update && apt-get install -y \
ca-certificates \
curl \
libssl3 \
&& rm -rf /var/lib/apt/lists/*
# Create non-root user
RUN useradd -r -s /bin/false selfuser
WORKDIR /app
COPY --from=builder /app/target/release/server ./
COPY --chown=selfuser:selfuser static/ ./static/
USER selfuser
EXPOSE 8080
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD curl -f http://localhost:8080/health || exit 1
CMD ["./server"]
Docker Compose Production Setup
# docker-compose.prod.yml
version: '3.8'

services:
  self-server:
    build:
      context: .
      dockerfile: Dockerfile.prod
    restart: unless-stopped
    environment:
      - DATABASE_URL=postgresql://selfuser:${DB_PASSWORD}@postgres:5432/selfdb
      - JWT_SECRET=${JWT_SECRET}
      - SMTP_HOST=${SMTP_HOST}
      - SMTP_USERNAME=${SMTP_USERNAME}
      - SMTP_PASSWORD=${SMTP_PASSWORD}
      - RUST_LOG=info
    ports:
      - "8080:8080"
    depends_on:
      - postgres
      - redis
    networks:
      - self-network
    volumes:
      - ./logs:/app/logs

  postgres:
    image: postgres:15-alpine
    restart: unless-stopped
    environment:
      - POSTGRES_DB=selfdb
      - POSTGRES_USER=selfuser
      - POSTGRES_PASSWORD=${DB_PASSWORD}
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - ./init.sql:/docker-entrypoint-initdb.d/init.sql
    networks:
      - self-network

  redis:
    image: redis:7-alpine
    restart: unless-stopped
    command: redis-server --requirepass ${REDIS_PASSWORD}
    volumes:
      - redis_data:/data
    networks:
      - self-network

  nginx:
    image: nginx:alpine
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf
      - ./ssl:/etc/nginx/ssl
      - ./static:/usr/share/nginx/html
    depends_on:
      - self-server
    networks:
      - self-network

volumes:
  postgres_data:
  redis_data:

networks:
  self-network:
    driver: bridge
Database Setup
PostgreSQL Schema
-- init.sql
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
-- Users table
CREATE TABLE users (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
email VARCHAR(255) UNIQUE NOT NULL,
name VARCHAR(255) NOT NULL,
public_key TEXT UNIQUE NOT NULL,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
is_active BOOLEAN DEFAULT TRUE
);
-- Email verifications table
CREATE TABLE email_verifications (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
email VARCHAR(255) NOT NULL,
token VARCHAR(255) UNIQUE NOT NULL,
verified BOOLEAN DEFAULT FALSE,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
expires_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() + INTERVAL '24 hours'
);
-- Authentication sessions table
CREATE TABLE auth_sessions (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
user_id UUID REFERENCES users(id) ON DELETE CASCADE,
token_hash VARCHAR(255) NOT NULL,
expires_at TIMESTAMP WITH TIME ZONE NOT NULL,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
last_used TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
-- Audit log table
CREATE TABLE audit_logs (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
user_id UUID REFERENCES users(id) ON DELETE SET NULL,
action VARCHAR(100) NOT NULL,
resource VARCHAR(100),
details JSONB,
ip_address INET,
user_agent TEXT,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
-- Indexes for performance
CREATE INDEX idx_users_email ON users(email);
CREATE INDEX idx_users_public_key ON users(public_key);
CREATE INDEX idx_email_verifications_token ON email_verifications(token);
CREATE INDEX idx_auth_sessions_token_hash ON auth_sessions(token_hash);
CREATE INDEX idx_audit_logs_user_id ON audit_logs(user_id);
CREATE INDEX idx_audit_logs_created_at ON audit_logs(created_at);
-- Update trigger for updated_at
CREATE OR REPLACE FUNCTION update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = NOW();
RETURN NEW;
END;
$$ language 'plpgsql';
CREATE TRIGGER update_users_updated_at BEFORE UPDATE ON users
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
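The tables above can be queried directly from the Rust server with sqlx. Below is a minimal sketch, assuming sqlx is built with the postgres, uuid, and macros features; the User struct and query are illustrative and not taken from the Self codebase.
// Example (illustrative): look up a user by email against the schema above
use sqlx::PgPool;
use uuid::Uuid;

#[derive(Debug, sqlx::FromRow)]
struct User {
    id: Uuid,
    email: String,
    name: String,
    public_key: String,
    is_active: bool,
}

async fn find_user_by_email(pool: &PgPool, email: &str) -> Result<Option<User>, sqlx::Error> {
    // fetch_optional returns Ok(None) when no row matches, instead of an error
    sqlx::query_as::<_, User>(
        "SELECT id, email, name, public_key, is_active FROM users WHERE email = $1",
    )
    .bind(email)
    .fetch_optional(pool)
    .await
}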
Database Migration System
// migrations/mod.rs
use sqlx::{PgPool, migrate::MigrateDatabase};
pub async fn run_migrations(database_url: &str) -> Result<(), Box<dyn std::error::Error>> {
// Create database if it doesn't exist
if !sqlx::Postgres::database_exists(database_url).await? {
sqlx::Postgres::create_database(database_url).await?;
}
let pool = PgPool::connect(database_url).await?;
// Run migrations
sqlx::migrate!("./migrations").run(&pool).await?;
Ok(())
}
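At startup the server can run these migrations before accepting traffic. The wiring below is a sketch, assuming a tokio runtime; the pool size and module path are illustrative.
// Example startup wiring (illustrative): migrate first, then build the shared pool
use sqlx::postgres::PgPoolOptions;

mod migrations;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let database_url = std::env::var("DATABASE_URL")?;

    // Create the database if missing and apply pending migrations before serving traffic
    migrations::run_migrations(&database_url).await?;

    // Keep the pool comfortably under the database's connection budget
    let pool = PgPoolOptions::new()
        .max_connections(50)
        .connect(&database_url)
        .await?;

    // ... hand `pool` to the HTTP server / application state ...
    let _ = pool;
    Ok(())
}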
Nginx Configuration
Production Nginx Config
# nginx.conf
events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # Security headers
    add_header X-Frame-Options DENY;
    add_header X-Content-Type-Options nosniff;
    add_header X-XSS-Protection "1; mode=block";
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;

    # Rate limiting
    limit_req_zone $binary_remote_addr zone=api:10m rate=10r/s;
    limit_req_zone $binary_remote_addr zone=auth:10m rate=5r/s;

    # Upstream servers
    upstream self_backend {
        server self-server:8080;
        # Add more servers for load balancing
        # server self-server-2:8080;
        # server self-server-3:8080;
    }

    # HTTP to HTTPS redirect
    server {
        listen 80;
        server_name your-domain.com;
        return 301 https://$server_name$request_uri;
    }

    # HTTPS server
    server {
        listen 443 ssl http2;
        server_name your-domain.com;

        # SSL configuration
        ssl_certificate /etc/nginx/ssl/cert.pem;
        ssl_certificate_key /etc/nginx/ssl/key.pem;
        ssl_protocols TLSv1.2 TLSv1.3;
        ssl_ciphers ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256;
        ssl_prefer_server_ciphers off;

        # Security headers
        add_header Content-Security-Policy "default-src 'self'; script-src 'self' 'unsafe-inline'; style-src 'self' 'unsafe-inline'";

        # Static files
        location /static/ {
            alias /usr/share/nginx/html/;
            expires 1y;
            add_header Cache-Control "public, immutable";
        }

        # API endpoints with rate limiting
        location /api/ {
            limit_req zone=api burst=20 nodelay;
            proxy_pass http://self_backend;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }

        # OAuth endpoints with stricter rate limiting
        location /oauth/ {
            limit_req zone=auth burst=10 nodelay;
            proxy_pass http://self_backend;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }

        # Health check
        location /health {
            proxy_pass http://self_backend;
            access_log off;
        }

        # Default location
        location / {
            proxy_pass http://self_backend;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }
    }
}
SSL/TLS Configuration
Let's Encrypt Setup
#!/bin/bash
# setup-ssl.sh
# Install certbot
sudo apt-get update
sudo apt-get install -y certbot python3-certbot-nginx
# Obtain certificate
sudo certbot --nginx -d your-domain.com
# Setup auto-renewal (append to root's crontab rather than overwriting it)
(sudo crontab -l 2>/dev/null; echo "0 12 * * * /usr/bin/certbot renew --quiet") | sudo crontab -
Manual Certificate Setup
#!/bin/bash
# manual-ssl.sh
# Generate private key
openssl genrsa -out key.pem 2048
# Generate certificate signing request
openssl req -new -key key.pem -out cert.csr
# Generate self-signed certificate (for testing)
openssl x509 -req -days 365 -in cert.csr -signkey key.pem -out cert.pem
# Set proper permissions
chmod 600 key.pem
chmod 644 cert.pem
Environment Configuration
Production Environment Variables
# .env.production
# Note: docker-compose reads these names directly; Config::from_env (below) expects
# SELF_-prefixed variants (e.g. SELF_DATABASE_URL), so export both forms or adjust the prefix.
# Database
DATABASE_URL=postgresql://selfuser:secure_password@localhost:5432/selfdb
# JWT Configuration
JWT_SECRET=your-super-secure-jwt-secret-key-here
JWT_EXPIRATION=3600
# SMTP Configuration
SMTP_HOST=smtp.your-provider.com
SMTP_PORT=587
SMTP_USERNAME=your-smtp-username
SMTP_PASSWORD=your-smtp-password
SMTP_FROM=noreply@your-domain.com
# Server Configuration
SERVER_PORT=8080
SERVER_HOST=0.0.0.0
BASE_URL=https://your-domain.com
# Redis Configuration (for sessions)
REDIS_URL=redis://localhost:6379
REDIS_PASSWORD=your-redis-password
# Logging
RUST_LOG=info
LOG_LEVEL=info
# Security
CORS_ORIGINS=https://your-frontend-domain.com
RATE_LIMIT_REQUESTS=100
RATE_LIMIT_WINDOW=60
# Monitoring
METRICS_ENABLED=true
HEALTH_CHECK_ENABLED=true
Configuration Management
// config.rs
use serde::Deserialize;
use std::env;
#[derive(Debug, Deserialize)]
pub struct Config {
pub database_url: String,
pub jwt_secret: String,
pub jwt_expiration: u64,
pub smtp: SmtpConfig,
pub server: ServerConfig,
pub redis_url: Option<String>,
pub cors_origins: Vec<String>,
pub rate_limit: RateLimitConfig,
}
#[derive(Debug, Deserialize)]
pub struct SmtpConfig {
pub host: String,
pub port: u16,
pub username: String,
pub password: String,
pub from: String,
}
#[derive(Debug, Deserialize)]
pub struct ServerConfig {
pub host: String,
pub port: u16,
pub base_url: String,
}
#[derive(Debug, Deserialize)]
pub struct RateLimitConfig {
pub requests: u32,
pub window: u64,
}
impl Config {
pub fn from_env() -> Result<Self, config::ConfigError> {
let mut cfg = config::Config::builder();
// Load from environment variables (SELF_-prefixed; "__" separates nested keys, e.g. SELF_SMTP__HOST,
// and try_parsing converts numeric/boolean values from their string form)
cfg = cfg.add_source(config::Environment::with_prefix("SELF").separator("__").try_parsing(true));
// Load from config file if exists
if let Ok(config_path) = env::var("CONFIG_PATH") {
cfg = cfg.add_source(config::File::with_name(&config_path));
}
cfg.build()?.try_deserialize()
}
}
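Given the builder above, startup can fail fast on a bad configuration. A minimal sketch, assuming environment variables are exported with the SELF_ prefix (and "__" between nested keys) that Config::from_env expects:
// Example (illustrative): load configuration once at startup and fail fast
fn main() {
    let config = match Config::from_env() {
        Ok(cfg) => cfg,
        Err(err) => {
            eprintln!("failed to load configuration: {err}");
            std::process::exit(1);
        }
    };

    println!(
        "starting server on {}:{} (base URL {})",
        config.server.host, config.server.port, config.server.base_url
    );
}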
Monitoring and Logging
Prometheus Metrics
// metrics.rs
use prometheus::{Counter, Gauge, Histogram, HistogramOpts, Registry};
use std::sync::Arc;
pub struct Metrics {
pub registry: Registry,
pub http_requests_total: Counter,
pub http_request_duration: Histogram,
pub active_connections: Gauge,
pub auth_attempts_total: Counter,
pub auth_failures_total: Counter,
}
impl Metrics {
pub fn new() -> Arc<Self> {
let registry = Registry::new();
let http_requests_total = Counter::new(
"http_requests_total",
"Total HTTP requests"
).unwrap();
let http_request_duration = Histogram::with_opts(HistogramOpts::new(
"http_request_duration_seconds",
"HTTP request duration"
)).unwrap();
let active_connections = Gauge::new(
"active_connections",
"Active connections"
).unwrap();
let auth_attempts_total = Counter::new(
"auth_attempts_total",
"Total authentication attempts"
).unwrap();
let auth_failures_total = Counter::new(
"auth_failures_total",
"Total authentication failures"
).unwrap();
registry.register(Box::new(http_requests_total.clone())).unwrap();
registry.register(Box::new(http_request_duration.clone())).unwrap();
registry.register(Box::new(active_connections.clone())).unwrap();
registry.register(Box::new(auth_attempts_total.clone())).unwrap();
registry.register(Box::new(auth_failures_total.clone())).unwrap();
Arc::new(Metrics {
registry,
http_requests_total,
http_request_duration,
active_connections,
auth_attempts_total,
auth_failures_total,
})
}
}
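Prometheus scrapes these metrics as text. The sketch below shows the encoding step, assuming the Metrics struct above is shared (for example as an Arc<Metrics>) and exposed on a /metrics route.
// Example (illustrative): render the registry in the Prometheus text format
use prometheus::{Encoder, TextEncoder};

pub fn render_metrics(metrics: &Metrics) -> String {
    let encoder = TextEncoder::new();
    let metric_families = metrics.registry.gather();
    let mut buffer = Vec::new();
    encoder
        .encode(&metric_families, &mut buffer)
        .expect("encoding gathered metrics should not fail");
    String::from_utf8(buffer).expect("Prometheus text output is valid UTF-8")
}
Returning this string from a /metrics handler is what the Prometheus container in the monitoring stack below is expected to scrape.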
Structured Logging
// logging.rs
use tracing::info;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
pub fn init_logging() {
tracing_subscriber::registry()
.with(tracing_subscriber::EnvFilter::new(
std::env::var("RUST_LOG").unwrap_or_else(|_| "info".into()),
))
.with(tracing_subscriber::fmt::layer().json())
.init();
}
pub fn log_security_event(event_type: &str, details: serde_json::Value) {
info!(
event_type = event_type,
details = %details,
timestamp = %chrono::Utc::now(),
"Security event logged"
);
}
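A short usage sketch: initialize logging before anything else so every subsequent event is emitted as structured JSON. The event name and fields below are illustrative.
// Example (illustrative): emit a structured security event
fn main() {
    init_logging();

    log_security_event(
        "login_failure",
        serde_json::json!({
            "email": "user@example.com",
            "reason": "invalid_signature",
            "ip": "203.0.113.7"
        }),
    );
}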
Backup and Recovery
Database Backup Script
#!/bin/bash
# backup-db.sh
set -e
DB_NAME="selfdb"
DB_USER="selfuser"
BACKUP_DIR="/backups"
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_FILE="$BACKUP_DIR/selfdb_backup_$DATE.sql"
# Create backup directory
mkdir -p $BACKUP_DIR
# Create database backup (assumes PGPASSWORD or ~/.pgpass is set for non-interactive auth)
pg_dump -h localhost -U $DB_USER -d $DB_NAME > $BACKUP_FILE
# Compress backup
gzip $BACKUP_FILE
# Upload to cloud storage (AWS S3 example)
aws s3 cp $BACKUP_FILE.gz s3://your-backup-bucket/database/
# Clean up old backups (keep last 30 days)
find $BACKUP_DIR -name "selfdb_backup_*.sql.gz" -mtime +30 -delete
echo "Backup completed: $BACKUP_FILE.gz"
Automated Backup Cron Job
# Add to crontab: crontab -e
# Run backup every day at 2 AM
0 2 * * * /path/to/backup-db.sh >> /var/log/backup.log 2>&1
Health Checks and Monitoring
Health Check Endpoint
// health.rs
use axum::{Json, response::IntoResponse};
use serde_json::json;
use sqlx::PgPool;
pub async fn health_check(pool: &PgPool) -> impl IntoResponse {
let mut status = "healthy";
let mut checks = serde_json::Map::new();
// Database health check
match sqlx::query("SELECT 1").fetch_one(pool).await {
Ok(_) => {
checks.insert("database".to_string(), json!("healthy"));
}
Err(_) => {
status = "unhealthy";
checks.insert("database".to_string(), json!("unhealthy"));
}
}
// Memory usage check (get_memory_usage() is an application-specific helper, not shown here, returning the percentage of memory in use)
let memory_usage = get_memory_usage();
if memory_usage < 90.0 {
checks.insert("memory".to_string(), json!("healthy"));
} else {
status = "degraded";
checks.insert("memory".to_string(), json!("high"));
}
Json(json!({
"status": status,
"timestamp": chrono::Utc::now(),
"version": env!("CARGO_PKG_VERSION"),
"checks": checks
}))
}
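To expose this over HTTP, a handler can pull the pool from shared state. A sketch assuming an axum router with typed state, building on the imports in health.rs above; exact wiring depends on the axum version the server uses.
// Example (illustrative): mount the health check on /health
use axum::{extract::State, routing::get, Router};

pub fn health_routes(pool: PgPool) -> Router {
    Router::new()
        .route("/health", get(health_handler))
        .with_state(pool)
}

async fn health_handler(State(pool): State<PgPool>) -> impl IntoResponse {
    health_check(&pool).await
}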
Monitoring Dashboard
# docker-compose.monitoring.yml
version: '3.8'

services:
  prometheus:
    image: prom/prometheus:latest
    ports:
      - "9090:9090"
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml
      - prometheus_data:/prometheus

  grafana:
    image: grafana/grafana:latest
    ports:
      - "3000:3000"
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=admin
    volumes:
      - grafana_data:/var/lib/grafana
      - ./grafana/dashboards:/etc/grafana/provisioning/dashboards
      - ./grafana/datasources:/etc/grafana/provisioning/datasources

volumes:
  prometheus_data:
  grafana_data:
Security Hardening
Server Hardening Checklist
- System Updates
sudo apt update && sudo apt upgrade -y
sudo apt install unattended-upgrades

- Firewall Configuration
sudo ufw default deny incoming
sudo ufw default allow outgoing
sudo ufw allow ssh
sudo ufw allow 80/tcp
sudo ufw allow 443/tcp
sudo ufw enable

- SSH Hardening
# /etc/ssh/sshd_config
PermitRootLogin no
PasswordAuthentication no
PubkeyAuthentication yes
Port 2222  # Change default port

- Fail2Ban Setup
sudo apt install fail2ban
sudo systemctl enable fail2ban
sudo systemctl start fail2ban
Application Security
// security middleware
use axum::{
middleware::{self, Next},
http::{Request, HeaderValue},
response::Response,
};
pub async fn security_headers<B>(
request: Request<B>,
next: Next<B>,
) -> Response {
let mut response = next.run(request).await;
let headers = response.headers_mut();
headers.insert("X-Frame-Options", HeaderValue::from_static("DENY"));
headers.insert("X-Content-Type-Options", HeaderValue::from_static("nosniff"));
headers.insert("X-XSS-Protection", HeaderValue::from_static("1; mode=block"));
headers.insert(
"Strict-Transport-Security",
HeaderValue::from_static("max-age=31536000; includeSubDomains")
);
response
}
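Attaching the middleware is a single .layer call on the router. The sketch below matches the axum 0.6-style Next<B> signature above and reuses its middleware import; the example route is a placeholder.
// Example (illustrative): apply the security headers to every route
use axum::{routing::get, Router};

pub fn app() -> Router {
    Router::new()
        .route("/health", get(|| async { "ok" }))
        .layer(middleware::from_fn(security_headers))
}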
Deployment Scripts
Deployment Automation
#!/bin/bash
# deploy.sh
set -e
echo "Starting deployment..."
# Pull latest code
git pull origin main
# Build Docker image
docker build -t self-identity:latest .
# Run database migrations (the server also applies sqlx migrations on startup;
# this manual step assumes the migration SQL is mounted into the postgres container)
docker-compose exec postgres psql -U selfuser -d selfdb -f /migrations/latest.sql
# Recreate the application container (brief downtime unless multiple replicas sit behind the load balancer)
docker-compose up -d --no-deps self-server
# Wait for health check
echo "Waiting for service to be healthy..."
for i in {1..30}; do
if curl -f http://localhost:8080/health; then
echo "Service is healthy!"
break
fi
sleep 2
done
# Clean up old images
docker image prune -f
echo "Deployment completed successfully!"
Rollback Script
#!/bin/bash
# rollback.sh
set -e
PREVIOUS_VERSION=${1:-"previous"}
echo "Rolling back to version: $PREVIOUS_VERSION"
# Pull previous image
docker pull self-identity:$PREVIOUS_VERSION
# Update docker-compose to use previous version
sed -i "s/self-identity:latest/self-identity:$PREVIOUS_VERSION/g" docker-compose.yml
# Restart services
docker-compose up -d --no-deps self-server
echo "Rollback completed!"
This deployment guide covers the core concerns of running Self in production, from infrastructure setup through monitoring, backups, and security hardening. The configuration is intended as a scalable, secure, and maintainable baseline; adjust resource limits, replica counts, and rate limits to match your workload.