Set up NGINX as a high-performance reverse proxy with Redis-backed caching, multiple load balancing algorithms, SSL termination, and comprehensive monitoring for production environments.
Prerequisites
- Root or sudo access
- Multiple backend servers configured
- Basic understanding of HTTP/HTTPS protocols
- Redis server available
What this solves
NGINX reverse proxy with advanced caching and load balancing provides high-performance request distribution across multiple backend servers while reducing response times through intelligent caching strategies. This setup handles SSL termination, implements failover mechanisms, and scales horizontally to support high-traffic production environments with Redis-backed cache storage and comprehensive performance monitoring.
Step-by-step configuration
Install NGINX with required modules
Install NGINX with additional modules for advanced functionality. On Debian/Ubuntu the nginx-extras package bundles extra third-party modules (including cache purging via ngx_cache_purge, which this guide uses); Redis is used here as an external cache and session store rather than through an in-NGINX Redis module.
sudo apt update && sudo apt upgrade -y
sudo apt install -y nginx-extras redis-server ssl-cert
Configure Redis for NGINX caching
Set up Redis with optimized memory settings and persistence for cache storage. Configure Redis to handle high-frequency cache operations with appropriate memory limits.
# Memory optimization for caching: cap memory and evict least-recently-used
# keys across the whole keyspace once the cap is reached.
maxmemory 2gb
maxmemory-policy allkeys-lru

# Network configuration: local-only listener for the NGINX host.
bind 127.0.0.1
port 6379
timeout 0

# Persistence settings: RDB snapshots at the stock thresholds.
save 900 1
save 300 10
save 60 10000

# Performance tuning
tcp-keepalive 300
tcp-backlog 511
sudo systemctl enable --now redis-server
sudo systemctl status redis-server
Create SSL certificates
Generate SSL certificates for secure HTTPS termination. For production environments, replace with certificates from a trusted CA like Let's Encrypt.
sudo mkdir -p /etc/nginx/ssl
sudo openssl req -x509 -nodes -days 365 -newkey rsa:4096 \
-keyout /etc/nginx/ssl/nginx-selfsigned.key \
-out /etc/nginx/ssl/nginx-selfsigned.crt \
-subj "/C=US/ST=CA/L=San Francisco/O=Example Corp/CN=example.com"
sudo chmod 600 /etc/nginx/ssl/nginx-selfsigned.key
sudo chmod 644 /etc/nginx/ssl/nginx-selfsigned.crt
Configure upstream servers and load balancing
Define upstream server groups with different load balancing algorithms and health check parameters. This configuration supports multiple backend applications with failover capabilities.
# Primary application servers with least_conn algorithm
upstream app_backend {
    least_conn;
    server 203.0.113.10:8080 max_fails=3 fail_timeout=30s weight=3;
    server 203.0.113.11:8080 max_fails=3 fail_timeout=30s weight=3;
    # backup: only receives traffic when both primary servers are marked down
    server 203.0.113.12:8080 max_fails=3 fail_timeout=30s weight=2 backup;
    keepalive 32;
    keepalive_requests 100;
    keepalive_timeout 60s;
}

# API servers with ip_hash for session persistence
upstream api_backend {
    ip_hash;
    server 203.0.113.20:3000 max_fails=2 fail_timeout=20s;
    server 203.0.113.21:3000 max_fails=2 fail_timeout=20s;
    keepalive 16;
}

# Static content servers with round-robin (the default algorithm)
upstream static_backend {
    server 203.0.113.30:80 max_fails=2 fail_timeout=15s;
    server 203.0.113.31:80 max_fails=2 fail_timeout=15s;
}
Configure advanced caching with multiple zones
Set up multiple cache zones for different content types with Redis backend integration and intelligent cache key generation.
# Cache path and zone definitions. Each keys_zone holds cache keys in shared
# memory (~8000 keys per MB); max_size caps on-disk usage; inactive evicts
# entries not requested within the window regardless of validity.
proxy_cache_path /var/cache/nginx/app
    levels=1:2
    keys_zone=app_cache:256m
    max_size=10g
    inactive=60m
    use_temp_path=off;

proxy_cache_path /var/cache/nginx/api
    levels=1:2
    keys_zone=api_cache:128m
    max_size=2g
    inactive=30m
    use_temp_path=off;

proxy_cache_path /var/cache/nginx/static
    levels=1:2
    keys_zone=static_cache:64m
    max_size=5g
    inactive=7d
    use_temp_path=off;

# Redis upstream for session data (proxied to by application-level handlers)
upstream redis_backend {
    server 127.0.0.1:6379;
    keepalive 8;
}
sudo mkdir -p /var/cache/nginx/{app,api,static}
sudo chown -R www-data:www-data /var/cache/nginx
sudo chmod -R 755 /var/cache/nginx
Create main NGINX configuration
Configure the main NGINX settings with performance optimizations, worker processes, and global security headers.
# Main NGINX configuration (/etc/nginx/nginx.conf).
user www-data;
worker_processes auto;
worker_rlimit_nofile 65535;
pid /run/nginx.pid;

events {
    worker_connections 4096;
    use epoll;
    multi_accept on;
}

http {
    # Core transfer settings
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    server_tokens off;

    # Client buffer sizing
    client_body_buffer_size 16K;
    client_header_buffer_size 1k;
    client_max_body_size 100M;
    large_client_header_buffers 4 8k;

    # Client timeouts (seconds)
    client_body_timeout 12;
    client_header_timeout 12;
    send_timeout 10;

    # MIME type handling
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # Response compression
    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    gzip_comp_level 6;
    gzip_types text/plain text/css text/xml text/javascript
        application/json application/javascript application/xml+rss
        application/atom+xml image/svg+xml;

    # Rate-limit zones (consumed by limit_req in the server blocks)
    limit_req_zone $binary_remote_addr zone=api:10m rate=10r/s;
    limit_req_zone $binary_remote_addr zone=login:10m rate=1r/s;

    # Per-IP connection limit zone
    limit_conn_zone $binary_remote_addr zone=perip:10m;

    # Trust X-Forwarded-For only from private (RFC 1918) proxy ranges
    set_real_ip_from 10.0.0.0/8;
    set_real_ip_from 172.16.0.0/12;
    set_real_ip_from 192.168.0.0/16;
    real_ip_header X-Forwarded-For;
    real_ip_recursive on;

    # Access log format including upstream timings and cache status
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
        '$status $body_bytes_sent "$http_referer" '
        '"$http_user_agent" "$http_x_forwarded_for" '
        'rt=$request_time uct="$upstream_connect_time" '
        'uht="$upstream_header_time" urt="$upstream_response_time" '
        'cache=$upstream_cache_status';
    access_log /var/log/nginx/access.log main;
    error_log /var/log/nginx/error.log warn;

    # Pull in upstream/cache definitions and virtual hosts
    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}
Configure virtual host with SSL termination
Create the main virtual host configuration with SSL termination, security headers, and intelligent request routing based on URL patterns.
# Port-80 server: everything is redirected to HTTPS except the plain-text
# health check used by load balancers and uptime probes.
server {
    listen 80;
    server_name example.com www.example.com;

    # Health check endpoint (kept on HTTP so probes avoid TLS overhead)
    location /health {
        access_log off;
        return 200 "healthy\n";
        add_header Content-Type text/plain;
    }

    # Redirect all other HTTP traffic to HTTPS
    location / {
        return 301 https://$server_name$request_uri;
    }
}
server {
    listen 443 ssl http2;
    server_name example.com www.example.com;

    # SSL Configuration
    ssl_certificate /etc/nginx/ssl/nginx-selfsigned.crt;
    ssl_certificate_key /etc/nginx/ssl/nginx-selfsigned.key;
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-RSA-AES128-GCM-SHA256;
    ssl_prefer_server_ciphers off;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;
    # NOTE: OCSP stapling is silently ignored for self-signed certificates;
    # these take effect once a CA-issued certificate is installed.
    ssl_stapling on;
    ssl_stapling_verify on;

    # Security headers.
    # NOTE: add_header is inherited from this level ONLY by locations that
    # define no add_header of their own, so every location below that adds a
    # header (e.g. X-Cache-Status) must repeat the set it needs.
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-Content-Type-Options "nosniff" always;
    add_header X-XSS-Protection "1; mode=block" always;
    add_header Referrer-Policy "no-referrer-when-downgrade" always;
    add_header Content-Security-Policy "default-src 'self' http: https: data: blob: 'unsafe-inline'" always;

    # Connection limiting
    limit_conn perip 20;

    # API endpoints with caching and rate limiting
    location /api/ {
        limit_req zone=api burst=20 nodelay;
        proxy_pass http://api_backend;
        proxy_cache api_cache;
        proxy_cache_valid 200 302 5m;
        proxy_cache_valid 404 1m;
        proxy_cache_valid any 1m;
        # $request_uri already includes the query string, so nothing more
        # is appended to the key here.
        proxy_cache_key "$scheme$request_method$host$request_uri";
        proxy_cache_bypass $http_cache_control;
        # Cache status header. Defining add_header here cancels inheritance,
        # so the server-level security headers are re-declared.
        add_header X-Cache-Status $upstream_cache_status;
        add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
        add_header X-Frame-Options "SAMEORIGIN" always;
        add_header X-Content-Type-Options "nosniff" always;
        add_header X-XSS-Protection "1; mode=block" always;
        add_header Referrer-Policy "no-referrer-when-downgrade" always;
        add_header Content-Security-Policy "default-src 'self' http: https: data: blob: 'unsafe-inline'" always;
        # Proxy headers
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        # Timeouts
        proxy_connect_timeout 5s;
        proxy_send_timeout 10s;
        proxy_read_timeout 30s;
        # Buffer settings
        proxy_buffering on;
        proxy_buffer_size 4k;
        proxy_buffers 8 4k;
        proxy_busy_buffers_size 8k;
    }

    # Static content with long-term caching
    location /static/ {
        proxy_pass http://static_backend;
        proxy_cache static_cache;
        proxy_cache_valid 200 7d;
        proxy_cache_valid 404 1h;
        # Long cache headers for static content; re-declare security headers
        # (see note above on add_header inheritance).
        expires 7d;
        add_header Cache-Control "public, immutable";
        add_header X-Cache-Status $upstream_cache_status;
        add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
        add_header X-Frame-Options "SAMEORIGIN" always;
        add_header X-Content-Type-Options "nosniff" always;
        add_header X-XSS-Protection "1; mode=block" always;
        add_header Referrer-Policy "no-referrer-when-downgrade" always;
        add_header Content-Security-Policy "default-src 'self' http: https: data: blob: 'unsafe-inline'" always;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    # Login endpoints with strict rate limiting
    location /login {
        limit_req zone=login burst=5 nodelay;
        proxy_pass http://app_backend;
        proxy_cache off;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        # Security headers for sensitive endpoints: X-Frame-Options is
        # deliberately tightened to DENY; the rest are re-declared because
        # add_header here cancels server-level inheritance.
        add_header X-Frame-Options "DENY" always;
        add_header Cache-Control "no-cache, no-store, must-revalidate" always;
        add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
        add_header X-Content-Type-Options "nosniff" always;
        add_header X-XSS-Protection "1; mode=block" always;
        add_header Referrer-Policy "no-referrer-when-downgrade" always;
        add_header Content-Security-Policy "default-src 'self' http: https: data: blob: 'unsafe-inline'" always;
    }

    # Main application with intelligent caching
    location / {
        proxy_pass http://app_backend;
        proxy_cache app_cache;
        proxy_cache_valid 200 302 10m;
        proxy_cache_valid 404 5m;
        proxy_cache_valid any 2m;
        # Keyed on $uri$is_args$args (not $request_uri, which already
        # contains the query string and would duplicate it). This key shape
        # is what the /purge/ location below reconstructs.
        proxy_cache_key "$scheme$request_method$host$uri$is_args$args";
        # Cache bypass conditions
        proxy_cache_bypass $cookie_nocache $arg_nocache $http_pragma $http_authorization;
        proxy_no_cache $cookie_nocache $arg_nocache $http_pragma $http_authorization;
        # Cache status header plus re-declared security headers (add_header
        # here cancels server-level inheritance).
        add_header X-Cache-Status $upstream_cache_status;
        add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
        add_header X-Frame-Options "SAMEORIGIN" always;
        add_header X-Content-Type-Options "nosniff" always;
        add_header X-XSS-Protection "1; mode=block" always;
        add_header Referrer-Policy "no-referrer-when-downgrade" always;
        add_header Content-Security-Policy "default-src 'self' http: https: data: blob: 'unsafe-inline'" always;
        # Proxy headers
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Host $server_name;
        # Keepalive to the upstream requires HTTP/1.1 and a cleared
        # Connection header
        proxy_http_version 1.1;
        proxy_set_header Connection "";
        # Timeouts and buffers
        proxy_connect_timeout 5s;
        proxy_send_timeout 30s;
        proxy_read_timeout 60s;
        proxy_buffering on;
        proxy_buffer_size 8k;
        proxy_buffers 16 8k;
        proxy_busy_buffers_size 16k;
    }

    # Cache purge endpoint (restrict to internal networks).
    # ngx_cache_purge removes the entry whose key matches exactly; app_cache
    # entries are keyed with the literal method "GET" and the URI *after*
    # the /purge prefix, so both are reconstructed here rather than using
    # this request's own $request_method/$uri (a PURGE /purge/... key would
    # never match anything).
    location ~ ^/purge(/.*) {
        allow 127.0.0.1;
        allow 10.0.0.0/8;
        allow 172.16.0.0/12;
        allow 192.168.0.0/16;
        deny all;
        proxy_cache_purge app_cache "${scheme}GET${host}$1$is_args$args";
    }

    # NGINX stub_status for monitoring (internal networks only)
    location /nginx_status {
        stub_status on;
        allow 127.0.0.1;
        allow 10.0.0.0/8;
        deny all;
        access_log off;
    }
}
Enable and configure monitoring
Set up NGINX status monitoring and configure log rotation for production environments. This provides essential metrics for performance analysis.
sudo ln -s /etc/nginx/sites-available/reverse-proxy /etc/nginx/sites-enabled/
sudo rm -f /etc/nginx/sites-enabled/default
sudo nginx -t
# Log rotation policy for NGINX logs — install as /etc/logrotate.d/nginx.
# Rotates daily, keeps 14 compressed generations, and signals NGINX via
# invoke-rc.d so it reopens its log files after rotation.
/var/log/nginx/*.log {
daily
missingok
rotate 14
compress
delaycompress
notifempty
create 0640 www-data adm
sharedscripts
prerotate
if [ -d /etc/logrotate.d/httpd-prerotate ]; then \
run-parts /etc/logrotate.d/httpd-prerotate; \
fi \
endscript
postrotate
invoke-rc.d nginx rotate >/dev/null 2>&1
endscript
}
Configure cache management and cleanup
Set up automated cache cleanup and management scripts to prevent disk space issues and maintain optimal performance.
#!/bin/bash
# NGINX Cache Cleanup Script
# Trims NGINX proxy-cache directories that exceed MAX_SIZE_GB and asks Redis
# to release freed allocator memory when usage is high. Install as
# /usr/local/bin/nginx-cache-cleanup.sh and run it from the systemd timer.

CACHE_DIRS=("/var/cache/nginx/app" "/var/cache/nginx/api" "/var/cache/nginx/static")
MAX_SIZE_GB=8
LOG_FILE="/var/log/nginx/cache-cleanup.log"

echo "[$(date)] Starting cache cleanup" >> "$LOG_FILE"

for dir in "${CACHE_DIRS[@]}"; do
    if [ -d "$dir" ]; then
        # GNU du has no -g flag; --block-size=1G prints whole GiB as a bare
        # integer suitable for arithmetic comparison.
        current_size=$(du -s --block-size=1G -- "$dir" | cut -f1)
        if [ "$current_size" -gt "$MAX_SIZE_GB" ]; then
            echo "[$(date)] Cache $dir size: ${current_size}GB, cleaning up" >> "$LOG_FILE"
            # Drop files not accessed within a day, then prune empty dirs
            find "$dir" -type f -atime +1 -delete
            find "$dir" -type d -empty -delete
        fi
    fi
done

# Release Redis memory back to the OS if usage is high. Parse used_memory
# (plain bytes) rather than used_memory_human, which may be reported in
# K/M/G units; redis-cli INFO lines are CRLF-terminated, so strip the CR.
redis_memory=$(redis-cli INFO memory | awk -F: '/^used_memory:/ {gsub(/\r/, ""); print $2; exit}')
if [ -n "$redis_memory" ] && [ "$redis_memory" -gt $((1500 * 1024 * 1024)) ]; then
    echo "[$(date)] Redis memory usage high, purging allocator memory" >> "$LOG_FILE"
    # Key eviction itself is handled by maxmemory-policy allkeys-lru;
    # MEMORY PURGE (Redis >= 4.0) just returns fragmented pages to the OS.
    redis-cli MEMORY PURGE >/dev/null
fi

echo "[$(date)] Cache cleanup completed" >> "$LOG_FILE"
sudo chmod +x /usr/local/bin/nginx-cache-cleanup.sh
sudo chown root:root /usr/local/bin/nginx-cache-cleanup.sh
Create systemd timer for automated maintenance
Set up automated cache maintenance with systemd timers to run cleanup tasks periodically without manual intervention.
# /etc/systemd/system/nginx-cache-cleanup.service
# One-shot service that runs the cleanup script; it is triggered by the
# timer below rather than enabled on its own.
[Unit]
Description=NGINX Cache Cleanup Service
Wants=nginx-cache-cleanup.timer
[Service]
Type=oneshot
ExecStart=/usr/local/bin/nginx-cache-cleanup.sh
User=root
Group=root
[Install]
WantedBy=multi-user.target

# /etc/systemd/system/nginx-cache-cleanup.timer
# Fires 1h after boot and every 6h thereafter; Persistent=true runs a
# missed iteration at the next boot.
[Unit]
Description=Run NGINX cache cleanup every 6 hours
Requires=nginx-cache-cleanup.service
[Timer]
OnBootSec=1h
OnUnitActiveSec=6h
Persistent=true
[Install]
WantedBy=timers.target
sudo systemctl daemon-reload
sudo systemctl enable --now nginx-cache-cleanup.timer
sudo systemctl enable --now nginx
Verify your setup
# Check NGINX configuration and status
sudo nginx -t
sudo systemctl status nginx

# Verify Redis is running
sudo systemctl status redis-server
redis-cli ping

# Test SSL configuration
openssl s_client -connect example.com:443 -servername example.com

# Check upstream server connectivity
curl -I http://203.0.113.10:8080

# Test cache functionality
curl -I https://example.com/api/test
curl -I https://example.com/api/test # Should show cache hit

# Monitor cache status.
# /nginx_status is defined in the HTTPS server block (the :80 server only
# redirects), so query it over HTTPS; -k accepts the self-signed cert.
watch -n 2 'curl -sk https://127.0.0.1/nginx_status'

# Check cache directory sizes
du -sh /var/cache/nginx/*

# Test load balancing with multiple requests
for i in {1..10}; do curl -I https://example.com/; done

# Verify rate limiting
ab -n 100 -c 10 https://example.com/api/test
Performance monitoring and tuning
Configure comprehensive monitoring to track performance metrics and identify bottlenecks. This setup integrates with existing monitoring solutions for complete observability.
#!/bin/bash
# NGINX Performance Monitoring Script
# Samples stub_status, cache, and Redis metrics and appends a single
# key=value log line for ingestion by external monitoring tools.

METRICS_FILE="/var/log/nginx/performance-metrics.log"
# /nginx_status lives in the HTTPS server block; a plain-HTTP request would
# only get the :80 server's 301 redirect. -k below accepts the self-signed
# certificate.
NGINX_STATUS_URL="https://127.0.0.1/nginx_status"

# Collect NGINX metrics. stub_status output looks like:
#   Active connections: N
#   ...
#   Reading: R Writing: W Waiting: Q
ngx_stats=$(curl -sk "$NGINX_STATUS_URL")
active_connections=$(echo "$ngx_stats" | grep "Active connections" | cut -d: -f2 | tr -d ' ')
reading=$(echo "$ngx_stats" | grep "Reading" | cut -d' ' -f2)
writing=$(echo "$ngx_stats" | grep "Writing" | cut -d' ' -f4)
waiting=$(echo "$ngx_stats" | grep "Waiting" | cut -d' ' -f6)

# Collect cache metrics
cache_size=$(du -sb /var/cache/nginx | cut -f1)
cache_files=$(find /var/cache/nginx -type f | wc -l)

# Collect Redis metrics. INFO lines are CRLF-terminated; strip the CR so
# the value does not corrupt the log line or later comparisons.
redis_memory=$(redis-cli INFO memory | grep '^used_memory:' | cut -d: -f2 | tr -d '\r')
redis_keys=$(redis-cli DBSIZE)

# Log metrics
echo "[$(date '+%Y-%m-%d %H:%M:%S')] active_conn=$active_connections reading=$reading writing=$writing waiting=$waiting cache_size=$cache_size cache_files=$cache_files redis_memory=$redis_memory redis_keys=$redis_keys" >> "$METRICS_FILE"

# Alert on high connection count (guard against an empty value when the
# status endpoint was unreachable).
if [ -n "$active_connections" ] && [ "$active_connections" -gt 1000 ]; then
    echo "[$(date)] HIGH LOAD ALERT: Active connections: $active_connections" | logger -t nginx-monitor
fi
sudo chmod +x /usr/local/bin/nginx-monitor.sh
sudo /usr/local/bin/nginx-monitor.sh
For comprehensive monitoring integration, you can extend this setup with Prometheus and Grafana dashboards or implement application-level monitoring for complete observability.
Test load balancing and failover
# Test upstream server failover
# Temporarily stop one backend server
ssh user@203.0.113.10 "sudo systemctl stop your-app"

# Monitor traffic distribution while the backend is down
for i in {1..20}; do
    curl -s -o /dev/null -w "%{http_code} %{time_total}\n" https://example.com/
done

# Restart the server and verify recovery
ssh user@203.0.113.10 "sudo systemctl start your-app"

# Test different load balancing algorithms:
# monitor upstream status in access logs
tail -f /var/log/nginx/access.log | grep upstream_response_time

# Test cache purging (the /purge/ location maps /purge/<path> to the
# cached entry for <path>)
curl -X PURGE https://example.com/purge/api/test

# Benchmark performance under load
ab -n 10000 -c 100 https://example.com/
wrk -t12 -c400 -d30s https://example.com/
Common issues
| Symptom | Cause | Fix |
|---|---|---|
| 502 Bad Gateway errors | Backend servers unavailable | Check upstream server health: curl -I http://backend-ip:port |
| Cache not working | Cache bypass headers present | Check proxy_cache_bypass conditions and request headers |
| SSL handshake failures | Certificate or cipher issues | Test with: openssl s_client -connect domain:443 -servername domain |
| High memory usage | Cache zones too large | Reduce keys_zone/max_size in proxy_cache_path, then apply with: nginx -s reload |
| Rate limiting false positives | Shared IP addresses or CDN | Adjust limit_req_zone rates or exclude trusted IPs |
| Redis connection errors | Redis server down or misconfigured | Check Redis status: sudo systemctl status redis-server |
| Load balancing not working | All requests to one server | Verify upstream configuration and remove ip_hash if not needed |
| Cache disk space full | No cache cleanup configured | Run cleanup script: /usr/local/bin/nginx-cache-cleanup.sh |
Next steps
- Configure NGINX Redis caching with SSL authentication for enhanced security
- Implement NGINX Redis cluster caching for high availability scenarios
- Configure NGINX load balancing with health checks for advanced failover detection
- Configure NGINX rate limiting and DDoS protection for security hardening
- Set up NGINX monitoring with Prometheus and Grafana for comprehensive observability
Automated install script
Run the following script to automate the entire setup:
#!/usr/bin/env bash
# Automated installer for the NGINX reverse proxy + Redis caching stack.
# Usage: install.sh [domain] [backend_servers]
set -euo pipefail
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
# Default configuration (positional args, both optional)
DOMAIN="${1:-example.com}"
BACKEND_SERVERS="${2:-127.0.0.1:8080,127.0.0.1:8081}"
# Usage message
# NOTE(review): usage() is defined but never invoked (e.g. no -h handling);
# arguments are accepted positionally with defaults instead.
usage() {
echo "Usage: $0 [domain] [backend_servers]"
echo "Example: $0 example.com '10.0.1.10:8080,10.0.1.11:8080'"
exit 1
}
# Logging functions (colorized; echo -e interprets the escape codes)
log_info() { echo -e "${GREEN}[INFO]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Cleanup on error.
# WARNING: the rollback deletes /etc/nginx/ssl and /var/cache/nginx
# wholesale — destructive on a host that already had certificates or
# cache data there.
cleanup() {
log_error "Installation failed. Rolling back changes..."
systemctl stop nginx redis-server 2>/dev/null || true
rm -rf /etc/nginx/ssl /var/cache/nginx 2>/dev/null || true
exit 1
}
trap cleanup ERR
# Check if running as root or with sudo
if [[ $EUID -ne 0 ]]; then
log_error "This script must be run as root or with sudo"
exit 1
fi
# Detect distribution and set package manager.
# Sources /etc/os-release and keys off $ID to pick package names, the
# nginx run-as user, the redis service name, and whether the Debian-style
# sites-available/sites-enabled layout applies.
# NOTE(review): PKG_MGR is assigned but never referenced later.
if [ -f /etc/os-release ]; then
. /etc/os-release
case "$ID" in
ubuntu|debian)
PKG_MGR="apt"
PKG_INSTALL="apt install -y"
UPDATE_CMD="apt update && apt upgrade -y"
NGINX_PKG="nginx-extras"
REDIS_PKG="redis-server"
SSL_PKG="ssl-cert"
NGINX_USER="www-data"
REDIS_SERVICE="redis-server"
NGINX_CONF_DIR="/etc/nginx"
USE_SITES_AVAILABLE=true
;;
almalinux|rocky|centos|rhel|ol)
PKG_MGR="dnf"
PKG_INSTALL="dnf install -y"
UPDATE_CMD="dnf update -y"
NGINX_PKG="nginx"
REDIS_PKG="redis"
SSL_PKG="openssl"
NGINX_USER="nginx"
REDIS_SERVICE="redis"
NGINX_CONF_DIR="/etc/nginx"
USE_SITES_AVAILABLE=false
;;
fedora)
PKG_MGR="dnf"
PKG_INSTALL="dnf install -y"
UPDATE_CMD="dnf update -y"
NGINX_PKG="nginx"
REDIS_PKG="redis"
SSL_PKG="openssl"
NGINX_USER="nginx"
REDIS_SERVICE="redis"
NGINX_CONF_DIR="/etc/nginx"
USE_SITES_AVAILABLE=false
;;
amzn)
PKG_MGR="yum"
PKG_INSTALL="yum install -y"
UPDATE_CMD="yum update -y"
NGINX_PKG="nginx"
REDIS_PKG="redis"
SSL_PKG="openssl"
NGINX_USER="nginx"
REDIS_SERVICE="redis"
NGINX_CONF_DIR="/etc/nginx"
USE_SITES_AVAILABLE=false
;;
*)
log_error "Unsupported distribution: $ID"
exit 1
;;
esac
else
log_error "Cannot detect distribution"
exit 1
fi
log_info "[1/8] Updating system packages..."
# UPDATE_CMD contains '&&', so it has to go through eval; quote it so the
# whole string is re-parsed as a single command line.
eval "$UPDATE_CMD"

log_info "[2/8] Installing NGINX, Redis, and SSL tools..."
# PKG_INSTALL is intentionally unquoted: it expands to "mgr install -y"
# as separate words.
$PKG_INSTALL $NGINX_PKG $REDIS_PKG $SSL_PKG

log_info "[3/8] Configuring Redis for caching..."
# Debian/Ubuntu (and recent Fedora/EL) ship /etc/redis/redis.conf, while
# older RHEL-family packages use /etc/redis.conf — write to whichever the
# package provides instead of hard-coding the Debian path.
if [ -d /etc/redis ]; then
    REDIS_CONF=/etc/redis/redis.conf
else
    REDIS_CONF=/etc/redis.conf
fi
# NOTE(review): this replaces the packaged redis.conf entirely; on Debian
# the stock file also sets e.g. 'supervised systemd', logfile and dir —
# consider merging rather than overwriting. TODO confirm per distro.
cat > "$REDIS_CONF" << 'EOF'
bind 127.0.0.1
port 6379
timeout 0
tcp-keepalive 300
tcp-backlog 511
maxmemory 2gb
maxmemory-policy allkeys-lru
save 900 1
save 300 10
save 60 10000
EOF
systemctl enable --now "$REDIS_SERVICE"

log_info "[4/8] Creating SSL certificates..."
mkdir -p "$NGINX_CONF_DIR/ssl"
openssl req -x509 -nodes -days 365 -newkey rsa:4096 \
    -keyout "$NGINX_CONF_DIR/ssl/nginx-selfsigned.key" \
    -out "$NGINX_CONF_DIR/ssl/nginx-selfsigned.crt" \
    -subj "/C=US/ST=CA/L=San Francisco/O=Example Corp/CN=$DOMAIN"
chmod 600 "$NGINX_CONF_DIR/ssl/nginx-selfsigned.key"
chmod 644 "$NGINX_CONF_DIR/ssl/nginx-selfsigned.crt"
chown -R root:"$NGINX_USER" "$NGINX_CONF_DIR/ssl"

log_info "[5/8] Creating cache directories..."
mkdir -p /var/cache/nginx/{app,api,static}
chown -R "$NGINX_USER:$NGINX_USER" /var/cache/nginx
chmod -R 755 /var/cache/nginx

log_info "[6/8] Configuring NGINX upstream servers..."
# Build one "server host:port ...;" directive per comma-separated backend.
IFS=',' read -ra SERVERS <<< "$BACKEND_SERVERS"
UPSTREAM_CONFIG=""
for server in "${SERVERS[@]}"; do
    UPSTREAM_CONFIG="$UPSTREAM_CONFIG server $server max_fails=3 fail_timeout=30s;\n"
done
cat > "$NGINX_CONF_DIR/conf.d/upstream.conf" << EOF
upstream app_backend {
least_conn;
$(echo -e "$UPSTREAM_CONFIG")
keepalive 32;
keepalive_requests 100;
keepalive_timeout 60s;
}
upstream redis_backend {
server 127.0.0.1:6379;
keepalive 8;
}
EOF
log_info "[7/8] Creating main NGINX configuration..."
cat > "$NGINX_CONF_DIR/conf.d/cache.conf" << 'EOF'
proxy_cache_path /var/cache/nginx/app levels=1:2 keys_zone=app_cache:256m max_size=10g inactive=60m use_temp_path=off;
proxy_cache_path /var/cache/nginx/api levels=1:2 keys_zone=api_cache:128m max_size=2g inactive=30m use_temp_path=off;
proxy_cache_path /var/cache/nginx/static levels=1:2 keys_zone=static_cache:64m max_size=5g inactive=7d use_temp_path=off;
EOF
if [ "$USE_SITES_AVAILABLE" = true ]; then
    SITE_CONFIG="$NGINX_CONF_DIR/sites-available/$DOMAIN"
    mkdir -p "$NGINX_CONF_DIR/sites-available" "$NGINX_CONF_DIR/sites-enabled"
else
    SITE_CONFIG="$NGINX_CONF_DIR/conf.d/$DOMAIN.conf"
fi
# Unquoted heredoc delimiter: the shell expands $DOMAIN/$NGINX_CONF_DIR,
# while nginx runtime variables are escaped as \$var to survive verbatim.
# ssl_ciphers uses real cipher-suite names (the "...GCM-SHA512" suites in
# earlier revisions do not exist; AES-GCM suites end in SHA384/SHA256) and
# matches the list used elsewhere in this guide.
cat > "$SITE_CONFIG" << EOF
server {
listen 80;
server_name $DOMAIN;
return 301 https://\$server_name\$request_uri;
}
server {
listen 443 ssl http2;
server_name $DOMAIN;
ssl_certificate $NGINX_CONF_DIR/ssl/nginx-selfsigned.crt;
ssl_certificate_key $NGINX_CONF_DIR/ssl/nginx-selfsigned.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-RSA-AES128-GCM-SHA256;
ssl_prefer_server_ciphers off;
proxy_cache app_cache;
proxy_cache_valid 200 302 60m;
proxy_cache_valid 404 1m;
proxy_cache_use_stale error timeout invalid_header updating http_500 http_502 http_503 http_504;
proxy_cache_lock on;
location / {
proxy_pass http://app_backend;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
proxy_connect_timeout 30s;
proxy_send_timeout 30s;
proxy_read_timeout 30s;
add_header X-Cache-Status \$upstream_cache_status;
}
location /health {
access_log off;
return 200 "healthy\n";
add_header Content-Type text/plain;
}
}
EOF
if [ "$USE_SITES_AVAILABLE" = true ]; then
    # Relative symlink target is resolved from sites-enabled/, hence ../
    ln -sf "../sites-available/$DOMAIN" "$NGINX_CONF_DIR/sites-enabled/$DOMAIN"
    rm -f "$NGINX_CONF_DIR/sites-enabled/default"
fi
chown -R root:"$NGINX_USER" "$NGINX_CONF_DIR"
find "$NGINX_CONF_DIR" -type f -exec chmod 644 {} \;
find "$NGINX_CONF_DIR" -type d -exec chmod 755 {} \;
log_info "[8/8] Starting and enabling services..."
systemctl enable --now nginx
systemctl enable --now $REDIS_SERVICE
# Configure firewall if available (ufw on Debian-family, firewalld on
# RHEL-family); skip silently when neither tool is installed
if command -v ufw >/dev/null 2>&1; then
ufw allow 80/tcp
ufw allow 443/tcp
elif command -v firewall-cmd >/dev/null 2>&1; then
firewall-cmd --permanent --add-service=http
firewall-cmd --permanent --add-service=https
firewall-cmd --reload
fi
# Verification checks — any failure here triggers the ERR trap rollback
log_info "Verifying installation..."
nginx -t
systemctl is-active --quiet nginx || { log_error "NGINX is not running"; exit 1; }
systemctl is-active --quiet $REDIS_SERVICE || { log_error "Redis is not running"; exit 1; }
log_info "✅ NGINX reverse proxy with caching and load balancing installed successfully!"
log_info "Domain: $DOMAIN"
log_info "Backend servers: $BACKEND_SERVERS"
log_info "SSL certificate: Self-signed (replace with Let's Encrypt for production)"
log_info "Cache directories: /var/cache/nginx/{app,api,static}"
log_info "Configuration files: $NGINX_CONF_DIR"
Review the script before running. Execute with: bash install.sh