Set up Consul Connect as a service mesh on Kubernetes with Helm, enabling automatic mTLS encryption, service discovery, and traffic routing between microservices for secure inter-service communication.
Prerequisites
- Kubernetes cluster with admin access
- Helm 3 installed
- kubectl configured
- At least 4GB RAM available for Consul components
What this solves
Consul Connect provides a service mesh that secures service-to-service communication with automatic mutual TLS encryption, service discovery, and traffic management. This tutorial shows you how to deploy Consul Connect on Kubernetes using Helm charts, configure service mesh networking with sidecar proxies, and implement secure communication policies between microservices.
Step-by-step installation
Update system packages and install dependencies
Start by updating your system and installing required tools including Helm and kubectl.
sudo apt update && sudo apt upgrade -y
sudo apt install -y curl wget gnupg lsb-release
Install kubectl
Download and install the Kubernetes command-line tool for cluster management.
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
chmod +x kubectl
sudo mv kubectl /usr/local/bin/
kubectl version --client
Install Helm 3
Install Helm to manage Kubernetes applications using charts.
curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
helm version
Add HashiCorp Helm repository
Add the official HashiCorp Helm repository to access Consul charts.
helm repo add hashicorp https://helm.releases.hashicorp.com
helm repo update
Create Consul namespace
Create a dedicated namespace for Consul components to isolate them from other applications.
kubectl create namespace consul
Create Consul Connect configuration
Create a Helm values file to configure Consul with Connect service mesh enabled.
# consul-values.yaml — Helm values enabling Consul Connect service mesh.
global:
  name: consul
  datacenter: dc1
  tls:
    enabled: true
    enableAutoEncrypt: true    # agents obtain TLS certificates automatically
  acls:
    manageSystemACLs: true     # bootstrap and manage ACL tokens
  gossipEncryption:
    autoGenerate: true         # generate the gossip keyring secret
server:
  replicas: 3                  # raft quorum needs an odd count >= 3
  storage: 10Gi
  resources:
    requests:
      memory: 100Mi
      cpu: 100m
    limits:
      memory: 100Mi
      cpu: 100m
client:
  enabled: true
  resources:
    requests:
      memory: 100Mi
      cpu: 100m
    limits:
      memory: 100Mi
      cpu: 100m
connectInject:
  enabled: true
  default: false               # opt in per pod via the connect-inject annotation
  transparentProxy:
    defaultEnabled: true
  resources:
    requests:
      memory: 50Mi
      cpu: 50m
    limits:
      memory: 50Mi
      cpu: 50m
controller:
  enabled: true                # controller for the config-entry CRDs
meshGateway:
  enabled: true
  replicas: 1
  resources:
    requests:
      memory: 100Mi
      cpu: 100m
    limits:
      memory: 100Mi
      cpu: 100m
ui:
  enabled: true
  service:
    type: LoadBalancer
Deploy Consul with Connect
Install Consul using Helm with the Connect-enabled configuration.
helm install consul hashicorp/consul --namespace consul --values consul-values.yaml
kubectl get pods -n consul
Wait for deployment to complete
Monitor the deployment status and wait for all pods to be running.
kubectl wait --for=condition=ready pod --all -n consul --timeout=300s
kubectl get svc -n consul
Configure service mesh networking
Create service intentions
Define service-to-service communication policies using Consul intentions.
# service-intentions.yaml — allow web -> api and api -> database traffic.
apiVersion: consul.hashicorp.com/v1alpha1
kind: ServiceIntentions
metadata:
  name: web-to-api
spec:
  destination:
    name: api
  sources:
    - name: web
      action: allow
---
# Fixed: this document previously began with `api:` instead of `apiVersion:`.
apiVersion: consul.hashicorp.com/v1alpha1
kind: ServiceIntentions
metadata:
  name: api-to-database
spec:
  destination:
    name: database
  sources:
    - name: api
      action: allow
Apply service intentions
Deploy the service intentions to configure allowed communication paths.
kubectl apply -f service-intentions.yaml
kubectl get serviceintentions
Create example application with Connect
Deploy a sample application with Consul Connect sidecar injection enabled.
# example-app.yaml — web and api deployments with Connect sidecar injection.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web
  labels:
    app: web
spec:
  replicas: 2
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
      annotations:
        consul.hashicorp.com/connect-inject: "true"
    spec:
      containers:
        - name: web
          image: nginx:1.25
          ports:
            - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: web
spec:
  selector:
    app: web
  ports:
    - port: 80
      targetPort: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: api
  labels:
    app: api
spec:
  replicas: 2
  selector:
    matchLabels:
      app: api
  template:
    metadata:
      labels:
        app: api
      annotations:
        consul.hashicorp.com/connect-inject: "true"
    spec:
      containers:
        - name: api
          image: hashicorp/http-echo:latest
          args:
            # Quote the whole argument: `-text="Hello from API"` would pass
            # the double quotes literally to http-echo.
            - "-text=Hello from API"
          ports:
            - containerPort: 5678
---
apiVersion: v1
kind: Service
metadata:
  name: api
spec:
  selector:
    app: api
  ports:
    - port: 5678
      targetPort: 5678
Deploy the example application
Apply the application configuration with Connect sidecar injection.
kubectl apply -f example-app.yaml
kubectl get pods -l app=web
kubectl get pods -l app=api
Configure mTLS and traffic routing
Create traffic splitting configuration
Configure traffic routing and splitting between service versions.
# traffic-split.yaml — route 80% of api traffic to subset v1, 20% to v2.
apiVersion: consul.hashicorp.com/v1alpha1
kind: ServiceSplitter
metadata:
  name: api
spec:
  splits:
    - weight: 80
      service: api
      serviceSubset: v1
    - weight: 20
      service: api
      serviceSubset: v2
---
# Subsets referenced by the splitter are defined by a ServiceResolver.
apiVersion: consul.hashicorp.com/v1alpha1
kind: ServiceResolver
metadata:
  name: api
spec:
  subsets:
    v1:
      filter: "Service.Meta.version == v1"
    v2:
      filter: "Service.Meta.version == v2"
Configure service defaults
Set default protocol and mesh gateway configuration for services.
# service-defaults.yaml — set L7 protocol and mesh gateway mode per service.
apiVersion: consul.hashicorp.com/v1alpha1
kind: ServiceDefaults
metadata:
  name: web
spec:
  protocol: http
  meshGateway:
    mode: local
---
apiVersion: consul.hashicorp.com/v1alpha1
kind: ServiceDefaults
metadata:
  name: api
spec:
  protocol: http
  meshGateway:
    mode: local
Apply traffic routing configuration
Deploy the traffic management and service default configurations.
kubectl apply -f service-defaults.yaml
kubectl apply -f traffic-split.yaml
kubectl get servicedefaults
kubectl get servicesplitter
Configure ingress gateway
Set up an ingress gateway to expose services outside the mesh.
# ingress-gateway.yaml — expose the web service outside the mesh on :8080.
apiVersion: consul.hashicorp.com/v1alpha1
kind: IngressGateway
metadata:
  name: ingress-gateway
spec:
  listeners:
    - port: 8080
      protocol: http
      services:
        - name: web
---
apiVersion: v1
kind: Service
metadata:
  name: consul-ingress-gateway
  annotations:
    consul.hashicorp.com/service-name: ingress-gateway
spec:
  selector:
    app: consul
    component: ingress-gateway
  ports:
    - port: 8080
      targetPort: 8080
  type: LoadBalancer
Deploy ingress gateway
Apply the ingress gateway configuration to expose services externally.
kubectl apply -f ingress-gateway.yaml
kubectl get ingressgateway
kubectl get svc consul-ingress-gateway
Implement service discovery
Create service resolver for failover
Configure automatic failover between service instances.
# service-resolver-failover.yaml — default to subset v1, fail over to v2.
apiVersion: consul.hashicorp.com/v1alpha1
kind: ServiceResolver
metadata:
  name: web
spec:
  defaultSubset: v1
  subsets:
    v1:
      filter: "Service.Meta.version == v1"
    v2:
      filter: "Service.Meta.version == v2"
  failover:
    # Each failover entry is a map keyed by subset name (not a list)
    # in the v1alpha1 CRD schema.
    v1:
      service: web
      serviceSubset: v2
Configure health checks
Set up health checking for service discovery and load balancing.
# health-check.yaml — web deployment with Consul service metadata, an
# explicit upstream binding, and container liveness/readiness probes.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-with-health
  labels:
    app: web-with-health
spec:
  replicas: 2
  selector:
    matchLabels:
      app: web-with-health
  template:
    metadata:
      labels:
        app: web-with-health
      annotations:
        consul.hashicorp.com/connect-inject: "true"
        consul.hashicorp.com/service-meta-version: "v1"
        # Binds the api service to localhost:5678 inside this pod.
        # NOTE(review): explicit upstreams are normally used with transparent
        # proxy disabled — confirm against connectInject.transparentProxy.
        consul.hashicorp.com/connect-service-upstreams: "api:5678"
    spec:
      containers:
        - name: web
          image: nginx:1.25
          ports:
            - containerPort: 80
          livenessProbe:
            httpGet:
              path: /
              port: 80
            initialDelaySeconds: 10
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /
              port: 80
            initialDelaySeconds: 5
            periodSeconds: 5
Apply service discovery configuration
Deploy the service resolver and health check configurations.
kubectl apply -f service-resolver-failover.yaml
kubectl apply -f health-check.yaml
kubectl get serviceresolver
kubectl get pods -l app=web-with-health
Configure monitoring and observability
Enable metrics collection
Configure Consul Connect to expose Prometheus metrics for monitoring.
# consul-metrics.yaml — upgrade values enabling Prometheus metrics export.
global:
  name: consul
  datacenter: dc1
  metrics:
    enabled: true
    enableAgentMetrics: true
    agentMetricsRetentionTime: 1m
    enableGatewayMetrics: true
  tls:
    enabled: true
    enableAutoEncrypt: true
  acls:
    manageSystemACLs: true
  gossipEncryption:
    autoGenerate: true
server:
  replicas: 3
  storage: 10Gi
  # Raw agent config merged into the server config (JSON).
  extraConfig: |
    {
      "telemetry": {
        "prometheus_retention_time": "60s",
        "disable_hostname": true
      }
    }
connectInject:
  enabled: true
  default: false
  transparentProxy:
    defaultEnabled: true
  metrics:
    defaultEnabled: true
    defaultEnableMerging: true
Update Consul with metrics enabled
Upgrade the Consul deployment to enable metrics collection.
helm upgrade consul hashicorp/consul --namespace consul --values consul-metrics.yaml
kubectl rollout status deployment/consul-connect-injector -n consul
Verify your setup
kubectl get pods -n consul
kubectl get svc -n consul
kubectl logs -l app=consul -n consul --tail=10
Check Connect injection status
kubectl get pods -l app=web -o jsonpath='{.items[].spec.containers[].name}'
Verify service mesh connectivity
kubectl exec -it $(kubectl get pod -l app=web-with-health -o jsonpath='{.items[0].metadata.name}') -c web -- curl localhost:5678
Check Consul UI (if LoadBalancer is available)
kubectl get svc consul-ui -n consul
View service intentions
kubectl get serviceintentions
Check service mesh metrics
kubectl port-forward -n consul svc/consul-server 8500:8500 &
curl http://localhost:8500/v1/agent/metrics?format=prometheus
Common issues
| Symptom | Cause | Fix |
|---|---|---|
| Pods stuck in pending state | Insufficient cluster resources | Check node capacity with kubectl describe nodes and scale cluster |
| Connect injection failing | Webhook not ready | Check kubectl logs -l app=consul-connect-injector -n consul |
| Service communication blocked | Missing service intentions | Create intentions with kubectl apply -f service-intentions.yaml |
| mTLS certificate errors | TLS configuration mismatch | Verify TLS settings in Helm values and restart pods |
| UI not accessible | Service type misconfiguration | Change UI service type to LoadBalancer or use port-forward |
| High memory usage | Default resource limits too high | Adjust resource limits in Helm values file |
Next steps
- Implement Consul Connect mTLS with Vault PKI backend for secure service mesh communication
- Configure Consul Connect distributed tracing with Jaeger and Zipkin for comprehensive microservices observability
- Implement Kubernetes network policies with Calico for microsegmentation
- Configure Consul backup and disaster recovery with automated snapshots
- Set up Consul multi-datacenter federation for global service mesh
Running this in production?
Automated install script
Run this to automate the entire setup
#!/usr/bin/env bash
# install.sh — automated Consul Connect on Kubernetes setup.
# Installs kubectl and Helm, registers the HashiCorp chart repository, and
# deploys Consul with Connect enabled into a dedicated namespace.
set -euo pipefail

# ANSI color codes used by the print_* helpers (constants).
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m' # No Color

# Configuration (overridable via command-line flags parsed below).
CONSUL_NAMESPACE="consul"
KUBECTL_VERSION=""
CLEANUP_ON_FAILURE=true
# Usage function: print command-line help to stdout and exit 1.
# Called both for -h/--help and for invalid options (always exits 1).
usage() {
  echo "Usage: $0 [OPTIONS]"
  echo "Options:"
  echo " -n, --namespace NAME Consul namespace (default: consul)"
  echo " --no-cleanup Don't cleanup on failure"
  echo " -h, --help Show this help message"
  exit 1
}
# Parse command-line arguments into the configuration globals.
while [[ $# -gt 0 ]]; do
  case $1 in
    -n|--namespace)
      # Guard: with `set -u`, reading a missing "$2" would abort with an
      # opaque "unbound variable" error instead of showing usage.
      if [[ $# -lt 2 ]]; then
        echo -e "${RED}Option $1 requires a value${NC}"
        usage
      fi
      CONSUL_NAMESPACE="$2"
      shift 2
      ;;
    --no-cleanup)
      CLEANUP_ON_FAILURE=false
      shift
      ;;
    -h|--help)
      usage
      ;;
    *)
      echo -e "${RED}Unknown option: $1${NC}"
      usage
      ;;
  esac
done
# Cleanup function for rollback: best-effort removal of the Consul
# namespace and the HashiCorp Helm repo entry. No-op when the user passed
# --no-cleanup.
cleanup() {
  if [ "$CLEANUP_ON_FAILURE" = true ]; then
    echo -e "${YELLOW}[CLEANUP] Rolling back due to failure...${NC}"
    kubectl delete namespace "$CONSUL_NAMESPACE" --ignore-not-found=true 2>/dev/null || true
    helm repo remove hashicorp 2>/dev/null || true
  fi
}

# errtrace (-E) makes the ERR trap fire for failures inside functions;
# without it, `set -e` exits before the trap runs for most failures in
# this script (all real work happens inside functions).
set -o errtrace
trap cleanup ERR
# Colored output helpers: each prints a single line to stdout wrapped in
# the corresponding ANSI color code.
print_status()  { echo -e "${BLUE}$1${NC}"; }
print_success() { echo -e "${GREEN}$1${NC}"; }
print_error()   { echo -e "${RED}$1${NC}"; }
print_warning() { echo -e "${YELLOW}$1${NC}"; }
# Determine how to escalate privileges. Sets the global SUDO to "" when
# already root, or "sudo" when passwordless sudo is available; exits 1
# otherwise so later package/file operations cannot hang on a prompt.
check_privileges() {
  if [[ $EUID -eq 0 ]]; then
    SUDO=""
    return
  fi
  if ! command -v sudo >/dev/null 2>&1; then
    print_error "This script requires root privileges or sudo access."
    exit 1
  fi
  SUDO="sudo"
  # Probe non-interactively so a missing sudo grant fails fast.
  if ! $SUDO -n true 2>/dev/null; then
    print_error "This script requires sudo privileges. Please run with sudo or as root."
    exit 1
  fi
}
# Detect the Linux distribution and set the package-manager command
# strings (PKG_MGR, PKG_UPDATE, PKG_INSTALL, PKG_UPGRADE). These strings
# are later expanded unquoted ($SUDO $PKG_UPDATE), so they must contain
# plain argv words only — no shell operators.
detect_distro() {
  if [ -f /etc/os-release ]; then
    . /etc/os-release
    case "$ID" in
      ubuntu|debian)
        PKG_MGR="apt"
        PKG_UPDATE="apt update"
        PKG_INSTALL="apt install -y"
        PKG_UPGRADE="apt upgrade -y"
        ;;
      almalinux|rocky|centos|rhel|ol|fedora)
        PKG_MGR="dnf"
        # Previously "dnf check-update || true": stored in a string and
        # expanded unquoted, the literal tokens `||` and `true` were
        # passed as arguments to dnf. `makecache` refreshes metadata and
        # exits 0, which is the intended behavior here.
        PKG_UPDATE="dnf makecache"
        PKG_INSTALL="dnf install -y"
        PKG_UPGRADE="dnf upgrade -y"
        ;;
      amzn)
        PKG_MGR="yum"
        PKG_UPDATE="yum makecache"
        PKG_INSTALL="yum install -y"
        PKG_UPGRADE="yum upgrade -y"
        ;;
      *)
        print_error "Unsupported distribution: $ID"
        exit 1
        ;;
    esac
  else
    print_error "Cannot detect Linux distribution"
    exit 1
  fi
}
# Step 1: verify privileges, detect the distro, and warn (non-fatally)
# when kubectl exists but cannot reach a cluster.
check_prerequisites() {
  print_status "[1/8] Checking prerequisites..."
  check_privileges
  detect_distro
  if command -v kubectl >/dev/null 2>&1 && ! kubectl cluster-info >/dev/null 2>&1; then
    print_warning "kubectl is installed but cannot connect to cluster. Continuing with installation..."
  fi
  print_success "Prerequisites check completed"
}
# Step 2: refresh package metadata, upgrade, and install base tooling.
update_system() {
  print_status "[2/8] Updating system packages..."
  $SUDO $PKG_UPDATE
  $SUDO $PKG_UPGRADE
  $SUDO $PKG_INSTALL curl wget gnupg
  # Distro-specific extras (PKG_MGR is one of apt, dnf, yum).
  if [ "$PKG_MGR" = "apt" ]; then
    $SUDO $PKG_INSTALL lsb-release apt-transport-https ca-certificates
  else
    $SUDO $PKG_INSTALL which
  fi
  print_success "System packages updated"
}
# Step 3: install kubectl (skipped when already present).
# Writes the detected release into the global KUBECTL_VERSION.
install_kubectl() {
  print_status "[3/8] Installing kubectl..."
  if command -v kubectl >/dev/null 2>&1; then
    print_warning "kubectl already installed, skipping..."
    return
  fi
  # -f makes curl fail on HTTP errors instead of silently capturing an
  # error page as the "version" string.
  KUBECTL_VERSION=$(curl -fsSL https://dl.k8s.io/release/stable.txt)
  # Download into an unpredictable temp dir rather than the CWD.
  local tmpdir
  tmpdir=$(mktemp -d)
  curl -fsSL -o "${tmpdir}/kubectl" "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl"
  chmod 755 "${tmpdir}/kubectl"
  $SUDO mv "${tmpdir}/kubectl" /usr/local/bin/kubectl
  rmdir "$tmpdir"
  # Verify installation
  if kubectl version --client >/dev/null 2>&1; then
    print_success "kubectl installed successfully"
  else
    print_error "kubectl installation failed"
    exit 1
  fi
}
# Step 4: install Helm 3 via the official installer script (skipped when
# a helm binary is already on PATH).
install_helm() {
  print_status "[4/8] Installing Helm..."
  if command -v helm >/dev/null 2>&1; then
    print_warning "Helm already installed, skipping..."
    return
  fi
  curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
  # Verify the binary actually runs.
  if ! helm version >/dev/null 2>&1; then
    print_error "Helm installation failed"
    exit 1
  fi
  print_success "Helm installed successfully"
}
# Step 5: register the HashiCorp chart repository and refresh indexes.
setup_helm_repo() {
  print_status "[5/8] Setting up HashiCorp Helm repository..."
  # --force-update makes re-runs idempotent: plain `repo add` errors when
  # the name already exists pointing at a different URL.
  helm repo add hashicorp https://helm.releases.hashicorp.com --force-update
  helm repo update
  print_success "HashiCorp Helm repository added"
}
# Step 6: create the target namespace unless it already exists.
create_namespace() {
  print_status "[6/8] Creating Consul namespace..."
  if ! kubectl get namespace "$CONSUL_NAMESPACE" >/dev/null 2>&1; then
    kubectl create namespace "$CONSUL_NAMESPACE"
    print_success "Namespace $CONSUL_NAMESPACE created"
  else
    print_warning "Namespace $CONSUL_NAMESPACE already exists, skipping..."
  fi
}
# Step 7: render the Helm values file and deploy Consul Connect, then
# wait for all pods in the namespace to become ready.
deploy_consul() {
  print_status "[7/8] Deploying Consul Connect..."
  # Use an unpredictable mktemp path instead of a fixed /tmp filename
  # (fixed names in /tmp are a symlink/clobber hazard on shared hosts).
  local values_file
  values_file=$(mktemp /tmp/consul-values.XXXXXX)
  # Quoted 'EOF' delimiter: the YAML is written literally, no expansion.
  cat > "$values_file" << 'EOF'
global:
  name: consul
  datacenter: dc1
  tls:
    enabled: true
    enableAutoEncrypt: true
  acls:
    manageSystemACLs: true
  gossipEncryption:
    autoGenerate: true
server:
  replicas: 3
  storage: 10Gi
  resources:
    requests:
      memory: 100Mi
      cpu: 100m
    limits:
      memory: 100Mi
      cpu: 100m
client:
  enabled: true
  resources:
    requests:
      memory: 100Mi
      cpu: 100m
    limits:
      memory: 100Mi
      cpu: 100m
connectInject:
  enabled: true
  default: false
  transparentProxy:
    defaultEnabled: true
  resources:
    requests:
      memory: 50Mi
      cpu: 50m
    limits:
      memory: 50Mi
      cpu: 50m
controller:
  enabled: true
meshGateway:
  enabled: true
  replicas: 1
  resources:
    requests:
      memory: 100Mi
      cpu: 100m
    limits:
      memory: 100Mi
      cpu: 100m
ui:
  enabled: true
  service:
    type: LoadBalancer
EOF
  # Install Consul with the Connect-enabled values.
  helm install consul hashicorp/consul --namespace "$CONSUL_NAMESPACE" --values "$values_file"
  # Wait for deployment to settle.
  print_status "Waiting for Consul pods to be ready (timeout: 300s)..."
  kubectl wait --for=condition=ready pod --all -n "$CONSUL_NAMESPACE" --timeout=300s
  # Clean up the rendered values file.
  rm -f "$values_file"
  print_success "Consul Connect deployed successfully"
}
# Verify installation
# Step 8: print pod and service status for the Consul namespace and, when
# the consul-ui Service exists, show how to reach the UI via port-forward.
verify_installation() {
  print_status "[8/8] Verifying installation..."
  # Check pod status
  echo "Consul pods status:"
  kubectl get pods -n "$CONSUL_NAMESPACE"
  # Check services
  echo -e "\nConsul services:"
  kubectl get svc -n "$CONSUL_NAMESPACE"
  # Check if Consul UI service exists (only created when ui.enabled=true
  # in the Helm values)
  if kubectl get svc consul-ui -n "$CONSUL_NAMESPACE" >/dev/null 2>&1; then
    echo -e "\n${GREEN}Consul UI is available. To access it:${NC}"
    echo "kubectl port-forward svc/consul-ui 8080:80 -n $CONSUL_NAMESPACE"
    echo "Then visit: http://localhost:8080"
  fi
  print_success "Installation verification completed"
}
# Entry point: run every installation step in order, disarm the rollback
# trap on success, and print a completion summary.
main() {
  echo -e "${GREEN}Starting Consul Connect with Kubernetes installation...${NC}"
  local step
  for step in check_prerequisites update_system install_kubectl install_helm \
              setup_helm_repo create_namespace deploy_consul verify_installation; do
    "$step"
  done
  # Success: disable the cleanup trap so rollback does not run.
  trap - ERR
  echo -e "${GREEN}"
  echo "=========================================="
  echo "Consul Connect installation completed!"
  echo "=========================================="
  echo -e "${NC}"
  echo "Next steps:"
  echo "1. Create service intentions for service-to-service communication"
  echo "2. Deploy applications with connect-inject annotations"
  echo "3. Configure service mesh policies as needed"
  echo ""
  echo "For more information, visit: https://www.consul.io/docs/connect"
}
main "$@"
Review the script before running. Execute with: bash install.sh