Secure your Kubernetes cluster with Calico CNI network policies and OPA Gatekeeper admission control. This tutorial shows you how to implement pod isolation, policy enforcement, and admission validation for production-grade security.
Prerequisites
- Kubernetes cluster with admin access
- kubectl configured
- Helm 3 installed
- Basic Kubernetes networking knowledge
What this solves
Kubernetes network policies and admission controllers provide essential security layers for production clusters. Network policies control traffic flow between pods and namespaces, while admission controllers validate resources before they're created. This tutorial implements Calico CNI for network policy enforcement and OPA Gatekeeper for policy validation, giving you comprehensive security controls over pod communication and resource creation.
Prerequisites
- Kubernetes cluster with admin access (kubeadm recommended)
- kubectl configured for cluster access
- Helm 3 installed on your system
- Basic understanding of Kubernetes networking concepts
Step-by-step installation
Install Calico CNI with network policy support
Calico provides both networking and network policy capabilities for Kubernetes. Download and apply the Calico manifest to enable network policy enforcement.
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.27.0/manifests/tigera-operator.yaml
Configure Calico installation
Create a custom Calico installation configuration to enable network policy features and optimize for your environment. Save the manifest below as /tmp/calico-custom-resources.yaml; it is applied in the next step.
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNetwork:
    ipPools:
    - blockSize: 26
      cidr: 10.244.0.0/16
      encapsulation: VXLANCrossSubnet
      natOutgoing: Enabled
      nodeSelector: all()
  nodeMetricsPort: 9091
---
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}
Apply Calico configuration
Deploy the Calico configuration and wait for all components to become ready.
kubectl apply -f /tmp/calico-custom-resources.yaml
kubectl wait --for=condition=Ready pods --all -n calico-system --timeout=300s
Verify Calico installation
Check that all Calico pods are running and the API server is accessible.
kubectl get pods -n calico-system
kubectl get nodes -o wide
Install OPA Gatekeeper using Helm
Add the Gatekeeper Helm repository and install it to enable admission control policies.
helm repo add gatekeeper https://open-policy-agent.github.io/gatekeeper/charts
helm repo update
Deploy Gatekeeper with custom values
Create a values file to configure Gatekeeper with appropriate resource limits and audit settings, and save it as /tmp/gatekeeper-values.yaml (it is referenced by the helm install command below).
replicas: 3
revisionHistoryLimit: 10
controllerManager:
resources:
limits:
cpu: 1000m
memory: 512Mi
requests:
cpu: 100m
memory: 256Mi
audit:
resources:
limits:
cpu: 1000m
memory: 512Mi
requests:
cpu: 100m
memory: 256Mi
postInstall:
labelNamespace:
enabled: true
Install Gatekeeper
Deploy Gatekeeper using Helm with the custom configuration values.
helm install gatekeeper gatekeeper/gatekeeper \
--namespace gatekeeper-system \
--create-namespace \
--values /tmp/gatekeeper-values.yaml
Wait for Gatekeeper deployment
Verify that all Gatekeeper components are running before proceeding with policy configuration.
kubectl wait --for=condition=Ready pods --all -n gatekeeper-system --timeout=300s
kubectl get pods -n gatekeeper-system
Configure network policies for pod isolation
Create test namespaces
Set up separate namespaces to demonstrate network policy isolation between different application tiers.
kubectl create namespace frontend
kubectl create namespace backend
kubectl create namespace database
Deploy test applications
Create sample applications in each namespace to test network policy enforcement, and save the manifest as /tmp/test-apps.yaml. Note: the hardcoded Postgres credentials below are for testing only — use a Kubernetes Secret for real workloads.
apiVersion: apps/v1
kind: Deployment
metadata:
name: frontend-app
namespace: frontend
spec:
replicas: 2
selector:
matchLabels:
app: frontend
template:
metadata:
labels:
app: frontend
tier: web
spec:
containers:
- name: nginx
image: nginx:1.25
ports:
- containerPort: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: backend-app
namespace: backend
spec:
replicas: 2
selector:
matchLabels:
app: backend
template:
metadata:
labels:
app: backend
tier: api
spec:
containers:
- name: httpd
image: httpd:2.4
ports:
- containerPort: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: database-app
namespace: database
spec:
replicas: 1
selector:
matchLabels:
app: database
template:
metadata:
labels:
app: database
tier: data
spec:
containers:
- name: postgres
image: postgres:16
env:
- name: POSTGRES_DB
value: testdb
- name: POSTGRES_USER
value: testuser
- name: POSTGRES_PASSWORD
value: testpass123
ports:
- containerPort: 5432
Apply test applications
Deploy the test applications and verify they're running in their respective namespaces.
kubectl apply -f /tmp/test-apps.yaml
kubectl get pods -n frontend
kubectl get pods -n backend
kubectl get pods -n database
Create default deny network policy
Implement a default deny-all policy to block traffic between namespaces by default. Save the policies below as /tmp/default-deny.yaml.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-deny-all
namespace: frontend
spec:
podSelector: {}
policyTypes:
- Ingress
- Egress
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-deny-all
namespace: backend
spec:
podSelector: {}
policyTypes:
- Ingress
- Egress
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-deny-all
namespace: database
spec:
podSelector: {}
policyTypes:
- Ingress
- Egress
Apply default deny policies
Deploy the default deny policies to establish a secure baseline for network traffic.
kubectl apply -f /tmp/default-deny.yaml
Create selective allow policies
Define network policies that allow specific traffic flows between application tiers, and save them as /tmp/allow-policies.yaml. These policies select peer namespaces by their name label, which is applied in a later step — traffic is not allowed until both the labels and the policies are in place.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-frontend-to-backend
namespace: backend
spec:
podSelector:
matchLabels:
tier: api
policyTypes:
- Ingress
ingress:
- from:
- namespaceSelector:
matchLabels:
name: frontend
ports:
- protocol: TCP
port: 80
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-backend-to-database
namespace: database
spec:
podSelector:
matchLabels:
tier: data
policyTypes:
- Ingress
ingress:
- from:
- namespaceSelector:
matchLabels:
name: backend
ports:
- protocol: TCP
port: 5432
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-dns-egress
namespace: frontend
spec:
podSelector: {}
policyTypes:
- Egress
egress:
- to: []
ports:
- protocol: UDP
port: 53
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-dns-egress
namespace: backend
spec:
podSelector: {}
policyTypes:
- Egress
egress:
- to: []
ports:
- protocol: UDP
port: 53
Label namespaces for policy targeting
Add labels to namespaces so network policies can reference them in selectors.
kubectl label namespace frontend name=frontend
kubectl label namespace backend name=backend
kubectl label namespace database name=database
Apply allow policies
Deploy the selective allow policies to enable necessary communication between application tiers.
kubectl apply -f /tmp/allow-policies.yaml
Configure OPA Gatekeeper constraints
Create constraint template for required labels
Define a Gatekeeper constraint template to enforce that all pods have required security labels, and save it as /tmp/required-labels-template.yaml.
apiVersion: templates.gatekeeper.sh/v1beta1
kind: ConstraintTemplate
metadata:
  name: k8srequiredlabels
spec:
  crd:
    spec:
      names:
        kind: K8sRequiredLabels
      validation:
        openAPIV3Schema:
          type: object
          properties:
            labels:
              type: array
              items:
                type: string
  targets:
    - target: admission.k8s.gatekeeper.sh
      rego: |
        package k8srequiredlabels

        violation[{"msg": msg}] {
          required := input.parameters.labels
          provided := input.review.object.metadata.labels
          missing := required[_]
          not provided[missing]
          msg := sprintf("Missing required label: %v", [missing])
        }
Apply constraint template
Deploy the constraint template to make it available for creating specific constraints.
kubectl apply -f /tmp/required-labels-template.yaml
Create required labels constraint
Create a constraint that enforces specific labels on all pods for security classification.
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: K8sRequiredLabels
metadata:
name: must-have-security-labels
spec:
match:
kinds:
- apiGroups: [""]
kinds: ["Pod"]
excludedNamespaces: ["kube-system", "gatekeeper-system", "calico-system"]
parameters:
labels: ["tier", "app"]
Create constraint template for network policy requirements
Define a template to ensure namespaces have network policies for security.
apiVersion: templates.gatekeeper.sh/v1beta1
kind: ConstraintTemplate
metadata:
name: k8srequirenetworkpolicy
spec:
crd:
spec:
names:
kind: K8sRequireNetworkPolicy
validation:
openAPIV3Schema:
type: object
targets:
- target: admission.k8s.gatekeeper.sh
rego: |
package k8srequirenetworkpolicy
violation[{"msg": msg}] {
input.review.kind.kind == "Namespace"
not input.review.object.metadata.labels["network-policy"]
msg := "Namespace must have network-policy label set to 'enabled'"
}
Apply constraint templates
Deploy both constraint templates and create the network policy constraint.
kubectl apply -f /tmp/required-labels-constraint.yaml
kubectl apply -f /tmp/require-networkpolicy-template.yaml
Create network policy constraint
Enforce that new namespaces must indicate they have network policies configured.
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: K8sRequireNetworkPolicy
metadata:
name: namespace-must-have-network-policy
spec:
match:
kinds:
- apiGroups: [""]
kinds: ["Namespace"]
excludedNamespaces: ["kube-system", "gatekeeper-system", "calico-system", "kube-public", "default"]
Apply network policy constraint
Deploy the constraint to enforce network policy requirements on new namespaces.
kubectl apply -f /tmp/require-networkpolicy-constraint.yaml
Test policy enforcement
Test network policy isolation
Verify that network policies are blocking unauthorized traffic between namespaces.
# Get pod names for testing
FRONTEND_POD=$(kubectl get pods -n frontend -o jsonpath='{.items[0].metadata.name}')
BACKEND_POD=$(kubectl get pods -n backend -o jsonpath='{.items[0].metadata.name}')
Test blocked connection (should fail). Individual pods are not addressable via service DNS names such as pod-name.namespace.svc.cluster.local, so target the backend pod's IP directly:
BACKEND_IP=$(kubectl get pod -n backend $BACKEND_POD -o jsonpath='{.status.podIP}')
kubectl exec -n frontend $FRONTEND_POD -- wget -qO- --timeout=5 http://$BACKEND_IP || echo "Connection blocked by network policy"
Test allowed connection after updating policies
kubectl exec -n frontend $FRONTEND_POD -- nslookup kubernetes.default.svc.cluster.local
Test Gatekeeper constraint validation
Attempt to create resources that violate the configured constraints to verify enforcement.
# Try to create a pod without required labels (should fail)
kubectl run test-pod --image=nginx -n frontend --dry-run=server
Try to create a namespace without network-policy label (should fail)
kubectl create namespace test-namespace --dry-run=server
Create compliant resources
Test creating resources that meet all policy requirements to ensure legitimate workloads can deploy.
apiVersion: v1
kind: Namespace
metadata:
name: test-compliant
labels:
network-policy: "enabled"
---
apiVersion: v1
kind: Pod
metadata:
name: compliant-pod
namespace: test-compliant
labels:
app: test
tier: web
spec:
containers:
- name: nginx
image: nginx:1.25
Apply compliant resources
Deploy resources that satisfy all constraints to verify they're accepted.
kubectl apply -f /tmp/compliant-test.yaml
kubectl get pods -n test-compliant
Verify your setup
# Check Calico components
kubectl get pods -n calico-system
kubectl get networkpolicies --all-namespaces
Check Gatekeeper components
kubectl get pods -n gatekeeper-system
kubectl get constraints
kubectl get constrainttemplates
Verify network policy enforcement (requires the calicoctl CLI, which must be installed separately — it is not part of this tutorial's setup)
calicoctl get networkpolicy --all-namespaces
Check constraint violations
kubectl get k8srequiredlabels
kubectl get k8srequirenetworkpolicy
Advanced configuration
Configure global network policies
Create cluster-wide policies that apply across all namespaces for baseline security.
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
name: deny-all-except-dns
spec:
selector: all()
types:
- Egress
egress:
- action: Allow
protocol: UDP
destination:
ports:
- 53
- action: Allow
protocol: TCP
destination:
ports:
- 53
Create admission webhook bypass
Configure Gatekeeper to allow emergency access while maintaining audit logging.
apiVersion: config.gatekeeper.sh/v1alpha1
kind: Config
metadata:
name: config
namespace: gatekeeper-system
spec:
match:
- excludedNamespaces: ["kube-system", "gatekeeper-system"]
processes: ["*"]
validation:
traces:
- user:
kind:
group: "*"
version: "*"
kind: "*"
readiness:
statsEnabled: true
Apply advanced configurations
Deploy the advanced policies and configurations for production use.
kubectl apply -f /tmp/gatekeeper-config.yaml
Common issues
| Symptom | Cause | Fix |
|---|---|---|
| Network policies not enforcing | CNI doesn't support network policies | Verify Calico is installed: kubectl get pods -n calico-system |
| Pods can't resolve DNS | Network policy blocks DNS egress | Add DNS egress rules to network policies |
| Gatekeeper webhook fails | Certificate or connectivity issues | Check webhook status: kubectl get validatingadmissionwebhooks |
| Constraints not enforcing | Template not properly applied | Verify template exists: kubectl get constrainttemplates |
| Legitimate pods rejected | Overly restrictive constraints | Review constraint match criteria and excluded namespaces |
| Network policy connectivity issues | Incorrect label selectors | Verify pod and namespace labels match policy selectors |
Security considerations
Monitor policy violations
Set up monitoring to track and alert on policy violations for security oversight.
# View recent constraint violations
kubectl get events --field-selector reason=FailedAdmission --all-namespaces
Check Gatekeeper audit results
kubectl logs -n gatekeeper-system -l control-plane=audit-controller
Consider integrating with your monitoring stack to track policy enforcement metrics and create alerts for security violations.
Next steps
- Implement Kubernetes resource quotas and limits for comprehensive workload management
- Configure Kubernetes RBAC with service accounts for authentication and authorization
- Implement Pod Security Standards for additional security controls
- Configure secrets management with Vault for secure credential handling
- Monitor network policies with Prometheus for operational visibility
Automated install script
Run this to automate the entire setup
#!/usr/bin/env bash
set -euo pipefail

# ANSI color codes for status output; constants, so mark them readonly.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly NC='\033[0m' # No Color

# Calico release to install; overridable via the first CLI argument.
CALICO_VERSION=${1:-"v3.27.0"}
readonly CALICO_VERSION
# NOTE(review): KUBECONFIG_PATH is never referenced below — kubectl reads
# KUBECONFIG from the environment directly. Kept for backward compatibility.
KUBECONFIG_PATH=${KUBECONFIG:-"$HOME/.kube/config"}
readonly KUBECONFIG_PATH
# Scratch directory for generated manifests; removed by cleanup() on exit.
readonly TEMP_DIR="/tmp/k8s-security-install"
# Cleanup function
# Removes the scratch directory holding the generated manifests.
cleanup() {
  printf '%b\n' "${YELLOW}Cleaning up temporary files...${NC}"
  rm -rf -- "$TEMP_DIR"
}
# Error handler
# ERR-trap callback: reports the failing line number, removes the temp
# directory, and aborts the script with status 1.
#   $1 - line number of the failing command (passed as ${LINENO} by the trap)
error_handler() {
  echo -e "${RED}Error occurred in script at line: ${1}. Exiting...${NC}"
  cleanup
  exit 1
}
# NOTE(review): on error, cleanup runs twice (once above, once via the EXIT
# trap); harmless since 'rm -rf' on a missing directory is a no-op.
trap 'error_handler ${LINENO}' ERR
trap cleanup EXIT
# Print invocation help and terminate with a non-zero status.
usage() {
  printf 'Usage: %s [CALICO_VERSION]\n' "$0"
  printf 'Example: %s v3.27.0\n' "$0"
  printf 'If no version specified, defaults to v3.27.0\n'
  exit 1
}
# Detect distribution
# Sources /etc/os-release and maps $ID to package-manager command strings.
# Sets globals: PKG_MGR, PKG_INSTALL, PKG_UPDATE. Exits 1 on unsupported or
# undetectable distributions.
# NOTE(review): PKG_UPDATE may embed '|| true' (dnf/yum check-update exits
# non-zero when updates are available). A consumer must 'eval' the string for
# that to take effect; plain $PKG_UPDATE word-splitting passes '||' as a
# literal argument to the package manager — confirm call sites.
detect_distro() {
  if [ -f /etc/os-release ]; then
    # Sourcing defines $ID (and other os-release vars) in the current shell.
    . /etc/os-release
    case "$ID" in
      ubuntu|debian)
        PKG_MGR="apt"
        PKG_INSTALL="apt install -y"
        PKG_UPDATE="apt update"
        ;;
      almalinux|rocky|centos|rhel|ol|fedora)
        PKG_MGR="dnf"
        PKG_INSTALL="dnf install -y"
        PKG_UPDATE="dnf check-update || true"
        ;;
      amzn)
        PKG_MGR="yum"
        PKG_INSTALL="yum install -y"
        PKG_UPDATE="yum check-update || true"
        ;;
      *)
        echo -e "${RED}Unsupported distro: $ID${NC}"
        exit 1
        ;;
    esac
  else
    echo -e "${RED}/etc/os-release not found. Cannot detect distribution.${NC}"
    exit 1
  fi
}
# Check if running as root or with sudo
# Aborts unless we are root or passwordless sudo is available.
check_privileges() {
  if (( EUID != 0 )) && ! sudo -n true 2>/dev/null; then
    printf '%b\n' "${RED}This script requires root privileges or passwordless sudo${NC}"
    exit 1
  fi
}
# Check prerequisites
# Verifies kubectl presence, live cluster connectivity, cluster-admin access,
# and Helm availability (installing Helm via install_helm when absent).
# Exits 1 on any unmet requirement.
check_prerequisites() {
  echo -e "${YELLOW}[1/10] Checking prerequisites...${NC}"
  # Check if kubectl is installed and configured
  if ! command -v kubectl &> /dev/null; then
    echo -e "${RED}kubectl is not installed or not in PATH${NC}"
    exit 1
  fi
  # Check if kubectl can connect to cluster
  if ! kubectl cluster-info &> /dev/null; then
    echo -e "${RED}kubectl cannot connect to Kubernetes cluster${NC}"
    exit 1
  fi
  # Check if user has admin access (wildcard verb on wildcard resource
  # across all namespaces ~= cluster-admin)
  if ! kubectl auth can-i '*' '*' --all-namespaces &> /dev/null; then
    echo -e "${RED}Insufficient cluster permissions. Admin access required.${NC}"
    exit 1
  fi
  # Check if helm is installed; install it when missing rather than failing
  if ! command -v helm &> /dev/null; then
    echo -e "${YELLOW}Helm not found. Installing...${NC}"
    install_helm
  fi
  echo -e "${GREEN}Prerequisites check passed${NC}"
}
# Install Helm
# Installs Helm 3 via the distro repo (apt) or the official installer script
# (dnf/yum). Relies on globals set by detect_distro: PKG_MGR, PKG_INSTALL,
# PKG_UPDATE.
install_helm() {
  # eval is required so any '|| true' embedded in PKG_UPDATE takes effect:
  # plain $PKG_UPDATE word-splitting passes '||' to the package manager as a
  # literal argument, and dnf/yum check-update exits non-zero when updates
  # are pending, which would kill the script under 'set -e'.
  eval "$PKG_UPDATE"
  case "$PKG_MGR" in
    apt)
      # apt-key is deprecated on modern Debian/Ubuntu; store the key in a
      # dedicated keyring and reference it with signed-by instead.
      curl -fsSL https://baltocdn.com/helm/signing.asc | sudo gpg --dearmor -o /usr/share/keyrings/helm.gpg
      echo "deb [signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list
      apt update
      $PKG_INSTALL helm
      ;;
    dnf|yum)
      # RPM-based distros: use the official get-helm-3 installer script.
      curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
      chmod 755 get_helm.sh
      ./get_helm.sh
      rm -f get_helm.sh
      ;;
  esac
}
# Setup working directory
# Creates the scratch directory used for the generated manifest files.
setup_working_dir() {
  printf '%b\n' "${YELLOW}[2/10] Setting up working directory...${NC}"
  mkdir -p -- "$TEMP_DIR"
  chmod 755 -- "$TEMP_DIR"
  printf '%b\n' "${GREEN}Working directory created: $TEMP_DIR${NC}"
}
# Install Calico CNI
# Applies the tigera-operator manifest pinned to $CALICO_VERSION and waits
# for the operator pods to become Ready (5 minute cap).
# NOTE(review): 'kubectl create' rather than 'apply' — presumably because the
# operator manifest's large CRDs exceed apply's annotation size limit; confirm
# against the Calico install docs for this version.
install_calico() {
  echo -e "${YELLOW}[3/10] Installing Calico CNI...${NC}"
  kubectl create -f "https://raw.githubusercontent.com/projectcalico/calico/${CALICO_VERSION}/manifests/tigera-operator.yaml"
  # Wait for tigera-operator to be ready
  kubectl wait --for=condition=Ready pods --all -n tigera-operator --timeout=300s
  echo -e "${GREEN}Calico operator installed successfully${NC}"
}
# Configure Calico installation
# Renders the Installation + APIServer custom resources to TEMP_DIR and
# applies them. The heredoc delimiter is quoted ('EOF') so the YAML is
# written verbatim with no shell expansion.
# Fix: restored the YAML indentation that was lost in the original heredoc —
# the flattened manifest was not valid YAML and kubectl would reject it.
configure_calico() {
  echo -e "${YELLOW}[4/10] Configuring Calico installation...${NC}"
  cat > "$TEMP_DIR/calico-custom-resources.yaml" << 'EOF'
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNetwork:
    ipPools:
    - blockSize: 26
      cidr: 10.244.0.0/16
      encapsulation: VXLANCrossSubnet
      natOutgoing: Enabled
      nodeSelector: all()
  nodeMetricsPort: 9091
---
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}
EOF
  chmod 644 "$TEMP_DIR/calico-custom-resources.yaml"
  kubectl apply -f "$TEMP_DIR/calico-custom-resources.yaml"
  echo -e "${GREEN}Calico configuration applied${NC}"
}
# Wait for Calico to be ready
# Polls for the calico-system namespace (created by the operator), then waits
# for all pods in it to become Ready.
# Fix: the original namespace poll looped forever if the operator never
# created the namespace; it is now bounded at 300 seconds.
wait_for_calico() {
  echo -e "${YELLOW}[5/10] Waiting for Calico to be ready...${NC}"
  local waited=0
  local -r ns_timeout=300
  # Wait (bounded) for calico-system namespace to exist
  until kubectl get namespace calico-system &> /dev/null; do
    if (( waited >= ns_timeout )); then
      echo -e "${RED}Timed out waiting for calico-system namespace${NC}"
      exit 1
    fi
    echo "Waiting for calico-system namespace..."
    sleep 5
    waited=$((waited + 5))
  done
  kubectl wait --for=condition=Ready pods --all -n calico-system --timeout=600s
  echo -e "${GREEN}Calico is ready${NC}"
}
# Verify Calico installation
# Shows Calico pod status and node readiness for operator inspection.
verify_calico() {
  printf '%b\n' "${YELLOW}[6/10] Verifying Calico installation...${NC}"
  kubectl get pods -n calico-system
  kubectl get nodes -o wide
  printf '%b\n' "${GREEN}Calico verification completed${NC}"
}
# Install OPA Gatekeeper
# Adds the Gatekeeper Helm repository, renders a values file (HA replica
# count, resource limits for the controller and audit pods), and installs the
# chart into the gatekeeper-system namespace.
# Fix: restored the YAML indentation that was lost in the original heredoc —
# the flattened values file was not valid YAML and helm would reject it.
install_gatekeeper() {
  echo -e "${YELLOW}[7/10] Installing OPA Gatekeeper...${NC}"
  helm repo add gatekeeper https://open-policy-agent.github.io/gatekeeper/charts
  helm repo update
  # Quoted delimiter ('EOF'): content is written verbatim, no expansion.
  cat > "$TEMP_DIR/gatekeeper-values.yaml" << 'EOF'
replicas: 3
revisionHistoryLimit: 10
controllerManager:
  resources:
    limits:
      cpu: 1000m
      memory: 512Mi
    requests:
      cpu: 100m
      memory: 256Mi
audit:
  resources:
    limits:
      cpu: 1000m
      memory: 512Mi
    requests:
      cpu: 100m
      memory: 256Mi
postInstall:
  labelNamespace:
    enabled: true
EOF
  chmod 644 "$TEMP_DIR/gatekeeper-values.yaml"
  helm install gatekeeper gatekeeper/gatekeeper \
    --namespace gatekeeper-system \
    --create-namespace \
    --values "$TEMP_DIR/gatekeeper-values.yaml"
  echo -e "${GREEN}Gatekeeper installed successfully${NC}"
}
# Wait for Gatekeeper to be ready
# Blocks until every pod in gatekeeper-system reports Ready (5 minute cap).
wait_for_gatekeeper() {
  printf '%b\n' "${YELLOW}[8/10] Waiting for Gatekeeper to be ready...${NC}"
  kubectl wait --for=condition=Ready pods --all -n gatekeeper-system --timeout=300s
  printf '%b\n' "${GREEN}Gatekeeper is ready${NC}"
}
# Create test namespaces
# Idempotently creates the demo namespaces (dry-run + apply tolerates
# namespaces that already exist).
create_test_namespaces() {
  printf '%b\n' "${YELLOW}[9/10] Creating test namespaces...${NC}"
  local ns
  for ns in frontend backend database; do
    kubectl create namespace "$ns" --dry-run=client -o yaml | kubectl apply -f -
  done
  printf '%b\n' "${GREEN}Test namespaces created${NC}"
}
# Final verification
# Prints a human-readable summary of Calico, Gatekeeper, test namespaces,
# and node status for the operator to review.
final_verification() {
  printf '%b\n' "${YELLOW}[10/10] Performing final verification...${NC}"
  printf '%s\n' "Calico system pods:"
  kubectl get pods -n calico-system
  printf '%b\n' "\nGatekeeper system pods:"
  kubectl get pods -n gatekeeper-system
  printf '%b\n' "\nTest namespaces:"
  kubectl get namespaces frontend backend database
  printf '%b\n' "\nNodes with Calico:"
  kubectl get nodes -o wide
  printf '%b\n' "${GREEN}Installation completed successfully!${NC}"
  printf '%b\n' "${GREEN}You can now configure network policies and admission control policies${NC}"
}
# Main function
# Orchestrates the full installation: distro detection, prerequisite checks,
# Calico CNI install/config/wait, OPA Gatekeeper install/wait, and test
# namespace creation.
#   $1 (optional) - Calico version (already consumed by the CALICO_VERSION
#                   default at the top of the script), or -h/--help.
main() {
  if [[ $# -gt 1 ]]; then
    usage
  fi
  # Fix: usage() was previously unreachable via any help flag.
  if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then
    usage
  fi
  echo -e "${GREEN}Starting Kubernetes Security Installation with Calico CNI and OPA Gatekeeper${NC}"
  detect_distro
  check_privileges
  check_prerequisites
  setup_working_dir
  install_calico
  configure_calico
  wait_for_calico
  verify_calico
  install_gatekeeper
  wait_for_gatekeeper
  create_test_namespaces
  final_verification
  # final_verification already prints the success message; the original
  # duplicated it here.
}
# Entry point: forward all CLI arguments to main.
main "$@"
Review the script before running. Execute with: bash install.sh