
Docker & Container Patterns

Master containerization with Cursor IDE and Claude Code. This guide covers multi-stage builds, container optimization, Docker Compose orchestration, security best practices, and production-ready container workflows with AI assistance.

  1. Initialize Docker Project

    # Create optimized Dockerfile
    Agent: "Create a multi-stage Docker setup for this Node.js app with:
    - Build stage for dependencies
    - Production stage with minimal attack surface
    - Non-root user
    - Health checks"
  2. Install Docker MCP Server (optional)

    # Claude Code
    # Note: No direct MCP URL - use Docker Hub MCP Server
    claude mcp add docker-hub -- npx -y @docker/hub-mcp
    # Access Docker MCP Catalog at: https://hub.docker.com/mcp
  3. Configure AI Rules

    # .cursorrules or CLAUDE.md
    Docker best practices:
    - Use specific base image versions (no :latest)
    - Minimize layers and image size
    - Run as non-root user
    - Use .dockerignore
    - Implement health checks
    - Follow security best practices
    - Use build cache efficiently
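The rules above call for a .dockerignore but none is shown; a minimal starting point (entries are illustrative, trim them to your project) keeps build contexts small and secrets out of images:

# .dockerignore
node_modules
npm-debug.log
dist
coverage
.git
.gitignore
.env
.env.*
Dockerfile
docker-compose*.yml
*.md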
# AI Prompt
Agent: "Create an optimized multi-stage Dockerfile for Node.js with:
- Dependency caching
- Production build optimization
- Security hardening
- Minimal final image"
# Generated Dockerfile
# Build stage
FROM node:20-alpine AS builder
# Install build dependencies
RUN apk add --no-cache python3 make g++
WORKDIR /app
# Cache dependencies
COPY package*.json ./
RUN npm ci --only=production && \
    cp -R node_modules prod_node_modules && \
    npm ci
# Build application
COPY . .
RUN npm run build
# Production stage
FROM node:20-alpine AS production
# Install dumb-init for proper signal handling
RUN apk add --no-cache dumb-init
# Create non-root user
RUN addgroup -g 1001 -S nodejs && \
    adduser -S nodejs -u 1001
WORKDIR /app
# Copy production dependencies
COPY --from=builder --chown=nodejs:nodejs /app/prod_node_modules ./node_modules
# Copy built application
COPY --from=builder --chown=nodejs:nodejs /app/dist ./dist
COPY --from=builder --chown=nodejs:nodejs /app/package*.json ./
# Copy the health check script referenced by HEALTHCHECK below
COPY --from=builder --chown=nodejs:nodejs /app/healthcheck.js ./
# Switch to non-root user
USER nodejs
# Expose port
EXPOSE 3000
# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
    CMD node healthcheck.js
# Use dumb-init to handle signals properly
ENTRYPOINT ["dumb-init", "--"]
CMD ["node", "dist/index.js"]
# AI Prompt: "Create optimized Python Docker build with Poetry"
# Build stage
FROM python:3.12-slim AS builder
# Install build dependencies
RUN apt-get update && apt-get install -y \
    build-essential \
    curl \
    && rm -rf /var/lib/apt/lists/*
# Install Poetry
ENV POETRY_VERSION=1.7.1
RUN curl -sSL https://install.python-poetry.org | python3 -
# Set working directory
WORKDIR /app
# Copy dependency files
COPY pyproject.toml poetry.lock ./
# Install dependencies
RUN ~/.local/bin/poetry config virtualenvs.create false && \
    ~/.local/bin/poetry install --no-interaction --no-ansi --only main
# Copy application
COPY . .
# Production stage
FROM python:3.12-slim AS production
# Install runtime dependencies
RUN apt-get update && apt-get install -y \
    libpq5 \
    && rm -rf /var/lib/apt/lists/*
# Create non-root user
RUN useradd -m -u 1001 -s /bin/bash app
# Copy Python packages from builder
COPY --from=builder /usr/local/lib/python3.12/site-packages /usr/local/lib/python3.12/site-packages
COPY --from=builder /usr/local/bin /usr/local/bin
# Set working directory
WORKDIR /app
# Copy application files
COPY --from=builder --chown=app:app /app .
# Switch to non-root user
USER app
# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
    CMD python -c "import requests; requests.get('http://localhost:8000/health')"
# Run application
CMD ["gunicorn", "--bind", "0.0.0.0:8000", "--workers", "4", "app:application"]
# AI Prompt
Ask: "Optimize this Dockerfile for size:
- Use Alpine or distroless base
- Remove unnecessary files
- Combine RUN commands
- Use --no-cache flags
- Clean package manager cache"
# Optimized Go application
# Build stage
FROM golang:1.21-alpine AS builder
RUN apk add --no-cache git ca-certificates
WORKDIR /app
# Cache dependencies
COPY go.mod go.sum ./
RUN go mod download
# Build binary
COPY . .
RUN CGO_ENABLED=0 GOOS=linux go build \
    -ldflags="-w -s" \
    -o server ./cmd/server
# Final stage - distroless
FROM gcr.io/distroless/static:nonroot
# Copy binary
COPY --from=builder /app/server /server
# Copy certificates for HTTPS
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
# Run as non-root
USER nonroot:nonroot
EXPOSE 8080
ENTRYPOINT ["/server"]
# AI Prompt: "Optimize Docker build performance with caching"
FROM node:20-alpine AS builder
# Mount cache for package managers
RUN --mount=type=cache,target=/root/.npm \
    npm config set cache /root/.npm
WORKDIR /app
# Copy only package files first
COPY package*.json ./
# Install dependencies with cache mount
RUN --mount=type=cache,target=/root/.npm \
    npm ci
# Copy source files (changes don't invalidate npm cache)
COPY . .
# Build with cache mount
RUN --mount=type=cache,target=/app/.next/cache \
    npm run build
# Production stage
FROM node:20-alpine
WORKDIR /app
# Copy built application
COPY --from=builder /app/.next/standalone ./
COPY --from=builder /app/.next/static ./.next/static
COPY --from=builder /app/public ./public
EXPOSE 3000
CMD ["node", "server.js"]
# AI Prompt
Agent: "Create Docker Compose for microservices with:
- API gateway
- Multiple backend services
- PostgreSQL and Redis
- Service discovery
- Health checks
- Development overrides"
# docker-compose.yml
version: '3.9'

services:
  # API Gateway
  gateway:
    build:
      context: ./gateway
      target: ${BUILD_TARGET:-production}
    ports:
      - "8080:8080"
    environment:
      - SERVICE_DISCOVERY_URL=http://consul:8500
      - LOG_LEVEL=${LOG_LEVEL:-info}
    depends_on:
      consul:
        condition: service_healthy
      auth-service:
        condition: service_healthy
      product-service:
        condition: service_healthy
    networks:
      - app-network
    healthcheck:
      test: ["CMD", "wget", "--spider", "-q", "http://localhost:8080/health"]
      interval: 30s
      timeout: 3s
      retries: 3

  # Auth Service
  auth-service:
    build:
      context: ./services/auth
      cache_from:
        - ${REGISTRY}/auth-service:cache
    environment:
      - DATABASE_URL=postgresql://postgres:postgres@postgres:5432/auth_db
      - REDIS_URL=redis://redis:6379
      - JWT_SECRET=${JWT_SECRET}
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - app-network
    deploy:
      replicas: 2
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3001/health"]
      interval: 30s
      timeout: 3s
      retries: 3

  # Product Service
  product-service:
    build:
      context: ./services/product
    environment:
      - DATABASE_URL=postgresql://postgres:postgres@postgres:5432/product_db
      - CACHE_URL=redis://redis:6379
      - S3_BUCKET=${S3_BUCKET}
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - app-network
    volumes:
      - product-images:/app/uploads
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3002/health"]
      interval: 30s
      timeout: 3s
      retries: 3

  # PostgreSQL
  postgres:
    image: postgres:16-alpine
    environment:
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=${DB_PASSWORD:-postgres}
      - POSTGRES_MULTIPLE_DATABASES=auth_db,product_db
    volumes:
      - postgres-data:/var/lib/postgresql/data
      - ./scripts/init-db.sh:/docker-entrypoint-initdb.d/init-db.sh
    networks:
      - app-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 10s
      timeout: 5s
      retries: 5

  # Redis
  redis:
    image: redis:7-alpine
    command: redis-server --requirepass ${REDIS_PASSWORD:-redis}
    volumes:
      - redis-data:/data
    networks:
      - app-network
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5

  # Service Discovery
  consul:
    image: hashicorp/consul:1.17
    command: agent -server -bootstrap -ui -client=0.0.0.0
    ports:
      - "8500:8500"
    networks:
      - app-network
    healthcheck:
      test: ["CMD", "consul", "members"]
      interval: 10s
      timeout: 5s
      retries: 5

networks:
  app-network:
    driver: bridge

volumes:
  postgres-data:
  redis-data:
  product-images:
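Note that POSTGRES_MULTIPLE_DATABASES is not a built-in option of the official postgres image; it only works because the mounted ./scripts/init-db.sh reads it during first initialization. A minimal sketch of such a script, assuming a comma-separated list of database names:

#!/bin/bash
# scripts/init-db.sh - create one database per entry in POSTGRES_MULTIPLE_DATABASES
set -e
if [ -n "$POSTGRES_MULTIPLE_DATABASES" ]; then
  for db in $(echo "$POSTGRES_MULTIPLE_DATABASES" | tr ',' ' '); do
    echo "Creating database: $db"
    psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" -c "CREATE DATABASE $db;"
    psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" -c "GRANT ALL PRIVILEGES ON DATABASE $db TO $POSTGRES_USER;"
  done
fi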
docker-compose.dev.yml
# AI Prompt: "Create development Docker Compose with hot reload"
version: '3.9'

services:
  # Frontend with hot reload
  frontend:
    build:
      context: ./frontend
      target: development
    volumes:
      - ./frontend:/app
      - /app/node_modules
    environment:
      - NODE_ENV=development
      - REACT_APP_API_URL=http://localhost:8080
    ports:
      - "3000:3000"
    command: npm run dev

  # Backend with watch mode
  backend:
    build:
      context: ./backend
      target: development
    volumes:
      - ./backend:/app
      - /app/node_modules
    environment:
      - NODE_ENV=development
      - DEBUG=app:*
    ports:
      - "8080:8080"
      - "9229:9229" # Node.js debugger
    command: npm run dev:debug

  # Database with initialization
  postgres:
    ports:
      - "5432:5432"
    volumes:
      - ./db/init:/docker-entrypoint-initdb.d
      - ./db/seeds:/seeds

  # Mailhog for email testing
  mailhog:
    image: mailhog/mailhog
    ports:
      - "1025:1025" # SMTP
      - "8025:8025" # Web UI
    networks:
      - app-network

  # Adminer for database management
  adminer:
    image: adminer
    ports:
      - "8081:8080"
    networks:
      - app-network
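Compose merges override files on top of the base file, so the development stack is started by passing both files; the file names match the examples above:

# Start the stack with development overrides and hot reload
docker compose -f docker-compose.yml -f docker-compose.dev.yml up --build
# Tear down, including anonymous volumes
docker compose -f docker-compose.yml -f docker-compose.dev.yml down -v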
# AI Prompt
Agent: "Harden this Docker container with:
- Security scanning
- Minimal attack surface
- Read-only filesystem
- Dropped capabilities
- Security policies"
# Hardened container example
FROM node:20-alpine AS production
# Install security updates
RUN apk update && apk upgrade && rm -rf /var/cache/apk/*
# Create non-root user
RUN addgroup -g 1001 -S nodejs && \
    adduser -S nodejs -u 1001
# Set up app directory with proper permissions
WORKDIR /app
RUN chown nodejs:nodejs /app
# Copy application files
COPY --chown=nodejs:nodejs package*.json ./
RUN npm ci --only=production && npm cache clean --force
COPY --chown=nodejs:nodejs . .
# Security configurations (these steps need root, so switch users afterwards)
# Drop file capabilities from the Node binary
RUN apk add --no-cache libcap && \
    setcap -r /usr/local/bin/node || true
# Remove write permissions from the app tree (pair with --read-only at run time)
RUN chmod -R a-w /app
# Switch to non-root user
USER nodejs
# Security labels
LABEL security.scan="enabled" \
    security.updates="auto"
# Health check with timeout
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
    CMD node healthcheck.js || exit 1
# Run with limited resources
CMD ["node", "--max-old-space-size=512", "index.js"]
docker-compose.secrets.yml
# AI Prompt: "Implement secure secret handling in Docker"
version: '3.9'

services:
  app:
    build: .
    secrets:
      - db_password
      - api_key
      - jwt_secret
    environment:
      - DB_PASSWORD_FILE=/run/secrets/db_password
      - API_KEY_FILE=/run/secrets/api_key
      - JWT_SECRET_FILE=/run/secrets/jwt_secret
    # Use init script to load secrets
    entrypoint: ["/app/docker-entrypoint.sh"]
    command: ["node", "index.js"]

secrets:
  db_password:
    file: ./secrets/db_password.txt
  api_key:
    file: ./secrets/api_key.txt
  jwt_secret:
    file: ./secrets/jwt_secret.txt
# docker-entrypoint.sh
#!/bin/sh
set -e
# Load secrets from files into environment variables
if [ -f "$DB_PASSWORD_FILE" ]; then
  export DB_PASSWORD="$(cat "$DB_PASSWORD_FILE")"
fi
if [ -f "$API_KEY_FILE" ]; then
  export API_KEY="$(cat "$API_KEY_FILE")"
fi
if [ -f "$JWT_SECRET_FILE" ]; then
  export JWT_SECRET="$(cat "$JWT_SECRET_FILE")"
fi
# Execute the main command
exec "$@"
.devcontainer/devcontainer.json
// AI Prompt: "Create devcontainer for full-stack development"
{
  "name": "Full Stack Dev Container",
  "dockerComposeFile": "docker-compose.yml",
  "service": "dev",
  "workspaceFolder": "/workspace",
  "features": {
    "ghcr.io/devcontainers/features/common-utils:2": {},
    "ghcr.io/devcontainers/features/git:1": {},
    "ghcr.io/devcontainers/features/node:1": {
      "version": "20"
    },
    "ghcr.io/devcontainers/features/docker-in-docker:2": {}
  },
  "customizations": {
    "vscode": {
      "extensions": [
        "dbaeumer.vscode-eslint",
        "esbenp.prettier-vscode",
        "ms-azuretools.vscode-docker",
        "github.copilot",
        "continue.continue"
      ],
      "settings": {
        "terminal.integrated.defaultProfile.linux": "zsh",
        "editor.formatOnSave": true,
        "editor.defaultFormatter": "esbenp.prettier-vscode"
      }
    }
  },
  "forwardPorts": [3000, 8080, 5432],
  "postCreateCommand": "npm install && npm run db:migrate",
  "remoteUser": "vscode"
}
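The devcontainer above expects a compose file (relative to .devcontainer/) that defines a dev service with the repository mounted at /workspace. A minimal sketch of that service; the base image and mount options are assumptions, not part of the original guide:

# .devcontainer/docker-compose.yml
services:
  dev:
    image: mcr.microsoft.com/devcontainers/base:ubuntu
    volumes:
      - ..:/workspace:cached
    # Keep the container alive so VS Code can attach
    command: sleep infinity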
# AI Prompt
Agent: "Create GitHub Actions workflow for Docker:
- Build and test
- Security scanning
- Push to registry
- Deploy to staging
- Cache optimization"
# .github/workflows/docker-ci.yml
name: Docker CI/CD

on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main]

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  build-and-test:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
      security-events: write
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=sha,format=long

      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          build-args: |
            VERSION=${{ github.sha }}

      - name: Run Trivy vulnerability scanner
        uses: aquasecurity/trivy-action@master
        with:
          image-ref: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:sha-${{ github.sha }}
          format: 'sarif'
          output: 'trivy-results.sarif'

      - name: Upload Trivy scan results
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: 'trivy-results.sarif'

      - name: Run integration tests
        run: |
          docker compose -f docker-compose.test.yml up --abort-on-container-exit
          docker compose -f docker-compose.test.yml down -v

  deploy-staging:
    needs: build-and-test
    if: github.ref == 'refs/heads/main'
    runs-on: ubuntu-latest
    steps:
      - name: Install doctl
        uses: digitalocean/action-doctl@v2
        with:
          token: ${{ secrets.DIGITALOCEAN_ACCESS_TOKEN }}

      - name: Deploy to staging
        run: |
          doctl kubernetes cluster kubeconfig save staging-cluster
          kubectl set image deployment/app app=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:sha-${{ github.sha }}
          kubectl rollout status deployment/app
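The integration-test step references a docker-compose.test.yml that is not shown in this guide. A minimal sketch of what it might contain; the service names, database, and test command are assumptions:

# docker-compose.test.yml
services:
  app:
    build: .
    environment:
      - NODE_ENV=test
      - DATABASE_URL=postgresql://postgres:postgres@postgres:5432/test_db
    depends_on:
      - postgres
    # Exit when the test suite finishes so --abort-on-container-exit stops the stack
    command: npm test
  postgres:
    image: postgres:16-alpine
    environment:
      - POSTGRES_PASSWORD=postgres
      - POSTGRES_DB=test_db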

Docker Development Guidelines

  1. Layer Caching - Order Dockerfile commands from least to most frequently changing (see the sketch after this list)
  2. Multi-stage Builds - Separate build dependencies from runtime
  3. Security First - Run as non-root, scan for vulnerabilities
  4. Size Matters - Use minimal base images, remove unnecessary files
  5. Health Checks - Always implement health check endpoints
  6. Secrets - Never hardcode secrets, use proper secret management
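To illustrate the layer-caching guideline, copy dependency manifests and install before copying source, so code changes do not invalidate the install layer. The package manager shown is Node, but the pattern is the same elsewhere:

# Stable layers first: base image and dependency manifests
FROM node:20-alpine
WORKDIR /app
COPY package*.json ./
RUN npm ci --omit=dev
# Frequently changing layers last: application source
COPY . .
CMD ["node", "index.js"]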
# AI: "Add debugging capabilities to production container"
FROM app:production AS debug
# Install debugging tools
USER root
RUN apk add --no-cache \
    curl \
    wget \
    netcat-openbsd \
    bind-tools \
    strace
USER nodejs
# Enable Node.js debugging
ENV NODE_OPTIONS="--inspect=0.0.0.0:9229"
EXPOSE 9229
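To use the debug stage, build that target explicitly and publish the inspector port; the tags below are illustrative and app:production must already exist locally or in your registry:

# Build only the debug stage and run it with the Node.js inspector exposed
docker build --target debug -t app:debug .
docker run -d -p 3000:3000 -p 9229:9229 app:debug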
# AI Prompt: "Debug Docker build issues"
# Analyze build performance
docker build --progress=plain --no-cache .
# Debug layer sizes
docker history --human --format "table {{.CreatedBy}}\t{{.Size}}" image:tag
# Inspect build cache
docker buildx du --verbose
# Security scan
docker scout cves image:tag
# Resource usage
docker stats --no-stream