Every Docker command you actually need, organized by what you're trying to do. From "I just installed Docker" to "my containers are in production." Bookmark this.
# Check Docker is working
docker version # Client + server version
docker info # System-wide information
docker system df # Disk usage summary
# Run your first container
docker run hello-world # Verify installation
docker run -it ubuntu bash # Interactive Ubuntu shell
docker run -d nginx # Run nginx in background (detached)
# Run containers
docker run -d --name myapp -p 8080:80 nginx # Named, port-mapped, detached
docker run -d -p 3000:3000 -v $(pwd):/app node # With volume mount
docker run --rm -it alpine sh # Temporary (auto-remove on exit)
docker run -d --restart unless-stopped myapp # Auto-restart on crash
docker run --env-file .env myapp # Load env vars from file
docker run --memory 512m --cpus 1.5 myapp # Resource limits
# List containers
docker ps # Running containers
docker ps -a # All containers (including stopped)
docker ps -q # Just IDs (useful for scripting)
docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" # Custom format
# Container lifecycle
docker start myapp # Start stopped container
docker stop myapp # Graceful stop (SIGTERM, then SIGKILL after 10s)
docker stop -t 30 myapp # Graceful stop with 30s timeout
docker restart myapp # Stop + start
docker kill myapp # Force stop (SIGKILL)
docker pause myapp # Freeze container
docker unpause myapp # Unfreeze
# Interact with running containers
docker exec -it myapp bash # Open shell in running container
docker exec myapp cat /etc/hosts # Run single command
docker logs myapp # View logs
docker logs -f myapp # Follow logs (like tail -f)
docker logs --tail 100 myapp # Last 100 lines
docker logs --since 1h myapp # Last hour of logs
docker top myapp # Running processes
docker stats # Live resource usage (all containers)
docker stats myapp # Resource usage (specific container)
# Copy files
docker cp myapp:/app/data.json ./data.json # Container → host
docker cp ./config.yaml myapp:/app/config.yaml # Host → container
# Remove containers
docker rm myapp # Remove stopped container
docker rm -f myapp # Force remove (even if running)
docker container prune # Remove ALL stopped containers
# List and search
docker images # Local images
docker images -a # Include intermediate layers
docker search nginx # Search Docker Hub
# Pull and push
docker pull nginx # Pull latest
docker pull nginx:1.25 # Pull specific tag
docker pull --platform linux/amd64 nginx # Specific platform
docker push myregistry/myapp:v1.0 # Push to registry
# Build
docker build -t myapp . # Build from Dockerfile in current dir
docker build -t myapp:v1.0 . # With tag
docker build -f Dockerfile.prod -t myapp . # Custom Dockerfile
docker build --no-cache -t myapp . # Fresh build (no cache)
docker build --build-arg NODE_ENV=production -t myapp . # Build args
docker build --target builder -t myapp . # Multi-stage: specific target
# Remove images
docker rmi nginx # Remove image
docker rmi -f nginx # Force remove
docker image prune # Remove dangling images
docker image prune -a # Remove ALL unused images
# Production-ready Node.js example
# syntax=docker/dockerfile:1
FROM node:20-slim AS builder
WORKDIR /app
# Copy lockfiles first so the dependency layer is cached until they change
COPY package*.json ./
# Full install: devDependencies (tsc, bundlers, ...) are needed by `npm run build`.
# (`--only=production` here would break the build, and the flag is deprecated.)
RUN npm ci
COPY . .
RUN npm run build
# Strip devDependencies so only runtime deps are copied into the final image
RUN npm prune --omit=dev

FROM node:20-slim
WORKDIR /app
# Dedicated unprivileged user — never run the app as root
RUN addgroup --system app && adduser --system --ingroup app app
COPY --from=builder --chown=app:app /app/dist ./dist
COPY --from=builder --chown=app:app /app/node_modules ./node_modules
COPY --from=builder --chown=app:app /app/package.json ./
USER app
EXPOSE 3000
# node:*-slim ships without curl, so probe with node itself;
# --start-period avoids flapping health status while the app boots
HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
  CMD node -e "require('http').get('http://localhost:3000/health',r=>process.exit(r.statusCode===200?0:1)).on('error',()=>process.exit(1))"
CMD ["node", "dist/index.js"]
# Production-ready Python example
# syntax=docker/dockerfile:1
FROM python:3.12-slim AS builder
WORKDIR /app
COPY requirements.txt .
# Install into an isolated prefix so only the packages (no pip cache,
# no build tooling) get copied into the runtime stage
RUN pip install --no-cache-dir --prefix=/install -r requirements.txt

FROM python:3.12-slim
WORKDIR /app
COPY --from=builder /install /usr/local
# Create the unprivileged user before copying the app so ownership
# can be set in the COPY itself (no extra chown layer)
RUN useradd -r -s /bin/false app
COPY --chown=app:app . .
USER app
EXPOSE 8000
# Probe with the stdlib (slim image has no curl); urlopen raises on
# connection/HTTP errors, which makes the check exit non-zero.
# --start-period avoids flapping health status while the app boots.
HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \
  CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')"
CMD ["gunicorn", "app:app", "-b", "0.0.0.0:8000", "-w", "4"]
# Named volumes (Docker manages the data)
docker volume create mydata
docker volume ls
docker volume inspect mydata
docker run -d -v mydata:/app/data myapp # Mount named volume
# Bind mounts (your filesystem)
docker run -d -v $(pwd)/data:/app/data myapp # Mount host dir
docker run -d -v $(pwd)/data:/app/data:ro myapp # Read-only mount
# Remove volumes
docker volume rm mydata
docker volume prune # Remove ALL unused volumes
# Manage networks
docker network ls
docker network create mynet
docker network create --driver bridge mynet
docker network inspect mynet
docker network rm mynet
# Connect containers
docker run -d --name api --network mynet myapi
docker run -d --name db --network mynet postgres
# Now 'api' can reach 'db' by container name: postgres://db:5432
docker network connect mynet existing-container
docker network disconnect mynet existing-container
# docker-compose.yml
# Indentation restored — the snippet had lost its YAML structure, which made it
# invalid. Structure, service names, images, and ports are unchanged.
services:
  app:
    build: .
    ports:
      - "3000:3000"
    environment:
      # 'db' resolves through the compose network's built-in DNS
      - DATABASE_URL=postgres://db:5432/myapp
    depends_on:
      db:
        # Wait for the db healthcheck to pass, not merely for the container to start
        condition: service_healthy
    restart: unless-stopped
  db:
    image: postgres:16
    environment:
      POSTGRES_DB: myapp
      # Don't hard-code real credentials — override from the shell or a .env
      # file; 'secret' remains the fallback for local development only.
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-secret}
    volumes:
      # Named volume so data survives `docker compose down`
      - pgdata:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 5s
      timeout: 5s
      retries: 5
  redis:
    image: redis:7-alpine
    ports:
      - "6379:6379"
volumes:
  pgdata:
# Compose commands
docker compose up # Start all services (foreground)
docker compose up -d # Start all services (detached)
docker compose up --build # Rebuild images first
docker compose down # Stop and remove containers
docker compose down -v # Stop, remove containers AND volumes
docker compose ps # List running services
docker compose logs # View all logs
docker compose logs -f app # Follow specific service logs
docker compose exec app bash # Shell into service
docker compose pull # Pull latest images
docker compose restart app # Restart specific service
docker compose stop # Stop without removing
docker compose build # Build/rebuild images
# The nuclear options
docker system prune # Remove stopped containers, unused networks, dangling images
docker system prune -a # Remove EVERYTHING unused (including images)
docker system prune -a --volumes # Remove EVERYTHING including volumes ⚠️
# Targeted cleanup
docker container prune # Remove stopped containers
docker image prune -a # Remove unused images
docker volume prune # Remove unused volumes
docker network prune # Remove unused networks
# See what's using disk space
docker system df
docker system df -v # Verbose breakdown
# Inspect everything
docker inspect myapp # Full container details (JSON)
docker inspect --format '{{.NetworkSettings.IPAddress}}' myapp # Specific field
# Check why a container died
docker logs myapp --tail 50
docker inspect --format '{{.State.ExitCode}}' myapp
docker inspect --format '{{.State.OOMKilled}}' myapp # Out of memory?
# Resource monitoring
docker stats --no-stream # Snapshot of all containers
docker events # Real-time Docker daemon events
# Debug networking
docker run --rm --net container:myapp nicolaka/netshoot # Network debugging toolkit
1. Use .dockerignore โ same idea as .gitignore. Always exclude node_modules, .git, .env, and build artifacts.
2. Use multi-stage builds. Your production image should only contain what's needed to run. Build deps stay in the builder stage.
3. Never run as root. Add a USER instruction in your Dockerfile. It takes 2 lines and prevents entire classes of vulnerabilities.
4. Add healthchecks. Docker (and orchestrators) use these to know if your app is actually working, not just running.
5. Pin your base image versions. FROM node:20.11-slim not FROM node:latest. Reproducible builds save you at 3 AM.
6. Use --restart unless-stopped for anything that should survive a server reboot.
7. docker compose up --build -d is probably the command you run most. Rebuild + restart in one shot.