Fix #1 - SECRET_KEY startup validation (config.py, .env): - App refuses to start if SECRET_KEY is missing, shorter than 32 chars, or matches a known insecure default value - .env: replaced hardcoded test key with placeholder + generation hint Fix #2 - Docker socket proxy (docker-compose.yml): - Add tecnativa/docker-socket-proxy sidecar - Only expose required Docker API endpoints (CONTAINERS, IMAGES, NETWORKS, POST, EXEC); dangerous endpoints explicitly blocked - Remove direct /var/run/docker.sock mount from main container - Route Docker API via DOCKER_HOST=tcp://docker-socket-proxy:2375 Fix #3 - Azure AD group whitelist (auth.py, models.py, validators.py): - New azure_allowed_group_id field in SystemConfig - After token exchange, verify group membership via Graph API /me/memberOf - Deny login with HTTP 403 if user is not in the required group - New Azure AD users now get role 'viewer' instead of 'admin' Fix #4 - Rate limiting on login (main.py, auth.py, requirements.txt): - Add slowapi==0.1.9 dependency - Initialize SlowAPI limiter in main.py with 429 exception handler - Apply 10 requests/minute limit per IP on /login and /mfa/verify
74 lines
2.2 KiB
YAML
services:
  # ---------------------------------------------------------------------------
  # Docker Socket Proxy — limits Docker API access to only what is needed.
  # The main app container no longer has direct access to /var/run/docker.sock.
  # ---------------------------------------------------------------------------
  docker-socket-proxy:
    # NOTE(review): consider pinning a specific tag instead of :latest for
    # reproducible deployments.
    image: tecnativa/docker-socket-proxy:latest
    container_name: docker-socket-proxy
    restart: unless-stopped
    environment:
      # Values are quoted so YAML keeps them as strings (env vars are strings).
      # Read-only endpoints
      CONTAINERS: "1"
      IMAGES: "1"
      NETWORKS: "1"
      INFO: "1"
      # Write endpoints (needed for compose up/down/start/stop)
      POST: "1"
      # EXEC is intentionally ALLOWED (required by the app per Fix #2);
      # it was previously listed under the "deny" section, which was misleading.
      EXEC: "1"
      # Explicitly deny dangerous endpoints
      AUTH: "0"
      SECRETS: "0"
      SWARM: "0"
      NODES: "0"
      SERVICES: "0"
      TASKS: "0"
      CONFIGS: "0"
      PLUGINS: "0"
      VOLUMES: "0"
      BUILD: "0"
      COMMIT: "0"
      DISTRIBUTION: "0"
    volumes:
      # Read-only mount of the host socket; only this proxy touches it.
      - /var/run/docker.sock:/var/run/docker.sock:ro
    networks:
      - npm-network
    # Only accessible from within the Docker network — never expose port externally
netbird-msp-appliance:
|
|
build: .
|
|
container_name: netbird-msp-appliance
|
|
restart: unless-stopped
|
|
depends_on:
|
|
- docker-socket-proxy
|
|
ports:
|
|
- "${WEB_UI_PORT:-8000}:8000"
|
|
volumes:
|
|
- ./data:/app/data
|
|
- ./logs:/app/logs
|
|
- ./backups:/app/backups
|
|
# NOTE: /var/run/docker.sock is intentionally NOT mounted here.
|
|
# Docker access goes through the docker-socket-proxy sidecar.
|
|
- ${DATA_DIR:-/opt/netbird-instances}:${DATA_DIR:-/opt/netbird-instances}
|
|
environment:
|
|
- SECRET_KEY=${SECRET_KEY}
|
|
- DATABASE_PATH=/app/data/netbird_msp.db
|
|
- LOG_LEVEL=${LOG_LEVEL:-INFO}
|
|
- DATA_DIR=${DATA_DIR:-/opt/netbird-instances}
|
|
- DOCKER_NETWORK=${DOCKER_NETWORK:-npm-network}
|
|
- HOST_IP=${HOST_IP:-}
|
|
# Route Docker API calls through the socket proxy instead of the raw socket
|
|
- DOCKER_HOST=tcp://docker-socket-proxy:2375
|
|
networks:
|
|
- npm-network
|
|
healthcheck:
|
|
test: ["CMD", "curl", "-f", "http://localhost:8000/api/health"]
|
|
interval: 30s
|
|
timeout: 10s
|
|
retries: 3
|
|
start_period: 15s
|
|
|
|
networks:
  # External network — must already exist before `docker compose up`
  # (e.g. `docker network create npm-network`). Presumably shared with a
  # reverse-proxy stack given the name — confirm against the deployment docs.
  npm-network:
    external: true