diff --git a/.gitignore b/.gitignore index 8c2b884..40f9887 100644 --- a/.gitignore +++ b/.gitignore @@ -1,14 +1,63 @@ -# ---> VisualStudioCode +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +*.egg-info/ +dist/ +build/ +.eggs/ + +# Virtual environment +venv/ +.venv/ +env/ + +# IDE .vscode/* !.vscode/settings.json !.vscode/tasks.json !.vscode/launch.json !.vscode/extensions.json !.vscode/*.code-snippets - -# Local History for Visual Studio Code +.idea/ +*.swp +*.swo +*~ .history/ - -# Built Visual Studio Code Extensions *.vsix +# Environment & secrets +.env +.env.local +.env.production + +# Database +*.db +*.sqlite +*.sqlite3 +data/ + +# Logs +logs/ +*.log + +# Backups +backups/ + +# OS +.DS_Store +Thumbs.db +desktop.ini + +# Docker +docker-compose.override.yml + +# Build artifacts +*.tar.gz +*.zip + +# Test +.pytest_cache/ +.coverage +htmlcov/ diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..bb5a149 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,34 @@ +FROM python:3.11-slim + +LABEL maintainer="NetBird MSP Appliance" +LABEL description="Multi-tenant NetBird management platform" + +# Install system dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + curl \ + && rm -rf /var/lib/apt/lists/* + +# Set working directory +WORKDIR /app + +# Copy requirements first for caching +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +# Copy application code +COPY app/ ./app/ +COPY templates/ ./templates/ +COPY static/ ./static/ + +# Create data directories +RUN mkdir -p /app/data /app/logs /app/backups + +# Expose port +EXPOSE 8000 + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=15s --retries=3 \ + CMD curl -f http://localhost:8000/api/health || exit 1 + +# Run the application +CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000", "--log-level", "info"] diff --git a/app/__init__.py b/app/__init__.py new file mode 100644 index 0000000..a046f75 --- /dev/null +++ b/app/__init__.py @@ -0,0 +1 @@ +# NetBird MSP Appliance diff --git a/app/database.py b/app/database.py new file mode 100644 index 0000000..0be9fc4 --- /dev/null +++ b/app/database.py @@ -0,0 +1,57 @@ +"""Database setup and session management for NetBird MSP Appliance.""" + +import os +import sys +from typing import Generator + +from sqlalchemy import create_engine, event +from sqlalchemy.orm import Session, sessionmaker, declarative_base + +DATABASE_PATH = os.environ.get("DATABASE_PATH", "/app/data/netbird_msp.db") +DATABASE_URL = f"sqlite:///{DATABASE_PATH}" + +engine = create_engine( + DATABASE_URL, + connect_args={"check_same_thread": False}, + echo=False, +) + +# Enable WAL mode and foreign keys for SQLite +@event.listens_for(engine, "connect") +def _set_sqlite_pragma(dbapi_connection, connection_record) -> None: + cursor = dbapi_connection.cursor() + cursor.execute("PRAGMA journal_mode=WAL") + cursor.execute("PRAGMA foreign_keys=ON") + cursor.close() + + +SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) +Base = declarative_base() + + +def get_db() -> Generator[Session, None, None]: + """Yield a database session, ensuring it is closed after use.""" + db = SessionLocal() + try: + yield db + finally: + db.close() + + +def init_db() -> None: + """Create all database tables.""" + from app.models import ( # noqa: F401 + Customer, + Deployment, + DeploymentLog, + SystemConfig, + User, + ) + + Base.metadata.create_all(bind=engine) + + +if __name__ == "__main__": + if len(sys.argv) > 1 and 
sys.argv[1] == "init": + init_db() + print("Database initialized successfully.") diff --git a/app/dependencies.py b/app/dependencies.py new file mode 100644 index 0000000..a18a583 --- /dev/null +++ b/app/dependencies.py @@ -0,0 +1,77 @@ +"""FastAPI dependencies — JWT authentication, database session, rate limiting.""" + +from datetime import datetime, timedelta +from typing import Optional + +from fastapi import Depends, HTTPException, status +from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer +from jose import JWTError, jwt +from sqlalchemy.orm import Session + +from app.database import get_db +from app.models import User +from app.utils.config import JWT_ALGORITHM, JWT_EXPIRE_MINUTES, SECRET_KEY + +security_scheme = HTTPBearer(auto_error=False) + + +def create_access_token(username: str, expires_delta: Optional[timedelta] = None) -> str: + """Create a JWT access token. + + Args: + username: The user identity to encode. + expires_delta: Custom expiration; defaults to JWT_EXPIRE_MINUTES. + + Returns: + Encoded JWT string. + """ + expire = datetime.utcnow() + (expires_delta or timedelta(minutes=JWT_EXPIRE_MINUTES)) + payload = {"sub": username, "exp": expire} + return jwt.encode(payload, SECRET_KEY, algorithm=JWT_ALGORITHM) + + +def get_current_user( + credentials: Optional[HTTPAuthorizationCredentials] = Depends(security_scheme), + db: Session = Depends(get_db), +) -> User: + """Validate the JWT bearer token and return the authenticated user. + + Args: + credentials: Bearer token from the Authorization header. + db: Database session. + + Returns: + The authenticated User ORM object. + + Raises: + HTTPException: If the token is missing, invalid, or the user is inactive. + """ + if credentials is None: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Authentication required.", + headers={"WWW-Authenticate": "Bearer"}, + ) + + token = credentials.credentials + try: + payload = jwt.decode(token, SECRET_KEY, algorithms=[JWT_ALGORITHM]) + username: Optional[str] = payload.get("sub") + if username is None: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid token payload.", + ) + except JWTError: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid or expired token.", + ) + + user = db.query(User).filter(User.username == username).first() + if user is None or not user.is_active: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="User not found or inactive.", + ) + return user diff --git a/app/main.py b/app/main.py new file mode 100644 index 0000000..d8afd33 --- /dev/null +++ b/app/main.py @@ -0,0 +1,90 @@ +"""FastAPI entry point for NetBird MSP Appliance.""" + +import logging +import os + +from fastapi import FastAPI +from fastapi.middleware.cors import CORSMiddleware +from fastapi.responses import JSONResponse +from fastapi.staticfiles import StaticFiles + +from app.database import init_db +from app.routers import auth, customers, deployments, monitoring, settings + +# --------------------------------------------------------------------------- +# Logging +# --------------------------------------------------------------------------- +LOG_LEVEL = os.environ.get("LOG_LEVEL", "INFO").upper() +logging.basicConfig( + level=getattr(logging, LOG_LEVEL, logging.INFO), + format="%(asctime)s [%(levelname)s] %(name)s: %(message)s", +) +logger = logging.getLogger(__name__) + +# --------------------------------------------------------------------------- +# Application +# 
--------------------------------------------------------------------------- +app = FastAPI( + title="NetBird MSP Appliance", + description="Multi-tenant NetBird management platform for MSPs", + version="1.0.0", + docs_url="/api/docs", + redoc_url="/api/redoc", + openapi_url="/api/openapi.json", +) + +# CORS — allow same-origin; adjust if needed +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + +# --------------------------------------------------------------------------- +# Routers +# --------------------------------------------------------------------------- +app.include_router(auth.router, prefix="/api/auth", tags=["Authentication"]) +app.include_router(settings.router, prefix="/api/settings", tags=["Settings"]) +app.include_router(customers.router, prefix="/api/customers", tags=["Customers"]) +app.include_router(deployments.router, prefix="/api/customers", tags=["Deployments"]) +app.include_router(monitoring.router, prefix="/api/monitoring", tags=["Monitoring"]) + +# --------------------------------------------------------------------------- +# Static files — serve the frontend SPA +# --------------------------------------------------------------------------- +STATIC_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), "static") +if os.path.isdir(STATIC_DIR): + app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static") + +# Serve index.html at root +from fastapi.responses import FileResponse + +@app.get("/", include_in_schema=False) +async def serve_index(): + """Serve the main dashboard.""" + index_path = os.path.join(STATIC_DIR, "index.html") + if os.path.isfile(index_path): + return FileResponse(index_path) + return JSONResponse({"message": "NetBird MSP Appliance API is running."}) + + +# --------------------------------------------------------------------------- +# Health endpoint (unauthenticated) +# --------------------------------------------------------------------------- +@app.get("/api/health", tags=["Health"]) +async def health_check(): + """Simple health check endpoint for Docker HEALTHCHECK.""" + return {"status": "ok", "service": "netbird-msp-appliance"} + + +# --------------------------------------------------------------------------- +# Startup event +# --------------------------------------------------------------------------- +@app.on_event("startup") +async def startup_event(): + """Initialize database tables on startup.""" + logger.info("Starting NetBird MSP Appliance...") + init_db() + logger.info("Database initialized.") diff --git a/app/models.py b/app/models.py new file mode 100644 index 0000000..6022a72 --- /dev/null +++ b/app/models.py @@ -0,0 +1,232 @@ +"""SQLAlchemy ORM models for NetBird MSP Appliance.""" + +from datetime import datetime +from typing import Optional + +from sqlalchemy import ( + Boolean, + CheckConstraint, + DateTime, + ForeignKey, + Integer, + String, + Text, + UniqueConstraint, +) +from sqlalchemy.orm import Mapped, mapped_column, relationship + +from app.database import Base + + +class Customer(Base): + """Customer model representing an MSP client.""" + + __tablename__ = "customers" + + id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) + name: Mapped[str] = mapped_column(String(255), nullable=False) + company: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) + subdomain: Mapped[str] = mapped_column(String(63), unique=True, nullable=False) + email: Mapped[str] = 
mapped_column(String(255), nullable=False) + max_devices: Mapped[int] = mapped_column(Integer, default=20) + notes: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + status: Mapped[str] = mapped_column( + String(20), + default="active", + nullable=False, + ) + created_at: Mapped[datetime] = mapped_column(DateTime, default=datetime.utcnow) + updated_at: Mapped[datetime] = mapped_column( + DateTime, default=datetime.utcnow, onupdate=datetime.utcnow + ) + + __table_args__ = ( + CheckConstraint( + "status IN ('active', 'inactive', 'deploying', 'error')", + name="ck_customer_status", + ), + ) + + deployment: Mapped[Optional["Deployment"]] = relationship( + "Deployment", back_populates="customer", uselist=False, cascade="all, delete-orphan" + ) + logs: Mapped[list["DeploymentLog"]] = relationship( + "DeploymentLog", back_populates="customer", cascade="all, delete-orphan" + ) + + def to_dict(self) -> dict: + """Serialize customer to dictionary.""" + return { + "id": self.id, + "name": self.name, + "company": self.company, + "subdomain": self.subdomain, + "email": self.email, + "max_devices": self.max_devices, + "notes": self.notes, + "status": self.status, + "created_at": self.created_at.isoformat() if self.created_at else None, + "updated_at": self.updated_at.isoformat() if self.updated_at else None, + } + + +class Deployment(Base): + """Deployment model tracking a customer's NetBird instance.""" + + __tablename__ = "deployments" + + id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) + customer_id: Mapped[int] = mapped_column( + Integer, ForeignKey("customers.id", ondelete="CASCADE"), unique=True, nullable=False + ) + container_prefix: Mapped[str] = mapped_column(String(100), nullable=False) + relay_udp_port: Mapped[int] = mapped_column(Integer, unique=True, nullable=False) + npm_proxy_id: Mapped[Optional[int]] = mapped_column(Integer, nullable=True) + relay_secret: Mapped[str] = mapped_column(Text, nullable=False) + setup_url: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + deployment_status: Mapped[str] = mapped_column( + String(20), default="pending", nullable=False + ) + deployed_at: Mapped[datetime] = mapped_column(DateTime, default=datetime.utcnow) + last_health_check: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True) + + __table_args__ = ( + CheckConstraint( + "deployment_status IN ('pending', 'running', 'stopped', 'failed')", + name="ck_deployment_status", + ), + ) + + customer: Mapped["Customer"] = relationship("Customer", back_populates="deployment") + + def to_dict(self) -> dict: + """Serialize deployment to dictionary.""" + return { + "id": self.id, + "customer_id": self.customer_id, + "container_prefix": self.container_prefix, + "relay_udp_port": self.relay_udp_port, + "npm_proxy_id": self.npm_proxy_id, + "relay_secret": "***", # Never expose secrets + "setup_url": self.setup_url, + "deployment_status": self.deployment_status, + "deployed_at": self.deployed_at.isoformat() if self.deployed_at else None, + "last_health_check": ( + self.last_health_check.isoformat() if self.last_health_check else None + ), + } + + +class SystemConfig(Base): + """Singleton system configuration — always id=1.""" + + __tablename__ = "system_config" + + id: Mapped[int] = mapped_column( + Integer, primary_key=True, default=1 + ) + base_domain: Mapped[str] = mapped_column(String(255), nullable=False) + admin_email: Mapped[str] = mapped_column(String(255), nullable=False) + npm_api_url: Mapped[str] = mapped_column(String(500), 
nullable=False) + npm_api_token_encrypted: Mapped[str] = mapped_column(Text, nullable=False) + netbird_management_image: Mapped[str] = mapped_column( + String(255), default="netbirdio/management:latest" + ) + netbird_signal_image: Mapped[str] = mapped_column( + String(255), default="netbirdio/signal:latest" + ) + netbird_relay_image: Mapped[str] = mapped_column( + String(255), default="netbirdio/relay:latest" + ) + netbird_dashboard_image: Mapped[str] = mapped_column( + String(255), default="netbirdio/dashboard:latest" + ) + data_dir: Mapped[str] = mapped_column(String(500), default="/opt/netbird-instances") + docker_network: Mapped[str] = mapped_column(String(100), default="npm-network") + relay_base_port: Mapped[int] = mapped_column(Integer, default=3478) + created_at: Mapped[datetime] = mapped_column(DateTime, default=datetime.utcnow) + updated_at: Mapped[datetime] = mapped_column( + DateTime, default=datetime.utcnow, onupdate=datetime.utcnow + ) + + __table_args__ = ( + CheckConstraint("id = 1", name="ck_system_config_singleton"), + ) + + def to_dict(self) -> dict: + """Serialize config to dictionary (token masked).""" + return { + "base_domain": self.base_domain, + "admin_email": self.admin_email, + "npm_api_url": self.npm_api_url, + "npm_api_token_set": bool(self.npm_api_token_encrypted), + "netbird_management_image": self.netbird_management_image, + "netbird_signal_image": self.netbird_signal_image, + "netbird_relay_image": self.netbird_relay_image, + "netbird_dashboard_image": self.netbird_dashboard_image, + "data_dir": self.data_dir, + "docker_network": self.docker_network, + "relay_base_port": self.relay_base_port, + "created_at": self.created_at.isoformat() if self.created_at else None, + "updated_at": self.updated_at.isoformat() if self.updated_at else None, + } + + +class DeploymentLog(Base): + """Log entries for deployment actions.""" + + __tablename__ = "deployment_logs" + + id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) + customer_id: Mapped[int] = mapped_column( + Integer, ForeignKey("customers.id", ondelete="CASCADE"), nullable=False + ) + action: Mapped[str] = mapped_column(String(100), nullable=False) + status: Mapped[str] = mapped_column(String(20), nullable=False) + message: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + details: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + created_at: Mapped[datetime] = mapped_column(DateTime, default=datetime.utcnow) + + __table_args__ = ( + CheckConstraint( + "status IN ('success', 'error', 'info')", + name="ck_log_status", + ), + ) + + customer: Mapped["Customer"] = relationship("Customer", back_populates="logs") + + def to_dict(self) -> dict: + """Serialize log entry to dictionary.""" + return { + "id": self.id, + "customer_id": self.customer_id, + "action": self.action, + "status": self.status, + "message": self.message, + "details": self.details, + "created_at": self.created_at.isoformat() if self.created_at else None, + } + + +class User(Base): + """Admin user model.""" + + __tablename__ = "users" + + id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) + username: Mapped[str] = mapped_column(String(100), unique=True, nullable=False) + password_hash: Mapped[str] = mapped_column(Text, nullable=False) + email: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) + is_active: Mapped[bool] = mapped_column(Boolean, default=True) + created_at: Mapped[datetime] = mapped_column(DateTime, default=datetime.utcnow) + + def to_dict(self) 
-> dict: + """Serialize user to dictionary (no password).""" + return { + "id": self.id, + "username": self.username, + "email": self.email, + "is_active": self.is_active, + "created_at": self.created_at.isoformat() if self.created_at else None, + } diff --git a/app/routers/__init__.py b/app/routers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/routers/auth.py b/app/routers/auth.py new file mode 100644 index 0000000..05b47a5 --- /dev/null +++ b/app/routers/auth.py @@ -0,0 +1,97 @@ +"""Authentication API endpoints — login, logout, current user, password change.""" + +import logging +from datetime import datetime + +from fastapi import APIRouter, Depends, HTTPException, status +from sqlalchemy.orm import Session + +from app.database import get_db +from app.dependencies import create_access_token, get_current_user +from app.models import User +from app.utils.security import hash_password, verify_password +from app.utils.validators import ChangePasswordRequest, LoginRequest + +logger = logging.getLogger(__name__) +router = APIRouter() + + +@router.post("/login") +async def login(payload: LoginRequest, db: Session = Depends(get_db)): + """Authenticate and return a JWT token. + + Args: + payload: Username and password. + db: Database session. + + Returns: + JSON with ``access_token`` and ``token_type``. + """ + user = db.query(User).filter(User.username == payload.username).first() + if not user or not verify_password(payload.password, user.password_hash): + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid username or password.", + ) + if not user.is_active: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="Account is disabled.", + ) + + token = create_access_token(user.username) + logger.info("User %s logged in.", user.username) + return { + "access_token": token, + "token_type": "bearer", + "user": user.to_dict(), + } + + +@router.post("/logout") +async def logout(current_user: User = Depends(get_current_user)): + """Logout (client-side token discard). + + Returns: + Confirmation message. + """ + logger.info("User %s logged out.", current_user.username) + return {"message": "Logged out successfully."} + + +@router.get("/me") +async def get_me(current_user: User = Depends(get_current_user)): + """Return the current authenticated user's profile. + + Returns: + User dict (no password hash). + """ + return current_user.to_dict() + + +@router.post("/change-password") +async def change_password( + payload: ChangePasswordRequest, + current_user: User = Depends(get_current_user), + db: Session = Depends(get_db), +): + """Change the current user's password. + + Args: + payload: Current and new password. + current_user: Authenticated user. + db: Database session. + + Returns: + Confirmation message. 
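+
+    Example:
+        Illustrative call with ``httpx`` (host, token, and passwords are
+        placeholders)::
+
+            httpx.post(
+                "https://msp.example.com/api/auth/change-password",
+                headers={"Authorization": "Bearer <token>"},
+                json={"current_password": "old-secret", "new_password": "new-secret"},
+            )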
+ """ + if not verify_password(payload.current_password, current_user.password_hash): + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Current password is incorrect.", + ) + + current_user.password_hash = hash_password(payload.new_password) + db.commit() + logger.info("Password changed for user %s.", current_user.username) + return {"message": "Password changed successfully."} diff --git a/app/routers/customers.py b/app/routers/customers.py new file mode 100644 index 0000000..2d6004f --- /dev/null +++ b/app/routers/customers.py @@ -0,0 +1,231 @@ +"""Customer CRUD API endpoints with automatic deployment on create.""" + +import logging +from datetime import datetime +from typing import Optional + +from fastapi import APIRouter, BackgroundTasks, Depends, HTTPException, Query, status +from sqlalchemy.orm import Session + +from app.database import get_db +from app.dependencies import get_current_user +from app.models import Customer, Deployment, DeploymentLog, User +from app.services import netbird_service +from app.utils.validators import CustomerCreate, CustomerUpdate + +logger = logging.getLogger(__name__) +router = APIRouter() + + +@router.post("", status_code=status.HTTP_201_CREATED) +async def create_customer( + payload: CustomerCreate, + background_tasks: BackgroundTasks, + current_user: User = Depends(get_current_user), + db: Session = Depends(get_db), +): + """Create a new customer and trigger auto-deployment. + + Validates that the subdomain is unique, creates the customer record, + and launches deployment in the background. + + Args: + payload: Customer creation data. + background_tasks: FastAPI background task runner. + + Returns: + Created customer dict with deployment status. + """ + # Check subdomain uniqueness + existing = db.query(Customer).filter(Customer.subdomain == payload.subdomain).first() + if existing: + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, + detail=f"Subdomain '{payload.subdomain}' is already in use.", + ) + + customer = Customer( + name=payload.name, + company=payload.company, + subdomain=payload.subdomain, + email=payload.email, + max_devices=payload.max_devices, + notes=payload.notes, + status="deploying", + ) + db.add(customer) + db.commit() + db.refresh(customer) + + logger.info("Customer %d (%s) created by %s.", customer.id, customer.subdomain, current_user.username) + + # Deploy in background + result = await netbird_service.deploy_customer(db, customer.id) + + response = customer.to_dict() + response["deployment"] = result + return response + + +@router.get("") +async def list_customers( + page: int = Query(default=1, ge=1), + per_page: int = Query(default=25, ge=1, le=100), + search: Optional[str] = Query(default=None), + status_filter: Optional[str] = Query(default=None, alias="status"), + current_user: User = Depends(get_current_user), + db: Session = Depends(get_db), +): + """List customers with pagination, search, and status filter. + + Args: + page: Page number (1-indexed). + per_page: Items per page. + search: Search in name, subdomain, email. + status_filter: Filter by status. + + Returns: + Paginated customer list with metadata. 
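+
+    Example:
+        Illustrative request (host and token are placeholders; the ``status``
+        query parameter maps to ``status_filter``)::
+
+            httpx.get(
+                "https://msp.example.com/api/customers",
+                params={"page": 1, "per_page": 25, "status": "active", "search": "acme"},
+                headers={"Authorization": "Bearer <token>"},
+            )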
+ """ + query = db.query(Customer) + + if search: + like_term = f"%{search}%" + query = query.filter( + (Customer.name.ilike(like_term)) + | (Customer.subdomain.ilike(like_term)) + | (Customer.email.ilike(like_term)) + | (Customer.company.ilike(like_term)) + ) + + if status_filter: + query = query.filter(Customer.status == status_filter) + + total = query.count() + customers = ( + query.order_by(Customer.created_at.desc()) + .offset((page - 1) * per_page) + .limit(per_page) + .all() + ) + + items = [] + for c in customers: + data = c.to_dict() + if c.deployment: + data["deployment"] = c.deployment.to_dict() + else: + data["deployment"] = None + items.append(data) + + return { + "items": items, + "total": total, + "page": page, + "per_page": per_page, + "pages": (total + per_page - 1) // per_page if total > 0 else 1, + } + + +@router.get("/{customer_id}") +async def get_customer( + customer_id: int, + current_user: User = Depends(get_current_user), + db: Session = Depends(get_db), +): + """Get detailed customer information including deployment and logs. + + Args: + customer_id: Customer ID. + + Returns: + Customer dict with deployment info and recent logs. + """ + customer = db.query(Customer).filter(Customer.id == customer_id).first() + if not customer: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="Customer not found.", + ) + + data = customer.to_dict() + data["deployment"] = customer.deployment.to_dict() if customer.deployment else None + data["logs"] = [ + log.to_dict() + for log in db.query(DeploymentLog) + .filter(DeploymentLog.customer_id == customer_id) + .order_by(DeploymentLog.created_at.desc()) + .limit(50) + .all() + ] + return data + + +@router.put("/{customer_id}") +async def update_customer( + customer_id: int, + payload: CustomerUpdate, + current_user: User = Depends(get_current_user), + db: Session = Depends(get_db), +): + """Update customer information. + + Args: + customer_id: Customer ID. + payload: Fields to update. + + Returns: + Updated customer dict. + """ + customer = db.query(Customer).filter(Customer.id == customer_id).first() + if not customer: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="Customer not found.", + ) + + update_data = payload.model_dump(exclude_none=True) + for field, value in update_data.items(): + if hasattr(customer, field): + setattr(customer, field, value) + + customer.updated_at = datetime.utcnow() + db.commit() + db.refresh(customer) + + logger.info("Customer %d updated by %s.", customer_id, current_user.username) + return customer.to_dict() + + +@router.delete("/{customer_id}") +async def delete_customer( + customer_id: int, + current_user: User = Depends(get_current_user), + db: Session = Depends(get_db), +): + """Delete a customer and clean up all resources. + + Removes containers, NPM proxy, instance directory, and database records. + + Args: + customer_id: Customer ID. + + Returns: + Confirmation message. 
+ """ + customer = db.query(Customer).filter(Customer.id == customer_id).first() + if not customer: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="Customer not found.", + ) + + # Undeploy first (containers, NPM, files) + await netbird_service.undeploy_customer(db, customer_id) + + # Delete customer record (cascades to deployment + logs) + db.delete(customer) + db.commit() + + logger.info("Customer %d deleted by %s.", customer_id, current_user.username) + return {"message": f"Customer {customer_id} deleted successfully."} diff --git a/app/routers/deployments.py b/app/routers/deployments.py new file mode 100644 index 0000000..69d1c0f --- /dev/null +++ b/app/routers/deployments.py @@ -0,0 +1,185 @@ +"""Deployment management API — start, stop, restart, logs, health for customers.""" + +import logging + +from fastapi import APIRouter, Depends, HTTPException, status +from sqlalchemy.orm import Session + +from app.database import get_db +from app.dependencies import get_current_user +from app.models import Customer, Deployment, User +from app.services import docker_service, netbird_service + +logger = logging.getLogger(__name__) +router = APIRouter() + + +@router.post("/{customer_id}/deploy") +async def manual_deploy( + customer_id: int, + current_user: User = Depends(get_current_user), + db: Session = Depends(get_db), +): + """Manually trigger deployment for a customer. + + Use this to re-deploy a customer whose previous deployment failed. + + Args: + customer_id: Customer ID. + + Returns: + Deployment result dict. + """ + customer = db.query(Customer).filter(Customer.id == customer_id).first() + if not customer: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Customer not found.") + + # Remove existing deployment if present + existing = db.query(Deployment).filter(Deployment.customer_id == customer_id).first() + if existing: + await netbird_service.undeploy_customer(db, customer_id) + + result = await netbird_service.deploy_customer(db, customer_id) + if not result.get("success"): + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=result.get("error", "Deployment failed."), + ) + return result + + +@router.post("/{customer_id}/start") +async def start_customer( + customer_id: int, + current_user: User = Depends(get_current_user), + db: Session = Depends(get_db), +): + """Start containers for a customer. + + Args: + customer_id: Customer ID. + + Returns: + Result dict. + """ + _require_customer(db, customer_id) + result = netbird_service.start_customer(db, customer_id) + if not result.get("success"): + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=result.get("error", "Failed to start containers."), + ) + return result + + +@router.post("/{customer_id}/stop") +async def stop_customer( + customer_id: int, + current_user: User = Depends(get_current_user), + db: Session = Depends(get_db), +): + """Stop containers for a customer. + + Args: + customer_id: Customer ID. + + Returns: + Result dict. 
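+
+    Example:
+        Illustrative request (host, token, and customer ID are placeholders)::
+
+            httpx.post(
+                "https://msp.example.com/api/customers/42/stop",
+                headers={"Authorization": "Bearer <token>"},
+            )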
+ """ + _require_customer(db, customer_id) + result = netbird_service.stop_customer(db, customer_id) + if not result.get("success"): + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=result.get("error", "Failed to stop containers."), + ) + return result + + +@router.post("/{customer_id}/restart") +async def restart_customer( + customer_id: int, + current_user: User = Depends(get_current_user), + db: Session = Depends(get_db), +): + """Restart containers for a customer. + + Args: + customer_id: Customer ID. + + Returns: + Result dict. + """ + _require_customer(db, customer_id) + result = netbird_service.restart_customer(db, customer_id) + if not result.get("success"): + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=result.get("error", "Failed to restart containers."), + ) + return result + + +@router.get("/{customer_id}/logs") +async def get_customer_logs( + customer_id: int, + tail: int = 200, + current_user: User = Depends(get_current_user), + db: Session = Depends(get_db), +): + """Get container logs for a customer. + + Args: + customer_id: Customer ID. + tail: Number of log lines per container. + + Returns: + Dict mapping container name to log text. + """ + _require_customer(db, customer_id) + deployment = db.query(Deployment).filter(Deployment.customer_id == customer_id).first() + if not deployment: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="No deployment found for this customer.", + ) + + logs = docker_service.get_all_container_logs(deployment.container_prefix, tail=tail) + return {"logs": logs} + + +@router.get("/{customer_id}/health") +async def check_customer_health( + customer_id: int, + current_user: User = Depends(get_current_user), + db: Session = Depends(get_db), +): + """Run a health check on a customer's deployment. + + Args: + customer_id: Customer ID. + + Returns: + Health check results. + """ + _require_customer(db, customer_id) + return netbird_service.get_customer_health(db, customer_id) + + +def _require_customer(db: Session, customer_id: int) -> Customer: + """Helper to fetch a customer or raise 404. + + Args: + db: Database session. + customer_id: Customer ID. + + Returns: + Customer ORM object. + + Raises: + HTTPException: If customer not found. + """ + customer = db.query(Customer).filter(Customer.id == customer_id).first() + if not customer: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Customer not found.") + return customer diff --git a/app/routers/monitoring.py b/app/routers/monitoring.py new file mode 100644 index 0000000..1fa31ee --- /dev/null +++ b/app/routers/monitoring.py @@ -0,0 +1,116 @@ +"""Monitoring API — system overview, customer statuses, host resources.""" + +import logging +import platform +from typing import Any + +import psutil +from fastapi import APIRouter, Depends +from sqlalchemy.orm import Session + +from app.database import get_db +from app.dependencies import get_current_user +from app.models import Customer, Deployment, User +from app.services import docker_service + +logger = logging.getLogger(__name__) +router = APIRouter() + + +@router.get("/status") +async def system_status( + current_user: User = Depends(get_current_user), + db: Session = Depends(get_db), +) -> dict[str, Any]: + """System overview with aggregated customer statistics. + + Returns: + Counts by status and total customers. 
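+
+    Example:
+        Illustrative response shape (counts are placeholders)::
+
+            {
+                "total_customers": 12,
+                "active": 9,
+                "inactive": 1,
+                "deploying": 1,
+                "error": 1,
+            }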
+ """ + total = db.query(Customer).count() + active = db.query(Customer).filter(Customer.status == "active").count() + inactive = db.query(Customer).filter(Customer.status == "inactive").count() + deploying = db.query(Customer).filter(Customer.status == "deploying").count() + error = db.query(Customer).filter(Customer.status == "error").count() + + return { + "total_customers": total, + "active": active, + "inactive": inactive, + "deploying": deploying, + "error": error, + } + + +@router.get("/customers") +async def all_customers_status( + current_user: User = Depends(get_current_user), + db: Session = Depends(get_db), +) -> list[dict[str, Any]]: + """Get deployment status for every customer. + + Returns: + List of dicts with customer info and container statuses. + """ + customers = ( + db.query(Customer) + .order_by(Customer.id) + .all() + ) + + results: list[dict[str, Any]] = [] + for c in customers: + entry: dict[str, Any] = { + "id": c.id, + "name": c.name, + "subdomain": c.subdomain, + "status": c.status, + } + if c.deployment: + containers = docker_service.get_container_status(c.deployment.container_prefix) + entry["deployment_status"] = c.deployment.deployment_status + entry["containers"] = containers + entry["relay_udp_port"] = c.deployment.relay_udp_port + entry["setup_url"] = c.deployment.setup_url + else: + entry["deployment_status"] = None + entry["containers"] = [] + results.append(entry) + + return results + + +@router.get("/resources") +async def host_resources( + current_user: User = Depends(get_current_user), +) -> dict[str, Any]: + """Return host system resource usage. + + Returns: + CPU, memory, disk, and network information. + """ + cpu_percent = psutil.cpu_percent(interval=1) + cpu_count = psutil.cpu_count() + mem = psutil.virtual_memory() + disk = psutil.disk_usage("/") + + return { + "hostname": platform.node(), + "os": f"{platform.system()} {platform.release()}", + "cpu": { + "percent": cpu_percent, + "count": cpu_count, + }, + "memory": { + "total_gb": round(mem.total / (1024 ** 3), 1), + "used_gb": round(mem.used / (1024 ** 3), 1), + "available_gb": round(mem.available / (1024 ** 3), 1), + "percent": mem.percent, + }, + "disk": { + "total_gb": round(disk.total / (1024 ** 3), 1), + "used_gb": round(disk.used / (1024 ** 3), 1), + "free_gb": round(disk.free / (1024 ** 3), 1), + "percent": disk.percent, + }, + } diff --git a/app/routers/settings.py b/app/routers/settings.py new file mode 100644 index 0000000..b96c1e7 --- /dev/null +++ b/app/routers/settings.py @@ -0,0 +1,113 @@ +"""System configuration API — read/write all settings from the database. + +There is no .env file. Every setting lives in the ``system_config`` table +(singleton row with id=1) and is editable via the Web UI settings page. +""" + +import logging +from datetime import datetime + +from fastapi import APIRouter, Depends, HTTPException, status +from sqlalchemy.orm import Session + +from app.database import get_db +from app.dependencies import get_current_user +from app.models import SystemConfig, User +from app.services import npm_service +from app.utils.config import get_system_config +from app.utils.security import encrypt_value +from app.utils.validators import SystemConfigUpdate + +logger = logging.getLogger(__name__) +router = APIRouter() + + +@router.get("/system") +async def get_settings( + current_user: User = Depends(get_current_user), + db: Session = Depends(get_db), +): + """Return all system configuration values (token masked). + + Returns: + System config dict. 
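+
+    Example:
+        Illustrative response excerpt (values are placeholders; the raw NPM
+        token is never returned, only ``npm_api_token_set``)::
+
+            {
+                "base_domain": "example.com",
+                "admin_email": "admin@example.com",
+                "npm_api_url": "http://npm:81/api",
+                "npm_api_token_set": True,
+                "relay_base_port": 3478,
+            }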
+ """ + row = db.query(SystemConfig).filter(SystemConfig.id == 1).first() + if not row: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="System configuration not initialized. Run install.sh first.", + ) + return row.to_dict() + + +@router.put("/system") +async def update_settings( + payload: SystemConfigUpdate, + current_user: User = Depends(get_current_user), + db: Session = Depends(get_db), +): + """Update system configuration values. + + Only provided (non-None) fields are updated. The NPM API token is + encrypted before storage. + + Args: + payload: Fields to update. + + Returns: + Updated system config dict. + """ + row = db.query(SystemConfig).filter(SystemConfig.id == 1).first() + if not row: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="System configuration not initialized.", + ) + + update_data = payload.model_dump(exclude_none=True) + + # Handle NPM token encryption + if "npm_api_token" in update_data: + raw_token = update_data.pop("npm_api_token") + row.npm_api_token_encrypted = encrypt_value(raw_token) + + for field, value in update_data.items(): + if hasattr(row, field): + setattr(row, field, value) + + row.updated_at = datetime.utcnow() + db.commit() + db.refresh(row) + + logger.info("System configuration updated by %s.", current_user.username) + return row.to_dict() + + +@router.get("/test-npm") +async def test_npm( + current_user: User = Depends(get_current_user), + db: Session = Depends(get_db), +): + """Test connectivity to the Nginx Proxy Manager API. + + Loads the NPM URL and decrypted token from the database and attempts + to list proxy hosts. + + Returns: + Dict with ``ok`` and ``message``. + """ + config = get_system_config(db) + if not config: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="System configuration not initialized.", + ) + if not config.npm_api_url or not config.npm_api_token: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="NPM API URL or token not configured.", + ) + + result = await npm_service.test_npm_connection(config.npm_api_url, config.npm_api_token) + return result diff --git a/app/services/__init__.py b/app/services/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/services/docker_service.py b/app/services/docker_service.py new file mode 100644 index 0000000..341c988 --- /dev/null +++ b/app/services/docker_service.py @@ -0,0 +1,334 @@ +"""Docker container management via the Python Docker SDK. + +Responsible for creating, starting, stopping, restarting, and removing +per-customer Docker Compose stacks. Also provides log retrieval and +container health/status information. +""" + +import logging +import os +import subprocess +import time +from typing import Any, Optional + +import docker +from docker.errors import DockerException, NotFound + +logger = logging.getLogger(__name__) + + +def _get_client() -> docker.DockerClient: + """Return a Docker client connected via the Unix socket. + + Returns: + docker.DockerClient instance. + """ + return docker.from_env() + + +def compose_up(instance_dir: str, project_name: str) -> bool: + """Run ``docker compose up -d`` for a customer instance. + + Args: + instance_dir: Absolute path to the customer's instance directory. + project_name: Docker Compose project name (e.g. ``netbird-kunde5``). + + Returns: + True on success. + + Raises: + RuntimeError: If ``docker compose up`` fails. 
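+
+    Example:
+        Illustrative call (instance directory and project name are placeholders)::
+
+            compose_up("/opt/netbird-instances/kunde5", "netbird-kunde5")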
+ """ + compose_file = os.path.join(instance_dir, "docker-compose.yml") + if not os.path.isfile(compose_file): + raise FileNotFoundError(f"docker-compose.yml not found at {compose_file}") + + cmd = [ + "docker", "compose", + "-f", compose_file, + "-p", project_name, + "up", "-d", "--remove-orphans", + ] + logger.info("Running: %s", " ".join(cmd)) + result = subprocess.run(cmd, capture_output=True, text=True, timeout=120) + + if result.returncode != 0: + logger.error("docker compose up failed: %s", result.stderr) + raise RuntimeError(f"docker compose up failed: {result.stderr}") + + logger.info("docker compose up succeeded for %s", project_name) + return True + + +def compose_down(instance_dir: str, project_name: str, remove_volumes: bool = False) -> bool: + """Run ``docker compose down`` for a customer instance. + + Args: + instance_dir: Absolute path to the customer's instance directory. + project_name: Docker Compose project name. + remove_volumes: Whether to also remove volumes. + + Returns: + True on success. + """ + compose_file = os.path.join(instance_dir, "docker-compose.yml") + cmd = [ + "docker", "compose", + "-f", compose_file, + "-p", project_name, + "down", + ] + if remove_volumes: + cmd.append("-v") + + logger.info("Running: %s", " ".join(cmd)) + result = subprocess.run(cmd, capture_output=True, text=True, timeout=120) + + if result.returncode != 0: + logger.warning("docker compose down returned non-zero: %s", result.stderr) + return True + + +def compose_stop(instance_dir: str, project_name: str) -> bool: + """Run ``docker compose stop`` for a customer instance. + + Args: + instance_dir: Absolute path to the customer's instance directory. + project_name: Docker Compose project name. + + Returns: + True on success. + """ + compose_file = os.path.join(instance_dir, "docker-compose.yml") + cmd = [ + "docker", "compose", + "-f", compose_file, + "-p", project_name, + "stop", + ] + logger.info("Running: %s", " ".join(cmd)) + result = subprocess.run(cmd, capture_output=True, text=True, timeout=120) + return result.returncode == 0 + + +def compose_start(instance_dir: str, project_name: str) -> bool: + """Run ``docker compose start`` for a customer instance. + + Args: + instance_dir: Absolute path to the customer's instance directory. + project_name: Docker Compose project name. + + Returns: + True on success. + """ + compose_file = os.path.join(instance_dir, "docker-compose.yml") + cmd = [ + "docker", "compose", + "-f", compose_file, + "-p", project_name, + "start", + ] + logger.info("Running: %s", " ".join(cmd)) + result = subprocess.run(cmd, capture_output=True, text=True, timeout=120) + return result.returncode == 0 + + +def compose_restart(instance_dir: str, project_name: str) -> bool: + """Run ``docker compose restart`` for a customer instance. + + Args: + instance_dir: Absolute path to the customer's instance directory. + project_name: Docker Compose project name. + + Returns: + True on success. + """ + compose_file = os.path.join(instance_dir, "docker-compose.yml") + cmd = [ + "docker", "compose", + "-f", compose_file, + "-p", project_name, + "restart", + ] + logger.info("Running: %s", " ".join(cmd)) + result = subprocess.run(cmd, capture_output=True, text=True, timeout=120) + return result.returncode == 0 + + +def get_container_status(container_prefix: str) -> list[dict[str, Any]]: + """Get the status of all containers matching a prefix. + + Args: + container_prefix: Container name prefix (e.g. ``netbird-kunde5``). 
+ + Returns: + List of dicts with container name, status, and health info. + """ + client = _get_client() + results: list[dict[str, Any]] = [] + try: + containers = client.containers.list(all=True, filters={"name": container_prefix}) + for c in containers: + health = "N/A" + if c.attrs.get("State", {}).get("Health"): + health = c.attrs["State"]["Health"].get("Status", "N/A") + results.append({ + "name": c.name, + "status": c.status, + "health": health, + "image": str(c.image.tags[0]) if c.image.tags else str(c.image.id[:12]), + "created": c.attrs.get("Created", ""), + }) + except DockerException as exc: + logger.error("Failed to get container status: %s", exc) + return results + + +def get_container_logs(container_name: str, tail: int = 200) -> str: + """Retrieve recent logs from a container. + + Args: + container_name: Full container name. + tail: Number of log lines to retrieve. + + Returns: + Log text. + """ + client = _get_client() + try: + container = client.containers.get(container_name) + return container.logs(tail=tail, timestamps=True).decode("utf-8", errors="replace") + except NotFound: + return f"Container {container_name} not found." + except DockerException as exc: + return f"Error retrieving logs: {exc}" + + +def get_all_container_logs(container_prefix: str, tail: int = 100) -> dict[str, str]: + """Get logs for all containers matching a prefix. + + Args: + container_prefix: Container name prefix. + tail: Lines per container. + + Returns: + Dict mapping container name to log text. + """ + client = _get_client() + logs: dict[str, str] = {} + try: + containers = client.containers.list(all=True, filters={"name": container_prefix}) + for c in containers: + try: + logs[c.name] = c.logs(tail=tail, timestamps=True).decode( + "utf-8", errors="replace" + ) + except DockerException: + logs[c.name] = "Error retrieving logs." + except DockerException as exc: + logger.error("Failed to list containers: %s", exc) + return logs + + +def wait_for_healthy(container_prefix: str, timeout: int = 60) -> bool: + """Wait until all containers with the given prefix are running. + + Args: + container_prefix: Container name prefix. + timeout: Maximum seconds to wait. + + Returns: + True if all containers started within timeout. + """ + client = _get_client() + deadline = time.time() + timeout + + while time.time() < deadline: + try: + containers = client.containers.list( + all=True, filters={"name": container_prefix} + ) + if not containers: + time.sleep(2) + continue + + all_running = all(c.status == "running" for c in containers) + if all_running: + logger.info("All containers for %s are running.", container_prefix) + return True + except DockerException as exc: + logger.warning("Health check error: %s", exc) + + time.sleep(3) + + logger.warning("Timeout waiting for %s containers to start.", container_prefix) + return False + + +def get_docker_stats(container_prefix: str) -> list[dict[str, Any]]: + """Retrieve resource usage stats for containers matching a prefix. + + Args: + container_prefix: Container name prefix. + + Returns: + List of dicts with CPU, memory, and network stats. 
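+
+    Example:
+        Illustrative entry (values are placeholders)::
+
+            {
+                "name": "netbird-kunde5-management",
+                "cpu_percent": 1.25,
+                "memory_usage_mb": 84.3,
+                "memory_limit_mb": 512.0,
+                "memory_percent": 16.5,
+            }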
+ """ + client = _get_client() + stats_list: list[dict[str, Any]] = [] + try: + containers = client.containers.list(filters={"name": container_prefix}) + for c in containers: + try: + raw = c.stats(stream=False) + cpu_delta = ( + raw.get("cpu_stats", {}).get("cpu_usage", {}).get("total_usage", 0) + - raw.get("precpu_stats", {}).get("cpu_usage", {}).get("total_usage", 0) + ) + system_delta = ( + raw.get("cpu_stats", {}).get("system_cpu_usage", 0) + - raw.get("precpu_stats", {}).get("system_cpu_usage", 0) + ) + num_cpus = len( + raw.get("cpu_stats", {}).get("cpu_usage", {}).get("percpu_usage", [1]) + ) + cpu_pct = 0.0 + if system_delta > 0: + cpu_pct = (cpu_delta / system_delta) * num_cpus * 100 + + mem_usage = raw.get("memory_stats", {}).get("usage", 0) + mem_limit = raw.get("memory_stats", {}).get("limit", 1) + + stats_list.append({ + "name": c.name, + "cpu_percent": round(cpu_pct, 2), + "memory_usage_mb": round(mem_usage / 1024 / 1024, 1), + "memory_limit_mb": round(mem_limit / 1024 / 1024, 1), + "memory_percent": round((mem_usage / mem_limit) * 100, 1) if mem_limit else 0, + }) + except DockerException: + stats_list.append({"name": c.name, "error": "Failed to get stats"}) + except DockerException as exc: + logger.error("Failed to get docker stats: %s", exc) + return stats_list + + +def remove_instance_containers(container_prefix: str) -> bool: + """Force-remove all containers matching a prefix. + + Args: + container_prefix: Container name prefix. + + Returns: + True if removal succeeded. + """ + client = _get_client() + try: + containers = client.containers.list(all=True, filters={"name": container_prefix}) + for c in containers: + logger.info("Removing container %s", c.name) + c.remove(force=True) + return True + except DockerException as exc: + logger.error("Failed to remove containers: %s", exc) + return False diff --git a/app/services/netbird_service.py b/app/services/netbird_service.py new file mode 100644 index 0000000..decc152 --- /dev/null +++ b/app/services/netbird_service.py @@ -0,0 +1,396 @@ +"""NetBird deployment orchestration service. + +Coordinates the full customer deployment lifecycle: +1. Validate inputs +2. Allocate ports +3. Generate configs from Jinja2 templates +4. Create instance directory and write files +5. Start Docker containers +6. Wait for health checks +7. Create NPM proxy hosts +8. Update database + +Includes comprehensive rollback on failure. +""" + +import logging +import os +import shutil +from datetime import datetime +from typing import Any + +from jinja2 import Environment, FileSystemLoader +from sqlalchemy.orm import Session + +from app.models import Customer, Deployment, DeploymentLog, SystemConfig +from app.services import docker_service, npm_service, port_manager +from app.utils.config import get_system_config +from app.utils.security import encrypt_value, generate_relay_secret + +logger = logging.getLogger(__name__) + +# Path to Jinja2 templates +TEMPLATE_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "templates") + + +def _get_jinja_env() -> Environment: + """Create a Jinja2 environment for template rendering.""" + return Environment( + loader=FileSystemLoader(TEMPLATE_DIR), + keep_trailing_newline=True, + ) + + +def _log_action( + db: Session, customer_id: int, action: str, status: str, message: str, details: str = "" +) -> None: + """Write a deployment log entry. + + Args: + db: Active session. + customer_id: The customer this log belongs to. + action: Action name (e.g. ``deploy``, ``stop``). 
+ status: ``success``, ``error``, or ``info``. + message: Human-readable message. + details: Additional details (optional). + """ + log = DeploymentLog( + customer_id=customer_id, + action=action, + status=status, + message=message, + details=details, + ) + db.add(log) + db.commit() + + +async def deploy_customer(db: Session, customer_id: int) -> dict[str, Any]: + """Execute the full deployment workflow for a customer. + + Args: + db: Active session. + customer_id: Customer to deploy. + + Returns: + Dict with ``success``, ``setup_url``, or ``error``. + """ + customer = db.query(Customer).filter(Customer.id == customer_id).first() + if not customer: + return {"success": False, "error": "Customer not found."} + + config = get_system_config(db) + if not config: + return {"success": False, "error": "System not configured. Please set up system settings first."} + + # Update status to deploying + customer.status = "deploying" + db.commit() + + _log_action(db, customer_id, "deploy", "info", "Deployment started.") + + allocated_port = None + instance_dir = None + container_prefix = f"netbird-kunde{customer_id}" + + try: + # Step 1: Allocate relay UDP port + allocated_port = port_manager.allocate_port(db, config.relay_base_port) + _log_action(db, customer_id, "deploy", "info", f"Allocated UDP port {allocated_port}.") + + # Step 2: Generate relay secret + relay_secret = generate_relay_secret() + + # Step 3: Create instance directory + instance_dir = os.path.join(config.data_dir, f"kunde{customer_id}") + os.makedirs(instance_dir, exist_ok=True) + os.makedirs(os.path.join(instance_dir, "data", "management"), exist_ok=True) + os.makedirs(os.path.join(instance_dir, "data", "signal"), exist_ok=True) + _log_action(db, customer_id, "deploy", "info", f"Created directory {instance_dir}.") + + # Step 4: Render templates + jinja_env = _get_jinja_env() + template_vars = { + "customer_id": customer_id, + "subdomain": customer.subdomain, + "base_domain": config.base_domain, + "instance_dir": instance_dir, + "relay_udp_port": allocated_port, + "relay_secret": relay_secret, + "netbird_management_image": config.netbird_management_image, + "netbird_signal_image": config.netbird_signal_image, + "netbird_relay_image": config.netbird_relay_image, + "netbird_dashboard_image": config.netbird_dashboard_image, + "docker_network": config.docker_network, + } + + # docker-compose.yml + dc_template = jinja_env.get_template("docker-compose.yml.j2") + dc_content = dc_template.render(**template_vars) + with open(os.path.join(instance_dir, "docker-compose.yml"), "w") as f: + f.write(dc_content) + + # management.json + mgmt_template = jinja_env.get_template("management.json.j2") + mgmt_content = mgmt_template.render(**template_vars) + with open(os.path.join(instance_dir, "management.json"), "w") as f: + f.write(mgmt_content) + + # relay.env + relay_template = jinja_env.get_template("relay.env.j2") + relay_content = relay_template.render(**template_vars) + with open(os.path.join(instance_dir, "relay.env"), "w") as f: + f.write(relay_content) + + _log_action(db, customer_id, "deploy", "info", "Configuration files generated.") + + # Step 5: Start Docker containers + docker_service.compose_up(instance_dir, container_prefix) + _log_action(db, customer_id, "deploy", "info", "Docker containers started.") + + # Step 6: Wait for containers to be healthy + healthy = docker_service.wait_for_healthy(container_prefix, timeout=60) + if not healthy: + _log_action( + db, customer_id, "deploy", "error", + "Containers did not become healthy within 60 
seconds." + ) + # Don't fail completely — containers might still come up + + # Step 7: Create NPM proxy host + domain = f"{customer.subdomain}.{config.base_domain}" + dashboard_container = f"netbird-kunde{customer_id}-dashboard" + npm_result = await npm_service.create_proxy_host( + api_url=config.npm_api_url, + api_token=config.npm_api_token, + domain=domain, + forward_host=dashboard_container, + forward_port=80, + admin_email=config.admin_email, + subdomain=customer.subdomain, + customer_id=customer_id, + ) + + npm_proxy_id = npm_result.get("proxy_id") + if npm_result.get("error"): + _log_action( + db, customer_id, "deploy", "error", + f"NPM proxy creation failed: {npm_result['error']}", + ) + # Continue — deployment works without NPM, admin can fix later + + # Step 8: Create deployment record + setup_url = f"https://{domain}" + deployment = Deployment( + customer_id=customer_id, + container_prefix=container_prefix, + relay_udp_port=allocated_port, + npm_proxy_id=npm_proxy_id, + relay_secret=encrypt_value(relay_secret), + setup_url=setup_url, + deployment_status="running", + deployed_at=datetime.utcnow(), + ) + db.add(deployment) + + customer.status = "active" + db.commit() + + _log_action(db, customer_id, "deploy", "success", f"Deployment complete. URL: {setup_url}") + + return {"success": True, "setup_url": setup_url} + + except Exception as exc: + logger.exception("Deployment failed for customer %d", customer_id) + + # Rollback: stop containers if they were started + try: + docker_service.compose_down( + instance_dir or os.path.join(config.data_dir, f"kunde{customer_id}"), + container_prefix, + remove_volumes=True, + ) + except Exception: + pass + + # Rollback: remove instance directory + if instance_dir and os.path.isdir(instance_dir): + try: + shutil.rmtree(instance_dir) + except Exception: + pass + + customer.status = "error" + db.commit() + + _log_action( + db, customer_id, "deploy", "error", + f"Deployment failed: {exc}", + details=str(exc), + ) + + return {"success": False, "error": str(exc)} + + +async def undeploy_customer(db: Session, customer_id: int) -> dict[str, Any]: + """Remove all resources for a customer deployment. + + Args: + db: Active session. + customer_id: Customer to undeploy. + + Returns: + Dict with ``success`` bool. 
+ """ + customer = db.query(Customer).filter(Customer.id == customer_id).first() + if not customer: + return {"success": False, "error": "Customer not found."} + + deployment = db.query(Deployment).filter(Deployment.customer_id == customer_id).first() + config = get_system_config(db) + + if deployment and config: + instance_dir = os.path.join(config.data_dir, f"kunde{customer_id}") + + # Stop and remove containers + try: + docker_service.compose_down(instance_dir, deployment.container_prefix, remove_volumes=True) + _log_action(db, customer_id, "undeploy", "info", "Containers removed.") + except Exception as exc: + _log_action(db, customer_id, "undeploy", "error", f"Container removal error: {exc}") + + # Remove NPM proxy host + if deployment.npm_proxy_id and config.npm_api_token: + try: + await npm_service.delete_proxy_host( + config.npm_api_url, config.npm_api_token, deployment.npm_proxy_id + ) + _log_action(db, customer_id, "undeploy", "info", "NPM proxy host removed.") + except Exception as exc: + _log_action(db, customer_id, "undeploy", "error", f"NPM removal error: {exc}") + + # Remove instance directory + if os.path.isdir(instance_dir): + try: + shutil.rmtree(instance_dir) + _log_action(db, customer_id, "undeploy", "info", "Instance directory removed.") + except Exception as exc: + _log_action(db, customer_id, "undeploy", "error", f"Directory removal error: {exc}") + + # Remove deployment record + db.delete(deployment) + db.commit() + + _log_action(db, customer_id, "undeploy", "success", "Undeployment complete.") + return {"success": True} + + +def stop_customer(db: Session, customer_id: int) -> dict[str, Any]: + """Stop containers for a customer. + + Args: + db: Active session. + customer_id: Customer whose containers to stop. + + Returns: + Dict with ``success`` bool. + """ + deployment = db.query(Deployment).filter(Deployment.customer_id == customer_id).first() + config = get_system_config(db) + if not deployment or not config: + return {"success": False, "error": "Deployment or config not found."} + + instance_dir = os.path.join(config.data_dir, f"kunde{customer_id}") + ok = docker_service.compose_stop(instance_dir, deployment.container_prefix) + if ok: + deployment.deployment_status = "stopped" + db.commit() + _log_action(db, customer_id, "stop", "success", "Containers stopped.") + else: + _log_action(db, customer_id, "stop", "error", "Failed to stop containers.") + return {"success": ok} + + +def start_customer(db: Session, customer_id: int) -> dict[str, Any]: + """Start containers for a customer. + + Args: + db: Active session. + customer_id: Customer whose containers to start. + + Returns: + Dict with ``success`` bool. + """ + deployment = db.query(Deployment).filter(Deployment.customer_id == customer_id).first() + config = get_system_config(db) + if not deployment or not config: + return {"success": False, "error": "Deployment or config not found."} + + instance_dir = os.path.join(config.data_dir, f"kunde{customer_id}") + ok = docker_service.compose_start(instance_dir, deployment.container_prefix) + if ok: + deployment.deployment_status = "running" + db.commit() + _log_action(db, customer_id, "start", "success", "Containers started.") + else: + _log_action(db, customer_id, "start", "error", "Failed to start containers.") + return {"success": ok} + + +def restart_customer(db: Session, customer_id: int) -> dict[str, Any]: + """Restart containers for a customer. + + Args: + db: Active session. + customer_id: Customer whose containers to restart. 
+ + Returns: + Dict with ``success`` bool. + """ + deployment = db.query(Deployment).filter(Deployment.customer_id == customer_id).first() + config = get_system_config(db) + if not deployment or not config: + return {"success": False, "error": "Deployment or config not found."} + + instance_dir = os.path.join(config.data_dir, f"kunde{customer_id}") + ok = docker_service.compose_restart(instance_dir, deployment.container_prefix) + if ok: + deployment.deployment_status = "running" + db.commit() + _log_action(db, customer_id, "restart", "success", "Containers restarted.") + else: + _log_action(db, customer_id, "restart", "error", "Failed to restart containers.") + return {"success": ok} + + +def get_customer_health(db: Session, customer_id: int) -> dict[str, Any]: + """Check health of a customer's deployment. + + Args: + db: Active session. + customer_id: Customer ID. + + Returns: + Dict with container statuses and overall health. + """ + deployment = db.query(Deployment).filter(Deployment.customer_id == customer_id).first() + if not deployment: + return {"healthy": False, "error": "No deployment found.", "containers": []} + + containers = docker_service.get_container_status(deployment.container_prefix) + all_running = all(c["status"] == "running" for c in containers) if containers else False + + # Update last health check time + deployment.last_health_check = datetime.utcnow() + if all_running: + deployment.deployment_status = "running" + elif containers: + deployment.deployment_status = "failed" + db.commit() + + return { + "healthy": all_running, + "containers": containers, + "deployment_status": deployment.deployment_status, + "last_check": deployment.last_health_check.isoformat(), + } diff --git a/app/services/npm_service.py b/app/services/npm_service.py new file mode 100644 index 0000000..fb81683 --- /dev/null +++ b/app/services/npm_service.py @@ -0,0 +1,234 @@ +"""Nginx Proxy Manager API integration. + +Creates, updates, and deletes proxy host entries so each customer's NetBird +dashboard is accessible at ``{subdomain}.{base_domain}`` with automatic +Let's Encrypt SSL certificates. +""" + +import logging +from typing import Any, Optional + +import httpx + +logger = logging.getLogger(__name__) + +# Timeout for NPM API calls (seconds) +NPM_TIMEOUT = 30 + + +async def test_npm_connection(api_url: str, api_token: str) -> dict[str, Any]: + """Test connectivity to the Nginx Proxy Manager API. + + Args: + api_url: NPM API base URL (e.g. ``http://npm:81/api``). + api_token: Bearer token for authentication. + + Returns: + Dict with ``ok`` (bool) and ``message`` (str). + """ + headers = {"Authorization": f"Bearer {api_token}"} + try: + async with httpx.AsyncClient(timeout=NPM_TIMEOUT) as client: + resp = await client.get(f"{api_url}/nginx/proxy-hosts", headers=headers) + if resp.status_code == 200: + count = len(resp.json()) + return {"ok": True, "message": f"Connected. {count} proxy hosts found."} + return { + "ok": False, + "message": f"NPM returned status {resp.status_code}: {resp.text[:200]}", + } + except httpx.ConnectError: + return {"ok": False, "message": "Connection refused. 
Is NPM running?"} + except httpx.TimeoutException: + return {"ok": False, "message": "Connection timed out."} + except Exception as exc: + return {"ok": False, "message": f"Unexpected error: {exc}"} + + +async def create_proxy_host( + api_url: str, + api_token: str, + domain: str, + forward_host: str, + forward_port: int = 80, + admin_email: str = "", + subdomain: str = "", + customer_id: int = 0, +) -> dict[str, Any]: + """Create a proxy host entry in NPM with SSL for a customer. + + The proxy routes traffic as follows: + - ``/`` -> dashboard container (port 80) + - ``/api`` -> management container (port 80) + - ``/signalexchange.*`` -> signal container (port 80) + - ``/relay`` -> relay container (port 80) + + Args: + api_url: NPM API base URL. + api_token: Bearer token. + domain: Full domain (e.g. ``kunde1.example.com``). + forward_host: Container name for the dashboard. + forward_port: Port to forward to (default 80). + admin_email: Email for Let's Encrypt. + subdomain: Customer subdomain for building container names. + customer_id: Customer ID for building container names. + + Returns: + Dict with ``proxy_id`` on success or ``error`` on failure. + """ + headers = { + "Authorization": f"Bearer {api_token}", + "Content-Type": "application/json", + } + + # Build advanced Nginx config to route sub-paths to different containers + mgmt_container = f"netbird-kunde{customer_id}-management" + signal_container = f"netbird-kunde{customer_id}-signal" + relay_container = f"netbird-kunde{customer_id}-relay" + + advanced_config = f""" +# NetBird Management API +location /api {{ + proxy_pass http://{mgmt_container}:80; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; +}} + +# NetBird Signal (gRPC-Web) +location /signalexchange. 
{{ + grpc_pass grpc://{signal_container}:80; + grpc_set_header Host $host; +}} + +# NetBird Relay (WebSocket) +location /relay {{ + proxy_pass http://{relay_container}:80; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; +}} +""" + + payload = { + "domain_names": [domain], + "forward_scheme": "http", + "forward_host": forward_host, + "forward_port": forward_port, + "certificate_id": 0, + "ssl_forced": True, + "hsts_enabled": True, + "hsts_subdomains": False, + "http2_support": True, + "block_exploits": True, + "allow_websocket_upgrade": True, + "access_list_id": 0, + "advanced_config": advanced_config.strip(), + "meta": { + "letsencrypt_agree": True, + "letsencrypt_email": admin_email, + "dns_challenge": False, + }, + } + + try: + async with httpx.AsyncClient(timeout=NPM_TIMEOUT) as client: + resp = await client.post( + f"{api_url}/nginx/proxy-hosts", json=payload, headers=headers + ) + if resp.status_code in (200, 201): + data = resp.json() + proxy_id = data.get("id") + logger.info("Created NPM proxy host %s (id=%s)", domain, proxy_id) + + # Request SSL certificate + await _request_ssl(client, api_url, headers, proxy_id, domain, admin_email) + + return {"proxy_id": proxy_id} + else: + error_msg = f"NPM returned {resp.status_code}: {resp.text[:300]}" + logger.error("Failed to create proxy host: %s", error_msg) + return {"error": error_msg} + except Exception as exc: + logger.error("NPM API error: %s", exc) + return {"error": str(exc)} + + +async def _request_ssl( + client: httpx.AsyncClient, + api_url: str, + headers: dict, + proxy_id: int, + domain: str, + admin_email: str, +) -> None: + """Request a Let's Encrypt SSL certificate for a proxy host. + + Args: + client: httpx client. + api_url: NPM API base URL. + headers: Auth headers. + proxy_id: The proxy host ID. + domain: The domain to certify. + admin_email: Contact email for LE. + """ + ssl_payload = { + "domain_names": [domain], + "meta": { + "letsencrypt_agree": True, + "letsencrypt_email": admin_email, + "dns_challenge": False, + }, + } + try: + resp = await client.post( + f"{api_url}/nginx/certificates", json=ssl_payload, headers=headers + ) + if resp.status_code in (200, 201): + cert_id = resp.json().get("id") + # Assign certificate to proxy host + await client.put( + f"{api_url}/nginx/proxy-hosts/{proxy_id}", + json={"certificate_id": cert_id}, + headers=headers, + ) + logger.info("SSL certificate assigned to proxy host %s", proxy_id) + else: + logger.warning("SSL request returned %s: %s", resp.status_code, resp.text[:200]) + except Exception as exc: + logger.warning("SSL certificate request failed: %s", exc) + + +async def delete_proxy_host( + api_url: str, api_token: str, proxy_id: int +) -> bool: + """Delete a proxy host from NPM. + + Args: + api_url: NPM API base URL. + api_token: Bearer token. + proxy_id: The proxy host ID to delete. + + Returns: + True on success. 
+ """ + headers = {"Authorization": f"Bearer {api_token}"} + try: + async with httpx.AsyncClient(timeout=NPM_TIMEOUT) as client: + resp = await client.delete( + f"{api_url}/nginx/proxy-hosts/{proxy_id}", headers=headers + ) + if resp.status_code in (200, 204): + logger.info("Deleted NPM proxy host %d", proxy_id) + return True + logger.warning( + "Failed to delete proxy host %d: %s %s", + proxy_id, resp.status_code, resp.text[:200], + ) + return False + except Exception as exc: + logger.error("NPM delete error: %s", exc) + return False diff --git a/app/services/port_manager.py b/app/services/port_manager.py new file mode 100644 index 0000000..18daeea --- /dev/null +++ b/app/services/port_manager.py @@ -0,0 +1,110 @@ +"""UDP port allocation service for NetBird relay/STUN ports. + +Manages the range starting at relay_base_port (default 3478). Each customer +gets one unique UDP port. The manager checks both the database and the OS +to avoid collisions. +""" + +import logging +import socket +from typing import Optional + +from sqlalchemy.orm import Session + +from app.models import Deployment + +logger = logging.getLogger(__name__) + + +def _is_udp_port_in_use(port: int) -> bool: + """Check whether a UDP port is currently bound on the host. + + Args: + port: UDP port number to probe. + + Returns: + True if the port is in use. + """ + sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + try: + sock.bind(("0.0.0.0", port)) + return False + except OSError: + return True + finally: + sock.close() + + +def get_allocated_ports(db: Session) -> set[int]: + """Return the set of relay UDP ports already assigned in the database. + + Args: + db: Active SQLAlchemy session. + + Returns: + Set of port numbers. + """ + rows = db.query(Deployment.relay_udp_port).all() + return {r[0] for r in rows} + + +def allocate_port(db: Session, base_port: int = 3478, max_ports: int = 100) -> int: + """Find and return the next available relay UDP port. + + Scans from *base_port* to *base_port + max_ports - 1*, skipping ports + that are either already in the database or currently bound on the host. + + Args: + db: Active SQLAlchemy session. + base_port: Start of the port range. + max_ports: Number of ports in the range. + + Returns: + An available port number. + + Raises: + RuntimeError: If no port in the range is available. + """ + allocated = get_allocated_ports(db) + for port in range(base_port, base_port + max_ports): + if port in allocated: + continue + if _is_udp_port_in_use(port): + logger.warning("Port %d is in use on the host, skipping.", port) + continue + logger.info("Allocated relay UDP port %d.", port) + return port + + raise RuntimeError( + f"No available relay ports in range {base_port}-{base_port + max_ports - 1}. " + "All 100 ports are allocated." + ) + + +def release_port(db: Session, port: int) -> None: + """Mark a port as released (informational logging only). + + The actual release happens when the Deployment row is deleted. This + helper exists for explicit logging in rollback scenarios. + + Args: + db: Active SQLAlchemy session. + port: The port to release. + """ + logger.info("Released relay UDP port %d.", port) + + +def validate_port_available(db: Session, port: int) -> bool: + """Check if a specific port is available both in DB and on the host. + + Args: + db: Active SQLAlchemy session. + port: Port number to check. + + Returns: + True if the port is available. 
+ """ + allocated = get_allocated_ports(db) + if port in allocated: + return False + return not _is_udp_port_in_use(port) diff --git a/app/utils/__init__.py b/app/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/utils/config.py b/app/utils/config.py new file mode 100644 index 0000000..141421d --- /dev/null +++ b/app/utils/config.py @@ -0,0 +1,74 @@ +"""Configuration management — loads all settings from the database (system_config table). + +There is NO .env file for application config. The install.sh script collects values +interactively and seeds them into the database. The Web UI settings page allows +editing every value at runtime. +""" + +import os +from dataclasses import dataclass +from typing import Optional + +from sqlalchemy.orm import Session + +from app.utils.security import decrypt_value + + +@dataclass +class AppConfig: + """In-memory snapshot of system configuration.""" + + base_domain: str + admin_email: str + npm_api_url: str + npm_api_token: str # decrypted + netbird_management_image: str + netbird_signal_image: str + netbird_relay_image: str + netbird_dashboard_image: str + data_dir: str + docker_network: str + relay_base_port: int + + +# Environment-level settings (not stored in DB) +SECRET_KEY: str = os.environ.get("SECRET_KEY", "change-me-in-production") +DATABASE_PATH: str = os.environ.get("DATABASE_PATH", "/app/data/netbird_msp.db") +LOG_LEVEL: str = os.environ.get("LOG_LEVEL", "INFO") +JWT_ALGORITHM: str = "HS256" +JWT_EXPIRE_MINUTES: int = 480 # 8 hours + + +def get_system_config(db: Session) -> Optional[AppConfig]: + """Load the singleton SystemConfig row and return an AppConfig dataclass. + + Args: + db: Active SQLAlchemy session. + + Returns: + AppConfig instance or None if the system_config row does not exist yet. + """ + from app.models import SystemConfig + + row = db.query(SystemConfig).filter(SystemConfig.id == 1).first() + if row is None: + return None + + try: + npm_token = decrypt_value(row.npm_api_token_encrypted) + except Exception: + npm_token = "" + + return AppConfig( + base_domain=row.base_domain, + admin_email=row.admin_email, + npm_api_url=row.npm_api_url, + npm_api_token=npm_token, + netbird_management_image=row.netbird_management_image, + netbird_signal_image=row.netbird_signal_image, + netbird_relay_image=row.netbird_relay_image, + netbird_dashboard_image=row.netbird_dashboard_image, + data_dir=row.data_dir, + docker_network=row.docker_network, + relay_base_port=row.relay_base_port, + ) diff --git a/app/utils/security.py b/app/utils/security.py new file mode 100644 index 0000000..bd67cc3 --- /dev/null +++ b/app/utils/security.py @@ -0,0 +1,91 @@ +"""Security utilities — password hashing (bcrypt) and token encryption (Fernet).""" + +import os +import secrets + +from cryptography.fernet import Fernet +from passlib.context import CryptContext + +# --------------------------------------------------------------------------- +# Password hashing (bcrypt) +# --------------------------------------------------------------------------- +pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto") + + +def hash_password(plain: str) -> str: + """Hash a plaintext password with bcrypt. + + Args: + plain: The plaintext password. + + Returns: + Bcrypt hash string. + """ + return pwd_context.hash(plain) + + +def verify_password(plain: str, hashed: str) -> bool: + """Verify a plaintext password against a bcrypt hash. + + Args: + plain: The plaintext password to check. + hashed: The stored bcrypt hash. 
+ + Returns: + True if the password matches. + """ + return pwd_context.verify(plain, hashed) + + +# --------------------------------------------------------------------------- +# Fernet encryption for secrets (NPM token, relay secrets, etc.) +# --------------------------------------------------------------------------- +def _get_fernet() -> Fernet: + """Derive a Fernet key from the application SECRET_KEY. + + The SECRET_KEY from the environment is used as the basis. We pad/truncate + it to produce a valid 32-byte URL-safe-base64 key that Fernet requires. + """ + import base64 + import hashlib + + secret = os.environ.get("SECRET_KEY", "change-me-in-production") + # Derive a stable 32-byte key via SHA-256 + key_bytes = hashlib.sha256(secret.encode()).digest() + fernet_key = base64.urlsafe_b64encode(key_bytes) + return Fernet(fernet_key) + + +def encrypt_value(plaintext: str) -> str: + """Encrypt a string value with Fernet. + + Args: + plaintext: Value to encrypt. + + Returns: + Encrypted string (base64-encoded Fernet token). + """ + f = _get_fernet() + return f.encrypt(plaintext.encode()).decode() + + +def decrypt_value(ciphertext: str) -> str: + """Decrypt a Fernet-encrypted string. + + Args: + ciphertext: Encrypted value. + + Returns: + Original plaintext string. + """ + f = _get_fernet() + return f.decrypt(ciphertext.encode()).decode() + + +def generate_relay_secret() -> str: + """Generate a cryptographically secure relay secret. + + Returns: + A 32-character hex string. + """ + return secrets.token_hex(16) diff --git a/app/utils/validators.py b/app/utils/validators.py new file mode 100644 index 0000000..acbe954 --- /dev/null +++ b/app/utils/validators.py @@ -0,0 +1,165 @@ +"""Input validation with Pydantic models for all API endpoints.""" + +import re +from typing import Optional + +from pydantic import BaseModel, Field, field_validator + + +# --------------------------------------------------------------------------- +# Auth +# --------------------------------------------------------------------------- +class LoginRequest(BaseModel): + """Login credentials.""" + + username: str = Field(..., min_length=1, max_length=100) + password: str = Field(..., min_length=1) + + +class ChangePasswordRequest(BaseModel): + """Password change payload.""" + + current_password: str = Field(..., min_length=1) + new_password: str = Field(..., min_length=12, max_length=128) + + +# --------------------------------------------------------------------------- +# Customer +# --------------------------------------------------------------------------- +class CustomerCreate(BaseModel): + """Payload to create a new customer.""" + + name: str = Field(..., min_length=1, max_length=255) + company: Optional[str] = Field(None, max_length=255) + subdomain: str = Field(..., min_length=1, max_length=63) + email: str = Field(..., max_length=255) + max_devices: int = Field(default=20, ge=1, le=10000) + notes: Optional[str] = None + + @field_validator("subdomain") + @classmethod + def validate_subdomain(cls, v: str) -> str: + """Subdomain must be lowercase alphanumeric + hyphens, no leading/trailing hyphen.""" + v = v.lower().strip() + if not re.match(r"^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$", v): + raise ValueError( + "Subdomain must be lowercase, alphanumeric with hyphens, " + "2-63 chars, no leading/trailing hyphen." 
+ ) + return v + + @field_validator("email") + @classmethod + def validate_email(cls, v: str) -> str: + """Basic email format check.""" + pattern = r"^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$" + if not re.match(pattern, v): + raise ValueError("Invalid email address.") + return v.lower().strip() + + +class CustomerUpdate(BaseModel): + """Payload to update an existing customer.""" + + name: Optional[str] = Field(None, min_length=1, max_length=255) + company: Optional[str] = Field(None, max_length=255) + email: Optional[str] = Field(None, max_length=255) + max_devices: Optional[int] = Field(None, ge=1, le=10000) + notes: Optional[str] = None + status: Optional[str] = None + + @field_validator("email") + @classmethod + def validate_email(cls, v: Optional[str]) -> Optional[str]: + """Basic email format check.""" + if v is None: + return v + pattern = r"^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$" + if not re.match(pattern, v): + raise ValueError("Invalid email address.") + return v.lower().strip() + + @field_validator("status") + @classmethod + def validate_status(cls, v: Optional[str]) -> Optional[str]: + """Status must be one of the allowed values.""" + if v is None: + return v + allowed = {"active", "inactive", "deploying", "error"} + if v not in allowed: + raise ValueError(f"Status must be one of: {', '.join(sorted(allowed))}") + return v + + +# --------------------------------------------------------------------------- +# Settings +# --------------------------------------------------------------------------- +class SystemConfigUpdate(BaseModel): + """Payload to update system configuration.""" + + base_domain: Optional[str] = Field(None, min_length=1, max_length=255) + admin_email: Optional[str] = Field(None, max_length=255) + npm_api_url: Optional[str] = Field(None, max_length=500) + npm_api_token: Optional[str] = None # plaintext, will be encrypted before storage + netbird_management_image: Optional[str] = Field(None, max_length=255) + netbird_signal_image: Optional[str] = Field(None, max_length=255) + netbird_relay_image: Optional[str] = Field(None, max_length=255) + netbird_dashboard_image: Optional[str] = Field(None, max_length=255) + data_dir: Optional[str] = Field(None, max_length=500) + docker_network: Optional[str] = Field(None, max_length=100) + relay_base_port: Optional[int] = Field(None, ge=1024, le=65535) + + @field_validator("base_domain") + @classmethod + def validate_domain(cls, v: Optional[str]) -> Optional[str]: + """Validate domain format.""" + if v is None: + return v + pattern = r"^[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\.[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$" + if not re.match(pattern, v): + raise ValueError("Invalid domain format.") + return v.lower().strip() + + @field_validator("npm_api_url") + @classmethod + def validate_npm_url(cls, v: Optional[str]) -> Optional[str]: + """NPM URL must start with http(s)://.""" + if v is None: + return v + if not re.match(r"^https?://", v): + raise ValueError("NPM API URL must start with http:// or https://") + return v.rstrip("/") + + @field_validator("admin_email") + @classmethod + def validate_email(cls, v: Optional[str]) -> Optional[str]: + """Validate admin email.""" + if v is None: + return v + pattern = r"^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$" + if not re.match(pattern, v): + raise ValueError("Invalid email address.") + return v.lower().strip() + + +# --------------------------------------------------------------------------- +# Query params +# 
--------------------------------------------------------------------------- +class CustomerListParams(BaseModel): + """Query parameters for listing customers.""" + + page: int = Field(default=1, ge=1) + per_page: int = Field(default=25, ge=1, le=100) + search: Optional[str] = None + status: Optional[str] = None + + @field_validator("status") + @classmethod + def validate_status(cls, v: Optional[str]) -> Optional[str]: + """Filter status validation.""" + if v is None or v == "": + return None + allowed = {"active", "inactive", "deploying", "error"} + if v not in allowed: + raise ValueError(f"Status must be one of: {', '.join(sorted(allowed))}") + return v diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..e95ddd1 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,33 @@ +version: '3.8' + +services: + netbird-msp-appliance: + build: . + container_name: netbird-msp-appliance + restart: unless-stopped + ports: + - "${WEB_UI_PORT:-8000}:8000" + volumes: + - ./data:/app/data + - ./logs:/app/logs + - ./backups:/app/backups + - /var/run/docker.sock:/var/run/docker.sock + - ${DATA_DIR:-/opt/netbird-instances}:${DATA_DIR:-/opt/netbird-instances} + environment: + - SECRET_KEY=${SECRET_KEY} + - DATABASE_PATH=/app/data/netbird_msp.db + - LOG_LEVEL=${LOG_LEVEL:-INFO} + - DATA_DIR=${DATA_DIR:-/opt/netbird-instances} + - DOCKER_NETWORK=${DOCKER_NETWORK:-npm-network} + networks: + - npm-network + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8000/api/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 15s + +networks: + npm-network: + external: true diff --git a/install.sh b/install.sh index f23c983..0b53e03 100644 --- a/install.sh +++ b/install.sh @@ -2,7 +2,8 @@ # NetBird MSP Appliance - Interactive Installation Script # This script sets up the complete NetBird MSP management platform -# All configuration is done interactively - no .env file editing needed! +# All configuration is done interactively and stored in the DATABASE. +# There is NO .env file for application config! set -e # Exit on error @@ -29,6 +30,8 @@ cat << 'BANNER' ║ ║ ║ Multi-Tenant NetBird Management Platform ║ ║ ║ +║ All config stored in database - no .env editing! 
║ +║ ║ ╚═══════════════════════════════════════════════════════════╝ BANNER echo -e "${NC}" @@ -53,7 +56,7 @@ echo -e "${BLUE}${BOLD}[Step 1/10]${NC} ${BLUE}Checking system requirements...${ CPU_CORES=$(nproc) echo -e "CPU Cores: ${CYAN}$CPU_CORES${NC}" if [ "$CPU_CORES" -lt 4 ]; then - echo -e "${YELLOW}⚠ Warning: Only $CPU_CORES CPU cores detected.${NC}" + echo -e "${YELLOW}Warning: Only $CPU_CORES CPU cores detected.${NC}" echo -e "${YELLOW} Minimum 8 cores recommended for 100 customers.${NC}" else echo -e "${GREEN}✓ CPU cores: Sufficient${NC}" @@ -63,7 +66,7 @@ fi TOTAL_RAM=$(free -g | awk '/^Mem:/{print $2}') echo -e "RAM: ${CYAN}${TOTAL_RAM}GB${NC}" if [ "$TOTAL_RAM" -lt 32 ]; then - echo -e "${YELLOW}⚠ Warning: Only ${TOTAL_RAM}GB RAM detected.${NC}" + echo -e "${YELLOW}Warning: Only ${TOTAL_RAM}GB RAM detected.${NC}" echo -e "${YELLOW} Minimum 64GB recommended for 100 customers.${NC}" else echo -e "${GREEN}✓ RAM: Sufficient${NC}" @@ -73,7 +76,7 @@ fi DISK_SPACE=$(df -BG / | awk 'NR==2 {print $4}' | sed 's/G//') echo -e "Free Disk Space: ${CYAN}${DISK_SPACE}GB${NC}" if [ "$DISK_SPACE" -lt 200 ]; then - echo -e "${YELLOW}⚠ Warning: Only ${DISK_SPACE}GB free disk space.${NC}" + echo -e "${YELLOW}Warning: Only ${DISK_SPACE}GB free disk space.${NC}" echo -e "${YELLOW} Minimum 500GB recommended.${NC}" else echo -e "${GREEN}✓ Disk space: Sufficient${NC}" @@ -263,13 +266,13 @@ CUSTOMIZE_IMAGES=${CUSTOMIZE_IMAGES:-no} if [[ "$CUSTOMIZE_IMAGES" =~ ^[Yy]([Ee][Ss])?$ ]]; then read -p "Management Image [netbirdio/management:latest]: " NETBIRD_MANAGEMENT_IMAGE NETBIRD_MANAGEMENT_IMAGE=${NETBIRD_MANAGEMENT_IMAGE:-netbirdio/management:latest} - + read -p "Signal Image [netbirdio/signal:latest]: " NETBIRD_SIGNAL_IMAGE NETBIRD_SIGNAL_IMAGE=${NETBIRD_SIGNAL_IMAGE:-netbirdio/signal:latest} - + read -p "Relay Image [netbirdio/relay:latest]: " NETBIRD_RELAY_IMAGE NETBIRD_RELAY_IMAGE=${NETBIRD_RELAY_IMAGE:-netbirdio/relay:latest} - + read -p "Dashboard Image [netbirdio/dashboard:latest]: " NETBIRD_DASHBOARD_IMAGE NETBIRD_DASHBOARD_IMAGE=${NETBIRD_DASHBOARD_IMAGE:-netbirdio/dashboard:latest} else @@ -284,7 +287,7 @@ sleep 1 clear # ============================================================================ -# STEP 8: INSTALLATION +# STEP 8: INSTALLATION (stores config in DATABASE, not .env) # ============================================================================ echo -e "${BLUE}${BOLD}[Step 8/10]${NC} ${BLUE}Installation${NC}\n" @@ -322,52 +325,27 @@ else echo -e "${GREEN}✓ Docker network '$DOCKER_NETWORK' created${NC}" fi -# Generate secret key +# Generate secret key for encryption (only env-level secret) echo "Generating encryption keys..." SECRET_KEY=$(openssl rand -base64 32) echo -e "${GREEN}✓ Encryption keys generated${NC}" -# Create .env file -echo "Creating configuration..." +# Create MINIMAL .env — only container-level vars needed by docker-compose.yml +# All application config goes into the DATABASE, not here! +echo "Creating minimal container environment..." cat > "$INSTALL_DIR/.env" << ENVEOF -# NetBird MSP Appliance Configuration -# Generated on $(date) -# DO NOT EDIT - Use Web UI to change settings - -# Security +# Container-level environment only (NOT application config!) +# All settings are stored in the database and editable via Web UI. 
SECRET_KEY=$SECRET_KEY -ADMIN_USERNAME=$ADMIN_USERNAME -ADMIN_PASSWORD=$ADMIN_PASSWORD - -# Nginx Proxy Manager -NPM_API_URL=$NPM_API_URL -NPM_API_TOKEN=$NPM_API_TOKEN - -# System +DATABASE_PATH=/app/data/netbird_msp.db DATA_DIR=$DATA_DIR DOCKER_NETWORK=$DOCKER_NETWORK -BASE_DOMAIN=$BASE_DOMAIN -ADMIN_EMAIL=$ADMIN_EMAIL - -# NetBird Images -NETBIRD_MANAGEMENT_IMAGE=$NETBIRD_MANAGEMENT_IMAGE -NETBIRD_SIGNAL_IMAGE=$NETBIRD_SIGNAL_IMAGE -NETBIRD_RELAY_IMAGE=$NETBIRD_RELAY_IMAGE -NETBIRD_DASHBOARD_IMAGE=$NETBIRD_DASHBOARD_IMAGE - -# Database -DATABASE_PATH=/app/data/netbird_msp.db - -# Logging LOG_LEVEL=INFO - -# Port Configuration -RELAY_BASE_PORT=3478 WEB_UI_PORT=8000 ENVEOF chmod 600 "$INSTALL_DIR/.env" -echo -e "${GREEN}✓ Configuration file created${NC}" +echo -e "${GREEN}✓ Container environment created${NC}" # Copy application files echo "Copying application files..." @@ -391,10 +369,64 @@ else exit 1 fi -# Initialize database +# Initialize database tables echo "Initializing database..." docker exec $CONTAINER_NAME python -m app.database init || true -echo -e "${GREEN}✓ Database initialized${NC}" +echo -e "${GREEN}✓ Database tables created${NC}" + +# Seed all configuration into the database (system_config + users table) +echo "Seeding configuration into database..." +docker exec $CONTAINER_NAME python -c " +import os +os.environ['SECRET_KEY'] = '$SECRET_KEY' + +from app.database import SessionLocal, init_db +from app.models import SystemConfig, User +from app.utils.security import hash_password, encrypt_value + +init_db() +db = SessionLocal() + +# Create admin user +existing_user = db.query(User).filter(User.username == '$ADMIN_USERNAME').first() +if not existing_user: + user = User( + username='$ADMIN_USERNAME', + password_hash=hash_password('$ADMIN_PASSWORD'), + email='$ADMIN_EMAIL', + ) + db.add(user) + print('Admin user created.') +else: + print('Admin user already exists.') + +# Create system config (singleton row) +existing_config = db.query(SystemConfig).filter(SystemConfig.id == 1).first() +if not existing_config: + config = SystemConfig( + id=1, + base_domain='$BASE_DOMAIN', + admin_email='$ADMIN_EMAIL', + npm_api_url='$NPM_API_URL', + npm_api_token_encrypted=encrypt_value('$NPM_API_TOKEN'), + netbird_management_image='$NETBIRD_MANAGEMENT_IMAGE', + netbird_signal_image='$NETBIRD_SIGNAL_IMAGE', + netbird_relay_image='$NETBIRD_RELAY_IMAGE', + netbird_dashboard_image='$NETBIRD_DASHBOARD_IMAGE', + data_dir='$DATA_DIR', + docker_network='$DOCKER_NETWORK', + relay_base_port=3478, + ) + db.add(config) + print('System configuration saved to database.') +else: + print('System configuration already exists.') + +db.commit() +db.close() +print('Database seeding complete.') +" +echo -e "${GREEN}✓ Configuration stored in database${NC}" clear @@ -448,6 +480,11 @@ echo -e " Web Interface: ${GREEN}http://${SERVER_IP}:8000${NC}" echo -e " Username: ${GREEN}${ADMIN_USERNAME}${NC}" echo -e " Password: ${CYAN}${NC}\n" +echo -e "${BLUE}${BOLD}Configuration:${NC}\n" +echo -e " ${YELLOW}All settings are stored in the database${NC}" +echo -e " ${YELLOW}Edit them anytime via Web UI > Settings${NC}" +echo -e " ${YELLOW}NO .env file editing needed!${NC}\n" + echo -e "${BLUE}${BOLD}Next Steps:${NC}\n" echo -e " 1. ${CYAN}Access the web interface${NC}" echo -e " 2. 
${CYAN}Review system settings${NC} (all editable via Web UI)" @@ -464,11 +501,10 @@ echo -e "${BLUE}${BOLD}Important Notes:${NC}\n" echo -e " ${YELLOW}•${NC} All settings can be changed via the Web UI" echo -e " ${YELLOW}•${NC} Installation directory: ${INSTALL_DIR}" echo -e " ${YELLOW}•${NC} Customer data directory: ${DATA_DIR}" +echo -e " ${YELLOW}•${NC} Database: ${INSTALL_DIR}/data/netbird_msp.db" echo -e " ${YELLOW}•${NC} Backup your database regularly\n" -echo -e "${GREEN}${BOLD}Happy MSP-ing! 🚀${NC}\n" - -# Save installation summary +# Save installation summary (no secrets!) cat > "$INSTALL_DIR/INSTALLATION_SUMMARY.txt" << SUMMARY NetBird MSP Appliance - Installation Summary ============================================= @@ -483,6 +519,9 @@ Base Domain: $BASE_DOMAIN NPM API URL: $NPM_API_URL Data Directory: $DATA_DIR +NOTE: All settings are stored in the database and editable via Web UI. + No manual config file editing needed! + Access: ------- Web UI: http://${SERVER_IP}:8000 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..92c4082 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,20 @@ +# NetBird MSP Appliance - Python Dependencies +fastapi==0.104.1 +uvicorn[standard]==0.24.0 +sqlalchemy==2.0.23 +aiosqlite==0.19.0 +pydantic==2.5.2 +pydantic-settings==2.1.0 +python-jose[cryptography]==3.3.0 +passlib[bcrypt]==1.7.4 +bcrypt==4.1.2 +cryptography==41.0.7 +python-multipart==0.0.6 +httpx==0.25.2 +jinja2==3.1.2 +docker==7.0.0 +psutil==5.9.7 +pyyaml==6.0.1 +pytest==7.4.3 +pytest-asyncio==0.23.2 +pytest-httpx==0.28.0 diff --git a/static/css/styles.css b/static/css/styles.css new file mode 100644 index 0000000..3df5641 --- /dev/null +++ b/static/css/styles.css @@ -0,0 +1,175 @@ +/* NetBird MSP Appliance - Custom Styles */ + +/* Login */ +.login-container { + min-height: 100vh; + display: flex; + align-items: center; + justify-content: center; + background: linear-gradient(135deg, #1a1a2e 0%, #16213e 50%, #0f3460 100%); +} + +.login-card { + width: 100%; + max-width: 420px; + border-radius: 12px; +} + +/* Stats cards */ +.stat-card { + border-radius: 10px; + transition: transform 0.15s; +} + +.stat-card:hover { + transform: translateY(-2px); +} + +.stat-icon { + width: 48px; + height: 48px; + border-radius: 12px; + display: flex; + align-items: center; + justify-content: center; + font-size: 1.5rem; +} + +/* Table */ +.table th { + font-weight: 600; + font-size: 0.85rem; + text-transform: uppercase; + letter-spacing: 0.5px; + color: #6c757d; +} + +.table td { + vertical-align: middle; +} + +/* Log viewer */ +.log-viewer { + max-height: 600px; + overflow-y: auto; +} + +.log-entry { + padding: 4px 8px; + border-bottom: 1px solid #f0f0f0; + font-size: 0.85rem; + font-family: 'Consolas', 'Monaco', monospace; +} + +.log-entry:last-child { + border-bottom: none; +} + +.log-time { + color: #6c757d; + margin-right: 8px; +} + +.log-pre { + background: #1e1e1e; + color: #d4d4d4; + padding: 12px; + border-radius: 6px; + max-height: 300px; + overflow-y: auto; + font-size: 0.8rem; + white-space: pre-wrap; + word-wrap: break-word; +} + +/* Toast notification */ +.toast-notification { + position: fixed; + bottom: 20px; + right: 20px; + background: #198754; + color: white; + padding: 12px 24px; + border-radius: 8px; + font-size: 0.9rem; + z-index: 9999; + box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15); + animation: toast-in 0.3s ease, toast-out 0.3s ease 2.7s; +} + +@keyframes toast-in { + from { opacity: 0; transform: translateY(20px); } + to { opacity: 1; transform: 
translateY(0); } +} + +@keyframes toast-out { + from { opacity: 1; } + to { opacity: 0; } +} + +/* Badge improvements */ +.badge { + font-weight: 500; + font-size: 0.75rem; + padding: 0.35em 0.65em; +} + +/* Page transitions */ +.page-content { + animation: fade-in 0.15s ease; +} + +@keyframes fade-in { + from { opacity: 0; } + to { opacity: 1; } +} + +/* Progress bars in monitoring */ +.progress { + border-radius: 6px; +} + +.progress-bar { + font-size: 0.75rem; + font-weight: 600; +} + +/* Responsive adjustments */ +@media (max-width: 768px) { + .stat-card .fs-3 { + font-size: 1.5rem !important; + } + + .btn-group-sm .btn { + padding: 0.2rem 0.4rem; + } +} + +/* Custom scrollbar */ +.log-pre::-webkit-scrollbar, +.log-viewer::-webkit-scrollbar { + width: 6px; +} + +.log-pre::-webkit-scrollbar-thumb, +.log-viewer::-webkit-scrollbar-thumb { + background: #555; + border-radius: 3px; +} + +/* Navbar brand */ +.navbar-brand { + font-weight: 700; + letter-spacing: 0.5px; +} + +/* Card improvements */ +.card { + border-radius: 10px; + border: 1px solid rgba(0, 0, 0, 0.08); +} + +.card-header { + font-weight: 600; + background: rgba(0, 0, 0, 0.02); +} diff --git a/static/index.html b/static/index.html new file mode 100644 index 0000000..b1779f4 --- /dev/null +++ b/static/index.html @@ -0,0 +1,509 @@ + + + + + + NetBird MSP Appliance + + + + + + +
[static/index.html body, markup stripped in this capture. The file opens with the login page (form #login-form with #login-username, #login-password, #login-error and #login-spinner), the app shell with its navbar (#nav-username), and the dashboard's four stat cards. Only the card labels and their initial counters survived: Total Customers (#stat-total: 0), Active (#stat-active: 0), Inactive (#stat-inactive: 0), Errors (#stat-error: 0); these are the ids that loadStats() in app.js updates.]
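Since the card markup itself did not survive, a rough reconstruction of one stat card follows. It is a sketch, not the original file content: the "Total Customers" label, the #stat-total id and the .stat-card, .stat-icon and .fs-3 classes are taken from the surviving text, app.js and styles.css, while the Bootstrap grid classes, color utilities and icon name are assumptions.

<div class="col-md-3">                                        <!-- grid class assumed -->
  <div class="card stat-card">
    <div class="card-body d-flex align-items-center">
      <div class="stat-icon bg-primary bg-opacity-10 text-primary me-3">
        <i class="bi bi-people"></i>                          <!-- icon assumed -->
      </div>
      <div>
        <div class="text-muted small">Total Customers</div>
        <div class="fs-3 fw-bold" id="stat-total">0</div>     <!-- filled by loadStats() in app.js -->
      </div>
    </div>
  </div>
</div>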
[Markup stripped: the dashboard's customer list card with a search box (#search-input), a status filter (#status-filter), a "New Customer" button, and a table (columns ID, Name, Company, Subdomain, Status, Devices, Created, Actions) whose body (#customers-table-body, initially "Loading...") and pagination (#pagination-info, #pagination-controls) are rendered by app.js. It is followed by the customer detail page: a header with the customer name (#detail-customer-name) and status badge (#detail-customer-status), action buttons, and tabs for Info (#detail-info-content), Deployment (#detail-deployment-content), Container Logs (#detail-logs-content, initially "No logs loaded."), and Health Check (#detail-health-content, initially: Click "Check" to run a health check.).]
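The table skeleton below is a sketch of what app.js expects to find, assembled from the surviving column headers and the element ids it queries; the layout classes, placeholder text and filter option labels are assumptions (the option values match the statuses allowed in validators.py).

<div class="d-flex gap-2 mb-3">
  <input id="search-input" class="form-control" placeholder="Search...">         <!-- placeholder text assumed -->
  <select id="status-filter" class="form-select">
    <option value="">All</option>                                                <!-- labels assumed -->
    <option value="active">active</option>
    <option value="inactive">inactive</option>
    <option value="deploying">deploying</option>
    <option value="error">error</option>
  </select>
  <button class="btn btn-primary" onclick="showNewCustomerModal()">New Customer</button>
</div>
<table class="table">
  <thead>
    <tr>
      <th>ID</th><th>Name</th><th>Company</th><th>Subdomain</th>
      <th>Status</th><th>Devices</th><th>Created</th><th>Actions</th>
    </tr>
  </thead>
  <tbody id="customers-table-body">
    <tr><td colspan="8" class="text-center">Loading...</td></tr>
  </tbody>
</table>
<div class="d-flex justify-content-between align-items-center mt-2">
  <span id="pagination-info"></span>
  <span id="pagination-controls"></span>
</div>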
[Markup stripped: the System Settings page. It contains a system form (#settings-system-form) with fields for base domain (hint: "Customers get subdomains: kunde.yourdomain.com"), admin email, data directory, Docker network and relay base port (hint: "First UDP port for relay. Range: base to base+99"); an NPM form (#settings-npm-form) with API URL, API token and a connection test (#npm-test-result, driven by testNpmConnection() in app.js); an images form (#settings-images-form) for the management, signal, relay and dashboard image overrides; and a "Change Admin Password" form (#change-password-form) with current, new and confirm password fields.]
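As one concrete example of how these forms were wired, a sketch of the relay port field inside the system form follows. The form and input ids, the hint text and the 1024-65535 bounds come from app.js, the surviving hint line and validators.py; the label text, default value and button caption are assumptions.

<form id="settings-system-form">                                                 <!-- submit handled in app.js -->
  <!-- base domain, admin email, data dir and docker network fields omitted -->
  <div class="mb-3">
    <label class="form-label" for="cfg-relay-base-port">Relay Base Port</label>  <!-- label assumed -->
    <input type="number" class="form-control" id="cfg-relay-base-port"
           min="1024" max="65535" value="3478">
    <div class="form-text">First UDP port for relay. Range: base to base+99</div>
  </div>
  <button type="submit" class="btn btn-primary">Save</button>                    <!-- caption assumed -->
</form>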
[Markup stripped: the System Monitoring page with a Host Resources panel (#monitoring-resources, initially "Loading...") and an All Customer Deployments table (columns ID, Name, Subdomain, Status, Deployment, Relay Port, Containers; body #monitoring-customers-body, initially "Loading..."). index.html also defines the customer create/edit modal (#customer-modal) and the delete-confirmation modal (#delete-modal) referenced by app.js, plus the Bootstrap and /static/js/app.js script includes the page relies on.]
+ + + + + + + + + + + diff --git a/static/js/app.js b/static/js/app.js new file mode 100644 index 0000000..1d75db7 --- /dev/null +++ b/static/js/app.js @@ -0,0 +1,694 @@ +/** + * NetBird MSP Appliance - Frontend Application + * Vanilla JavaScript with Bootstrap 5 + */ + +// --------------------------------------------------------------------------- +// State +// --------------------------------------------------------------------------- +let authToken = localStorage.getItem('authToken') || null; +let currentUser = null; +let currentPage = 'dashboard'; +let currentCustomerId = null; +let currentCustomerData = null; +let customersPage = 1; + +// --------------------------------------------------------------------------- +// API helper +// --------------------------------------------------------------------------- +async function api(method, path, body = null) { + const opts = { + method, + headers: { 'Content-Type': 'application/json' }, + }; + if (authToken) { + opts.headers['Authorization'] = `Bearer ${authToken}`; + } + if (body) { + opts.body = JSON.stringify(body); + } + const resp = await fetch(`/api${path}`, opts); + if (resp.status === 401) { + logout(); + throw new Error('Session expired.'); + } + const data = await resp.json(); + if (!resp.ok) { + throw new Error(data.detail || data.message || 'Request failed.'); + } + return data; +} + +// --------------------------------------------------------------------------- +// Auth +// --------------------------------------------------------------------------- +function initApp() { + if (authToken) { + api('GET', '/auth/me') + .then(user => { + currentUser = user; + document.getElementById('nav-username').textContent = user.username; + showAppPage(); + loadDashboard(); + }) + .catch(() => { + authToken = null; + localStorage.removeItem('authToken'); + showLoginPage(); + }); + } else { + showLoginPage(); + } +} + +function showLoginPage() { + document.getElementById('login-page').classList.remove('d-none'); + document.getElementById('app-page').classList.add('d-none'); +} + +function showAppPage() { + document.getElementById('login-page').classList.add('d-none'); + document.getElementById('app-page').classList.remove('d-none'); +} + +document.getElementById('login-form').addEventListener('submit', async (e) => { + e.preventDefault(); + const errorEl = document.getElementById('login-error'); + const spinner = document.getElementById('login-spinner'); + errorEl.classList.add('d-none'); + spinner.classList.remove('d-none'); + + try { + const data = await api('POST', '/auth/login', { + username: document.getElementById('login-username').value, + password: document.getElementById('login-password').value, + }); + authToken = data.access_token; + localStorage.setItem('authToken', authToken); + currentUser = data.user; + document.getElementById('nav-username').textContent = currentUser.username; + showAppPage(); + loadDashboard(); + } catch (err) { + errorEl.textContent = err.message; + errorEl.classList.remove('d-none'); + } finally { + spinner.classList.add('d-none'); + } +}); + +function logout() { + api('POST', '/auth/logout').catch(() => {}); + authToken = null; + currentUser = null; + localStorage.removeItem('authToken'); + showLoginPage(); +} + +// --------------------------------------------------------------------------- +// Navigation +// --------------------------------------------------------------------------- +function showPage(page) { + document.querySelectorAll('.page-content').forEach(el => el.classList.add('d-none')); + 
document.getElementById(`page-${page}`).classList.remove('d-none'); + currentPage = page; + + if (page === 'dashboard') loadDashboard(); + else if (page === 'settings') loadSettings(); + else if (page === 'monitoring') loadMonitoring(); +} + +// --------------------------------------------------------------------------- +// Dashboard +// --------------------------------------------------------------------------- +async function loadDashboard() { + await Promise.all([loadStats(), loadCustomers()]); +} + +async function loadStats() { + try { + const data = await api('GET', '/monitoring/status'); + document.getElementById('stat-total').textContent = data.total_customers; + document.getElementById('stat-active').textContent = data.active; + document.getElementById('stat-inactive').textContent = data.inactive; + document.getElementById('stat-error').textContent = data.error; + } catch (err) { + console.error('Failed to load stats:', err); + } +} + +async function loadCustomers() { + const search = document.getElementById('search-input').value; + const status = document.getElementById('status-filter').value; + let url = `/customers?page=${customersPage}&per_page=25`; + if (search) url += `&search=${encodeURIComponent(search)}`; + if (status) url += `&status=${encodeURIComponent(status)}`; + + try { + const data = await api('GET', url); + renderCustomersTable(data); + } catch (err) { + console.error('Failed to load customers:', err); + } +} + +function renderCustomersTable(data) { + const tbody = document.getElementById('customers-table-body'); + if (!data.items || data.items.length === 0) { + tbody.innerHTML = 'No customers found. Click "New Customer" to create one.'; + document.getElementById('pagination-info').textContent = 'Showing 0 of 0'; + document.getElementById('pagination-controls').innerHTML = ''; + return; + } + + tbody.innerHTML = data.items.map(c => ` + + ${c.id} + ${esc(c.name)} + ${esc(c.company || '-')} + ${esc(c.subdomain)} + ${statusBadge(c.status)} + ${c.max_devices} + ${formatDate(c.created_at)} + +
+ + ${c.deployment && c.deployment.deployment_status === 'running' + ? `` + : `` + } + + +
+ + + `).join(''); + + // Pagination + const start = (data.page - 1) * data.per_page + 1; + const end = Math.min(data.page * data.per_page, data.total); + document.getElementById('pagination-info').textContent = `Showing ${start}-${end} of ${data.total}`; + + let paginationHtml = ''; + for (let i = 1; i <= data.pages; i++) { + paginationHtml += `
  • ${i}
  • `; + } + document.getElementById('pagination-controls').innerHTML = paginationHtml; +} + +function goToPage(page) { + customersPage = page; + loadCustomers(); +} + +// Search & filter listeners +document.getElementById('search-input').addEventListener('input', debounce(() => { customersPage = 1; loadCustomers(); }, 300)); +document.getElementById('status-filter').addEventListener('change', () => { customersPage = 1; loadCustomers(); }); + +// --------------------------------------------------------------------------- +// Customer CRUD +// --------------------------------------------------------------------------- +function showNewCustomerModal() { + document.getElementById('customer-modal-title').textContent = 'New Customer'; + document.getElementById('customer-edit-id').value = ''; + document.getElementById('customer-form').reset(); + document.getElementById('cust-max-devices').value = '20'; + document.getElementById('customer-modal-error').classList.add('d-none'); + document.getElementById('customer-save-btn').textContent = 'Save & Deploy'; + + // Update subdomain suffix + api('GET', '/settings/system').then(cfg => { + document.getElementById('cust-subdomain-suffix').textContent = `.${cfg.base_domain || 'domain.com'}`; + }).catch(() => {}); + + const modal = new bootstrap.Modal(document.getElementById('customer-modal')); + // Enable subdomain field for new customers + document.getElementById('cust-subdomain').disabled = false; + modal.show(); +} + +function editCurrentCustomer() { + if (!currentCustomerData) return; + const c = currentCustomerData; + document.getElementById('customer-modal-title').textContent = 'Edit Customer'; + document.getElementById('customer-edit-id').value = c.id; + document.getElementById('cust-name').value = c.name; + document.getElementById('cust-company').value = c.company || ''; + document.getElementById('cust-subdomain').value = c.subdomain; + document.getElementById('cust-subdomain').disabled = true; // Can't change subdomain + document.getElementById('cust-email').value = c.email; + document.getElementById('cust-max-devices').value = c.max_devices; + document.getElementById('cust-notes').value = c.notes || ''; + document.getElementById('customer-modal-error').classList.add('d-none'); + document.getElementById('customer-save-btn').textContent = 'Save Changes'; + + const modal = new bootstrap.Modal(document.getElementById('customer-modal')); + modal.show(); +} + +async function saveCustomer() { + const errorEl = document.getElementById('customer-modal-error'); + const spinner = document.getElementById('customer-save-spinner'); + errorEl.classList.add('d-none'); + spinner.classList.remove('d-none'); + + const editId = document.getElementById('customer-edit-id').value; + const payload = { + name: document.getElementById('cust-name').value, + company: document.getElementById('cust-company').value || null, + email: document.getElementById('cust-email').value, + max_devices: parseInt(document.getElementById('cust-max-devices').value) || 20, + notes: document.getElementById('cust-notes').value || null, + }; + + try { + if (editId) { + await api('PUT', `/customers/${editId}`, payload); + } else { + payload.subdomain = document.getElementById('cust-subdomain').value.toLowerCase(); + await api('POST', '/customers', payload); + } + bootstrap.Modal.getInstance(document.getElementById('customer-modal')).hide(); + loadDashboard(); + if (editId && currentCustomerId == editId) { + viewCustomer(editId); + } + } catch (err) { + errorEl.textContent = err.message; + 
errorEl.classList.remove('d-none'); + } finally { + spinner.classList.add('d-none'); + } +} + +function showDeleteModal(id, name) { + document.getElementById('delete-customer-id').value = id; + document.getElementById('delete-customer-name').textContent = name; + new bootstrap.Modal(document.getElementById('delete-modal')).show(); +} + +function deleteCurrentCustomer() { + if (!currentCustomerData) return; + showDeleteModal(currentCustomerData.id, currentCustomerData.name); +} + +async function confirmDeleteCustomer() { + const id = document.getElementById('delete-customer-id').value; + const spinner = document.getElementById('delete-spinner'); + spinner.classList.remove('d-none'); + + try { + await api('DELETE', `/customers/${id}`); + bootstrap.Modal.getInstance(document.getElementById('delete-modal')).hide(); + showPage('dashboard'); + } catch (err) { + alert('Delete failed: ' + err.message); + } finally { + spinner.classList.add('d-none'); + } +} + +// --------------------------------------------------------------------------- +// Customer Actions (start/stop/restart) +// --------------------------------------------------------------------------- +async function customerAction(id, action) { + try { + await api('POST', `/customers/${id}/${action}`); + if (currentPage === 'dashboard') loadCustomers(); + if (currentCustomerId == id) viewCustomer(id); + } catch (err) { + alert(`${action} failed: ${err.message}`); + } +} + +// --------------------------------------------------------------------------- +// Customer Detail +// --------------------------------------------------------------------------- +async function viewCustomer(id) { + currentCustomerId = id; + showPage('customer-detail'); + + try { + const data = await api('GET', `/customers/${id}`); + currentCustomerData = data; + document.getElementById('detail-customer-name').textContent = data.name; + const badge = document.getElementById('detail-customer-status'); + badge.innerHTML = statusBadge(data.status); + + // Info tab + document.getElementById('detail-info-content').innerHTML = ` +
    +
    Name: ${esc(data.name)}
    +
    Company: ${esc(data.company || '-')}
    +
    Subdomain: ${esc(data.subdomain)}
    +
    Email: ${esc(data.email)}
    +
    Max Devices: ${data.max_devices}
    +
    Status: ${statusBadge(data.status)}
    +
    Created: ${formatDate(data.created_at)}
    +
    Updated: ${formatDate(data.updated_at)}
    + ${data.notes ? `
    Notes: ${esc(data.notes)}
    ` : ''} +
    + `; + + // Deployment tab + if (data.deployment) { + const d = data.deployment; + document.getElementById('detail-deployment-content').innerHTML = ` +
    +
    Status: ${statusBadge(d.deployment_status)}
    +
    Relay UDP Port: ${d.relay_udp_port}
    +
    Container Prefix: ${esc(d.container_prefix)}
    +
    Deployed: ${formatDate(d.deployed_at)}
    +
    + Setup URL: +
    + + +
    +
    +
    +
    + + + + +
    + `; + } else { + document.getElementById('detail-deployment-content').innerHTML = ` +

    No deployment found.

    + + `; + } + + // Logs tab (preview from deployment_logs table) + if (data.logs && data.logs.length > 0) { + document.getElementById('detail-logs-content').innerHTML = data.logs.map(l => + `
    ${formatDate(l.created_at)} ${l.status} ${esc(l.action)}: ${esc(l.message)}
    ` + ).join(''); + } + } catch (err) { + document.getElementById('detail-info-content').innerHTML = `
    ${err.message}
    `; + } +} + +async function loadCustomerLogs() { + if (!currentCustomerId) return; + try { + const data = await api('GET', `/customers/${currentCustomerId}/logs`); + const content = document.getElementById('detail-logs-content'); + if (!data.logs || Object.keys(data.logs).length === 0) { + content.innerHTML = '

    No container logs available.

    '; + return; + } + let html = ''; + for (const [name, logText] of Object.entries(data.logs)) { + html += `
    ${esc(name)}
    ${esc(logText)}
    `; + } + content.innerHTML = html; + } catch (err) { + document.getElementById('detail-logs-content').innerHTML = `
    ${err.message}
    `; + } +} + +async function loadCustomerHealth() { + if (!currentCustomerId) return; + try { + const data = await api('GET', `/customers/${currentCustomerId}/health`); + const content = document.getElementById('detail-health-content'); + let html = `
    Overall: ${data.healthy ? 'Healthy' : 'Unhealthy'}
    `; + if (data.containers && data.containers.length > 0) { + html += ''; + data.containers.forEach(c => { + const statusClass = c.status === 'running' ? 'text-success' : 'text-danger'; + html += ``; + }); + html += '
    ContainerStatusHealthImage
    ${esc(c.name)}${c.status}${c.health}${esc(c.image)}
    '; + } + html += `
    Last check: ${formatDate(data.last_check)}
    `; + content.innerHTML = html; + } catch (err) { + document.getElementById('detail-health-content').innerHTML = `
    ${err.message}
    `; + } +} + +function copySetupUrl() { + const input = document.getElementById('setup-url-input'); + navigator.clipboard.writeText(input.value).then(() => { + showToast('Setup URL copied to clipboard.'); + }); +} + +// --------------------------------------------------------------------------- +// Settings +// --------------------------------------------------------------------------- +async function loadSettings() { + try { + const cfg = await api('GET', '/settings/system'); + document.getElementById('cfg-base-domain').value = cfg.base_domain || ''; + document.getElementById('cfg-admin-email').value = cfg.admin_email || ''; + document.getElementById('cfg-data-dir').value = cfg.data_dir || ''; + document.getElementById('cfg-docker-network').value = cfg.docker_network || ''; + document.getElementById('cfg-relay-base-port').value = cfg.relay_base_port || 3478; + document.getElementById('cfg-npm-api-url').value = cfg.npm_api_url || ''; + document.getElementById('npm-token-status').textContent = cfg.npm_api_token_set ? 'Token is set (leave empty to keep current)' : 'No token configured'; + document.getElementById('cfg-mgmt-image').value = cfg.netbird_management_image || ''; + document.getElementById('cfg-signal-image').value = cfg.netbird_signal_image || ''; + document.getElementById('cfg-relay-image').value = cfg.netbird_relay_image || ''; + document.getElementById('cfg-dashboard-image').value = cfg.netbird_dashboard_image || ''; + } catch (err) { + showSettingsAlert('danger', 'Failed to load settings: ' + err.message); + } +} + +// System settings form +document.getElementById('settings-system-form').addEventListener('submit', async (e) => { + e.preventDefault(); + try { + await api('PUT', '/settings/system', { + base_domain: document.getElementById('cfg-base-domain').value, + admin_email: document.getElementById('cfg-admin-email').value, + data_dir: document.getElementById('cfg-data-dir').value, + docker_network: document.getElementById('cfg-docker-network').value, + relay_base_port: parseInt(document.getElementById('cfg-relay-base-port').value), + }); + showSettingsAlert('success', 'System settings saved.'); + } catch (err) { + showSettingsAlert('danger', 'Failed: ' + err.message); + } +}); + +// NPM settings form +document.getElementById('settings-npm-form').addEventListener('submit', async (e) => { + e.preventDefault(); + const payload = { npm_api_url: document.getElementById('cfg-npm-api-url').value }; + const token = document.getElementById('cfg-npm-api-token').value; + if (token) payload.npm_api_token = token; + try { + await api('PUT', '/settings/system', payload); + showSettingsAlert('success', 'NPM settings saved.'); + document.getElementById('cfg-npm-api-token').value = ''; + loadSettings(); + } catch (err) { + showSettingsAlert('danger', 'Failed: ' + err.message); + } +}); + +// Image settings form +document.getElementById('settings-images-form').addEventListener('submit', async (e) => { + e.preventDefault(); + try { + await api('PUT', '/settings/system', { + netbird_management_image: document.getElementById('cfg-mgmt-image').value, + netbird_signal_image: document.getElementById('cfg-signal-image').value, + netbird_relay_image: document.getElementById('cfg-relay-image').value, + netbird_dashboard_image: document.getElementById('cfg-dashboard-image').value, + }); + showSettingsAlert('success', 'Image settings saved.'); + } catch (err) { + showSettingsAlert('danger', 'Failed: ' + err.message); + } +}); + +// Test NPM connection +async function testNpmConnection() { + const 
spinner = document.getElementById('npm-test-spinner'); + const resultEl = document.getElementById('npm-test-result'); + spinner.classList.remove('d-none'); + resultEl.classList.add('d-none'); + + try { + const data = await api('GET', '/settings/test-npm'); + resultEl.className = `mt-3 alert alert-${data.ok ? 'success' : 'danger'}`; + resultEl.textContent = data.message; + resultEl.classList.remove('d-none'); + } catch (err) { + resultEl.className = 'mt-3 alert alert-danger'; + resultEl.textContent = err.message; + resultEl.classList.remove('d-none'); + } finally { + spinner.classList.add('d-none'); + } +} + +// Change password form +document.getElementById('change-password-form').addEventListener('submit', async (e) => { + e.preventDefault(); + const resultEl = document.getElementById('password-result'); + const newPw = document.getElementById('pw-new').value; + const confirmPw = document.getElementById('pw-confirm').value; + + if (newPw !== confirmPw) { + resultEl.className = 'mt-3 alert alert-danger'; + resultEl.textContent = 'Passwords do not match.'; + resultEl.classList.remove('d-none'); + return; + } + + try { + await api('POST', '/auth/change-password', { + current_password: document.getElementById('pw-current').value, + new_password: newPw, + }); + resultEl.className = 'mt-3 alert alert-success'; + resultEl.textContent = 'Password changed successfully.'; + resultEl.classList.remove('d-none'); + document.getElementById('change-password-form').reset(); + } catch (err) { + resultEl.className = 'mt-3 alert alert-danger'; + resultEl.textContent = err.message; + resultEl.classList.remove('d-none'); + } +}); + +function showSettingsAlert(type, msg) { + const el = document.getElementById('settings-alert'); + el.className = `alert alert-${type} alert-dismissible fade show`; + el.innerHTML = `${msg}`; + el.classList.remove('d-none'); + setTimeout(() => el.classList.add('d-none'), 5000); +} + +function togglePasswordVisibility(inputId) { + const input = document.getElementById(inputId); + input.type = input.type === 'password' ? 'text' : 'password'; +} + +// --------------------------------------------------------------------------- +// Monitoring +// --------------------------------------------------------------------------- +async function loadMonitoring() { + await Promise.all([loadResources(), loadAllCustomerStatuses()]); +} + +async function loadResources() { + try { + const data = await api('GET', '/monitoring/resources'); + document.getElementById('monitoring-resources').innerHTML = ` +
+            <div class="row g-3">
+                <div class="col-md-3">
+                    <h6>Hostname</h6>
+                    <div>${esc(data.hostname)}</div>
+                    <div class="text-muted small">${esc(data.os)}</div>
+                </div>
+                <div class="col-md-3">
+                    <h6>CPU (${data.cpu.count} cores)</h6>
+                    <div class="progress">
+                        <div class="progress-bar" role="progressbar" style="width: ${data.cpu.percent}%">${data.cpu.percent}%</div>
+                    </div>
+                </div>
+                <div class="col-md-3">
+                    <h6>Memory (${data.memory.used_gb}/${data.memory.total_gb} GB)</h6>
+                    <div class="progress">
+                        <div class="progress-bar" role="progressbar" style="width: ${data.memory.percent}%">${data.memory.percent}%</div>
+                    </div>
+                </div>
+                <div class="col-md-3">
+                    <h6>Disk (${data.disk.used_gb}/${data.disk.total_gb} GB)</h6>
+                    <div class="progress">
+                        <div class="progress-bar" role="progressbar" style="width: ${data.disk.percent}%">${data.disk.percent}%</div>
+                    </div>
+                </div>
+            </div>
+        `;
+    } catch (err) {
+        document.getElementById('monitoring-resources').innerHTML = `<div class="alert alert-danger">${err.message}</div>`;
+    }
+}
+
+async function loadAllCustomerStatuses() {
+    try {
+        const data = await api('GET', '/monitoring/customers');
+        const tbody = document.getElementById('monitoring-customers-body');
+        if (!data || data.length === 0) {
+            tbody.innerHTML = '<tr><td colspan="7" class="text-center">No customers.</td></tr>';
+            return;
+        }
+        tbody.innerHTML = data.map(c => {
+            const containerInfo = c.containers.map(ct => `${ct.name}: ${ct.status}`).join(', ') || '-';
+            return `
+                <tr>
+                    <td>${c.id}</td>
+                    <td>${esc(c.name)}</td>
+                    <td>${esc(c.subdomain)}</td>
+                    <td>${statusBadge(c.status)}</td>
+                    <td>${c.deployment_status ? statusBadge(c.deployment_status) : '-'}</td>
+                    <td>${c.relay_udp_port || '-'}</td>
+                    <td>${esc(containerInfo)}</td>
+                </tr>
+            `;
+        }).join('');
+    } catch (err) {
+        document.getElementById('monitoring-customers-body').innerHTML = `<tr><td colspan="7" class="text-danger">${err.message}</td></tr>`;
+    }
+}
+
+// ---------------------------------------------------------------------------
+// Helpers
+// ---------------------------------------------------------------------------
+function statusBadge(status) {
+    const map = {
+        active: 'success', running: 'success',
+        inactive: 'secondary', stopped: 'secondary',
+        deploying: 'info', pending: 'info',
+        error: 'danger', failed: 'danger',
+    };
+    const color = map[status] || 'secondary';
+    return `<span class="badge bg-${color}">${status}</span>`;
+}
+
+function formatDate(isoStr) {
+    if (!isoStr) return '-';
+    const d = new Date(isoStr);
+    return d.toLocaleDateString('de-DE') + ' ' + d.toLocaleTimeString('de-DE', { hour: '2-digit', minute: '2-digit' });
+}
+
+function esc(str) {
+    if (!str) return '';
+    const div = document.createElement('div');
+    div.textContent = str;
+    return div.innerHTML;
+}
+
+function debounce(fn, delay) {
+    let timer;
+    return function (...args) {
+        clearTimeout(timer);
+        timer = setTimeout(() => fn.apply(this, args), delay);
+    };
+}
+
+function showToast(message) {
+    // Simple inline notification
+    const el = document.createElement('div');
+    el.className = 'toast-notification';
+    el.textContent = message;
+    document.body.appendChild(el);
+    setTimeout(() => el.remove(), 3000);
+}
+
+// ---------------------------------------------------------------------------
+// Init
+// ---------------------------------------------------------------------------
+document.addEventListener('DOMContentLoaded', initApp);
diff --git a/templates/docker-compose.yml.j2 b/templates/docker-compose.yml.j2
new file mode 100644
index 0000000..b7c62ba
--- /dev/null
+++ b/templates/docker-compose.yml.j2
@@ -0,0 +1,67 @@
+version: '3.8'
+
+networks:
+  {{ docker_network }}:
+    external: true
+
+services:
+  netbird-management:
+    image: {{ netbird_management_image }}
+    container_name: netbird-kunde{{ customer_id }}-management
+    restart: unless-stopped
+    networks:
+      - {{ docker_network }}
+    volumes:
+      - {{ instance_dir }}/data/management:/var/lib/netbird
+      - {{ instance_dir }}/management.json:/etc/netbird/management.json
+    command:
+      - "--port"
+      - "80"
+      - "--log-file"
+      - "console"
+      - "--log-level"
+      - "info"
+      - "--single-account-mode-domain={{ subdomain }}.{{ base_domain }}"
+      - "--dns-domain={{ subdomain }}.{{ base_domain }}"
+    healthcheck:
+      test: ["CMD", "wget", "--spider", "-q", "http://localhost:80/api/accounts"]
+      interval: 30s
+      timeout: 5s
+      retries: 3
+      start_period: 15s
+
+  netbird-signal:
+    image: {{ netbird_signal_image }}
+    container_name: netbird-kunde{{ customer_id }}-signal
+    restart: unless-stopped
+    networks:
+      - {{ docker_network }}
+    volumes:
+      - {{ instance_dir }}/data/signal:/var/lib/netbird
+
+  netbird-relay:
+    image: {{ netbird_relay_image }}
+    container_name: netbird-kunde{{ customer_id }}-relay
+    restart: unless-stopped
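+    # Each customer instance exposes its relay on a dedicated host UDP port
+    # ({{ relay_udp_port }}, allocated by the appliance's port manager) mapped
+    # to 3478 inside the container; all other traffic reaches the stack via
+    # the reverse proxy on 443 (see NB_EXPOSED_ADDRESS below).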
networks: + - {{ docker_network }} + ports: + - "{{ relay_udp_port }}:3478/udp" + env_file: + - {{ instance_dir }}/relay.env + environment: + - NB_ENABLE_STUN=true + - NB_STUN_PORTS=3478 + - NB_LISTEN_ADDRESS=:80 + - NB_EXPOSED_ADDRESS=rels://{{ subdomain }}.{{ base_domain }}:443 + - NB_AUTH_SECRET={{ relay_secret }} + + netbird-dashboard: + image: {{ netbird_dashboard_image }} + container_name: netbird-kunde{{ customer_id }}-dashboard + restart: unless-stopped + networks: + - {{ docker_network }} + environment: + - NETBIRD_MGMT_API_ENDPOINT=https://{{ subdomain }}.{{ base_domain }} + - NETBIRD_MGMT_GRPC_API_ENDPOINT=https://{{ subdomain }}.{{ base_domain }} diff --git a/templates/management.json.j2 b/templates/management.json.j2 new file mode 100644 index 0000000..5211d72 --- /dev/null +++ b/templates/management.json.j2 @@ -0,0 +1,63 @@ +{ + "Stuns": [ + { + "Proto": "udp", + "URI": "stun:{{ subdomain }}.{{ base_domain }}:{{ relay_udp_port }}", + "Username": "", + "Password": null + } + ], + "TURNConfig": { + "Turns": [ + { + "Proto": "udp", + "URI": "turn:{{ subdomain }}.{{ base_domain }}:{{ relay_udp_port }}", + "Username": "netbird", + "Password": "{{ relay_secret }}" + } + ], + "CredentialsTTL": "12h", + "Secret": "{{ relay_secret }}", + "TimeBasedCredentials": false + }, + "Relay": { + "Addresses": [ + "rels://{{ subdomain }}.{{ base_domain }}:443" + ], + "CredentialsTTL": "12h", + "Secret": "{{ relay_secret }}" + }, + "Signal": { + "Proto": "https", + "URI": "{{ subdomain }}.{{ base_domain }}:443", + "Username": "", + "Password": null + }, + "HttpConfig": { + "AuthIssuer": "https://{{ subdomain }}.{{ base_domain }}", + "AuthAudience": "{{ subdomain }}.{{ base_domain }}", + "OIDCConfigEndpoint": "" + }, + "IdpManagerConfig": { + "ManagerType": "none" + }, + "DeviceAuthorizationFlow": { + "Provider": "none" + }, + "PKCEAuthorizationFlow": { + "ProviderConfig": { + "Audience": "{{ subdomain }}.{{ base_domain }}", + "ClientID": "", + "ClientSecret": "", + "Domain": "", + "AuthorizationEndpoint": "", + "TokenEndpoint": "", + "Scope": "openid profile email", + "RedirectURLs": [ + "https://{{ subdomain }}.{{ base_domain }}/auth/callback" + ], + "UseIDToken": false + } + }, + "DataStoreEncryptionKey": "{{ relay_secret }}" +} diff --git a/templates/relay.env.j2 b/templates/relay.env.j2 new file mode 100644 index 0000000..0bf6554 --- /dev/null +++ b/templates/relay.env.j2 @@ -0,0 +1,7 @@ +# NetBird Relay Environment - Customer {{ customer_id }} +# {{ subdomain }}.{{ base_domain }} +NB_AUTH_SECRET={{ relay_secret }} +NB_LISTEN_ADDRESS=:80 +NB_EXPOSED_ADDRESS=rels://{{ subdomain }}.{{ base_domain }}:443 +NB_ENABLE_STUN=true +NB_STUN_PORTS=3478 diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..93a0193 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,89 @@ +"""Shared test fixtures for the NetBird MSP Appliance test suite.""" + +import os +import pytest +from sqlalchemy import create_engine, event +from sqlalchemy.orm import sessionmaker + +# Override env vars BEFORE importing app modules +os.environ["SECRET_KEY"] = "test-secret-key-for-unit-tests" +os.environ["DATABASE_PATH"] = ":memory:" + +from app.database import Base +from app.models import Customer, Deployment, DeploymentLog, SystemConfig, User +from app.utils.security import hash_password, encrypt_value + + +@pytest.fixture() +def db_session(): + """Create an in-memory SQLite database session for tests.""" + 
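+    # One throwaway in-memory engine per test keeps the suite isolated; the
+    # connect hook below mirrors the PRAGMA foreign_keys=ON behaviour of the
+    # production engine in app/database.py.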
engine = create_engine("sqlite:///:memory:", connect_args={"check_same_thread": False}) + + @event.listens_for(engine, "connect") + def _set_pragma(dbapi_conn, _): + cursor = dbapi_conn.cursor() + cursor.execute("PRAGMA foreign_keys=ON") + cursor.close() + + Base.metadata.create_all(bind=engine) + Session = sessionmaker(bind=engine) + session = Session() + + # Seed admin user + admin = User( + username="admin", + password_hash=hash_password("testpassword123"), + email="admin@test.com", + ) + session.add(admin) + + # Seed system config + config = SystemConfig( + id=1, + base_domain="test.example.com", + admin_email="admin@test.com", + npm_api_url="http://localhost:81/api", + npm_api_token_encrypted=encrypt_value("test-npm-token"), + data_dir="/tmp/netbird-test", + docker_network="test-network", + relay_base_port=3478, + ) + session.add(config) + session.commit() + + yield session + session.close() + + +@pytest.fixture() +def sample_customer(db_session): + """Create and return a sample customer.""" + customer = Customer( + name="Test Customer", + company="Test Corp", + subdomain="testcust", + email="test@example.com", + max_devices=20, + status="active", + ) + db_session.add(customer) + db_session.commit() + db_session.refresh(customer) + return customer + + +@pytest.fixture() +def sample_deployment(db_session, sample_customer): + """Create and return a sample deployment for the sample customer.""" + deployment = Deployment( + customer_id=sample_customer.id, + container_prefix=f"netbird-kunde{sample_customer.id}", + relay_udp_port=3478, + relay_secret=encrypt_value("test-relay-secret"), + setup_url=f"https://testcust.test.example.com", + deployment_status="running", + ) + db_session.add(deployment) + db_session.commit() + db_session.refresh(deployment) + return deployment diff --git a/tests/test_customer_api.py b/tests/test_customer_api.py new file mode 100644 index 0000000..50d602d --- /dev/null +++ b/tests/test_customer_api.py @@ -0,0 +1,220 @@ +"""Unit and API tests for customer management.""" + +import os +import pytest +from unittest.mock import patch, AsyncMock, MagicMock + +os.environ["SECRET_KEY"] = "test-secret-key-for-unit-tests" +os.environ["DATABASE_PATH"] = ":memory:" + +from fastapi.testclient import TestClient +from sqlalchemy import create_engine, event +from sqlalchemy.orm import sessionmaker + +from app.database import Base, get_db +from app.main import app +from app.models import Customer, User, SystemConfig +from app.utils.security import hash_password, encrypt_value +from app.dependencies import get_current_user + + +# --------------------------------------------------------------------------- +# Test fixtures +# --------------------------------------------------------------------------- +@pytest.fixture() +def test_db(): + """Create a test database.""" + engine = create_engine("sqlite:///:memory:", connect_args={"check_same_thread": False}) + + @event.listens_for(engine, "connect") + def _set_pragma(dbapi_conn, _): + cursor = dbapi_conn.cursor() + cursor.execute("PRAGMA foreign_keys=ON") + cursor.close() + + Base.metadata.create_all(bind=engine) + Session = sessionmaker(bind=engine) + session = Session() + + # Seed data + admin = User(username="admin", password_hash=hash_password("testpassword123"), email="admin@test.com") + session.add(admin) + + config = SystemConfig( + id=1, + base_domain="test.example.com", + admin_email="admin@test.com", + npm_api_url="http://localhost:81/api", + npm_api_token_encrypted=encrypt_value("test-npm-token"), + ) + session.add(config) + 
session.commit() + + yield session + session.close() + + +@pytest.fixture() +def client(test_db): + """Create a test client with overridden dependencies.""" + admin = test_db.query(User).filter(User.username == "admin").first() + + def override_get_db(): + yield test_db + + def override_get_user(): + return admin + + app.dependency_overrides[get_db] = override_get_db + app.dependency_overrides[get_current_user] = override_get_user + + with TestClient(app) as c: + yield c + + app.dependency_overrides.clear() + + +# --------------------------------------------------------------------------- +# Tests +# --------------------------------------------------------------------------- +class TestCustomerList: + """Tests for GET /api/customers.""" + + def test_empty_list(self, client: TestClient): + """List returns empty when no customers exist.""" + resp = client.get("/api/customers") + assert resp.status_code == 200 + data = resp.json() + assert data["total"] == 0 + assert data["items"] == [] + + def test_list_with_customers(self, client: TestClient, test_db): + """List returns customers after creating them.""" + for i in range(3): + test_db.add(Customer(name=f"Customer {i}", subdomain=f"cust{i}", email=f"c{i}@test.com")) + test_db.commit() + + resp = client.get("/api/customers") + assert resp.status_code == 200 + data = resp.json() + assert data["total"] == 3 + assert len(data["items"]) == 3 + + def test_search_filter(self, client: TestClient, test_db): + """Search filters customers by name/subdomain/email.""" + test_db.add(Customer(name="Alpha Corp", subdomain="alpha", email="alpha@test.com")) + test_db.add(Customer(name="Beta Inc", subdomain="beta", email="beta@test.com")) + test_db.commit() + + resp = client.get("/api/customers?search=alpha") + data = resp.json() + assert data["total"] == 1 + assert data["items"][0]["name"] == "Alpha Corp" + + def test_status_filter(self, client: TestClient, test_db): + """Status filter returns only matching customers.""" + test_db.add(Customer(name="Active", subdomain="active1", email="a@t.com", status="active")) + test_db.add(Customer(name="Error", subdomain="error1", email="e@t.com", status="error")) + test_db.commit() + + resp = client.get("/api/customers?status=error") + data = resp.json() + assert data["total"] == 1 + assert data["items"][0]["status"] == "error" + + +class TestCustomerCreate: + """Tests for POST /api/customers.""" + + @patch("app.services.netbird_service.deploy_customer", new_callable=AsyncMock) + def test_create_customer(self, mock_deploy, client: TestClient): + """Creating a customer returns 201 and triggers deployment.""" + mock_deploy.return_value = {"success": True, "setup_url": "https://new.test.example.com"} + + resp = client.post("/api/customers", json={ + "name": "New Customer", + "subdomain": "newcust", + "email": "new@test.com", + }) + assert resp.status_code == 201 + data = resp.json() + assert data["name"] == "New Customer" + assert data["subdomain"] == "newcust" + + def test_duplicate_subdomain(self, client: TestClient, test_db): + """Duplicate subdomain returns 409.""" + test_db.add(Customer(name="Existing", subdomain="taken", email="e@test.com")) + test_db.commit() + + resp = client.post("/api/customers", json={ + "name": "Another", + "subdomain": "taken", + "email": "a@test.com", + }) + assert resp.status_code == 409 + + def test_invalid_subdomain(self, client: TestClient): + """Invalid subdomain format returns 422.""" + resp = client.post("/api/customers", json={ + "name": "Bad", + "subdomain": "UPPER_CASE!", + "email": 
"b@test.com", + }) + assert resp.status_code == 422 + + def test_invalid_email(self, client: TestClient): + """Invalid email returns 422.""" + resp = client.post("/api/customers", json={ + "name": "Bad Email", + "subdomain": "bademail", + "email": "not-an-email", + }) + assert resp.status_code == 422 + + +class TestCustomerDetail: + """Tests for GET/PUT/DELETE /api/customers/{id}.""" + + def test_get_customer(self, client: TestClient, test_db): + """Get customer returns full details.""" + cust = Customer(name="Detail Test", subdomain="detail", email="d@test.com") + test_db.add(cust) + test_db.commit() + test_db.refresh(cust) + + resp = client.get(f"/api/customers/{cust.id}") + assert resp.status_code == 200 + assert resp.json()["name"] == "Detail Test" + + def test_get_nonexistent(self, client: TestClient): + """Get nonexistent customer returns 404.""" + resp = client.get("/api/customers/999") + assert resp.status_code == 404 + + def test_update_customer(self, client: TestClient, test_db): + """Update customer fields.""" + cust = Customer(name="Before", subdomain="update1", email="u@test.com") + test_db.add(cust) + test_db.commit() + test_db.refresh(cust) + + resp = client.put(f"/api/customers/{cust.id}", json={"name": "After"}) + assert resp.status_code == 200 + assert resp.json()["name"] == "After" + + @patch("app.services.netbird_service.undeploy_customer", new_callable=AsyncMock) + def test_delete_customer(self, mock_undeploy, client: TestClient, test_db): + """Delete customer returns success.""" + mock_undeploy.return_value = {"success": True} + + cust = Customer(name="ToDelete", subdomain="del1", email="del@test.com") + test_db.add(cust) + test_db.commit() + test_db.refresh(cust) + + resp = client.delete(f"/api/customers/{cust.id}") + assert resp.status_code == 200 + + # Verify deleted + resp = client.get(f"/api/customers/{cust.id}") + assert resp.status_code == 404 diff --git a/tests/test_deployment.py b/tests/test_deployment.py new file mode 100644 index 0000000..81a69b2 --- /dev/null +++ b/tests/test_deployment.py @@ -0,0 +1,174 @@ +"""Integration tests for the deployment workflow.""" + +import os +import pytest +from unittest.mock import patch, AsyncMock, MagicMock + +os.environ["SECRET_KEY"] = "test-secret-key-for-unit-tests" +os.environ["DATABASE_PATH"] = ":memory:" + +from app.models import Customer, Deployment, DeploymentLog +from app.services import netbird_service + + +class TestDeploymentWorkflow: + """Tests for the full deploy/undeploy lifecycle.""" + + @patch("app.services.netbird_service.docker_service") + @patch("app.services.netbird_service.npm_service") + @patch("app.services.netbird_service.port_manager") + @pytest.mark.asyncio + async def test_successful_deployment( + self, mock_port_mgr, mock_npm, mock_docker, db_session, sample_customer + ): + """Full deployment creates containers, NPM entry, and DB records.""" + mock_port_mgr.allocate_port.return_value = 3478 + mock_docker.compose_up.return_value = True + mock_docker.wait_for_healthy.return_value = True + mock_npm.create_proxy_host = AsyncMock(return_value={"proxy_id": 42}) + + # Create temp dir for templates + os.makedirs("/tmp/netbird-test", exist_ok=True) + + result = await netbird_service.deploy_customer(db_session, sample_customer.id) + + assert result["success"] is True + assert "setup_url" in result + assert result["setup_url"].startswith("https://") + + # Verify deployment record created + dep = db_session.query(Deployment).filter( + Deployment.customer_id == sample_customer.id + ).first() + assert dep 
is not None + assert dep.deployment_status == "running" + assert dep.relay_udp_port == 3478 + + # Verify customer status updated + db_session.refresh(sample_customer) + assert sample_customer.status == "active" + + @patch("app.services.netbird_service.docker_service") + @patch("app.services.netbird_service.npm_service") + @patch("app.services.netbird_service.port_manager") + @pytest.mark.asyncio + async def test_deployment_rollback_on_docker_failure( + self, mock_port_mgr, mock_npm, mock_docker, db_session, sample_customer + ): + """Failed docker compose up triggers rollback.""" + mock_port_mgr.allocate_port.return_value = 3479 + mock_docker.compose_up.side_effect = RuntimeError("Docker compose failed") + mock_docker.compose_down.return_value = True + + os.makedirs("/tmp/netbird-test", exist_ok=True) + + result = await netbird_service.deploy_customer(db_session, sample_customer.id) + + assert result["success"] is False + assert "Docker compose failed" in result["error"] + + # Verify rollback + db_session.refresh(sample_customer) + assert sample_customer.status == "error" + + # Verify error log + logs = db_session.query(DeploymentLog).filter( + DeploymentLog.customer_id == sample_customer.id, + DeploymentLog.status == "error", + ).all() + assert len(logs) >= 1 + + @patch("app.services.netbird_service.docker_service") + @patch("app.services.netbird_service.npm_service") + @pytest.mark.asyncio + async def test_undeploy_customer( + self, mock_npm, mock_docker, db_session, sample_customer, sample_deployment + ): + """Undeployment removes containers, NPM entry, and cleans up.""" + mock_docker.compose_down.return_value = True + mock_npm.delete_proxy_host = AsyncMock(return_value=True) + + result = await netbird_service.undeploy_customer(db_session, sample_customer.id) + + assert result["success"] is True + + # Verify deployment record removed + dep = db_session.query(Deployment).filter( + Deployment.customer_id == sample_customer.id + ).first() + assert dep is None + + +class TestStartStopRestart: + """Tests for start/stop/restart operations.""" + + @patch("app.services.netbird_service.docker_service") + def test_stop_customer(self, mock_docker, db_session, sample_customer, sample_deployment): + """Stop sets deployment_status to stopped.""" + mock_docker.compose_stop.return_value = True + + result = netbird_service.stop_customer(db_session, sample_customer.id) + assert result["success"] is True + + db_session.refresh(sample_deployment) + assert sample_deployment.deployment_status == "stopped" + + @patch("app.services.netbird_service.docker_service") + def test_start_customer(self, mock_docker, db_session, sample_customer, sample_deployment): + """Start sets deployment_status to running.""" + mock_docker.compose_start.return_value = True + + result = netbird_service.start_customer(db_session, sample_customer.id) + assert result["success"] is True + + db_session.refresh(sample_deployment) + assert sample_deployment.deployment_status == "running" + + @patch("app.services.netbird_service.docker_service") + def test_restart_customer(self, mock_docker, db_session, sample_customer, sample_deployment): + """Restart sets deployment_status to running.""" + mock_docker.compose_restart.return_value = True + + result = netbird_service.restart_customer(db_session, sample_customer.id) + assert result["success"] is True + + db_session.refresh(sample_deployment) + assert sample_deployment.deployment_status == "running" + + def test_stop_nonexistent_deployment(self, db_session, sample_customer): + """Stop fails 
gracefully when no deployment exists.""" + result = netbird_service.stop_customer(db_session, sample_customer.id) + assert result["success"] is False + + +class TestHealthCheck: + """Tests for health check functionality.""" + + @patch("app.services.netbird_service.docker_service") + def test_healthy_deployment(self, mock_docker, db_session, sample_customer, sample_deployment): + """Health check returns healthy when all containers are running.""" + mock_docker.get_container_status.return_value = [ + {"name": "netbird-kunde1-management", "status": "running", "health": "healthy", "image": "test", "created": ""}, + {"name": "netbird-kunde1-signal", "status": "running", "health": "N/A", "image": "test", "created": ""}, + ] + + result = netbird_service.get_customer_health(db_session, sample_customer.id) + assert result["healthy"] is True + assert len(result["containers"]) == 2 + + @patch("app.services.netbird_service.docker_service") + def test_unhealthy_deployment(self, mock_docker, db_session, sample_customer, sample_deployment): + """Health check returns unhealthy when a container is stopped.""" + mock_docker.get_container_status.return_value = [ + {"name": "netbird-kunde1-management", "status": "running", "health": "healthy", "image": "test", "created": ""}, + {"name": "netbird-kunde1-signal", "status": "exited", "health": "N/A", "image": "test", "created": ""}, + ] + + result = netbird_service.get_customer_health(db_session, sample_customer.id) + assert result["healthy"] is False + + def test_health_no_deployment(self, db_session, sample_customer): + """Health check handles missing deployment.""" + result = netbird_service.get_customer_health(db_session, sample_customer.id) + assert result["healthy"] is False + assert "No deployment" in result["error"] diff --git a/tests/test_docker_service.py b/tests/test_docker_service.py new file mode 100644 index 0000000..0de2770 --- /dev/null +++ b/tests/test_docker_service.py @@ -0,0 +1,155 @@ +"""Unit tests for the Docker service and port manager.""" + +import os +import pytest +from unittest.mock import patch, MagicMock + +os.environ["SECRET_KEY"] = "test-secret-key-for-unit-tests" +os.environ["DATABASE_PATH"] = ":memory:" + +from app.services import docker_service, port_manager +from app.models import Deployment + + +class TestPortManager: + """Tests for UDP port allocation.""" + + def test_allocate_first_port(self, db_session): + """First allocation returns base port.""" + port = port_manager.allocate_port(db_session, base_port=3478) + assert port == 3478 + + def test_allocate_skips_used_ports(self, db_session, sample_deployment): + """Allocation skips ports already in the database.""" + # sample_deployment uses port 3478 + port = port_manager.allocate_port(db_session, base_port=3478) + assert port == 3479 + + def test_allocate_raises_when_full(self, db_session): + """Allocation raises RuntimeError when all ports are used.""" + # Fill all ports + for i in range(100): + db_session.add(Deployment( + customer_id=1000 + i, + container_prefix=f"test-{i}", + relay_udp_port=3478 + i, + relay_secret="secret", + deployment_status="running", + )) + db_session.commit() + + with pytest.raises(RuntimeError, match="No available relay ports"): + port_manager.allocate_port(db_session, base_port=3478, max_ports=100) + + def test_get_allocated_ports(self, db_session, sample_deployment): + """Returns set of allocated ports.""" + ports = port_manager.get_allocated_ports(db_session) + assert 3478 in ports + + def test_validate_port_available(self, db_session): + 
"""Available port returns True.""" + assert port_manager.validate_port_available(db_session, 3500) is True + + def test_validate_port_taken(self, db_session, sample_deployment): + """Allocated port returns False.""" + assert port_manager.validate_port_available(db_session, 3478) is False + + +class TestDockerService: + """Tests for Docker container management.""" + + @patch("app.services.docker_service.subprocess.run") + def test_compose_up_success(self, mock_run, tmp_path): + """compose_up succeeds when docker compose returns 0.""" + compose_file = tmp_path / "docker-compose.yml" + compose_file.write_text("version: '3.8'\nservices: {}") + mock_run.return_value = MagicMock(returncode=0, stdout="", stderr="") + + result = docker_service.compose_up(str(tmp_path), "test-project") + assert result is True + mock_run.assert_called_once() + + @patch("app.services.docker_service.subprocess.run") + def test_compose_up_failure(self, mock_run, tmp_path): + """compose_up raises RuntimeError on failure.""" + compose_file = tmp_path / "docker-compose.yml" + compose_file.write_text("version: '3.8'\nservices: {}") + mock_run.return_value = MagicMock(returncode=1, stderr="Some error") + + with pytest.raises(RuntimeError, match="docker compose up failed"): + docker_service.compose_up(str(tmp_path), "test-project") + + def test_compose_up_missing_file(self, tmp_path): + """compose_up raises FileNotFoundError when compose file is missing.""" + with pytest.raises(FileNotFoundError): + docker_service.compose_up(str(tmp_path), "test-project") + + @patch("app.services.docker_service.subprocess.run") + def test_compose_stop(self, mock_run, tmp_path): + """compose_stop returns True on success.""" + compose_file = tmp_path / "docker-compose.yml" + compose_file.write_text("") + mock_run.return_value = MagicMock(returncode=0) + + result = docker_service.compose_stop(str(tmp_path), "test-project") + assert result is True + + @patch("app.services.docker_service._get_client") + def test_get_container_status(self, mock_get_client): + """get_container_status returns formatted container info.""" + mock_container = MagicMock() + mock_container.name = "netbird-kunde1-management" + mock_container.status = "running" + mock_container.attrs = {"State": {"Health": {"Status": "healthy"}}, "Created": "2024-01-01"} + mock_container.image.tags = ["netbirdio/management:latest"] + + mock_client = MagicMock() + mock_client.containers.list.return_value = [mock_container] + mock_get_client.return_value = mock_client + + result = docker_service.get_container_status("netbird-kunde1") + assert len(result) == 1 + assert result[0]["name"] == "netbird-kunde1-management" + assert result[0]["status"] == "running" + assert result[0]["health"] == "healthy" + + @patch("app.services.docker_service._get_client") + def test_get_container_logs(self, mock_get_client): + """get_container_logs returns log text.""" + mock_container = MagicMock() + mock_container.logs.return_value = b"2024-01-01 12:00:00 Started\n" + + mock_client = MagicMock() + mock_client.containers.get.return_value = mock_container + mock_get_client.return_value = mock_client + + result = docker_service.get_container_logs("netbird-kunde1-management") + assert "Started" in result + + @patch("app.services.docker_service._get_client") + def test_get_container_logs_not_found(self, mock_get_client): + """get_container_logs handles missing container.""" + from docker.errors import NotFound + mock_client = MagicMock() + mock_client.containers.get.side_effect = NotFound("not found") + 
mock_get_client.return_value = mock_client + + result = docker_service.get_container_logs("nonexistent") + assert "not found" in result + + @patch("app.services.docker_service._get_client") + def test_remove_instance_containers(self, mock_get_client): + """remove_instance_containers force-removes all matching containers.""" + mock_c1 = MagicMock() + mock_c1.name = "netbird-kunde1-management" + mock_c2 = MagicMock() + mock_c2.name = "netbird-kunde1-signal" + + mock_client = MagicMock() + mock_client.containers.list.return_value = [mock_c1, mock_c2] + mock_get_client.return_value = mock_client + + result = docker_service.remove_instance_containers("netbird-kunde1") + assert result is True + mock_c1.remove.assert_called_once_with(force=True) + mock_c2.remove.assert_called_once_with(force=True)
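+
+# Note: the async tests in tests/test_deployment.py use @pytest.mark.asyncio,
+# so running this suite assumes the pytest-asyncio plugin is installed, e.g.:
+#   pytest tests/ -v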