Compare commits
7 Commits
f02ddf42aa
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2f0e1d586c | ||
|
|
8c2d15225b | ||
|
|
81be9c5e42 | ||
|
|
95633a6722 | ||
|
|
731a833075 | ||
|
|
6279347e4b | ||
|
|
5def26e0df |
1
.gitattributes
vendored
Normal file
1
.gitattributes
vendored
Normal file
@@ -0,0 +1 @@
|
||||
*.sh text eol=lf
|
||||
99
.github/workflows/ci.yml
vendored
Normal file
99
.github/workflows/ci.yml
vendored
Normal file
@@ -0,0 +1,99 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- "**"
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
frontend:
|
||||
name: Frontend Build + Lint
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Node
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 22
|
||||
cache: npm
|
||||
|
||||
- name: Install frontend dependencies
|
||||
run: npm ci
|
||||
|
||||
- name: Lint frontend
|
||||
run: npm run lint
|
||||
|
||||
- name: Build frontend
|
||||
run: npm run build
|
||||
|
||||
backend:
|
||||
name: Backend Build + Test + Prisma Checks
|
||||
runs-on: ubuntu-latest
|
||||
services:
|
||||
postgres:
|
||||
image: postgres:16-alpine
|
||||
env:
|
||||
POSTGRES_USER: proxpanel
|
||||
POSTGRES_PASSWORD: proxpanel
|
||||
POSTGRES_DB: proxpanel
|
||||
ports:
|
||||
- 5432:5432
|
||||
options: >-
|
||||
--health-cmd "pg_isready -U proxpanel -d proxpanel"
|
||||
--health-interval 10s
|
||||
--health-timeout 5s
|
||||
--health-retries 5
|
||||
env:
|
||||
DATABASE_URL: postgresql://proxpanel:proxpanel@localhost:5432/proxpanel?schema=public
|
||||
SHADOW_DATABASE_URL: postgresql://proxpanel:proxpanel@localhost:5432/proxpanel_shadow?schema=public
|
||||
JWT_SECRET: ci_super_secret_key_for_testing_12345
|
||||
JWT_REFRESH_SECRET: ci_super_refresh_secret_key_67890
|
||||
CORS_ORIGIN: http://localhost:5173
|
||||
NODE_ENV: test
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Node
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 22
|
||||
cache: npm
|
||||
cache-dependency-path: backend/package-lock.json
|
||||
|
||||
- name: Install backend dependencies
|
||||
working-directory: backend
|
||||
run: npm ci
|
||||
|
||||
- name: Prepare shadow database
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y postgresql-client
|
||||
PGPASSWORD=proxpanel psql -h localhost -U proxpanel -d proxpanel -c 'CREATE DATABASE proxpanel_shadow;'
|
||||
|
||||
- name: Prisma generate
|
||||
working-directory: backend
|
||||
run: npm run prisma:generate
|
||||
|
||||
- name: Prisma validate
|
||||
working-directory: backend
|
||||
run: npm run prisma:validate
|
||||
|
||||
- name: Prisma migrate deploy
|
||||
working-directory: backend
|
||||
run: npm run prisma:deploy
|
||||
|
||||
- name: Prisma migration drift check
|
||||
working-directory: backend
|
||||
run: npx prisma migrate diff --from-migrations prisma/migrations --to-schema-datamodel prisma/schema.prisma --shadow-database-url "$SHADOW_DATABASE_URL" --exit-code
|
||||
|
||||
- name: Build backend
|
||||
working-directory: backend
|
||||
run: npm run build
|
||||
|
||||
- name: Run backend tests
|
||||
working-directory: backend
|
||||
run: npm run test
|
||||
47
.gitignore
vendored
Normal file
47
.gitignore
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
# dependencies
|
||||
node_modules
|
||||
|
||||
# build outputs
|
||||
dist
|
||||
dist-ssr
|
||||
.vite
|
||||
backend/dist
|
||||
|
||||
# environment
|
||||
.env
|
||||
.env.*
|
||||
backend/.env
|
||||
backend/.env.*
|
||||
|
||||
# logs
|
||||
logs
|
||||
*.log
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
pnpm-debug.log*
|
||||
|
||||
# ide
|
||||
.vscode/*
|
||||
!.vscode/extensions.json
|
||||
.idea
|
||||
.DS_Store
|
||||
*.suo
|
||||
*.njsproj
|
||||
*.sln
|
||||
*.sw?
|
||||
|
||||
# deployment artifacts / backups
|
||||
backups/
|
||||
_deploy_bundle.tar.gz
|
||||
|
||||
# local secret/material files
|
||||
Proxmox_API_Token.txt
|
||||
myProx_template_ssh_key.txt
|
||||
more_dev_work.txt
|
||||
audit.md
|
||||
proxpanel-report.md
|
||||
gitvotcloud_repo_credentials.txt
|
||||
*_repo_credentials.txt
|
||||
*credentials*.txt
|
||||
*token*.txt
|
||||
5
.vscode/extensions.json
vendored
Normal file
5
.vscode/extensions.json
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"recommendations": [
|
||||
"codeium.codeium"
|
||||
]
|
||||
}
|
||||
72
API.md
Normal file
72
API.md
Normal file
@@ -0,0 +1,72 @@
|
||||
# API Documentation (Core)
|
||||
|
||||
Base URL: `http://<host>:8080`
|
||||
|
||||
## Health
|
||||
|
||||
- `GET /api/health`
|
||||
|
||||
## Auth
|
||||
|
||||
- `POST /api/auth/login`
|
||||
- Body: `{ "email": "user@example.com", "password": "..." }`
|
||||
- Returns: `{ token, refresh_token, user }`
|
||||
- `POST /api/auth/refresh`
|
||||
- Body: `{ "refresh_token": "..." }`
|
||||
- Returns: `{ token, refresh_token }`
|
||||
- `GET /api/auth/me` (Bearer token)
|
||||
|
||||
## Proxmox Operations
|
||||
|
||||
- `POST /api/proxmox/sync`
|
||||
- `POST /api/proxmox/vms/:id/actions/:action`
|
||||
- `POST /api/proxmox/vms/:id/migrate`
|
||||
- `PATCH /api/proxmox/vms/:id/config`
|
||||
- `PATCH /api/proxmox/vms/:id/network`
|
||||
- `POST /api/proxmox/vms/:id/disks`
|
||||
- `POST /api/proxmox/vms/:id/reinstall`
|
||||
- `GET /api/proxmox/vms/:id/console`
|
||||
- `GET /api/proxmox/vms/:id/usage-graphs?timeframe=hour|day|week|month|year`
|
||||
- `GET /api/proxmox/nodes/:id/usage-graphs?timeframe=hour|day|week|month|year`
|
||||
- `GET /api/proxmox/cluster/usage-graphs?timeframe=hour|day|week|month|year`
|
||||
|
||||
## Resources API
|
||||
|
||||
Generic secured resource endpoints:
|
||||
|
||||
- `GET /api/resources/:resource`
|
||||
- `GET /api/resources/:resource/:id`
|
||||
- `POST /api/resources/:resource`
|
||||
- `PATCH /api/resources/:resource/:id`
|
||||
- `DELETE /api/resources/:resource/:id`
|
||||
|
||||
Tenant scope protections are enforced for tenant-scoped resources.
|
||||
|
||||
## Client Area
|
||||
|
||||
- `GET /api/client/overview`
|
||||
- `GET /api/client/usage-trends`
|
||||
- `GET /api/client/machines`
|
||||
- `POST /api/client/machines`
|
||||
- `PATCH /api/client/machines/:vmId/resources`
|
||||
- `POST /api/client/machines/:vmId/power-schedules`
|
||||
- `POST /api/client/machines/:vmId/backup-schedules`
|
||||
- `GET /api/client/firewall/rules`
|
||||
- `POST /api/client/firewall/rules`
|
||||
- `PATCH /api/client/firewall/rules/:id`
|
||||
- `DELETE /api/client/firewall/rules/:id`
|
||||
|
||||
## Monitoring
|
||||
|
||||
- `GET /api/monitoring/overview`
|
||||
- `GET /api/monitoring/health-checks`
|
||||
- `POST /api/monitoring/health-checks`
|
||||
- `GET /api/monitoring/alerts/events`
|
||||
- `GET /api/monitoring/insights/faulty-deployments`
|
||||
- `GET /api/monitoring/insights/cluster-forecast`
|
||||
|
||||
## Rate Limiting
|
||||
|
||||
- Global API rate limiting is enabled.
|
||||
- Auth endpoints use stricter limits.
|
||||
- When exceeded, API returns HTTP `429`.
|
||||
129
DEPLOYMENT.md
Normal file
129
DEPLOYMENT.md
Normal file
@@ -0,0 +1,129 @@
|
||||
# ProxPanel Deployment Guide (Production Ubuntu)
|
||||
|
||||
## 1) Hands-Free Production Install (Recommended)
|
||||
|
||||
Run this on your Ubuntu server:
|
||||
|
||||
```bash
|
||||
sudo apt-get update -y
|
||||
sudo apt-get install -y git
|
||||
git clone <YOUR_REPO_URL> /opt/proxpanel
|
||||
cd /opt/proxpanel
|
||||
sudo bash infra/deploy/install-proxpanel.sh \
|
||||
--branch main \
|
||||
--public-url http://102.69.243.167 \
|
||||
--admin-email admin@yourdomain.com \
|
||||
--configure-ufw
|
||||
```
|
||||
|
||||
If the repo already exists on the server, just run:
|
||||
|
||||
```bash
|
||||
cd /opt/proxpanel
|
||||
sudo bash infra/deploy/install-proxpanel.sh \
|
||||
--branch main \
|
||||
--public-url http://102.69.243.167 \
|
||||
--admin-email admin@yourdomain.com \
|
||||
--configure-ufw
|
||||
```
|
||||
|
||||
Installer behavior:
|
||||
- Installs Docker + prerequisites.
|
||||
- Builds and starts PostgreSQL, backend, frontend.
|
||||
- Applies Prisma schema (`prisma:deploy`, fallback to `prisma:push`).
|
||||
- Seeds admin user.
|
||||
- Verifies API health and login.
|
||||
- Writes deployment summary to `/root/proxpanel-install-summary.txt`.
|
||||
|
||||
## 2) Fast Production Checks
|
||||
|
||||
```bash
|
||||
cd /opt/proxpanel
|
||||
docker compose --env-file .env.production -f infra/deploy/docker-compose.production.yml ps
|
||||
curl -fsS http://127.0.0.1:8080/api/health
|
||||
curl -I http://102.69.243.167
|
||||
```
|
||||
|
||||
## 3) Connect Proxmox Cluster In App
|
||||
|
||||
### A. Create Proxmox API token
|
||||
In Proxmox UI:
|
||||
1. Open `Datacenter -> Permissions -> API Tokens`.
|
||||
2. Select your user (for example `root@pam` or a dedicated service user).
|
||||
3. Click `Add`.
|
||||
4. Set `Token ID` (example: `proxpanel`).
|
||||
5. Copy the generated token secret immediately.
|
||||
|
||||
### B. Save credentials in ProxPanel
|
||||
In ProxPanel UI:
|
||||
1. Login as admin.
|
||||
2. Go to `Settings -> Proxmox`.
|
||||
3. Fill:
|
||||
- `Host`: Proxmox hostname or IP (no `https://` prefix)
|
||||
- `Port`: `8006`
|
||||
- `Username`: e.g. `root@pam`
|
||||
- `Token ID`: e.g. `proxpanel`
|
||||
- `Token Secret`: generated secret
|
||||
- `Verify SSL`: enabled if Proxmox cert is trusted; disable only if using self-signed cert temporarily
|
||||
4. Click `Save Proxmox`.
|
||||
|
||||
### C. Trigger first sync
|
||||
Use API once to import nodes/VMs:
|
||||
|
||||
```bash
|
||||
APP_URL="http://102.69.243.167"
|
||||
ADMIN_EMAIL="admin@yourdomain.com"
|
||||
ADMIN_PASSWORD="<YOUR_ADMIN_PASSWORD>"
|
||||
|
||||
TOKEN=$(curl -s -X POST "$APP_URL/api/auth/login" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"email\":\"$ADMIN_EMAIL\",\"password\":\"$ADMIN_PASSWORD\"}" | jq -r '.token')
|
||||
|
||||
curl -s -X POST "$APP_URL/api/proxmox/sync" \
|
||||
-H "Authorization: Bearer $TOKEN" \
|
||||
-H "Content-Type: application/json"
|
||||
```
|
||||
|
||||
Then confirm:
|
||||
- `Nodes` page shows imported nodes.
|
||||
- Dashboard cards and usage graphs populate.
|
||||
|
||||
## 4) Security Hardening Checklist
|
||||
|
||||
- Set a DNS name and terminate TLS (Nginx/Caddy/Cloudflare).
|
||||
- Change the seeded admin password immediately.
|
||||
- Keep `CORS_ORIGIN` set to your real public URL only.
|
||||
- Use a dedicated Proxmox API user/token with least privileges.
|
||||
- Keep backend bound to localhost (`127.0.0.1`) and expose only frontend port.
|
||||
- Enable off-host backups for DB and app config.
|
||||
|
||||
## 5) PAT-Only Git Update Workflow (No Password Auth)
|
||||
|
||||
Use Personal Access Token (PAT) authentication only. Do not use account passwords for Git pull/push.
|
||||
|
||||
### A. Create PAT (Git server)
|
||||
1. Sign in to your Git server user settings.
|
||||
2. Create a PAT with minimum required scopes (`repo:read` for pull; add write only if needed).
|
||||
3. Save it securely (password manager/secret vault).
|
||||
|
||||
### B. Update app on server with PAT (no credential persistence)
|
||||
Run this on the server:
|
||||
|
||||
```bash
|
||||
cd /opt/proxpanel
|
||||
chmod +x infra/deploy/git-pat-sync.sh
|
||||
export GIT_USERNAME="your_git_username"
|
||||
export GIT_PAT="your_personal_access_token"
|
||||
bash infra/deploy/git-pat-sync.sh \
|
||||
--repo-dir /opt/proxpanel \
|
||||
--branch main \
|
||||
--repo-url https://git.votcloud.com/austindebest/proxpanel.git
|
||||
unset GIT_PAT
|
||||
```
|
||||
|
||||
Then deploy:
|
||||
|
||||
```bash
|
||||
cd /opt/proxpanel
|
||||
docker compose --env-file .env.production -f infra/deploy/docker-compose.production.yml up -d --build
|
||||
```
|
||||
15
Dockerfile
Normal file
15
Dockerfile
Normal file
@@ -0,0 +1,15 @@
|
||||
FROM node:22-alpine AS build
|
||||
WORKDIR /app
|
||||
COPY package.json package-lock.json* ./
|
||||
RUN npm ci
|
||||
COPY . .
|
||||
ARG VITE_API_BASE_URL=http://localhost:8080
|
||||
ENV VITE_API_BASE_URL=${VITE_API_BASE_URL}
|
||||
RUN npm run build
|
||||
|
||||
FROM nginx:1.27-alpine AS runtime
|
||||
WORKDIR /usr/share/nginx/html
|
||||
COPY --from=build /app/dist ./
|
||||
COPY infra/nginx/default.conf /etc/nginx/conf.d/default.conf
|
||||
EXPOSE 80
|
||||
CMD ["nginx", "-g", "daemon off;"]
|
||||
73
SETUP.md
Normal file
73
SETUP.md
Normal file
@@ -0,0 +1,73 @@
|
||||
# Setup Guide
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Node.js 22+
|
||||
- npm 10+
|
||||
- PostgreSQL 15+
|
||||
|
||||
## 1) Install Dependencies
|
||||
|
||||
```bash
|
||||
npm install
|
||||
cd backend && npm install
|
||||
```
|
||||
|
||||
## 2) Configure Backend Environment
|
||||
|
||||
Copy the template and set real secrets:
|
||||
|
||||
```bash
|
||||
cd backend
|
||||
cp .env.example .env
|
||||
```
|
||||
|
||||
Required values:
|
||||
|
||||
- `DATABASE_URL`
|
||||
- `JWT_SECRET`
|
||||
- `JWT_REFRESH_SECRET`
|
||||
- `CORS_ORIGIN`
|
||||
|
||||
## 3) Prepare Database
|
||||
|
||||
Preferred (versioned migrations):
|
||||
|
||||
```bash
|
||||
cd backend
|
||||
npm run prisma:migrate
|
||||
npm run prisma:generate
|
||||
npm run prisma:seed
|
||||
```
|
||||
|
||||
Alternative (dev only):
|
||||
|
||||
```bash
|
||||
cd backend
|
||||
npm run prisma:push
|
||||
npm run prisma:seed
|
||||
```
|
||||
|
||||
## 4) Run Development Stack
|
||||
|
||||
Backend:
|
||||
|
||||
```bash
|
||||
cd backend
|
||||
npm run dev
|
||||
```
|
||||
|
||||
Frontend (new terminal):
|
||||
|
||||
```bash
|
||||
npm run dev
|
||||
```
|
||||
|
||||
## 5) Quality Gates
|
||||
|
||||
```bash
|
||||
cd backend && npm run build
|
||||
cd ..
|
||||
npm run lint
|
||||
npm run build
|
||||
```
|
||||
89
Upgrade-Implementation-Tracker.md
Normal file
89
Upgrade-Implementation-Tracker.md
Normal file
@@ -0,0 +1,89 @@
|
||||
# Enterprise Upgrade Implementation Tracker
|
||||
|
||||
This tracker maps the feature scope from Upgrade.md into implementation phases with delivered status.
|
||||
|
||||
## Phase 1 - Control Plane Foundation (Implemented)
|
||||
|
||||
### Admin Area
|
||||
- [x] Boot/Reboot/Stop/Shut Down server actions (/api/proxmox/vms/:id/actions/:action)
|
||||
- [x] Migrate server between nodes (/api/proxmox/vms/:id/migrate)
|
||||
- [x] Access noVNC console ticket (/api/proxmox/vms/:id/console)
|
||||
- [x] Reinstall workflow endpoint (/api/proxmox/vms/:id/reinstall)
|
||||
- [x] Change VM hostname/ISO/boot settings/SSH key (/api/proxmox/vms/:id/config)
|
||||
- [x] Reconfigure server network (/api/proxmox/vms/:id/network)
|
||||
- [x] Add additional disk storage (/api/proxmox/vms/:id/disks)
|
||||
- [x] Auto backup before reinstall flag (ackup_before_reinstall)
|
||||
|
||||
### Tasking / Queue / History
|
||||
- [x] Operations task history model (OperationTask)
|
||||
- [x] Operation status lifecycle: queued/running/success/failed
|
||||
- [x] Operations task list API (GET /api/operations/tasks)
|
||||
- [x] Queue summary stats for waiting/running/failed/success
|
||||
- [x] Audit logging linked with task IDs for critical operations
|
||||
|
||||
### Scheduled Automation
|
||||
- [x] VM power schedule model (PowerSchedule)
|
||||
- [x] Power schedule CRUD APIs (/api/operations/power-schedules)
|
||||
- [x] Run-now trigger for schedules (POST /run)
|
||||
- [x] Cron-based power schedule worker
|
||||
|
||||
### Frontend
|
||||
- [x] Operations Center page (/operations)
|
||||
- [x] Task history table + queue counters
|
||||
- [x] Power schedules list/create/toggle/delete/run-now
|
||||
|
||||
## Phase 2 - Provisioning & Templates (Implemented)
|
||||
- [x] App template catalog (KVM/LXC templates, ISO, archives)
|
||||
- [x] Application groups + template assignment policies
|
||||
- [x] VM ID range policies per server/group
|
||||
- [x] Auto-node and weighted placement engine
|
||||
- [x] Service create/suspend/unsuspend/terminate flows with package options
|
||||
- [x] Deep Proxmox template-clone/image-boot orchestration per template type
|
||||
|
||||
## Phase 3 - Backup, Restore, Snapshots (In Progress)
|
||||
- [x] PBS integration workflow for file-level restore tasks
|
||||
- [x] Backup limits (count/size) enforcement per tenant/product
|
||||
- [x] Backup protection flags and routing policies
|
||||
- [x] Snapshot jobs with recurring policies and retention
|
||||
- [x] Cross-VM restore from owned servers
|
||||
|
||||
## Phase 4 - Network & IPAM Enterprise (In Progress)
|
||||
- [x] Public/private IPAM across server/VLAN/tag/node/bridge (Prisma models + APIs)
|
||||
- [x] IPv4/IPv6/subnet import/return workflows (bulk import + assignment return endpoints)
|
||||
- [x] Additional IP assignment automation and audit logs
|
||||
- [x] SDN-aware private network attach/detach controls (API + UI wiring)
|
||||
- [x] IP subnet utilization dashboard APIs and admin UI
|
||||
- [x] Stricter pool policies (tenant quotas + reserved ranges + policy-based best-fit allocation)
|
||||
- [x] Subnet heatmap widgets + tenant-level utilization trend charts on dashboard
|
||||
|
||||
## Phase 5 - Monitoring, Alerts, Notifications (Implemented)
|
||||
- [x] Server health check definitions and result logs
|
||||
- [x] Threshold alerts (CPU/RAM/network/disk I/O) with notifications
|
||||
- [x] Faulty deployment insights and failed-task analytics
|
||||
- [x] Cluster remaining-resource forecasting
|
||||
|
||||
## Phase 6 - Client Area Enterprise (Implemented)
|
||||
- [x] Client machine create/manage with configurable limits
|
||||
- [x] Resource upgrade/downgrade workflows
|
||||
- [x] Firewall rule management and policy packs
|
||||
- [x] VM power schedules and backup schedules in tenant UI
|
||||
- [x] Console proxy per-node/per-cluster configuration
|
||||
|
||||
## Phase 7 - Platform Governance, Scheduler, Logs (Implemented)
|
||||
- [x] Cron scheduler policy settings with live runtime reconfiguration from Admin Settings
|
||||
- [x] Operation task repetition thresholds (retry attempts + backoff) with automated retry worker
|
||||
- [x] Failure notification policy for operation tasks (webhook + email gateway routing)
|
||||
- [x] Queue insights API for waiting/retrying/failed/stale tasks and due scheduled actions
|
||||
- [x] Settings UI upgraded from mock form to real backend-backed enterprise controls
|
||||
|
||||
## Phase 8 - Resource Graphs & Timescale Telemetry (Implemented)
|
||||
- [x] Proxmox VM usage graph API with time-scale controls (hour/day/week/month/year)
|
||||
- [x] Graph data includes CPU, memory, disk usage, network throughput, and disk I/O
|
||||
- [x] Admin VM panel updated with interactive usage graph dialogs
|
||||
- [x] Client Area updated with per-machine telemetry graphs and timescale selector
|
||||
- [x] Node-level resource graph API and Nodes page telemetry dialogs (CPU/RAM/Disk/I/O wait + network)
|
||||
- [x] Cluster MRTG-style dashboard views with selectable timescale windows and aggregate summaries
|
||||
|
||||
## Notes
|
||||
- This phase establishes the operational backbone required by most advanced features.
|
||||
- Remaining phases will build on the task engine + schedule worker + audited VM operation APIs implemented in Phase 1.
|
||||
257
Upgrade.md
Normal file
257
Upgrade.md
Normal file
@@ -0,0 +1,257 @@
|
||||
Features
|
||||
|
||||
Admin Area
|
||||
Create/Suspend/Unsuspend/Terminate Service:
|
||||
VPS Type Product With Single VM Machine
|
||||
Cloud Type Product With Multiple VM Machines Created Within Defined Limits
|
||||
Create/Terminate User Account
|
||||
Change Package - Supports Configurable Options
|
||||
Reconfigure Server Network
|
||||
Import/Detach VM Machine
|
||||
Boot/Reboot/Stop/Shut Down Server
|
||||
Change User Role
|
||||
Access noVNC, SPICE And Xterm.js Console
|
||||
Migrate Server Between Nodes In The Same Cluster
|
||||
Reinstall Server
|
||||
Change Server Hostname, ISO Image, Boot Devices And SSH Public Key
|
||||
View Server Status, Details And Statistics
|
||||
View Graphs - With Option To Change Time Scale of MRTG Graphs
|
||||
Display Disk And Bandwidth Usage Of Each Product
|
||||
Display CPU And Memory Usage Of Each Product
|
||||
Display IP Sets (KVM)
|
||||
Auto Node - Automatically Create VM On Node With Most Free Space
|
||||
Configure Client Area Features Per Product
|
||||
Configure Network And Private Network Settings With SDN Support
|
||||
Import IP Address To Hosting IP Addresses Table
|
||||
Return IP Address To IP Addresses Subnet
|
||||
Add Additional IP Address To VM
|
||||
Add Additional Disks Storage To VM (KVM)
|
||||
Enable Qemu Guest Agent (KVM)
|
||||
Enable Backups Routing
|
||||
Enable Auto VM Backups Before Reinstallation
|
||||
Enable Load Balancer
|
||||
Receive Notifications About VM Upgrades And Creation Failures
|
||||
Display Servers:
|
||||
|
||||
List Per VPS And Cloud
|
||||
VMs List
|
||||
Clusters List
|
||||
VM Cleaner - Manage VM Not Existing In Your WHMCS
|
||||
Templates - Convert KVM VPS To KVM Template
|
||||
Settings
|
||||
Groups
|
||||
Recovery VM Configurations List With Export To Backup File
|
||||
Task History
|
||||
Statuses, Resources Usage, IP Assignments And Details
|
||||
|
||||
Manage Public And Private IP Addresses Per Server/VLAN/Tag/Node/Bridge:
|
||||
|
||||
IPv4 Addresses
|
||||
IPv6 Addresses
|
||||
IPv6 Subnets
|
||||
|
||||
View Logs Of IP Assignment Changes
|
||||
Configure App Templates:
|
||||
|
||||
Applications
|
||||
KVM/LXC Templates
|
||||
ISO Images
|
||||
KVM/LXC Archives
|
||||
|
||||
Create And Manage Custom Cloud-Init Scripts Per App Template
|
||||
Configure High Availability Settings Per App Template
|
||||
Create Application Groups And Assign App Templates
|
||||
Assign Virtual Machines To Nodes Based On Selected Application Groups
|
||||
Define VM ID Ranges Per Server
|
||||
Set Minimum VM ID For Product Without ID Ranges Defined
|
||||
Configure Resource Weights For Load Balancer Prioritization
|
||||
Configure Tasks Repetition Threshold And Email Notifications
|
||||
Configure Backup Tasks Limitation And File Restoration Settings
|
||||
Configure Console Proxy For Multiple Or Per-Node Connections
|
||||
Set Admin Area And Proxmox VE Widget Features
|
||||
Configure Scheduled Backups And Firewall
|
||||
View And Manage Logs
|
||||
View Queue Of Scheduled Tasks
|
||||
Configure Cron Scheduler Settings
|
||||
Customize Module Language Files With "Translations" Tool
|
||||
Manage Media Library With Logotypes For App Templates
|
||||
View Backup Tasks, Virtual Machine And Resource Usage Statistics
|
||||
|
||||
View Faulty VM Deployments
|
||||
View Waiting And Failed Tasks
|
||||
View Cluster Remaining Resources
|
||||
View Node Resources
|
||||
|
||||
Configurable Options
|
||||
|
||||
KVM For "VPS" Product Type:
|
||||
Additional Disks Space (With Configurable Storage, Units And Size)
|
||||
Amount of RAM
|
||||
Application
|
||||
Backup Files
|
||||
Backups Size
|
||||
Bandwidth
|
||||
CPU Cores
|
||||
CPU Sockets
|
||||
CPU units for a VM
|
||||
Custom Cloud-Init Configuration
|
||||
Disk Space
|
||||
Download Backup Files
|
||||
IPv4 Addresses
|
||||
IPv6 Addresses
|
||||
IPv6 Subnets
|
||||
Limit Of CPU
|
||||
Managed View
|
||||
Network Rate
|
||||
OS Type
|
||||
Private Network
|
||||
Protected Backup Files
|
||||
Restore Backup Files
|
||||
Server Monitoring
|
||||
Snapshot Jobs
|
||||
Snapshots
|
||||
Storage Disk Space
|
||||
TPM
|
||||
Tag
|
||||
VCPUs
|
||||
KVM Limits For "Cloud" Product Type:
|
||||
Additional Disk Space
|
||||
Backups Files Limit
|
||||
Backups Size
|
||||
Bandwidth
|
||||
CPU Cores
|
||||
CPU Limit
|
||||
CPU Sockets
|
||||
CPU Units Limit
|
||||
IPv4 Addresses
|
||||
IPv6 Addresses
|
||||
IPv6 Subnets
|
||||
Memory
|
||||
Network Rate
|
||||
Snapshot Jobs
|
||||
Snapshots
|
||||
Storage
|
||||
Storage Disk Space
|
||||
VCPUs
|
||||
Virtual Networks
|
||||
LXC For "VPS" Product Type:
|
||||
Additional Disks Space (With Configurable Storage, Units And Size)
|
||||
Amount of RAM
|
||||
Amount of SWAP
|
||||
Application
|
||||
Backup Files
|
||||
Backups Size
|
||||
Bandwidth
|
||||
CPU Cores
|
||||
CPU units for a VM
|
||||
Disk Space
|
||||
Download Backup Files
|
||||
IPv4 Addresses
|
||||
IPv6 Addresses
|
||||
IPv6 Subnets
|
||||
Limit Of CPU
|
||||
Managed View
|
||||
Network Rate
|
||||
Private Network
|
||||
Protected Backup Files
|
||||
Restore Backup Files
|
||||
Server Monitoring
|
||||
Snapshot Jobs
|
||||
Snapshots
|
||||
Storage Disk Space
|
||||
Tag
|
||||
LXC Limits For "Cloud" Product Type:
|
||||
Additional Disk Space
|
||||
Backups Files Limit
|
||||
Backups Size
|
||||
Bandwidth
|
||||
CPU Limit
|
||||
CPU Units Limit
|
||||
IPv4 Addresses
|
||||
IPv6 Addresses
|
||||
IPv6 Subnets
|
||||
Memory
|
||||
Network Rate
|
||||
SWAP
|
||||
Snapshot Jobs
|
||||
Snapshots
|
||||
Storage
|
||||
Storage Disk Space
|
||||
VCPUs
|
||||
Virtual Networks
|
||||
|
||||
Client Area
|
||||
|
||||
Create/Manage/View Server Status, Details And Statistics:
|
||||
VPS Type Product With Single VM Machine
|
||||
Cloud Type Product With Multiple VM Machines Created Within Available Limits:
|
||||
Define Machine Settings:
|
||||
Name
|
||||
Type
|
||||
Description
|
||||
Define Machine Parameters:
|
||||
Location
|
||||
Sockets (KVM)
|
||||
Cores (LXC)
|
||||
vCPU (KVM)
|
||||
CPU Priority
|
||||
VM RAM
|
||||
SWAP (LXC)
|
||||
Disk Size
|
||||
Default User (KVM)
|
||||
Password
|
||||
SSH Key
|
||||
Search Domain (KVM)
|
||||
Name Servers (KVM)
|
||||
Add Virtual Networks
|
||||
Add Additional Disks
|
||||
Start/Reboot/Stop/Shut Down/Delete Server
|
||||
Reconfigure Server Network
|
||||
Access noVNC, SPICE And Xterm.js Console
|
||||
Change Server Hostname, ISO Image, Boot Devices And SSH Public Key
|
||||
View And Edit Public SSH Key (KVM)
|
||||
Download Public And Private SSH Keys (LXC)
|
||||
Create/Restore/Delete Backups Of Current Server
|
||||
Manage Backups Within Defined Limits (Max Number And Size Of Files)
|
||||
Restore Backups From:
|
||||
Any Owned Server
|
||||
Proxmox Backup Server (PBS)
|
||||
Restore Backups Of:
|
||||
Selected Single Files And Directories With Option To Download Them (PBS)
|
||||
Full Server Backups
|
||||
Manage Backup Schedules Within Defined Limits (Max Number And Size Of Files)
|
||||
Protect Selected Backups From Manual Deletion And Backups Routing
|
||||
Add And Manage Additional Disks
|
||||
Manage Firewall Rules And Options
|
||||
View Resources Usage Graphs - With Option To Change Time Scale of MRTG Graphs:
|
||||
CPU
|
||||
Memory
|
||||
Network Traffic
|
||||
Disk I/O
|
||||
View Network Devices, Manage Private Interface And Attach Servers
|
||||
Reinstall Server Using Templates (KVM) And ISO Images
|
||||
Send Email Notifications When Server Exceeds Resource Thresholds:
|
||||
Network Traffic
|
||||
CPU Usage
|
||||
Memory Usage
|
||||
Disk Read And Write Speed
|
||||
Monitor Server Health
|
||||
Create Server Monitoring Checks
|
||||
View Check Result Logs
|
||||
View Successful And Failed Checks Graphs
|
||||
Create Server Snapshots:
|
||||
Manually
|
||||
Automatically:
|
||||
Every Number Of Hours
|
||||
Each Specified Day
|
||||
View Task History
|
||||
Manage VM Power Tasks To Automatically Start/Stop/Reboot Server At Specified Time
|
||||
Display CPU, Memory, Disk And Bandwidth Usage
|
||||
Choose Server Resources While Ordering And Upgrade/Downgrade Them Freely
|
||||
Convert KVM VPS To KVM Template ("Cloud" Type Product)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
21
backend/Dockerfile
Normal file
21
backend/Dockerfile
Normal file
@@ -0,0 +1,21 @@
|
||||
FROM node:22-alpine AS deps
|
||||
WORKDIR /app
|
||||
COPY package.json package-lock.json* ./
|
||||
RUN npm install
|
||||
|
||||
FROM node:22-alpine AS build
|
||||
WORKDIR /app
|
||||
COPY --from=deps /app/node_modules ./node_modules
|
||||
COPY . .
|
||||
RUN npm run prisma:generate
|
||||
RUN npm run build
|
||||
|
||||
FROM node:22-alpine AS runtime
|
||||
WORKDIR /app
|
||||
ENV NODE_ENV=production
|
||||
COPY --from=build /app/node_modules ./node_modules
|
||||
COPY --from=build /app/dist ./dist
|
||||
COPY --from=build /app/prisma ./prisma
|
||||
COPY --from=build /app/package.json ./package.json
|
||||
EXPOSE 8080
|
||||
CMD ["sh", "-c", "npm run prisma:deploy && node dist/index.js"]
|
||||
2402
backend/package-lock.json
generated
Normal file
2402
backend/package-lock.json
generated
Normal file
File diff suppressed because it is too large
Load Diff
47
backend/package.json
Normal file
47
backend/package.json
Normal file
@@ -0,0 +1,47 @@
|
||||
{
|
||||
"name": "proxpanel-backend",
|
||||
"version": "1.0.0",
|
||||
"private": true,
|
||||
"description": "Production API for ProxPanel (Proxmox VE SaaS control panel)",
|
||||
"scripts": {
|
||||
"dev": "tsx watch src/index.ts",
|
||||
"build": "tsc -p tsconfig.json",
|
||||
"start": "node dist/index.js",
|
||||
"test": "node --test dist/tests/**/*.test.js",
|
||||
"prisma:generate": "prisma generate",
|
||||
"prisma:migrate": "prisma migrate dev",
|
||||
"prisma:deploy": "prisma migrate deploy",
|
||||
"prisma:push": "prisma db push",
|
||||
"prisma:seed": "prisma db seed",
|
||||
"prisma:validate": "prisma validate"
|
||||
},
|
||||
"prisma": {
|
||||
"seed": "tsx prisma/seed.ts"
|
||||
},
|
||||
"dependencies": {
|
||||
"@prisma/client": "^6.6.0",
|
||||
"axios": "^1.9.0",
|
||||
"bcryptjs": "^2.4.3",
|
||||
"compression": "^1.8.0",
|
||||
"cors": "^2.8.5",
|
||||
"dotenv": "^16.5.0",
|
||||
"express": "^4.21.2",
|
||||
"helmet": "^8.1.0",
|
||||
"jsonwebtoken": "^9.0.2",
|
||||
"morgan": "^1.10.0",
|
||||
"node-cron": "^4.0.7",
|
||||
"zod": "^3.24.3"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/bcryptjs": "^2.4.6",
|
||||
"@types/compression": "^1.7.5",
|
||||
"@types/cors": "^2.8.17",
|
||||
"@types/express": "^4.17.21",
|
||||
"@types/jsonwebtoken": "^9.0.9",
|
||||
"@types/morgan": "^1.9.9",
|
||||
"@types/node": "^22.15.2",
|
||||
"prisma": "^6.6.0",
|
||||
"tsx": "^4.19.4",
|
||||
"typescript": "^5.8.3"
|
||||
}
|
||||
}
|
||||
1352
backend/prisma/migrations/20260417120000_init/migration.sql
Normal file
1352
backend/prisma/migrations/20260417120000_init/migration.sql
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,110 @@
|
||||
-- AlterTable
|
||||
ALTER TABLE "User"
|
||||
ADD COLUMN "avatar_url" TEXT,
|
||||
ADD COLUMN "profile_metadata" JSONB NOT NULL DEFAULT '{}',
|
||||
ADD COLUMN "must_change_password" BOOLEAN NOT NULL DEFAULT false,
|
||||
ADD COLUMN "mfa_enabled" BOOLEAN NOT NULL DEFAULT false,
|
||||
ADD COLUMN "mfa_secret" TEXT,
|
||||
ADD COLUMN "mfa_recovery_codes" JSONB NOT NULL DEFAULT '[]',
|
||||
ADD COLUMN "password_changed_at" TIMESTAMP(3);
|
||||
|
||||
-- AlterTable
|
||||
ALTER TABLE "Tenant"
|
||||
ADD COLUMN "trial_starts_at" TIMESTAMP(3),
|
||||
ADD COLUMN "trial_ends_at" TIMESTAMP(3),
|
||||
ADD COLUMN "trial_grace_ends_at" TIMESTAMP(3),
|
||||
ADD COLUMN "trial_days" INTEGER,
|
||||
ADD COLUMN "trial_locked" BOOLEAN NOT NULL DEFAULT false;
|
||||
|
||||
-- CreateTable
|
||||
CREATE TABLE "AuthSession" (
|
||||
"id" TEXT NOT NULL,
|
||||
"user_id" TEXT NOT NULL,
|
||||
"refresh_token_hash" TEXT NOT NULL,
|
||||
"ip_address" TEXT,
|
||||
"user_agent" TEXT,
|
||||
"issued_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
"expires_at" TIMESTAMP(3) NOT NULL,
|
||||
"last_used_at" TIMESTAMP(3),
|
||||
"revoked_at" TIMESTAMP(3),
|
||||
"created_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
"updated_at" TIMESTAMP(3) NOT NULL,
|
||||
|
||||
CONSTRAINT "AuthSession_pkey" PRIMARY KEY ("id")
|
||||
);
|
||||
|
||||
-- CreateTable
|
||||
CREATE TABLE "PasswordResetToken" (
|
||||
"id" TEXT NOT NULL,
|
||||
"user_id" TEXT NOT NULL,
|
||||
"token_hash" TEXT NOT NULL,
|
||||
"expires_at" TIMESTAMP(3) NOT NULL,
|
||||
"used_at" TIMESTAMP(3),
|
||||
"created_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
|
||||
CONSTRAINT "PasswordResetToken_pkey" PRIMARY KEY ("id")
|
||||
);
|
||||
|
||||
-- CreateTable
|
||||
CREATE TABLE "CmsPage" (
|
||||
"id" TEXT NOT NULL,
|
||||
"slug" TEXT NOT NULL,
|
||||
"title" TEXT NOT NULL,
|
||||
"section" TEXT NOT NULL DEFAULT 'general',
|
||||
"content" JSONB NOT NULL DEFAULT '{}',
|
||||
"is_published" BOOLEAN NOT NULL DEFAULT false,
|
||||
"created_by" TEXT,
|
||||
"updated_by" TEXT,
|
||||
"created_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
"updated_at" TIMESTAMP(3) NOT NULL,
|
||||
|
||||
CONSTRAINT "CmsPage_pkey" PRIMARY KEY ("id")
|
||||
);
|
||||
|
||||
-- CreateTable
|
||||
CREATE TABLE "SiteNavigationItem" (
|
||||
"id" TEXT NOT NULL,
|
||||
"label" TEXT NOT NULL,
|
||||
"href" TEXT NOT NULL,
|
||||
"position" TEXT NOT NULL DEFAULT 'header',
|
||||
"sort_order" INTEGER NOT NULL DEFAULT 100,
|
||||
"is_enabled" BOOLEAN NOT NULL DEFAULT true,
|
||||
"metadata" JSONB NOT NULL DEFAULT '{}',
|
||||
"created_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
"updated_at" TIMESTAMP(3) NOT NULL,
|
||||
|
||||
CONSTRAINT "SiteNavigationItem_pkey" PRIMARY KEY ("id")
|
||||
);
|
||||
|
||||
-- CreateIndex
|
||||
CREATE UNIQUE INDEX "AuthSession_refresh_token_hash_key" ON "AuthSession"("refresh_token_hash");
|
||||
|
||||
-- CreateIndex
|
||||
CREATE INDEX "AuthSession_user_id_revoked_at_idx" ON "AuthSession"("user_id", "revoked_at");
|
||||
|
||||
-- CreateIndex
|
||||
CREATE INDEX "AuthSession_expires_at_idx" ON "AuthSession"("expires_at");
|
||||
|
||||
-- CreateIndex
|
||||
CREATE UNIQUE INDEX "PasswordResetToken_token_hash_key" ON "PasswordResetToken"("token_hash");
|
||||
|
||||
-- CreateIndex
|
||||
CREATE INDEX "PasswordResetToken_user_id_expires_at_idx" ON "PasswordResetToken"("user_id", "expires_at");
|
||||
|
||||
-- CreateIndex
|
||||
CREATE UNIQUE INDEX "CmsPage_slug_key" ON "CmsPage"("slug");
|
||||
|
||||
-- CreateIndex
|
||||
CREATE INDEX "CmsPage_section_is_published_idx" ON "CmsPage"("section", "is_published");
|
||||
|
||||
-- CreateIndex
|
||||
CREATE INDEX "SiteNavigationItem_position_sort_order_idx" ON "SiteNavigationItem"("position", "sort_order");
|
||||
|
||||
-- CreateIndex
|
||||
CREATE INDEX "Tenant_trial_ends_at_idx" ON "Tenant"("trial_ends_at");
|
||||
|
||||
-- AddForeignKey
|
||||
ALTER TABLE "AuthSession" ADD CONSTRAINT "AuthSession_user_id_fkey" FOREIGN KEY ("user_id") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
|
||||
|
||||
-- AddForeignKey
|
||||
ALTER TABLE "PasswordResetToken" ADD CONSTRAINT "PasswordResetToken_user_id_fkey" FOREIGN KEY ("user_id") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
|
||||
1
backend/prisma/migrations/migration_lock.toml
Normal file
1
backend/prisma/migrations/migration_lock.toml
Normal file
@@ -0,0 +1 @@
|
||||
provider = "postgresql"
|
||||
1281
backend/prisma/schema.prisma
Normal file
1281
backend/prisma/schema.prisma
Normal file
File diff suppressed because it is too large
Load Diff
184
backend/prisma/seed.js
Normal file
184
backend/prisma/seed.js
Normal file
@@ -0,0 +1,184 @@
|
||||
"use strict";
|
||||
var __importDefault = (this && this.__importDefault) || function (mod) {
|
||||
return (mod && mod.__esModule) ? mod : { "default": mod };
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const bcryptjs_1 = __importDefault(require("bcryptjs"));
|
||||
const client_1 = require("@prisma/client");
|
||||
const prisma = new client_1.PrismaClient();
|
||||
async function main() {
|
||||
const adminEmail = process.env.ADMIN_EMAIL ?? "admin@proxpanel.local";
|
||||
const adminPassword = process.env.ADMIN_PASSWORD ?? "ChangeMe123!";
|
||||
const password_hash = await bcryptjs_1.default.hash(adminPassword, 12);
|
||||
const tenant = await prisma.tenant.upsert({
|
||||
where: { slug: "default-tenant" },
|
||||
update: {},
|
||||
create: {
|
||||
name: "Default Tenant",
|
||||
slug: "default-tenant",
|
||||
owner_email: adminEmail,
|
||||
currency: client_1.Currency.NGN,
|
||||
payment_provider: client_1.PaymentProvider.PAYSTACK
|
||||
}
|
||||
});
|
||||
await prisma.user.upsert({
|
||||
where: { email: adminEmail },
|
||||
update: {
|
||||
role: client_1.Role.SUPER_ADMIN,
|
||||
password_hash,
|
||||
tenant_id: tenant.id
|
||||
},
|
||||
create: {
|
||||
email: adminEmail,
|
||||
full_name: "System Administrator",
|
||||
password_hash,
|
||||
role: client_1.Role.SUPER_ADMIN,
|
||||
tenant_id: tenant.id
|
||||
}
|
||||
});
|
||||
await prisma.setting.upsert({
|
||||
where: { key: "proxmox" },
|
||||
update: {},
|
||||
create: {
|
||||
key: "proxmox",
|
||||
type: "PROXMOX",
|
||||
value: {
|
||||
host: "",
|
||||
port: 8006,
|
||||
username: "root@pam",
|
||||
token_id: "",
|
||||
token_secret: "",
|
||||
verify_ssl: true
|
||||
}
|
||||
}
|
||||
});
|
||||
await prisma.setting.upsert({
|
||||
where: { key: "payment" },
|
||||
update: {},
|
||||
create: {
|
||||
key: "payment",
|
||||
type: "PAYMENT",
|
||||
value: {
|
||||
default_provider: "paystack",
|
||||
paystack_public: "",
|
||||
paystack_secret: "",
|
||||
flutterwave_public: "",
|
||||
flutterwave_secret: "",
|
||||
flutterwave_webhook_hash: "",
|
||||
callback_url: ""
|
||||
}
|
||||
}
|
||||
});
|
||||
await prisma.setting.upsert({
|
||||
where: { key: "provisioning" },
|
||||
update: {},
|
||||
create: {
|
||||
key: "provisioning",
|
||||
type: "GENERAL",
|
||||
value: {
|
||||
min_vmid: 100
|
||||
}
|
||||
}
|
||||
});
|
||||
await prisma.setting.upsert({
|
||||
where: { key: "backup" },
|
||||
update: {},
|
||||
create: {
|
||||
key: "backup",
|
||||
type: "GENERAL",
|
||||
value: {
|
||||
default_source: "local",
|
||||
default_storage: "local-lvm",
|
||||
max_restore_file_count: 100,
|
||||
pbs_enabled: false,
|
||||
pbs_host: "",
|
||||
pbs_datastore: "",
|
||||
pbs_namespace: "",
|
||||
pbs_verify_ssl: true
|
||||
}
|
||||
}
|
||||
});
|
||||
await prisma.billingPlan.upsert({
|
||||
where: { slug: "starter" },
|
||||
update: {},
|
||||
create: {
|
||||
name: "Starter",
|
||||
slug: "starter",
|
||||
description: "Entry plan for lightweight VM workloads",
|
||||
price_monthly: 12000,
|
||||
price_hourly: 12000 / 720,
|
||||
currency: client_1.Currency.NGN,
|
||||
cpu_cores: 2,
|
||||
ram_mb: 4096,
|
||||
disk_gb: 60,
|
||||
bandwidth_gb: 2000,
|
||||
features: ["basic-support", "daily-backups"]
|
||||
}
|
||||
});
|
||||
const ubuntuTemplate = await prisma.appTemplate.upsert({
|
||||
where: { slug: "ubuntu-22-04-golden" },
|
||||
update: {},
|
||||
create: {
|
||||
name: "Ubuntu 22.04 Golden",
|
||||
slug: "ubuntu-22-04-golden",
|
||||
template_type: "KVM_TEMPLATE",
|
||||
virtualization_type: "QEMU",
|
||||
source: "local:vztmpl/ubuntu-22.04-golden.qcow2",
|
||||
description: "Baseline hardened Ubuntu template",
|
||||
metadata: {
|
||||
os_family: "linux",
|
||||
os_version: "22.04"
|
||||
}
|
||||
}
|
||||
});
|
||||
const webGroup = await prisma.applicationGroup.upsert({
|
||||
where: { slug: "web-workloads" },
|
||||
update: {},
|
||||
create: {
|
||||
name: "Web Workloads",
|
||||
slug: "web-workloads",
|
||||
description: "HTTP-facing application services"
|
||||
}
|
||||
});
|
||||
await prisma.applicationGroupTemplate.upsert({
|
||||
where: {
|
||||
group_id_template_id: {
|
||||
group_id: webGroup.id,
|
||||
template_id: ubuntuTemplate.id
|
||||
}
|
||||
},
|
||||
update: {},
|
||||
create: {
|
||||
group_id: webGroup.id,
|
||||
template_id: ubuntuTemplate.id,
|
||||
priority: 10
|
||||
}
|
||||
});
|
||||
await prisma.backupPolicy.upsert({
|
||||
where: {
|
||||
id: "default-tenant-backup-policy"
|
||||
},
|
||||
update: {},
|
||||
create: {
|
||||
id: "default-tenant-backup-policy",
|
||||
tenant_id: tenant.id,
|
||||
max_files: 25,
|
||||
max_total_size_mb: 102400,
|
||||
max_protected_files: 5,
|
||||
allow_file_restore: true,
|
||||
allow_cross_vm_restore: true,
|
||||
allow_pbs_restore: true
|
||||
}
|
||||
});
|
||||
}
|
||||
main()
|
||||
.then(async () => {
|
||||
await prisma.$disconnect();
|
||||
})
|
||||
.catch(async (error) => {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error("Seed failed:", error);
|
||||
await prisma.$disconnect();
|
||||
process.exit(1);
|
||||
});
|
||||
//# sourceMappingURL=seed.js.map
|
||||
1
backend/prisma/seed.js.map
Normal file
1
backend/prisma/seed.js.map
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"seed.js","sourceRoot":"","sources":["seed.ts"],"names":[],"mappings":";;;;;AAAA,wDAA8B;AAC9B,2CAA+E;AAE/E,MAAM,MAAM,GAAG,IAAI,qBAAY,EAAE,CAAC;AAElC,KAAK,UAAU,IAAI;IACjB,MAAM,UAAU,GAAG,OAAO,CAAC,GAAG,CAAC,WAAW,IAAI,uBAAuB,CAAC;IACtE,MAAM,aAAa,GAAG,OAAO,CAAC,GAAG,CAAC,cAAc,IAAI,cAAc,CAAC;IACnE,MAAM,aAAa,GAAG,MAAM,kBAAM,CAAC,IAAI,CAAC,aAAa,EAAE,EAAE,CAAC,CAAC;IAE3D,MAAM,MAAM,GAAG,MAAM,MAAM,CAAC,MAAM,CAAC,MAAM,CAAC;QACxC,KAAK,EAAE,EAAE,IAAI,EAAE,gBAAgB,EAAE;QACjC,MAAM,EAAE,EAAE;QACV,MAAM,EAAE;YACN,IAAI,EAAE,gBAAgB;YACtB,IAAI,EAAE,gBAAgB;YACtB,WAAW,EAAE,UAAU;YACvB,QAAQ,EAAE,iBAAQ,CAAC,GAAG;YACtB,gBAAgB,EAAE,wBAAe,CAAC,QAAQ;SAC3C;KACF,CAAC,CAAC;IAEH,MAAM,MAAM,CAAC,IAAI,CAAC,MAAM,CAAC;QACvB,KAAK,EAAE,EAAE,KAAK,EAAE,UAAU,EAAE;QAC5B,MAAM,EAAE;YACN,IAAI,EAAE,aAAI,CAAC,WAAW;YACtB,aAAa;YACb,SAAS,EAAE,MAAM,CAAC,EAAE;SACrB;QACD,MAAM,EAAE;YACN,KAAK,EAAE,UAAU;YACjB,SAAS,EAAE,sBAAsB;YACjC,aAAa;YACb,IAAI,EAAE,aAAI,CAAC,WAAW;YACtB,SAAS,EAAE,MAAM,CAAC,EAAE;SACrB;KACF,CAAC,CAAC;IAEH,MAAM,MAAM,CAAC,OAAO,CAAC,MAAM,CAAC;QAC1B,KAAK,EAAE,EAAE,GAAG,EAAE,SAAS,EAAE;QACzB,MAAM,EAAE,EAAE;QACV,MAAM,EAAE;YACN,GAAG,EAAE,SAAS;YACd,IAAI,EAAE,SAAS;YACf,KAAK,EAAE;gBACL,IAAI,EAAE,EAAE;gBACR,IAAI,EAAE,IAAI;gBACV,QAAQ,EAAE,UAAU;gBACpB,QAAQ,EAAE,EAAE;gBACZ,YAAY,EAAE,EAAE;gBAChB,UAAU,EAAE,IAAI;aACjB;SACF;KACF,CAAC,CAAC;IAEH,MAAM,MAAM,CAAC,OAAO,CAAC,MAAM,CAAC;QAC1B,KAAK,EAAE,EAAE,GAAG,EAAE,SAAS,EAAE;QACzB,MAAM,EAAE,EAAE;QACV,MAAM,EAAE;YACN,GAAG,EAAE,SAAS;YACd,IAAI,EAAE,SAAS;YACf,KAAK,EAAE;gBACL,gBAAgB,EAAE,UAAU;gBAC5B,eAAe,EAAE,EAAE;gBACnB,eAAe,EAAE,EAAE;gBACnB,kBAAkB,EAAE,EAAE;gBACtB,kBAAkB,EAAE,EAAE;gBACtB,wBAAwB,EAAE,EAAE;gBAC5B,YAAY,EAAE,EAAE;aACjB;SACF;KACF,CAAC,CAAC;IAEH,MAAM,MAAM,CAAC,OAAO,CAAC,MAAM,CAAC;QAC1B,KAAK,EAAE,EAAE,GAAG,EAAE,cAAc,EAAE;QAC9B,MAAM,EAAE,EAAE;QACV,MAAM,EAAE;YACN,GAAG,EAAE,cAAc;YACnB,IAAI,EAAE,SAAS;YACf,KAAK,EAAE;gBACL,QAAQ,EAAE,GAAG;aACd;SACF;KACF,CAAC,CAAC;IAEH,MAAM,MAAM,CAAC,OAAO,CAAC,MAAM,CAAC;QAC1B,KAAK,EAAE,EAAE,GAAG,EAAE,QAAQ,EAAE;QACxB,MAAM,EAAE,EAAE;Q
ACV,MAAM,EAAE;YACN,GAAG,EAAE,QAAQ;YACb,IAAI,EAAE,SAAS;YACf,KAAK,EAAE;gBACL,cAAc,EAAE,OAAO;gBACvB,eAAe,EAAE,WAAW;gBAC5B,sBAAsB,EAAE,GAAG;gBAC3B,WAAW,EAAE,KAAK;gBAClB,QAAQ,EAAE,EAAE;gBACZ,aAAa,EAAE,EAAE;gBACjB,aAAa,EAAE,EAAE;gBACjB,cAAc,EAAE,IAAI;aACrB;SACF;KACF,CAAC,CAAC;IAEH,MAAM,MAAM,CAAC,WAAW,CAAC,MAAM,CAAC;QAC9B,KAAK,EAAE,EAAE,IAAI,EAAE,SAAS,EAAE;QAC1B,MAAM,EAAE,EAAE;QACV,MAAM,EAAE;YACN,IAAI,EAAE,SAAS;YACf,IAAI,EAAE,SAAS;YACf,WAAW,EAAE,yCAAyC;YACtD,aAAa,EAAE,KAAK;YACpB,YAAY,EAAE,KAAK,GAAG,GAAG;YACzB,QAAQ,EAAE,iBAAQ,CAAC,GAAG;YACtB,SAAS,EAAE,CAAC;YACZ,MAAM,EAAE,IAAI;YACZ,OAAO,EAAE,EAAE;YACX,YAAY,EAAE,IAAI;YAClB,QAAQ,EAAE,CAAC,eAAe,EAAE,eAAe,CAAC;SAC7C;KACF,CAAC,CAAC;IAEH,MAAM,cAAc,GAAG,MAAM,MAAM,CAAC,WAAW,CAAC,MAAM,CAAC;QACrD,KAAK,EAAE,EAAE,IAAI,EAAE,qBAAqB,EAAE;QACtC,MAAM,EAAE,EAAE;QACV,MAAM,EAAE;YACN,IAAI,EAAE,qBAAqB;YAC3B,IAAI,EAAE,qBAAqB;YAC3B,aAAa,EAAE,cAAc;YAC7B,mBAAmB,EAAE,MAAM;YAC3B,MAAM,EAAE,wCAAwC;YAChD,WAAW,EAAE,mCAAmC;YAChD,QAAQ,EAAE;gBACR,SAAS,EAAE,OAAO;gBAClB,UAAU,EAAE,OAAO;aACpB;SACF;KACF,CAAC,CAAC;IAEH,MAAM,QAAQ,GAAG,MAAM,MAAM,CAAC,gBAAgB,CAAC,MAAM,CAAC;QACpD,KAAK,EAAE,EAAE,IAAI,EAAE,eAAe,EAAE;QAChC,MAAM,EAAE,EAAE;QACV,MAAM,EAAE;YACN,IAAI,EAAE,eAAe;YACrB,IAAI,EAAE,eAAe;YACrB,WAAW,EAAE,kCAAkC;SAChD;KACF,CAAC,CAAC;IAEH,MAAM,MAAM,CAAC,wBAAwB,CAAC,MAAM,CAAC;QAC3C,KAAK,EAAE;YACL,oBAAoB,EAAE;gBACpB,QAAQ,EAAE,QAAQ,CAAC,EAAE;gBACrB,WAAW,EAAE,cAAc,CAAC,EAAE;aAC/B;SACF;QACD,MAAM,EAAE,EAAE;QACV,MAAM,EAAE;YACN,QAAQ,EAAE,QAAQ,CAAC,EAAE;YACrB,WAAW,EAAE,cAAc,CAAC,EAAE;YAC9B,QAAQ,EAAE,EAAE;SACb;KACF,CAAC,CAAC;IAEH,MAAM,MAAM,CAAC,YAAY,CAAC,MAAM,CAAC;QAC/B,KAAK,EAAE;YACL,EAAE,EAAE,8BAA8B;SACnC;QACD,MAAM,EAAE,EAAE;QACV,MAAM,EAAE;YACN,EAAE,EAAE,8BAA8B;YAClC,SAAS,EAAE,MAAM,CAAC,EAAE;YACpB,SAAS,EAAE,EAAE;YACb,iBAAiB,EAAE,MAAM;YACzB,mBAAmB,EAAE,CAAC;YACtB,kBAAkB,EAAE,IAAI;YACxB,sBAAsB,EAAE,IAAI;YAC5B,iBAAiB,EAAE,IAAI;SACxB;KACF,CAAC,CAAC;AACL,CAAC;AAED,IAAI,EAAE;KACH,IAAI,CAAC,KAAK,IAAI,EAAE;IACf,MAAM,MAAM,CAAC,WAAW,EAAE,CAAC;AAC7B,CAAC,CAAC;KACD,KAAK,CAAC,KAAK,EAAE,KAAK
,EAAE,EAAE;IACrB,sCAAsC;IACtC,OAAO,CAAC,KAAK,CAAC,cAAc,EAAE,KAAK,CAAC,CAAC;IACrC,MAAM,MAAM,CAAC,WAAW,EAAE,CAAC;IAC3B,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;AAClB,CAAC,CAAC,CAAC"}
|
||||
192
backend/prisma/seed.ts
Normal file
192
backend/prisma/seed.ts
Normal file
@@ -0,0 +1,192 @@
|
||||
import bcrypt from "bcryptjs";
|
||||
import { PrismaClient, Role, Currency, PaymentProvider } from "@prisma/client";
|
||||
|
||||
const prisma = new PrismaClient();
|
||||
|
||||
async function main() {
|
||||
const adminEmail = process.env.ADMIN_EMAIL ?? "admin@proxpanel.local";
|
||||
const adminPassword = process.env.ADMIN_PASSWORD ?? "ChangeMe123!";
|
||||
const password_hash = await bcrypt.hash(adminPassword, 12);
|
||||
|
||||
const tenant = await prisma.tenant.upsert({
|
||||
where: { slug: "default-tenant" },
|
||||
update: {},
|
||||
create: {
|
||||
name: "Default Tenant",
|
||||
slug: "default-tenant",
|
||||
owner_email: adminEmail,
|
||||
currency: Currency.NGN,
|
||||
payment_provider: PaymentProvider.PAYSTACK
|
||||
}
|
||||
});
|
||||
|
||||
await prisma.user.upsert({
|
||||
where: { email: adminEmail },
|
||||
update: {
|
||||
role: Role.SUPER_ADMIN,
|
||||
password_hash,
|
||||
tenant_id: tenant.id
|
||||
},
|
||||
create: {
|
||||
email: adminEmail,
|
||||
full_name: "System Administrator",
|
||||
password_hash,
|
||||
role: Role.SUPER_ADMIN,
|
||||
tenant_id: tenant.id
|
||||
}
|
||||
});
|
||||
|
||||
await prisma.setting.upsert({
|
||||
where: { key: "proxmox" },
|
||||
update: {},
|
||||
create: {
|
||||
key: "proxmox",
|
||||
type: "PROXMOX",
|
||||
value: {
|
||||
host: "",
|
||||
port: 8006,
|
||||
username: "root@pam",
|
||||
token_id: "",
|
||||
token_secret: "",
|
||||
verify_ssl: true
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
await prisma.setting.upsert({
|
||||
where: { key: "payment" },
|
||||
update: {},
|
||||
create: {
|
||||
key: "payment",
|
||||
type: "PAYMENT",
|
||||
value: {
|
||||
default_provider: "paystack",
|
||||
paystack_public: "",
|
||||
paystack_secret: "",
|
||||
flutterwave_public: "",
|
||||
flutterwave_secret: "",
|
||||
flutterwave_webhook_hash: "",
|
||||
callback_url: ""
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
await prisma.setting.upsert({
|
||||
where: { key: "provisioning" },
|
||||
update: {},
|
||||
create: {
|
||||
key: "provisioning",
|
||||
type: "GENERAL",
|
||||
value: {
|
||||
min_vmid: 100
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
await prisma.setting.upsert({
|
||||
where: { key: "backup" },
|
||||
update: {},
|
||||
create: {
|
||||
key: "backup",
|
||||
type: "GENERAL",
|
||||
value: {
|
||||
default_source: "local",
|
||||
default_storage: "local-lvm",
|
||||
max_restore_file_count: 100,
|
||||
pbs_enabled: false,
|
||||
pbs_host: "",
|
||||
pbs_datastore: "",
|
||||
pbs_namespace: "",
|
||||
pbs_verify_ssl: true
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
await prisma.billingPlan.upsert({
|
||||
where: { slug: "starter" },
|
||||
update: {},
|
||||
create: {
|
||||
name: "Starter",
|
||||
slug: "starter",
|
||||
description: "Entry plan for lightweight VM workloads",
|
||||
price_monthly: 12000,
|
||||
price_hourly: 12000 / 720,
|
||||
currency: Currency.NGN,
|
||||
cpu_cores: 2,
|
||||
ram_mb: 4096,
|
||||
disk_gb: 60,
|
||||
bandwidth_gb: 2000,
|
||||
features: ["basic-support", "daily-backups"]
|
||||
}
|
||||
});
|
||||
|
||||
const ubuntuTemplate = await prisma.appTemplate.upsert({
|
||||
where: { slug: "ubuntu-22-04-golden" },
|
||||
update: {},
|
||||
create: {
|
||||
name: "Ubuntu 22.04 Golden",
|
||||
slug: "ubuntu-22-04-golden",
|
||||
template_type: "KVM_TEMPLATE",
|
||||
virtualization_type: "QEMU",
|
||||
source: "local:vztmpl/ubuntu-22.04-golden.qcow2",
|
||||
description: "Baseline hardened Ubuntu template",
|
||||
metadata: {
|
||||
os_family: "linux",
|
||||
os_version: "22.04"
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
const webGroup = await prisma.applicationGroup.upsert({
|
||||
where: { slug: "web-workloads" },
|
||||
update: {},
|
||||
create: {
|
||||
name: "Web Workloads",
|
||||
slug: "web-workloads",
|
||||
description: "HTTP-facing application services"
|
||||
}
|
||||
});
|
||||
|
||||
await prisma.applicationGroupTemplate.upsert({
|
||||
where: {
|
||||
group_id_template_id: {
|
||||
group_id: webGroup.id,
|
||||
template_id: ubuntuTemplate.id
|
||||
}
|
||||
},
|
||||
update: {},
|
||||
create: {
|
||||
group_id: webGroup.id,
|
||||
template_id: ubuntuTemplate.id,
|
||||
priority: 10
|
||||
}
|
||||
});
|
||||
|
||||
await prisma.backupPolicy.upsert({
|
||||
where: {
|
||||
id: "default-tenant-backup-policy"
|
||||
},
|
||||
update: {},
|
||||
create: {
|
||||
id: "default-tenant-backup-policy",
|
||||
tenant_id: tenant.id,
|
||||
max_files: 25,
|
||||
max_total_size_mb: 102400,
|
||||
max_protected_files: 5,
|
||||
allow_file_restore: true,
|
||||
allow_cross_vm_restore: true,
|
||||
allow_pbs_restore: true
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
main()
|
||||
.then(async () => {
|
||||
await prisma.$disconnect();
|
||||
})
|
||||
.catch(async (error) => {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error("Seed failed:", error);
|
||||
await prisma.$disconnect();
|
||||
process.exit(1);
|
||||
});
|
||||
98
backend/src/app.ts
Normal file
98
backend/src/app.ts
Normal file
@@ -0,0 +1,98 @@
|
||||
import express from "express";
|
||||
import cors from "cors";
|
||||
import helmet from "helmet";
|
||||
import compression from "compression";
|
||||
import morgan from "morgan";
|
||||
import { env } from "./config/env";
|
||||
import authRoutes from "./routes/auth.routes";
|
||||
import healthRoutes from "./routes/health.routes";
|
||||
import dashboardRoutes from "./routes/dashboard.routes";
|
||||
import resourceRoutes from "./routes/resources.routes";
|
||||
import billingRoutes from "./routes/billing.routes";
|
||||
import paymentRoutes from "./routes/payment.routes";
|
||||
import proxmoxRoutes from "./routes/proxmox.routes";
|
||||
import settingsRoutes from "./routes/settings.routes";
|
||||
import operationsRoutes from "./routes/operations.routes";
|
||||
import provisioningRoutes from "./routes/provisioning.routes";
|
||||
import backupRoutes from "./routes/backup.routes";
|
||||
import networkRoutes from "./routes/network.routes";
|
||||
import monitoringRoutes from "./routes/monitoring.routes";
|
||||
import clientRoutes from "./routes/client.routes";
|
||||
import profileRoutes from "./routes/profile.routes";
|
||||
import adminUsersRoutes from "./routes/admin-users.routes";
|
||||
import systemRoutes from "./routes/system.routes";
|
||||
import announcementsRoutes from "./routes/announcements.routes";
|
||||
import { errorHandler, notFoundHandler } from "./middleware/error-handler";
|
||||
import { createRateLimit } from "./middleware/rate-limit";
|
||||
|
||||
export function createApp() {
|
||||
const app = express();
|
||||
app.set("trust proxy", 1);
|
||||
|
||||
const globalRateLimit = createRateLimit({
|
||||
windowMs: env.RATE_LIMIT_WINDOW_MS,
|
||||
max: env.RATE_LIMIT_MAX
|
||||
});
|
||||
const authRateLimit = createRateLimit({
|
||||
windowMs: env.AUTH_RATE_LIMIT_WINDOW_MS,
|
||||
max: env.AUTH_RATE_LIMIT_MAX,
|
||||
keyGenerator: (req) => {
|
||||
const email = typeof req.body?.email === "string" ? req.body.email.toLowerCase().trim() : "";
|
||||
return `${req.ip}:${email}`;
|
||||
}
|
||||
});
|
||||
|
||||
app.use(
|
||||
cors({
|
||||
origin: env.CORS_ORIGIN === "*" ? true : env.CORS_ORIGIN.split(",").map((item) => item.trim()),
|
||||
credentials: true
|
||||
})
|
||||
);
|
||||
app.use(helmet());
|
||||
app.use(compression());
|
||||
app.use(
|
||||
express.json({
|
||||
limit: "2mb",
|
||||
verify: (req, _res, buffer) => {
|
||||
const request = req as express.Request;
|
||||
request.rawBody = buffer.toString("utf8");
|
||||
}
|
||||
})
|
||||
);
|
||||
app.use(morgan("dev"));
|
||||
app.use("/api", globalRateLimit);
|
||||
app.use("/api/auth/login", authRateLimit);
|
||||
app.use("/api/auth/refresh", authRateLimit);
|
||||
|
||||
app.get("/", (_req, res) => {
|
||||
res.json({
|
||||
name: "ProxPanel API",
|
||||
version: "1.0.0",
|
||||
docs: "/api/health"
|
||||
});
|
||||
});
|
||||
|
||||
app.use("/api/health", healthRoutes);
|
||||
app.use("/api/auth", authRoutes);
|
||||
app.use("/api/dashboard", dashboardRoutes);
|
||||
app.use("/api/resources", resourceRoutes);
|
||||
app.use("/api/billing", billingRoutes);
|
||||
app.use("/api/payments", paymentRoutes);
|
||||
app.use("/api/proxmox", proxmoxRoutes);
|
||||
app.use("/api/settings", settingsRoutes);
|
||||
app.use("/api/operations", operationsRoutes);
|
||||
app.use("/api/provisioning", provisioningRoutes);
|
||||
app.use("/api/backups", backupRoutes);
|
||||
app.use("/api/network", networkRoutes);
|
||||
app.use("/api/monitoring", monitoringRoutes);
|
||||
app.use("/api/client", clientRoutes);
|
||||
app.use("/api/profile", profileRoutes);
|
||||
app.use("/api/admin", adminUsersRoutes);
|
||||
app.use("/api/system", systemRoutes);
|
||||
app.use("/api/announcements", announcementsRoutes);
|
||||
|
||||
app.use(notFoundHandler);
|
||||
app.use(errorHandler);
|
||||
|
||||
return app;
|
||||
}
|
||||
39
backend/src/config/env.ts
Normal file
39
backend/src/config/env.ts
Normal file
@@ -0,0 +1,39 @@
|
||||
import dotenv from "dotenv";
|
||||
import { z } from "zod";
|
||||
|
||||
dotenv.config();
|
||||
|
||||
const envSchema = z.object({
|
||||
NODE_ENV: z.enum(["development", "test", "production"]).default("development"),
|
||||
PORT: z.coerce.number().default(8080),
|
||||
DATABASE_URL: z.string().min(1, "DATABASE_URL is required"),
|
||||
JWT_SECRET: z.string().min(16, "JWT_SECRET must be at least 16 characters"),
|
||||
SETTINGS_ENCRYPTION_KEY: z.string().min(16).optional(),
|
||||
JWT_EXPIRES_IN: z.string().default("7d"),
|
||||
JWT_REFRESH_SECRET: z.string().min(16, "JWT_REFRESH_SECRET must be at least 16 characters").optional(),
|
||||
JWT_REFRESH_EXPIRES_IN: z.string().default("30d"),
|
||||
CORS_ORIGIN: z.string().default("*"),
|
||||
RATE_LIMIT_WINDOW_MS: z.coerce.number().int().positive().default(60_000),
|
||||
RATE_LIMIT_MAX: z.coerce.number().int().positive().default(600),
|
||||
AUTH_RATE_LIMIT_WINDOW_MS: z.coerce.number().int().positive().default(60_000),
|
||||
AUTH_RATE_LIMIT_MAX: z.coerce.number().int().positive().default(20),
|
||||
SCHEDULER_LEASE_MS: z.coerce.number().int().positive().default(180_000),
|
||||
SCHEDULER_HEARTBEAT_MS: z.coerce.number().int().positive().default(30_000),
|
||||
ENABLE_SCHEDULER: z.coerce.boolean().default(true),
|
||||
BILLING_CRON: z.string().default("0 * * * *"),
|
||||
BACKUP_CRON: z.string().default("*/15 * * * *"),
|
||||
POWER_SCHEDULE_CRON: z.string().default("* * * * *"),
|
||||
MONITORING_CRON: z.string().default("*/5 * * * *"),
|
||||
PROXMOX_TIMEOUT_MS: z.coerce.number().default(15000)
|
||||
});
|
||||
|
||||
const parsed = envSchema.parse(process.env);
|
||||
|
||||
if (parsed.NODE_ENV === "production" && parsed.CORS_ORIGIN === "*") {
|
||||
throw new Error("CORS_ORIGIN cannot be '*' in production");
|
||||
}
|
||||
|
||||
export const env = {
|
||||
...parsed,
|
||||
JWT_REFRESH_SECRET: parsed.JWT_REFRESH_SECRET ?? parsed.JWT_SECRET
|
||||
};
|
||||
23
backend/src/index.ts
Normal file
23
backend/src/index.ts
Normal file
@@ -0,0 +1,23 @@
|
||||
import { createApp } from "./app";
|
||||
import { env } from "./config/env";
|
||||
import { prisma } from "./lib/prisma";
|
||||
import { startSchedulers } from "./services/scheduler.service";
|
||||
|
||||
async function bootstrap() {
|
||||
await prisma.$connect();
|
||||
|
||||
const app = createApp();
|
||||
app.listen(env.PORT, () => {
|
||||
// eslint-disable-next-line no-console
|
||||
console.log(`ProxPanel API running on port ${env.PORT}`);
|
||||
});
|
||||
|
||||
await startSchedulers();
|
||||
}
|
||||
|
||||
bootstrap().catch(async (error) => {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error("Failed to start server:", error);
|
||||
await prisma.$disconnect();
|
||||
process.exit(1);
|
||||
});
|
||||
12
backend/src/lib/http-error.ts
Normal file
12
backend/src/lib/http-error.ts
Normal file
@@ -0,0 +1,12 @@
|
||||
export class HttpError extends Error {
|
||||
status: number;
|
||||
code: string;
|
||||
details?: unknown;
|
||||
|
||||
constructor(status: number, message: string, code = "HTTP_ERROR", details?: unknown) {
|
||||
super(message);
|
||||
this.status = status;
|
||||
this.code = code;
|
||||
this.details = details;
|
||||
}
|
||||
}
|
||||
48
backend/src/lib/prisma-json.ts
Normal file
48
backend/src/lib/prisma-json.ts
Normal file
@@ -0,0 +1,48 @@
|
||||
import type { Prisma } from "@prisma/client";
|
||||
|
||||
export function toPrismaJsonValue(value: unknown): Prisma.InputJsonValue {
|
||||
if (value === null) {
|
||||
return "null";
|
||||
}
|
||||
|
||||
if (typeof value === "string" || typeof value === "boolean") {
|
||||
return value;
|
||||
}
|
||||
|
||||
if (typeof value === "number") {
|
||||
return Number.isFinite(value) ? value : String(value);
|
||||
}
|
||||
|
||||
if (typeof value === "bigint") {
|
||||
return value.toString();
|
||||
}
|
||||
|
||||
if (value instanceof Date) {
|
||||
return value.toISOString();
|
||||
}
|
||||
|
||||
if (value instanceof Error) {
|
||||
return {
|
||||
name: value.name,
|
||||
message: value.message,
|
||||
stack: value.stack ?? ""
|
||||
};
|
||||
}
|
||||
|
||||
if (Array.isArray(value)) {
|
||||
return value.map((item) => toPrismaJsonValue(item));
|
||||
}
|
||||
|
||||
if (typeof value === "object") {
|
||||
const output: Record<string, Prisma.InputJsonValue> = {};
|
||||
|
||||
for (const [key, raw] of Object.entries(value as Record<string, unknown>)) {
|
||||
if (raw === undefined) continue;
|
||||
output[key] = toPrismaJsonValue(raw);
|
||||
}
|
||||
|
||||
return output;
|
||||
}
|
||||
|
||||
return String(value);
|
||||
}
|
||||
3
backend/src/lib/prisma.ts
Normal file
3
backend/src/lib/prisma.ts
Normal file
@@ -0,0 +1,3 @@
|
||||
import { PrismaClient } from "@prisma/client";
|
||||
|
||||
export const prisma = new PrismaClient();
|
||||
105
backend/src/lib/security.ts
Normal file
105
backend/src/lib/security.ts
Normal file
@@ -0,0 +1,105 @@
|
||||
import crypto from "crypto";
|
||||
import { env } from "../config/env";
|
||||
|
||||
const PASSWORD_ALPHABET = "ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz23456789!@#$%^&*";
|
||||
|
||||
type EncryptedEnvelope = {
|
||||
__enc: "v1";
|
||||
iv: string;
|
||||
tag: string;
|
||||
data: string;
|
||||
};
|
||||
|
||||
function getEncryptionKey() {
|
||||
const seed = env.SETTINGS_ENCRYPTION_KEY ?? env.JWT_SECRET;
|
||||
return crypto.createHash("sha256").update(seed).digest();
|
||||
}
|
||||
|
||||
function normalizeSecret(value: string) {
|
||||
return crypto.createHash("sha256").update(value).digest("hex");
|
||||
}
|
||||
|
||||
export function generateSecurePassword(length = 20) {
|
||||
const bytes = crypto.randomBytes(length * 2);
|
||||
let output = "";
|
||||
for (let i = 0; i < bytes.length && output.length < length; i += 1) {
|
||||
output += PASSWORD_ALPHABET[bytes[i] % PASSWORD_ALPHABET.length];
|
||||
}
|
||||
return output;
|
||||
}
|
||||
|
||||
export function hashToken(token: string) {
|
||||
return normalizeSecret(token);
|
||||
}
|
||||
|
||||
export function timingSafeEqualHash(candidate: string, storedHash: string) {
|
||||
const candidateHash = Buffer.from(normalizeSecret(candidate), "utf8");
|
||||
const knownHash = Buffer.from(storedHash, "utf8");
|
||||
if (candidateHash.length !== knownHash.length) return false;
|
||||
return crypto.timingSafeEqual(candidateHash, knownHash);
|
||||
}
|
||||
|
||||
export function generateRecoveryCodes(count = 8) {
|
||||
const codes: string[] = [];
|
||||
for (let i = 0; i < count; i += 1) {
|
||||
const raw = crypto.randomBytes(5).toString("hex").toUpperCase();
|
||||
codes.push(`${raw.slice(0, 5)}-${raw.slice(5, 10)}`);
|
||||
}
|
||||
return codes;
|
||||
}
|
||||
|
||||
export function hashRecoveryCodes(codes: string[]) {
|
||||
return codes.map((code) => normalizeSecret(code.trim().toUpperCase()));
|
||||
}
|
||||
|
||||
export function consumeRecoveryCode(input: string, hashes: string[]) {
|
||||
const normalized = normalizeSecret(input.trim().toUpperCase());
|
||||
const matchIndex = hashes.findIndex((hash) => hash === normalized);
|
||||
if (matchIndex < 0) {
|
||||
return { matched: false, remainingHashes: hashes };
|
||||
}
|
||||
const remainingHashes = [...hashes.slice(0, matchIndex), ...hashes.slice(matchIndex + 1)];
|
||||
return { matched: true, remainingHashes };
|
||||
}
|
||||
|
||||
function isEncryptedEnvelope(value: unknown): value is EncryptedEnvelope {
|
||||
return (
|
||||
typeof value === "object" &&
|
||||
value !== null &&
|
||||
(value as Record<string, unknown>).__enc === "v1" &&
|
||||
typeof (value as Record<string, unknown>).iv === "string" &&
|
||||
typeof (value as Record<string, unknown>).tag === "string" &&
|
||||
typeof (value as Record<string, unknown>).data === "string"
|
||||
);
|
||||
}
|
||||
|
||||
export function encryptJson(value: unknown): EncryptedEnvelope {
|
||||
const key = getEncryptionKey();
|
||||
const iv = crypto.randomBytes(12);
|
||||
const cipher = crypto.createCipheriv("aes-256-gcm", key, iv);
|
||||
const payload = Buffer.from(JSON.stringify(value), "utf8");
|
||||
const encrypted = Buffer.concat([cipher.update(payload), cipher.final()]);
|
||||
const tag = cipher.getAuthTag();
|
||||
|
||||
return {
|
||||
__enc: "v1",
|
||||
iv: iv.toString("base64"),
|
||||
tag: tag.toString("base64"),
|
||||
data: encrypted.toString("base64")
|
||||
};
|
||||
}
|
||||
|
||||
export function decryptJson<T = unknown>(value: unknown): T {
|
||||
if (!isEncryptedEnvelope(value)) {
|
||||
return value as T;
|
||||
}
|
||||
|
||||
const key = getEncryptionKey();
|
||||
const iv = Buffer.from(value.iv, "base64");
|
||||
const tag = Buffer.from(value.tag, "base64");
|
||||
const encrypted = Buffer.from(value.data, "base64");
|
||||
const decipher = crypto.createDecipheriv("aes-256-gcm", key, iv);
|
||||
decipher.setAuthTag(tag);
|
||||
const decrypted = Buffer.concat([decipher.update(encrypted), decipher.final()]).toString("utf8");
|
||||
return JSON.parse(decrypted) as T;
|
||||
}
|
||||
100
backend/src/lib/totp.ts
Normal file
100
backend/src/lib/totp.ts
Normal file
@@ -0,0 +1,100 @@
|
||||
import crypto from "crypto";
|
||||
|
||||
const BASE32_ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567";
|
||||
|
||||
function base32Encode(buffer: Buffer) {
|
||||
let bits = 0;
|
||||
let value = 0;
|
||||
let output = "";
|
||||
|
||||
for (const byte of buffer) {
|
||||
value = (value << 8) | byte;
|
||||
bits += 8;
|
||||
while (bits >= 5) {
|
||||
output += BASE32_ALPHABET[(value >>> (bits - 5)) & 31];
|
||||
bits -= 5;
|
||||
}
|
||||
}
|
||||
|
||||
if (bits > 0) {
|
||||
output += BASE32_ALPHABET[(value << (5 - bits)) & 31];
|
||||
}
|
||||
|
||||
return output;
|
||||
}
|
||||
|
||||
function base32Decode(input: string) {
|
||||
const normalized = input.toUpperCase().replace(/=+$/g, "").replace(/[^A-Z2-7]/g, "");
|
||||
let bits = 0;
|
||||
let value = 0;
|
||||
const bytes: number[] = [];
|
||||
|
||||
for (const char of normalized) {
|
||||
const index = BASE32_ALPHABET.indexOf(char);
|
||||
if (index < 0) continue;
|
||||
value = (value << 5) | index;
|
||||
bits += 5;
|
||||
if (bits >= 8) {
|
||||
bytes.push((value >>> (bits - 8)) & 0xff);
|
||||
bits -= 8;
|
||||
}
|
||||
}
|
||||
|
||||
return Buffer.from(bytes);
|
||||
}
|
||||
|
||||
function hotp(secret: string, counter: number, digits = 6) {
|
||||
const key = base32Decode(secret);
|
||||
const counterBuffer = Buffer.alloc(8);
|
||||
const high = Math.floor(counter / 0x100000000);
|
||||
const low = counter % 0x100000000;
|
||||
counterBuffer.writeUInt32BE(high >>> 0, 0);
|
||||
counterBuffer.writeUInt32BE(low >>> 0, 4);
|
||||
|
||||
const hmac = crypto.createHmac("sha1", key).update(counterBuffer).digest();
|
||||
const offset = hmac[hmac.length - 1] & 0x0f;
|
||||
const codeInt =
|
||||
((hmac[offset] & 0x7f) << 24) |
|
||||
((hmac[offset + 1] & 0xff) << 16) |
|
||||
((hmac[offset + 2] & 0xff) << 8) |
|
||||
(hmac[offset + 3] & 0xff);
|
||||
|
||||
return String(codeInt % 10 ** digits).padStart(digits, "0");
|
||||
}
|
||||
|
||||
export function generateTotpSecret(bytes = 20) {
|
||||
return base32Encode(crypto.randomBytes(bytes));
|
||||
}
|
||||
|
||||
export function generateTotpCode(secret: string, timestampMs = Date.now(), stepSeconds = 30, digits = 6) {
|
||||
const counter = Math.floor(timestampMs / 1000 / stepSeconds);
|
||||
return hotp(secret, counter, digits);
|
||||
}
|
||||
|
||||
export function verifyTotpCode(
|
||||
token: string,
|
||||
secret: string,
|
||||
options?: { window?: number; timestampMs?: number; stepSeconds?: number; digits?: number }
|
||||
) {
|
||||
const window = options?.window ?? 1;
|
||||
const timestampMs = options?.timestampMs ?? Date.now();
|
||||
const stepSeconds = options?.stepSeconds ?? 30;
|
||||
const digits = options?.digits ?? 6;
|
||||
|
||||
const normalizedToken = token.replace(/\s+/g, "");
|
||||
if (!/^\d{6,8}$/.test(normalizedToken)) return false;
|
||||
|
||||
const baseCounter = Math.floor(timestampMs / 1000 / stepSeconds);
|
||||
for (let i = -window; i <= window; i += 1) {
|
||||
if (hotp(secret, baseCounter + i, digits) === normalizedToken) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
export function buildTotpUri(issuer: string, accountLabel: string, secret: string) {
|
||||
const safeIssuer = encodeURIComponent(issuer);
|
||||
const safeAccount = encodeURIComponent(accountLabel);
|
||||
return `otpauth://totp/${safeIssuer}:${safeAccount}?secret=${secret}&issuer=${safeIssuer}&algorithm=SHA1&digits=6&period=30`;
|
||||
}
|
||||
177
backend/src/middleware/auth.ts
Normal file
177
backend/src/middleware/auth.ts
Normal file
@@ -0,0 +1,177 @@
|
||||
import type { NextFunction, Request as ExpressRequest, Response } from "express";
|
||||
import jwt, { type JwtPayload, type SignOptions } from "jsonwebtoken";
|
||||
import type { Role } from "@prisma/client";
|
||||
import { env } from "../config/env";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
|
||||
// Fine-grained permission identifiers in "<domain>:<action>" form.
// Each role in `rolePermissions` below is granted a subset of these, and
// route handlers request one via `authorize(permission)`.
type Permission =
  | "vm:create"
  | "vm:read"
  | "vm:update"
  | "vm:delete"
  | "vm:start"
  | "vm:stop"
  | "node:manage"
  | "node:read"
  | "tenant:manage"
  | "tenant:read"
  | "billing:manage"
  | "billing:read"
  | "backup:manage"
  | "backup:read"
  | "rbac:manage"
  | "settings:manage"
  | "settings:read"
  | "audit:read"
  | "security:manage"
  | "security:read"
  | "user:manage"
  | "user:read"
  | "profile:read"
  | "profile:manage"
  | "session:manage";
|
||||
|
||||
// Static role → permission grants. `authorize()` consults this table; a
// permission absent from a role's set yields a 403 for that role.
const rolePermissions: Record<Role, Set<Permission>> = {
  // Every permission in the catalog.
  SUPER_ADMIN: new Set<Permission>([
    "vm:create",
    "vm:read",
    "vm:update",
    "vm:delete",
    "vm:start",
    "vm:stop",
    "node:manage",
    "node:read",
    "tenant:manage",
    "tenant:read",
    "billing:manage",
    "billing:read",
    "backup:manage",
    "backup:read",
    "rbac:manage",
    "settings:manage",
    "settings:read",
    "audit:read",
    "security:manage",
    "security:read",
    "user:manage",
    "user:read",
    "profile:read",
    "profile:manage",
    "session:manage"
  ]),
  // Full VM lifecycle and backups, but read-only on nodes, tenants,
  // billing, settings, security, and users; no rbac/settings management.
  TENANT_ADMIN: new Set<Permission>([
    "vm:create",
    "vm:read",
    "vm:update",
    "vm:delete",
    "vm:start",
    "vm:stop",
    "node:read",
    "tenant:read",
    "billing:read",
    "backup:manage",
    "backup:read",
    "settings:read",
    "audit:read",
    "security:read",
    "user:read",
    "profile:read",
    "profile:manage",
    "session:manage"
  ]),
  // Day-2 operations: VM power actions, node and backup management,
  // security management — but no VM create/update/delete and no user or
  // tenant visibility.
  OPERATOR: new Set<Permission>([
    "vm:read",
    "vm:start",
    "vm:stop",
    "node:manage",
    "node:read",
    "billing:read",
    "backup:manage",
    "backup:read",
    "audit:read",
    "security:manage",
    "security:read",
    "profile:read",
    "profile:manage"
  ]),
  // Read-only everywhere, plus management of the viewer's own profile.
  VIEWER: new Set<Permission>([
    "vm:read",
    "node:read",
    "tenant:read",
    "billing:read",
    "backup:read",
    "audit:read",
    "security:read",
    "settings:read",
    "user:read",
    "profile:read",
    "profile:manage"
  ])
};
|
||||
|
||||
export function createJwtToken(payload: Express.UserToken): string {
|
||||
const expiresIn = env.JWT_EXPIRES_IN as SignOptions["expiresIn"];
|
||||
return jwt.sign(payload, env.JWT_SECRET, {
|
||||
expiresIn
|
||||
});
|
||||
}
|
||||
|
||||
export function createRefreshToken(payload: Express.UserToken): string {
|
||||
const expiresIn = env.JWT_REFRESH_EXPIRES_IN as SignOptions["expiresIn"];
|
||||
return jwt.sign(payload, env.JWT_REFRESH_SECRET, {
|
||||
expiresIn
|
||||
});
|
||||
}
|
||||
|
||||
export function verifyRefreshToken(token: string): Express.UserToken | null {
|
||||
try {
|
||||
const decoded = jwt.verify(token, env.JWT_REFRESH_SECRET) as JwtPayload & Express.UserToken;
|
||||
if (!decoded?.id || !decoded?.email || !decoded?.role) {
|
||||
return null;
|
||||
}
|
||||
return {
|
||||
id: decoded.id,
|
||||
email: decoded.email,
|
||||
role: decoded.role,
|
||||
tenant_id: decoded.tenant_id,
|
||||
sid: decoded.sid
|
||||
};
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
export function requireAuth(req: ExpressRequest, _res: Response, next: NextFunction) {
|
||||
const authHeader = req.header("authorization");
|
||||
const token = authHeader?.startsWith("Bearer ") ? authHeader.slice(7) : null;
|
||||
|
||||
if (!token) {
|
||||
return next(new HttpError(401, "Missing bearer token", "AUTH_REQUIRED"));
|
||||
}
|
||||
|
||||
try {
|
||||
const decoded = jwt.verify(token, env.JWT_SECRET) as Express.UserToken;
|
||||
req.user = decoded;
|
||||
return next();
|
||||
} catch {
|
||||
return next(new HttpError(401, "Invalid or expired token", "INVALID_TOKEN"));
|
||||
}
|
||||
}
|
||||
|
||||
export function authorize(permission: Permission) {
|
||||
return (req: ExpressRequest, _res: Response, next: NextFunction) => {
|
||||
if (!req.user) {
|
||||
return next(new HttpError(401, "Unauthenticated", "AUTH_REQUIRED"));
|
||||
}
|
||||
const allowed = rolePermissions[req.user.role]?.has(permission);
|
||||
if (!allowed) {
|
||||
return next(new HttpError(403, "Insufficient permission", "FORBIDDEN"));
|
||||
}
|
||||
return next();
|
||||
};
|
||||
}
|
||||
|
||||
export function isTenantScopedUser(req: Pick<Express.Request, "user">): boolean {
|
||||
if (!req.user) return false;
|
||||
return req.user.role === "TENANT_ADMIN" || req.user.role === "VIEWER";
|
||||
}
|
||||
54
backend/src/middleware/error-handler.ts
Normal file
54
backend/src/middleware/error-handler.ts
Normal file
@@ -0,0 +1,54 @@
|
||||
import type { NextFunction, Request, Response } from "express";
|
||||
import { Prisma } from "@prisma/client";
|
||||
import { ZodError } from "zod";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
|
||||
export function notFoundHandler(_req: Request, res: Response) {
|
||||
res.status(404).json({
|
||||
error: {
|
||||
code: "NOT_FOUND",
|
||||
message: "Resource not found"
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
export function errorHandler(error: unknown, _req: Request, res: Response, _next: NextFunction) {
|
||||
if (error instanceof HttpError) {
|
||||
return res.status(error.status).json({
|
||||
error: {
|
||||
code: error.code,
|
||||
message: error.message,
|
||||
details: error.details
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (error instanceof ZodError) {
|
||||
return res.status(400).json({
|
||||
error: {
|
||||
code: "VALIDATION_ERROR",
|
||||
message: "Payload validation failed",
|
||||
details: error.flatten()
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (error instanceof Prisma.PrismaClientKnownRequestError) {
|
||||
return res.status(400).json({
|
||||
error: {
|
||||
code: "DATABASE_ERROR",
|
||||
message: error.message,
|
||||
details: error.meta
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// eslint-disable-next-line no-console
|
||||
console.error("Unhandled error:", error);
|
||||
return res.status(500).json({
|
||||
error: {
|
||||
code: "INTERNAL_SERVER_ERROR",
|
||||
message: "An unexpected server error occurred"
|
||||
}
|
||||
});
|
||||
}
|
||||
60
backend/src/middleware/rate-limit.ts
Normal file
60
backend/src/middleware/rate-limit.ts
Normal file
@@ -0,0 +1,60 @@
|
||||
import type { NextFunction, Request, Response } from "express";
|
||||
|
||||
type RateLimitOptions = {
|
||||
windowMs: number;
|
||||
max: number;
|
||||
keyGenerator?: (req: Request) => string;
|
||||
};
|
||||
|
||||
type Bucket = {
|
||||
count: number;
|
||||
resetAt: number;
|
||||
};
|
||||
|
||||
export function createRateLimit(options: RateLimitOptions) {
|
||||
const windowMs = Math.max(1_000, options.windowMs);
|
||||
const max = Math.max(1, options.max);
|
||||
const buckets = new Map<string, Bucket>();
|
||||
|
||||
return (req: Request, res: Response, next: NextFunction) => {
|
||||
const key = options.keyGenerator?.(req) ?? req.ip ?? "unknown";
|
||||
const now = Date.now();
|
||||
const existing = buckets.get(key);
|
||||
|
||||
if (!existing || existing.resetAt <= now) {
|
||||
buckets.set(key, {
|
||||
count: 1,
|
||||
resetAt: now + windowMs
|
||||
});
|
||||
res.setHeader("X-RateLimit-Limit", String(max));
|
||||
res.setHeader("X-RateLimit-Remaining", String(max - 1));
|
||||
res.setHeader("X-RateLimit-Reset", String(Math.ceil((now + windowMs) / 1000)));
|
||||
return next();
|
||||
}
|
||||
|
||||
existing.count += 1;
|
||||
const remaining = Math.max(0, max - existing.count);
|
||||
res.setHeader("X-RateLimit-Limit", String(max));
|
||||
res.setHeader("X-RateLimit-Remaining", String(remaining));
|
||||
res.setHeader("X-RateLimit-Reset", String(Math.ceil(existing.resetAt / 1000)));
|
||||
|
||||
if (existing.count > max) {
|
||||
return res.status(429).json({
|
||||
error: {
|
||||
code: "RATE_LIMIT_EXCEEDED",
|
||||
message: "Too many requests. Please retry later."
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (buckets.size > 10_000) {
|
||||
for (const [bucketKey, bucketValue] of buckets.entries()) {
|
||||
if (bucketValue.resetAt <= now) {
|
||||
buckets.delete(bucketKey);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return next();
|
||||
};
|
||||
}
|
||||
272
backend/src/routes/admin-users.routes.ts
Normal file
272
backend/src/routes/admin-users.routes.ts
Normal file
@@ -0,0 +1,272 @@
|
||||
import { Router } from "express";
|
||||
import bcrypt from "bcryptjs";
|
||||
import { Role } from "@prisma/client";
|
||||
import { z } from "zod";
|
||||
import { authorize, requireAuth } from "../middleware/auth";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { generateSecurePassword } from "../lib/security";
|
||||
import { logAudit } from "../services/audit.service";
|
||||
import { toPrismaJsonValue } from "../lib/prisma-json";
|
||||
|
||||
const router = Router();

// Payload for POST /users. When `generate_password` is true (the default)
// or no password is supplied, a random temporary password is generated
// server-side and returned once in the response.
const createUserSchema = z.object({
  email: z.string().email(),
  full_name: z.string().min(1).max(120),
  role: z.nativeEnum(Role),
  tenant_id: z.string().optional(),
  password: z.string().min(10).max(120).optional(),
  generate_password: z.boolean().default(true),
  is_active: z.boolean().default(true)
});
|
||||
|
||||
// Payload for PATCH /users/:id — every field optional (partial update);
// `tenant_id: null` explicitly detaches the user from a tenant.
const updateUserSchema = z.object({
  full_name: z.string().min(1).max(120).optional(),
  role: z.nativeEnum(Role).optional(),
  tenant_id: z.string().nullable().optional(),
  is_active: z.boolean().optional()
});
|
||||
|
||||
function rolesCatalog() {
|
||||
return [
|
||||
{
|
||||
role: "SUPER_ADMIN",
|
||||
label: "Super Admin",
|
||||
scope: "Global",
|
||||
description: "Full platform control including billing, security, and system configuration."
|
||||
},
|
||||
{
|
||||
role: "TENANT_ADMIN",
|
||||
label: "Tenant Admin",
|
||||
scope: "Tenant",
|
||||
description: "Owns a tenant environment, users, workloads, and tenant-level billing views."
|
||||
},
|
||||
{
|
||||
role: "OPERATOR",
|
||||
label: "Operator",
|
||||
scope: "Ops",
|
||||
description: "Runs day-2 operations for compute, backup, and node workflows."
|
||||
},
|
||||
{
|
||||
role: "VIEWER",
|
||||
label: "Viewer",
|
||||
scope: "Read-only",
|
||||
description: "Read-only access for auditors and stakeholders."
|
||||
}
|
||||
];
|
||||
}
|
||||
|
||||
router.get("/roles", requireAuth, authorize("rbac:manage"), async (_req, res) => {
|
||||
res.json(rolesCatalog());
|
||||
});
|
||||
|
||||
router.get("/users", requireAuth, authorize("user:read"), async (req, res, next) => {
|
||||
try {
|
||||
const tenantId = typeof req.query.tenant_id === "string" ? req.query.tenant_id : undefined;
|
||||
const role = typeof req.query.role === "string" ? req.query.role.toUpperCase() : undefined;
|
||||
|
||||
const where: Record<string, unknown> = {};
|
||||
if (tenantId) where.tenant_id = tenantId;
|
||||
if (role && Object.values(Role).includes(role as Role)) where.role = role;
|
||||
|
||||
const users = await prisma.user.findMany({
|
||||
where,
|
||||
orderBy: { created_at: "desc" },
|
||||
select: {
|
||||
id: true,
|
||||
email: true,
|
||||
full_name: true,
|
||||
role: true,
|
||||
tenant_id: true,
|
||||
is_active: true,
|
||||
mfa_enabled: true,
|
||||
must_change_password: true,
|
||||
created_at: true,
|
||||
updated_at: true,
|
||||
last_login_at: true,
|
||||
tenant: {
|
||||
select: {
|
||||
id: true,
|
||||
name: true,
|
||||
slug: true
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
res.json(users);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/users", requireAuth, authorize("user:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = createUserSchema.parse(req.body ?? {});
|
||||
if (req.user?.role !== "SUPER_ADMIN") {
|
||||
throw new HttpError(403, "Only SUPER_ADMIN can create administrative users", "FORBIDDEN");
|
||||
}
|
||||
if (payload.role === "TENANT_ADMIN" && !payload.tenant_id) {
|
||||
throw new HttpError(400, "tenant_id is required for TENANT_ADMIN users", "VALIDATION_ERROR");
|
||||
}
|
||||
|
||||
const existing = await prisma.user.findUnique({ where: { email: payload.email.toLowerCase().trim() } });
|
||||
if (existing) {
|
||||
throw new HttpError(409, "A user with this email already exists", "USER_EXISTS");
|
||||
}
|
||||
|
||||
const tempPassword = payload.generate_password || !payload.password ? generateSecurePassword(20) : payload.password;
|
||||
const passwordHash = await bcrypt.hash(tempPassword, 12);
|
||||
|
||||
const user = await prisma.user.create({
|
||||
data: {
|
||||
email: payload.email.toLowerCase().trim(),
|
||||
full_name: payload.full_name,
|
||||
role: payload.role,
|
||||
tenant_id: payload.tenant_id ?? null,
|
||||
is_active: payload.is_active,
|
||||
password_hash: passwordHash,
|
||||
must_change_password: true,
|
||||
password_changed_at: new Date()
|
||||
},
|
||||
select: {
|
||||
id: true,
|
||||
email: true,
|
||||
full_name: true,
|
||||
role: true,
|
||||
tenant_id: true,
|
||||
is_active: true,
|
||||
must_change_password: true,
|
||||
created_at: true
|
||||
}
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "rbac.user.create",
|
||||
resource_type: "USER",
|
||||
resource_id: user.id,
|
||||
resource_name: user.email,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue({
|
||||
role: user.role,
|
||||
tenant_id: user.tenant_id
|
||||
}),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
res.status(201).json({
|
||||
user,
|
||||
temporary_password: tempPassword
|
||||
});
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/users/:id", requireAuth, authorize("user:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = updateUserSchema.parse(req.body ?? {});
|
||||
const existing = await prisma.user.findUnique({ where: { id: req.params.id } });
|
||||
if (!existing) {
|
||||
throw new HttpError(404, "User not found", "USER_NOT_FOUND");
|
||||
}
|
||||
|
||||
if (req.user?.role !== "SUPER_ADMIN" && existing.role === "SUPER_ADMIN") {
|
||||
throw new HttpError(403, "Only SUPER_ADMIN can modify this user", "FORBIDDEN");
|
||||
}
|
||||
if (payload.role === "SUPER_ADMIN" && req.user?.role !== "SUPER_ADMIN") {
|
||||
throw new HttpError(403, "Only SUPER_ADMIN can assign SUPER_ADMIN role", "FORBIDDEN");
|
||||
}
|
||||
|
||||
const updated = await prisma.user.update({
|
||||
where: { id: req.params.id },
|
||||
data: payload,
|
||||
select: {
|
||||
id: true,
|
||||
email: true,
|
||||
full_name: true,
|
||||
role: true,
|
||||
tenant_id: true,
|
||||
is_active: true,
|
||||
must_change_password: true,
|
||||
created_at: true,
|
||||
updated_at: true,
|
||||
last_login_at: true
|
||||
}
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "rbac.user.update",
|
||||
resource_type: "USER",
|
||||
resource_id: updated.id,
|
||||
resource_name: updated.email,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue({ changes: payload }),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
res.json(updated);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/users/:id/reset-password", requireAuth, authorize("user:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const existing = await prisma.user.findUnique({ where: { id: req.params.id } });
|
||||
if (!existing) {
|
||||
throw new HttpError(404, "User not found", "USER_NOT_FOUND");
|
||||
}
|
||||
|
||||
if (existing.role === "SUPER_ADMIN" && req.user?.role !== "SUPER_ADMIN") {
|
||||
throw new HttpError(403, "Only SUPER_ADMIN can reset this account", "FORBIDDEN");
|
||||
}
|
||||
|
||||
const tempPassword = generateSecurePassword(20);
|
||||
const passwordHash = await bcrypt.hash(tempPassword, 12);
|
||||
|
||||
await prisma.user.update({
|
||||
where: { id: existing.id },
|
||||
data: {
|
||||
password_hash: passwordHash,
|
||||
must_change_password: true,
|
||||
password_changed_at: new Date(),
|
||||
mfa_enabled: false,
|
||||
mfa_secret: null,
|
||||
mfa_recovery_codes: []
|
||||
}
|
||||
});
|
||||
|
||||
await prisma.authSession.updateMany({
|
||||
where: {
|
||||
user_id: existing.id,
|
||||
revoked_at: null
|
||||
},
|
||||
data: {
|
||||
revoked_at: new Date()
|
||||
}
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "rbac.user.reset_password",
|
||||
resource_type: "USER",
|
||||
resource_id: existing.id,
|
||||
resource_name: existing.email,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
temporary_password: tempPassword
|
||||
});
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
||||
189
backend/src/routes/announcements.routes.ts
Normal file
189
backend/src/routes/announcements.routes.ts
Normal file
@@ -0,0 +1,189 @@
|
||||
import { Role } from "@prisma/client";
|
||||
import { Router } from "express";
|
||||
import { z } from "zod";
|
||||
import { authorize, requireAuth } from "../middleware/auth";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { logAudit } from "../services/audit.service";
|
||||
import { toPrismaJsonValue } from "../lib/prisma-json";
|
||||
import {
|
||||
ANNOUNCEMENT_SEVERITIES,
|
||||
buildInboxForUser,
|
||||
deleteAnnouncement,
|
||||
getAnnouncementState,
|
||||
markAllAnnouncementsRead,
|
||||
markAnnouncementRead,
|
||||
upsertAnnouncement
|
||||
} from "../services/announcement.service";
|
||||
|
||||
const router = Router();

// Reusable zod enums for announcement targeting and severity.
const roleEnum = z.nativeEnum(Role);
const announcementSeverityEnum = z.enum(ANNOUNCEMENT_SEVERITIES);

// Payload for POST /admin. An empty `audience_roles` array is the default;
// `expires_at` is nullable so an announcement can be created without expiry.
const createAnnouncementSchema = z.object({
  title: z.string().min(2).max(180),
  message: z.string().min(2).max(5000),
  severity: announcementSeverityEnum.default("INFO"),
  audience_roles: z.array(roleEnum).default([]),
  is_active: z.boolean().default(true),
  published_at: z.string().datetime().optional(),
  expires_at: z.string().datetime().nullable().optional()
});

// Payload for PATCH /admin/:id — every create field, all optional.
const updateAnnouncementSchema = createAnnouncementSchema.partial();
|
||||
|
||||
router.get("/inbox", requireAuth, async (req, res, next) => {
|
||||
try {
|
||||
const state = await getAnnouncementState();
|
||||
const inbox = buildInboxForUser(state, {
|
||||
user_id: req.user!.id,
|
||||
role: req.user!.role
|
||||
});
|
||||
return res.json(inbox);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/:id/read", requireAuth, async (req, res, next) => {
|
||||
try {
|
||||
const result = await markAnnouncementRead({
|
||||
user_id: req.user!.id,
|
||||
announcement_id: req.params.id
|
||||
});
|
||||
if (!result.updated) {
|
||||
throw new HttpError(404, "Announcement not found", "ANNOUNCEMENT_NOT_FOUND");
|
||||
}
|
||||
|
||||
const state = await getAnnouncementState();
|
||||
const inbox = buildInboxForUser(state, {
|
||||
user_id: req.user!.id,
|
||||
role: req.user!.role
|
||||
});
|
||||
return res.json({
|
||||
success: true,
|
||||
unread_count: inbox.unread_count
|
||||
});
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/read-all", requireAuth, async (req, res, next) => {
|
||||
try {
|
||||
const result = await markAllAnnouncementsRead({
|
||||
user_id: req.user!.id,
|
||||
role: req.user!.role
|
||||
});
|
||||
return res.json({
|
||||
success: true,
|
||||
marked_count: result.updated
|
||||
});
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/admin", requireAuth, authorize("settings:manage"), async (_req, res, next) => {
|
||||
try {
|
||||
const state = await getAnnouncementState();
|
||||
return res.json({
|
||||
items: state.items,
|
||||
total_count: state.items.length
|
||||
});
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/admin", requireAuth, authorize("settings:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = createAnnouncementSchema.parse(req.body ?? {});
|
||||
const announcement = await upsertAnnouncement({
|
||||
...payload,
|
||||
actor_email: req.user!.email
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "announcement.create",
|
||||
resource_type: "SYSTEM",
|
||||
resource_id: announcement.id,
|
||||
resource_name: announcement.title,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue(payload),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
return res.status(201).json(announcement);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/admin/:id", requireAuth, authorize("settings:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = updateAnnouncementSchema.parse(req.body ?? {});
|
||||
if (Object.keys(payload).length === 0) {
|
||||
throw new HttpError(400, "No fields provided", "VALIDATION_ERROR");
|
||||
}
|
||||
|
||||
const state = await getAnnouncementState();
|
||||
const existing = state.items.find((item) => item.id === req.params.id);
|
||||
if (!existing) {
|
||||
throw new HttpError(404, "Announcement not found", "ANNOUNCEMENT_NOT_FOUND");
|
||||
}
|
||||
|
||||
const announcement = await upsertAnnouncement({
|
||||
id: req.params.id,
|
||||
title: payload.title ?? existing.title,
|
||||
message: payload.message ?? existing.message,
|
||||
severity: payload.severity ?? existing.severity,
|
||||
audience_roles: payload.audience_roles ?? existing.audience_roles,
|
||||
is_active: payload.is_active ?? existing.is_active,
|
||||
published_at: payload.published_at ?? existing.published_at,
|
||||
expires_at: payload.expires_at ?? existing.expires_at,
|
||||
actor_email: req.user!.email
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "announcement.update",
|
||||
resource_type: "SYSTEM",
|
||||
resource_id: announcement.id,
|
||||
resource_name: announcement.title,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue({ updated_fields: Object.keys(payload) }),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
return res.json(announcement);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.delete("/admin/:id", requireAuth, authorize("settings:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const result = await deleteAnnouncement(req.params.id);
|
||||
if (!result.deleted) {
|
||||
throw new HttpError(404, "Announcement not found", "ANNOUNCEMENT_NOT_FOUND");
|
||||
}
|
||||
|
||||
await logAudit({
|
||||
action: "announcement.delete",
|
||||
resource_type: "SYSTEM",
|
||||
resource_id: req.params.id,
|
||||
resource_name: req.params.id,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
return res.status(204).send();
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
||||
295
backend/src/routes/auth.routes.ts
Normal file
295
backend/src/routes/auth.routes.ts
Normal file
@@ -0,0 +1,295 @@
|
||||
import { Router } from "express";
|
||||
import crypto from "crypto";
|
||||
import bcrypt from "bcryptjs";
|
||||
import { z } from "zod";
|
||||
import jwt from "jsonwebtoken";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { createJwtToken, createRefreshToken, requireAuth, verifyRefreshToken } from "../middleware/auth";
|
||||
import { consumeRecoveryCode, hashToken } from "../lib/security";
|
||||
import { verifyTotpCode } from "../lib/totp";
|
||||
import { getUserModuleAccess } from "../services/module-access.service";
|
||||
|
||||
const router = Router();

// POST /login payload. `mfa_code` (TOTP) or `recovery_code` is validated
// at runtime once the account has MFA enabled.
const loginSchema = z.object({
  email: z.string().email(),
  password: z.string().min(1),
  mfa_code: z.string().optional(),
  recovery_code: z.string().optional()
});

// POST /refresh payload.
const refreshSchema = z.object({
  refresh_token: z.string().min(1)
});

// POST /logout payload. NOTE(review): optional presumably so logout still
// succeeds for clients that lost the token — confirm against the handler.
const logoutSchema = z.object({
  refresh_token: z.string().min(1).optional()
});
|
||||
|
||||
function tokenExpiryDate(refreshToken: string) {
|
||||
const decoded = jwt.decode(refreshToken) as { exp?: number } | null;
|
||||
const exp = decoded?.exp ? new Date(decoded.exp * 1000) : new Date(Date.now() + 30 * 24 * 60 * 60 * 1000);
|
||||
return exp;
|
||||
}
|
||||
|
||||
/**
 * Mint an access/refresh token pair sharing a new session id (`sid`) and
 * persist the session row. Only `hashToken(refreshToken)` is stored —
 * never the raw refresh token — together with the token's expiry and the
 * caller's IP / user agent.
 */
async function createAuthSession(input: {
  user: {
    id: string;
    email: string;
    role: Express.UserToken["role"];
    tenant_id?: string | null;
  };
  ipAddress?: string;
  userAgent?: string;
}) {
  const sessionId = crypto.randomUUID();
  // The same claims go into both tokens; `sid` ties them to the DB row so
  // a session can be revoked independently of the user.
  const basePayload = {
    id: input.user.id,
    email: input.user.email,
    role: input.user.role,
    tenant_id: input.user.tenant_id,
    sid: sessionId
  };
  const accessToken = createJwtToken(basePayload);
  const refreshToken = createRefreshToken(basePayload);

  await prisma.authSession.create({
    data: {
      id: sessionId,
      user_id: input.user.id,
      refresh_token_hash: hashToken(refreshToken),
      ip_address: input.ipAddress,
      user_agent: input.userAgent,
      // Session row expiry mirrors the refresh token's own exp claim.
      expires_at: tokenExpiryDate(refreshToken),
      last_used_at: new Date()
    }
  });

  return {
    token: accessToken,
    refresh_token: refreshToken
  };
}
|
||||
|
||||
router.post("/login", async (req, res, next) => {
|
||||
try {
|
||||
const payload = loginSchema.parse(req.body);
|
||||
const user = await prisma.user.findUnique({ where: { email: payload.email.toLowerCase().trim() } });
|
||||
if (!user || !user.is_active) {
|
||||
throw new HttpError(401, "Invalid email or password", "INVALID_CREDENTIALS");
|
||||
}
|
||||
const matched = await bcrypt.compare(payload.password, user.password_hash);
|
||||
if (!matched) {
|
||||
throw new HttpError(401, "Invalid email or password", "INVALID_CREDENTIALS");
|
||||
}
|
||||
|
||||
if (user.mfa_enabled) {
|
||||
const mfaCode = payload.mfa_code?.trim();
|
||||
const recoveryCode = payload.recovery_code?.trim();
|
||||
if (!mfaCode && !recoveryCode) {
|
||||
throw new HttpError(401, "MFA code is required", "MFA_REQUIRED");
|
||||
}
|
||||
|
||||
let mfaPassed = false;
|
||||
if (mfaCode && user.mfa_secret) {
|
||||
mfaPassed = verifyTotpCode(mfaCode, user.mfa_secret, { window: 1 });
|
||||
}
|
||||
|
||||
if (!mfaPassed && recoveryCode) {
|
||||
const existingHashes = Array.isArray(user.mfa_recovery_codes) ? (user.mfa_recovery_codes as string[]) : [];
|
||||
const result = consumeRecoveryCode(recoveryCode, existingHashes);
|
||||
if (result.matched) {
|
||||
mfaPassed = true;
|
||||
await prisma.user.update({
|
||||
where: { id: user.id },
|
||||
data: { mfa_recovery_codes: result.remainingHashes }
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if (!mfaPassed) {
|
||||
throw new HttpError(401, "Invalid MFA code", "MFA_INVALID");
|
||||
}
|
||||
}
|
||||
|
||||
await prisma.user.update({
|
||||
where: { id: user.id },
|
||||
data: { last_login_at: new Date() }
|
||||
});
|
||||
|
||||
const userPayload = {
|
||||
id: user.id,
|
||||
email: user.email,
|
||||
role: user.role,
|
||||
tenant_id: user.tenant_id
|
||||
};
|
||||
const tokens = await createAuthSession({
|
||||
user: userPayload,
|
||||
ipAddress: req.ip,
|
||||
userAgent: req.get("user-agent") ?? undefined
|
||||
});
|
||||
const moduleAccess = await getUserModuleAccess(user.role);
|
||||
|
||||
res.json({
|
||||
token: tokens.token,
|
||||
refresh_token: tokens.refresh_token,
|
||||
must_change_password: user.must_change_password,
|
||||
user: {
|
||||
id: user.id,
|
||||
email: user.email,
|
||||
full_name: user.full_name,
|
||||
role: user.role,
|
||||
tenant_id: user.tenant_id,
|
||||
avatar_url: user.avatar_url,
|
||||
mfa_enabled: user.mfa_enabled,
|
||||
module_access: moduleAccess.access
|
||||
},
|
||||
module_access: moduleAccess.access
|
||||
});
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/refresh", async (req, res, next) => {
|
||||
try {
|
||||
const payload = refreshSchema.parse(req.body ?? {});
|
||||
const decoded = verifyRefreshToken(payload.refresh_token);
|
||||
if (!decoded || !decoded.sid) {
|
||||
throw new HttpError(401, "Invalid refresh token", "INVALID_REFRESH_TOKEN");
|
||||
}
|
||||
|
||||
const session = await prisma.authSession.findFirst({
|
||||
where: {
|
||||
id: decoded.sid,
|
||||
user_id: decoded.id,
|
||||
revoked_at: null
|
||||
}
|
||||
});
|
||||
if (!session) {
|
||||
throw new HttpError(401, "Refresh session not found", "INVALID_REFRESH_TOKEN");
|
||||
}
|
||||
if (session.expires_at.getTime() < Date.now()) {
|
||||
throw new HttpError(401, "Refresh session expired", "INVALID_REFRESH_TOKEN");
|
||||
}
|
||||
if (session.refresh_token_hash !== hashToken(payload.refresh_token)) {
|
||||
throw new HttpError(401, "Refresh token mismatch", "INVALID_REFRESH_TOKEN");
|
||||
}
|
||||
|
||||
const user = await prisma.user.findUnique({
|
||||
where: { id: decoded.id },
|
||||
select: {
|
||||
id: true,
|
||||
email: true,
|
||||
role: true,
|
||||
tenant_id: true,
|
||||
is_active: true
|
||||
}
|
||||
});
|
||||
if (!user || !user.is_active) {
|
||||
throw new HttpError(401, "Refresh token user is invalid", "INVALID_REFRESH_TOKEN");
|
||||
}
|
||||
|
||||
const userPayload = {
|
||||
id: user.id,
|
||||
email: user.email,
|
||||
role: user.role,
|
||||
tenant_id: user.tenant_id,
|
||||
sid: decoded.sid
|
||||
};
|
||||
const token = createJwtToken(userPayload);
|
||||
const refreshToken = createRefreshToken(userPayload);
|
||||
|
||||
await prisma.authSession.update({
|
||||
where: { id: decoded.sid },
|
||||
data: {
|
||||
refresh_token_hash: hashToken(refreshToken),
|
||||
expires_at: tokenExpiryDate(refreshToken),
|
||||
last_used_at: new Date(),
|
||||
ip_address: req.ip,
|
||||
user_agent: req.get("user-agent") ?? session.user_agent
|
||||
}
|
||||
});
|
||||
|
||||
res.json({
|
||||
token,
|
||||
refresh_token: refreshToken
|
||||
});
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/logout", requireAuth, async (req, res, next) => {
|
||||
try {
|
||||
const payload = logoutSchema.parse(req.body ?? {});
|
||||
const refreshToken = payload.refresh_token;
|
||||
if (!refreshToken) {
|
||||
await prisma.authSession.updateMany({
|
||||
where: {
|
||||
user_id: req.user!.id,
|
||||
revoked_at: null
|
||||
},
|
||||
data: {
|
||||
revoked_at: new Date()
|
||||
}
|
||||
});
|
||||
return res.json({ success: true, revoked: "all" });
|
||||
}
|
||||
|
||||
const decoded = verifyRefreshToken(refreshToken);
|
||||
if (!decoded?.sid || decoded.id !== req.user!.id) {
|
||||
throw new HttpError(401, "Invalid refresh token", "INVALID_REFRESH_TOKEN");
|
||||
}
|
||||
|
||||
await prisma.authSession.updateMany({
|
||||
where: {
|
||||
id: decoded.sid,
|
||||
user_id: req.user!.id,
|
||||
refresh_token_hash: hashToken(refreshToken),
|
||||
revoked_at: null
|
||||
},
|
||||
data: {
|
||||
revoked_at: new Date()
|
||||
}
|
||||
});
|
||||
|
||||
return res.json({ success: true, revoked: decoded.sid });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/me", requireAuth, async (req, res, next) => {
|
||||
try {
|
||||
const user = await prisma.user.findUnique({
|
||||
where: { id: req.user!.id },
|
||||
select: {
|
||||
id: true,
|
||||
email: true,
|
||||
full_name: true,
|
||||
role: true,
|
||||
tenant_id: true,
|
||||
is_active: true,
|
||||
created_at: true,
|
||||
avatar_url: true,
|
||||
profile_metadata: true,
|
||||
mfa_enabled: true,
|
||||
must_change_password: true,
|
||||
last_login_at: true
|
||||
}
|
||||
});
|
||||
if (!user) throw new HttpError(404, "User not found", "USER_NOT_FOUND");
|
||||
if (!user.is_active) throw new HttpError(401, "User account is inactive", "USER_INACTIVE");
|
||||
const moduleAccess = await getUserModuleAccess(user.role);
|
||||
res.json({
|
||||
...user,
|
||||
module_access: moduleAccess.access
|
||||
});
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
||||
491
backend/src/routes/backup.routes.ts
Normal file
491
backend/src/routes/backup.routes.ts
Normal file
@@ -0,0 +1,491 @@
|
||||
import {
|
||||
BackupRestoreMode,
|
||||
BackupRestoreStatus,
|
||||
BackupSchedule,
|
||||
BackupSource,
|
||||
BackupStatus,
|
||||
BackupType,
|
||||
SnapshotFrequency
|
||||
} from "@prisma/client";
|
||||
import { Router } from "express";
|
||||
import { z } from "zod";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { authorize, isTenantScopedUser, requireAuth } from "../middleware/auth";
|
||||
import { logAudit } from "../services/audit.service";
|
||||
import {
|
||||
createBackup,
|
||||
createRestoreTask,
|
||||
createSnapshotJob,
|
||||
deleteBackup,
|
||||
deleteSnapshotJob,
|
||||
listBackupPolicies,
|
||||
listBackups,
|
||||
listRestoreTasks,
|
||||
listSnapshotJobs,
|
||||
runRestoreTaskNow,
|
||||
runSnapshotJobNow,
|
||||
toggleBackupProtection,
|
||||
updateSnapshotJob,
|
||||
upsertBackupPolicy
|
||||
} from "../services/backup.service";
|
||||
|
||||
const router = Router();
|
||||
|
||||
const createBackupSchema = z.object({
|
||||
vm_id: z.string().min(1),
|
||||
type: z.nativeEnum(BackupType).optional(),
|
||||
source: z.nativeEnum(BackupSource).optional(),
|
||||
schedule: z.nativeEnum(BackupSchedule).optional(),
|
||||
retention_days: z.number().int().positive().optional(),
|
||||
storage: z.string().optional(),
|
||||
route_key: z.string().optional(),
|
||||
is_protected: z.boolean().optional(),
|
||||
notes: z.string().optional(),
|
||||
requested_size_mb: z.number().positive().optional()
|
||||
});
|
||||
|
||||
const protectionSchema = z.object({
|
||||
is_protected: z.boolean()
|
||||
});
|
||||
|
||||
const createRestoreSchema = z.object({
|
||||
backup_id: z.string().min(1),
|
||||
target_vm_id: z.string().optional(),
|
||||
mode: z.nativeEnum(BackupRestoreMode),
|
||||
requested_files: z.array(z.string().min(1)).optional(),
|
||||
pbs_enabled: z.boolean().optional(),
|
||||
run_immediately: z.boolean().default(true)
|
||||
});
|
||||
|
||||
const createSnapshotSchema = z.object({
|
||||
vm_id: z.string().min(1),
|
||||
name: z.string().min(2),
|
||||
frequency: z.nativeEnum(SnapshotFrequency),
|
||||
interval: z.number().int().positive().optional(),
|
||||
day_of_week: z.number().int().min(0).max(6).optional(),
|
||||
hour_utc: z.number().int().min(0).max(23).optional(),
|
||||
minute_utc: z.number().int().min(0).max(59).optional(),
|
||||
retention: z.number().int().positive().optional(),
|
||||
enabled: z.boolean().optional()
|
||||
});
|
||||
|
||||
const updateSnapshotSchema = z.object({
|
||||
name: z.string().min(2).optional(),
|
||||
frequency: z.nativeEnum(SnapshotFrequency).optional(),
|
||||
interval: z.number().int().positive().optional(),
|
||||
day_of_week: z.number().int().min(0).max(6).nullable().optional(),
|
||||
hour_utc: z.number().int().min(0).max(23).optional(),
|
||||
minute_utc: z.number().int().min(0).max(59).optional(),
|
||||
retention: z.number().int().positive().optional(),
|
||||
enabled: z.boolean().optional()
|
||||
});
|
||||
|
||||
const upsertPolicySchema = z.object({
|
||||
tenant_id: z.string().optional(),
|
||||
billing_plan_id: z.string().optional(),
|
||||
max_files: z.number().int().positive().optional(),
|
||||
max_total_size_mb: z.number().positive().optional(),
|
||||
max_protected_files: z.number().int().positive().optional(),
|
||||
allow_file_restore: z.boolean().optional(),
|
||||
allow_cross_vm_restore: z.boolean().optional(),
|
||||
allow_pbs_restore: z.boolean().optional()
|
||||
});
|
||||
|
||||
function parseOptionalBackupStatus(value: unknown) {
|
||||
if (typeof value !== "string") return undefined;
|
||||
const normalized = value.toUpperCase();
|
||||
return Object.values(BackupStatus).includes(normalized as BackupStatus)
|
||||
? (normalized as BackupStatus)
|
||||
: undefined;
|
||||
}
|
||||
|
||||
function parseOptionalRestoreStatus(value: unknown) {
|
||||
if (typeof value !== "string") return undefined;
|
||||
const normalized = value.toUpperCase();
|
||||
return Object.values(BackupRestoreStatus).includes(normalized as BackupRestoreStatus)
|
||||
? (normalized as BackupRestoreStatus)
|
||||
: undefined;
|
||||
}
|
||||
|
||||
async function ensureVmTenantScope(vmId: string, req: Express.Request) {
|
||||
const vm = await prisma.virtualMachine.findUnique({
|
||||
where: { id: vmId },
|
||||
select: {
|
||||
id: true,
|
||||
tenant_id: true,
|
||||
name: true
|
||||
}
|
||||
});
|
||||
|
||||
if (!vm) throw new HttpError(404, "VM not found", "VM_NOT_FOUND");
|
||||
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && vm.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
return vm;
|
||||
}
|
||||
|
||||
async function ensureBackupTenantScope(backupId: string, req: Express.Request) {
|
||||
const backup = await prisma.backup.findUnique({
|
||||
where: { id: backupId },
|
||||
include: {
|
||||
vm: {
|
||||
select: {
|
||||
id: true,
|
||||
tenant_id: true,
|
||||
name: true
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
if (!backup) throw new HttpError(404, "Backup not found", "BACKUP_NOT_FOUND");
|
||||
|
||||
const tenantId = backup.tenant_id ?? backup.vm.tenant_id;
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && tenantId !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
return backup;
|
||||
}
|
||||
|
||||
async function ensureRestoreTaskTenantScope(taskId: string, req: Express.Request) {
|
||||
const task = await prisma.backupRestoreTask.findUnique({
|
||||
where: { id: taskId },
|
||||
include: {
|
||||
source_vm: {
|
||||
select: {
|
||||
tenant_id: true
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
if (!task) throw new HttpError(404, "Restore task not found", "RESTORE_TASK_NOT_FOUND");
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && task.source_vm.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
return task;
|
||||
}
|
||||
|
||||
async function ensureSnapshotJobTenantScope(jobId: string, req: Express.Request) {
|
||||
const job = await prisma.snapshotJob.findUnique({
|
||||
where: { id: jobId },
|
||||
include: {
|
||||
vm: {
|
||||
select: {
|
||||
tenant_id: true
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
if (!job) throw new HttpError(404, "Snapshot job not found", "SNAPSHOT_JOB_NOT_FOUND");
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && job.vm.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
return job;
|
||||
}
|
||||
|
||||
router.get("/", requireAuth, authorize("backup:read"), async (req, res, next) => {
|
||||
try {
|
||||
const status = parseOptionalBackupStatus(req.query.status);
|
||||
const vmId = typeof req.query.vm_id === "string" ? req.query.vm_id : undefined;
|
||||
const limit = typeof req.query.limit === "string" ? Number(req.query.limit) : undefined;
|
||||
const offset = typeof req.query.offset === "string" ? Number(req.query.offset) : undefined;
|
||||
|
||||
if (vmId) {
|
||||
await ensureVmTenantScope(vmId, req);
|
||||
}
|
||||
|
||||
const result = await listBackups({
|
||||
tenantId: isTenantScopedUser(req) ? req.user?.tenant_id ?? undefined : undefined,
|
||||
status,
|
||||
vmId,
|
||||
limit,
|
||||
offset
|
||||
});
|
||||
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/", requireAuth, authorize("backup:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = createBackupSchema.parse(req.body ?? {});
|
||||
await ensureVmTenantScope(payload.vm_id, req);
|
||||
|
||||
const backup = await createBackup({
|
||||
vmId: payload.vm_id,
|
||||
type: payload.type,
|
||||
source: payload.source,
|
||||
schedule: payload.schedule,
|
||||
retentionDays: payload.retention_days,
|
||||
storage: payload.storage,
|
||||
routeKey: payload.route_key,
|
||||
isProtected: payload.is_protected,
|
||||
notes: payload.notes,
|
||||
requestedSizeMb: payload.requested_size_mb,
|
||||
createdBy: req.user?.email
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "backup.create",
|
||||
resource_type: "BACKUP",
|
||||
resource_id: backup.id,
|
||||
resource_name: backup.vm_name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: payload,
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
res.status(201).json(backup);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/:id/protection", requireAuth, authorize("backup:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = protectionSchema.parse(req.body ?? {});
|
||||
await ensureBackupTenantScope(req.params.id, req);
|
||||
|
||||
const backup = await toggleBackupProtection(req.params.id, payload.is_protected);
|
||||
res.json(backup);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.delete("/:id", requireAuth, authorize("backup:manage"), async (req, res, next) => {
|
||||
try {
|
||||
await ensureBackupTenantScope(req.params.id, req);
|
||||
const force = req.query.force === "true";
|
||||
await deleteBackup(req.params.id, force);
|
||||
res.status(204).send();
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/restores", requireAuth, authorize("backup:read"), async (req, res, next) => {
|
||||
try {
|
||||
const status = parseOptionalRestoreStatus(req.query.status);
|
||||
const limit = typeof req.query.limit === "string" ? Number(req.query.limit) : undefined;
|
||||
const offset = typeof req.query.offset === "string" ? Number(req.query.offset) : undefined;
|
||||
|
||||
const result = await listRestoreTasks({
|
||||
tenantId: isTenantScopedUser(req) ? req.user?.tenant_id ?? undefined : undefined,
|
||||
status,
|
||||
limit,
|
||||
offset
|
||||
});
|
||||
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/restores", requireAuth, authorize("backup:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = createRestoreSchema.parse(req.body ?? {});
|
||||
await ensureBackupTenantScope(payload.backup_id, req);
|
||||
|
||||
if (payload.target_vm_id) {
|
||||
await ensureVmTenantScope(payload.target_vm_id, req);
|
||||
}
|
||||
|
||||
const task = await createRestoreTask({
|
||||
backupId: payload.backup_id,
|
||||
targetVmId: payload.target_vm_id,
|
||||
mode: payload.mode,
|
||||
requestedFiles: payload.requested_files,
|
||||
pbsEnabled: payload.pbs_enabled,
|
||||
createdBy: req.user?.email,
|
||||
runImmediately: payload.run_immediately
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "backup.restore.create",
|
||||
resource_type: "BACKUP",
|
||||
resource_id: payload.backup_id,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: payload,
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
res.status(201).json(task);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/restores/:id/run", requireAuth, authorize("backup:manage"), async (req, res, next) => {
|
||||
try {
|
||||
await ensureRestoreTaskTenantScope(req.params.id, req);
|
||||
const task = await runRestoreTaskNow(req.params.id);
|
||||
res.json(task);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/snapshot-jobs", requireAuth, authorize("backup:read"), async (req, res, next) => {
|
||||
try {
|
||||
const jobs = await listSnapshotJobs({
|
||||
tenantId: isTenantScopedUser(req) ? req.user?.tenant_id ?? undefined : undefined
|
||||
});
|
||||
res.json({ data: jobs });
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/snapshot-jobs", requireAuth, authorize("backup:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = createSnapshotSchema.parse(req.body ?? {});
|
||||
await ensureVmTenantScope(payload.vm_id, req);
|
||||
|
||||
const job = await createSnapshotJob({
|
||||
vmId: payload.vm_id,
|
||||
name: payload.name,
|
||||
frequency: payload.frequency,
|
||||
interval: payload.interval,
|
||||
dayOfWeek: payload.day_of_week,
|
||||
hourUtc: payload.hour_utc,
|
||||
minuteUtc: payload.minute_utc,
|
||||
retention: payload.retention,
|
||||
enabled: payload.enabled,
|
||||
createdBy: req.user?.email
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "snapshot_job.create",
|
||||
resource_type: "BACKUP",
|
||||
resource_id: job.id,
|
||||
resource_name: job.name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: payload,
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
res.status(201).json(job);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/snapshot-jobs/:id", requireAuth, authorize("backup:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = updateSnapshotSchema.parse(req.body ?? {});
|
||||
await ensureSnapshotJobTenantScope(req.params.id, req);
|
||||
|
||||
const job = await updateSnapshotJob(req.params.id, {
|
||||
name: payload.name,
|
||||
frequency: payload.frequency,
|
||||
interval: payload.interval,
|
||||
dayOfWeek: payload.day_of_week,
|
||||
hourUtc: payload.hour_utc,
|
||||
minuteUtc: payload.minute_utc,
|
||||
retention: payload.retention,
|
||||
enabled: payload.enabled
|
||||
});
|
||||
|
||||
res.json(job);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.delete("/snapshot-jobs/:id", requireAuth, authorize("backup:manage"), async (req, res, next) => {
|
||||
try {
|
||||
await ensureSnapshotJobTenantScope(req.params.id, req);
|
||||
await deleteSnapshotJob(req.params.id);
|
||||
res.status(204).send();
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/snapshot-jobs/:id/run", requireAuth, authorize("backup:manage"), async (req, res, next) => {
|
||||
try {
|
||||
await ensureSnapshotJobTenantScope(req.params.id, req);
|
||||
const result = await runSnapshotJobNow(req.params.id);
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/policies", requireAuth, authorize("backup:read"), async (_req, res, next) => {
|
||||
try {
|
||||
const all = await listBackupPolicies();
|
||||
const data =
|
||||
isTenantScopedUser(_req) && _req.user?.tenant_id
|
||||
? all.filter((item) => item.tenant_id === _req.user?.tenant_id)
|
||||
: all;
|
||||
res.json({ data });
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/policies", requireAuth, authorize("backup:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = upsertPolicySchema.parse(req.body ?? {});
|
||||
const tenantId = isTenantScopedUser(req) ? req.user?.tenant_id ?? undefined : payload.tenant_id;
|
||||
if (isTenantScopedUser(req) && payload.tenant_id && req.user?.tenant_id && payload.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
const policy = await upsertBackupPolicy({
|
||||
tenantId,
|
||||
billingPlanId: payload.billing_plan_id,
|
||||
maxFiles: payload.max_files,
|
||||
maxTotalSizeMb: payload.max_total_size_mb,
|
||||
maxProtectedFiles: payload.max_protected_files,
|
||||
allowFileRestore: payload.allow_file_restore,
|
||||
allowCrossVmRestore: payload.allow_cross_vm_restore,
|
||||
allowPbsRestore: payload.allow_pbs_restore
|
||||
});
|
||||
|
||||
res.status(201).json(policy);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/policies/:id", requireAuth, authorize("backup:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = upsertPolicySchema.parse(req.body ?? {});
|
||||
const tenantId = isTenantScopedUser(req) ? req.user?.tenant_id ?? undefined : payload.tenant_id;
|
||||
if (isTenantScopedUser(req) && payload.tenant_id && req.user?.tenant_id && payload.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
const policy = await upsertBackupPolicy({
|
||||
policyId: req.params.id,
|
||||
tenantId,
|
||||
billingPlanId: payload.billing_plan_id,
|
||||
maxFiles: payload.max_files,
|
||||
maxTotalSizeMb: payload.max_total_size_mb,
|
||||
maxProtectedFiles: payload.max_protected_files,
|
||||
allowFileRestore: payload.allow_file_restore,
|
||||
allowCrossVmRestore: payload.allow_cross_vm_restore,
|
||||
allowPbsRestore: payload.allow_pbs_restore
|
||||
});
|
||||
|
||||
res.json(policy);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
||||
46
backend/src/routes/billing.routes.ts
Normal file
46
backend/src/routes/billing.routes.ts
Normal file
@@ -0,0 +1,46 @@
|
||||
import { Router } from "express";
|
||||
import { z } from "zod";
|
||||
import { authorize, requireAuth } from "../middleware/auth";
|
||||
import { generateInvoicesFromUnbilledUsage, markInvoicePaid, meterHourlyUsage } from "../services/billing.service";
|
||||
|
||||
const router = Router();
|
||||
|
||||
router.post("/meter/hourly", requireAuth, authorize("billing:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const result = await meterHourlyUsage(req.user?.email ?? "system@proxpanel.local");
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/invoices/generate", requireAuth, authorize("billing:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const result = await generateInvoicesFromUnbilledUsage(req.user?.email ?? "system@proxpanel.local");
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
const markPaidSchema = z.object({
|
||||
payment_provider: z.enum(["PAYSTACK", "FLUTTERWAVE", "MANUAL"]).default("MANUAL"),
|
||||
payment_reference: z.string().min(2)
|
||||
});
|
||||
|
||||
router.post("/invoices/:id/pay", requireAuth, authorize("billing:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = markPaidSchema.parse(req.body ?? {});
|
||||
const invoice = await markInvoicePaid(
|
||||
req.params.id,
|
||||
payload.payment_provider,
|
||||
payload.payment_reference,
|
||||
req.user?.email ?? "system@proxpanel.local"
|
||||
);
|
||||
res.json(invoice);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
||||
1247
backend/src/routes/client.routes.ts
Normal file
1247
backend/src/routes/client.routes.ts
Normal file
File diff suppressed because it is too large
Load Diff
390
backend/src/routes/dashboard.routes.ts
Normal file
390
backend/src/routes/dashboard.routes.ts
Normal file
@@ -0,0 +1,390 @@
|
||||
import { Router } from "express";
|
||||
import { IpScope, IpVersion } from "@prisma/client";
|
||||
import { authorize, isTenantScopedUser, requireAuth } from "../middleware/auth";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { subnetUtilizationDashboard } from "../services/network.service";
|
||||
|
||||
const router = Router();
|
||||
|
||||
type HeatLevel = "critical" | "warning" | "elevated" | "healthy";
|
||||
|
||||
function clampInteger(value: unknown, min: number, max: number, fallback: number) {
|
||||
if (typeof value !== "string") return fallback;
|
||||
const parsed = Number(value);
|
||||
if (!Number.isInteger(parsed)) return fallback;
|
||||
return Math.min(Math.max(parsed, min), max);
|
||||
}
|
||||
|
||||
function toUtcDayStart(date: Date) {
|
||||
return new Date(Date.UTC(date.getUTCFullYear(), date.getUTCMonth(), date.getUTCDate()));
|
||||
}
|
||||
|
||||
function toDateKey(date: Date) {
|
||||
return date.toISOString().slice(0, 10);
|
||||
}
|
||||
|
||||
function resolveHeatLevel(pressurePct: number): HeatLevel {
|
||||
if (pressurePct >= 90) return "critical";
|
||||
if (pressurePct >= 75) return "warning";
|
||||
if (pressurePct >= 60) return "elevated";
|
||||
return "healthy";
|
||||
}
|
||||
|
||||
router.get("/summary", requireAuth, authorize("vm:read"), async (req, res, next) => {
|
||||
try {
|
||||
const tenantScoped = isTenantScopedUser(req) && req.user?.tenant_id;
|
||||
const tenantWhere = tenantScoped ? { tenant_id: req.user!.tenant_id! } : {};
|
||||
|
||||
const [vmTotal, vmRunning, nodeTotal, tenantTotal, invoicePaidAgg, invoicePendingAgg] = await Promise.all([
|
||||
prisma.virtualMachine.count({ where: tenantWhere }),
|
||||
prisma.virtualMachine.count({ where: { ...tenantWhere, status: "RUNNING" } }),
|
||||
prisma.proxmoxNode.count(),
|
||||
prisma.tenant.count(),
|
||||
prisma.invoice.aggregate({
|
||||
where: { ...tenantWhere, status: "PAID" },
|
||||
_sum: { amount: true }
|
||||
}),
|
||||
prisma.invoice.aggregate({
|
||||
where: { ...tenantWhere, status: "PENDING" },
|
||||
_sum: { amount: true }
|
||||
})
|
||||
]);
|
||||
|
||||
const usage = await prisma.usageRecord.findMany({
|
||||
where: {
|
||||
...tenantWhere,
|
||||
period_start: {
|
||||
gte: new Date(Date.now() - 7 * 24 * 60 * 60 * 1000)
|
||||
}
|
||||
},
|
||||
orderBy: { period_start: "asc" }
|
||||
});
|
||||
|
||||
const hourlyRevenueMap = new Map<string, number>();
|
||||
for (const record of usage) {
|
||||
const key = new Date(record.period_start).toISOString().slice(0, 13) + ":00:00Z";
|
||||
hourlyRevenueMap.set(key, (hourlyRevenueMap.get(key) ?? 0) + Number(record.total_cost));
|
||||
}
|
||||
|
||||
const topVmMap = new Map<string, { vm_name: string; total: number }>();
|
||||
for (const record of usage) {
|
||||
const current = topVmMap.get(record.vm_id) ?? { vm_name: record.vm_name, total: 0 };
|
||||
current.total += Number(record.total_cost);
|
||||
topVmMap.set(record.vm_id, current);
|
||||
}
|
||||
|
||||
const topVms = Array.from(topVmMap.entries())
|
||||
.map(([vm_id, value]) => ({ vm_id, ...value }))
|
||||
.sort((a, b) => b.total - a.total)
|
||||
.slice(0, 5);
|
||||
|
||||
const recentVms = await prisma.virtualMachine.findMany({
|
||||
where: tenantWhere,
|
||||
orderBy: { created_at: "desc" },
|
||||
take: 8,
|
||||
select: {
|
||||
id: true,
|
||||
name: true,
|
||||
status: true,
|
||||
node: true,
|
||||
tenant_id: true,
|
||||
cpu_usage: true,
|
||||
ram_usage: true,
|
||||
disk_usage: true,
|
||||
created_at: true
|
||||
}
|
||||
});
|
||||
|
||||
res.json({
|
||||
metrics: {
|
||||
vm_total: vmTotal,
|
||||
vm_running: vmRunning,
|
||||
node_total: nodeTotal,
|
||||
tenant_total: tenantTotal,
|
||||
revenue_paid_total: Number(invoicePaidAgg._sum.amount ?? 0),
|
||||
revenue_pending_total: Number(invoicePendingAgg._sum.amount ?? 0)
|
||||
},
|
||||
hourly_revenue_7d: Array.from(hourlyRevenueMap.entries()).map(([time, value]) => ({
|
||||
time,
|
||||
value
|
||||
})),
|
||||
top_vms_by_cost: topVms,
|
||||
recent_vms: recentVms
|
||||
});
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/network-utilization", requireAuth, authorize("vm:read"), async (req, res, next) => {
|
||||
try {
|
||||
const tenantScoped = isTenantScopedUser(req) && req.user?.tenant_id;
|
||||
const selectedTenantId =
|
||||
tenantScoped && req.user?.tenant_id
|
||||
? req.user.tenant_id
|
||||
: typeof req.query.tenant_id === "string"
|
||||
? req.query.tenant_id
|
||||
: undefined;
|
||||
|
||||
const scopeQuery = typeof req.query.scope === "string" ? req.query.scope.toUpperCase() : undefined;
|
||||
const versionQuery = typeof req.query.version === "string" ? req.query.version.toUpperCase() : undefined;
|
||||
const rawVlanTag = typeof req.query.vlan_tag === "string" ? Number(req.query.vlan_tag) : undefined;
|
||||
const vlanTag = typeof rawVlanTag === "number" && Number.isInteger(rawVlanTag) ? rawVlanTag : undefined;
|
||||
|
||||
const scope = Object.values(IpScope).includes(scopeQuery as IpScope) ? (scopeQuery as IpScope) : undefined;
|
||||
const version = Object.values(IpVersion).includes(versionQuery as IpVersion) ? (versionQuery as IpVersion) : undefined;
|
||||
|
||||
const days = clampInteger(req.query.days, 7, 60, 14);
|
||||
const maxTenants = clampInteger(req.query.max_tenants, 1, 10, 5);
|
||||
|
||||
const subnetDashboard = await subnetUtilizationDashboard({
|
||||
scope,
|
||||
version,
|
||||
node_hostname: typeof req.query.node_hostname === "string" ? req.query.node_hostname : undefined,
|
||||
bridge: typeof req.query.bridge === "string" ? req.query.bridge : undefined,
|
||||
vlan_tag: vlanTag,
|
||||
tenant_id: selectedTenantId
|
||||
});
|
||||
|
||||
const heatmapCells = subnetDashboard.subnets.slice(0, 18).map((subnet, index) => ({
|
||||
rank: index + 1,
|
||||
subnet: subnet.subnet,
|
||||
scope: subnet.scope,
|
||||
version: subnet.version,
|
||||
node_hostname: subnet.node_hostname,
|
||||
bridge: subnet.bridge,
|
||||
vlan_tag: subnet.vlan_tag,
|
||||
total: subnet.total,
|
||||
assigned: subnet.assigned,
|
||||
reserved: subnet.reserved,
|
||||
available: subnet.available,
|
||||
utilization_pct: subnet.utilization_pct,
|
||||
pressure_pct: subnet.pressure_pct,
|
||||
heat_level: resolveHeatLevel(subnet.pressure_pct)
|
||||
}));
|
||||
|
||||
const heatmapSummary = subnetDashboard.subnets.reduce(
|
||||
(acc, subnet) => {
|
||||
const level = resolveHeatLevel(subnet.pressure_pct);
|
||||
acc.total_subnets += 1;
|
||||
if (level === "critical") acc.critical += 1;
|
||||
if (level === "warning") acc.warning += 1;
|
||||
if (level === "elevated") acc.elevated += 1;
|
||||
if (level === "healthy") acc.healthy += 1;
|
||||
return acc;
|
||||
},
|
||||
{
|
||||
total_subnets: 0,
|
||||
critical: 0,
|
||||
warning: 0,
|
||||
elevated: 0,
|
||||
healthy: 0
|
||||
}
|
||||
);
|
||||
|
||||
let tenantIds: string[] = [];
|
||||
if (selectedTenantId) {
|
||||
tenantIds = [selectedTenantId];
|
||||
} else {
|
||||
const groupedTenants = await prisma.ipAssignment.groupBy({
|
||||
by: ["tenant_id"],
|
||||
where: {
|
||||
is_active: true,
|
||||
tenant_id: {
|
||||
not: null
|
||||
}
|
||||
},
|
||||
_count: {
|
||||
_all: true
|
||||
},
|
||||
orderBy: {
|
||||
_count: {
|
||||
tenant_id: "desc"
|
||||
}
|
||||
},
|
||||
take: maxTenants
|
||||
});
|
||||
|
||||
tenantIds = groupedTenants.map((item) => item.tenant_id).filter((item): item is string => Boolean(item));
|
||||
}
|
||||
|
||||
if (tenantIds.length === 0) {
|
||||
return res.json({
|
||||
generated_at: new Date().toISOString(),
|
||||
subnet_heatmap: {
|
||||
summary: heatmapSummary,
|
||||
cells: heatmapCells
|
||||
},
|
||||
tenant_trends: {
|
||||
window_days: days,
|
||||
series: [],
|
||||
chart_points: []
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
const rangeEnd = new Date();
|
||||
rangeEnd.setUTCHours(23, 59, 59, 999);
|
||||
const rangeStart = toUtcDayStart(rangeEnd);
|
||||
rangeStart.setUTCDate(rangeStart.getUTCDate() - (days - 1));
|
||||
|
||||
const dayFrames = Array.from({ length: days }, (_, index) => {
|
||||
const start = new Date(rangeStart);
|
||||
start.setUTCDate(rangeStart.getUTCDate() + index);
|
||||
const end = new Date(start);
|
||||
end.setUTCHours(23, 59, 59, 999);
|
||||
return {
|
||||
key: toDateKey(start),
|
||||
end
|
||||
};
|
||||
});
|
||||
|
||||
const [tenants, quotas, assignments] = await Promise.all([
|
||||
prisma.tenant.findMany({
|
||||
where: {
|
||||
id: {
|
||||
in: tenantIds
|
||||
}
|
||||
},
|
||||
select: {
|
||||
id: true,
|
||||
name: true
|
||||
}
|
||||
}),
|
||||
prisma.tenantIpQuota.findMany({
|
||||
where: {
|
||||
tenant_id: {
|
||||
in: tenantIds
|
||||
}
|
||||
},
|
||||
select: {
|
||||
tenant_id: true,
|
||||
ipv4_limit: true,
|
||||
ipv6_limit: true,
|
||||
burst_allowed: true
|
||||
}
|
||||
}),
|
||||
prisma.ipAssignment.findMany({
|
||||
where: {
|
||||
tenant_id: {
|
||||
in: tenantIds
|
||||
},
|
||||
assigned_at: {
|
||||
lte: rangeEnd
|
||||
},
|
||||
OR: [
|
||||
{
|
||||
released_at: null
|
||||
},
|
||||
{
|
||||
released_at: {
|
||||
gte: rangeStart
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
select: {
|
||||
tenant_id: true,
|
||||
assigned_at: true,
|
||||
released_at: true,
|
||||
ip_address: {
|
||||
select: {
|
||||
version: true
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
]);
|
||||
|
||||
const tenantMap = new Map(tenants.map((tenant) => [tenant.id, tenant]));
|
||||
const quotaMap = new Map(quotas.map((quota) => [quota.tenant_id, quota]));
|
||||
const assignmentsByTenant = new Map<string, typeof assignments>();
|
||||
|
||||
for (const assignment of assignments) {
|
||||
if (!assignment.tenant_id) continue;
|
||||
if (!assignmentsByTenant.has(assignment.tenant_id)) {
|
||||
assignmentsByTenant.set(assignment.tenant_id, []);
|
||||
}
|
||||
assignmentsByTenant.get(assignment.tenant_id)!.push(assignment);
|
||||
}
|
||||
|
||||
const orderedTenantIds = tenantIds.filter((tenantId) => tenantMap.has(tenantId));
|
||||
const series = orderedTenantIds.map((tenantId) => {
|
||||
const tenant = tenantMap.get(tenantId)!;
|
||||
const quota = quotaMap.get(tenantId);
|
||||
const tenantAssignments = assignmentsByTenant.get(tenantId) ?? [];
|
||||
|
||||
const points = dayFrames.map((day) => {
|
||||
let assignedIpv4 = 0;
|
||||
let assignedIpv6 = 0;
|
||||
|
||||
for (const assignment of tenantAssignments) {
|
||||
const activeAtDayEnd =
|
||||
assignment.assigned_at <= day.end && (!assignment.released_at || assignment.released_at > day.end);
|
||||
if (!activeAtDayEnd) continue;
|
||||
if (assignment.ip_address.version === IpVersion.IPV4) assignedIpv4 += 1;
|
||||
if (assignment.ip_address.version === IpVersion.IPV6) assignedIpv6 += 1;
|
||||
}
|
||||
|
||||
const quotaPressure: number[] = [];
|
||||
if (typeof quota?.ipv4_limit === "number" && quota.ipv4_limit > 0) {
|
||||
quotaPressure.push((assignedIpv4 / quota.ipv4_limit) * 100);
|
||||
}
|
||||
if (typeof quota?.ipv6_limit === "number" && quota.ipv6_limit > 0) {
|
||||
quotaPressure.push((assignedIpv6 / quota.ipv6_limit) * 100);
|
||||
}
|
||||
|
||||
return {
|
||||
date: day.key,
|
||||
assigned_total: assignedIpv4 + assignedIpv6,
|
||||
assigned_ipv4: assignedIpv4,
|
||||
assigned_ipv6: assignedIpv6,
|
||||
quota_utilization_pct: quotaPressure.length > 0 ? Number(Math.max(...quotaPressure).toFixed(2)) : null
|
||||
};
|
||||
});
|
||||
|
||||
const lastPoint = points[points.length - 1];
|
||||
return {
|
||||
tenant_id: tenant.id,
|
||||
tenant_name: tenant.name,
|
||||
current_assigned: lastPoint?.assigned_total ?? 0,
|
||||
peak_assigned: points.reduce((peak, point) => (point.assigned_total > peak ? point.assigned_total : peak), 0),
|
||||
quota: {
|
||||
ipv4_limit: quota?.ipv4_limit ?? null,
|
||||
ipv6_limit: quota?.ipv6_limit ?? null,
|
||||
burst_allowed: quota?.burst_allowed ?? false
|
||||
},
|
||||
points
|
||||
};
|
||||
});
|
||||
|
||||
const chartPoints = dayFrames.map((day, index) => {
|
||||
const point: Record<string, string | number> = {
|
||||
date: day.key
|
||||
};
|
||||
|
||||
for (const tenant of series) {
|
||||
point[tenant.tenant_id] = tenant.points[index]?.assigned_total ?? 0;
|
||||
}
|
||||
|
||||
return point;
|
||||
});
|
||||
|
||||
return res.json({
|
||||
generated_at: new Date().toISOString(),
|
||||
subnet_heatmap: {
|
||||
summary: heatmapSummary,
|
||||
cells: heatmapCells
|
||||
},
|
||||
tenant_trends: {
|
||||
window_days: days,
|
||||
series,
|
||||
chart_points: chartPoints
|
||||
}
|
||||
});
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
||||
22
backend/src/routes/health.routes.ts
Normal file
22
backend/src/routes/health.routes.ts
Normal file
@@ -0,0 +1,22 @@
|
||||
import { Router } from "express";
|
||||
import { prisma } from "../lib/prisma";
|
||||
|
||||
const router = Router();
|
||||
|
||||
router.get("/", async (_req, res) => {
|
||||
let db = "ok";
|
||||
try {
|
||||
await prisma.$queryRaw`SELECT 1`;
|
||||
} catch {
|
||||
db = "error";
|
||||
}
|
||||
res.json({
|
||||
status: db === "ok" ? "ok" : "degraded",
|
||||
services: {
|
||||
database: db
|
||||
},
|
||||
timestamp: new Date().toISOString()
|
||||
});
|
||||
});
|
||||
|
||||
export default router;
|
||||
391
backend/src/routes/monitoring.routes.ts
Normal file
391
backend/src/routes/monitoring.routes.ts
Normal file
@@ -0,0 +1,391 @@
|
||||
import {
|
||||
AlertChannel,
|
||||
HealthCheckTargetType,
|
||||
HealthCheckType,
|
||||
MonitoringAlertStatus,
|
||||
Severity
|
||||
} from "@prisma/client";
|
||||
import { Router } from "express";
|
||||
import { z } from "zod";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { toPrismaJsonValue } from "../lib/prisma-json";
|
||||
import { authorize, isTenantScopedUser, requireAuth } from "../middleware/auth";
|
||||
import { logAudit } from "../services/audit.service";
|
||||
import {
|
||||
clusterResourceForecast,
|
||||
createAlertRule,
|
||||
createHealthCheckDefinition,
|
||||
evaluateAlertRulesNow,
|
||||
faultyDeploymentInsights,
|
||||
listAlertEvents,
|
||||
listAlertNotifications,
|
||||
listAlertRules,
|
||||
listHealthCheckResults,
|
||||
listHealthChecks,
|
||||
monitoringOverview,
|
||||
runHealthCheckNow,
|
||||
updateAlertRule,
|
||||
updateHealthCheckDefinition
|
||||
} from "../services/monitoring.service";
|
||||
|
||||
const router = Router();

// Request-body schema for creating a health-check definition.
// Percentage thresholds (*_pct) are bounded to 0-100; raw IO/network
// thresholds only need to be non-negative; schedule_minutes is capped at
// one day (1440). Only name and target_type are required.
const healthCheckSchema = z.object({
  name: z.string().min(2),
  description: z.string().optional(),
  target_type: z.nativeEnum(HealthCheckTargetType),
  check_type: z.nativeEnum(HealthCheckType).optional(),
  tenant_id: z.string().optional(),
  vm_id: z.string().optional(),
  node_id: z.string().optional(),
  cpu_warn_pct: z.number().min(0).max(100).optional(),
  cpu_critical_pct: z.number().min(0).max(100).optional(),
  ram_warn_pct: z.number().min(0).max(100).optional(),
  ram_critical_pct: z.number().min(0).max(100).optional(),
  disk_warn_pct: z.number().min(0).max(100).optional(),
  disk_critical_pct: z.number().min(0).max(100).optional(),
  disk_io_read_warn: z.number().min(0).optional(),
  disk_io_read_critical: z.number().min(0).optional(),
  disk_io_write_warn: z.number().min(0).optional(),
  disk_io_write_critical: z.number().min(0).optional(),
  network_in_warn: z.number().min(0).optional(),
  network_in_critical: z.number().min(0).optional(),
  network_out_warn: z.number().min(0).optional(),
  network_out_critical: z.number().min(0).optional(),
  latency_warn_ms: z.number().int().min(1).optional(),
  latency_critical_ms: z.number().int().min(1).optional(),
  schedule_minutes: z.number().int().min(1).max(1440).optional(),
  enabled: z.boolean().optional(),
  metadata: z.record(z.unknown()).optional()
});
|
||||
|
||||
// Request-body schema for creating an alert rule. All thresholds are optional
// so a rule can watch any subset of metrics; consecutive_breaches (1-20) and
// evaluation_window_minutes (1-1440) bound how often a rule can fire.
const alertRuleSchema = z.object({
  name: z.string().min(2),
  description: z.string().optional(),
  tenant_id: z.string().optional(),
  vm_id: z.string().optional(),
  node_id: z.string().optional(),
  cpu_threshold_pct: z.number().min(0).max(100).optional(),
  ram_threshold_pct: z.number().min(0).max(100).optional(),
  disk_threshold_pct: z.number().min(0).max(100).optional(),
  disk_io_read_threshold: z.number().min(0).optional(),
  disk_io_write_threshold: z.number().min(0).optional(),
  network_in_threshold: z.number().min(0).optional(),
  network_out_threshold: z.number().min(0).optional(),
  consecutive_breaches: z.number().int().min(1).max(20).optional(),
  evaluation_window_minutes: z.number().int().min(1).max(1440).optional(),
  severity: z.nativeEnum(Severity).optional(),
  channels: z.array(z.nativeEnum(AlertChannel)).optional(),
  enabled: z.boolean().optional(),
  metadata: z.record(z.unknown()).optional()
});
|
||||
|
||||
// Loads a VM by id and enforces tenant isolation: a tenant-scoped caller may
// only act on VMs belonging to their own tenant.
// Throws HttpError 404 when the VM does not exist, 403 on a scope violation.
// Returns the VM's { id, tenant_id, name } on success.
async function ensureVmTenantScope(vmId: string, req: Pick<Express.Request, "user">) {
  const vm = await prisma.virtualMachine.findUnique({
    where: { id: vmId },
    select: {
      id: true,
      tenant_id: true,
      name: true
    }
  });

  if (!vm) {
    throw new HttpError(404, "VM not found", "VM_NOT_FOUND");
  }

  // Guard only applies when the caller is tenant-scoped AND has a tenant_id.
  if (isTenantScopedUser(req) && req.user?.tenant_id && vm.tenant_id !== req.user.tenant_id) {
    throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
  }

  return vm;
}
|
||||
|
||||
function scopedTenantId(req: Pick<Express.Request, "user">) {
|
||||
return isTenantScopedUser(req) ? req.user?.tenant_id ?? undefined : undefined;
|
||||
}
|
||||
|
||||
function queryTenantId(req: { query?: Record<string, unknown> }) {
|
||||
return typeof req.query?.tenant_id === "string" ? req.query.tenant_id : undefined;
|
||||
}
|
||||
|
||||
router.get("/overview", requireAuth, authorize("security:read"), async (req, res, next) => {
|
||||
try {
|
||||
const data = await monitoringOverview({
|
||||
tenant_id: scopedTenantId(req)
|
||||
});
|
||||
return res.json(data);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/health-checks", requireAuth, authorize("security:read"), async (req, res, next) => {
|
||||
try {
|
||||
const data = await listHealthChecks({
|
||||
tenant_id: scopedTenantId(req) ?? queryTenantId(req),
|
||||
enabled: typeof req.query.enabled === "string" ? req.query.enabled === "true" : undefined
|
||||
});
|
||||
return res.json({ data });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/health-checks", requireAuth, authorize("security:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = healthCheckSchema.parse(req.body ?? {});
|
||||
|
||||
if (payload.vm_id) {
|
||||
await ensureVmTenantScope(payload.vm_id, req);
|
||||
}
|
||||
|
||||
const tenantId = scopedTenantId(req) ?? payload.tenant_id;
|
||||
const check = await createHealthCheckDefinition({
|
||||
...payload,
|
||||
tenant_id: tenantId,
|
||||
created_by: req.user?.email
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "monitoring.health_check.create",
|
||||
resource_type: "SECURITY",
|
||||
resource_id: check.id,
|
||||
resource_name: check.name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue(payload),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
return res.status(201).json(check);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/health-checks/:id", requireAuth, authorize("security:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = healthCheckSchema.partial().parse(req.body ?? {});
|
||||
const existing = await prisma.serverHealthCheck.findUnique({
|
||||
where: { id: req.params.id },
|
||||
select: {
|
||||
id: true,
|
||||
tenant_id: true
|
||||
}
|
||||
});
|
||||
|
||||
if (!existing) {
|
||||
throw new HttpError(404, "Health check not found", "HEALTH_CHECK_NOT_FOUND");
|
||||
}
|
||||
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && existing.tenant_id && existing.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
if (payload.vm_id) {
|
||||
await ensureVmTenantScope(payload.vm_id, req);
|
||||
}
|
||||
|
||||
const updated = await updateHealthCheckDefinition(req.params.id, {
|
||||
...payload,
|
||||
tenant_id: scopedTenantId(req) ?? payload.tenant_id
|
||||
});
|
||||
|
||||
return res.json(updated);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/health-checks/:id/run", requireAuth, authorize("security:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const existing = await prisma.serverHealthCheck.findUnique({
|
||||
where: { id: req.params.id },
|
||||
select: { id: true, tenant_id: true }
|
||||
});
|
||||
|
||||
if (!existing) {
|
||||
throw new HttpError(404, "Health check not found", "HEALTH_CHECK_NOT_FOUND");
|
||||
}
|
||||
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && existing.tenant_id && existing.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
const result = await runHealthCheckNow(existing.id);
|
||||
return res.json(result);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/health-checks/:id/results", requireAuth, authorize("security:read"), async (req, res, next) => {
|
||||
try {
|
||||
const existing = await prisma.serverHealthCheck.findUnique({
|
||||
where: { id: req.params.id },
|
||||
select: { id: true, tenant_id: true }
|
||||
});
|
||||
|
||||
if (!existing) {
|
||||
throw new HttpError(404, "Health check not found", "HEALTH_CHECK_NOT_FOUND");
|
||||
}
|
||||
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && existing.tenant_id && existing.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
const limit = typeof req.query.limit === "string" ? Number(req.query.limit) : undefined;
|
||||
const data = await listHealthCheckResults(existing.id, limit);
|
||||
return res.json({ data });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/alerts/rules", requireAuth, authorize("security:read"), async (req, res, next) => {
|
||||
try {
|
||||
const data = await listAlertRules({
|
||||
tenant_id: scopedTenantId(req) ?? queryTenantId(req),
|
||||
enabled: typeof req.query.enabled === "string" ? req.query.enabled === "true" : undefined
|
||||
});
|
||||
return res.json({ data });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/alerts/rules", requireAuth, authorize("security:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = alertRuleSchema.parse(req.body ?? {});
|
||||
|
||||
if (payload.vm_id) {
|
||||
await ensureVmTenantScope(payload.vm_id, req);
|
||||
}
|
||||
|
||||
const tenantId = scopedTenantId(req) ?? payload.tenant_id;
|
||||
const rule = await createAlertRule({
|
||||
...payload,
|
||||
tenant_id: tenantId,
|
||||
created_by: req.user?.email
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "monitoring.alert_rule.create",
|
||||
resource_type: "SECURITY",
|
||||
resource_id: rule.id,
|
||||
resource_name: rule.name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue(payload),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
return res.status(201).json(rule);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/alerts/rules/:id", requireAuth, authorize("security:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = alertRuleSchema.partial().parse(req.body ?? {});
|
||||
const existing = await prisma.monitoringAlertRule.findUnique({
|
||||
where: { id: req.params.id },
|
||||
select: {
|
||||
id: true,
|
||||
tenant_id: true
|
||||
}
|
||||
});
|
||||
|
||||
if (!existing) {
|
||||
throw new HttpError(404, "Alert rule not found", "ALERT_RULE_NOT_FOUND");
|
||||
}
|
||||
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && existing.tenant_id && existing.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
if (payload.vm_id) {
|
||||
await ensureVmTenantScope(payload.vm_id, req);
|
||||
}
|
||||
|
||||
const updated = await updateAlertRule(req.params.id, {
|
||||
...payload,
|
||||
tenant_id: scopedTenantId(req) ?? payload.tenant_id
|
||||
});
|
||||
return res.json(updated);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/alerts/events", requireAuth, authorize("security:read"), async (req, res, next) => {
|
||||
try {
|
||||
const statusRaw = typeof req.query.status === "string" ? req.query.status.toUpperCase() : undefined;
|
||||
const status = Object.values(MonitoringAlertStatus).includes(statusRaw as MonitoringAlertStatus)
|
||||
? (statusRaw as MonitoringAlertStatus)
|
||||
: undefined;
|
||||
|
||||
const limit = typeof req.query.limit === "string" ? Number(req.query.limit) : undefined;
|
||||
const data = await listAlertEvents({
|
||||
tenant_id: scopedTenantId(req) ?? queryTenantId(req),
|
||||
status,
|
||||
limit
|
||||
});
|
||||
return res.json({ data });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/alerts/notifications", requireAuth, authorize("security:read"), async (req, res, next) => {
|
||||
try {
|
||||
const limit = typeof req.query.limit === "string" ? Number(req.query.limit) : undefined;
|
||||
const data = await listAlertNotifications({
|
||||
tenant_id: scopedTenantId(req) ?? queryTenantId(req),
|
||||
limit
|
||||
});
|
||||
return res.json({ data });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/alerts/evaluate", requireAuth, authorize("security:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const result = await evaluateAlertRulesNow(scopedTenantId(req));
|
||||
return res.json(result);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/insights/faulty-deployments", requireAuth, authorize("security:read"), async (req, res, next) => {
|
||||
try {
|
||||
const days = typeof req.query.days === "string" ? Number(req.query.days) : undefined;
|
||||
const data = await faultyDeploymentInsights({
|
||||
days,
|
||||
tenant_id: scopedTenantId(req) ?? queryTenantId(req)
|
||||
});
|
||||
return res.json(data);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/insights/cluster-forecast", requireAuth, authorize("security:read"), async (req, res, next) => {
|
||||
try {
|
||||
const horizon = typeof req.query.horizon_days === "string" ? Number(req.query.horizon_days) : undefined;
|
||||
const data = await clusterResourceForecast({
|
||||
horizon_days: horizon,
|
||||
tenant_id: scopedTenantId(req) ?? queryTenantId(req)
|
||||
});
|
||||
return res.json(data);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
||||
636
backend/src/routes/network.routes.ts
Normal file
636
backend/src/routes/network.routes.ts
Normal file
@@ -0,0 +1,636 @@
|
||||
import { IpAddressStatus, IpAssignmentType, IpAllocationStrategy, IpScope, IpVersion, PrivateNetworkType } from "@prisma/client";
|
||||
import { Router } from "express";
|
||||
import { z } from "zod";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { toPrismaJsonValue } from "../lib/prisma-json";
|
||||
import { authorize, isTenantScopedUser, requireAuth } from "../middleware/auth";
|
||||
import { logAudit } from "../services/audit.service";
|
||||
import {
|
||||
assignIpToVm,
|
||||
attachPrivateNetwork,
|
||||
createPrivateNetwork,
|
||||
detachPrivateNetwork,
|
||||
importIpAddresses,
|
||||
listIpAddresses,
|
||||
listIpAssignments,
|
||||
listIpPoolPolicies,
|
||||
listIpReservedRanges,
|
||||
listPrivateNetworks,
|
||||
listTenantIpQuotas,
|
||||
returnAssignedIp,
|
||||
subnetUtilizationDashboard,
|
||||
upsertIpPoolPolicy,
|
||||
upsertTenantIpQuota,
|
||||
createIpReservedRange,
|
||||
updateIpReservedRange
|
||||
} from "../services/network.service";
|
||||
|
||||
const router = Router();

// Bulk IP import: accepts explicit addresses and/or CIDR blocks, plus optional
// Proxmox placement hints (node, bridge, VLAN 0-4094, SDN zone).
const ipImportSchema = z.object({
  addresses: z.array(z.string().min(2)).optional(),
  cidr_blocks: z.array(z.string().min(3)).optional(),
  scope: z.nativeEnum(IpScope).optional(),
  server: z.string().optional(),
  node_id: z.string().optional(),
  node_hostname: z.string().optional(),
  bridge: z.string().optional(),
  vlan_tag: z.number().int().min(0).max(4094).optional(),
  sdn_zone: z.string().optional(),
  gateway: z.string().optional(),
  subnet: z.string().optional(),
  tags: z.array(z.string().min(1)).optional(),
  metadata: z.record(z.unknown()).optional()
});

// Assign an IP to a VM: either a specific ip_address_id/address, or let the
// allocator pick by scope/version. assignment_type defaults to ADDITIONAL.
const ipAssignSchema = z.object({
  vm_id: z.string().min(1),
  ip_address_id: z.string().optional(),
  address: z.string().optional(),
  scope: z.nativeEnum(IpScope).optional(),
  version: z.nativeEnum(IpVersion).optional(),
  assignment_type: z.nativeEnum(IpAssignmentType).default(IpAssignmentType.ADDITIONAL),
  interface_name: z.string().optional(),
  notes: z.string().optional(),
  metadata: z.record(z.unknown()).optional()
});

// Return an assignment: at least one of assignment_id / ip_address_id must be
// present (enforced by the refine).
const ipReturnSchema = z
  .object({
    assignment_id: z.string().optional(),
    ip_address_id: z.string().optional()
  })
  .refine((value) => value.assignment_id || value.ip_address_id, {
    message: "assignment_id or ip_address_id is required"
  });

// Create a private network: name + CIDR required, placement hints optional.
const privateNetworkCreateSchema = z.object({
  name: z.string().min(2),
  slug: z.string().optional(),
  network_type: z.nativeEnum(PrivateNetworkType).optional(),
  cidr: z.string().min(3),
  gateway: z.string().optional(),
  bridge: z.string().optional(),
  vlan_tag: z.number().int().min(0).max(4094).optional(),
  sdn_zone: z.string().optional(),
  server: z.string().optional(),
  node_hostname: z.string().optional(),
  metadata: z.record(z.unknown()).optional()
});

// Attach a VM to a private network, optionally requesting a specific IP.
const privateNetworkAttachSchema = z.object({
  network_id: z.string().min(1),
  vm_id: z.string().min(1),
  interface_name: z.string().optional(),
  requested_ip: z.string().optional(),
  metadata: z.record(z.unknown()).optional()
});

// Per-tenant IP quota upsert. Limits are nullable (null = unlimited);
// reserved counts are non-negative; burst limits only matter when burst_allowed.
const tenantQuotaSchema = z.object({
  tenant_id: z.string().min(1),
  ipv4_limit: z.number().int().positive().nullable().optional(),
  ipv6_limit: z.number().int().positive().nullable().optional(),
  reserved_ipv4: z.number().int().min(0).optional(),
  reserved_ipv6: z.number().int().min(0).optional(),
  burst_allowed: z.boolean().optional(),
  burst_ipv4_limit: z.number().int().positive().nullable().optional(),
  burst_ipv6_limit: z.number().int().positive().nullable().optional(),
  is_active: z.boolean().optional(),
  metadata: z.record(z.unknown()).optional()
});

// Reserved IP range (excluded from allocation), keyed by name + CIDR.
const reservedRangeSchema = z.object({
  name: z.string().min(2),
  cidr: z.string().min(3),
  scope: z.nativeEnum(IpScope).optional(),
  tenant_id: z.string().optional(),
  reason: z.string().optional(),
  node_hostname: z.string().optional(),
  bridge: z.string().optional(),
  vlan_tag: z.number().int().min(0).max(4094).optional(),
  sdn_zone: z.string().optional(),
  is_active: z.boolean().optional(),
  metadata: z.record(z.unknown()).optional()
});

// IP pool allocation policy; priority 1-1000 — presumably lower/higher wins is
// decided in the service layer (confirm against upsertIpPoolPolicy).
const ipPoolPolicySchema = z.object({
  name: z.string().min(2),
  tenant_id: z.string().optional(),
  scope: z.nativeEnum(IpScope).optional(),
  version: z.nativeEnum(IpVersion).optional(),
  node_hostname: z.string().optional(),
  bridge: z.string().optional(),
  vlan_tag: z.number().int().min(0).max(4094).optional(),
  sdn_zone: z.string().optional(),
  allocation_strategy: z.nativeEnum(IpAllocationStrategy).optional(),
  enforce_quota: z.boolean().optional(),
  disallow_reserved_use: z.boolean().optional(),
  is_active: z.boolean().optional(),
  priority: z.number().int().min(1).max(1000).optional(),
  metadata: z.record(z.unknown()).optional()
});
|
||||
|
||||
// Loads a VM by id and enforces tenant isolation: a tenant-scoped caller may
// only act on VMs belonging to their own tenant.
// Throws HttpError 404 when the VM does not exist, 403 on a scope violation.
// NOTE(review): duplicated verbatim in monitoring.routes.ts — consider
// extracting to a shared helper.
async function ensureVmTenantScope(vmId: string, req: Pick<Express.Request, "user">) {
  const vm = await prisma.virtualMachine.findUnique({
    where: { id: vmId },
    select: {
      id: true,
      tenant_id: true,
      name: true
    }
  });

  if (!vm) {
    throw new HttpError(404, "VM not found", "VM_NOT_FOUND");
  }

  if (isTenantScopedUser(req) && req.user?.tenant_id && vm.tenant_id !== req.user.tenant_id) {
    throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
  }

  return vm;
}
|
||||
|
||||
router.get("/ip-addresses", requireAuth, authorize("node:read"), async (req, res, next) => {
|
||||
try {
|
||||
const status = typeof req.query.status === "string" ? req.query.status.toUpperCase() : undefined;
|
||||
const version = typeof req.query.version === "string" ? req.query.version.toUpperCase() : undefined;
|
||||
const scope = typeof req.query.scope === "string" ? req.query.scope.toUpperCase() : undefined;
|
||||
|
||||
const result = await listIpAddresses({
|
||||
status: Object.values(IpAddressStatus).includes(status as IpAddressStatus) ? (status as IpAddressStatus) : undefined,
|
||||
version: Object.values(IpVersion).includes(version as IpVersion) ? (version as IpVersion) : undefined,
|
||||
scope: Object.values(IpScope).includes(scope as IpScope) ? (scope as IpScope) : undefined,
|
||||
nodeHostname: typeof req.query.node_hostname === "string" ? req.query.node_hostname : undefined,
|
||||
bridge: typeof req.query.bridge === "string" ? req.query.bridge : undefined,
|
||||
vlanTag: typeof req.query.vlan_tag === "string" ? Number(req.query.vlan_tag) : undefined,
|
||||
assignedVmId: typeof req.query.assigned_vm_id === "string" ? req.query.assigned_vm_id : undefined,
|
||||
limit: typeof req.query.limit === "string" ? Number(req.query.limit) : undefined,
|
||||
offset: typeof req.query.offset === "string" ? Number(req.query.offset) : undefined
|
||||
});
|
||||
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id) {
|
||||
const tenantData = result.data.filter(
|
||||
(item) =>
|
||||
item.assigned_tenant_id === req.user?.tenant_id ||
|
||||
(item.status === IpAddressStatus.AVAILABLE && item.scope === IpScope.PRIVATE)
|
||||
);
|
||||
return res.json({
|
||||
data: tenantData,
|
||||
meta: {
|
||||
...result.meta,
|
||||
total: tenantData.length
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
return res.json(result);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/ip-addresses/import", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = ipImportSchema.parse(req.body ?? {});
|
||||
const result = await importIpAddresses({
|
||||
...payload,
|
||||
imported_by: req.user?.email
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "ip_address.import",
|
||||
resource_type: "SYSTEM",
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue({
|
||||
...payload,
|
||||
result
|
||||
}),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
return res.status(201).json(result);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/subnet-utilization", requireAuth, authorize("node:read"), async (req, res, next) => {
|
||||
try {
|
||||
const scope = typeof req.query.scope === "string" ? req.query.scope.toUpperCase() : undefined;
|
||||
const version = typeof req.query.version === "string" ? req.query.version.toUpperCase() : undefined;
|
||||
|
||||
const dashboard = await subnetUtilizationDashboard({
|
||||
scope: Object.values(IpScope).includes(scope as IpScope) ? (scope as IpScope) : undefined,
|
||||
version: Object.values(IpVersion).includes(version as IpVersion) ? (version as IpVersion) : undefined,
|
||||
node_hostname: typeof req.query.node_hostname === "string" ? req.query.node_hostname : undefined,
|
||||
bridge: typeof req.query.bridge === "string" ? req.query.bridge : undefined,
|
||||
vlan_tag: typeof req.query.vlan_tag === "string" ? Number(req.query.vlan_tag) : undefined,
|
||||
tenant_id:
|
||||
isTenantScopedUser(req) && req.user?.tenant_id
|
||||
? req.user.tenant_id
|
||||
: typeof req.query.tenant_id === "string"
|
||||
? req.query.tenant_id
|
||||
: undefined
|
||||
});
|
||||
|
||||
return res.json(dashboard);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/ip-assignments", requireAuth, authorize("vm:read"), async (req, res, next) => {
|
||||
try {
|
||||
const data = await listIpAssignments({
|
||||
vm_id: typeof req.query.vm_id === "string" ? req.query.vm_id : undefined,
|
||||
tenant_id:
|
||||
isTenantScopedUser(req) && req.user?.tenant_id
|
||||
? req.user.tenant_id
|
||||
: typeof req.query.tenant_id === "string"
|
||||
? req.query.tenant_id
|
||||
: undefined,
|
||||
active_only: req.query.active_only === "true"
|
||||
});
|
||||
return res.json({ data });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/ip-assignments", requireAuth, authorize("vm:update"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = ipAssignSchema.parse(req.body ?? {});
|
||||
await ensureVmTenantScope(payload.vm_id, req);
|
||||
|
||||
const assignment = await assignIpToVm({
|
||||
vm_id: payload.vm_id,
|
||||
ip_address_id: payload.ip_address_id,
|
||||
address: payload.address,
|
||||
scope: payload.scope,
|
||||
version: payload.version,
|
||||
assignment_type: payload.assignment_type,
|
||||
interface_name: payload.interface_name,
|
||||
notes: payload.notes,
|
||||
metadata: payload.metadata,
|
||||
actor_email: req.user?.email
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "ip_address.assign",
|
||||
resource_type: "VM",
|
||||
resource_id: payload.vm_id,
|
||||
resource_name: assignment.vm.name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue({
|
||||
assignment_id: assignment.id,
|
||||
ip_address: assignment.ip_address.address,
|
||||
cidr: assignment.ip_address.cidr,
|
||||
assignment_type: assignment.assignment_type,
|
||||
interface_name: assignment.interface_name
|
||||
}),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
return res.status(201).json(assignment);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
// Return (release) an assigned IP back to the pool.
// When the request identifies the assignment by id, the owning VM's tenant
// scope is verified first.
// NOTE(review): when payload omits assignment_id, returnAssignedIp(payload)
// runs with no tenant-scope check in this handler — confirm ipReturnSchema /
// the service enforce scope for any alternative identifiers.
router.post("/ip-assignments/return", requireAuth, authorize("vm:update"), async (req, res, next) => {
  try {
    const payload = ipReturnSchema.parse(req.body ?? {});
    if (payload.assignment_id) {
      const existing = await prisma.ipAssignment.findUnique({
        where: { id: payload.assignment_id },
        include: {
          vm: {
            select: {
              id: true
            }
          }
        }
      });

      if (!existing) throw new HttpError(404, "IP assignment not found", "IP_ASSIGNMENT_NOT_FOUND");
      await ensureVmTenantScope(existing.vm.id, req);
    }

    const assignment = await returnAssignedIp(payload);

    // Audit the release against the VM that held the address.
    await logAudit({
      action: "ip_address.return",
      resource_type: "VM",
      resource_id: assignment.vm_id,
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: toPrismaJsonValue({
        assignment_id: assignment.id,
        ip_address_id: assignment.ip_address_id
      }),
      ip_address: req.ip
    });

    return res.json(assignment);
  } catch (error) {
    return next(error);
  }
});
|
||||
|
||||
router.get("/tenant-quotas", requireAuth, authorize("tenant:read"), async (req, res, next) => {
|
||||
try {
|
||||
const data = await listTenantIpQuotas(
|
||||
isTenantScopedUser(req) && req.user?.tenant_id ? req.user.tenant_id : typeof req.query.tenant_id === "string" ? req.query.tenant_id : undefined
|
||||
);
|
||||
return res.json({ data });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
// Create or update a tenant's IP quota. Tenant-scoped users may only manage
// the quota of their own tenant.
router.post("/tenant-quotas", requireAuth, authorize("tenant:manage"), async (req, res, next) => {
  try {
    const payload = tenantQuotaSchema.parse(req.body ?? {});
    if (isTenantScopedUser(req) && req.user?.tenant_id && payload.tenant_id !== req.user.tenant_id) {
      throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
    }

    const quota = await upsertTenantIpQuota({
      ...payload,
      created_by: req.user?.email
    });

    // Audit the quota change against the owning tenant.
    await logAudit({
      action: "ip_quota.upsert",
      resource_type: "TENANT",
      resource_id: quota.tenant_id,
      resource_name: quota.tenant.name,
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: toPrismaJsonValue(payload),
      ip_address: req.ip
    });

    // NOTE(review): upsert may update an existing quota yet this always
    // responds 201 — confirm 201 vs 200 is intentional.
    return res.status(201).json(quota);
  } catch (error) {
    return next(error);
  }
});
|
||||
|
||||
router.get("/reserved-ranges", requireAuth, authorize("node:read"), async (req, res, next) => {
|
||||
try {
|
||||
const all = await listIpReservedRanges();
|
||||
const data =
|
||||
isTenantScopedUser(req) && req.user?.tenant_id
|
||||
? all.filter((item) => !item.tenant_id || item.tenant_id === req.user?.tenant_id)
|
||||
: all;
|
||||
return res.json({ data });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
// Create a reserved IP range. A tenant-scoped user may create global ranges
// (no tenant_id) or ranges for their own tenant, but not for other tenants.
router.post("/reserved-ranges", requireAuth, authorize("node:manage"), async (req, res, next) => {
  try {
    const payload = reservedRangeSchema.parse(req.body ?? {});
    if (isTenantScopedUser(req) && req.user?.tenant_id && payload.tenant_id && payload.tenant_id !== req.user.tenant_id) {
      throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
    }

    const range = await createIpReservedRange({
      ...payload,
      created_by: req.user?.email
    });

    // Audit the creation with the full validated payload.
    await logAudit({
      action: "ip_reserved_range.create",
      resource_type: "NETWORK",
      resource_id: range.id,
      resource_name: range.name,
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: toPrismaJsonValue(payload),
      ip_address: req.ip
    });

    return res.status(201).json(range);
  } catch (error) {
    return next(error);
  }
});
|
||||
|
||||
router.patch("/reserved-ranges/:id", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = reservedRangeSchema.partial().parse(req.body ?? {});
|
||||
const existing = await prisma.ipReservedRange.findUnique({ where: { id: req.params.id } });
|
||||
if (!existing) throw new HttpError(404, "Reserved range not found", "RESERVED_RANGE_NOT_FOUND");
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && existing.tenant_id && existing.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
const updated = await updateIpReservedRange(req.params.id, payload);
|
||||
return res.json(updated);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/policies", requireAuth, authorize("node:read"), async (req, res, next) => {
|
||||
try {
|
||||
const all = await listIpPoolPolicies();
|
||||
const data =
|
||||
isTenantScopedUser(req) && req.user?.tenant_id
|
||||
? all.filter((item) => !item.tenant_id || item.tenant_id === req.user?.tenant_id)
|
||||
: all;
|
||||
return res.json({ data });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
// Create an IP pool policy. A tenant-scoped user may create global policies
// or policies for their own tenant, but not for other tenants.
router.post("/policies", requireAuth, authorize("node:manage"), async (req, res, next) => {
  try {
    const payload = ipPoolPolicySchema.parse(req.body ?? {});
    if (isTenantScopedUser(req) && req.user?.tenant_id && payload.tenant_id && payload.tenant_id !== req.user.tenant_id) {
      throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
    }

    const policy = await upsertIpPoolPolicy({
      ...payload,
      created_by: req.user?.email
    });

    // Audit the creation with the full validated payload.
    await logAudit({
      action: "ip_pool_policy.create",
      resource_type: "NETWORK",
      resource_id: policy.id,
      resource_name: policy.name,
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: toPrismaJsonValue(payload),
      ip_address: req.ip
    });

    return res.status(201).json(policy);
  } catch (error) {
    return next(error);
  }
});
|
||||
|
||||
router.patch("/policies/:id", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = ipPoolPolicySchema.partial().parse(req.body ?? {});
|
||||
const existing = await prisma.ipPoolPolicy.findUnique({ where: { id: req.params.id } });
|
||||
if (!existing) throw new HttpError(404, "IP pool policy not found", "IP_POOL_POLICY_NOT_FOUND");
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && existing.tenant_id && existing.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
const policy = await upsertIpPoolPolicy({
|
||||
policy_id: existing.id,
|
||||
name: payload.name ?? existing.name,
|
||||
tenant_id: payload.tenant_id ?? existing.tenant_id ?? undefined,
|
||||
scope: payload.scope ?? existing.scope ?? undefined,
|
||||
version: payload.version ?? existing.version ?? undefined,
|
||||
node_hostname: payload.node_hostname ?? existing.node_hostname ?? undefined,
|
||||
bridge: payload.bridge ?? existing.bridge ?? undefined,
|
||||
vlan_tag: payload.vlan_tag ?? existing.vlan_tag ?? undefined,
|
||||
sdn_zone: payload.sdn_zone ?? existing.sdn_zone ?? undefined,
|
||||
allocation_strategy: payload.allocation_strategy ?? existing.allocation_strategy,
|
||||
enforce_quota: payload.enforce_quota ?? existing.enforce_quota,
|
||||
disallow_reserved_use: payload.disallow_reserved_use ?? existing.disallow_reserved_use,
|
||||
is_active: payload.is_active ?? existing.is_active,
|
||||
priority: payload.priority ?? existing.priority,
|
||||
metadata: payload.metadata
|
||||
});
|
||||
|
||||
return res.json(policy);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/private-networks", requireAuth, authorize("node:read"), async (_req, res, next) => {
|
||||
try {
|
||||
const data = await listPrivateNetworks();
|
||||
return res.json({ data });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
// Create a private network backed by a bridge / VLAN / SDN zone.
router.post("/private-networks", requireAuth, authorize("node:manage"), async (req, res, next) => {
  try {
    const payload = privateNetworkCreateSchema.parse(req.body ?? {});
    const network = await createPrivateNetwork({
      name: payload.name,
      slug: payload.slug,
      network_type: payload.network_type,
      cidr: payload.cidr,
      gateway: payload.gateway,
      bridge: payload.bridge,
      vlan_tag: payload.vlan_tag,
      sdn_zone: payload.sdn_zone,
      server: payload.server,
      node_hostname: payload.node_hostname,
      metadata: payload.metadata,
      created_by: req.user?.email
    });

    // Audit the creation with the full validated payload.
    await logAudit({
      action: "private_network.create",
      resource_type: "NETWORK",
      resource_id: network.id,
      resource_name: network.name,
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: toPrismaJsonValue(payload),
      ip_address: req.ip
    });

    return res.status(201).json(network);
  } catch (error) {
    return next(error);
  }
});
|
||||
|
||||
// Attach a VM to a private network, optionally on a named interface and/or
// with a specific requested IP. Tenant scope on the VM is enforced first.
router.post("/private-networks/attach", requireAuth, authorize("vm:update"), async (req, res, next) => {
  try {
    const payload = privateNetworkAttachSchema.parse(req.body ?? {});
    await ensureVmTenantScope(payload.vm_id, req);

    const attachment = await attachPrivateNetwork({
      network_id: payload.network_id,
      vm_id: payload.vm_id,
      interface_name: payload.interface_name,
      requested_ip: payload.requested_ip,
      metadata: payload.metadata,
      actor_email: req.user?.email
    });

    // Audit the attachment; interface_name comes from the created record
    // (the service may have chosen one), requested_ip from the request.
    await logAudit({
      action: "private_network.attach",
      resource_type: "VM",
      resource_id: payload.vm_id,
      resource_name: attachment.vm.name,
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: toPrismaJsonValue({
        attachment_id: attachment.id,
        network_id: payload.network_id,
        interface_name: attachment.interface_name,
        requested_ip: payload.requested_ip
      }),
      ip_address: req.ip
    });

    return res.status(201).json(attachment);
  } catch (error) {
    return next(error);
  }
});
|
||||
|
||||
// Detach a private-network attachment. The attachment is looked up first so
// the owning VM's tenant scope can be checked before detaching.
router.post("/private-networks/attachments/:id/detach", requireAuth, authorize("vm:update"), async (req, res, next) => {
  try {
    const existing = await prisma.privateNetworkAttachment.findUnique({
      where: { id: req.params.id },
      select: {
        id: true,
        vm_id: true
      }
    });

    if (!existing) throw new HttpError(404, "Private network attachment not found", "PRIVATE_NETWORK_ATTACHMENT_NOT_FOUND");
    await ensureVmTenantScope(existing.vm_id, req);

    const attachment = await detachPrivateNetwork({
      attachment_id: req.params.id,
      actor_email: req.user?.email
    });

    // Audit the detach against the VM that was attached.
    await logAudit({
      action: "private_network.detach",
      resource_type: "VM",
      resource_id: attachment.vm.id,
      resource_name: attachment.vm.name,
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: toPrismaJsonValue({
        attachment_id: attachment.id,
        network_id: attachment.network_id,
        interface_name: attachment.interface_name
      }),
      ip_address: req.ip
    });

    return res.json(attachment);
  } catch (error) {
    return next(error);
  }
});
|
||||
|
||||
export default router;
|
||||
275
backend/src/routes/operations.routes.ts
Normal file
275
backend/src/routes/operations.routes.ts
Normal file
@@ -0,0 +1,275 @@
|
||||
import { OperationTaskStatus, OperationTaskType, PowerScheduleAction } from "@prisma/client";
|
||||
import { Router } from "express";
|
||||
import { z } from "zod";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { authorize, isTenantScopedUser, requireAuth } from "../middleware/auth";
|
||||
import {
|
||||
createPowerSchedule,
|
||||
deletePowerSchedule,
|
||||
executeVmPowerActionNow,
|
||||
listOperationTasks,
|
||||
operationQueueInsights,
|
||||
listPowerSchedules,
|
||||
updatePowerSchedule
|
||||
} from "../services/operations.service";
|
||||
import { logAudit } from "../services/audit.service";
|
||||
|
||||
const router = Router();

// Payload for creating a VM power schedule.
const scheduleCreateSchema = z.object({
  vm_id: z.string().min(1),
  action: z.nativeEnum(PowerScheduleAction),
  // Length sanity check only; not a full cron-syntax validation here.
  cron_expression: z.string().min(5),
  timezone: z.string().default("UTC")
});

// Partial payload for updating an existing power schedule; every field optional.
const scheduleUpdateSchema = z.object({
  action: z.nativeEnum(PowerScheduleAction).optional(),
  cron_expression: z.string().min(5).optional(),
  timezone: z.string().min(1).optional(),
  enabled: z.boolean().optional()
});
|
||||
|
||||
function parseOptionalEnum<T extends Record<string, string>>(value: unknown, enumObject: T) {
|
||||
if (typeof value !== "string") return undefined;
|
||||
const candidate = value.toUpperCase();
|
||||
return Object.values(enumObject).includes(candidate as T[keyof T])
|
||||
? (candidate as T[keyof T])
|
||||
: undefined;
|
||||
}
|
||||
|
||||
// Load a VM and verify the requester may act on it.
// Throws 404 when the VM does not exist and 403 when a tenant-scoped user
// targets a VM belonging to another tenant. Returns the selected VM fields.
async function ensureVmTenantAccess(vmId: string, req: Express.Request) {
  const vm = await prisma.virtualMachine.findUnique({
    where: { id: vmId },
    select: {
      id: true,
      name: true,
      node: true,
      tenant_id: true
    }
  });

  if (!vm) {
    throw new HttpError(404, "VM not found", "VM_NOT_FOUND");
  }

  if (isTenantScopedUser(req) && req.user?.tenant_id && vm.tenant_id !== req.user.tenant_id) {
    throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
  }

  return vm;
}
|
||||
|
||||
// List operation tasks with optional status/type/VM filters and pagination.
// Invalid enum values in the query are silently ignored (treated as unset).
router.get("/tasks", requireAuth, authorize("audit:read"), async (req, res, next) => {
  try {
    const status = parseOptionalEnum(req.query.status, OperationTaskStatus);
    const taskType = parseOptionalEnum(req.query.task_type, OperationTaskType);
    const vmId = typeof req.query.vm_id === "string" ? req.query.vm_id : undefined;
    // NOTE(review): Number("abc") yields NaN, which is forwarded to the
    // service — confirm listOperationTasks handles non-numeric limit/offset.
    const limit = typeof req.query.limit === "string" ? Number(req.query.limit) : undefined;
    const offset = typeof req.query.offset === "string" ? Number(req.query.offset) : undefined;

    const result = await listOperationTasks({
      status,
      taskType,
      vmId,
      limit,
      offset,
      // Tenant-scoped users only see their own tenant's tasks.
      tenantId: isTenantScopedUser(req) ? req.user?.tenant_id : undefined
    });

    res.json(result);
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
router.get("/queue-insights", requireAuth, authorize("audit:read"), async (req, res, next) => {
|
||||
try {
|
||||
const data = await operationQueueInsights(isTenantScopedUser(req) ? req.user?.tenant_id : undefined);
|
||||
return res.json(data);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/power-schedules", requireAuth, authorize("vm:read"), async (req, res, next) => {
|
||||
try {
|
||||
const schedules = await listPowerSchedules(isTenantScopedUser(req) ? req.user?.tenant_id : undefined);
|
||||
res.json({ data: schedules });
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
// Create a cron-driven power schedule for a VM the caller may manage.
router.post("/power-schedules", requireAuth, authorize("vm:update"), async (req, res, next) => {
  try {
    const payload = scheduleCreateSchema.parse(req.body ?? {});
    // Throws 404/403 and also provides the VM's name for the audit entry.
    const vm = await ensureVmTenantAccess(payload.vm_id, req);

    const schedule = await createPowerSchedule({
      vmId: vm.id,
      action: payload.action,
      cronExpression: payload.cron_expression,
      timezone: payload.timezone,
      createdBy: req.user?.email
    });

    await logAudit({
      action: "power_schedule.create",
      resource_type: "VM",
      resource_id: vm.id,
      resource_name: vm.name,
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: {
        schedule_id: schedule.id,
        action: payload.action,
        cron_expression: payload.cron_expression
      },
      ip_address: req.ip
    });

    res.status(201).json(schedule);
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// Update a power schedule. The schedule's VM is loaded alongside it so the
// tenant-scope check and audit entry can reference the VM.
router.patch("/power-schedules/:id", requireAuth, authorize("vm:update"), async (req, res, next) => {
  try {
    const payload = scheduleUpdateSchema.parse(req.body ?? {});
    const existing = await prisma.powerSchedule.findUnique({
      where: { id: req.params.id },
      include: {
        vm: {
          select: {
            id: true,
            name: true,
            tenant_id: true
          }
        }
      }
    });

    if (!existing) {
      throw new HttpError(404, "Power schedule not found", "POWER_SCHEDULE_NOT_FOUND");
    }

    if (isTenantScopedUser(req) && req.user?.tenant_id && existing.vm.tenant_id !== req.user.tenant_id) {
      throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
    }

    const schedule = await updatePowerSchedule(existing.id, {
      action: payload.action,
      cronExpression: payload.cron_expression,
      timezone: payload.timezone,
      enabled: payload.enabled
    });

    // Audit records the full validated patch payload.
    await logAudit({
      action: "power_schedule.update",
      resource_type: "VM",
      resource_id: existing.vm.id,
      resource_name: existing.vm.name,
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: {
        schedule_id: existing.id,
        payload
      },
      ip_address: req.ip
    });

    res.json(schedule);
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// Delete a power schedule after verifying the caller's tenant scope against
// the schedule's VM. Responds 204 on success.
router.delete("/power-schedules/:id", requireAuth, authorize("vm:update"), async (req, res, next) => {
  try {
    const existing = await prisma.powerSchedule.findUnique({
      where: { id: req.params.id },
      include: {
        vm: {
          select: {
            id: true,
            name: true,
            tenant_id: true
          }
        }
      }
    });

    if (!existing) {
      throw new HttpError(404, "Power schedule not found", "POWER_SCHEDULE_NOT_FOUND");
    }

    if (isTenantScopedUser(req) && req.user?.tenant_id && existing.vm.tenant_id !== req.user.tenant_id) {
      throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
    }

    await deletePowerSchedule(existing.id);

    await logAudit({
      action: "power_schedule.delete",
      resource_type: "VM",
      resource_id: existing.vm.id,
      resource_name: existing.vm.name,
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: {
        schedule_id: existing.id
      },
      ip_address: req.ip
    });

    res.status(204).send();
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// Execute a schedule's power action immediately (outside its cron timing).
// Responds with the created operation task id and the Proxmox UPID.
router.post("/power-schedules/:id/run", requireAuth, authorize("vm:update"), async (req, res, next) => {
  try {
    const existing = await prisma.powerSchedule.findUnique({
      where: { id: req.params.id },
      include: {
        vm: {
          select: {
            id: true,
            name: true,
            tenant_id: true
          }
        }
      }
    });

    if (!existing) {
      throw new HttpError(404, "Power schedule not found", "POWER_SCHEDULE_NOT_FOUND");
    }

    if (isTenantScopedUser(req) && req.user?.tenant_id && existing.vm.tenant_id !== req.user.tenant_id) {
      throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
    }

    // The payload marks this run as manually triggered so the task record
    // can be distinguished from cron-driven executions.
    const execution = await executeVmPowerActionNow(existing.vm_id, existing.action, req.user!.email, {
      payload: {
        source: "manual_schedule_run",
        schedule_id: existing.id
      },
      scheduledFor: new Date()
    });

    res.json({
      success: true,
      task_id: execution.task.id,
      upid: execution.upid
    });
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
export default router;
|
||||
71
backend/src/routes/payment.routes.ts
Normal file
71
backend/src/routes/payment.routes.ts
Normal file
@@ -0,0 +1,71 @@
|
||||
import { Router } from "express";
|
||||
import { z } from "zod";
|
||||
import { authorize, requireAuth } from "../middleware/auth";
|
||||
import {
|
||||
createInvoicePaymentLink,
|
||||
handleManualInvoicePayment,
|
||||
processFlutterwaveWebhook,
|
||||
processPaystackWebhook,
|
||||
verifyFlutterwaveSignature,
|
||||
verifyPaystackSignature
|
||||
} from "../services/payment.service";
|
||||
|
||||
const router = Router();
|
||||
|
||||
// Optional provider selection for a payment link; default provider choice
// is delegated to createInvoicePaymentLink.
const createLinkSchema = z.object({
  provider: z.enum(["paystack", "flutterwave", "manual"]).optional()
});

// Generate a payment link for an invoice.
router.post("/invoices/:id/link", requireAuth, authorize("billing:manage"), async (req, res, next) => {
  try {
    const payload = createLinkSchema.parse(req.body ?? {});
    const result = await createInvoicePaymentLink(req.params.id, payload.provider);
    res.json(result);
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// Reference string identifying an out-of-band (manual) payment.
const manualSchema = z.object({
  payment_reference: z.string().min(2)
});

// Mark an invoice as paid via a manual payment reference. The actor email
// falls back to "manual@system" when no authenticated email is present.
router.post("/invoices/:id/manual-pay", requireAuth, authorize("billing:manage"), async (req, res, next) => {
  try {
    const payload = manualSchema.parse(req.body ?? {});
    const invoice = await handleManualInvoicePayment(req.params.id, payload.payment_reference, req.user?.email ?? "manual@system");
    res.json(invoice);
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// Paystack webhook receiver (unauthenticated — verified by HMAC signature
// over the raw request body instead).
// NOTE(review): req.rawBody is a non-standard property — presumably set by a
// body-parser `verify` hook elsewhere; confirm it is populated on this route.
router.post("/webhooks/paystack", async (req, res, next) => {
  try {
    const signature = req.header("x-paystack-signature");
    const valid = await verifyPaystackSignature(signature, req.rawBody);
    if (!valid) {
      return res.status(401).json({ error: { code: "INVALID_SIGNATURE", message: "Invalid signature" } });
    }
    const result = await processPaystackWebhook(req.body);
    return res.json(result);
  } catch (error) {
    return next(error);
  }
});
|
||||
|
||||
// Flutterwave webhook receiver. Flutterwave's "verif-hash" header carries a
// static secret, so verification takes only the header — not the body.
router.post("/webhooks/flutterwave", async (req, res, next) => {
  try {
    const signature = req.header("verif-hash");
    const valid = await verifyFlutterwaveSignature(signature);
    if (!valid) {
      return res.status(401).json({ error: { code: "INVALID_SIGNATURE", message: "Invalid signature" } });
    }
    const result = await processFlutterwaveWebhook(req.body);
    return res.json(result);
  } catch (error) {
    return next(error);
  }
});
|
||||
|
||||
export default router;
|
||||
488
backend/src/routes/profile.routes.ts
Normal file
488
backend/src/routes/profile.routes.ts
Normal file
@@ -0,0 +1,488 @@
|
||||
import { Router } from "express";
|
||||
import crypto from "crypto";
|
||||
import bcrypt from "bcryptjs";
|
||||
import { z } from "zod";
|
||||
import { requireAuth } from "../middleware/auth";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { buildTotpUri, generateTotpSecret, verifyTotpCode } from "../lib/totp";
|
||||
import { generateRecoveryCodes, hashRecoveryCodes, hashToken } from "../lib/security";
|
||||
import { logAudit } from "../services/audit.service";
|
||||
import { toPrismaJsonValue } from "../lib/prisma-json";
|
||||
|
||||
const router = Router();

// Accepts base64 data URLs for common raster image types only.
const imageDataUrlRegex = /^data:image\/(png|jpe?g|webp|gif);base64,[A-Za-z0-9+/=]+$/i;

// Partial profile update. Contact fields (phone/telephone/address/state/
// city/country) are stored inside the profile_metadata JSON column.
// avatar_url accepts either an http(s) URL or an inline image data URL
// (capped at ~1.5 MB of base64 text).
const updateProfileSchema = z.object({
  full_name: z.string().min(1).max(120).optional(),
  phone: z.string().min(3).max(40).optional(),
  telephone: z.string().min(3).max(40).optional(),
  address: z.string().min(3).max(280).optional(),
  state: z.string().min(2).max(80).optional(),
  city: z.string().min(2).max(80).optional(),
  country: z.string().min(2).max(80).optional(),
  avatar_url: z.union([z.string().url().max(1000), z.string().regex(imageDataUrlRegex).max(1_500_000)]).optional(),
  avatar_data_url: z.string().regex(imageDataUrlRegex).max(1_500_000).optional(),
  // Only flat scalar metadata values are accepted.
  profile_metadata: z.record(z.string(), z.union([z.string(), z.number(), z.boolean(), z.null()])).optional()
});

// New password must be 10-120 characters.
const changePasswordSchema = z.object({
  current_password: z.string().min(1),
  new_password: z.string().min(10).max(120)
});

// MFA setup requires password re-confirmation.
const mfaSetupSchema = z.object({
  password: z.string().min(1)
});

// 6-8 characters: a TOTP code (or similar short verification code).
const mfaEnableSchema = z.object({
  code: z.string().min(6).max(8)
});

const mfaDisableSchema = z.object({
  password: z.string().min(1),
  code: z.string().min(6).max(8).optional()
});

// Dedicated avatar upload accepts only inline image data URLs.
const uploadAvatarSchema = z.object({
  data_url: z.string().regex(imageDataUrlRegex).max(1_500_000)
});
|
||||
|
||||
function metadataObject(value: unknown) {
|
||||
if (!value || typeof value !== "object" || Array.isArray(value)) return {};
|
||||
return value as Record<string, unknown>;
|
||||
}
|
||||
|
||||
function profileFieldsFromMetadata(value: unknown) {
|
||||
const meta = metadataObject(value);
|
||||
const asString = (entry: unknown) => (typeof entry === "string" ? entry : "");
|
||||
|
||||
return {
|
||||
phone: asString(meta.phone),
|
||||
telephone: asString(meta.telephone),
|
||||
address: asString(meta.address),
|
||||
state: asString(meta.state),
|
||||
city: asString(meta.city),
|
||||
country: asString(meta.country)
|
||||
};
|
||||
}
|
||||
|
||||
function withProfileFields<T extends { profile_metadata: unknown }>(user: T) {
|
||||
return {
|
||||
...user,
|
||||
profile_fields: profileFieldsFromMetadata(user.profile_metadata)
|
||||
};
|
||||
}
|
||||
|
||||
// Return the authenticated user's own profile (sensitive columns such as
// password_hash and mfa_secret are excluded by the select list) plus the
// derived profile_fields view.
router.get("/", requireAuth, async (req, res, next) => {
  try {
    const user = await prisma.user.findUnique({
      where: { id: req.user!.id },
      select: {
        id: true,
        email: true,
        full_name: true,
        avatar_url: true,
        profile_metadata: true,
        role: true,
        tenant_id: true,
        mfa_enabled: true,
        must_change_password: true,
        created_at: true,
        updated_at: true,
        last_login_at: true
      }
    });
    if (!user) {
      throw new HttpError(404, "User not found", "USER_NOT_FOUND");
    }
    return res.json(withProfileFields(user));
  } catch (error) {
    return next(error);
  }
});
|
||||
|
||||
// Update the authenticated user's own profile. Contact fields are mirrored
// into the profile_metadata JSON column; full_name/avatar_url are columns.
router.patch("/", requireAuth, async (req, res, next) => {
  try {
    const payload = updateProfileSchema.parse(req.body ?? {});
    if (Object.keys(payload).length === 0) {
      throw new HttpError(400, "No profile fields were provided", "VALIDATION_ERROR");
    }

    const existing = await prisma.user.findUnique({
      where: { id: req.user!.id },
      select: {
        id: true,
        profile_metadata: true
      }
    });
    if (!existing) {
      throw new HttpError(404, "User not found", "USER_NOT_FOUND");
    }

    // Shallow merge: caller-supplied metadata keys override stored ones.
    const nextMetadata = {
      ...metadataObject(existing.profile_metadata),
      ...(payload.profile_metadata ?? {})
    } as Record<string, unknown>;

    // Top-level contact fields also land in metadata so the derived
    // profile_fields view stays consistent. An explicit null becomes "".
    const metadataFields: Array<keyof ReturnType<typeof profileFieldsFromMetadata>> = [
      "phone",
      "telephone",
      "address",
      "state",
      "city",
      "country"
    ];
    for (const field of metadataFields) {
      if (field in payload) {
        nextMetadata[field] = payload[field] ?? "";
      }
    }

    const user = await prisma.user.update({
      where: { id: req.user!.id },
      data: {
        ...(payload.full_name !== undefined ? { full_name: payload.full_name } : {}),
        ...(payload.avatar_url !== undefined ? { avatar_url: payload.avatar_url } : {}),
        // avatar_data_url wins over avatar_url when both are supplied
        // (it is spread later and overwrites the same column).
        ...(payload.avatar_data_url !== undefined ? { avatar_url: payload.avatar_data_url } : {}),
        profile_metadata: toPrismaJsonValue(nextMetadata)
      },
      select: {
        id: true,
        email: true,
        full_name: true,
        avatar_url: true,
        profile_metadata: true,
        role: true,
        tenant_id: true,
        mfa_enabled: true,
        must_change_password: true,
        created_at: true,
        updated_at: true,
        last_login_at: true
      }
    });

    // Audit records only which field names changed, not their values.
    await logAudit({
      action: "profile.update",
      resource_type: "USER",
      resource_id: user.id,
      resource_name: user.email,
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: toPrismaJsonValue({ updated_fields: Object.keys(payload) }),
      ip_address: req.ip
    });

    return res.json(withProfileFields(user));
  } catch (error) {
    return next(error);
  }
});
|
||||
|
||||
// Upload an avatar as an inline image data URL; it is stored directly in
// the user's avatar_url column.
router.post("/avatar", requireAuth, async (req, res, next) => {
  try {
    const payload = uploadAvatarSchema.parse(req.body ?? {});
    const user = await prisma.user.update({
      where: { id: req.user!.id },
      data: {
        avatar_url: payload.data_url
      },
      select: {
        id: true,
        email: true,
        avatar_url: true,
        updated_at: true
      }
    });

    // Audit the upload without recording the (potentially large) image data.
    await logAudit({
      action: "profile.avatar.upload",
      resource_type: "USER",
      resource_id: user.id,
      resource_name: user.email,
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: toPrismaJsonValue({ avatar_uploaded: true }),
      ip_address: req.ip
    });

    return res.json(user);
  } catch (error) {
    return next(error);
  }
});
|
||||
|
||||
// Change the authenticated user's password. Requires the current password,
// stores a fresh bcrypt hash, and revokes every active auth session so all
// other logins are forced to re-authenticate.
router.post("/change-password", requireAuth, async (req, res, next) => {
  try {
    const payload = changePasswordSchema.parse(req.body ?? {});
    const user = await prisma.user.findUnique({
      where: { id: req.user!.id },
      select: { id: true, email: true, password_hash: true }
    });
    if (!user) {
      throw new HttpError(404, "User not found", "USER_NOT_FOUND");
    }

    const matched = await bcrypt.compare(payload.current_password, user.password_hash);
    if (!matched) {
      throw new HttpError(401, "Current password is incorrect", "INVALID_CREDENTIALS");
    }

    // Cost factor 12; also clears any forced-change flag and stamps the time.
    const newHash = await bcrypt.hash(payload.new_password, 12);
    await prisma.user.update({
      where: { id: user.id },
      data: {
        password_hash: newHash,
        must_change_password: false,
        password_changed_at: new Date()
      }
    });

    // Revoke all sessions that are not already revoked.
    await prisma.authSession.updateMany({
      where: {
        user_id: user.id,
        revoked_at: null
      },
      data: {
        revoked_at: new Date()
      }
    });

    await logAudit({
      action: "profile.password.change",
      resource_type: "USER",
      resource_id: user.id,
      resource_name: user.email,
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: toPrismaJsonValue({ revoked_sessions: true }),
      ip_address: req.ip
    });

    return res.json({ success: true, message: "Password changed. All active sessions were revoked." });
  } catch (error) {
    return next(error);
  }
});
|
||||
|
||||
// Initialize TOTP MFA setup. Requires password re-confirmation, then stores
// a new secret with mfa_enabled forced to false until /mfa/enable verifies a
// code. The secret and plaintext recovery codes are returned only in this
// response; only hashed recovery codes are persisted.
router.post("/mfa/setup", requireAuth, async (req, res, next) => {
  try {
    const payload = mfaSetupSchema.parse(req.body ?? {});
    const user = await prisma.user.findUnique({
      where: { id: req.user!.id },
      select: {
        id: true,
        email: true,
        password_hash: true
      }
    });
    if (!user) {
      throw new HttpError(404, "User not found", "USER_NOT_FOUND");
    }

    const matched = await bcrypt.compare(payload.password, user.password_hash);
    if (!matched) {
      throw new HttpError(401, "Password is incorrect", "INVALID_CREDENTIALS");
    }

    const secret = generateTotpSecret();
    const recoveryCodes = generateRecoveryCodes();
    await prisma.user.update({
      where: { id: user.id },
      data: {
        mfa_secret: secret,
        // Re-running setup also disables any previously enabled MFA until
        // the new secret is confirmed.
        mfa_enabled: false,
        mfa_recovery_codes: hashRecoveryCodes(recoveryCodes)
      }
    });

    return res.json({
      secret,
      otpauth_uri: buildTotpUri("ProxPanel", user.email, secret),
      recovery_codes: recoveryCodes
    });
  } catch (error) {
    return next(error);
  }
});
|
||||
|
||||
router.post("/mfa/enable", requireAuth, async (req, res, next) => {
|
||||
try {
|
||||
const payload = mfaEnableSchema.parse(req.body ?? {});
|
||||
const user = await prisma.user.findUnique({
|
||||
where: { id: req.user!.id },
|
||||
select: {
|
||||
id: true,
|
||||
email: true,
|
||||
mfa_secret: true
|
||||
}
|
||||
});
|
||||
if (!user || !user.mfa_secret) {
|
||||
throw new HttpError(400, "MFA setup is not initialized", "MFA_NOT_CONFIGURED");
|
||||
}
|
||||
|
||||
if (!verifyTotpCode(payload.code, user.mfa_secret, { window: 1 })) {
|
||||
throw new HttpError(401, "Invalid MFA code", "MFA_INVALID");
|
||||
}
|
||||
|
||||
await prisma.user.update({
|
||||
where: { id: user.id },
|
||||
data: {
|
||||
mfa_enabled: true
|
||||
}
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "profile.mfa.enable",
|
||||
resource_type: "USER",
|
||||
resource_id: user.id,
|
||||
resource_name: user.email,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
return res.json({ success: true, mfa_enabled: true });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/mfa/disable", requireAuth, async (req, res, next) => {
|
||||
try {
|
||||
const payload = mfaDisableSchema.parse(req.body ?? {});
|
||||
const user = await prisma.user.findUnique({
|
||||
where: { id: req.user!.id },
|
||||
select: {
|
||||
id: true,
|
||||
email: true,
|
||||
password_hash: true,
|
||||
mfa_enabled: true,
|
||||
mfa_secret: true
|
||||
}
|
||||
});
|
||||
if (!user) {
|
||||
throw new HttpError(404, "User not found", "USER_NOT_FOUND");
|
||||
}
|
||||
|
||||
const matched = await bcrypt.compare(payload.password, user.password_hash);
|
||||
if (!matched) {
|
||||
throw new HttpError(401, "Password is incorrect", "INVALID_CREDENTIALS");
|
||||
}
|
||||
|
||||
if (user.mfa_enabled && user.mfa_secret && payload.code && !verifyTotpCode(payload.code, user.mfa_secret, { window: 1 })) {
|
||||
throw new HttpError(401, "Invalid MFA code", "MFA_INVALID");
|
||||
}
|
||||
|
||||
await prisma.user.update({
|
||||
where: { id: user.id },
|
||||
data: {
|
||||
mfa_enabled: false,
|
||||
mfa_secret: null,
|
||||
mfa_recovery_codes: []
|
||||
}
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "profile.mfa.disable",
|
||||
resource_type: "USER",
|
||||
resource_id: user.id,
|
||||
resource_name: user.email,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
return res.json({ success: true, mfa_enabled: false });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/sessions", requireAuth, async (req, res, next) => {
|
||||
try {
|
||||
const sessions = await prisma.authSession.findMany({
|
||||
where: { user_id: req.user!.id },
|
||||
orderBy: { issued_at: "desc" },
|
||||
select: {
|
||||
id: true,
|
||||
ip_address: true,
|
||||
user_agent: true,
|
||||
issued_at: true,
|
||||
last_used_at: true,
|
||||
expires_at: true,
|
||||
revoked_at: true
|
||||
}
|
||||
});
|
||||
return res.json(sessions);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/sessions/:id/revoke", requireAuth, async (req, res, next) => {
|
||||
try {
|
||||
const updated = await prisma.authSession.updateMany({
|
||||
where: {
|
||||
id: req.params.id,
|
||||
user_id: req.user!.id,
|
||||
revoked_at: null
|
||||
},
|
||||
data: {
|
||||
revoked_at: new Date()
|
||||
}
|
||||
});
|
||||
return res.json({ success: true, revoked: updated.count });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/sessions/revoke-all", requireAuth, async (req, res, next) => {
|
||||
try {
|
||||
const result = await prisma.authSession.updateMany({
|
||||
where: {
|
||||
user_id: req.user!.id,
|
||||
revoked_at: null
|
||||
},
|
||||
data: { revoked_at: new Date() }
|
||||
});
|
||||
return res.json({ success: true, revoked: result.count });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/password-reset/request", requireAuth, async (req, res, next) => {
|
||||
try {
|
||||
const user = await prisma.user.findUnique({
|
||||
where: { id: req.user!.id },
|
||||
select: { id: true }
|
||||
});
|
||||
if (!user) throw new HttpError(404, "User not found", "USER_NOT_FOUND");
|
||||
|
||||
const token = crypto.randomUUID().replace(/-/g, "");
|
||||
const tokenHash = hashToken(token);
|
||||
const expiresAt = new Date(Date.now() + 30 * 60 * 1000);
|
||||
|
||||
await prisma.passwordResetToken.create({
|
||||
data: {
|
||||
user_id: user.id,
|
||||
token_hash: tokenHash,
|
||||
expires_at: expiresAt
|
||||
}
|
||||
});
|
||||
|
||||
return res.json({
|
||||
success: true,
|
||||
token,
|
||||
expires_at: expiresAt
|
||||
});
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
||||
566
backend/src/routes/provisioning.routes.ts
Normal file
566
backend/src/routes/provisioning.routes.ts
Normal file
@@ -0,0 +1,566 @@
|
||||
import {
|
||||
ProductType,
|
||||
ServiceLifecycleStatus,
|
||||
TemplateType,
|
||||
VmType
|
||||
} from "@prisma/client";
|
||||
import { Router } from "express";
|
||||
import { z } from "zod";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { toPrismaJsonValue } from "../lib/prisma-json";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { authorize, isTenantScopedUser, requireAuth } from "../middleware/auth";
|
||||
import { logAudit } from "../services/audit.service";
|
||||
import {
|
||||
createApplicationGroup,
|
||||
createPlacementPolicy,
|
||||
createProvisionedService,
|
||||
createTemplate,
|
||||
createVmIdRange,
|
||||
deleteApplicationGroup,
|
||||
deletePlacementPolicy,
|
||||
deleteTemplate,
|
||||
deleteVmIdRange,
|
||||
listApplicationGroups,
|
||||
listPlacementPolicies,
|
||||
listProvisionedServices,
|
||||
listTemplates,
|
||||
listVmIdRanges,
|
||||
setApplicationGroupTemplates,
|
||||
suspendProvisionedService,
|
||||
terminateProvisionedService,
|
||||
unsuspendProvisionedService,
|
||||
updateApplicationGroup,
|
||||
updatePlacementPolicy,
|
||||
updateProvisionedServicePackage,
|
||||
updateTemplate,
|
||||
updateVmIdRange
|
||||
} from "../services/provisioning.service";
|
||||
|
||||
const router = Router();
|
||||
|
||||
// Request schema for creating a provisioning template.
const templateCreateSchema = z.object({
  name: z.string().min(2),
  slug: z.string().optional(),
  template_type: z.nativeEnum(TemplateType),
  virtualization_type: z.nativeEnum(VmType).optional(),
  source: z.string().optional(),
  description: z.string().optional(),
  default_cloud_init: z.string().optional(),
  metadata: z.record(z.unknown()).optional()
});

// Request schema for updating a template; template_type and
// virtualization_type are intentionally not updatable here.
const templateUpdateSchema = z.object({
  name: z.string().min(2).optional(),
  slug: z.string().optional(),
  source: z.string().optional(),
  description: z.string().optional(),
  default_cloud_init: z.string().optional(),
  is_active: z.boolean().optional(),
  metadata: z.record(z.unknown()).optional()
});

// Request schema for creating an application group.
const groupCreateSchema = z.object({
  name: z.string().min(2),
  slug: z.string().optional(),
  description: z.string().optional()
});

// Request schema for updating an application group.
const groupUpdateSchema = z.object({
  name: z.string().min(2).optional(),
  slug: z.string().optional(),
  description: z.string().optional(),
  is_active: z.boolean().optional()
});

// Replacement list of template assignments for a group; an omitted body
// defaults to [] (clears all assignments).
const groupTemplatesSchema = z.object({
  templates: z
    .array(
      z.object({
        template_id: z.string().min(1),
        priority: z.number().int().positive().optional()
      })
    )
    .default([])
});

// Shared schema for creating AND updating placement policies. Weights are
// bounded to 0-1000; limit fields are optional positive integers.
const placementPolicySchema = z.object({
  group_id: z.string().optional(),
  node_id: z.string().optional(),
  product_type: z.nativeEnum(ProductType).optional(),
  cpu_weight: z.number().int().min(0).max(1000).optional(),
  ram_weight: z.number().int().min(0).max(1000).optional(),
  disk_weight: z.number().int().min(0).max(1000).optional(),
  vm_count_weight: z.number().int().min(0).max(1000).optional(),
  max_vms: z.number().int().positive().optional(),
  min_free_ram_mb: z.number().int().positive().optional(),
  min_free_disk_gb: z.number().int().positive().optional(),
  is_active: z.boolean().optional()
});
|
||||
|
||||
const vmidRangeCreateSchema = z.object({
|
||||
node_id: z.string().optional(),
|
||||
node_hostname: z.string().min(1),
|
||||
application_group_id: z.string().optional(),
|
||||
range_start: z.number().int().positive(),
|
||||
range_end: z.number().int().positive(),
|
||||
next_vmid: z.number().int().positive().optional()
|
||||
});
|
||||
|
||||
const vmidRangeUpdateSchema = z.object({
|
||||
range_start: z.number().int().positive().optional(),
|
||||
range_end: z.number().int().positive().optional(),
|
||||
next_vmid: z.number().int().positive().optional(),
|
||||
is_active: z.boolean().optional()
|
||||
});
|
||||
|
||||
// Request schema for creating provisioned services. vm_count is capped at 20
// per request; auto_node defaults to automatic placement.
const serviceCreateSchema = z.object({
  name: z.string().min(2),
  tenant_id: z.string().min(1),
  product_type: z.nativeEnum(ProductType).default(ProductType.VPS),
  virtualization_type: z.nativeEnum(VmType).default(VmType.QEMU),
  vm_count: z.number().int().min(1).max(20).default(1),
  target_node: z.string().optional(),
  auto_node: z.boolean().default(true),
  application_group_id: z.string().optional(),
  template_id: z.string().optional(),
  billing_plan_id: z.string().optional(),
  package_options: z.record(z.unknown()).optional()
});

// Optional free-text reason for a suspension.
const serviceSuspendSchema = z.object({
  reason: z.string().optional()
});

// Termination request; hard_delete defaults to a soft terminate.
const serviceTerminateSchema = z.object({
  reason: z.string().optional(),
  hard_delete: z.boolean().default(false)
});

// Full replacement of a service's package options (required, not partial).
const servicePackageSchema = z.object({
  package_options: z.record(z.unknown())
});
|
||||
|
||||
function parseOptionalLifecycleStatus(value: unknown) {
|
||||
if (typeof value !== "string") return undefined;
|
||||
const normalized = value.toUpperCase();
|
||||
return Object.values(ServiceLifecycleStatus).includes(normalized as ServiceLifecycleStatus)
|
||||
? (normalized as ServiceLifecycleStatus)
|
||||
: undefined;
|
||||
}
|
||||
|
||||
// Load a provisioned service (with a summary of its VM) and enforce tenant
// scope: tenant-scoped callers may only touch services in their own tenant.
// Throws 404 when the service does not exist, 403 on a tenant mismatch.
async function ensureServiceTenantScope(serviceId: string, req: Express.Request) {
  const service = await prisma.provisionedService.findUnique({
    where: { id: serviceId },
    include: {
      vm: {
        select: {
          id: true,
          tenant_id: true,
          name: true
        }
      }
    }
  });

  if (!service) {
    throw new HttpError(404, "Provisioned service not found", "SERVICE_NOT_FOUND");
  }

  // Non-tenant-scoped (global/admin) users skip the check, as do tenant users
  // whose token carries no tenant_id.
  if (isTenantScopedUser(req) && req.user?.tenant_id && service.tenant_id !== req.user.tenant_id) {
    throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
  }

  return service;
}
|
||||
|
||||
router.get("/templates", requireAuth, authorize("vm:read"), async (req, res, next) => {
|
||||
try {
|
||||
const templateType = typeof req.query.template_type === "string" ? req.query.template_type.toUpperCase() : undefined;
|
||||
const isActive =
|
||||
typeof req.query.is_active === "string"
|
||||
? req.query.is_active === "true"
|
||||
: undefined;
|
||||
|
||||
const templates = await listTemplates({
|
||||
templateType,
|
||||
isActive
|
||||
});
|
||||
|
||||
res.json({ data: templates });
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/templates", requireAuth, authorize("vm:create"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = templateCreateSchema.parse(req.body ?? {});
|
||||
const template = await createTemplate({
|
||||
name: payload.name,
|
||||
slug: payload.slug,
|
||||
templateType: payload.template_type,
|
||||
virtualizationType: payload.virtualization_type,
|
||||
source: payload.source,
|
||||
description: payload.description,
|
||||
defaultCloudInit: payload.default_cloud_init,
|
||||
metadata: payload.metadata ? toPrismaJsonValue(payload.metadata) : undefined
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "template.create",
|
||||
resource_type: "SYSTEM",
|
||||
resource_id: template.id,
|
||||
resource_name: template.name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue(payload),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
res.status(201).json(template);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/templates/:id", requireAuth, authorize("vm:update"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = templateUpdateSchema.parse(req.body ?? {});
|
||||
const template = await updateTemplate(req.params.id, {
|
||||
name: payload.name,
|
||||
slug: payload.slug,
|
||||
source: payload.source,
|
||||
description: payload.description,
|
||||
defaultCloudInit: payload.default_cloud_init,
|
||||
isActive: payload.is_active,
|
||||
metadata: payload.metadata ? toPrismaJsonValue(payload.metadata) : undefined
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "template.update",
|
||||
resource_type: "SYSTEM",
|
||||
resource_id: template.id,
|
||||
resource_name: template.name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue(payload),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
res.json(template);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.delete("/templates/:id", requireAuth, authorize("vm:delete"), async (req, res, next) => {
|
||||
try {
|
||||
await deleteTemplate(req.params.id);
|
||||
res.status(204).send();
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/application-groups", requireAuth, authorize("vm:read"), async (_req, res, next) => {
|
||||
try {
|
||||
const groups = await listApplicationGroups();
|
||||
res.json({ data: groups });
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/application-groups", requireAuth, authorize("vm:create"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = groupCreateSchema.parse(req.body ?? {});
|
||||
const group = await createApplicationGroup({
|
||||
name: payload.name,
|
||||
slug: payload.slug,
|
||||
description: payload.description
|
||||
});
|
||||
res.status(201).json(group);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/application-groups/:id", requireAuth, authorize("vm:update"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = groupUpdateSchema.parse(req.body ?? {});
|
||||
const group = await updateApplicationGroup(req.params.id, {
|
||||
name: payload.name,
|
||||
slug: payload.slug,
|
||||
description: payload.description,
|
||||
isActive: payload.is_active
|
||||
});
|
||||
res.json(group);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.delete("/application-groups/:id", requireAuth, authorize("vm:delete"), async (req, res, next) => {
|
||||
try {
|
||||
await deleteApplicationGroup(req.params.id);
|
||||
res.status(204).send();
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.put("/application-groups/:id/templates", requireAuth, authorize("vm:update"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = groupTemplatesSchema.parse(req.body ?? {});
|
||||
|
||||
const assignments = await setApplicationGroupTemplates(
|
||||
req.params.id,
|
||||
payload.templates.map((template) => ({
|
||||
templateId: template.template_id,
|
||||
priority: template.priority
|
||||
}))
|
||||
);
|
||||
|
||||
res.json({ data: assignments });
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/placement-policies", requireAuth, authorize("node:read"), async (_req, res, next) => {
|
||||
try {
|
||||
const policies = await listPlacementPolicies();
|
||||
res.json({ data: policies });
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/placement-policies", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = placementPolicySchema.parse(req.body ?? {});
|
||||
const policy = await createPlacementPolicy({
|
||||
groupId: payload.group_id,
|
||||
nodeId: payload.node_id,
|
||||
productType: payload.product_type,
|
||||
cpuWeight: payload.cpu_weight,
|
||||
ramWeight: payload.ram_weight,
|
||||
diskWeight: payload.disk_weight,
|
||||
vmCountWeight: payload.vm_count_weight,
|
||||
maxVms: payload.max_vms,
|
||||
minFreeRamMb: payload.min_free_ram_mb,
|
||||
minFreeDiskGb: payload.min_free_disk_gb
|
||||
});
|
||||
res.status(201).json(policy);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/placement-policies/:id", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = placementPolicySchema.parse(req.body ?? {});
|
||||
const policy = await updatePlacementPolicy(req.params.id, {
|
||||
cpuWeight: payload.cpu_weight,
|
||||
ramWeight: payload.ram_weight,
|
||||
diskWeight: payload.disk_weight,
|
||||
vmCountWeight: payload.vm_count_weight,
|
||||
maxVms: payload.max_vms ?? null,
|
||||
minFreeRamMb: payload.min_free_ram_mb ?? null,
|
||||
minFreeDiskGb: payload.min_free_disk_gb ?? null,
|
||||
isActive: payload.is_active
|
||||
});
|
||||
res.json(policy);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.delete("/placement-policies/:id", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
await deletePlacementPolicy(req.params.id);
|
||||
res.status(204).send();
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/vmid-ranges", requireAuth, authorize("node:read"), async (_req, res, next) => {
|
||||
try {
|
||||
const ranges = await listVmIdRanges();
|
||||
res.json({ data: ranges });
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/vmid-ranges", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = vmidRangeCreateSchema.parse(req.body ?? {});
|
||||
const range = await createVmIdRange({
|
||||
nodeId: payload.node_id,
|
||||
nodeHostname: payload.node_hostname,
|
||||
applicationGroupId: payload.application_group_id,
|
||||
rangeStart: payload.range_start,
|
||||
rangeEnd: payload.range_end,
|
||||
nextVmid: payload.next_vmid
|
||||
});
|
||||
res.status(201).json(range);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/vmid-ranges/:id", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = vmidRangeUpdateSchema.parse(req.body ?? {});
|
||||
const range = await updateVmIdRange(req.params.id, {
|
||||
rangeStart: payload.range_start,
|
||||
rangeEnd: payload.range_end,
|
||||
nextVmid: payload.next_vmid,
|
||||
isActive: payload.is_active
|
||||
});
|
||||
res.json(range);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.delete("/vmid-ranges/:id", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
await deleteVmIdRange(req.params.id);
|
||||
res.status(204).send();
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/services", requireAuth, authorize("vm:read"), async (req, res, next) => {
|
||||
try {
|
||||
const lifecycleStatus = parseOptionalLifecycleStatus(req.query.lifecycle_status);
|
||||
const limit = typeof req.query.limit === "string" ? Number(req.query.limit) : undefined;
|
||||
const offset = typeof req.query.offset === "string" ? Number(req.query.offset) : undefined;
|
||||
|
||||
const result = await listProvisionedServices({
|
||||
tenantId: isTenantScopedUser(req) ? req.user?.tenant_id ?? undefined : undefined,
|
||||
lifecycleStatus,
|
||||
limit,
|
||||
offset
|
||||
});
|
||||
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/services", requireAuth, authorize("vm:create"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = serviceCreateSchema.parse(req.body ?? {});
|
||||
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && payload.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
const services = await createProvisionedService({
|
||||
name: payload.name,
|
||||
tenantId: payload.tenant_id,
|
||||
productType: payload.product_type,
|
||||
virtualizationType: payload.virtualization_type,
|
||||
vmCount: payload.vm_count,
|
||||
targetNode: payload.target_node,
|
||||
autoNode: payload.auto_node,
|
||||
applicationGroupId: payload.application_group_id,
|
||||
templateId: payload.template_id,
|
||||
billingPlanId: payload.billing_plan_id,
|
||||
packageOptions: payload.package_options ? toPrismaJsonValue(payload.package_options) : undefined,
|
||||
createdBy: req.user?.email
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "service.create",
|
||||
resource_type: "VM",
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: {
|
||||
tenant_id: payload.tenant_id,
|
||||
product_type: payload.product_type,
|
||||
vm_count: payload.vm_count,
|
||||
created_services: services.map((service) => service.id)
|
||||
},
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
res.status(201).json({ data: services });
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/services/:id/suspend", requireAuth, authorize("vm:update"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = serviceSuspendSchema.parse(req.body ?? {});
|
||||
await ensureServiceTenantScope(req.params.id, req);
|
||||
|
||||
const service = await suspendProvisionedService({
|
||||
serviceId: req.params.id,
|
||||
actorEmail: req.user!.email,
|
||||
reason: payload.reason
|
||||
});
|
||||
|
||||
res.json(service);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/services/:id/unsuspend", requireAuth, authorize("vm:update"), async (req, res, next) => {
|
||||
try {
|
||||
await ensureServiceTenantScope(req.params.id, req);
|
||||
const service = await unsuspendProvisionedService({
|
||||
serviceId: req.params.id,
|
||||
actorEmail: req.user!.email
|
||||
});
|
||||
res.json(service);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/services/:id/terminate", requireAuth, authorize("vm:delete"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = serviceTerminateSchema.parse(req.body ?? {});
|
||||
await ensureServiceTenantScope(req.params.id, req);
|
||||
|
||||
const service = await terminateProvisionedService({
|
||||
serviceId: req.params.id,
|
||||
actorEmail: req.user!.email,
|
||||
reason: payload.reason,
|
||||
hardDelete: payload.hard_delete
|
||||
});
|
||||
|
||||
res.json(service);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/services/:id/package-options", requireAuth, authorize("vm:update"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = servicePackageSchema.parse(req.body ?? {});
|
||||
await ensureServiceTenantScope(req.params.id, req);
|
||||
|
||||
const service = await updateProvisionedServicePackage({
|
||||
serviceId: req.params.id,
|
||||
actorEmail: req.user!.email,
|
||||
packageOptions: toPrismaJsonValue(payload.package_options)
|
||||
});
|
||||
|
||||
res.json(service);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
||||
637
backend/src/routes/proxmox.routes.ts
Normal file
637
backend/src/routes/proxmox.routes.ts
Normal file
@@ -0,0 +1,637 @@
|
||||
import { OperationTaskType, Prisma } from "@prisma/client";
|
||||
import { Router } from "express";
|
||||
import { z } from "zod";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { authorize, requireAuth } from "../middleware/auth";
|
||||
import {
|
||||
addVmDisk,
|
||||
clusterUsageGraphs,
|
||||
deleteVm,
|
||||
migrateVm,
|
||||
nodeUsageGraphs,
|
||||
vmUsageGraphs,
|
||||
reinstallVm,
|
||||
reconfigureVmNetwork,
|
||||
restartVm,
|
||||
resumeVm,
|
||||
shutdownVm,
|
||||
startVm,
|
||||
stopVm,
|
||||
suspendVm,
|
||||
syncNodesAndVirtualMachines,
|
||||
updateVmConfiguration,
|
||||
vmConsoleTicket
|
||||
} from "../services/proxmox.service";
|
||||
import { logAudit } from "../services/audit.service";
|
||||
import {
|
||||
createOperationTask,
|
||||
markOperationTaskFailed,
|
||||
markOperationTaskRunning,
|
||||
markOperationTaskSuccess
|
||||
} from "../services/operations.service";
|
||||
|
||||
const router = Router();
// Accepted console flavours and RRD timeframes; presumably consumed by the
// console-ticket and usage-graph routes later in this file (not all visible
// in this chunk).
const consoleTypeSchema = z.enum(["novnc", "spice", "xterm"]);
const graphTimeframeSchema = z.enum(["hour", "day", "week", "month", "year"]);
|
||||
|
||||
function vmRuntimeType(vm: { type: "QEMU" | "LXC" }) {
|
||||
return vm.type === "LXC" ? "lxc" : "qemu";
|
||||
}
|
||||
|
||||
function withUpid(payload: Prisma.InputJsonObject, upid?: string): Prisma.InputJsonObject {
|
||||
if (!upid) {
|
||||
return payload;
|
||||
}
|
||||
|
||||
return {
|
||||
...payload,
|
||||
upid
|
||||
};
|
||||
}
|
||||
|
||||
// Load a VirtualMachine row by panel id; throws 404 instead of returning
// null so route handlers can use the result unconditionally.
async function fetchVm(vmId: string) {
  const vm = await prisma.virtualMachine.findUnique({ where: { id: vmId } });
  if (!vm) {
    throw new HttpError(404, "VM not found", "VM_NOT_FOUND");
  }
  return vm;
}
|
||||
|
||||
// Resolve the console proxy target URL for a node and console flavour from
// the "console_proxy" setting. Two layouts are supported:
//   mode "cluster" (default) — one { novnc, spice, xterm } mapping for all
//   mode "per_node"          — per-hostname mappings, falling back to the
//                              cluster mapping when the node has none
// Returns undefined when no matching target is configured.
async function resolveConsoleProxyTarget(node: string, consoleType: "novnc" | "spice" | "xterm") {
  const setting = await prisma.setting.findUnique({
    where: {
      key: "console_proxy"
    }
  });

  // The setting value is loosely-typed JSON; the shape below is asserted,
  // not validated, so unexpected structures fall through to undefined.
  const raw = setting?.value as
    | {
        mode?: "cluster" | "per_node";
        cluster?: Record<string, unknown>;
        nodes?: Record<string, Record<string, unknown>>;
      }
    | undefined;

  if (!raw) {
    return undefined;
  }

  const mode = raw.mode ?? "cluster";
  if (mode === "per_node") {
    const nodeConfig = raw.nodes?.[node];
    if (nodeConfig && typeof nodeConfig[consoleType] === "string") {
      return String(nodeConfig[consoleType]);
    }
  }

  // Cluster-wide mapping doubles as the per-node fallback.
  if (raw.cluster && typeof raw.cluster[consoleType] === "string") {
    return String(raw.cluster[consoleType]);
  }

  return undefined;
}
|
||||
|
||||
router.post("/sync", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const task = await createOperationTask({
|
||||
taskType: OperationTaskType.SYSTEM_SYNC,
|
||||
requestedBy: req.user?.email,
|
||||
payload: { source: "manual_sync" }
|
||||
});
|
||||
|
||||
await markOperationTaskRunning(task.id);
|
||||
|
||||
try {
|
||||
const result = await syncNodesAndVirtualMachines();
|
||||
await markOperationTaskSuccess(task.id, {
|
||||
node_count: result.node_count
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "proxmox_sync",
|
||||
resource_type: "NODE",
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: {
|
||||
node_count: result.node_count,
|
||||
task_id: task.id
|
||||
},
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
res.json({
|
||||
...result,
|
||||
task_id: task.id
|
||||
});
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Proxmox sync failed";
|
||||
await markOperationTaskFailed(task.id, message);
|
||||
throw error;
|
||||
}
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
// Valid power/lifecycle actions for the combined VM action endpoint below.
const actionSchema = z.object({
  action: z.enum(["start", "stop", "restart", "shutdown", "suspend", "resume", "delete"])
});

// POST /vms/:id/actions/:action — execute a power/lifecycle action on a VM.
// Each invocation is tracked as an OperationTask. The local status column is
// updated OPTIMISTICALLY: the Proxmox task identified by `upid` may still be
// running when the response is sent.
router.post("/vms/:id/actions/:action", requireAuth, authorize("vm:update"), async (req, res, next) => {
  try {
    // Unknown actions fail validation here (ZodError → error middleware).
    const { action } = actionSchema.parse(req.params);
    const vm = await fetchVm(req.params.id);
    const type = vmRuntimeType(vm);

    const taskType = action === "delete" ? OperationTaskType.VM_DELETE : OperationTaskType.VM_POWER;
    const task = await createOperationTask({
      taskType,
      vm: {
        id: vm.id,
        name: vm.name,
        node: vm.node
      },
      requestedBy: req.user?.email,
      payload: { action }
    });

    await markOperationTaskRunning(task.id);

    let upid: string | undefined;

    try {
      if (action === "start") {
        upid = await startVm(vm.node, vm.vmid, type);
        await prisma.virtualMachine.update({ where: { id: vm.id }, data: { status: "RUNNING", proxmox_upid: upid } });
      } else if (action === "stop") {
        upid = await stopVm(vm.node, vm.vmid, type);
        await prisma.virtualMachine.update({ where: { id: vm.id }, data: { status: "STOPPED", proxmox_upid: upid } });
      } else if (action === "restart") {
        upid = await restartVm(vm.node, vm.vmid, type);
        await prisma.virtualMachine.update({ where: { id: vm.id }, data: { status: "RUNNING", proxmox_upid: upid } });
      } else if (action === "shutdown") {
        upid = await shutdownVm(vm.node, vm.vmid, type);
        await prisma.virtualMachine.update({ where: { id: vm.id }, data: { status: "STOPPED", proxmox_upid: upid } });
      } else if (action === "suspend") {
        upid = await suspendVm(vm.node, vm.vmid, type);
        await prisma.virtualMachine.update({ where: { id: vm.id }, data: { status: "PAUSED", proxmox_upid: upid } });
      } else if (action === "resume") {
        upid = await resumeVm(vm.node, vm.vmid, type);
        await prisma.virtualMachine.update({ where: { id: vm.id }, data: { status: "RUNNING", proxmox_upid: upid } });
      } else {
        // "delete": the panel row is dropped immediately.
        // NOTE(review): the Proxmox delete task (upid) may still be in flight
        // at this point — confirm there is no need to await its completion
        // before removing the local record.
        upid = await deleteVm(vm.node, vm.vmid, type);
        await prisma.virtualMachine.delete({ where: { id: vm.id } });
      }

      const taskResult = withUpid(
        {
          vm_id: vm.id,
          action
        },
        upid
      );

      await markOperationTaskSuccess(task.id, taskResult, upid);

      await logAudit({
        action: `vm_${action}`,
        resource_type: "VM",
        resource_id: vm.id,
        resource_name: vm.name,
        actor_email: req.user!.email,
        actor_role: req.user!.role,
        details: {
          ...taskResult,
          task_id: task.id
        },
        ip_address: req.ip
      });

      res.json({ success: true, action, upid, task_id: task.id });
    } catch (error) {
      // Mark the task failed, then let the outer catch forward to Express.
      const message = error instanceof Error ? error.message : "VM action failed";
      await markOperationTaskFailed(task.id, message);
      throw error;
    }
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// Body schema for POST /vms/:id/migrate — the destination Proxmox node name.
const migrateSchema = z.object({
  target_node: z.string().min(1)
});
|
||||
|
||||
// POST /vms/:id/migrate — migrate a VM to another Proxmox node.
// Wraps the Proxmox call in an operation-task record so progress and
// failures are tracked, then updates the local VM row optimistically.
router.post("/vms/:id/migrate", requireAuth, authorize("vm:update"), async (req, res, next) => {
  try {
    const payload = migrateSchema.parse(req.body);
    const vm = await fetchVm(req.params.id);
    const type = vmRuntimeType(vm);

    // Create the task record before calling Proxmox so a failure below is
    // still captured as a failed task.
    const task = await createOperationTask({
      taskType: OperationTaskType.VM_MIGRATION,
      vm: {
        id: vm.id,
        name: vm.name,
        node: vm.node
      },
      requestedBy: req.user?.email,
      payload
    });

    await markOperationTaskRunning(task.id);

    try {
      const upid = await migrateVm(vm.node, vm.vmid, payload.target_node, type);
      // Point the record at the target node immediately; status stays
      // MIGRATING while the Proxmox task runs.
      await prisma.virtualMachine.update({
        where: { id: vm.id },
        data: { node: payload.target_node, status: "MIGRATING", proxmox_upid: upid }
      });

      // `vm.node` here is the pre-migration node (the in-memory object was
      // fetched before the update above), so from_node is the origin.
      const migrationResult = withUpid(
        {
          vm_id: vm.id,
          from_node: vm.node,
          target_node: payload.target_node
        },
        upid
      );

      await markOperationTaskSuccess(task.id, migrationResult, upid);
      res.json({ success: true, upid, target_node: payload.target_node, task_id: task.id });
    } catch (error) {
      const message = error instanceof Error ? error.message : "VM migrate failed";
      await markOperationTaskFailed(task.id, message);
      throw error; // re-thrown so the outer catch forwards it to next()
    }
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// Body schema for PATCH /vms/:id/config. Every field is optional, but the
// refinement rejects an entirely empty payload.
const configSchema = z
  .object({
    hostname: z.string().min(1).optional(),
    iso_image: z.string().min(1).optional(),
    boot_order: z.string().min(1).optional(),
    ssh_public_key: z.string().min(10).optional(),
    qemu_guest_agent: z.boolean().optional()
  })
  .refine((value) => Object.keys(value).length > 0, {
    message: "At least one configuration field is required"
  });
|
||||
|
||||
// PATCH /vms/:id/config — apply a partial configuration update to a VM.
// Only the fields present in the validated payload are forwarded to Proxmox,
// translated to Proxmox config keys (name/boot/sshkeys/ide2/agent).
router.patch("/vms/:id/config", requireAuth, authorize("vm:update"), async (req, res, next) => {
  try {
    const payload = configSchema.parse(req.body ?? {});
    const vm = await fetchVm(req.params.id);
    const type = vmRuntimeType(vm);

    // Map API field names onto Proxmox config keys.
    const config: Record<string, string | number | boolean> = {};
    if (payload.hostname) config.name = payload.hostname;
    if (payload.boot_order) config.boot = payload.boot_order;
    if (payload.ssh_public_key) config.sshkeys = payload.ssh_public_key;
    // ISO mount and guest agent only apply to QEMU VMs, not containers.
    if (payload.iso_image && vm.type === "QEMU") config.ide2 = `${payload.iso_image},media=cdrom`;
    if (typeof payload.qemu_guest_agent === "boolean" && vm.type === "QEMU") {
      config.agent = payload.qemu_guest_agent ? 1 : 0;
    }

    const task = await createOperationTask({
      taskType: OperationTaskType.VM_CONFIG,
      vm: { id: vm.id, name: vm.name, node: vm.node },
      requestedBy: req.user?.email,
      payload
    });

    await markOperationTaskRunning(task.id);

    try {
      const upid = await updateVmConfiguration(vm.node, vm.vmid, type, config);
      const configResult = withUpid(
        {
          vm_id: vm.id,
          config: config as unknown as Prisma.InputJsonValue
        },
        upid
      );
      await markOperationTaskSuccess(task.id, configResult, upid);

      // Audit trail entry for the config change (includes the applied keys).
      await logAudit({
        action: "vm_config_update",
        resource_type: "VM",
        resource_id: vm.id,
        resource_name: vm.name,
        actor_email: req.user!.email,
        actor_role: req.user!.role,
        details: {
          config: config as unknown as Prisma.InputJsonValue,
          task_id: task.id,
          ...(upid ? { upid } : {})
        },
        ip_address: req.ip
      });

      res.json({ success: true, upid, task_id: task.id, config_applied: config });
    } catch (error) {
      const message = error instanceof Error ? error.message : "VM config update failed";
      await markOperationTaskFailed(task.id, message);
      throw error; // re-thrown so the outer catch forwards it to next()
    }
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// Body schema for PATCH /vms/:id/network. `ip_cidr` is only required for
// static addressing; that dependency is enforced in the route handler.
const networkSchema = z.object({
  interface_name: z.string().optional(),
  bridge: z.string().min(1),
  vlan_tag: z.number().int().min(0).max(4094).optional(),
  rate_mbps: z.number().int().positive().optional(),
  firewall: z.boolean().optional(),
  ip_mode: z.enum(["dhcp", "static"]).default("dhcp"),
  ip_cidr: z.string().optional(),
  gateway: z.string().optional()
});
|
||||
|
||||
// PATCH /vms/:id/network — reconfigure a VM's network interface.
// Validates that static addressing includes a CIDR, then delegates the
// actual reconfiguration to reconfigureVmNetwork under an operation task.
router.patch("/vms/:id/network", requireAuth, authorize("vm:update"), async (req, res, next) => {
  try {
    const payload = networkSchema.parse(req.body ?? {});
    // Static addressing needs an explicit CIDR; DHCP does not.
    if (payload.ip_mode === "static" && !payload.ip_cidr) {
      throw new HttpError(400, "ip_cidr is required when ip_mode=static", "INVALID_NETWORK_PAYLOAD");
    }

    const vm = await fetchVm(req.params.id);
    const type = vmRuntimeType(vm);

    const task = await createOperationTask({
      taskType: OperationTaskType.VM_NETWORK,
      vm: { id: vm.id, name: vm.name, node: vm.node },
      requestedBy: req.user?.email,
      payload
    });

    await markOperationTaskRunning(task.id);

    try {
      // Shape the payload to exactly what reconfigureVmNetwork expects.
      const networkInput: Parameters<typeof reconfigureVmNetwork>[3] = {
        interface_name: payload.interface_name,
        bridge: payload.bridge,
        vlan_tag: payload.vlan_tag,
        rate_mbps: payload.rate_mbps,
        firewall: payload.firewall,
        ip_mode: payload.ip_mode,
        ip_cidr: payload.ip_cidr,
        gateway: payload.gateway
      };
      const upid = await reconfigureVmNetwork(vm.node, vm.vmid, type, networkInput);
      const networkResult = withUpid(
        {
          vm_id: vm.id,
          network: payload as unknown as Prisma.InputJsonValue
        },
        upid
      );
      await markOperationTaskSuccess(task.id, networkResult, upid);
      res.json({ success: true, upid, task_id: task.id });
    } catch (error) {
      const message = error instanceof Error ? error.message : "VM network update failed";
      await markOperationTaskFailed(task.id, message);
      throw error; // re-thrown so the outer catch forwards it to next()
    }
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// Body schema for POST /vms/:id/disks — a new disk to attach to the VM.
const diskSchema = z.object({
  storage: z.string().min(1),
  size_gb: z.number().int().positive(),
  bus: z.enum(["scsi", "sata", "virtio", "ide"]).default("scsi"),
  mount_point: z.string().optional()
});
|
||||
|
||||
// POST /vms/:id/disks — attach an additional disk to a VM.
// Recorded under the VM_CONFIG task type (a disk attach is a config change).
router.post("/vms/:id/disks", requireAuth, authorize("vm:update"), async (req, res, next) => {
  try {
    const payload = diskSchema.parse(req.body ?? {});
    const vm = await fetchVm(req.params.id);
    const type = vmRuntimeType(vm);

    const task = await createOperationTask({
      taskType: OperationTaskType.VM_CONFIG,
      vm: { id: vm.id, name: vm.name, node: vm.node },
      requestedBy: req.user?.email,
      payload
    });

    await markOperationTaskRunning(task.id);

    try {
      // Shape the payload to exactly what addVmDisk expects.
      const diskInput: Parameters<typeof addVmDisk>[3] = {
        storage: payload.storage,
        size_gb: payload.size_gb,
        bus: payload.bus,
        mount_point: payload.mount_point
      };
      const upid = await addVmDisk(vm.node, vm.vmid, type, diskInput);
      const diskResult = withUpid(
        {
          vm_id: vm.id,
          disk: payload as unknown as Prisma.InputJsonValue
        },
        upid
      );
      await markOperationTaskSuccess(task.id, diskResult, upid);
      // 201: a new disk resource was created on the VM.
      res.status(201).json({ success: true, upid, task_id: task.id });
    } catch (error) {
      const message = error instanceof Error ? error.message : "VM disk attach failed";
      await markOperationTaskFailed(task.id, message);
      throw error; // re-thrown so the outer catch forwards it to next()
    }
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// Body schema for POST /vms/:id/reinstall.
const reinstallSchema = z.object({
  backup_before_reinstall: z.boolean().default(false),
  iso_image: z.string().optional(),
  ssh_public_key: z.string().optional()
});
|
||||
|
||||
// POST /vms/:id/reinstall — wipe and reinstall a VM, optionally recording a
// backup row first. Tracked as a VM_REINSTALL operation task.
router.post("/vms/:id/reinstall", requireAuth, authorize("vm:update"), async (req, res, next) => {
  try {
    const payload = reinstallSchema.parse(req.body ?? {});
    const vm = await fetchVm(req.params.id);
    const type = vmRuntimeType(vm);

    if (payload.backup_before_reinstall) {
      // NOTE(review): only a PENDING backup row is created here; the actual
      // backup execution is not visible in this handler — confirm it runs
      // (and completes) before the reinstall call below if that guarantee
      // is expected.
      await prisma.backup.create({
        data: {
          vm_id: vm.id,
          vm_name: vm.name,
          node: vm.node,
          status: "PENDING",
          type: "FULL",
          schedule: "MANUAL",
          notes: "Auto-created before VM reinstall"
        }
      });
    }

    const task = await createOperationTask({
      taskType: OperationTaskType.VM_REINSTALL,
      vm: { id: vm.id, name: vm.name, node: vm.node },
      requestedBy: req.user?.email,
      payload
    });

    await markOperationTaskRunning(task.id);

    try {
      const upid = await reinstallVm(vm.node, vm.vmid, type, {
        iso_image: payload.iso_image,
        ssh_public_key: payload.ssh_public_key
      });

      // Mark the VM running again and remember the Proxmox task id (if any).
      await prisma.virtualMachine.update({
        where: { id: vm.id },
        data: {
          status: "RUNNING",
          proxmox_upid: upid ?? undefined
        }
      });

      const reinstallResult = withUpid(
        {
          vm_id: vm.id,
          reinstall: payload as unknown as Prisma.InputJsonValue
        },
        upid
      );

      await markOperationTaskSuccess(task.id, reinstallResult, upid);

      res.json({ success: true, upid, task_id: task.id });
    } catch (error) {
      const message = error instanceof Error ? error.message : "VM reinstall failed";
      await markOperationTaskFailed(task.id, message);
      throw error; // re-thrown so the outer catch forwards it to next()
    }
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
router.get("/vms/:id/console", requireAuth, authorize("vm:read"), async (req, res, next) => {
|
||||
try {
|
||||
const vm = await fetchVm(req.params.id);
|
||||
const type = vmRuntimeType(vm);
|
||||
const consoleType = consoleTypeSchema.parse(
|
||||
typeof req.query.console_type === "string"
|
||||
? req.query.console_type.toLowerCase()
|
||||
: "novnc"
|
||||
);
|
||||
const ticket = await vmConsoleTicket(vm.node, vm.vmid, type, consoleType);
|
||||
const proxyTarget = await resolveConsoleProxyTarget(vm.node, consoleType);
|
||||
|
||||
res.json({
|
||||
...ticket,
|
||||
console_type: consoleType,
|
||||
proxy_target: proxyTarget ?? null
|
||||
});
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/vms/:id/usage-graphs", requireAuth, authorize("vm:read"), async (req, res, next) => {
|
||||
try {
|
||||
const vm = await fetchVm(req.params.id);
|
||||
const type = vmRuntimeType(vm);
|
||||
const timeframe = graphTimeframeSchema.parse(
|
||||
typeof req.query.timeframe === "string" ? req.query.timeframe.toLowerCase() : "day"
|
||||
);
|
||||
|
||||
const graph = await vmUsageGraphs(vm.node, vm.vmid, type, timeframe, {
|
||||
cpu_usage: vm.cpu_usage,
|
||||
ram_usage: vm.ram_usage,
|
||||
disk_usage: vm.disk_usage,
|
||||
network_in: vm.network_in,
|
||||
network_out: vm.network_out
|
||||
});
|
||||
|
||||
return res.json({
|
||||
vm_id: vm.id,
|
||||
vm_name: vm.name,
|
||||
vm_type: vm.type,
|
||||
node: vm.node,
|
||||
timeframe: graph.timeframe,
|
||||
source: graph.source,
|
||||
summary: graph.summary,
|
||||
points: graph.points
|
||||
});
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/nodes/:id/usage-graphs", requireAuth, authorize("node:read"), async (req, res, next) => {
|
||||
try {
|
||||
const node = await prisma.proxmoxNode.findFirst({
|
||||
where: {
|
||||
OR: [{ id: req.params.id }, { hostname: req.params.id }, { name: req.params.id }]
|
||||
}
|
||||
});
|
||||
|
||||
if (!node) {
|
||||
throw new HttpError(404, "Node not found", "NODE_NOT_FOUND");
|
||||
}
|
||||
|
||||
const timeframe = graphTimeframeSchema.parse(
|
||||
typeof req.query.timeframe === "string" ? req.query.timeframe.toLowerCase() : "day"
|
||||
);
|
||||
|
||||
const graph = await nodeUsageGraphs(node.hostname, timeframe, {
|
||||
cpu_usage: node.cpu_usage,
|
||||
ram_used_mb: node.ram_used_mb,
|
||||
ram_total_mb: node.ram_total_mb,
|
||||
disk_used_gb: node.disk_used_gb,
|
||||
disk_total_gb: node.disk_total_gb
|
||||
});
|
||||
|
||||
return res.json({
|
||||
node_id: node.id,
|
||||
node_name: node.name,
|
||||
node_hostname: node.hostname,
|
||||
timeframe: graph.timeframe,
|
||||
source: graph.source,
|
||||
summary: graph.summary,
|
||||
points: graph.points
|
||||
});
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/cluster/usage-graphs", requireAuth, authorize("node:read"), async (req, res, next) => {
|
||||
try {
|
||||
const timeframe = graphTimeframeSchema.parse(
|
||||
typeof req.query.timeframe === "string" ? req.query.timeframe.toLowerCase() : "day"
|
||||
);
|
||||
const graph = await clusterUsageGraphs(timeframe);
|
||||
|
||||
return res.json({
|
||||
timeframe: graph.timeframe,
|
||||
source: graph.source,
|
||||
node_count: graph.node_count,
|
||||
nodes: graph.nodes,
|
||||
summary: graph.summary,
|
||||
points: graph.points
|
||||
});
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
// Router with the VM management, migration, config, network, disk,
// reinstall, console and usage-graph endpoints defined above.
export default router;
|
||||
785
backend/src/routes/resources.routes.ts
Normal file
785
backend/src/routes/resources.routes.ts
Normal file
@@ -0,0 +1,785 @@
|
||||
import { Router } from "express";
|
||||
import bcrypt from "bcryptjs";
|
||||
import { authorize, isTenantScopedUser, requireAuth } from "../middleware/auth";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { toPrismaJsonValue } from "../lib/prisma-json";
|
||||
import { logAudit } from "../services/audit.service";
|
||||
import { prisma } from "../lib/prisma";
|
||||
// Express router for this module's generic resource endpoints.
const router = Router();
|
||||
|
||||
// Describes how one REST resource maps onto Prisma and the permission system.
type ResourceMeta = {
  // Prisma model delegate name (e.g. "virtualMachine" → prisma.virtualMachine).
  model: string;
  // Permission required to read/list the resource.
  readPermission: Parameters<typeof authorize>[0];
  // Write permissions are optional — NOTE(review): confirm how the route
  // handlers treat a resource with a missing create/update/delete permission.
  createPermission?: Parameters<typeof authorize>[0];
  updatePermission?: Parameters<typeof authorize>[0];
  deletePermission?: Parameters<typeof authorize>[0];
  // When true, tenant-scoped users are restricted to their tenant's rows
  // (see attachTenantWhere / ensureItemTenantScope below).
  tenantScoped: boolean;
  // Columns matched (case-insensitively) by the free-text search parameter
  // (see attachSearchWhere below).
  searchFields?: string[];
};
|
||||
|
||||
// Registry of every resource served by this module's routes. Keys are URL
// path segments; values describe the Prisma model, the permissions guarding
// each operation, tenant scoping, and searchable columns.
const resourceMap: Record<string, ResourceMeta> = {
  // --- Tenancy, compute and infrastructure ---
  tenants: {
    model: "tenant",
    readPermission: "tenant:read",
    createPermission: "tenant:manage",
    updatePermission: "tenant:manage",
    deletePermission: "tenant:manage",
    tenantScoped: false,
    searchFields: ["name", "owner_email", "slug"]
  },
  "virtual-machines": {
    model: "virtualMachine",
    readPermission: "vm:read",
    createPermission: "vm:create",
    updatePermission: "vm:update",
    deletePermission: "vm:delete",
    tenantScoped: true,
    searchFields: ["name", "ip_address", "node"]
  },
  nodes: {
    model: "proxmoxNode",
    readPermission: "node:read",
    createPermission: "node:manage",
    updatePermission: "node:manage",
    deletePermission: "node:manage",
    tenantScoped: false,
    searchFields: ["name", "hostname"]
  },
  // --- Billing ---
  "billing-plans": {
    model: "billingPlan",
    readPermission: "billing:read",
    createPermission: "billing:manage",
    updatePermission: "billing:manage",
    deletePermission: "billing:manage",
    tenantScoped: false,
    searchFields: ["name", "slug", "description"]
  },
  invoices: {
    model: "invoice",
    readPermission: "billing:read",
    createPermission: "billing:manage",
    updatePermission: "billing:manage",
    deletePermission: "billing:manage",
    tenantScoped: true,
    searchFields: ["invoice_number", "tenant_name", "payment_reference"]
  },
  "usage-records": {
    model: "usageRecord",
    readPermission: "billing:read",
    createPermission: "billing:manage",
    updatePermission: "billing:manage",
    deletePermission: "billing:manage",
    tenantScoped: true,
    searchFields: ["vm_name", "tenant_name", "plan_name"]
  },
  // --- Backups and snapshots ---
  backups: {
    model: "backup",
    readPermission: "backup:read",
    createPermission: "backup:manage",
    updatePermission: "backup:manage",
    deletePermission: "backup:manage",
    tenantScoped: true,
    searchFields: ["vm_name", "node", "storage"]
  },
  "backup-policies": {
    model: "backupPolicy",
    readPermission: "backup:read",
    createPermission: "backup:manage",
    updatePermission: "backup:manage",
    deletePermission: "backup:manage",
    tenantScoped: true
  },
  "backup-restore-tasks": {
    model: "backupRestoreTask",
    readPermission: "backup:read",
    createPermission: "backup:manage",
    updatePermission: "backup:manage",
    deletePermission: "backup:manage",
    tenantScoped: true
  },
  "snapshot-jobs": {
    model: "snapshotJob",
    readPermission: "backup:read",
    createPermission: "backup:manage",
    updatePermission: "backup:manage",
    deletePermission: "backup:manage",
    tenantScoped: true
  },
  // --- Audit and security ---
  // Read-only: no write permissions are declared for audit logs.
  "audit-logs": {
    model: "auditLog",
    readPermission: "audit:read",
    tenantScoped: false,
    searchFields: ["action", "resource_name", "actor_email"]
  },
  "security-events": {
    model: "securityEvent",
    readPermission: "security:read",
    createPermission: "security:manage",
    updatePermission: "security:manage",
    deletePermission: "security:manage",
    tenantScoped: false,
    searchFields: ["event_type", "source_ip", "description"]
  },
  "firewall-rules": {
    model: "firewallRule",
    readPermission: "security:read",
    createPermission: "security:manage",
    updatePermission: "security:manage",
    deletePermission: "security:manage",
    tenantScoped: false,
    searchFields: ["name", "source_ip", "destination_ip", "description"]
  },
  // --- Users and provisioning ---
  users: {
    model: "user",
    readPermission: "user:read",
    createPermission: "user:manage",
    updatePermission: "user:manage",
    deletePermission: "user:manage",
    tenantScoped: true,
    searchFields: ["email", "full_name"]
  },
  "app-templates": {
    model: "appTemplate",
    readPermission: "vm:read",
    createPermission: "vm:create",
    updatePermission: "vm:update",
    deletePermission: "vm:delete",
    tenantScoped: false,
    searchFields: ["name", "slug", "description", "source"]
  },
  "application-groups": {
    model: "applicationGroup",
    readPermission: "vm:read",
    createPermission: "vm:create",
    updatePermission: "vm:update",
    deletePermission: "vm:delete",
    tenantScoped: false,
    searchFields: ["name", "slug", "description"]
  },
  "placement-policies": {
    model: "nodePlacementPolicy",
    readPermission: "node:read",
    createPermission: "node:manage",
    updatePermission: "node:manage",
    deletePermission: "node:manage",
    tenantScoped: false
  },
  "vmid-ranges": {
    model: "vmIdRange",
    readPermission: "node:read",
    createPermission: "node:manage",
    updatePermission: "node:manage",
    deletePermission: "node:manage",
    tenantScoped: false
  },
  "provisioned-services": {
    model: "provisionedService",
    readPermission: "vm:read",
    createPermission: "vm:create",
    updatePermission: "vm:update",
    deletePermission: "vm:delete",
    tenantScoped: true
  },
  // --- Networking / IPAM ---
  "ip-addresses": {
    model: "ipAddressPool",
    readPermission: "node:read",
    createPermission: "node:manage",
    updatePermission: "node:manage",
    deletePermission: "node:manage",
    tenantScoped: true,
    searchFields: ["address", "subnet", "node_hostname", "bridge", "sdn_zone"]
  },
  "ip-assignments": {
    model: "ipAssignment",
    readPermission: "vm:read",
    createPermission: "vm:update",
    updatePermission: "vm:update",
    deletePermission: "vm:update",
    tenantScoped: true
  },
  "private-networks": {
    model: "privateNetwork",
    readPermission: "node:read",
    createPermission: "node:manage",
    updatePermission: "node:manage",
    deletePermission: "node:manage",
    tenantScoped: false,
    searchFields: ["name", "slug", "cidr", "bridge", "sdn_zone", "node_hostname"]
  },
  "private-network-attachments": {
    model: "privateNetworkAttachment",
    readPermission: "vm:read",
    createPermission: "vm:update",
    updatePermission: "vm:update",
    deletePermission: "vm:update",
    tenantScoped: true
  },
  "tenant-ip-quotas": {
    model: "tenantIpQuota",
    readPermission: "tenant:read",
    createPermission: "tenant:manage",
    updatePermission: "tenant:manage",
    deletePermission: "tenant:manage",
    tenantScoped: true
  },
  "ip-reserved-ranges": {
    model: "ipReservedRange",
    readPermission: "node:read",
    createPermission: "node:manage",
    updatePermission: "node:manage",
    deletePermission: "node:manage",
    tenantScoped: true,
    searchFields: ["name", "cidr", "reason", "node_hostname", "bridge", "sdn_zone"]
  },
  "ip-pool-policies": {
    model: "ipPoolPolicy",
    readPermission: "node:read",
    createPermission: "node:manage",
    updatePermission: "node:manage",
    deletePermission: "node:manage",
    tenantScoped: true,
    searchFields: ["name", "node_hostname", "bridge", "sdn_zone"]
  },
  // --- Health checks and monitoring ---
  "server-health-checks": {
    model: "serverHealthCheck",
    readPermission: "security:read",
    createPermission: "security:manage",
    updatePermission: "security:manage",
    deletePermission: "security:manage",
    tenantScoped: true,
    searchFields: ["name", "description"]
  },
  // Read-only results produced by the checks above.
  "server-health-check-results": {
    model: "serverHealthCheckResult",
    readPermission: "security:read",
    tenantScoped: true
  },
  "monitoring-alert-rules": {
    model: "monitoringAlertRule",
    readPermission: "security:read",
    createPermission: "security:manage",
    updatePermission: "security:manage",
    deletePermission: "security:manage",
    tenantScoped: true,
    searchFields: ["name", "description"]
  },
  // Events can be read and updated (e.g. acknowledged) but not created/deleted here.
  "monitoring-alert-events": {
    model: "monitoringAlertEvent",
    readPermission: "security:read",
    updatePermission: "security:manage",
    tenantScoped: true,
    searchFields: ["title", "message", "metric_key"]
  },
  "monitoring-alert-notifications": {
    model: "monitoringAlertNotification",
    readPermission: "security:read",
    tenantScoped: true,
    searchFields: ["destination", "provider_message"]
  },
  // --- CMS and sessions ---
  "cms-pages": {
    model: "cmsPage",
    readPermission: "settings:read",
    createPermission: "settings:manage",
    updatePermission: "settings:manage",
    deletePermission: "settings:manage",
    tenantScoped: false,
    searchFields: ["slug", "title", "section"]
  },
  "site-navigation-items": {
    model: "siteNavigationItem",
    readPermission: "settings:read",
    createPermission: "settings:manage",
    updatePermission: "settings:manage",
    deletePermission: "settings:manage",
    tenantScoped: false,
    searchFields: ["label", "href", "position"]
  },
  "auth-sessions": {
    model: "authSession",
    readPermission: "user:manage",
    updatePermission: "user:manage",
    deletePermission: "user:manage",
    tenantScoped: false
  }
};
|
||||
|
||||
function toEnumUpper(value: unknown): unknown {
|
||||
if (typeof value !== "string") return value;
|
||||
return value.replace(/-/g, "_").toUpperCase();
|
||||
}
|
||||
|
||||
// Normalize a raw create/update payload for one resource before it reaches
// Prisma: upper-cases enum-like fields, applies billing-plan and tenant
// defaults, generates invoice numbers, and coerces date strings to Dates.
// Returns a shallow copy; the input object is not mutated.
function normalizePayload(resource: string, input: Record<string, unknown>) {
  const data = { ...input };
  // Fields per resource that hold Prisma enum values and therefore must be
  // upper-snake-cased (see toEnumUpper).
  const enumFieldsByResource: Record<string, string[]> = {
    tenants: ["status", "currency", "payment_provider"],
    "virtual-machines": ["status", "type"],
    nodes: ["status"],
    "billing-plans": ["currency"],
    invoices: ["status", "currency", "payment_provider"],
    "usage-records": ["currency"],
    backups: ["status", "type", "schedule", "source"],
    "backup-restore-tasks": ["mode", "status"],
    "snapshot-jobs": ["frequency"],
    "audit-logs": ["resource_type", "severity"],
    "security-events": ["severity", "status"],
    "firewall-rules": ["direction", "action", "protocol", "applies_to"],
    users: ["role"],
    "app-templates": ["template_type", "virtualization_type"],
    "placement-policies": ["product_type"],
    "provisioned-services": ["product_type", "lifecycle_status"],
    "server-health-checks": ["target_type", "check_type"],
    "server-health-check-results": ["status", "severity"],
    "monitoring-alert-rules": ["severity"],
    "monitoring-alert-events": ["status", "severity"],
    "monitoring-alert-notifications": ["channel", "status"]
  };

  for (const field of enumFieldsByResource[resource] ?? []) {
    if (field in data && data[field] !== undefined && data[field] !== null) {
      data[field] = toEnumUpper(data[field]);
    }
  }

  if (resource === "billing-plans") {
    // Derive an hourly price from the monthly price (720 h/month) when the
    // caller did not supply one, rounded to 4 decimal places.
    const monthly = data.price_monthly;
    if (monthly !== undefined && (data.price_hourly === undefined || data.price_hourly === null)) {
      const monthlyNumber = Number(monthly);
      data.price_hourly = Number((monthlyNumber / 720).toFixed(4));
    }
    // Accept a JSON-encoded features string; unparseable input becomes [].
    if (typeof data.features === "string") {
      try {
        data.features = JSON.parse(data.features);
      } catch {
        data.features = [];
      }
    }
  }

  // Accept a JSON-encoded member_emails string; unparseable input becomes [].
  if (resource === "tenants" && typeof data.member_emails === "string") {
    try {
      data.member_emails = JSON.parse(data.member_emails);
    } catch {
      data.member_emails = [];
    }
  }

  // Generate an invoice number when none was supplied.
  // NOTE(review): Date.now() + a 4-digit random suffix is not guaranteed
  // unique under concurrency — confirm the column has a unique constraint.
  if (resource === "invoices" && !data.invoice_number) {
    data.invoice_number = `INV-${Date.now()}-${Math.floor(1000 + Math.random() * 9000)}`;
  }

  // Coerce ISO date strings to Date objects for Prisma DateTime columns.
  if (resource === "invoices" && data.due_date && typeof data.due_date === "string") {
    data.due_date = new Date(data.due_date);
  }
  if (resource === "invoices" && data.paid_date && typeof data.paid_date === "string") {
    data.paid_date = new Date(data.paid_date);
  }

  return data;
}
|
||||
|
||||
function getModel(meta: ResourceMeta) {
|
||||
return (prisma as any)[meta.model];
|
||||
}
|
||||
|
||||
function normalizeSortField(field: string) {
|
||||
const aliases: Record<string, string> = {
|
||||
created_date: "created_at",
|
||||
updated_date: "updated_at"
|
||||
};
|
||||
return aliases[field] ?? field;
|
||||
}
|
||||
|
||||
function parseOrder(sort?: string) {
|
||||
if (!sort) return { created_at: "desc" as const };
|
||||
if (sort.startsWith("-")) return { [normalizeSortField(sort.slice(1))]: "desc" as const };
|
||||
return { [normalizeSortField(sort)]: "asc" as const };
|
||||
}
|
||||
|
||||
// Mutate `where` in place so that a tenant-scoped user only sees rows
// belonging to their tenant. No-ops for non-tenant-scoped resources,
// non-tenant users, or users without a tenant_id. Several models need
// bespoke filters because their tenant linkage is indirect.
function attachTenantWhere(req: Express.Request, meta: ResourceMeta, where: Record<string, unknown>) {
  if (!meta.tenantScoped || !isTenantScopedUser(req)) return;
  const tenantId = req.user?.tenant_id;
  if (!tenantId) return;

  // Backups may carry their own tenant_id or inherit it via the VM relation.
  if (meta.model === "backup") {
    where.OR = [{ tenant_id: tenantId }, { vm: { tenant_id: tenantId } }];
    return;
  }

  // Restore tasks and snapshot jobs are scoped through their VM relation.
  if (meta.model === "backupRestoreTask") {
    where.source_vm = { tenant_id: tenantId };
    return;
  }

  if (meta.model === "snapshotJob") {
    where.vm = { tenant_id: tenantId };
    return;
  }

  if (meta.model === "backupPolicy") {
    where.tenant_id = tenantId;
    return;
  }

  // IP pool entries: rows assigned to the tenant, plus unassigned private
  // addresses that are still available.
  if (meta.model === "ipAddressPool") {
    where.OR = [{ assigned_tenant_id: tenantId }, { status: "AVAILABLE", scope: "PRIVATE" }];
    return;
  }

  if (meta.model === "ipAssignment") {
    where.tenant_id = tenantId;
    return;
  }

  if (meta.model === "privateNetworkAttachment") {
    where.tenant_id = tenantId;
    return;
  }

  if (meta.model === "tenantIpQuota") {
    where.tenant_id = tenantId;
    return;
  }

  // These models allow global rows (tenant_id null) alongside tenant rows.
  if (meta.model === "ipReservedRange" || meta.model === "ipPoolPolicy") {
    where.OR = [{ tenant_id: tenantId }, { tenant_id: null }];
    return;
  }

  if (meta.model === "serverHealthCheck") {
    where.OR = [{ tenant_id: tenantId }, { tenant_id: null }];
    return;
  }

  // Health-check results are scoped through their parent check.
  if (meta.model === "serverHealthCheckResult") {
    where.check = {
      OR: [{ tenant_id: tenantId }, { tenant_id: null }]
    };
    return;
  }

  if (meta.model === "monitoringAlertRule" || meta.model === "monitoringAlertEvent") {
    where.OR = [{ tenant_id: tenantId }, { tenant_id: null }];
    return;
  }

  // Notifications are scoped through their parent alert event.
  if (meta.model === "monitoringAlertNotification") {
    where.event = {
      OR: [{ tenant_id: tenantId }, { tenant_id: null }]
    };
    return;
  }

  // Default: the model has a direct tenant_id column.
  where.tenant_id = tenantId;
}
|
||||
|
||||
function attachSearchWhere(
|
||||
where: Record<string, unknown>,
|
||||
search: string,
|
||||
searchFields: string[] | undefined
|
||||
) {
|
||||
if (!search || !searchFields?.length) {
|
||||
return;
|
||||
}
|
||||
|
||||
const searchFilter = {
|
||||
OR: searchFields.map((field) => ({
|
||||
[field]: { contains: search, mode: "insensitive" }
|
||||
}))
|
||||
};
|
||||
|
||||
if (Array.isArray(where.OR)) {
|
||||
const existingOr = where.OR;
|
||||
delete where.OR;
|
||||
const existingAnd = Array.isArray(where.AND) ? where.AND : [];
|
||||
where.AND = [...existingAnd, { OR: existingOr }, searchFilter];
|
||||
return;
|
||||
}
|
||||
|
||||
if (Array.isArray(where.AND)) {
|
||||
where.AND = [...where.AND, searchFilter];
|
||||
return;
|
||||
}
|
||||
|
||||
where.AND = [searchFilter];
|
||||
}
|
||||
|
||||
/**
 * Enforce tenant ownership of a single record before it is returned or mutated.
 *
 * No-op when the resource is not tenant scoped, the caller is not a
 * tenant-scoped user, or the caller has no tenant_id. Otherwise the record's
 * owning tenant is resolved — directly from the row, or via a parent row
 * looked up through Prisma — and a 403 HttpError is thrown on mismatch.
 *
 * Fail-closed by default: if the owner cannot be resolved, ownerTenantId
 * stays undefined and the final inequality check still throws 403. Branches
 * that explicitly `return` on a null owner treat tenant_id === null as a
 * shared/global row visible to every tenant.
 */
async function ensureItemTenantScope(req: Express.Request, meta: ResourceMeta, item: Record<string, unknown>) {
  if (!meta.tenantScoped || !isTenantScopedUser(req) || !req.user?.tenant_id) {
    return;
  }

  const tenantId = req.user.tenant_id;
  let ownerTenantId: string | null | undefined;

  if (meta.model === "backup") {
    // Backups may carry tenant_id directly; otherwise inherit it from the VM.
    ownerTenantId = (item.tenant_id as string | null | undefined) ?? null;
    if (!ownerTenantId && typeof item.vm_id === "string") {
      const vm = await prisma.virtualMachine.findUnique({
        where: { id: item.vm_id },
        select: { tenant_id: true }
      });
      ownerTenantId = vm?.tenant_id;
    }
  } else if (meta.model === "backupRestoreTask") {
    // Ownership derived from the restore task's source VM.
    if (typeof item.source_vm_id === "string") {
      const vm = await prisma.virtualMachine.findUnique({
        where: { id: item.source_vm_id },
        select: { tenant_id: true }
      });
      ownerTenantId = vm?.tenant_id;
    }
  } else if (meta.model === "snapshotJob") {
    // Ownership derived from the snapshotted VM.
    if (typeof item.vm_id === "string") {
      const vm = await prisma.virtualMachine.findUnique({
        where: { id: item.vm_id },
        select: { tenant_id: true }
      });
      ownerTenantId = vm?.tenant_id;
    }
  } else if (meta.model === "ipAddressPool") {
    ownerTenantId = item.assigned_tenant_id as string | null | undefined;
    // Unassigned private pools that are still AVAILABLE are readable by any tenant.
    if (!ownerTenantId && item.status === "AVAILABLE" && item.scope === "PRIVATE") {
      return;
    }
  } else if (meta.model === "ipAssignment" || meta.model === "privateNetworkAttachment") {
    ownerTenantId = item.tenant_id as string | null | undefined;
  } else if (meta.model === "tenantIpQuota" || meta.model === "ipReservedRange" || meta.model === "ipPoolPolicy") {
    // tenant_id === null marks a global/default row, shared with all tenants.
    ownerTenantId = (item.tenant_id as string | null | undefined) ?? null;
    if (!ownerTenantId) return;
  } else if (meta.model === "serverHealthCheck") {
    ownerTenantId = (item.tenant_id as string | null | undefined) ?? null;
    if (!ownerTenantId) return;
  } else if (meta.model === "serverHealthCheckResult") {
    // Ownership derived from the parent health check; a check without a
    // tenant is treated as global.
    if (typeof item.check_id === "string") {
      const check = await prisma.serverHealthCheck.findUnique({
        where: { id: item.check_id },
        select: { tenant_id: true }
      });
      ownerTenantId = check?.tenant_id;
      if (!ownerTenantId) return;
    }
  } else if (meta.model === "monitoringAlertRule" || meta.model === "monitoringAlertEvent") {
    ownerTenantId = (item.tenant_id as string | null | undefined) ?? null;
    if (!ownerTenantId) return;
  } else if (meta.model === "monitoringAlertNotification") {
    // Ownership derived from the parent alert event; tenant-less events are global.
    if (typeof item.alert_event_id === "string") {
      const event = await prisma.monitoringAlertEvent.findUnique({
        where: { id: item.alert_event_id },
        select: { tenant_id: true }
      });
      ownerTenantId = event?.tenant_id;
      if (!ownerTenantId) return;
    }
  } else {
    // Default: the record carries its owning tenant directly.
    ownerTenantId = item.tenant_id as string | null | undefined;
  }

  if (ownerTenantId !== tenantId) {
    throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
  }
}
|
||||
|
||||
/**
 * Generic list endpoint: GET /:resource
 *
 * Resolves resource metadata, enforces the resource's read permission,
 * applies tenant scoping plus optional query filters (status, tenant_id,
 * vm_id, node, search, sort) and returns a page of records plus a total.
 */
router.get("/:resource", requireAuth, async (req, res, next) => {
  try {
    const resource = req.params.resource;
    const meta = resourceMap[resource];
    if (!meta) throw new HttpError(404, "Unknown resource", "UNKNOWN_RESOURCE");
    // authorize() is Express middleware; wrap it in a promise so a
    // permission failure rejects (and flows into catch) instead of hanging.
    await new Promise<void>((resolve, reject) => authorize(meta.readPermission)(req, res, (error) => (error ? reject(error) : resolve())));

    const model = getModel(meta);
    // Pagination: limit clamped to [1, 500] with default 100; offset >= 0, default 0.
    const rawLimit = Number(req.query.limit ?? 100);
    const rawOffset = Number(req.query.offset ?? 0);
    const limit = Number.isFinite(rawLimit) && rawLimit > 0 ? Math.min(Math.floor(rawLimit), 500) : 100;
    const offset = Number.isFinite(rawOffset) && rawOffset >= 0 ? Math.floor(rawOffset) : 0;
    const where: Record<string, unknown> = {};

    // Tenant scoping first, so attachSearchWhere can fold any OR it adds under AND.
    attachTenantWhere(req, meta, where);

    if (typeof req.query.status === "string") where.status = toEnumUpper(req.query.status);
    // Explicit tenant_id filter is only honoured for non-tenant-scoped (admin) callers.
    if (typeof req.query.tenant_id === "string" && !isTenantScopedUser(req)) where.tenant_id = req.query.tenant_id;
    if (typeof req.query.vm_id === "string") where.vm_id = req.query.vm_id;
    if (typeof req.query.node === "string") where.node = req.query.node;

    const search = typeof req.query.search === "string" ? req.query.search.trim() : "";
    attachSearchWhere(where, search, meta.searchFields);

    // Fetch the page and the total count in parallel.
    const [data, total] = await Promise.all([
      model.findMany({
        where,
        orderBy: parseOrder(typeof req.query.sort === "string" ? req.query.sort : undefined),
        take: limit,
        skip: offset
      }),
      model.count({ where })
    ]);

    res.json({
      data,
      meta: { total, limit, offset }
    });
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
router.get("/:resource/:id", requireAuth, async (req, res, next) => {
|
||||
try {
|
||||
const resource = req.params.resource;
|
||||
const meta = resourceMap[resource];
|
||||
if (!meta) throw new HttpError(404, "Unknown resource", "UNKNOWN_RESOURCE");
|
||||
await new Promise<void>((resolve, reject) => authorize(meta.readPermission)(req, res, (error) => (error ? reject(error) : resolve())));
|
||||
|
||||
const model = getModel(meta);
|
||||
const item = await model.findUnique({ where: { id: req.params.id } });
|
||||
if (!item) throw new HttpError(404, "Record not found", "NOT_FOUND");
|
||||
await ensureItemTenantScope(req, meta, item);
|
||||
res.json(item);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
/**
 * Generic create endpoint: POST /:resource
 *
 * Read-only resources (no createPermission) answer 405. The "users"
 * resource gets special handling: email normalization and bcrypt password
 * hashing. Tenant-scoped callers have their tenant_id forced onto the
 * payload (except for models whose ownership derives from the VM). Every
 * create is audit-logged; responds 201 with the created row.
 */
router.post("/:resource", requireAuth, async (req, res, next) => {
  try {
    const resource = req.params.resource;
    const meta = resourceMap[resource];
    if (!meta) throw new HttpError(404, "Unknown resource", "UNKNOWN_RESOURCE");
    if (!meta.createPermission) throw new HttpError(405, "Resource is read-only", "READ_ONLY");
    // authorize() is Express middleware; promisify it so failures reject here.
    await new Promise<void>((resolve, reject) => authorize(meta.createPermission!)(req, res, (error) => (error ? reject(error) : resolve())));

    const model = getModel(meta);
    const payload = normalizePayload(resource, req.body ?? {});

    if (resource === "users") {
      // Require a non-empty email; store it lower-cased and trimmed.
      const email = typeof payload.email === "string" ? payload.email.toLowerCase().trim() : "";
      if (!email) {
        throw new HttpError(400, "email is required for users.create", "VALIDATION_ERROR");
      }
      payload.email = email;

      // Require a plaintext password of at least 10 characters.
      const plainPassword = typeof payload.password === "string" ? payload.password : undefined;
      if (!plainPassword || plainPassword.length < 10) {
        throw new HttpError(400, "password (min 10 chars) is required for users.create", "VALIDATION_ERROR");
      }

      // Persist only the bcrypt hash; never the plaintext.
      payload.password_hash = await bcrypt.hash(plainPassword, 12);
      // New accounts must rotate the bootstrap password on first login.
      payload.must_change_password = true;
      payload.password_changed_at = new Date();
      delete payload.password;
      delete payload.password_hash_raw;
    }

    if (meta.tenantScoped && isTenantScopedUser(req) && req.user?.tenant_id) {
      // Force ownership to the caller's tenant, except for models that carry
      // no tenant_id of their own (ownership resolved via the related VM).
      if (
        meta.model !== "backupRestoreTask" &&
        meta.model !== "snapshotJob"
      ) {
        payload.tenant_id = req.user.tenant_id;
      }
    }

    const created = await model.create({ data: payload });

    // Audit trail: record actor, target and the created row itself.
    await logAudit({
      action: `${resource}.create`,
      resource_type: resource === "virtual-machines" ? "VM" : "SYSTEM",
      resource_id: created.id,
      resource_name: created.name ?? created.invoice_number ?? created.id,
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: toPrismaJsonValue({ resource, payload: created }),
      ip_address: req.ip
    });

    res.status(201).json(created);
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
/**
 * Generic update endpoint: PATCH /:resource/:id
 *
 * 405 for read-only resources. The existing record is loaded and
 * tenant-checked before the normalized payload is applied. For "users":
 * email is normalized, a client-supplied password_hash is discarded, and a
 * plaintext password (min 10 chars) is re-hashed with bcrypt. Every update
 * is audit-logged.
 *
 * NOTE(review): unlike create, tenant_id in the payload is not stripped or
 * forced here — confirm a tenant-scoped caller cannot reassign a record to
 * another tenant through this field.
 */
router.patch("/:resource/:id", requireAuth, async (req, res, next) => {
  try {
    const resource = req.params.resource;
    const meta = resourceMap[resource];
    if (!meta) throw new HttpError(404, "Unknown resource", "UNKNOWN_RESOURCE");
    if (!meta.updatePermission) throw new HttpError(405, "Resource is read-only", "READ_ONLY");
    // authorize() is Express middleware; promisify it so failures reject here.
    await new Promise<void>((resolve, reject) => authorize(meta.updatePermission!)(req, res, (error) => (error ? reject(error) : resolve())));

    const model = getModel(meta);
    const existing = await model.findUnique({ where: { id: req.params.id } });
    if (!existing) throw new HttpError(404, "Record not found", "NOT_FOUND");
    // Tenant check runs against the CURRENT record before any change is applied.
    await ensureItemTenantScope(req, meta, existing);

    const payload = normalizePayload(resource, req.body ?? {});
    if (resource === "users") {
      if (typeof payload.email === "string") {
        payload.email = payload.email.toLowerCase().trim();
      }
      // Never accept a pre-computed hash from the client.
      if ("password_hash" in payload) {
        delete payload.password_hash;
      }
      if (typeof payload.password === "string") {
        if (payload.password.length < 10) {
          throw new HttpError(400, "password must be at least 10 characters", "VALIDATION_ERROR");
        }
        payload.password_hash = await bcrypt.hash(payload.password, 12);
        // An explicit password change clears the forced-rotation flag.
        payload.must_change_password = false;
        payload.password_changed_at = new Date();
      }
      delete payload.password;
    }
    const updated = await model.update({
      where: { id: req.params.id },
      data: payload
    });

    // Audit trail: the payload logged here is the normalized request body
    // (post user-handling), not the resulting row.
    await logAudit({
      action: `${resource}.update`,
      resource_type: resource === "virtual-machines" ? "VM" : "SYSTEM",
      resource_id: updated.id,
      resource_name: updated.name ?? updated.invoice_number ?? updated.id,
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: toPrismaJsonValue({ resource, payload }),
      ip_address: req.ip
    });

    res.json(updated);
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
/**
 * Generic delete endpoint: DELETE /:resource/:id
 *
 * 405 for read-only resources. The record is loaded and tenant-checked
 * before deletion, the action is audit-logged, and 204 (no body) is
 * returned on success.
 */
router.delete("/:resource/:id", requireAuth, async (req, res, next) => {
  try {
    const resource = req.params.resource;
    const meta = resourceMap[resource];
    if (!meta) throw new HttpError(404, "Unknown resource", "UNKNOWN_RESOURCE");
    if (!meta.deletePermission) throw new HttpError(405, "Resource is read-only", "READ_ONLY");
    // authorize() is Express middleware; promisify it so failures reject here.
    await new Promise<void>((resolve, reject) => authorize(meta.deletePermission!)(req, res, (error) => (error ? reject(error) : resolve())));

    const model = getModel(meta);
    const existing = await model.findUnique({ where: { id: req.params.id } });
    if (!existing) throw new HttpError(404, "Record not found", "NOT_FOUND");
    await ensureItemTenantScope(req, meta, existing);

    await model.delete({ where: { id: req.params.id } });

    await logAudit({
      action: `${resource}.delete`,
      resource_type: resource === "virtual-machines" ? "VM" : "SYSTEM",
      resource_id: req.params.id,
      resource_name: existing.name ?? existing.invoice_number ?? existing.id,
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: toPrismaJsonValue({ resource }),
      ip_address: req.ip
    });

    res.status(204).send();
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
export default router;
|
||||
314
backend/src/routes/settings.routes.ts
Normal file
314
backend/src/routes/settings.routes.ts
Normal file
@@ -0,0 +1,314 @@
|
||||
import { Router } from "express";
|
||||
import { z } from "zod";
|
||||
import { authorize, requireAuth } from "../middleware/auth";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { toPrismaJsonValue } from "../lib/prisma-json";
|
||||
import { decryptJson, encryptJson } from "../lib/security";
|
||||
import { getOperationsPolicy } from "../services/operations.service";
|
||||
import { getSchedulerRuntimeSnapshot, reconfigureSchedulers, schedulerDefaults } from "../services/scheduler.service";
|
||||
|
||||
const router = Router();
|
||||
|
||||
// Proxmox API connection settings; persisted encrypted (see PUT /proxmox).
const proxmoxSchema = z.object({
  host: z.string().min(1),
  port: z.number().int().positive().default(8006),
  username: z.string().min(1),
  token_id: z.string().min(1),
  token_secret: z.string().min(1),
  verify_ssl: z.boolean().default(true)
});

// Payment-provider keys. The "*_previous" fields look like prior secrets
// kept around for key rotation — TODO confirm against the webhook verifiers.
const paymentSchema = z.object({
  default_provider: z.enum(["paystack", "flutterwave", "manual"]).default("paystack"),
  paystack_public: z.string().optional(),
  paystack_secret: z.string().optional(),
  paystack_secret_previous: z.string().optional(),
  flutterwave_public: z.string().optional(),
  flutterwave_secret: z.string().optional(),
  flutterwave_secret_previous: z.string().optional(),
  flutterwave_webhook_hash: z.string().optional(),
  flutterwave_webhook_hash_previous: z.string().optional(),
  callback_url: z.string().optional()
});

// Backup defaults, including optional Proxmox Backup Server (PBS) settings.
const backupSchema = z.object({
  default_source: z.enum(["local", "pbs", "remote"]).default("local"),
  default_storage: z.string().default("local-lvm"),
  max_restore_file_count: z.number().int().positive().default(100),
  pbs_enabled: z.boolean().default(false),
  pbs_host: z.string().optional(),
  pbs_datastore: z.string().optional(),
  pbs_namespace: z.string().optional(),
  pbs_verify_ssl: z.boolean().default(true)
});

// Console proxy URLs for a single node; each console type is optional.
const consoleProxyNodeSchema = z.object({
  novnc: z.string().url().optional(),
  spice: z.string().url().optional(),
  xterm: z.string().url().optional()
});

// Console proxy config: one cluster-wide endpoint set, or a map of per-node sets.
const consoleProxySchema = z.object({
  mode: z.enum(["cluster", "per_node"]).default("cluster"),
  cluster: consoleProxyNodeSchema.optional(),
  nodes: z.record(consoleProxyNodeSchema).optional()
});

// Cron overrides for the background schedulers; all fields optional.
const schedulerSchema = z.object({
  enable_scheduler: z.boolean().optional(),
  billing_cron: z.string().min(5).optional(),
  backup_cron: z.string().min(5).optional(),
  power_schedule_cron: z.string().min(5).optional(),
  monitoring_cron: z.string().min(5).optional(),
  operation_retry_cron: z.string().min(5).optional()
});

// Retry/notification policy for failed operations.
const operationsPolicySchema = z.object({
  max_retry_attempts: z.number().int().min(0).max(10).optional(),
  retry_backoff_minutes: z.number().int().min(1).max(720).optional(),
  notify_on_task_failure: z.boolean().optional(),
  notification_email: z.string().email().optional(),
  notification_webhook_url: z.string().url().optional(),
  email_gateway_url: z.string().url().optional()
});

// Notification toggles plus webhook/email endpoints.
const notificationsSchema = z.object({
  email_alerts: z.boolean().optional(),
  backup_alerts: z.boolean().optional(),
  billing_alerts: z.boolean().optional(),
  vm_alerts: z.boolean().optional(),
  monitoring_webhook_url: z.string().url().optional(),
  alert_webhook_url: z.string().url().optional(),
  email_gateway_url: z.string().url().optional(),
  notification_email_webhook: z.string().url().optional(),
  ops_email: z.string().email().optional()
});
|
||||
|
||||
function decodeSettingValue<T>(raw: unknown, fallback: T): T {
|
||||
const value = decryptJson<T>(raw);
|
||||
if (value === null || value === undefined) return fallback;
|
||||
return value;
|
||||
}
|
||||
|
||||
async function loadSetting<T>(key: string, fallback: T): Promise<T> {
|
||||
const setting = await prisma.setting.findUnique({ where: { key } });
|
||||
if (!setting) return fallback;
|
||||
return decodeSettingValue<T>(setting.value, fallback);
|
||||
}
|
||||
|
||||
/**
 * Upsert a Setting row, optionally encrypting the value before storage.
 *
 * When `encrypted` is true the value is passed through encryptJson first;
 * the is_encrypted flag is kept in sync on both create and update. Returns
 * the decoded value read back from the persisted row (falling back to the
 * input value when decoding yields nothing).
 */
async function saveSetting<T>(input: {
  key: string;
  type: "PROXMOX" | "PAYMENT" | "GENERAL" | "EMAIL";
  value: T;
  encrypted: boolean;
}) {
  const payloadValue = input.encrypted ? encryptJson(input.value) : input.value;
  const normalizedValue = toPrismaJsonValue(payloadValue);
  const setting = await prisma.setting.upsert({
    where: { key: input.key },
    update: { value: normalizedValue, is_encrypted: input.encrypted },
    create: { key: input.key, type: input.type, value: normalizedValue, is_encrypted: input.encrypted }
  });
  return decodeSettingValue<T>(setting.value, input.value);
}
|
||||
|
||||
// ---- Proxmox connection settings (persisted encrypted) ----

router.get("/proxmox", requireAuth, authorize("settings:read"), async (_req, res, next) => {
  try {
    // NOTE(review): the decrypted payload (including token_secret) is
    // returned to any settings:read holder — confirm this exposure is intended.
    const value = await loadSetting("proxmox", {});
    res.json(value);
  } catch (error) {
    next(error);
  }
});

router.put("/proxmox", requireAuth, authorize("settings:manage"), async (req, res, next) => {
  try {
    const payload = proxmoxSchema.parse(req.body);
    const value = await saveSetting({
      key: "proxmox",
      type: "PROXMOX",
      value: payload,
      encrypted: true
    });
    res.json(value);
  } catch (error) {
    next(error);
  }
});

// ---- Payment provider settings (persisted encrypted) ----

router.get("/payment", requireAuth, authorize("settings:read"), async (_req, res, next) => {
  try {
    // NOTE(review): secrets come back decrypted here as well — see /proxmox.
    const value = await loadSetting("payment", {});
    res.json(value);
  } catch (error) {
    next(error);
  }
});

router.put("/payment", requireAuth, authorize("settings:manage"), async (req, res, next) => {
  try {
    const payload = paymentSchema.parse(req.body);
    const value = await saveSetting({
      key: "payment",
      type: "PAYMENT",
      value: payload,
      encrypted: true
    });
    res.json(value);
  } catch (error) {
    next(error);
  }
});

// ---- Backup defaults (stored unencrypted) ----

router.get("/backup", requireAuth, authorize("settings:read"), async (_req, res, next) => {
  try {
    const value = await loadSetting("backup", {});
    res.json(value);
  } catch (error) {
    next(error);
  }
});

router.put("/backup", requireAuth, authorize("settings:manage"), async (req, res, next) => {
  try {
    const payload = backupSchema.parse(req.body);
    const value = await saveSetting({
      key: "backup",
      type: "GENERAL",
      value: payload,
      encrypted: false
    });
    res.json(value);
  } catch (error) {
    next(error);
  }
});

// ---- Console proxy endpoints (stored unencrypted) ----

router.get("/console-proxy", requireAuth, authorize("settings:read"), async (_req, res, next) => {
  try {
    res.json(
      (await loadSetting("console_proxy", {
        mode: "cluster",
        cluster: {},
        nodes: {}
      }))
    );
  } catch (error) {
    next(error);
  }
});

router.put("/console-proxy", requireAuth, authorize("settings:manage"), async (req, res, next) => {
  try {
    const payload = consoleProxySchema.parse(req.body);
    const value = await saveSetting({
      key: "console_proxy",
      type: "PROXMOX",
      value: payload,
      encrypted: false
    });
    res.json(value);
  } catch (error) {
    next(error);
  }
});

// ---- Scheduler configuration ----

router.get("/scheduler", requireAuth, authorize("settings:read"), async (_req, res, next) => {
  try {
    // Persisted overrides are merged over the code defaults; the live
    // runtime snapshot is returned alongside the effective config.
    const defaults = schedulerDefaults();
    const persisted = await loadSetting<Record<string, unknown>>("scheduler", {});
    const config = {
      ...defaults,
      ...persisted
    };
    return res.json({
      config,
      runtime: getSchedulerRuntimeSnapshot()
    });
  } catch (error) {
    return next(error);
  }
});

router.put("/scheduler", requireAuth, authorize("settings:manage"), async (req, res, next) => {
  try {
    const payload = schedulerSchema.parse(req.body);
    const config = await saveSetting({
      key: "scheduler",
      type: "GENERAL",
      value: payload,
      encrypted: false
    });

    // Re-apply the new cron configuration to the running schedulers.
    const runtime = await reconfigureSchedulers(payload);
    return res.json({
      config,
      runtime
    });
  } catch (error) {
    return next(error);
  }
});

// ---- Operations retry/notification policy (persisted encrypted) ----

router.get("/operations-policy", requireAuth, authorize("settings:read"), async (_req, res, next) => {
  try {
    const policy = await getOperationsPolicy();
    return res.json(policy);
  } catch (error) {
    return next(error);
  }
});

router.put("/operations-policy", requireAuth, authorize("settings:manage"), async (req, res, next) => {
  try {
    const payload = operationsPolicySchema.parse(req.body);
    await saveSetting({
      key: "operations_policy",
      type: "GENERAL",
      value: payload,
      encrypted: true
    });

    // Respond with the effective policy re-read through the service layer.
    const policy = await getOperationsPolicy();
    return res.json(policy);
  } catch (error) {
    return next(error);
  }
});

// ---- Notification toggles and endpoints (persisted encrypted) ----

router.get("/notifications", requireAuth, authorize("settings:read"), async (_req, res, next) => {
  try {
    return res.json(
      await loadSetting("notifications", {
        email_alerts: true,
        backup_alerts: true,
        billing_alerts: true,
        vm_alerts: true,
        monitoring_webhook_url: "",
        alert_webhook_url: "",
        email_gateway_url: "",
        notification_email_webhook: "",
        ops_email: ""
      })
    );
  } catch (error) {
    return next(error);
  }
});

router.put("/notifications", requireAuth, authorize("settings:manage"), async (req, res, next) => {
  try {
    const payload = notificationsSchema.parse(req.body);
    const value = await saveSetting({
      key: "notifications",
      type: "EMAIL",
      value: payload,
      encrypted: true
    });
    return res.json(value);
  } catch (error) {
    return next(error);
  }
});
|
||||
|
||||
export default router;
|
||||
703
backend/src/routes/system.routes.ts
Normal file
703
backend/src/routes/system.routes.ts
Normal file
@@ -0,0 +1,703 @@
|
||||
import { Router } from "express";
|
||||
import { z } from "zod";
|
||||
import { Role, TenantStatus } from "@prisma/client";
|
||||
import { authorize, isTenantScopedUser, requireAuth } from "../middleware/auth";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { logAudit } from "../services/audit.service";
|
||||
import { toPrismaJsonValue } from "../lib/prisma-json";
|
||||
import {
|
||||
MODULE_KEYS,
|
||||
getModulePolicy,
|
||||
getUserModuleAccess,
|
||||
toModulePolicyResponse,
|
||||
updateModulePolicy
|
||||
} from "../services/module-access.service";
|
||||
|
||||
const router = Router();
|
||||
// Base64 data URL for common raster image types (used for inline logo uploads).
const imageDataUrlRegex = /^data:image\/(png|jpe?g|webp|gif);base64,[A-Za-z0-9+/=]+$/i;

// White-label branding; the logo may be an external URL or an inline data
// URL capped at 1,500,000 characters of encoded text.
const brandingSchema = z.object({
  app_name: z.string().min(2).max(120),
  logo_url: z.union([z.string().url(), z.string().regex(imageDataUrlRegex).max(1_500_000)]).optional(),
  primary_color: z.string().optional(),
  accent_color: z.string().optional(),
  support_email: z.string().email().optional(),
  website_url: z.string().url().optional(),
  legal_company_name: z.string().optional(),
  copyright_notice: z.string().optional()
});

// Platform-wide defaults for tenant trials/subscriptions.
const subscriptionPolicySchema = z.object({
  default_trial_days: z.number().int().min(1).max(90).default(14),
  default_grace_days: z.number().int().min(0).max(30).default(3),
  trial_vm_limit: z.number().int().min(1).max(200).default(2),
  auto_suspend_on_expiry: z.boolean().default(true)
});

// Optional per-tenant overrides supplied when starting a trial.
const startTrialSchema = z.object({
  days: z.number().int().min(1).max(90).optional(),
  grace_days: z.number().int().min(0).max(30).optional(),
  vm_limit: z.number().int().min(1).max(200).optional()
});

// CMS page payload; slug restricted to lowercase kebab-case.
const cmsPageSchema = z.object({
  slug: z.string().min(2).max(180).regex(/^[a-z0-9-]+$/),
  title: z.string().min(2).max(180),
  section: z.string().min(2).max(80).default("general"),
  content: z.record(z.string(), z.any()).default({}),
  is_published: z.boolean().default(false)
});

// Site navigation entry with header/footer/legal placement and ordering.
const navItemSchema = z.object({
  label: z.string().min(1).max(120),
  href: z.string().min(1).max(260),
  position: z.enum(["header", "footer", "legal"]).default("header"),
  sort_order: z.number().int().min(0).max(10000).default(100),
  is_enabled: z.boolean().default(true),
  metadata: z.record(z.string(), z.any()).default({})
});

// Module access policy update: at least one module, each with 1–4 roles.
const modulePolicySchema = z.object({
  modules: z
    .array(
      z.object({
        key: z.enum(MODULE_KEYS),
        enabled: z.boolean(),
        roles: z.array(z.nativeEnum(Role)).min(1).max(4)
      })
    )
    .min(1)
});

// Global search input: q is 2–120 trimmed chars; limit coerced to an int in [1, 30].
const systemSearchSchema = z.object({
  q: z.string().trim().min(2).max(120),
  limit: z.coerce.number().int().min(1).max(30).default(12)
});
|
||||
|
||||
async function getSetting<T = unknown>(key: string, fallback: T): Promise<T> {
|
||||
const setting = await prisma.setting.findUnique({ where: { key } });
|
||||
if (!setting) return fallback;
|
||||
return (setting.value as T) ?? fallback;
|
||||
}
|
||||
|
||||
/**
 * Upsert an unencrypted Setting row (is_encrypted is always false here).
 * `type` defaults to "GENERAL" when omitted; the value is normalized to a
 * Prisma-compatible JSON value before storage.
 */
async function upsertSetting<T = unknown>(input: { key: string; type?: "GENERAL" | "SECURITY" | "NETWORK" | "PROXMOX" | "PAYMENT" | "EMAIL"; value: T }) {
  const normalizedValue = toPrismaJsonValue(input.value);
  return prisma.setting.upsert({
    where: { key: input.key },
    update: {
      value: normalizedValue
    },
    create: {
      key: input.key,
      type: input.type ?? "GENERAL",
      value: normalizedValue,
      is_encrypted: false
    }
  });
}
|
||||
|
||||
/**
 * Unauthenticated site bootstrap: branding + published CMS pages + enabled
 * navigation items, fetched in parallel. Consumed by the public frontend.
 */
router.get("/public/site", async (_req, res, next) => {
  try {
    const [branding, pages, navigation] = await Promise.all([
      getSetting("branding", {
        app_name: "VotCloud",
        legal_company_name: "VotCloud",
        copyright_notice: ""
      }),
      // Only published pages; grouped by section, newest first within each.
      prisma.cmsPage.findMany({
        where: { is_published: true },
        orderBy: [{ section: "asc" }, { updated_at: "desc" }]
      }),
      // Only enabled navigation items, ordered by placement then sort_order.
      prisma.siteNavigationItem.findMany({
        where: { is_enabled: true },
        orderBy: [{ position: "asc" }, { sort_order: "asc" }]
      })
    ]);

    res.json({
      branding,
      pages,
      navigation
    });
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
/**
 * Authenticated branding read for the admin settings UI; same defaults as
 * the public /public/site endpoint.
 */
router.get("/branding", requireAuth, authorize("settings:read"), async (_req, res, next) => {
  try {
    const branding = await getSetting("branding", {
      app_name: "VotCloud",
      legal_company_name: "VotCloud",
      copyright_notice: ""
    });
    res.json(branding);
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
router.get("/module-access", requireAuth, async (req, res, next) => {
|
||||
try {
|
||||
const access = await getUserModuleAccess(req.user!.role);
|
||||
return res.json(access);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
/**
 * Admin view of the full module policy; force_refresh bypasses any cached
 * policy inside the service.
 */
router.get("/modules-policy", requireAuth, authorize("settings:manage"), async (_req, res, next) => {
  try {
    const policy = await getModulePolicy({ force_refresh: true });
    return res.json({
      modules: toModulePolicyResponse(policy)
    });
  } catch (error) {
    return next(error);
  }
});
|
||||
|
||||
/**
 * Replace the module access policy. The validated payload is applied via
 * the module-access service, the change is audit-logged, and the resulting
 * policy is returned in the same shape as the GET endpoint.
 */
router.put("/modules-policy", requireAuth, authorize("settings:manage"), async (req, res, next) => {
  try {
    const payload = modulePolicySchema.parse(req.body ?? {});
    const policy = await updateModulePolicy(payload.modules);

    await logAudit({
      action: "system.modules_policy.update",
      resource_type: "SYSTEM",
      resource_name: "module_policy",
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: toPrismaJsonValue(payload),
      ip_address: req.ip
    });

    return res.json({
      modules: toModulePolicyResponse(policy)
    });
  } catch (error) {
    return next(error);
  }
});
|
||||
|
||||
router.get("/search", requireAuth, async (req, res, next) => {
|
||||
try {
|
||||
const parsed = systemSearchSchema.parse({
|
||||
q: req.query.q,
|
||||
limit: req.query.limit
|
||||
});
|
||||
|
||||
const takePerType = Math.min(6, Math.max(2, Math.floor(parsed.limit / 2)));
|
||||
const isTenantScoped = isTenantScopedUser(req) && Boolean(req.user?.tenant_id);
|
||||
const tenantId = req.user?.tenant_id ?? null;
|
||||
|
||||
const vmWhere: Record<string, unknown> = {
|
||||
OR: [
|
||||
{ name: { contains: parsed.q, mode: "insensitive" } },
|
||||
{ ip_address: { contains: parsed.q, mode: "insensitive" } },
|
||||
{ node: { contains: parsed.q, mode: "insensitive" } }
|
||||
]
|
||||
};
|
||||
if (isTenantScoped && tenantId) {
|
||||
vmWhere.tenant_id = tenantId;
|
||||
}
|
||||
|
||||
const tenantWhere: Record<string, unknown> = {
|
||||
OR: [
|
||||
{ name: { contains: parsed.q, mode: "insensitive" } },
|
||||
{ owner_email: { contains: parsed.q, mode: "insensitive" } },
|
||||
{ slug: { contains: parsed.q, mode: "insensitive" } }
|
||||
]
|
||||
};
|
||||
if (isTenantScoped && tenantId) {
|
||||
tenantWhere.id = tenantId;
|
||||
}
|
||||
|
||||
const userWhere: Record<string, unknown> = {
|
||||
OR: [
|
||||
{ email: { contains: parsed.q, mode: "insensitive" } },
|
||||
{ full_name: { contains: parsed.q, mode: "insensitive" } }
|
||||
]
|
||||
};
|
||||
if (isTenantScoped && tenantId) {
|
||||
userWhere.tenant_id = tenantId;
|
||||
}
|
||||
|
||||
const invoiceWhere: Record<string, unknown> = {
|
||||
OR: [
|
||||
{ invoice_number: { contains: parsed.q, mode: "insensitive" } },
|
||||
{ tenant_name: { contains: parsed.q, mode: "insensitive" } },
|
||||
{ payment_reference: { contains: parsed.q, mode: "insensitive" } }
|
||||
]
|
||||
};
|
||||
if (isTenantScoped && tenantId) {
|
||||
invoiceWhere.tenant_id = tenantId;
|
||||
}
|
||||
|
||||
const [vms, tenants, users, invoices, logs, alerts] = await Promise.all([
|
||||
prisma.virtualMachine.findMany({
|
||||
where: vmWhere as any,
|
||||
take: takePerType,
|
||||
orderBy: { updated_at: "desc" },
|
||||
select: {
|
||||
id: true,
|
||||
name: true,
|
||||
status: true,
|
||||
tenant_id: true,
|
||||
node: true,
|
||||
vmid: true,
|
||||
ip_address: true
|
||||
}
|
||||
}),
|
||||
prisma.tenant.findMany({
|
||||
where: tenantWhere as any,
|
||||
take: takePerType,
|
||||
orderBy: { updated_at: "desc" },
|
||||
select: {
|
||||
id: true,
|
||||
name: true,
|
||||
slug: true,
|
||||
status: true,
|
||||
owner_email: true
|
||||
}
|
||||
}),
|
||||
prisma.user.findMany({
|
||||
where: userWhere as any,
|
||||
take: takePerType,
|
||||
orderBy: { updated_at: "desc" },
|
||||
select: {
|
||||
id: true,
|
||||
email: true,
|
||||
full_name: true,
|
||||
role: true,
|
||||
tenant_id: true
|
||||
}
|
||||
}),
|
||||
prisma.invoice.findMany({
|
||||
where: invoiceWhere as any,
|
||||
take: takePerType,
|
||||
orderBy: { created_at: "desc" },
|
||||
select: {
|
||||
id: true,
|
||||
invoice_number: true,
|
||||
tenant_name: true,
|
||||
status: true,
|
||||
amount: true,
|
||||
currency: true
|
||||
}
|
||||
}),
|
||||
isTenantScoped
|
||||
? Promise.resolve([])
|
||||
: prisma.auditLog.findMany({
|
||||
where: {
|
||||
OR: [
|
||||
{ action: { contains: parsed.q, mode: "insensitive" } },
|
||||
{ resource_name: { contains: parsed.q, mode: "insensitive" } },
|
||||
{ actor_email: { contains: parsed.q, mode: "insensitive" } }
|
||||
]
|
||||
},
|
||||
take: takePerType,
|
||||
orderBy: { created_at: "desc" },
|
||||
select: {
|
||||
id: true,
|
||||
action: true,
|
||||
actor_email: true,
|
||||
resource_name: true,
|
||||
created_at: true
|
||||
}
|
||||
}),
|
||||
prisma.monitoringAlertEvent.findMany({
|
||||
where: {
|
||||
...(isTenantScoped && tenantId ? { OR: [{ tenant_id: tenantId }, { tenant_id: null }] } : {}),
|
||||
OR: [
|
||||
{ title: { contains: parsed.q, mode: "insensitive" } },
|
||||
{ message: { contains: parsed.q, mode: "insensitive" } },
|
||||
{ metric_key: { contains: parsed.q, mode: "insensitive" } }
|
||||
]
|
||||
},
|
||||
take: takePerType,
|
||||
orderBy: { created_at: "desc" },
|
||||
select: {
|
||||
id: true,
|
||||
title: true,
|
||||
severity: true,
|
||||
status: true,
|
||||
vm_id: true,
|
||||
node_id: true
|
||||
}
|
||||
})
|
||||
]);
|
||||
|
||||
const results = [
|
||||
...vms.map((item) => ({
|
||||
id: item.id,
|
||||
type: "virtual_machine",
|
||||
module_key: "vms",
|
||||
title: item.name,
|
||||
subtitle: `VM #${item.vmid} • ${item.status} • ${item.node}`,
|
||||
path: "/vms",
|
||||
context: item
|
||||
})),
|
||||
...tenants.map((item) => ({
|
||||
id: item.id,
|
||||
type: "tenant",
|
||||
module_key: "tenants",
|
||||
title: item.name,
|
||||
subtitle: `${item.slug} • ${item.status} • ${item.owner_email}`,
|
||||
path: "/tenants",
|
||||
context: item
|
||||
})),
|
||||
...users.map((item) => ({
|
||||
id: item.id,
|
||||
type: "user",
|
||||
module_key: "rbac",
|
||||
title: item.full_name || item.email,
|
||||
subtitle: `${item.email} • ${item.role}`,
|
||||
path: "/rbac",
|
||||
context: item
|
||||
})),
|
||||
...invoices.map((item) => ({
|
||||
id: item.id,
|
||||
type: "invoice",
|
||||
module_key: "billing",
|
||||
title: item.invoice_number,
|
||||
subtitle: `${item.tenant_name || "-"} • ${item.status} • ${item.amount} ${item.currency}`,
|
||||
path: "/billing",
|
||||
context: item
|
||||
})),
|
||||
...logs.map((item) => ({
|
||||
id: item.id,
|
||||
type: "audit_log",
|
||||
module_key: "audit_logs",
|
||||
title: item.action,
|
||||
subtitle: `${item.actor_email} • ${item.resource_name || "-"} • ${new Date(item.created_at).toLocaleString()}`,
|
||||
path: "/audit-logs",
|
||||
context: item
|
||||
})),
|
||||
...alerts.map((item) => ({
|
||||
id: item.id,
|
||||
type: "monitoring_alert",
|
||||
module_key: "monitoring",
|
||||
title: item.title,
|
||||
subtitle: `${item.severity} • ${item.status}`,
|
||||
path: "/monitoring",
|
||||
context: item
|
||||
}))
|
||||
];
|
||||
|
||||
return res.json({
|
||||
query: parsed.q,
|
||||
results: results.slice(0, parsed.limit)
|
||||
});
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
// Persist the global branding configuration as a single JSON settings row
// keyed "branding", and record the change in the audit log.
router.put("/branding", requireAuth, authorize("settings:manage"), async (req, res, next) => {
  try {
    // Empty bodies are allowed and fall back to the schema's defaults.
    const payload = brandingSchema.parse(req.body ?? {});
    const setting = await upsertSetting({
      key: "branding",
      value: payload
    });

    // Audit only after a successful write.
    await logAudit({
      action: "system.branding.update",
      resource_type: "SYSTEM",
      resource_id: setting.id,
      resource_name: "branding",
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: toPrismaJsonValue(payload),
      ip_address: req.ip
    });

    res.json(setting.value);
  } catch (error) {
    next(error);
  }
});

// Read the subscription policy, falling back to built-in defaults when the
// setting has never been saved.
router.get("/subscription-policy", requireAuth, authorize("settings:read"), async (_req, res, next) => {
  try {
    const policy = await getSetting("subscription_policy", {
      default_trial_days: 14,
      default_grace_days: 3,
      trial_vm_limit: 2,
      auto_suspend_on_expiry: true
    });
    res.json(policy);
  } catch (error) {
    next(error);
  }
});

// Replace the subscription policy and audit the change.
// NOTE(review): these policy defaults are duplicated in the GET route above
// and in the trial routes — consider extracting a shared constant.
router.put("/subscription-policy", requireAuth, authorize("settings:manage"), async (req, res, next) => {
  try {
    const payload = subscriptionPolicySchema.parse(req.body ?? {});
    const setting = await upsertSetting({
      key: "subscription_policy",
      value: payload
    });

    await logAudit({
      action: "system.subscription_policy.update",
      resource_type: "SYSTEM",
      resource_id: setting.id,
      resource_name: "subscription_policy",
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: toPrismaJsonValue(payload),
      ip_address: req.ip
    });

    res.json(setting.value);
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// Start (or restart) a trial for a tenant. The request body may override the
// stored subscription policy's trial length, grace period, and VM limit.
router.post("/trials/:tenantId/start", requireAuth, authorize("tenant:manage"), async (req, res, next) => {
  try {
    const payload = startTrialSchema.parse(req.body ?? {});
    const policy = await getSetting("subscription_policy", {
      default_trial_days: 14,
      default_grace_days: 3,
      trial_vm_limit: 2
    });
    const now = new Date();
    // Precedence for each knob: explicit request value → stored policy → hard default.
    const days = payload.days ?? Number(policy.default_trial_days ?? 14);
    const graceDays = payload.grace_days ?? Number(policy.default_grace_days ?? 3);
    const trialEndsAt = new Date(now.getTime() + days * 24 * 60 * 60 * 1000);
    const trialGraceEndsAt = new Date(trialEndsAt.getTime() + graceDays * 24 * 60 * 60 * 1000);

    // Restarting always clears any previous trial lock.
    const tenant = await prisma.tenant.update({
      where: { id: req.params.tenantId },
      data: {
        status: TenantStatus.TRIAL,
        trial_days: days,
        trial_starts_at: now,
        trial_ends_at: trialEndsAt,
        trial_grace_ends_at: trialGraceEndsAt,
        trial_locked: false,
        vm_limit: payload.vm_limit ?? Number(policy.trial_vm_limit ?? 2)
      }
    });

    await logAudit({
      action: "tenant.trial.start",
      resource_type: "TENANT",
      resource_id: tenant.id,
      resource_name: tenant.name,
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: toPrismaJsonValue({
        days,
        trial_ends_at: trialEndsAt.toISOString(),
        trial_grace_ends_at: trialGraceEndsAt.toISOString()
      }),
      ip_address: req.ip
    });

    res.json(tenant);
  } catch (error) {
    next(error);
  }
});

// Batch-expire trials: suspend and lock every TRIAL tenant whose trial end
// has passed. A no-op when auto_suspend_on_expiry is disabled.
// NOTE(review): the cutoff is trial_ends_at, not trial_grace_ends_at —
// confirm the grace window is intentionally ignored here. Also, the
// findMany/updateMany pair is not transactional, so tenants entering TRIAL
// in between could be missed or double-reported. TODO confirm.
router.post("/trials/expire", requireAuth, authorize("tenant:manage"), async (req, res, next) => {
  try {
    const now = new Date();
    const policy = await getSetting("subscription_policy", {
      auto_suspend_on_expiry: true
    });
    if (!policy.auto_suspend_on_expiry) {
      return res.json({ success: true, expired_count: 0, skipped: true });
    }

    const expiredTenants = await prisma.tenant.findMany({
      where: {
        status: TenantStatus.TRIAL,
        trial_ends_at: { lt: now }
      },
      select: {
        id: true,
        name: true
      }
    });

    if (expiredTenants.length === 0) {
      return res.json({ success: true, expired_count: 0 });
    }

    const ids = expiredTenants.map((tenant) => tenant.id);
    await prisma.tenant.updateMany({
      where: { id: { in: ids } },
      data: {
        status: TenantStatus.SUSPENDED,
        trial_locked: true
      }
    });

    await logAudit({
      action: "tenant.trial.expire.batch",
      resource_type: "TENANT",
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: toPrismaJsonValue({
        expired_tenants: expiredTenants
      }),
      ip_address: req.ip
    });

    return res.json({
      success: true,
      expired_count: expiredTenants.length,
      tenants: expiredTenants
    });
  } catch (error) {
    return next(error);
  }
});
|
||||
|
||||
// List CMS pages. By default only published pages are returned; pass
// ?include_drafts=true to list everything.
router.get("/cms/pages", requireAuth, authorize("settings:read"), async (req, res, next) => {
  try {
    const includeDrafts = req.query.include_drafts === "true";
    const pages = await prisma.cmsPage.findMany({
      where: includeDrafts ? undefined : { is_published: true },
      orderBy: [{ section: "asc" }, { updated_at: "desc" }]
    });
    res.json(pages);
  } catch (error) {
    next(error);
  }
});

// Create a CMS page, stamping the creator/updater from the authenticated user.
router.post("/cms/pages", requireAuth, authorize("settings:manage"), async (req, res, next) => {
  try {
    const payload = cmsPageSchema.parse(req.body ?? {});
    const page = await prisma.cmsPage.create({
      data: {
        ...payload,
        created_by: req.user!.email,
        updated_by: req.user!.email
      }
    });

    await logAudit({
      action: "cms.page.create",
      resource_type: "SYSTEM",
      resource_id: page.id,
      resource_name: page.slug,
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: toPrismaJsonValue({ section: page.section, is_published: page.is_published }),
      ip_address: req.ip
    });

    res.status(201).json(page);
  } catch (error) {
    next(error);
  }
});

// Partially update a CMS page. Rejects empty payloads; audit details record
// which fields changed (not the values).
router.patch("/cms/pages/:id", requireAuth, authorize("settings:manage"), async (req, res, next) => {
  try {
    const partial = cmsPageSchema.partial().parse(req.body ?? {});
    if (Object.keys(partial).length === 0) {
      throw new HttpError(400, "No fields provided", "VALIDATION_ERROR");
    }

    const page = await prisma.cmsPage.update({
      where: { id: req.params.id },
      data: {
        ...partial,
        updated_by: req.user!.email
      }
    });

    await logAudit({
      action: "cms.page.update",
      resource_type: "SYSTEM",
      resource_id: page.id,
      resource_name: page.slug,
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: toPrismaJsonValue({ updated_fields: Object.keys(partial) }),
      ip_address: req.ip
    });

    res.json(page);
  } catch (error) {
    next(error);
  }
});

// Delete a CMS page and audit the removal. Responds 204 with no body.
router.delete("/cms/pages/:id", requireAuth, authorize("settings:manage"), async (req, res, next) => {
  try {
    const page = await prisma.cmsPage.delete({
      where: { id: req.params.id }
    });

    await logAudit({
      action: "cms.page.delete",
      resource_type: "SYSTEM",
      resource_id: page.id,
      resource_name: page.slug,
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      ip_address: req.ip
    });

    res.status(204).send();
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
router.get("/cms/navigation", requireAuth, authorize("settings:read"), async (_req, res, next) => {
|
||||
try {
|
||||
const items = await prisma.siteNavigationItem.findMany({
|
||||
orderBy: [{ position: "asc" }, { sort_order: "asc" }]
|
||||
});
|
||||
res.json(items);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/cms/navigation", requireAuth, authorize("settings:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = navItemSchema.parse(req.body ?? {});
|
||||
const item = await prisma.siteNavigationItem.create({ data: payload });
|
||||
res.status(201).json(item);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/cms/navigation/:id", requireAuth, authorize("settings:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = navItemSchema.partial().parse(req.body ?? {});
|
||||
const item = await prisma.siteNavigationItem.update({
|
||||
where: { id: req.params.id },
|
||||
data: payload
|
||||
});
|
||||
res.json(item);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.delete("/cms/navigation/:id", requireAuth, authorize("settings:manage"), async (req, res, next) => {
|
||||
try {
|
||||
await prisma.siteNavigationItem.delete({ where: { id: req.params.id } });
|
||||
res.status(204).send();
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
||||
240
backend/src/services/announcement.service.ts
Normal file
240
backend/src/services/announcement.service.ts
Normal file
@@ -0,0 +1,240 @@
|
||||
import { Role, SettingType, type Prisma } from "@prisma/client";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { toPrismaJsonValue } from "../lib/prisma-json";
|
||||
|
||||
// Settings-table key under which the entire announcement center state
// (items + per-user read markers) is stored as one JSON document.
const ANNOUNCEMENT_SETTING_KEY = "announcement_center";

// Ordered severity levels accepted for an announcement.
export const ANNOUNCEMENT_SEVERITIES = ["INFO", "WARNING", "CRITICAL"] as const;
export type AnnouncementSeverity = (typeof ANNOUNCEMENT_SEVERITIES)[number];

// One stored announcement. All timestamps are ISO-8601 strings; an empty
// audience_roles array means "visible to every role".
export type AnnouncementItem = {
  id: string;
  title: string;
  message: string;
  severity: AnnouncementSeverity;
  audience_roles: Role[];
  is_active: boolean;
  published_at: string;
  expires_at: string | null;
  created_by: string;
  created_at: string;
  updated_at: string;
};

// Full persisted state: announcements plus, per user id, the list of
// announcement ids that user has read.
type AnnouncementCenterState = {
  items: AnnouncementItem[];
  reads: Record<string, string[]>;
};

// Lookup sets used when normalizing untrusted stored JSON.
const ROLE_VALUES = new Set(Object.values(Role));
const SEVERITY_VALUES = new Set<string>(ANNOUNCEMENT_SEVERITIES);
|
||||
|
||||
function nowIso() {
|
||||
return new Date().toISOString();
|
||||
}
|
||||
|
||||
function normalizeAnnouncementItem(value: unknown): AnnouncementItem | null {
|
||||
if (!value || typeof value !== "object" || Array.isArray(value)) return null;
|
||||
const entry = value as Record<string, unknown>;
|
||||
const id = typeof entry.id === "string" ? entry.id : null;
|
||||
const title = typeof entry.title === "string" ? entry.title.trim() : "";
|
||||
const message = typeof entry.message === "string" ? entry.message.trim() : "";
|
||||
const severityRaw = typeof entry.severity === "string" ? entry.severity.toUpperCase() : "INFO";
|
||||
const severity = SEVERITY_VALUES.has(severityRaw) ? (severityRaw as AnnouncementSeverity) : "INFO";
|
||||
const audienceRolesRaw = Array.isArray(entry.audience_roles) ? entry.audience_roles : [];
|
||||
const audience_roles = audienceRolesRaw.filter(
|
||||
(role): role is Role => typeof role === "string" && ROLE_VALUES.has(role as Role)
|
||||
);
|
||||
const is_active = typeof entry.is_active === "boolean" ? entry.is_active : true;
|
||||
const published_at = typeof entry.published_at === "string" ? entry.published_at : nowIso();
|
||||
const expires_at = typeof entry.expires_at === "string" ? entry.expires_at : null;
|
||||
const created_by = typeof entry.created_by === "string" ? entry.created_by : "system@proxpanel.local";
|
||||
const created_at = typeof entry.created_at === "string" ? entry.created_at : nowIso();
|
||||
const updated_at = typeof entry.updated_at === "string" ? entry.updated_at : created_at;
|
||||
|
||||
if (!id || title.length < 2 || message.length < 2) return null;
|
||||
|
||||
return {
|
||||
id,
|
||||
title,
|
||||
message,
|
||||
severity,
|
||||
audience_roles,
|
||||
is_active,
|
||||
published_at,
|
||||
expires_at,
|
||||
created_by,
|
||||
created_at,
|
||||
updated_at
|
||||
};
|
||||
}
|
||||
|
||||
function normalizeState(value: unknown): AnnouncementCenterState {
|
||||
if (!value || typeof value !== "object" || Array.isArray(value)) {
|
||||
return { items: [], reads: {} };
|
||||
}
|
||||
|
||||
const entry = value as Record<string, unknown>;
|
||||
const rawItems = Array.isArray(entry.items) ? entry.items : [];
|
||||
const items = rawItems
|
||||
.map((item) => normalizeAnnouncementItem(item))
|
||||
.filter((item): item is AnnouncementItem => Boolean(item))
|
||||
.sort((a, b) => b.published_at.localeCompare(a.published_at))
|
||||
.slice(0, 250);
|
||||
|
||||
const reads: Record<string, string[]> = {};
|
||||
const rawReads = entry.reads;
|
||||
if (rawReads && typeof rawReads === "object" && !Array.isArray(rawReads)) {
|
||||
for (const [userId, rawIds] of Object.entries(rawReads as Record<string, unknown>)) {
|
||||
if (!userId || !Array.isArray(rawIds)) continue;
|
||||
reads[userId] = rawIds.filter((id): id is string => typeof id === "string").slice(0, 500);
|
||||
}
|
||||
}
|
||||
|
||||
return { items, reads };
|
||||
}
|
||||
|
||||
/**
 * Persists the full announcement-center state as a single unencrypted JSON
 * settings row (key "announcement_center"), creating the row on first write.
 */
async function saveState(state: AnnouncementCenterState) {
  const value = toPrismaJsonValue(state) as Prisma.InputJsonValue;
  await prisma.setting.upsert({
    where: { key: ANNOUNCEMENT_SETTING_KEY },
    update: {
      value,
      is_encrypted: false
    },
    create: {
      key: ANNOUNCEMENT_SETTING_KEY,
      type: SettingType.GENERAL,
      value,
      is_encrypted: false
    }
  });
}
|
||||
|
||||
/**
 * Loads and normalizes the announcement-center state from the settings
 * table. When the setting row does not exist yet, the normalized empty
 * state is persisted so subsequent reads hit an initialized record
 * (self-seeding on first access).
 */
export async function getAnnouncementState() {
  const setting = await prisma.setting.findUnique({
    where: { key: ANNOUNCEMENT_SETTING_KEY },
    select: { value: true }
  });

  // normalizeState(undefined) yields the empty state, so a missing row is fine.
  const normalized = normalizeState(setting?.value);
  if (!setting) {
    await saveState(normalized);
  }
  return normalized;
}
|
||||
|
||||
function isAnnouncementVisibleToRole(item: AnnouncementItem, role: Role) {
|
||||
if (!item.is_active) return false;
|
||||
if (item.audience_roles.length === 0) return true;
|
||||
return item.audience_roles.includes(role);
|
||||
}
|
||||
|
||||
function isAnnouncementActive(item: AnnouncementItem, now: Date) {
|
||||
if (!item.is_active) return false;
|
||||
const publishedAt = new Date(item.published_at);
|
||||
if (Number.isNaN(publishedAt.getTime())) return false;
|
||||
if (publishedAt.getTime() > now.getTime()) return false;
|
||||
if (!item.expires_at) return true;
|
||||
const expiresAt = new Date(item.expires_at);
|
||||
if (Number.isNaN(expiresAt.getTime())) return true;
|
||||
return expiresAt.getTime() > now.getTime();
|
||||
}
|
||||
|
||||
export function buildInboxForUser(state: AnnouncementCenterState, input: { user_id: string; role: Role }) {
|
||||
const now = new Date();
|
||||
const readSet = new Set(state.reads[input.user_id] ?? []);
|
||||
const items = state.items
|
||||
.filter((item) => isAnnouncementActive(item, now))
|
||||
.filter((item) => isAnnouncementVisibleToRole(item, input.role))
|
||||
.map((item) => ({
|
||||
...item,
|
||||
is_read: readSet.has(item.id)
|
||||
}));
|
||||
|
||||
const unread_count = items.reduce((sum, item) => (item.is_read ? sum : sum + 1), 0);
|
||||
|
||||
return {
|
||||
items,
|
||||
unread_count,
|
||||
total_count: items.length
|
||||
};
|
||||
}
|
||||
|
||||
/**
 * Creates a new announcement or updates an existing one (when `input.id`
 * matches a stored item). On update the original id, created_by,
 * created_at, and — unless explicitly overridden — published_at are
 * preserved; updated_at is always refreshed. New items are inserted at the
 * front; the list is capped at 250 entries. Returns the stored item.
 */
export async function upsertAnnouncement(input: {
  id?: string;
  title: string;
  message: string;
  severity: AnnouncementSeverity;
  audience_roles: Role[];
  is_active: boolean;
  published_at?: string;
  expires_at?: string | null;
  actor_email: string;
}) {
  const state = await getAnnouncementState();
  const now = nowIso();
  const existingIndex = input.id ? state.items.findIndex((item) => item.id === input.id) : -1;

  const announcement: AnnouncementItem = {
    // New items get a timestamp+random id; collisions are unlikely but not impossible.
    id: existingIndex >= 0 ? state.items[existingIndex].id : `ann_${Date.now()}_${Math.floor(Math.random() * 10000)}`,
    title: input.title.trim(),
    message: input.message.trim(),
    severity: input.severity,
    audience_roles: input.audience_roles,
    is_active: input.is_active,
    published_at: input.published_at ?? (existingIndex >= 0 ? state.items[existingIndex].published_at : now),
    expires_at: input.expires_at ?? null,
    created_by: existingIndex >= 0 ? state.items[existingIndex].created_by : input.actor_email,
    created_at: existingIndex >= 0 ? state.items[existingIndex].created_at : now,
    updated_at: now
  };

  if (existingIndex >= 0) {
    state.items[existingIndex] = announcement;
  } else {
    state.items.unshift(announcement);
  }

  // Enforce the 250-item cap; the oldest tail entries fall off.
  state.items = state.items.slice(0, 250);
  await saveState(state);
  return announcement;
}
|
||||
|
||||
export async function deleteAnnouncement(id: string) {
|
||||
const state = await getAnnouncementState();
|
||||
const before = state.items.length;
|
||||
state.items = state.items.filter((item) => item.id !== id);
|
||||
if (state.items.length === before) {
|
||||
return { deleted: false };
|
||||
}
|
||||
|
||||
for (const userId of Object.keys(state.reads)) {
|
||||
state.reads[userId] = (state.reads[userId] ?? []).filter((itemId) => itemId !== id);
|
||||
}
|
||||
|
||||
await saveState(state);
|
||||
return { deleted: true };
|
||||
}
|
||||
|
||||
export async function markAnnouncementRead(input: { user_id: string; announcement_id: string }) {
|
||||
const state = await getAnnouncementState();
|
||||
const exists = state.items.some((item) => item.id === input.announcement_id);
|
||||
if (!exists) {
|
||||
return { updated: false };
|
||||
}
|
||||
const current = new Set(state.reads[input.user_id] ?? []);
|
||||
current.add(input.announcement_id);
|
||||
state.reads[input.user_id] = [...current].slice(-1000);
|
||||
await saveState(state);
|
||||
return { updated: true };
|
||||
}
|
||||
|
||||
export async function markAllAnnouncementsRead(input: { user_id: string; role: Role }) {
|
||||
const state = await getAnnouncementState();
|
||||
const inbox = buildInboxForUser(state, input);
|
||||
const ids = inbox.items.map((item) => item.id);
|
||||
state.reads[input.user_id] = ids;
|
||||
await saveState(state);
|
||||
return { updated: ids.length };
|
||||
}
|
||||
30
backend/src/services/audit.service.ts
Normal file
30
backend/src/services/audit.service.ts
Normal file
@@ -0,0 +1,30 @@
|
||||
import type { Prisma, ResourceType, Severity } from "@prisma/client";
|
||||
import { prisma } from "../lib/prisma";
|
||||
|
||||
// Shape of one audit event. Severity defaults to "INFO" when omitted.
type AuditInput = {
  action: string;
  resource_type: ResourceType;
  resource_id?: string;
  resource_name?: string;
  actor_email: string;
  actor_role?: string;
  severity?: Severity;
  details?: Prisma.InputJsonValue;
  ip_address?: string;
};

/**
 * Writes a single row to the audit log. Awaiting this inside request
 * handlers means an audit-write failure propagates to the caller —
 * presumably intentional so mutations without an audit trail surface as
 * errors; confirm against the project's audit policy.
 */
export async function logAudit(input: AuditInput) {
  await prisma.auditLog.create({
    data: {
      action: input.action,
      resource_type: input.resource_type,
      resource_id: input.resource_id,
      resource_name: input.resource_name,
      actor_email: input.actor_email,
      actor_role: input.actor_role,
      severity: input.severity ?? "INFO",
      details: input.details,
      ip_address: input.ip_address
    }
  });
}
|
||||
1086
backend/src/services/backup.service.ts
Normal file
1086
backend/src/services/backup.service.ts
Normal file
File diff suppressed because it is too large
Load Diff
245
backend/src/services/billing.service.ts
Normal file
245
backend/src/services/billing.service.ts
Normal file
@@ -0,0 +1,245 @@
|
||||
import { Prisma, InvoiceStatus, PaymentProvider } from "@prisma/client";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { logAudit } from "./audit.service";
|
||||
|
||||
function startOfHour(date = new Date()) {
|
||||
const d = new Date(date);
|
||||
d.setMinutes(0, 0, 0);
|
||||
return d;
|
||||
}
|
||||
|
||||
/**
 * Records one hour of usage for every RUNNING VM that has a billing plan,
 * bucketed to the current wall-clock hour. Idempotent per (vm, hour): an
 * existing record for the bucket is skipped. Writes a summary audit entry
 * and returns the number of records created plus the period start.
 *
 * NOTE(review): one findFirst + one create per VM (N+1 queries) — consider
 * batching for large fleets. The existence check is not backed by a unique
 * constraint visible here, so concurrent runs in the same hour could
 * double-insert; TODO confirm against the schema.
 */
export async function meterHourlyUsage(actorEmail = "system@proxpanel.local") {
  const periodStart = startOfHour();
  const periodEnd = new Date(periodStart.getTime() + 60 * 60 * 1000);

  const vms = await prisma.virtualMachine.findMany({
    where: { status: "RUNNING" },
    include: {
      tenant: true,
      billing_plan: true
    }
  });

  let created = 0;
  for (const vm of vms) {
    // VMs without a plan are not billable.
    if (!vm.billing_plan) continue;

    const exists = await prisma.usageRecord.findFirst({
      where: {
        vm_id: vm.id,
        period_start: periodStart,
        period_end: periodEnd
      }
    });
    if (exists) continue;

    // One full hour of usage at the plan's hourly rate.
    const hoursUsed = new Prisma.Decimal(1);
    const pricePerHour = vm.billing_plan.price_hourly;
    const totalCost = pricePerHour.mul(hoursUsed);

    await prisma.usageRecord.create({
      data: {
        vm_id: vm.id,
        vm_name: vm.name,
        tenant_id: vm.tenant_id,
        tenant_name: vm.tenant.name,
        billing_plan_id: vm.billing_plan_id ?? undefined,
        plan_name: vm.billing_plan.name,
        hours_used: hoursUsed,
        price_per_hour: pricePerHour,
        currency: vm.billing_plan.currency,
        total_cost: totalCost,
        period_start: periodStart,
        period_end: periodEnd,
        // Resource-hours for this one-hour bucket, derived from the VM's specs.
        cpu_hours: new Prisma.Decimal(vm.cpu_cores),
        ram_gb_hours: new Prisma.Decimal(vm.ram_mb / 1024),
        disk_gb_hours: new Prisma.Decimal(vm.disk_gb)
      }
    });
    created += 1;
  }

  await logAudit({
    action: "hourly_usage_metering",
    resource_type: "BILLING",
    actor_email: actorEmail,
    severity: "INFO",
    details: { period_start: periodStart.toISOString(), created_records: created }
  });

  return { created_records: created, period_start: periodStart.toISOString() };
}
|
||||
|
||||
function invoiceNumber() {
|
||||
const rand = Math.floor(1000 + Math.random() * 9000);
|
||||
return `INV-${Date.now()}-${rand}`;
|
||||
}
|
||||
|
||||
/**
 * Rolls every unbilled usage record into invoices, one invoice per
 * (tenant, currency) pair, with 7-day payment terms and per-record line
 * items. Used records are flagged billed and linked to their invoice.
 * Returns a summary of the invoices created.
 *
 * NOTE(review): the invoice create and the usageRecord.updateMany are not
 * wrapped in a transaction — a crash in between leaves records unbilled
 * while an invoice already exists, so a re-run would double-bill them.
 * Consider prisma.$transaction. Also, the grouping key is split on ":",
 * which assumes tenant ids never contain a colon — TODO confirm.
 */
export async function generateInvoicesFromUnbilledUsage(actorEmail = "system@proxpanel.local") {
  const usageRecords = await prisma.usageRecord.findMany({
    where: { billed: false },
    orderBy: { created_at: "asc" }
  });
  if (usageRecords.length === 0) {
    return { generated: 0, invoices: [] as Array<{ id: string; tenant_id: string; amount: string }> };
  }

  // Group records by tenant + currency so mixed-currency tenants get one
  // invoice per currency.
  const grouped = new Map<string, typeof usageRecords>();
  for (const item of usageRecords) {
    const key = `${item.tenant_id}:${item.currency}`;
    const current = grouped.get(key) ?? [];
    current.push(item);
    grouped.set(key, current);
  }

  const createdInvoices: Array<{ id: string; tenant_id: string; amount: string }> = [];

  for (const [key, records] of grouped.entries()) {
    const [tenantId] = key.split(":");
    // Decimal-safe sum of all record costs in this group.
    const amount = records.reduce((sum, record) => sum.add(record.total_cost), new Prisma.Decimal(0));
    const tenant = await prisma.tenant.findUniqueOrThrow({ where: { id: tenantId } });

    const invoice = await prisma.invoice.create({
      data: {
        invoice_number: invoiceNumber(),
        tenant_id: tenantId,
        tenant_name: tenant.name,
        status: InvoiceStatus.PENDING,
        amount,
        currency: records[0].currency,
        // Net-7 payment terms.
        due_date: new Date(Date.now() + 7 * 24 * 60 * 60 * 1000),
        payment_provider: tenant.payment_provider,
        line_items: records.map((r) => ({
          usage_record_id: r.id,
          vm_name: r.vm_name,
          period_start: r.period_start,
          period_end: r.period_end,
          hours_used: r.hours_used.toString(),
          unit_price: r.price_per_hour.toString(),
          amount: r.total_cost.toString()
        }))
      }
    });

    await prisma.usageRecord.updateMany({
      where: { id: { in: records.map((r) => r.id) } },
      data: {
        billed: true,
        invoice_id: invoice.id
      }
    });

    createdInvoices.push({
      id: invoice.id,
      tenant_id: invoice.tenant_id,
      amount: invoice.amount.toString()
    });
  }

  await logAudit({
    action: "invoice_batch_generation",
    resource_type: "BILLING",
    actor_email: actorEmail,
    severity: "INFO",
    details: {
      generated_invoices: createdInvoices.length
    }
  });

  return { generated: createdInvoices.length, invoices: createdInvoices };
}
|
||||
|
||||
/**
 * Marks a single invoice as PAID, stamping the payment date, provider, and
 * external payment reference, then writes an audit entry. Throws (via
 * Prisma) when the invoice id does not exist. Returns the updated invoice.
 */
export async function markInvoicePaid(
  invoiceId: string,
  paymentProvider: PaymentProvider,
  paymentReference: string,
  actorEmail: string
) {
  const invoice = await prisma.invoice.update({
    where: { id: invoiceId },
    data: {
      status: "PAID",
      paid_date: new Date(),
      payment_provider: paymentProvider,
      payment_reference: paymentReference
    }
  });

  await logAudit({
    action: "invoice_mark_paid",
    resource_type: "INVOICE",
    resource_id: invoice.id,
    resource_name: invoice.invoice_number,
    actor_email: actorEmail,
    severity: "INFO",
    details: { payment_provider: paymentProvider, payment_reference: paymentReference }
  });

  return invoice;
}
|
||||
|
||||
export async function updateOverdueInvoices(actorEmail = "system@proxpanel.local") {
|
||||
const result = await prisma.invoice.updateMany({
|
||||
where: {
|
||||
status: "PENDING",
|
||||
due_date: { lt: new Date() }
|
||||
},
|
||||
data: { status: "OVERDUE" }
|
||||
});
|
||||
|
||||
if (result.count > 0) {
|
||||
await logAudit({
|
||||
action: "invoice_overdue_scan",
|
||||
resource_type: "BILLING",
|
||||
actor_email: actorEmail,
|
||||
severity: "WARNING",
|
||||
details: { marked_overdue: result.count }
|
||||
});
|
||||
}
|
||||
|
||||
return result.count;
|
||||
}
|
||||
|
||||
function nextRunDate(schedule: "DAILY" | "WEEKLY" | "MONTHLY" | "MANUAL") {
|
||||
const now = new Date();
|
||||
if (schedule === "DAILY") return new Date(now.getTime() + 24 * 60 * 60 * 1000);
|
||||
if (schedule === "WEEKLY") return new Date(now.getTime() + 7 * 24 * 60 * 60 * 1000);
|
||||
if (schedule === "MONTHLY") return new Date(now.getTime() + 30 * 24 * 60 * 60 * 1000);
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
 * Re-queues every recurring backup whose next_run_at has arrived: resets it
 * to PENDING with cleared started/completed timestamps and advances
 * next_run_at per its schedule. Backups currently RUNNING (not in the
 * status filter) are left alone. Writes one audit entry when anything was
 * queued; returns the number of backups re-queued.
 *
 * NOTE(review): this only flips rows back to PENDING — presumably a
 * separate worker picks PENDING backups up and executes them; confirm.
 */
export async function processBackupSchedule(actorEmail = "system@proxpanel.local") {
  const now = new Date();
  const dueBackups = await prisma.backup.findMany({
    where: {
      schedule: { not: "MANUAL" },
      next_run_at: { lte: now },
      status: { in: ["PENDING", "COMPLETED", "FAILED"] }
    }
  });

  for (const backup of dueBackups) {
    const nextRunAt = nextRunDate(backup.schedule);
    await prisma.backup.update({
      where: { id: backup.id },
      data: {
        status: "PENDING",
        started_at: null,
        completed_at: null,
        next_run_at: nextRunAt
      }
    });
  }

  if (dueBackups.length > 0) {
    await logAudit({
      action: "backup_scheduler_run",
      resource_type: "BACKUP",
      actor_email: actorEmail,
      severity: "INFO",
      details: { queued_backups: dueBackups.length }
    });
  }

  return dueBackups.length;
}
|
||||
304
backend/src/services/module-access.service.ts
Normal file
304
backend/src/services/module-access.service.ts
Normal file
@@ -0,0 +1,304 @@
|
||||
import { Role, SettingType, type Prisma } from "@prisma/client";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { toPrismaJsonValue } from "../lib/prisma-json";
|
||||
|
||||
// Canonical list of feature modules the panel can gate per role. Keys are
// referenced by the frontend router paths and by ModulePolicy entries.
export const MODULE_KEYS = [
  "dashboard",
  "monitoring",
  "operations",
  "audit_logs",
  "vms",
  "nodes",
  "provisioning",
  "backups",
  "network",
  "security",
  "tenants",
  "client_area",
  "billing",
  "rbac",
  "profile",
  "system_management",
  "settings"
] as const;

export type ModuleKey = (typeof MODULE_KEYS)[number];

// Static metadata for one module: display label, description, frontend
// route, and the roles granted access when no policy override exists.
export type ModuleDefinition = {
  key: ModuleKey;
  label: string;
  description: string;
  path: string;
  default_roles: Role[];
};

// Per-module policy override: whether the module is enabled at all, and
// which roles may use it.
type ModulePolicyEntry = {
  enabled: boolean;
  roles: Role[];
};

// Full access policy: one entry per module key.
export type ModulePolicy = Record<ModuleKey, ModulePolicyEntry>;
|
||||
|
||||
export const MODULE_DEFINITIONS: ModuleDefinition[] = [
|
||||
{
|
||||
key: "dashboard",
|
||||
label: "Dashboard",
|
||||
description: "Executive dashboard and KPIs.",
|
||||
path: "/",
|
||||
default_roles: [Role.SUPER_ADMIN, Role.TENANT_ADMIN, Role.OPERATOR, Role.VIEWER]
|
||||
},
|
||||
{
|
||||
key: "monitoring",
|
||||
label: "Monitoring",
|
||||
description: "Health checks, alerts, and insights.",
|
||||
path: "/monitoring",
|
||||
default_roles: [Role.SUPER_ADMIN, Role.TENANT_ADMIN, Role.OPERATOR, Role.VIEWER]
|
||||
},
|
||||
{
|
||||
key: "operations",
|
||||
label: "Operations",
|
||||
description: "Operational queue and scheduled jobs.",
|
||||
path: "/operations",
|
||||
default_roles: [Role.SUPER_ADMIN, Role.OPERATOR, Role.TENANT_ADMIN]
|
||||
},
|
||||
{
|
||||
key: "audit_logs",
|
||||
label: "Audit Logs",
|
||||
description: "Immutable administrative audit events.",
|
||||
path: "/audit-logs",
|
||||
default_roles: [Role.SUPER_ADMIN, Role.OPERATOR, Role.TENANT_ADMIN, Role.VIEWER]
|
||||
},
|
||||
{
|
||||
key: "vms",
|
||||
label: "Virtual Machines",
|
||||
description: "VM inventory and lifecycle controls.",
|
||||
path: "/vms",
|
||||
default_roles: [Role.SUPER_ADMIN, Role.TENANT_ADMIN, Role.OPERATOR, Role.VIEWER]
|
||||
},
|
||||
{
|
||||
key: "nodes",
|
||||
label: "Nodes",
|
||||
description: "Hypervisor node visibility and status.",
|
||||
path: "/nodes",
|
||||
default_roles: [Role.SUPER_ADMIN, Role.TENANT_ADMIN, Role.OPERATOR, Role.VIEWER]
|
||||
},
|
||||
{
|
||||
key: "provisioning",
|
||||
label: "Provisioning",
|
||||
description: "Template, package, and service provisioning workflows.",
|
||||
path: "/provisioning",
|
||||
default_roles: [Role.SUPER_ADMIN, Role.TENANT_ADMIN, Role.OPERATOR]
|
||||
},
|
||||
{
|
||||
key: "backups",
|
||||
label: "Backups",
|
||||
description: "Backup policies, snapshots, and restore tasks.",
|
||||
path: "/backups",
|
||||
default_roles: [Role.SUPER_ADMIN, Role.TENANT_ADMIN, Role.OPERATOR, Role.VIEWER]
|
||||
},
|
||||
{
|
||||
key: "network",
|
||||
label: "Network",
|
||||
description: "IPAM pools, quotas, and private network operations.",
|
||||
path: "/network",
|
||||
default_roles: [Role.SUPER_ADMIN, Role.TENANT_ADMIN, Role.OPERATOR, Role.VIEWER]
|
||||
},
|
||||
{
|
||||
key: "security",
|
||||
label: "Security",
|
||||
description: "Security events and enforcement controls.",
|
||||
path: "/security",
|
||||
default_roles: [Role.SUPER_ADMIN, Role.TENANT_ADMIN, Role.OPERATOR, Role.VIEWER]
|
||||
},
|
||||
{
|
||||
key: "tenants",
|
||||
label: "Tenants",
|
||||
description: "Tenant and subscription lifecycle management.",
|
||||
path: "/tenants",
|
||||
default_roles: [Role.SUPER_ADMIN, Role.TENANT_ADMIN, Role.VIEWER]
|
||||
},
|
||||
{
|
||||
key: "client_area",
|
||||
label: "Client Area",
|
||||
description: "Tenant-facing service and usage controls.",
|
||||
path: "/client",
|
||||
default_roles: [Role.SUPER_ADMIN, Role.TENANT_ADMIN, Role.OPERATOR, Role.VIEWER]
|
||||
},
|
||||
{
|
||||
key: "billing",
|
||||
label: "Billing",
|
||||
description: "Invoices, usage records, and payment actions.",
|
||||
path: "/billing",
|
||||
default_roles: [Role.SUPER_ADMIN, Role.TENANT_ADMIN, Role.OPERATOR, Role.VIEWER]
|
||||
},
|
||||
{
|
||||
key: "rbac",
|
||||
label: "RBAC",
|
||||
description: "User lifecycle and access governance.",
|
||||
path: "/rbac",
|
||||
default_roles: [Role.SUPER_ADMIN]
|
||||
},
|
||||
{
|
||||
key: "profile",
|
||||
label: "Profile",
|
||||
description: "Identity profile and account security settings.",
|
||||
path: "/profile",
|
||||
default_roles: [Role.SUPER_ADMIN, Role.TENANT_ADMIN, Role.OPERATOR, Role.VIEWER]
|
||||
},
|
||||
{
|
||||
key: "system_management",
|
||||
label: "System Management",
|
||||
description: "Branding, CMS, and platform lifecycle controls.",
|
||||
path: "/system",
|
||||
default_roles: [Role.SUPER_ADMIN]
|
||||
},
|
||||
{
|
||||
key: "settings",
|
||||
label: "Settings",
|
||||
description: "Proxmox, billing, scheduler, and global settings.",
|
||||
path: "/settings",
|
||||
default_roles: [Role.SUPER_ADMIN, Role.OPERATOR]
|
||||
}
|
||||
];
|
||||
|
||||
const MODULE_POLICY_SETTING_KEY = "module_policy";
|
||||
|
||||
const MODULE_ROLE_VALUES = new Set(Object.values(Role));
|
||||
|
||||
let cache: { value: ModulePolicy; expires_at: number } | null = null;
|
||||
const CACHE_TTL_MS = 30_000;
|
||||
|
||||
function defaultPolicy(): ModulePolicy {
|
||||
return MODULE_DEFINITIONS.reduce((acc, item) => {
|
||||
acc[item.key] = {
|
||||
enabled: true,
|
||||
roles: [...item.default_roles]
|
||||
};
|
||||
return acc;
|
||||
}, {} as ModulePolicy);
|
||||
}
|
||||
|
||||
function normalizePolicy(value: unknown): ModulePolicy {
|
||||
const fallback = defaultPolicy();
|
||||
if (!value || typeof value !== "object" || Array.isArray(value)) {
|
||||
return fallback;
|
||||
}
|
||||
|
||||
const input = value as Record<string, unknown>;
|
||||
for (const module of MODULE_DEFINITIONS) {
|
||||
const candidate = input[module.key];
|
||||
if (!candidate || typeof candidate !== "object" || Array.isArray(candidate)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const entry = candidate as Record<string, unknown>;
|
||||
const enabled = typeof entry.enabled === "boolean" ? entry.enabled : true;
|
||||
const rawRoles = Array.isArray(entry.roles) ? entry.roles : module.default_roles;
|
||||
const roles = rawRoles
|
||||
.filter((role): role is Role => typeof role === "string" && MODULE_ROLE_VALUES.has(role as Role))
|
||||
.slice(0, 4);
|
||||
|
||||
fallback[module.key] = {
|
||||
enabled,
|
||||
roles: roles.length > 0 ? roles : [...module.default_roles]
|
||||
};
|
||||
}
|
||||
|
||||
return fallback;
|
||||
}
|
||||
|
||||
// Persists the module policy to the settings table under a single key.
// The value is serialized via toPrismaJsonValue and stored unencrypted;
// upsert keeps the write idempotent whether or not the row already exists.
async function savePolicy(policy: ModulePolicy) {
  const value = toPrismaJsonValue(policy) as Prisma.InputJsonValue;
  await prisma.setting.upsert({
    where: { key: MODULE_POLICY_SETTING_KEY },
    update: { value, is_encrypted: false },
    create: {
      key: MODULE_POLICY_SETTING_KEY,
      type: SettingType.GENERAL,
      value,
      is_encrypted: false
    }
  });
}
|
||||
|
||||
export function toModulePolicyResponse(policy: ModulePolicy) {
|
||||
return MODULE_DEFINITIONS.map((module) => ({
|
||||
key: module.key,
|
||||
label: module.label,
|
||||
description: module.description,
|
||||
path: module.path,
|
||||
enabled: policy[module.key].enabled,
|
||||
roles: policy[module.key].roles
|
||||
}));
|
||||
}
|
||||
|
||||
/**
 * Returns the effective module policy, serving from a 30s in-memory cache
 * unless `force_refresh` is set.
 *
 * Reads the raw setting row and normalizes it (malformed or missing entries
 * fall back to module defaults). When no row existed at all, the normalized
 * defaults are written back so the DB self-heals; note an existing-but-
 * malformed row is only normalized in memory and is NOT rewritten here.
 */
export async function getModulePolicy(options?: { force_refresh?: boolean }) {
  const forceRefresh = options?.force_refresh === true;
  const now = Date.now();
  // Cache hit: serve without touching the database.
  if (!forceRefresh && cache && cache.expires_at > now) {
    return cache.value;
  }

  const setting = await prisma.setting.findUnique({
    where: { key: MODULE_POLICY_SETTING_KEY },
    select: { value: true }
  });
  const normalized = normalizePolicy(setting?.value);

  // Self-heal invalid/missing policy into DB once discovered.
  if (!setting) {
    await savePolicy(normalized);
  }

  // Refresh the module-level cache regardless of where the value came from.
  cache = {
    value: normalized,
    expires_at: now + CACHE_TTL_MS
  };

  return normalized;
}
|
||||
|
||||
export async function updateModulePolicy(entries: Array<{ key: ModuleKey; enabled: boolean; roles: Role[] }>) {
|
||||
const merged = defaultPolicy();
|
||||
for (const entry of entries) {
|
||||
const roles = entry.roles.filter((role) => MODULE_ROLE_VALUES.has(role));
|
||||
merged[entry.key] = {
|
||||
enabled: entry.enabled,
|
||||
roles: roles.length > 0 ? roles : [...merged[entry.key].roles]
|
||||
};
|
||||
}
|
||||
|
||||
await savePolicy(merged);
|
||||
cache = {
|
||||
value: merged,
|
||||
expires_at: Date.now() + CACHE_TTL_MS
|
||||
};
|
||||
|
||||
return merged;
|
||||
}
|
||||
|
||||
export async function getUserModuleAccess(role: Role) {
|
||||
const policy = await getModulePolicy();
|
||||
|
||||
const access = MODULE_DEFINITIONS.reduce(
|
||||
(acc, module) => {
|
||||
const entry = policy[module.key];
|
||||
const allowed = role === Role.SUPER_ADMIN ? true : entry.enabled && entry.roles.includes(role);
|
||||
acc[module.key] = {
|
||||
allowed,
|
||||
enabled: entry.enabled,
|
||||
roles: entry.roles
|
||||
};
|
||||
return acc;
|
||||
},
|
||||
{} as Record<ModuleKey, { allowed: boolean; enabled: boolean; roles: Role[] }>
|
||||
);
|
||||
|
||||
return {
|
||||
modules: toModulePolicyResponse(policy).map((module) => ({
|
||||
...module,
|
||||
allowed: access[module.key].allowed
|
||||
})),
|
||||
access
|
||||
};
|
||||
}
|
||||
1454
backend/src/services/monitoring.service.ts
Normal file
1454
backend/src/services/monitoring.service.ts
Normal file
File diff suppressed because it is too large
Load Diff
1402
backend/src/services/network.service.ts
Normal file
1402
backend/src/services/network.service.ts
Normal file
File diff suppressed because it is too large
Load Diff
958
backend/src/services/operations.service.ts
Normal file
958
backend/src/services/operations.service.ts
Normal file
@@ -0,0 +1,958 @@
|
||||
import {
|
||||
OperationTaskStatus,
|
||||
OperationTaskType,
|
||||
PowerScheduleAction,
|
||||
Prisma,
|
||||
VmStatus
|
||||
} from "@prisma/client";
|
||||
import axios from "axios";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { decryptJson } from "../lib/security";
|
||||
import { restartVm, shutdownVm, startVm, stopVm } from "./proxmox.service";
|
||||
|
||||
type TaskCreateInput = {
|
||||
taskType: OperationTaskType;
|
||||
requestedBy?: string;
|
||||
vm?: {
|
||||
id: string;
|
||||
name: string;
|
||||
node: string;
|
||||
};
|
||||
payload?: Prisma.InputJsonValue;
|
||||
scheduledFor?: Date | null;
|
||||
status?: OperationTaskStatus;
|
||||
};
|
||||
|
||||
type TaskListInput = {
|
||||
status?: OperationTaskStatus;
|
||||
taskType?: OperationTaskType;
|
||||
vmId?: string;
|
||||
limit?: number;
|
||||
offset?: number;
|
||||
tenantId?: string | null;
|
||||
};
|
||||
|
||||
type PowerScheduleCreateInput = {
|
||||
vmId: string;
|
||||
action: PowerScheduleAction;
|
||||
cronExpression: string;
|
||||
timezone?: string;
|
||||
createdBy?: string;
|
||||
};
|
||||
|
||||
type PowerScheduleUpdateInput = {
|
||||
action?: PowerScheduleAction;
|
||||
cronExpression?: string;
|
||||
timezone?: string;
|
||||
enabled?: boolean;
|
||||
};
|
||||
|
||||
type ExecutePowerOptions = {
|
||||
scheduledFor?: Date | null;
|
||||
payload?: Prisma.InputJsonValue;
|
||||
};
|
||||
|
||||
export type OperationsPolicy = {
|
||||
max_retry_attempts: number;
|
||||
retry_backoff_minutes: number;
|
||||
notify_on_task_failure: boolean;
|
||||
notification_email: string | null;
|
||||
notification_webhook_url: string | null;
|
||||
email_gateway_url: string | null;
|
||||
};
|
||||
|
||||
const DEFAULT_OPERATIONS_POLICY: OperationsPolicy = {
|
||||
max_retry_attempts: 2,
|
||||
retry_backoff_minutes: 10,
|
||||
notify_on_task_failure: true,
|
||||
notification_email: null,
|
||||
notification_webhook_url: null,
|
||||
email_gateway_url: null
|
||||
};
|
||||
|
||||
function numberRange(min: number, max: number) {
|
||||
return Array.from({ length: max - min + 1 }, (_, idx) => min + idx);
|
||||
}
|
||||
|
||||
function parseSingleToken(token: string, min: number, max: number): number[] {
|
||||
if (token === "*") {
|
||||
return numberRange(min, max);
|
||||
}
|
||||
|
||||
if (token.includes("/")) {
|
||||
const [baseToken, stepToken] = token.split("/");
|
||||
const step = Number(stepToken);
|
||||
if (!Number.isInteger(step) || step <= 0) {
|
||||
throw new Error(`Invalid cron step: ${token}`);
|
||||
}
|
||||
|
||||
const baseValues = parseSingleToken(baseToken, min, max);
|
||||
const startValue = Math.min(...baseValues);
|
||||
return baseValues.filter((value) => (value - startValue) % step === 0);
|
||||
}
|
||||
|
||||
if (token.includes("-")) {
|
||||
const [startToken, endToken] = token.split("-");
|
||||
const start = Number(startToken);
|
||||
const end = Number(endToken);
|
||||
if (!Number.isInteger(start) || !Number.isInteger(end) || start > end) {
|
||||
throw new Error(`Invalid cron range: ${token}`);
|
||||
}
|
||||
if (start < min || end > max) {
|
||||
throw new Error(`Cron range out of bounds: ${token}`);
|
||||
}
|
||||
return numberRange(start, end);
|
||||
}
|
||||
|
||||
const value = Number(token);
|
||||
if (!Number.isInteger(value) || value < min || value > max) {
|
||||
throw new Error(`Invalid cron value: ${token}`);
|
||||
}
|
||||
return [value];
|
||||
}
|
||||
|
||||
function parseCronField(field: string, min: number, max: number): Set<number> {
|
||||
const values = new Set<number>();
|
||||
for (const rawToken of field.split(",")) {
|
||||
const token = rawToken.trim();
|
||||
if (!token) continue;
|
||||
for (const value of parseSingleToken(token, min, max)) {
|
||||
values.add(value);
|
||||
}
|
||||
}
|
||||
if (values.size === 0) {
|
||||
throw new Error(`Invalid cron field: ${field}`);
|
||||
}
|
||||
return values;
|
||||
}
|
||||
|
||||
function parseCronExpression(expression: string) {
|
||||
const parts = expression.trim().split(/\s+/);
|
||||
if (parts.length !== 5) {
|
||||
throw new Error("Cron expression must contain exactly 5 fields");
|
||||
}
|
||||
|
||||
return {
|
||||
minute: parseCronField(parts[0], 0, 59),
|
||||
hour: parseCronField(parts[1], 0, 23),
|
||||
dayOfMonth: parseCronField(parts[2], 1, 31),
|
||||
month: parseCronField(parts[3], 1, 12),
|
||||
dayOfWeek: parseCronField(parts[4], 0, 6)
|
||||
};
|
||||
}
|
||||
|
||||
// True when `date` (interpreted in the server's local timezone) satisfies
// every field of the parsed cron expression.
// NOTE(review): day-of-month and day-of-week are ANDed here; POSIX cron ORs
// those two fields when both are restricted. Looks like a deliberate
// simplification — confirm before relying on combined DOM/DOW schedules.
function cronMatchesParsed(date: Date, parsed: ReturnType<typeof parseCronExpression>) {
  return (
    parsed.minute.has(date.getMinutes()) &&
    parsed.hour.has(date.getHours()) &&
    parsed.dayOfMonth.has(date.getDate()) &&
    // getMonth() is 0-based; cron months are 1-based.
    parsed.month.has(date.getMonth() + 1) &&
    parsed.dayOfWeek.has(date.getDay())
  );
}
|
||||
|
||||
/**
 * Computes the next wall-clock minute strictly after `fromDate` that
 * matches the cron expression, or null if no match exists within the scan
 * horizon.
 *
 * Scans minute-by-minute for up to one year (60 * 24 * 365 candidates), so
 * an expression that can never fire (e.g. Feb 30) returns null instead of
 * looping forever. Matching is done in server-local time via
 * cronMatchesParsed; no timezone parameter is applied here, even though
 * schedules elsewhere in this file store one — TODO confirm intent.
 * Throws (via parseCronExpression) on a malformed expression.
 */
export function nextRunAt(cronExpression: string, fromDate = new Date()): Date | null {
  const parsed = parseCronExpression(cronExpression);
  // Truncate to the minute so candidates land exactly on minute boundaries.
  const base = new Date(fromDate);
  base.setSeconds(0, 0);

  const maxChecks = 60 * 24 * 365;
  // Start at index 1: "next" means strictly after the base minute.
  for (let index = 1; index <= maxChecks; index += 1) {
    const candidate = new Date(base.getTime() + index * 60 * 1000);
    if (cronMatchesParsed(candidate, parsed)) {
      return candidate;
    }
  }

  return null;
}
|
||||
|
||||
// Throws if the cron expression is malformed; returns nothing on success.
// Thin validation wrapper over parseCronExpression for callers that only
// need the check, not the parsed result.
export function validateCronExpression(cronExpression: string) {
  parseCronExpression(cronExpression);
}
|
||||
|
||||
export async function createOperationTask(input: TaskCreateInput) {
|
||||
return prisma.operationTask.create({
|
||||
data: {
|
||||
task_type: input.taskType,
|
||||
status: input.status ?? OperationTaskStatus.QUEUED,
|
||||
vm_id: input.vm?.id,
|
||||
vm_name: input.vm?.name,
|
||||
node: input.vm?.node,
|
||||
requested_by: input.requestedBy,
|
||||
payload: input.payload,
|
||||
scheduled_for: input.scheduledFor ?? undefined
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Transitions a task to RUNNING: stamps started_at and clears any error
// message left over from a previous attempt.
export async function markOperationTaskRunning(taskId: string) {
  return prisma.operationTask.update({
    where: { id: taskId },
    data: {
      status: OperationTaskStatus.RUNNING,
      started_at: new Date(),
      error_message: null
    }
  });
}
|
||||
|
||||
// Finalizes a task as SUCCESS, storing the optional result payload and
// Proxmox UPID and stamping completed_at.
export async function markOperationTaskSuccess(taskId: string, result?: Prisma.InputJsonValue, proxmoxUpid?: string) {
  return prisma.operationTask.update({
    where: { id: taskId },
    data: {
      status: OperationTaskStatus.SUCCESS,
      result,
      proxmox_upid: proxmoxUpid,
      completed_at: new Date()
    }
  });
}
|
||||
|
||||
// Finalizes a task as FAILED with the given message and stamps
// completed_at. Unlike handleOperationTaskFailure (defined below), this
// performs no retry scheduling and sends no notifications.
export async function markOperationTaskFailed(taskId: string, errorMessage: string) {
  return prisma.operationTask.update({
    where: { id: taskId },
    data: {
      status: OperationTaskStatus.FAILED,
      error_message: errorMessage,
      completed_at: new Date()
    }
  });
}
|
||||
|
||||
function asPlainObject(value: Prisma.JsonValue | Prisma.InputJsonValue | null | undefined): Record<string, unknown> {
|
||||
if (!value || typeof value !== "object" || Array.isArray(value)) return {};
|
||||
return value as Record<string, unknown>;
|
||||
}
|
||||
|
||||
function toPowerAction(value: unknown): PowerScheduleAction | null {
|
||||
if (typeof value !== "string") return null;
|
||||
const candidate = value.toUpperCase();
|
||||
return Object.values(PowerScheduleAction).includes(candidate as PowerScheduleAction)
|
||||
? (candidate as PowerScheduleAction)
|
||||
: null;
|
||||
}
|
||||
|
||||
function asStringOrNull(value: unknown) {
|
||||
return typeof value === "string" && value.trim().length > 0 ? value.trim() : null;
|
||||
}
|
||||
|
||||
function addMinutes(date: Date, minutes: number) {
|
||||
const copy = new Date(date);
|
||||
copy.setMinutes(copy.getMinutes() + minutes);
|
||||
return copy;
|
||||
}
|
||||
|
||||
export async function getOperationsPolicy(): Promise<OperationsPolicy> {
|
||||
const [setting, notificationsSetting] = await Promise.all([
|
||||
prisma.setting.findUnique({
|
||||
where: { key: "operations_policy" },
|
||||
select: { value: true }
|
||||
}),
|
||||
prisma.setting.findUnique({
|
||||
where: { key: "notifications" },
|
||||
select: { value: true }
|
||||
})
|
||||
]);
|
||||
|
||||
const settingValue = decryptJson(setting?.value) as unknown;
|
||||
const notificationsRaw = decryptJson(notificationsSetting?.value) as unknown;
|
||||
|
||||
const value =
|
||||
settingValue && typeof settingValue === "object" && !Array.isArray(settingValue)
|
||||
? (settingValue as Record<string, unknown>)
|
||||
: {};
|
||||
const notificationsValue =
|
||||
notificationsRaw && typeof notificationsRaw === "object" && !Array.isArray(notificationsRaw)
|
||||
? (notificationsRaw as Record<string, unknown>)
|
||||
: {};
|
||||
|
||||
const maxRetryAttemptsRaw = Number(value.max_retry_attempts);
|
||||
const retryBackoffRaw = Number(value.retry_backoff_minutes);
|
||||
|
||||
return {
|
||||
max_retry_attempts:
|
||||
Number.isInteger(maxRetryAttemptsRaw) && maxRetryAttemptsRaw >= 0
|
||||
? Math.min(maxRetryAttemptsRaw, 10)
|
||||
: DEFAULT_OPERATIONS_POLICY.max_retry_attempts,
|
||||
retry_backoff_minutes:
|
||||
Number.isInteger(retryBackoffRaw) && retryBackoffRaw >= 1
|
||||
? Math.min(retryBackoffRaw, 720)
|
||||
: DEFAULT_OPERATIONS_POLICY.retry_backoff_minutes,
|
||||
notify_on_task_failure:
|
||||
typeof value.notify_on_task_failure === "boolean"
|
||||
? value.notify_on_task_failure
|
||||
: DEFAULT_OPERATIONS_POLICY.notify_on_task_failure,
|
||||
notification_email: asStringOrNull(value.notification_email) ?? asStringOrNull(notificationsValue.ops_email),
|
||||
notification_webhook_url:
|
||||
asStringOrNull(value.notification_webhook_url) ??
|
||||
asStringOrNull(notificationsValue.monitoring_webhook_url) ??
|
||||
asStringOrNull(notificationsValue.alert_webhook_url),
|
||||
email_gateway_url:
|
||||
asStringOrNull(value.email_gateway_url) ??
|
||||
asStringOrNull(notificationsValue.email_gateway_url) ??
|
||||
asStringOrNull(notificationsValue.notification_email_webhook)
|
||||
};
|
||||
}
|
||||
|
||||
async function dispatchTaskFailureNotifications(input: {
|
||||
task: {
|
||||
id: string;
|
||||
task_type: OperationTaskType;
|
||||
vm_name: string | null;
|
||||
vm_id: string | null;
|
||||
node: string | null;
|
||||
retry_count: number;
|
||||
error_message: string | null;
|
||||
created_at: Date;
|
||||
completed_at: Date | null;
|
||||
requested_by: string | null;
|
||||
};
|
||||
policy: OperationsPolicy;
|
||||
stage: "retry_exhausted" | "non_retryable";
|
||||
}) {
|
||||
const destinationEmail = input.policy.notification_email;
|
||||
const emailGatewayUrl = input.policy.email_gateway_url;
|
||||
const webhookUrl = input.policy.notification_webhook_url;
|
||||
const eventPayload = {
|
||||
type: "operations.task_failure",
|
||||
stage: input.stage,
|
||||
task_id: input.task.id,
|
||||
task_type: input.task.task_type,
|
||||
vm_id: input.task.vm_id,
|
||||
vm_name: input.task.vm_name,
|
||||
node: input.task.node,
|
||||
retry_count: input.task.retry_count,
|
||||
error_message: input.task.error_message,
|
||||
created_at: input.task.created_at.toISOString(),
|
||||
completed_at: input.task.completed_at?.toISOString() ?? null,
|
||||
requested_by: input.task.requested_by
|
||||
};
|
||||
|
||||
const notifications: Array<{
|
||||
channel: "WEBHOOK" | "EMAIL";
|
||||
destination: string | null;
|
||||
status: "SENT" | "FAILED";
|
||||
provider_message: string;
|
||||
sent_at: Date | null;
|
||||
}> = [];
|
||||
|
||||
if (webhookUrl) {
|
||||
try {
|
||||
const response = await axios.post(webhookUrl, eventPayload, { timeout: 10_000 });
|
||||
notifications.push({
|
||||
channel: "WEBHOOK",
|
||||
destination: webhookUrl,
|
||||
status: "SENT",
|
||||
provider_message: `HTTP ${response.status}`,
|
||||
sent_at: new Date()
|
||||
});
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Webhook dispatch failed";
|
||||
notifications.push({
|
||||
channel: "WEBHOOK",
|
||||
destination: webhookUrl,
|
||||
status: "FAILED",
|
||||
provider_message: message.slice(0, 240),
|
||||
sent_at: null
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if (emailGatewayUrl && destinationEmail) {
|
||||
try {
|
||||
const response = await axios.post(
|
||||
emailGatewayUrl,
|
||||
{
|
||||
type: "operations.task_failure.email",
|
||||
to: destinationEmail,
|
||||
subject: `[Task Failure] ${input.task.task_type} ${input.task.vm_name ?? input.task.vm_id ?? ""}`.trim(),
|
||||
message: input.task.error_message ?? "Operation task failed",
|
||||
payload: eventPayload
|
||||
},
|
||||
{ timeout: 10_000 }
|
||||
);
|
||||
notifications.push({
|
||||
channel: "EMAIL",
|
||||
destination: destinationEmail,
|
||||
status: "SENT",
|
||||
provider_message: `HTTP ${response.status}`,
|
||||
sent_at: new Date()
|
||||
});
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Email dispatch failed";
|
||||
notifications.push({
|
||||
channel: "EMAIL",
|
||||
destination: destinationEmail,
|
||||
status: "FAILED",
|
||||
provider_message: message.slice(0, 240),
|
||||
sent_at: null
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if (notifications.length > 0) {
|
||||
await prisma.auditLog.createMany({
|
||||
data: notifications.map((notification) => ({
|
||||
action: "operations.task_failure_notification",
|
||||
resource_type: "SYSTEM",
|
||||
resource_id: input.task.id,
|
||||
resource_name: input.task.vm_name ?? input.task.id,
|
||||
actor_email: "system@proxpanel.local",
|
||||
actor_role: "SYSTEM",
|
||||
severity: notification.status === "FAILED" ? "ERROR" : "INFO",
|
||||
details: {
|
||||
channel: notification.channel,
|
||||
destination: notification.destination,
|
||||
dispatch_status: notification.status,
|
||||
provider_message: notification.provider_message,
|
||||
task_id: input.task.id,
|
||||
stage: input.stage
|
||||
}
|
||||
}))
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Central failure path for an operation task.
 *
 * VM_POWER tasks with remaining retry budget are flipped to RETRYING, with
 * retry_count incremented and scheduled_for pushed out by the policy's
 * backoff. Anything else — non-power tasks, or an exhausted budget — is
 * marked FAILED, and, when the policy enables it, failure notifications are
 * dispatched. Returns a small status object describing which path was
 * taken.
 */
async function handleOperationTaskFailure(taskId: string, errorMessage: string) {
  const policy = await getOperationsPolicy();
  const existing = await prisma.operationTask.findUnique({ where: { id: taskId } });

  if (!existing) {
    return { status: "missing" as const, retry_scheduled: false };
  }

  // Only VM power tasks are retryable, and only while the policy allows it.
  const canRetry =
    existing.task_type === OperationTaskType.VM_POWER &&
    existing.retry_count < policy.max_retry_attempts &&
    policy.max_retry_attempts > 0;

  if (canRetry) {
    const nextRetryAt = addMinutes(new Date(), policy.retry_backoff_minutes);
    await prisma.operationTask.update({
      where: { id: existing.id },
      data: {
        status: OperationTaskStatus.RETRYING,
        error_message: errorMessage,
        completed_at: new Date(),
        retry_count: existing.retry_count + 1,
        // The retry scheduler picks tasks up via scheduled_for.
        scheduled_for: nextRetryAt
      }
    });
    return { status: "retrying" as const, retry_scheduled: true, next_retry_at: nextRetryAt };
  }

  // Terminal failure: clear scheduled_for so the retry scheduler ignores it.
  const failed = await prisma.operationTask.update({
    where: { id: existing.id },
    data: {
      status: OperationTaskStatus.FAILED,
      error_message: errorMessage,
      completed_at: new Date(),
      scheduled_for: null
    }
  });

  if (policy.notify_on_task_failure) {
    await dispatchTaskFailureNotifications({
      task: failed,
      policy,
      // Power tasks reached here by exhausting retries; others were never
      // retryable to begin with.
      stage: existing.task_type === OperationTaskType.VM_POWER ? "retry_exhausted" : "non_retryable"
    });
  }

  return { status: "failed" as const, retry_scheduled: false };
}
|
||||
|
||||
export async function listOperationTasks(input: TaskListInput) {
|
||||
const where: Prisma.OperationTaskWhereInput = {};
|
||||
|
||||
if (input.status) where.status = input.status;
|
||||
if (input.taskType) where.task_type = input.taskType;
|
||||
if (input.vmId) where.vm_id = input.vmId;
|
||||
if (input.tenantId) {
|
||||
where.vm = { tenant_id: input.tenantId };
|
||||
}
|
||||
|
||||
const limit = Math.min(Math.max(input.limit ?? 50, 1), 200);
|
||||
const offset = Math.max(input.offset ?? 0, 0);
|
||||
|
||||
const [data, total] = await Promise.all([
|
||||
prisma.operationTask.findMany({
|
||||
where,
|
||||
include: {
|
||||
vm: {
|
||||
select: {
|
||||
id: true,
|
||||
name: true,
|
||||
tenant_id: true,
|
||||
node: true
|
||||
}
|
||||
}
|
||||
},
|
||||
orderBy: { created_at: "desc" },
|
||||
take: limit,
|
||||
skip: offset
|
||||
}),
|
||||
prisma.operationTask.count({ where })
|
||||
]);
|
||||
|
||||
const queue = await prisma.operationTask.groupBy({
|
||||
by: ["status"],
|
||||
_count: { status: true },
|
||||
where: input.tenantId ? { vm: { tenant_id: input.tenantId } } : undefined
|
||||
});
|
||||
|
||||
return {
|
||||
data,
|
||||
meta: {
|
||||
total,
|
||||
limit,
|
||||
offset,
|
||||
queue_summary: queue.reduce<Record<string, number>>((acc, item) => {
|
||||
acc[item.status] = item._count.status;
|
||||
return acc;
|
||||
}, {})
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
function vmStatusFromPowerAction(action: PowerScheduleAction): VmStatus {
|
||||
if (action === PowerScheduleAction.START || action === PowerScheduleAction.RESTART) {
|
||||
return VmStatus.RUNNING;
|
||||
}
|
||||
return VmStatus.STOPPED;
|
||||
}
|
||||
|
||||
// Loads the minimal VM projection needed for power actions (node, vmid,
// type, tenant). Throws a 404 HttpError when the VM does not exist, so
// callers can rely on a non-null result.
async function fetchVmForAction(vmId: string) {
  const vm = await prisma.virtualMachine.findUnique({
    where: { id: vmId },
    select: {
      id: true,
      name: true,
      node: true,
      vmid: true,
      type: true,
      tenant_id: true
    }
  });

  if (!vm) {
    throw new HttpError(404, "VM not found", "VM_NOT_FOUND");
  }

  return vm;
}
|
||||
|
||||
async function runPowerAction(vm: Awaited<ReturnType<typeof fetchVmForAction>>, action: PowerScheduleAction) {
|
||||
const type = vm.type === "LXC" ? "lxc" : "qemu";
|
||||
|
||||
if (action === PowerScheduleAction.START) {
|
||||
return startVm(vm.node, vm.vmid, type);
|
||||
}
|
||||
|
||||
if (action === PowerScheduleAction.STOP) {
|
||||
return stopVm(vm.node, vm.vmid, type);
|
||||
}
|
||||
|
||||
if (action === PowerScheduleAction.RESTART) {
|
||||
return restartVm(vm.node, vm.vmid, type);
|
||||
}
|
||||
|
||||
return shutdownVm(vm.node, vm.vmid, type);
|
||||
}
|
||||
|
||||
export async function executeVmPowerActionNow(
|
||||
vmId: string,
|
||||
action: PowerScheduleAction,
|
||||
actorEmail: string,
|
||||
options?: ExecutePowerOptions
|
||||
) {
|
||||
const vm = await fetchVmForAction(vmId);
|
||||
const rawPayload = asPlainObject(options?.payload ?? null);
|
||||
const taskPayload: Prisma.InputJsonObject = {
|
||||
...rawPayload,
|
||||
action,
|
||||
vm_id: vm.id
|
||||
};
|
||||
|
||||
const task = await createOperationTask({
|
||||
taskType: OperationTaskType.VM_POWER,
|
||||
vm: {
|
||||
id: vm.id,
|
||||
name: vm.name,
|
||||
node: vm.node
|
||||
},
|
||||
requestedBy: actorEmail,
|
||||
payload: taskPayload,
|
||||
scheduledFor: options?.scheduledFor
|
||||
});
|
||||
|
||||
await markOperationTaskRunning(task.id);
|
||||
|
||||
try {
|
||||
const upid = await runPowerAction(vm, action);
|
||||
await prisma.virtualMachine.update({
|
||||
where: { id: vm.id },
|
||||
data: {
|
||||
status: vmStatusFromPowerAction(action),
|
||||
proxmox_upid: upid ?? undefined
|
||||
}
|
||||
});
|
||||
|
||||
const resultPayload: Prisma.InputJsonObject = upid
|
||||
? {
|
||||
vm_id: vm.id,
|
||||
action,
|
||||
upid
|
||||
}
|
||||
: {
|
||||
vm_id: vm.id,
|
||||
action
|
||||
};
|
||||
|
||||
const updatedTask = await markOperationTaskSuccess(task.id, resultPayload, upid ?? undefined);
|
||||
return { task: updatedTask, upid };
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Unknown power action error";
|
||||
await handleOperationTaskFailure(task.id, message);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Lists power schedules, optionally scoped to one tenant via the owning
 * VM's tenant_id. Includes a compact VM projection and orders enabled
 * schedules first, then by soonest next_run_at, then newest.
 */
export async function listPowerSchedules(tenantId?: string | null) {
  const where: Prisma.PowerScheduleWhereInput = tenantId
    ? {
        vm: {
          tenant_id: tenantId
        }
      }
    : {};

  return prisma.powerSchedule.findMany({
    where,
    include: {
      vm: {
        select: {
          id: true,
          name: true,
          node: true,
          tenant_id: true,
          status: true
        }
      }
    },
    orderBy: [
      { enabled: "desc" },
      { next_run_at: "asc" },
      { created_at: "desc" }
    ]
  });
}
|
||||
|
||||
export async function createPowerSchedule(input: PowerScheduleCreateInput) {
|
||||
validateCronExpression(input.cronExpression);
|
||||
const vm = await fetchVmForAction(input.vmId);
|
||||
|
||||
const nextRun = nextRunAt(input.cronExpression, new Date());
|
||||
|
||||
return prisma.powerSchedule.create({
|
||||
data: {
|
||||
vm_id: vm.id,
|
||||
action: input.action,
|
||||
cron_expression: input.cronExpression,
|
||||
timezone: input.timezone ?? "UTC",
|
||||
next_run_at: nextRun,
|
||||
created_by: input.createdBy
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
 * Partially updates a power schedule. Omitted fields are left untouched
 * (undefined values are ignored by Prisma's update), except next_run_at,
 * which is always recomputed: from the (possibly new) cron expression when
 * the schedule ends up enabled, or cleared to null when disabled.
 * Throws a 404 HttpError when the schedule does not exist, and a
 * validation error (via validateCronExpression) on a malformed expression.
 */
export async function updatePowerSchedule(scheduleId: string, input: PowerScheduleUpdateInput) {
  const existing = await prisma.powerSchedule.findUnique({ where: { id: scheduleId } });
  if (!existing) {
    throw new HttpError(404, "Power schedule not found", "POWER_SCHEDULE_NOT_FOUND");
  }

  if (input.cronExpression) {
    validateCronExpression(input.cronExpression);
  }

  // Merge with the stored row to decide the effective cron/enabled state.
  const cronExpression = input.cronExpression ?? existing.cron_expression;
  const enabled = input.enabled ?? existing.enabled;
  const nextRun = enabled ? nextRunAt(cronExpression, new Date()) : null;

  return prisma.powerSchedule.update({
    where: { id: scheduleId },
    data: {
      action: input.action,
      cron_expression: input.cronExpression,
      timezone: input.timezone,
      enabled: input.enabled,
      next_run_at: nextRun
    }
  });
}
|
||||
|
||||
// Hard-deletes a power schedule row; Prisma rejects with a not-found error
// when the id is unknown.
export async function deletePowerSchedule(scheduleId: string) {
  return prisma.powerSchedule.delete({ where: { id: scheduleId } });
}
|
||||
|
||||
/**
 * Executes enabled power schedules whose `next_run_at` has passed.
 *
 * Up to 100 due schedules are scanned per call. Each one is "claimed" with a
 * conditional updateMany (same predicate as the scan) so that concurrent
 * workers cannot double-fire the same schedule: whichever process flips
 * `next_run_at` first wins; the loser sees count === 0 and counts a skip.
 *
 * @param actorEmail Attributed actor for the resulting power actions.
 * @returns Counters: { scanned, executed, failed, skipped }.
 */
export async function processDuePowerSchedules(actorEmail = "system@proxpanel.local") {
  const now = new Date();
  const dueSchedules = await prisma.powerSchedule.findMany({
    where: {
      enabled: true,
      next_run_at: {
        lte: now
      }
    },
    include: {
      vm: {
        select: {
          id: true,
          name: true,
          node: true,
          vmid: true,
          type: true
        }
      }
    },
    orderBy: {
      next_run_at: "asc"
    },
    take: 100
  });

  let executed = 0;
  let failed = 0;
  let skipped = 0;

  for (const schedule of dueSchedules) {
    const nextRun = nextRunAt(schedule.cron_expression, now);
    // Atomic claim: only succeeds if the schedule is still enabled and due.
    // Advancing next_run_at here is what prevents other workers from claiming it.
    const claim = await prisma.powerSchedule.updateMany({
      where: {
        id: schedule.id,
        enabled: true,
        next_run_at: {
          lte: now
        }
      },
      data: {
        last_run_at: now,
        next_run_at: nextRun,
        // No computable next occurrence → disable the schedule so it stops firing.
        enabled: nextRun ? schedule.enabled : false
      }
    });

    if (claim.count === 0) {
      // Another worker claimed it (or it was disabled) between scan and claim.
      skipped += 1;
      continue;
    }

    // Provenance payload recorded with the resulting operation task.
    const payload: Prisma.InputJsonValue = {
      source: "power_schedule",
      schedule_id: schedule.id,
      action: schedule.action
    };

    try {
      await executeVmPowerActionNow(schedule.vm_id, schedule.action, actorEmail, {
        payload,
        scheduledFor: schedule.next_run_at
      });
      executed += 1;
    } catch {
      // The claim already advanced next_run_at, so a failure here is not retried
      // until the next scheduled occurrence.
      failed += 1;
    }
  }

  return {
    scanned: dueSchedules.length,
    executed,
    failed,
    skipped
  };
}
|
||||
|
||||
/**
 * Re-executes VM power operation tasks that are in RETRYING state and due.
 *
 * Up to 100 due tasks are scanned per call. Each is claimed via a conditional
 * updateMany (RETRYING → RUNNING) so concurrent workers cannot execute the
 * same retry twice. Outcomes feed counters and, when anything happened, an
 * audit-log record summarizing the cycle.
 *
 * @param actorEmail Actor recorded on the audit-log entry.
 * @returns Counters: { scanned, executed, succeeded, failed, rescheduled,
 *          invalid_payload, skipped }.
 */
export async function processDueOperationRetries(actorEmail = "system@proxpanel.local") {
  const now = new Date();
  const dueRetries = await prisma.operationTask.findMany({
    where: {
      task_type: OperationTaskType.VM_POWER,
      status: OperationTaskStatus.RETRYING,
      scheduled_for: {
        lte: now
      }
    },
    orderBy: { scheduled_for: "asc" },
    take: 100
  });

  let executed = 0;
  let succeeded = 0;
  let failed = 0;
  let rescheduled = 0;
  let invalidPayload = 0;
  let skipped = 0;

  for (const task of dueRetries) {
    const claimedAt = new Date();
    // Atomic claim: flips the task to RUNNING only if it is still a due RETRYING
    // VM_POWER task. Losing the race yields count === 0 below.
    const claim = await prisma.operationTask.updateMany({
      where: {
        id: task.id,
        task_type: OperationTaskType.VM_POWER,
        status: OperationTaskStatus.RETRYING,
        scheduled_for: {
          lte: now
        }
      },
      data: {
        status: OperationTaskStatus.RUNNING,
        started_at: claimedAt,
        error_message: null,
        completed_at: null
      }
    });

    if (claim.count === 0) {
      // Another worker claimed this task between scan and claim.
      skipped += 1;
      continue;
    }

    executed += 1;
    const payload = asPlainObject(task.payload as Prisma.JsonValue | null);
    const action = toPowerAction(payload.action);

    if (!task.vm_id || !action) {
      // Payload does not describe a runnable power action; fail the task
      // permanently rather than retrying something we cannot interpret.
      invalidPayload += 1;
      await handleOperationTaskFailure(task.id, "Retry payload missing actionable power action");
      continue;
    }

    try {
      const vm = await fetchVmForAction(task.vm_id);
      const upid = await runPowerAction(vm, action);
      // Reflect the requested transition on the VM record; keep the Proxmox
      // task id (UPID) when one was returned.
      await prisma.virtualMachine.update({
        where: { id: vm.id },
        data: {
          status: vmStatusFromPowerAction(action),
          proxmox_upid: upid ?? undefined
        }
      });

      // Only include `upid` in the success payload when it exists, so the
      // stored JSON never carries an undefined/null upid key.
      const resultPayload: Prisma.InputJsonObject = upid
        ? {
            retry_of_task: task.id,
            vm_id: vm.id,
            action,
            upid
          }
        : {
            retry_of_task: task.id,
            vm_id: vm.id,
            action
          };

      await markOperationTaskSuccess(task.id, resultPayload, upid ?? undefined);
      succeeded += 1;
    } catch (error) {
      const message = error instanceof Error ? error.message : "Retry power action failed";
      // The failure handler decides whether another retry gets scheduled.
      const failureResult = await handleOperationTaskFailure(task.id, message);
      failed += 1;
      if (failureResult.retry_scheduled) {
        rescheduled += 1;
      }
    }
  }

  // Record an audit entry whenever the cycle did (or tried to do) any work.
  if (dueRetries.length > 0 || failed > 0 || rescheduled > 0) {
    await prisma.auditLog.create({
      data: {
        action: "operations.retry_cycle",
        resource_type: "SYSTEM",
        resource_name: "Operation Retry Worker",
        actor_email: actorEmail,
        actor_role: "SYSTEM",
        severity: failed > 0 ? "WARNING" : "INFO",
        details: {
          scanned: dueRetries.length,
          executed,
          succeeded,
          failed,
          rescheduled,
          invalid_payload: invalidPayload,
          skipped
        }
      }
    });
  }

  return {
    scanned: dueRetries.length,
    executed,
    succeeded,
    failed,
    rescheduled,
    invalid_payload: invalidPayload,
    skipped
  };
}
|
||||
|
||||
export async function operationQueueInsights(tenantId?: string | null) {
|
||||
const now = new Date();
|
||||
const staleThreshold = addMinutes(now, -15);
|
||||
const dayAgo = addMinutes(now, -24 * 60);
|
||||
|
||||
const tenantWhere: Prisma.OperationTaskWhereInput = tenantId ? { vm: { tenant_id: tenantId } } : {};
|
||||
|
||||
const [statusBuckets, staleQueued, failed24h, dueRetries, powerSchedulesDue] = await Promise.all([
|
||||
prisma.operationTask.groupBy({
|
||||
by: ["status"],
|
||||
_count: { status: true },
|
||||
where: tenantWhere
|
||||
}),
|
||||
prisma.operationTask.count({
|
||||
where: {
|
||||
...tenantWhere,
|
||||
status: OperationTaskStatus.QUEUED,
|
||||
created_at: { lte: staleThreshold }
|
||||
}
|
||||
}),
|
||||
prisma.operationTask.count({
|
||||
where: {
|
||||
...tenantWhere,
|
||||
status: OperationTaskStatus.FAILED,
|
||||
completed_at: { gte: dayAgo }
|
||||
}
|
||||
}),
|
||||
prisma.operationTask.count({
|
||||
where: {
|
||||
...tenantWhere,
|
||||
status: OperationTaskStatus.RETRYING,
|
||||
scheduled_for: { lte: now }
|
||||
}
|
||||
}),
|
||||
prisma.powerSchedule.count({
|
||||
where: {
|
||||
enabled: true,
|
||||
next_run_at: { lte: now },
|
||||
...(tenantId ? { vm: { tenant_id: tenantId } } : {})
|
||||
}
|
||||
})
|
||||
]);
|
||||
|
||||
const queueSummary = statusBuckets.reduce<Record<string, number>>((acc, bucket) => {
|
||||
acc[bucket.status] = bucket._count.status;
|
||||
return acc;
|
||||
}, {});
|
||||
|
||||
return {
|
||||
generated_at: now.toISOString(),
|
||||
queue_summary: queueSummary,
|
||||
stale_queued_tasks: staleQueued,
|
||||
failed_tasks_24h: failed24h,
|
||||
due_retries: dueRetries,
|
||||
due_power_schedules: powerSchedulesDue
|
||||
};
|
||||
}
|
||||
192
backend/src/services/payment.service.ts
Normal file
192
backend/src/services/payment.service.ts
Normal file
@@ -0,0 +1,192 @@
|
||||
import axios from "axios";
|
||||
import crypto from "crypto";
|
||||
import { PaymentProvider } from "@prisma/client";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { decryptJson } from "../lib/security";
|
||||
import { markInvoicePaid } from "./billing.service";
|
||||
|
||||
/**
 * Shape of the decrypted "payment" settings record.
 * The `*_previous` fields hold the prior secret/hash so webhook verification
 * keeps working during key rotation.
 */
type PaymentSettings = {
  default_provider?: "paystack" | "flutterwave" | "manual";
  paystack_public?: string;
  paystack_secret?: string;           // current Paystack secret key (also signs webhooks)
  paystack_secret_previous?: string;  // previous secret, honored during rotation
  flutterwave_public?: string;
  flutterwave_secret?: string;
  flutterwave_secret_previous?: string;
  flutterwave_webhook_hash?: string;           // verif-hash expected on Flutterwave webhooks
  flutterwave_webhook_hash_previous?: string;  // previous hash, honored during rotation
  callback_url?: string;  // redirect target after checkout completes
};
|
||||
|
||||
async function getPaymentSettings(): Promise<PaymentSettings> {
|
||||
const setting = await prisma.setting.findUnique({
|
||||
where: { key: "payment" }
|
||||
});
|
||||
return decryptJson<PaymentSettings>(setting?.value) ?? {};
|
||||
}
|
||||
|
||||
function normalizeProvider(provider: string | undefined, fallback: string): PaymentProvider {
|
||||
const value = (provider ?? fallback).toLowerCase();
|
||||
if (value === "paystack") return PaymentProvider.PAYSTACK;
|
||||
if (value === "flutterwave") return PaymentProvider.FLUTTERWAVE;
|
||||
return PaymentProvider.MANUAL;
|
||||
}
|
||||
|
||||
export async function createInvoicePaymentLink(invoiceId: string, requestedProvider?: string) {
|
||||
const invoice = await prisma.invoice.findUnique({
|
||||
where: { id: invoiceId },
|
||||
include: { tenant: true }
|
||||
});
|
||||
if (!invoice) {
|
||||
throw new HttpError(404, "Invoice not found", "INVOICE_NOT_FOUND");
|
||||
}
|
||||
|
||||
const settings = await getPaymentSettings();
|
||||
const provider = normalizeProvider(requestedProvider, settings.default_provider ?? "manual");
|
||||
if (provider === PaymentProvider.MANUAL) {
|
||||
throw new HttpError(400, "Manual payment provider cannot generate online links", "MANUAL_PROVIDER");
|
||||
}
|
||||
|
||||
const reference = invoice.payment_reference ?? `PAY-${invoice.invoice_number}-${Date.now()}`;
|
||||
|
||||
if (provider === PaymentProvider.PAYSTACK) {
|
||||
if (!settings.paystack_secret) {
|
||||
throw new HttpError(400, "Paystack secret key is missing", "PAYSTACK_CONFIG_MISSING");
|
||||
}
|
||||
const response = await axios.post(
|
||||
"https://api.paystack.co/transaction/initialize",
|
||||
{
|
||||
email: invoice.tenant.owner_email,
|
||||
amount: Math.round(Number(invoice.amount) * 100),
|
||||
reference,
|
||||
currency: invoice.currency,
|
||||
callback_url: settings.callback_url,
|
||||
metadata: {
|
||||
invoice_id: invoice.id,
|
||||
tenant_id: invoice.tenant_id
|
||||
}
|
||||
},
|
||||
{
|
||||
headers: {
|
||||
Authorization: `Bearer ${settings.paystack_secret}`,
|
||||
"Content-Type": "application/json"
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
const paymentUrl = response.data?.data?.authorization_url as string | undefined;
|
||||
await prisma.invoice.update({
|
||||
where: { id: invoice.id },
|
||||
data: {
|
||||
status: "PENDING",
|
||||
payment_provider: provider,
|
||||
payment_reference: reference,
|
||||
payment_url: paymentUrl
|
||||
}
|
||||
});
|
||||
return { provider: "paystack", payment_url: paymentUrl, reference };
|
||||
}
|
||||
|
||||
if (!settings.flutterwave_secret) {
|
||||
throw new HttpError(400, "Flutterwave secret key is missing", "FLUTTERWAVE_CONFIG_MISSING");
|
||||
}
|
||||
const response = await axios.post(
|
||||
"https://api.flutterwave.com/v3/payments",
|
||||
{
|
||||
tx_ref: reference,
|
||||
amount: Number(invoice.amount),
|
||||
currency: invoice.currency,
|
||||
redirect_url: settings.callback_url,
|
||||
customer: {
|
||||
email: invoice.tenant.owner_email,
|
||||
name: invoice.tenant.name
|
||||
},
|
||||
customizations: {
|
||||
title: "ProxPanel Invoice Payment",
|
||||
description: `Invoice ${invoice.invoice_number}`
|
||||
},
|
||||
meta: {
|
||||
invoice_id: invoice.id,
|
||||
tenant_id: invoice.tenant_id
|
||||
}
|
||||
},
|
||||
{
|
||||
headers: {
|
||||
Authorization: `Bearer ${settings.flutterwave_secret}`,
|
||||
"Content-Type": "application/json"
|
||||
}
|
||||
}
|
||||
);
|
||||
const paymentUrl = response.data?.data?.link as string | undefined;
|
||||
await prisma.invoice.update({
|
||||
where: { id: invoice.id },
|
||||
data: {
|
||||
status: "PENDING",
|
||||
payment_provider: provider,
|
||||
payment_reference: reference,
|
||||
payment_url: paymentUrl
|
||||
}
|
||||
});
|
||||
return { provider: "flutterwave", payment_url: paymentUrl, reference };
|
||||
}
|
||||
|
||||
/**
 * Records an offline (manual) payment for an invoice by delegating to the
 * billing service with the MANUAL provider.
 *
 * @param invoiceId  Invoice being settled.
 * @param reference  Operator-supplied payment reference (e.g. bank transfer id).
 * @param actorEmail Operator recorded as the actor.
 */
export async function handleManualInvoicePayment(invoiceId: string, reference: string, actorEmail: string) {
  return markInvoicePaid(invoiceId, PaymentProvider.MANUAL, reference, actorEmail);
}
|
||||
|
||||
export async function verifyPaystackSignature(signature: string | undefined, rawBody: string | undefined) {
|
||||
if (!signature || !rawBody) return false;
|
||||
const settings = await getPaymentSettings();
|
||||
const secrets = [settings.paystack_secret, settings.paystack_secret_previous].filter(
|
||||
(value): value is string => typeof value === "string" && value.trim().length > 0
|
||||
);
|
||||
if (secrets.length === 0) return false;
|
||||
|
||||
return secrets.some((secret) => {
|
||||
const expected = crypto.createHmac("sha512", secret).update(rawBody).digest("hex");
|
||||
return expected === signature;
|
||||
});
|
||||
}
|
||||
|
||||
export async function verifyFlutterwaveSignature(signature: string | undefined) {
|
||||
const settings = await getPaymentSettings();
|
||||
const validHashes = [settings.flutterwave_webhook_hash, settings.flutterwave_webhook_hash_previous].filter(
|
||||
(value): value is string => typeof value === "string" && value.trim().length > 0
|
||||
);
|
||||
if (validHashes.length === 0 || !signature) return false;
|
||||
return validHashes.includes(signature);
|
||||
}
|
||||
|
||||
export async function processPaystackWebhook(payload: any) {
|
||||
if (payload?.event !== "charge.success") return { handled: false };
|
||||
const reference = payload?.data?.reference as string | undefined;
|
||||
if (!reference) return { handled: false };
|
||||
|
||||
const invoice = await prisma.invoice.findFirst({
|
||||
where: { payment_reference: reference }
|
||||
});
|
||||
if (!invoice) return { handled: false };
|
||||
|
||||
if (invoice.status !== "PAID") {
|
||||
await markInvoicePaid(invoice.id, PaymentProvider.PAYSTACK, reference, "webhook@paystack");
|
||||
}
|
||||
return { handled: true, invoice_id: invoice.id };
|
||||
}
|
||||
|
||||
export async function processFlutterwaveWebhook(payload: any) {
|
||||
const status = payload?.status?.toLowerCase();
|
||||
if (status !== "successful") return { handled: false };
|
||||
const reference = (payload?.txRef ?? payload?.tx_ref) as string | undefined;
|
||||
if (!reference) return { handled: false };
|
||||
|
||||
const invoice = await prisma.invoice.findFirst({
|
||||
where: { payment_reference: reference }
|
||||
});
|
||||
if (!invoice) return { handled: false };
|
||||
|
||||
if (invoice.status !== "PAID") {
|
||||
await markInvoicePaid(invoice.id, PaymentProvider.FLUTTERWAVE, reference, "webhook@flutterwave");
|
||||
}
|
||||
return { handled: true, invoice_id: invoice.id };
|
||||
}
|
||||
1123
backend/src/services/provisioning.service.ts
Normal file
1123
backend/src/services/provisioning.service.ts
Normal file
File diff suppressed because it is too large
Load Diff
1452
backend/src/services/proxmox.service.ts
Normal file
1452
backend/src/services/proxmox.service.ts
Normal file
File diff suppressed because it is too large
Load Diff
495
backend/src/services/scheduler.service.ts
Normal file
495
backend/src/services/scheduler.service.ts
Normal file
@@ -0,0 +1,495 @@
|
||||
import cron, { type ScheduledTask } from "node-cron";
|
||||
import os from "os";
|
||||
import { SettingType } from "@prisma/client";
|
||||
import { env } from "../config/env";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { meterHourlyUsage, generateInvoicesFromUnbilledUsage, processBackupSchedule, updateOverdueInvoices } from "./billing.service";
|
||||
import { processDuePowerSchedules, processDueOperationRetries } from "./operations.service";
|
||||
import { processDueSnapshotJobs, processPendingBackups } from "./backup.service";
|
||||
import { evaluateAlertRulesNow, processDueHealthChecks } from "./monitoring.service";
|
||||
|
||||
/** Runtime configuration for the background scheduler: master switch plus one cron expression per worker. */
export type SchedulerConfig = {
  enable_scheduler: boolean;
  billing_cron: string;
  backup_cron: string;
  power_schedule_cron: string;
  monitoring_cron: string;
  operation_retry_cron: string;
};

// Identifiers for the five background workers this module manages.
type WorkerKey = "billing" | "backup" | "power" | "monitoring" | "operation_retry";
// Lifecycle states a worker reports via the runtime snapshot.
type WorkerStatus = "disabled" | "scheduled" | "running" | "success" | "failed";

/** Last-known execution state for one worker (exposed by getSchedulerRuntimeSnapshot). */
type WorkerState = {
  worker: WorkerKey;
  cron: string;                     // cron expression the worker is registered under
  status: WorkerStatus;
  last_run_at: string | null;       // ISO timestamp of the most recent run
  last_duration_ms: number | null;  // wall-clock duration of the most recent run
  last_message: string | null;      // summary message from the last run (or skip reason)
  last_error: string | null;        // error message when the last run failed
};

/** JSON lease document stored in the Setting table for cross-process worker locking. */
type SchedulerLeasePayload = {
  owner_id: string;     // schedulerInstanceId of the lease holder ("" once released)
  lease_until: string;  // ISO timestamp after which the lease may be taken over
  acquired_at: string;  // ISO timestamp when the holder first acquired the lease
  heartbeat_at: string; // ISO timestamp of the holder's most recent renewal
  worker: WorkerKey;
};

/** Aggregate in-memory scheduler state for this process. */
type SchedulerState = {
  started_at: string | null;
  config: SchedulerConfig;
  workers: Record<WorkerKey, WorkerState>;
};
|
||||
|
||||
// Baseline scheduler configuration sourced from environment variables.
// The operation-retry cadence is fixed at every five minutes rather than
// being environment-driven.
const DEFAULT_SCHEDULER_CONFIG: SchedulerConfig = {
  enable_scheduler: env.ENABLE_SCHEDULER,
  billing_cron: env.BILLING_CRON,
  backup_cron: env.BACKUP_CRON,
  power_schedule_cron: env.POWER_SCHEDULE_CRON,
  monitoring_cron: env.MONITORING_CRON,
  operation_retry_cron: "*/5 * * * *"
};
|
||||
|
||||
// Live node-cron task handles, keyed by worker; replaced wholesale on reconfigure.
let scheduledJobs: Partial<Record<WorkerKey, ScheduledTask>> = {};
// Workers currently executing in this process (in-process re-entrancy guard).
const activeWorkerRuns = new Set<WorkerKey>();
// Unique id for this process instance; used as the owner id for worker leases.
const schedulerInstanceId = `${os.hostname()}:${process.pid}:${Math.random().toString(36).slice(2, 10)}`;
|
||||
|
||||
const schedulerState: SchedulerState = {
|
||||
started_at: null,
|
||||
config: DEFAULT_SCHEDULER_CONFIG,
|
||||
workers: {
|
||||
billing: {
|
||||
worker: "billing",
|
||||
cron: DEFAULT_SCHEDULER_CONFIG.billing_cron,
|
||||
status: DEFAULT_SCHEDULER_CONFIG.enable_scheduler ? "scheduled" : "disabled",
|
||||
last_run_at: null,
|
||||
last_duration_ms: null,
|
||||
last_message: null,
|
||||
last_error: null
|
||||
},
|
||||
backup: {
|
||||
worker: "backup",
|
||||
cron: DEFAULT_SCHEDULER_CONFIG.backup_cron,
|
||||
status: DEFAULT_SCHEDULER_CONFIG.enable_scheduler ? "scheduled" : "disabled",
|
||||
last_run_at: null,
|
||||
last_duration_ms: null,
|
||||
last_message: null,
|
||||
last_error: null
|
||||
},
|
||||
power: {
|
||||
worker: "power",
|
||||
cron: DEFAULT_SCHEDULER_CONFIG.power_schedule_cron,
|
||||
status: DEFAULT_SCHEDULER_CONFIG.enable_scheduler ? "scheduled" : "disabled",
|
||||
last_run_at: null,
|
||||
last_duration_ms: null,
|
||||
last_message: null,
|
||||
last_error: null
|
||||
},
|
||||
monitoring: {
|
||||
worker: "monitoring",
|
||||
cron: DEFAULT_SCHEDULER_CONFIG.monitoring_cron,
|
||||
status: DEFAULT_SCHEDULER_CONFIG.enable_scheduler ? "scheduled" : "disabled",
|
||||
last_run_at: null,
|
||||
last_duration_ms: null,
|
||||
last_message: null,
|
||||
last_error: null
|
||||
},
|
||||
operation_retry: {
|
||||
worker: "operation_retry",
|
||||
cron: DEFAULT_SCHEDULER_CONFIG.operation_retry_cron,
|
||||
status: DEFAULT_SCHEDULER_CONFIG.enable_scheduler ? "scheduled" : "disabled",
|
||||
last_run_at: null,
|
||||
last_duration_ms: null,
|
||||
last_message: null,
|
||||
last_error: null
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
function normalizeCronExpression(value: unknown, fallback: string) {
|
||||
if (typeof value !== "string") return fallback;
|
||||
const trimmed = value.trim();
|
||||
if (trimmed.length === 0) return fallback;
|
||||
return cron.validate(trimmed) ? trimmed : fallback;
|
||||
}
|
||||
|
||||
function normalizeSchedulerConfig(raw?: unknown): SchedulerConfig {
|
||||
const record = raw && typeof raw === "object" && !Array.isArray(raw) ? (raw as Record<string, unknown>) : {};
|
||||
|
||||
const enabled =
|
||||
typeof record.enable_scheduler === "boolean" ? record.enable_scheduler : DEFAULT_SCHEDULER_CONFIG.enable_scheduler;
|
||||
|
||||
return {
|
||||
enable_scheduler: enabled,
|
||||
billing_cron: normalizeCronExpression(record.billing_cron, DEFAULT_SCHEDULER_CONFIG.billing_cron),
|
||||
backup_cron: normalizeCronExpression(record.backup_cron, DEFAULT_SCHEDULER_CONFIG.backup_cron),
|
||||
power_schedule_cron: normalizeCronExpression(record.power_schedule_cron, DEFAULT_SCHEDULER_CONFIG.power_schedule_cron),
|
||||
monitoring_cron: normalizeCronExpression(record.monitoring_cron, DEFAULT_SCHEDULER_CONFIG.monitoring_cron),
|
||||
operation_retry_cron: normalizeCronExpression(record.operation_retry_cron, DEFAULT_SCHEDULER_CONFIG.operation_retry_cron)
|
||||
};
|
||||
}
|
||||
|
||||
// Setting-table key under which a worker's cross-process lease is stored.
function lockSettingKey(worker: WorkerKey) {
  return `scheduler_lock:${worker}`;
}
|
||||
|
||||
// Deadline after which a lease acquired/renewed at `from` may be taken over.
function nextLeaseDeadline(from = new Date()) {
  return new Date(from.getTime() + env.SCHEDULER_LEASE_MS);
}
|
||||
|
||||
function parseLeasePayload(value: unknown): SchedulerLeasePayload | null {
|
||||
if (!value || typeof value !== "object" || Array.isArray(value)) return null;
|
||||
const record = value as Record<string, unknown>;
|
||||
if (
|
||||
typeof record.owner_id !== "string" ||
|
||||
typeof record.lease_until !== "string" ||
|
||||
typeof record.acquired_at !== "string" ||
|
||||
typeof record.heartbeat_at !== "string" ||
|
||||
typeof record.worker !== "string"
|
||||
) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return {
|
||||
owner_id: record.owner_id,
|
||||
lease_until: record.lease_until,
|
||||
acquired_at: record.acquired_at,
|
||||
heartbeat_at: record.heartbeat_at,
|
||||
worker: record.worker as WorkerKey
|
||||
};
|
||||
}
|
||||
|
||||
function leasePayload(worker: WorkerKey, now = new Date(), acquiredAt?: string): SchedulerLeasePayload {
|
||||
return {
|
||||
owner_id: schedulerInstanceId,
|
||||
lease_until: nextLeaseDeadline(now).toISOString(),
|
||||
acquired_at: acquiredAt ?? now.toISOString(),
|
||||
heartbeat_at: now.toISOString(),
|
||||
worker
|
||||
};
|
||||
}
|
||||
|
||||
/**
 * Attempts to acquire the cross-process lease for a worker.
 *
 * The lease lives in a Setting row. First acquisition races on the unique
 * key (create throws for the loser); takeover/renewal uses an optimistic
 * updateMany keyed on the row's `updated_at`, so a concurrent writer makes
 * the update match zero rows instead of clobbering it.
 *
 * @returns true when this process now holds the lease.
 */
async function acquireWorkerLease(worker: WorkerKey) {
  const now = new Date();
  const key = lockSettingKey(worker);
  const existing = await prisma.setting.findUnique({
    where: { key },
    select: {
      id: true,
      value: true,
      updated_at: true
    }
  });

  if (!existing) {
    // No lease row yet: create it. A unique-key violation means another
    // process created it first, i.e. we lost the race.
    try {
      await prisma.setting.create({
        data: {
          key,
          type: SettingType.GENERAL,
          is_encrypted: false,
          value: leasePayload(worker, now)
        }
      });
      return true;
    } catch {
      return false;
    }
  }

  // A different instance holds an unexpired lease → back off.
  const parsed = parseLeasePayload(existing.value);
  const leaseUntilMs = parsed ? Date.parse(parsed.lease_until) : 0;
  const activeOwner =
    parsed &&
    parsed.owner_id &&
    parsed.owner_id !== schedulerInstanceId &&
    Number.isFinite(leaseUntilMs) &&
    leaseUntilMs > now.getTime();

  if (activeOwner) {
    return false;
  }

  // Optimistic takeover: only succeeds if nobody touched the row since we
  // read it (updated_at acts as a version stamp). acquired_at is carried
  // over when we are re-acquiring our own lease.
  const updated = await prisma.setting.updateMany({
    where: {
      id: existing.id,
      updated_at: existing.updated_at
    },
    data: {
      value: leasePayload(worker, now, parsed?.acquired_at)
    }
  });

  return updated.count === 1;
}
|
||||
|
||||
/**
 * Heartbeat: extends this process's lease on a worker.
 *
 * Fails (returns false) when the lease row is gone, owned by another
 * instance, or concurrently modified (optimistic updated_at check).
 */
async function renewWorkerLease(worker: WorkerKey) {
  const now = new Date();
  const key = lockSettingKey(worker);
  const existing = await prisma.setting.findUnique({
    where: { key },
    select: {
      id: true,
      value: true,
      updated_at: true
    }
  });

  if (!existing) {
    return false;
  }

  // Only the current owner may renew.
  const parsed = parseLeasePayload(existing.value);
  if (!parsed || parsed.owner_id !== schedulerInstanceId) {
    return false;
  }

  // Optimistic write keyed on updated_at; preserves the original acquired_at.
  const updated = await prisma.setting.updateMany({
    where: {
      id: existing.id,
      updated_at: existing.updated_at
    },
    data: {
      value: leasePayload(worker, now, parsed.acquired_at)
    }
  });

  return updated.count === 1;
}
|
||||
|
||||
/**
 * Releases this process's lease on a worker by blanking the owner and
 * back-dating lease_until, so any instance can acquire it immediately.
 *
 * No-ops when the row is missing or owned by someone else. Best-effort:
 * a lost optimistic write is ignored (the lease would expire on its own).
 */
async function releaseWorkerLease(worker: WorkerKey) {
  const key = lockSettingKey(worker);
  const existing = await prisma.setting.findUnique({
    where: { key },
    select: {
      id: true,
      value: true,
      updated_at: true
    }
  });

  if (!existing) {
    return;
  }

  // Never release a lease we do not own.
  const parsed = parseLeasePayload(existing.value);
  if (!parsed || parsed.owner_id !== schedulerInstanceId) {
    return;
  }

  const now = new Date();
  // One second in the past guarantees the lease reads as expired.
  const leaseExpired = new Date(now.getTime() - 1000).toISOString();
  await prisma.setting.updateMany({
    where: {
      id: existing.id,
      updated_at: existing.updated_at
    },
    data: {
      value: {
        ...parsed,
        owner_id: "",
        lease_until: leaseExpired,
        heartbeat_at: now.toISOString()
      }
    }
  });
}
|
||||
|
||||
function stopAllScheduledJobs() {
|
||||
const entries = Object.entries(scheduledJobs) as Array<[WorkerKey, ScheduledTask]>;
|
||||
for (const [, task] of entries) {
|
||||
try {
|
||||
task.stop();
|
||||
task.destroy();
|
||||
} catch {
|
||||
task.stop();
|
||||
}
|
||||
}
|
||||
scheduledJobs = {};
|
||||
}
|
||||
|
||||
function setWorkerDisabledState(config: SchedulerConfig) {
|
||||
schedulerState.workers.billing = {
|
||||
...schedulerState.workers.billing,
|
||||
cron: config.billing_cron,
|
||||
status: "disabled"
|
||||
};
|
||||
schedulerState.workers.backup = {
|
||||
...schedulerState.workers.backup,
|
||||
cron: config.backup_cron,
|
||||
status: "disabled"
|
||||
};
|
||||
schedulerState.workers.power = {
|
||||
...schedulerState.workers.power,
|
||||
cron: config.power_schedule_cron,
|
||||
status: "disabled"
|
||||
};
|
||||
schedulerState.workers.monitoring = {
|
||||
...schedulerState.workers.monitoring,
|
||||
cron: config.monitoring_cron,
|
||||
status: "disabled"
|
||||
};
|
||||
schedulerState.workers.operation_retry = {
|
||||
...schedulerState.workers.operation_retry,
|
||||
cron: config.operation_retry_cron,
|
||||
status: "disabled"
|
||||
};
|
||||
}
|
||||
|
||||
/**
 * Executes one worker run with re-entrancy and cross-process protection.
 *
 * Skips when the worker is already running in this process or when the
 * distributed lease cannot be acquired. While running, a heartbeat timer
 * renews the lease; the lease is always released in the finally block.
 * Outcome (status, duration, message/error) is written to schedulerState.
 */
async function runWorker(worker: WorkerKey, execute: () => Promise<string>) {
  // In-process guard: a slow run must not overlap its next cron tick.
  if (activeWorkerRuns.has(worker)) {
    schedulerState.workers[worker] = {
      ...schedulerState.workers[worker],
      status: "scheduled",
      last_message: "Skipped: worker already running in this process"
    };
    return;
  }

  // Cross-process guard: only the lease holder may run this worker.
  const acquired = await acquireWorkerLease(worker);
  if (!acquired) {
    schedulerState.workers[worker] = {
      ...schedulerState.workers[worker],
      status: "scheduled",
      last_message: "Skipped: lease held by another scheduler instance"
    };
    return;
  }

  activeWorkerRuns.add(worker);
  const startedAt = Date.now();
  schedulerState.workers[worker] = {
    ...schedulerState.workers[worker],
    status: "running",
    last_error: null
  };

  // Renew well before expiry: at most half the lease window, at least 1s.
  const heartbeatEveryMs = Math.max(1_000, Math.min(env.SCHEDULER_HEARTBEAT_MS, Math.floor(env.SCHEDULER_LEASE_MS / 2)));
  const heartbeat = setInterval(() => {
    void renewWorkerLease(worker);
  }, heartbeatEveryMs);

  try {
    const message = await execute();
    schedulerState.workers[worker] = {
      ...schedulerState.workers[worker],
      status: "success",
      last_run_at: new Date().toISOString(),
      last_duration_ms: Date.now() - startedAt,
      last_message: message,
      last_error: null
    };
  } catch (error) {
    const message = error instanceof Error ? error.message : "Unknown scheduler error";
    schedulerState.workers[worker] = {
      ...schedulerState.workers[worker],
      status: "failed",
      last_run_at: new Date().toISOString(),
      last_duration_ms: Date.now() - startedAt,
      last_error: message
    };
  } finally {
    // Always stop the heartbeat and free both guards, even on failure.
    clearInterval(heartbeat);
    activeWorkerRuns.delete(worker);
    await releaseWorkerLease(worker);
  }
}
|
||||
|
||||
function registerWorker(worker: WorkerKey, cronExpression: string, execute: () => Promise<string>) {
|
||||
schedulerState.workers[worker] = {
|
||||
...schedulerState.workers[worker],
|
||||
cron: cronExpression,
|
||||
status: "scheduled",
|
||||
last_error: null
|
||||
};
|
||||
|
||||
const task = cron.schedule(cronExpression, () => {
|
||||
void runWorker(worker, execute);
|
||||
});
|
||||
|
||||
scheduledJobs[worker] = task;
|
||||
}
|
||||
|
||||
async function readSchedulerConfigSetting() {
|
||||
const setting = await prisma.setting.findUnique({
|
||||
where: { key: "scheduler" },
|
||||
select: { value: true }
|
||||
});
|
||||
return normalizeSchedulerConfig(setting?.value);
|
||||
}
|
||||
|
||||
// Installs the active config into the in-memory state and stamps a (re)start time.
function applyRuntimeConfig(config: SchedulerConfig) {
  schedulerState.config = config;
  schedulerState.started_at = new Date().toISOString();
}
|
||||
|
||||
/**
 * (Re)configures all background workers from the given config, or from the
 * persisted "scheduler" setting when none is supplied.
 *
 * Existing cron tasks are always torn down first. When the scheduler is
 * disabled, workers are only marked disabled; otherwise each worker is
 * registered on its cron expression with an execute callback that returns
 * a human-readable summary string for the runtime snapshot.
 *
 * @returns The current scheduler runtime snapshot.
 */
export async function configureSchedulers(config?: SchedulerConfig) {
  const resolvedConfig = config ?? (await readSchedulerConfigSetting());
  applyRuntimeConfig(resolvedConfig);

  // Drop any previously registered tasks before re-registering.
  stopAllScheduledJobs();

  if (!resolvedConfig.enable_scheduler) {
    setWorkerDisabledState(resolvedConfig);
    return getSchedulerRuntimeSnapshot();
  }

  // Billing: meter usage, raise invoices, flag overdue ones.
  registerWorker("billing", resolvedConfig.billing_cron, async () => {
    await meterHourlyUsage();
    await generateInvoicesFromUnbilledUsage();
    await updateOverdueInvoices();
    return "Billing cycle completed";
  });

  // Backup: queue scheduled backups, run pending ones, process snapshot jobs.
  registerWorker("backup", resolvedConfig.backup_cron, async () => {
    const queued = await processBackupSchedule();
    const backupResult = await processPendingBackups();
    const snapshotResult = await processDueSnapshotJobs();
    return `Backup queue=${queued}, backups_completed=${backupResult.completed}, backups_skipped=${backupResult.skipped}, snapshot_scanned=${snapshotResult.scanned}, snapshot_executed=${snapshotResult.executed}, snapshot_failed=${snapshotResult.failed}, snapshot_pruned=${snapshotResult.pruned}, snapshot_skipped=${snapshotResult.skipped}`;
  });

  // Power: fire due VM power schedules.
  registerWorker("power", resolvedConfig.power_schedule_cron, async () => {
    const result = await processDuePowerSchedules();
    return `Power schedules scanned=${result.scanned}, executed=${result.executed}, failed=${result.failed}, skipped=${result.skipped}`;
  });

  // Monitoring: run due health checks, then evaluate alert rules.
  registerWorker("monitoring", resolvedConfig.monitoring_cron, async () => {
    const checkResult = await processDueHealthChecks();
    const alertResult = await evaluateAlertRulesNow();
    return `Checks scanned=${checkResult.scanned}, executed=${checkResult.executed}, failed=${checkResult.failed}, skipped=${checkResult.skipped}; alerts evaluated=${alertResult.evaluated}, triggered=${alertResult.triggered}, resolved=${alertResult.resolved}`;
  });

  // Operation retry: re-run due RETRYING power operations.
  registerWorker("operation_retry", resolvedConfig.operation_retry_cron, async () => {
    const retryResult = await processDueOperationRetries();
    return `Retry tasks scanned=${retryResult.scanned}, executed=${retryResult.executed}, succeeded=${retryResult.succeeded}, failed=${retryResult.failed}, rescheduled=${retryResult.rescheduled}, invalid_payload=${retryResult.invalid_payload}, skipped=${retryResult.skipped}`;
  });

  return getSchedulerRuntimeSnapshot();
}
|
||||
|
||||
// Boot entry point: configures workers from the persisted scheduler setting.
export async function startSchedulers() {
  await configureSchedulers();
}
|
||||
|
||||
export async function reconfigureSchedulers(config?: Partial<SchedulerConfig>) {
|
||||
const persisted = await readSchedulerConfigSetting();
|
||||
const merged = normalizeSchedulerConfig({
|
||||
...persisted,
|
||||
...(config ?? {})
|
||||
});
|
||||
return configureSchedulers(merged);
|
||||
}
|
||||
|
||||
/**
 * Returns the current scheduler state stamped with a generation time.
 * NOTE(review): this spread is shallow — `config` and `workers` still
 * reference the live objects; callers should treat the snapshot as
 * read-only. Confirm no caller mutates it before deep-copying here.
 */
export function getSchedulerRuntimeSnapshot() {
  return {
    generated_at: new Date().toISOString(),
    ...schedulerState
  };
}
|
||||
|
||||
// Exposes a shallow copy of the defaults so callers cannot mutate the shared constant.
export function schedulerDefaults() {
  return { ...DEFAULT_SCHEDULER_CONFIG };
}
|
||||
20
backend/src/tests/operations.test.ts
Normal file
20
backend/src/tests/operations.test.ts
Normal file
@@ -0,0 +1,20 @@
|
||||
import test from "node:test";
|
||||
import assert from "node:assert/strict";
|
||||
import { nextRunAt, validateCronExpression } from "../services/operations.service";
|
||||
|
||||
// nextRunAt should produce a concrete Date strictly after the base time
// for a well-formed five-field cron expression.
test("nextRunAt returns a future date for a valid cron expression", () => {
  const base = new Date("2026-01-01T00:00:00.000Z");
  const next = nextRunAt("*/5 * * * *", base);
  assert.ok(next instanceof Date);
  assert.ok(next.getTime() > base.getTime());
});

// Standard five-field expressions (steps, ranges, lists) must validate cleanly.
test("validateCronExpression accepts valid expressions", () => {
  assert.doesNotThrow(() => validateCronExpression("0 * * * *"));
  assert.doesNotThrow(() => validateCronExpression("*/10 1-23 * * 1,3,5"));
});

// Garbage input and six-field (seconds-bearing) expressions must be rejected.
test("validateCronExpression rejects invalid expressions", () => {
  assert.throws(() => validateCronExpression("invalid-cron"));
  assert.throws(() => validateCronExpression("* * * * * *"));
});
|
||||
20
backend/src/types/express.d.ts
vendored
Normal file
20
backend/src/types/express.d.ts
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
import type { Role } from "@prisma/client";
|
||||
|
||||
declare global {
|
||||
namespace Express {
|
||||
interface UserToken {
|
||||
id: string;
|
||||
email: string;
|
||||
role: Role;
|
||||
tenant_id?: string | null;
|
||||
sid?: string;
|
||||
}
|
||||
|
||||
interface Request {
|
||||
user?: UserToken;
|
||||
rawBody?: string;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export {};
|
||||
20
backend/tsconfig.json
Normal file
20
backend/tsconfig.json
Normal file
@@ -0,0 +1,20 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"target": "ES2022",
|
||||
"lib": ["ES2022"],
|
||||
"module": "CommonJS",
|
||||
"moduleResolution": "Node",
|
||||
"rootDir": "src",
|
||||
"outDir": "dist",
|
||||
"strict": true,
|
||||
"esModuleInterop": true,
|
||||
"forceConsistentCasingInFileNames": true,
|
||||
"skipLibCheck": true,
|
||||
"resolveJsonModule": true,
|
||||
"declaration": false,
|
||||
"sourceMap": true,
|
||||
"types": ["node"]
|
||||
},
|
||||
"include": ["src/**/*.ts", "src/**/*.d.ts"],
|
||||
"exclude": ["node_modules", "dist"]
|
||||
}
|
||||
21
components.json
Normal file
21
components.json
Normal file
@@ -0,0 +1,21 @@
|
||||
{
|
||||
"$schema": "https://ui.shadcn.com/schema.json",
|
||||
"style": "new-york",
|
||||
"rsc": false,
|
||||
"tsx": false,
|
||||
"tailwind": {
|
||||
"config": "tailwind.config.js",
|
||||
"css": "src/index.css",
|
||||
"baseColor": "neutral",
|
||||
"cssVariables": true,
|
||||
"prefix": ""
|
||||
},
|
||||
"aliases": {
|
||||
"components": "@/components",
|
||||
"utils": "@/lib/utils",
|
||||
"ui": "@/components/ui",
|
||||
"lib": "@/lib",
|
||||
"hooks": "@/hooks"
|
||||
},
|
||||
"iconLibrary": "lucide"
|
||||
}
|
||||
63
docker-compose.yml
Normal file
63
docker-compose.yml
Normal file
@@ -0,0 +1,63 @@
|
||||
services:
|
||||
postgres:
|
||||
image: postgres:16-alpine
|
||||
container_name: proxpanel-postgres
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
POSTGRES_USER: proxpanel
|
||||
POSTGRES_PASSWORD: proxpanel
|
||||
POSTGRES_DB: proxpanel
|
||||
ports:
|
||||
- "5432:5432"
|
||||
volumes:
|
||||
- postgres_data:/var/lib/postgresql/data
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U proxpanel -d proxpanel"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
backend:
|
||||
build:
|
||||
context: ./backend
|
||||
container_name: proxpanel-backend
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
NODE_ENV: production
|
||||
PORT: 8080
|
||||
DATABASE_URL: postgresql://proxpanel:proxpanel@postgres:5432/proxpanel
|
||||
JWT_SECRET: change_this_to_a_long_secret_key_please
|
||||
JWT_REFRESH_SECRET: change_this_to_another_long_secret_key
|
||||
JWT_EXPIRES_IN: 15m
|
||||
JWT_REFRESH_EXPIRES_IN: 30d
|
||||
CORS_ORIGIN: http://localhost:80
|
||||
RATE_LIMIT_WINDOW_MS: 60000
|
||||
RATE_LIMIT_MAX: 600
|
||||
AUTH_RATE_LIMIT_WINDOW_MS: 60000
|
||||
AUTH_RATE_LIMIT_MAX: 20
|
||||
ENABLE_SCHEDULER: "true"
|
||||
BILLING_CRON: "0 * * * *"
|
||||
BACKUP_CRON: "*/15 * * * *"
|
||||
POWER_SCHEDULE_CRON: "* * * * *"
|
||||
MONITORING_CRON: "*/5 * * * *"
|
||||
PROXMOX_TIMEOUT_MS: 15000
|
||||
ports:
|
||||
- "8080:8080"
|
||||
|
||||
frontend:
|
||||
build:
|
||||
context: .
|
||||
args:
|
||||
VITE_API_BASE_URL: http://localhost:8080
|
||||
container_name: proxpanel-frontend
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- backend
|
||||
ports:
|
||||
- "80:80"
|
||||
|
||||
volumes:
|
||||
postgres_data:
|
||||
80
entities/AuditLog.json
Normal file
80
entities/AuditLog.json
Normal file
@@ -0,0 +1,80 @@
|
||||
{
|
||||
"name": "Backup",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"vm_id": {
|
||||
"type": "string",
|
||||
"title": "VM ID"
|
||||
},
|
||||
"vm_name": {
|
||||
"type": "string",
|
||||
"title": "VM Name"
|
||||
},
|
||||
"node": {
|
||||
"type": "string",
|
||||
"title": "Node"
|
||||
},
|
||||
"status": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"pending",
|
||||
"running",
|
||||
"completed",
|
||||
"failed",
|
||||
"expired"
|
||||
],
|
||||
"title": "Status"
|
||||
},
|
||||
"type": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"full",
|
||||
"incremental",
|
||||
"snapshot"
|
||||
],
|
||||
"title": "Backup Type"
|
||||
},
|
||||
"size_mb": {
|
||||
"type": "number",
|
||||
"title": "Size (MB)"
|
||||
},
|
||||
"storage": {
|
||||
"type": "string",
|
||||
"title": "Storage Location"
|
||||
},
|
||||
"schedule": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"manual",
|
||||
"daily",
|
||||
"weekly",
|
||||
"monthly"
|
||||
],
|
||||
"title": "Schedule"
|
||||
},
|
||||
"retention_days": {
|
||||
"type": "number",
|
||||
"title": "Retention Days"
|
||||
},
|
||||
"started_at": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"title": "Started At"
|
||||
},
|
||||
"completed_at": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"title": "Completed At"
|
||||
},
|
||||
"notes": {
|
||||
"type": "string",
|
||||
"title": "Notes"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"vm_id",
|
||||
"vm_name",
|
||||
"status",
|
||||
"type"
|
||||
]
|
||||
}
|
||||
72
entities/Backup.json
Normal file
72
entities/Backup.json
Normal file
@@ -0,0 +1,72 @@
|
||||
{
|
||||
"name": "BillingPlan",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string",
|
||||
"title": "Plan Name"
|
||||
},
|
||||
"slug": {
|
||||
"type": "string",
|
||||
"title": "Slug"
|
||||
},
|
||||
"description": {
|
||||
"type": "string",
|
||||
"title": "Description"
|
||||
},
|
||||
"price_monthly": {
|
||||
"type": "number",
|
||||
"title": "Monthly Price"
|
||||
},
|
||||
"price_hourly": {
|
||||
"type": "number",
|
||||
"title": "Hourly Price"
|
||||
},
|
||||
"currency": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"NGN",
|
||||
"USD",
|
||||
"GHS",
|
||||
"KES",
|
||||
"ZAR"
|
||||
],
|
||||
"title": "Currency"
|
||||
},
|
||||
"cpu_cores": {
|
||||
"type": "number",
|
||||
"title": "CPU Cores"
|
||||
},
|
||||
"ram_mb": {
|
||||
"type": "number",
|
||||
"title": "RAM (MB)"
|
||||
},
|
||||
"disk_gb": {
|
||||
"type": "number",
|
||||
"title": "Disk (GB)"
|
||||
},
|
||||
"bandwidth_gb": {
|
||||
"type": "number",
|
||||
"title": "Bandwidth (GB)"
|
||||
},
|
||||
"is_active": {
|
||||
"type": "boolean",
|
||||
"title": "Active"
|
||||
},
|
||||
"features": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"title": "Features"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"name",
|
||||
"price_monthly",
|
||||
"currency",
|
||||
"cpu_cores",
|
||||
"ram_mb",
|
||||
"disk_gb"
|
||||
]
|
||||
}
|
||||
83
entities/BillingPlan.json
Normal file
83
entities/BillingPlan.json
Normal file
@@ -0,0 +1,83 @@
|
||||
{
|
||||
"name": "Invoice",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"invoice_number": {
|
||||
"type": "string",
|
||||
"title": "Invoice Number"
|
||||
},
|
||||
"tenant_id": {
|
||||
"type": "string",
|
||||
"title": "Tenant ID"
|
||||
},
|
||||
"tenant_name": {
|
||||
"type": "string",
|
||||
"title": "Tenant Name"
|
||||
},
|
||||
"status": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"draft",
|
||||
"pending",
|
||||
"paid",
|
||||
"overdue",
|
||||
"cancelled",
|
||||
"refunded"
|
||||
],
|
||||
"title": "Status"
|
||||
},
|
||||
"amount": {
|
||||
"type": "number",
|
||||
"title": "Amount"
|
||||
},
|
||||
"currency": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"NGN",
|
||||
"USD",
|
||||
"GHS",
|
||||
"KES",
|
||||
"ZAR"
|
||||
],
|
||||
"title": "Currency"
|
||||
},
|
||||
"due_date": {
|
||||
"type": "string",
|
||||
"format": "date",
|
||||
"title": "Due Date"
|
||||
},
|
||||
"paid_date": {
|
||||
"type": "string",
|
||||
"format": "date",
|
||||
"title": "Paid Date"
|
||||
},
|
||||
"payment_provider": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"paystack",
|
||||
"flutterwave",
|
||||
"manual"
|
||||
],
|
||||
"title": "Payment Provider"
|
||||
},
|
||||
"payment_reference": {
|
||||
"type": "string",
|
||||
"title": "Payment Reference"
|
||||
},
|
||||
"line_items": {
|
||||
"type": "string",
|
||||
"title": "Line Items JSON"
|
||||
},
|
||||
"notes": {
|
||||
"type": "string",
|
||||
"title": "Notes"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"invoice_number",
|
||||
"tenant_id",
|
||||
"status",
|
||||
"amount",
|
||||
"currency"
|
||||
]
|
||||
}
|
||||
95
entities/FirewallRule.json
Normal file
95
entities/FirewallRule.json
Normal file
@@ -0,0 +1,95 @@
|
||||
{
|
||||
"name": "UsageRecord",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"vm_id": {
|
||||
"type": "string",
|
||||
"title": "VM ID"
|
||||
},
|
||||
"vm_name": {
|
||||
"type": "string",
|
||||
"title": "VM Name"
|
||||
},
|
||||
"tenant_id": {
|
||||
"type": "string",
|
||||
"title": "Tenant ID"
|
||||
},
|
||||
"tenant_name": {
|
||||
"type": "string",
|
||||
"title": "Tenant Name"
|
||||
},
|
||||
"billing_plan_id": {
|
||||
"type": "string",
|
||||
"title": "Billing Plan ID"
|
||||
},
|
||||
"plan_name": {
|
||||
"type": "string",
|
||||
"title": "Plan Name"
|
||||
},
|
||||
"hours_used": {
|
||||
"type": "number",
|
||||
"title": "Hours Used"
|
||||
},
|
||||
"price_per_hour": {
|
||||
"type": "number",
|
||||
"title": "Price Per Hour"
|
||||
},
|
||||
"currency": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"NGN",
|
||||
"USD",
|
||||
"GHS",
|
||||
"KES",
|
||||
"ZAR"
|
||||
],
|
||||
"title": "Currency"
|
||||
},
|
||||
"total_cost": {
|
||||
"type": "number",
|
||||
"title": "Total Cost"
|
||||
},
|
||||
"period_start": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"title": "Period Start"
|
||||
},
|
||||
"period_end": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"title": "Period End"
|
||||
},
|
||||
"billed": {
|
||||
"type": "boolean",
|
||||
"title": "Billed"
|
||||
},
|
||||
"invoice_id": {
|
||||
"type": "string",
|
||||
"title": "Invoice ID"
|
||||
},
|
||||
"cpu_hours": {
|
||||
"type": "number",
|
||||
"title": "CPU Hours"
|
||||
},
|
||||
"ram_gb_hours": {
|
||||
"type": "number",
|
||||
"title": "RAM GB-Hours"
|
||||
},
|
||||
"disk_gb_hours": {
|
||||
"type": "number",
|
||||
"title": "Disk GB-Hours"
|
||||
},
|
||||
"network_gb": {
|
||||
"type": "number",
|
||||
"title": "Network GB Used"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"vm_id",
|
||||
"vm_name",
|
||||
"hours_used",
|
||||
"price_per_hour",
|
||||
"currency",
|
||||
"total_cost"
|
||||
]
|
||||
}
|
||||
83
entities/Invoice.json
Normal file
83
entities/Invoice.json
Normal file
@@ -0,0 +1,83 @@
|
||||
{
|
||||
"name": "Invoice",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"invoice_number": {
|
||||
"type": "string",
|
||||
"title": "Invoice Number"
|
||||
},
|
||||
"tenant_id": {
|
||||
"type": "string",
|
||||
"title": "Tenant ID"
|
||||
},
|
||||
"tenant_name": {
|
||||
"type": "string",
|
||||
"title": "Tenant Name"
|
||||
},
|
||||
"status": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"draft",
|
||||
"pending",
|
||||
"paid",
|
||||
"overdue",
|
||||
"cancelled",
|
||||
"refunded"
|
||||
],
|
||||
"title": "Status"
|
||||
},
|
||||
"amount": {
|
||||
"type": "number",
|
||||
"title": "Amount"
|
||||
},
|
||||
"currency": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"NGN",
|
||||
"USD",
|
||||
"GHS",
|
||||
"KES",
|
||||
"ZAR"
|
||||
],
|
||||
"title": "Currency"
|
||||
},
|
||||
"due_date": {
|
||||
"type": "string",
|
||||
"format": "date",
|
||||
"title": "Due Date"
|
||||
},
|
||||
"paid_date": {
|
||||
"type": "string",
|
||||
"format": "date",
|
||||
"title": "Paid Date"
|
||||
},
|
||||
"payment_provider": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"paystack",
|
||||
"flutterwave",
|
||||
"manual"
|
||||
],
|
||||
"title": "Payment Provider"
|
||||
},
|
||||
"payment_reference": {
|
||||
"type": "string",
|
||||
"title": "Payment Reference"
|
||||
},
|
||||
"line_items": {
|
||||
"type": "string",
|
||||
"title": "Line Items JSON"
|
||||
},
|
||||
"notes": {
|
||||
"type": "string",
|
||||
"title": "Notes"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"invoice_number",
|
||||
"tenant_id",
|
||||
"status",
|
||||
"amount",
|
||||
"currency"
|
||||
]
|
||||
}
|
||||
95
entities/ProxmoxNode.json
Normal file
95
entities/ProxmoxNode.json
Normal file
@@ -0,0 +1,95 @@
|
||||
{
|
||||
"name": "Tenant",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string",
|
||||
"title": "Organization Name"
|
||||
},
|
||||
"slug": {
|
||||
"type": "string",
|
||||
"title": "Slug"
|
||||
},
|
||||
"status": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"active",
|
||||
"suspended",
|
||||
"trial",
|
||||
"cancelled"
|
||||
],
|
||||
"title": "Status"
|
||||
},
|
||||
"plan": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"starter",
|
||||
"professional",
|
||||
"enterprise",
|
||||
"custom"
|
||||
],
|
||||
"title": "Plan"
|
||||
},
|
||||
"owner_email": {
|
||||
"type": "string",
|
||||
"title": "Owner Email"
|
||||
},
|
||||
"member_emails": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"title": "Member Emails"
|
||||
},
|
||||
"vm_limit": {
|
||||
"type": "number",
|
||||
"title": "VM Limit"
|
||||
},
|
||||
"cpu_limit": {
|
||||
"type": "number",
|
||||
"title": "CPU Limit"
|
||||
},
|
||||
"ram_limit_mb": {
|
||||
"type": "number",
|
||||
"title": "RAM Limit (MB)"
|
||||
},
|
||||
"disk_limit_gb": {
|
||||
"type": "number",
|
||||
"title": "Disk Limit (GB)"
|
||||
},
|
||||
"balance": {
|
||||
"type": "number",
|
||||
"title": "Balance"
|
||||
},
|
||||
"currency": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"NGN",
|
||||
"USD",
|
||||
"GHS",
|
||||
"KES",
|
||||
"ZAR"
|
||||
],
|
||||
"title": "Currency"
|
||||
},
|
||||
"payment_provider": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"paystack",
|
||||
"flutterwave",
|
||||
"manual"
|
||||
],
|
||||
"title": "Payment Provider"
|
||||
},
|
||||
"metadata": {
|
||||
"type": "string",
|
||||
"title": "Metadata JSON"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"name",
|
||||
"status",
|
||||
"plan",
|
||||
"owner_email"
|
||||
]
|
||||
}
|
||||
88
entities/SecurityEvent.json
Normal file
88
entities/SecurityEvent.json
Normal file
@@ -0,0 +1,88 @@
|
||||
{
|
||||
"name": "FirewallRule",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string",
|
||||
"title": "Rule Name"
|
||||
},
|
||||
"direction": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"inbound",
|
||||
"outbound",
|
||||
"both"
|
||||
],
|
||||
"title": "Direction"
|
||||
},
|
||||
"action": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"allow",
|
||||
"deny",
|
||||
"rate_limit",
|
||||
"log"
|
||||
],
|
||||
"title": "Action"
|
||||
},
|
||||
"protocol": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"tcp",
|
||||
"udp",
|
||||
"icmp",
|
||||
"any"
|
||||
],
|
||||
"title": "Protocol"
|
||||
},
|
||||
"source_ip": {
|
||||
"type": "string",
|
||||
"title": "Source IP / CIDR"
|
||||
},
|
||||
"destination_ip": {
|
||||
"type": "string",
|
||||
"title": "Destination IP / CIDR"
|
||||
},
|
||||
"port_range": {
|
||||
"type": "string",
|
||||
"title": "Port Range"
|
||||
},
|
||||
"priority": {
|
||||
"type": "number",
|
||||
"title": "Priority"
|
||||
},
|
||||
"enabled": {
|
||||
"type": "boolean",
|
||||
"title": "Enabled"
|
||||
},
|
||||
"applies_to": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"all_nodes",
|
||||
"all_vms",
|
||||
"specific_node",
|
||||
"specific_vm"
|
||||
],
|
||||
"title": "Applies To"
|
||||
},
|
||||
"target_id": {
|
||||
"type": "string",
|
||||
"title": "Target Node/VM ID"
|
||||
},
|
||||
"hit_count": {
|
||||
"type": "number",
|
||||
"title": "Hit Count"
|
||||
},
|
||||
"description": {
|
||||
"type": "string",
|
||||
"title": "Description"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"name",
|
||||
"direction",
|
||||
"action",
|
||||
"protocol",
|
||||
"enabled"
|
||||
]
|
||||
}
|
||||
109
entities/Tenant.json
Normal file
109
entities/Tenant.json
Normal file
@@ -0,0 +1,109 @@
|
||||
{
|
||||
"name": "VirtualMachine",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string",
|
||||
"title": "VM Name"
|
||||
},
|
||||
"vmid": {
|
||||
"type": "number",
|
||||
"title": "VM ID"
|
||||
},
|
||||
"status": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"running",
|
||||
"stopped",
|
||||
"paused",
|
||||
"migrating",
|
||||
"error"
|
||||
],
|
||||
"title": "Status"
|
||||
},
|
||||
"type": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"qemu",
|
||||
"lxc"
|
||||
],
|
||||
"title": "Type"
|
||||
},
|
||||
"node": {
|
||||
"type": "string",
|
||||
"title": "Proxmox Node"
|
||||
},
|
||||
"tenant_id": {
|
||||
"type": "string",
|
||||
"title": "Tenant ID"
|
||||
},
|
||||
"os_template": {
|
||||
"type": "string",
|
||||
"title": "OS Template"
|
||||
},
|
||||
"cpu_cores": {
|
||||
"type": "number",
|
||||
"title": "CPU Cores"
|
||||
},
|
||||
"ram_mb": {
|
||||
"type": "number",
|
||||
"title": "RAM (MB)"
|
||||
},
|
||||
"disk_gb": {
|
||||
"type": "number",
|
||||
"title": "Disk (GB)"
|
||||
},
|
||||
"ip_address": {
|
||||
"type": "string",
|
||||
"title": "IP Address"
|
||||
},
|
||||
"cpu_usage": {
|
||||
"type": "number",
|
||||
"title": "CPU Usage %"
|
||||
},
|
||||
"ram_usage": {
|
||||
"type": "number",
|
||||
"title": "RAM Usage %"
|
||||
},
|
||||
"disk_usage": {
|
||||
"type": "number",
|
||||
"title": "Disk Usage %"
|
||||
},
|
||||
"network_in": {
|
||||
"type": "number",
|
||||
"title": "Network In (MB)"
|
||||
},
|
||||
"network_out": {
|
||||
"type": "number",
|
||||
"title": "Network Out (MB)"
|
||||
},
|
||||
"uptime_seconds": {
|
||||
"type": "number",
|
||||
"title": "Uptime (seconds)"
|
||||
},
|
||||
"billing_plan_id": {
|
||||
"type": "string",
|
||||
"title": "Billing Plan ID"
|
||||
},
|
||||
"tags": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"title": "Tags"
|
||||
},
|
||||
"notes": {
|
||||
"type": "string",
|
||||
"title": "Notes"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"name",
|
||||
"status",
|
||||
"type",
|
||||
"node",
|
||||
"cpu_cores",
|
||||
"ram_mb",
|
||||
"disk_gb"
|
||||
]
|
||||
}
|
||||
37
entities/UsageRecord.json
Normal file
37
entities/UsageRecord.json
Normal file
@@ -0,0 +1,37 @@
|
||||
import { useEffect } from 'react';
|
||||
import { Outlet } from 'react-router-dom';
|
||||
import { useAuth } from '@/lib/AuthContext';
|
||||
import UserNotRegisteredError from '@/components/UserNotRegisteredError';
|
||||
|
||||
const DefaultFallback = () => (
|
||||
<div className="fixed inset-0 flex items-center justify-center">
|
||||
<div className="w-8 h-8 border-4 border-slate-200 border-t-slate-800 rounded-full animate-spin"></div>
|
||||
</div>
|
||||
);
|
||||
|
||||
export default function ProtectedRoute({ fallback = <DefaultFallback />, unauthenticatedElement }) {
|
||||
const { isAuthenticated, isLoadingAuth, authChecked, authError, checkUserAuth } = useAuth();
|
||||
|
||||
useEffect(() => {
|
||||
if (!authChecked && !isLoadingAuth) {
|
||||
checkUserAuth();
|
||||
}
|
||||
}, [authChecked, isLoadingAuth, checkUserAuth]);
|
||||
|
||||
if (isLoadingAuth || !authChecked) {
|
||||
return fallback;
|
||||
}
|
||||
|
||||
if (authError) {
|
||||
if (authError.type === 'user_not_registered') {
|
||||
return <UserNotRegisteredError />;
|
||||
}
|
||||
return unauthenticatedElement;
|
||||
}
|
||||
|
||||
if (!isAuthenticated) {
|
||||
return unauthenticatedElement;
|
||||
}
|
||||
|
||||
return <Outlet />;
|
||||
}
|
||||
63
entities/VirtualMachine.json
Normal file
63
entities/VirtualMachine.json
Normal file
@@ -0,0 +1,63 @@
|
||||
{
|
||||
"name": "AuditLog",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"action": {
|
||||
"type": "string",
|
||||
"title": "Action"
|
||||
},
|
||||
"resource_type": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"vm",
|
||||
"tenant",
|
||||
"user",
|
||||
"backup",
|
||||
"invoice",
|
||||
"node",
|
||||
"system"
|
||||
],
|
||||
"title": "Resource Type"
|
||||
},
|
||||
"resource_id": {
|
||||
"type": "string",
|
||||
"title": "Resource ID"
|
||||
},
|
||||
"resource_name": {
|
||||
"type": "string",
|
||||
"title": "Resource Name"
|
||||
},
|
||||
"actor_email": {
|
||||
"type": "string",
|
||||
"title": "Actor Email"
|
||||
},
|
||||
"actor_role": {
|
||||
"type": "string",
|
||||
"title": "Actor Role"
|
||||
},
|
||||
"severity": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"info",
|
||||
"warning",
|
||||
"error",
|
||||
"critical"
|
||||
],
|
||||
"title": "Severity"
|
||||
},
|
||||
"details": {
|
||||
"type": "string",
|
||||
"title": "Details JSON"
|
||||
},
|
||||
"ip_address": {
|
||||
"type": "string",
|
||||
"title": "IP Address"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"action",
|
||||
"resource_type",
|
||||
"actor_email",
|
||||
"severity"
|
||||
]
|
||||
}
|
||||
42
eslint.config.js
Normal file
42
eslint.config.js
Normal file
@@ -0,0 +1,42 @@
|
||||
import globals from "globals";
|
||||
import pluginJs from "@eslint/js";
|
||||
import pluginReact from "eslint-plugin-react";
|
||||
import pluginReactHooks from "eslint-plugin-react-hooks";
|
||||
import pluginUnusedImports from "eslint-plugin-unused-imports";
|
||||
import reactRefresh from "eslint-plugin-react-refresh";
|
||||
|
||||
export default [
|
||||
{ ignores: ["dist", "node_modules", "backend/dist"] },
|
||||
{
|
||||
files: ["src/**/*.{js,jsx}"],
|
||||
languageOptions: {
|
||||
ecmaVersion: 2022,
|
||||
sourceType: "module",
|
||||
globals: globals.browser,
|
||||
parserOptions: { ecmaFeatures: { jsx: true } }
|
||||
},
|
||||
plugins: {
|
||||
react: pluginReact,
|
||||
"react-hooks": pluginReactHooks,
|
||||
"react-refresh": reactRefresh,
|
||||
"unused-imports": pluginUnusedImports
|
||||
},
|
||||
settings: {
|
||||
react: { version: "detect" }
|
||||
},
|
||||
rules: {
|
||||
...pluginJs.configs.recommended.rules,
|
||||
...pluginReact.configs.recommended.rules,
|
||||
...pluginReactHooks.configs.recommended.rules,
|
||||
"react/react-in-jsx-scope": "off",
|
||||
"react/prop-types": "off",
|
||||
"no-unused-vars": "off",
|
||||
"unused-imports/no-unused-imports": "error",
|
||||
"unused-imports/no-unused-vars": [
|
||||
"warn",
|
||||
{ vars: "all", varsIgnorePattern: "^_", args: "after-used", argsIgnorePattern: "^_" }
|
||||
],
|
||||
"react-refresh/only-export-components": ["warn", { allowConstantExport: true }]
|
||||
}
|
||||
}
|
||||
];
|
||||
12
index.html
Normal file
12
index.html
Normal file
@@ -0,0 +1,12 @@
|
||||
<!doctype html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<title>ProxPanel</title>
|
||||
</head>
|
||||
<body>
|
||||
<div id="root"></div>
|
||||
<script type="module" src="/src/main.jsx"></script>
|
||||
</body>
|
||||
</html>
|
||||
20
infra/deploy/.backup.env.example
Normal file
20
infra/deploy/.backup.env.example
Normal file
@@ -0,0 +1,20 @@
|
||||
BACKUP_ENCRYPTION_KEY=replace_with_128_hex_chars
|
||||
|
||||
# Offsite replication (S3-compatible: AWS S3, Backblaze B2 S3, Wasabi)
|
||||
OFFSITE_BACKUP_ENABLED=false
|
||||
OFFSITE_S3_BUCKET=
|
||||
OFFSITE_S3_REGION=us-east-1
|
||||
OFFSITE_S3_PREFIX=proxpanel/db
|
||||
OFFSITE_S3_ENDPOINT_URL=
|
||||
OFFSITE_S3_ACCESS_KEY_ID=
|
||||
OFFSITE_S3_SECRET_ACCESS_KEY=
|
||||
OFFSITE_S3_SESSION_TOKEN=
|
||||
OFFSITE_S3_SSE=
|
||||
OFFSITE_REPLICA_RETENTION_DAYS=30
|
||||
|
||||
# Alerting for backup / restore failures
|
||||
BACKUP_ALERT_WEBHOOK_URL=
|
||||
BACKUP_ALERT_EMAIL_WEBHOOK_URL=
|
||||
BACKUP_ALERT_EMAIL_TO=
|
||||
BACKUP_ALERT_SUBJECT_PREFIX=[ProxPanel_Backup]
|
||||
BACKUP_ALERT_SEND_SUCCESS=false
|
||||
82
infra/deploy/OFFSITE_BACKUP_AND_ALERTING.md
Normal file
82
infra/deploy/OFFSITE_BACKUP_AND_ALERTING.md
Normal file
@@ -0,0 +1,82 @@
|
||||
# Offsite Backup Replication + Failure Alerting
|
||||
|
||||
This runbook configures:
|
||||
|
||||
1. Encrypted local DB backups
|
||||
2. Replication to S3-compatible object storage (AWS S3, Backblaze B2 S3, Wasabi)
|
||||
3. Webhook/email alerting on backup or restore-test failure
|
||||
|
||||
## 1) Configure secrets file
|
||||
|
||||
Create or edit:
|
||||
|
||||
`/opt/proxpanel/.backup.env`
|
||||
|
||||
Reference template:
|
||||
|
||||
`/opt/proxpanel/infra/deploy/.backup.env.example`
|
||||
|
||||
## 2) Example provider configs
|
||||
|
||||
### AWS S3
|
||||
|
||||
```bash
|
||||
OFFSITE_BACKUP_ENABLED=true
|
||||
OFFSITE_S3_BUCKET=my-proxpanel-backups
|
||||
OFFSITE_S3_REGION=us-east-1
|
||||
OFFSITE_S3_PREFIX=proxpanel/db
|
||||
OFFSITE_S3_ACCESS_KEY_ID=AKIA...
|
||||
OFFSITE_S3_SECRET_ACCESS_KEY=...
|
||||
OFFSITE_S3_ENDPOINT_URL=
|
||||
```
|
||||
|
||||
### Wasabi
|
||||
|
||||
```bash
|
||||
OFFSITE_BACKUP_ENABLED=true
|
||||
OFFSITE_S3_BUCKET=my-proxpanel-backups
|
||||
OFFSITE_S3_REGION=us-east-1
|
||||
OFFSITE_S3_PREFIX=proxpanel/db
|
||||
OFFSITE_S3_ENDPOINT_URL=https://s3.us-east-1.wasabisys.com
|
||||
OFFSITE_S3_ACCESS_KEY_ID=...
|
||||
OFFSITE_S3_SECRET_ACCESS_KEY=...
|
||||
```
|
||||
|
||||
### Backblaze B2 (S3 Compatible)
|
||||
|
||||
```bash
|
||||
OFFSITE_BACKUP_ENABLED=true
|
||||
OFFSITE_S3_BUCKET=my-proxpanel-backups
|
||||
OFFSITE_S3_REGION=us-west-002
|
||||
OFFSITE_S3_PREFIX=proxpanel/db
|
||||
OFFSITE_S3_ENDPOINT_URL=https://s3.us-west-002.backblazeb2.com
|
||||
OFFSITE_S3_ACCESS_KEY_ID=...
|
||||
OFFSITE_S3_SECRET_ACCESS_KEY=...
|
||||
```
|
||||
|
||||
## 3) Configure alerting
|
||||
|
||||
Set one or both:
|
||||
|
||||
```bash
|
||||
BACKUP_ALERT_WEBHOOK_URL=https://hooks.example.com/proxpanel-backup
|
||||
BACKUP_ALERT_EMAIL_WEBHOOK_URL=https://mailer.example.com/send
|
||||
BACKUP_ALERT_EMAIL_TO=ops@votcloud.com
|
||||
```
|
||||
|
||||
## 4) Apply cron schedule
|
||||
|
||||
```bash
|
||||
sudo bash /opt/proxpanel/infra/deploy/configure-db-backup-cron.sh --run-now
|
||||
```
|
||||
|
||||
## 5) Verification
|
||||
|
||||
1. Check local encrypted backup exists in `/opt/proxpanel-backups/daily/<timestamp>/`.
|
||||
2. Check offsite files:
|
||||
- `proxpanel.sql.enc`
|
||||
- `proxpanel.sql.enc.sha256`
|
||||
3. Check logs:
|
||||
- `/var/log/proxpanel-db-backup.log`
|
||||
- `/var/log/proxpanel-db-restore-test.log`
|
||||
4. Trigger controlled failure and confirm alert received (webhook/email).
|
||||
94
infra/deploy/PRODUCTION_CHECKLIST_my.votcloud.com.md
Normal file
94
infra/deploy/PRODUCTION_CHECKLIST_my.votcloud.com.md
Normal file
@@ -0,0 +1,94 @@
|
||||
# ProxPanel Production Checklist (`my.votcloud.com`)
|
||||
|
||||
Date baseline: April 18, 2026
|
||||
Target host: `102.69.243.167`
|
||||
Domain: `my.votcloud.com`
|
||||
|
||||
## 1) Pre-Deploy Safety Gate
|
||||
|
||||
1. Confirm DNS `A` record points to `102.69.243.167`.
|
||||
2. Confirm server ports `80` and `443` are reachable externally.
|
||||
3. Confirm current stack health:
|
||||
- `docker compose --env-file /opt/proxpanel/.env.production -f /opt/proxpanel/infra/deploy/docker-compose.production.yml ps`
|
||||
- `curl -fsS http://127.0.0.1:8080/api/health`
|
||||
|
||||
## 2) Backup + Rollback Guardrails
|
||||
|
||||
1. Create timestamped release directory:
|
||||
- `/opt/proxpanel-backups/<timestamp>/`
|
||||
2. Backup application working tree:
|
||||
- `rsync -a /opt/proxpanel/ /opt/proxpanel-backups/<timestamp>/app/`
|
||||
3. Backup runtime env:
|
||||
- `cp /opt/proxpanel/.env.production /opt/proxpanel-backups/<timestamp>/env.production.bak`
|
||||
4. Backup Postgres before migration:
|
||||
- `docker exec proxpanel-postgres pg_dump -U proxpanel -d proxpanel > /opt/proxpanel-backups/<timestamp>/db_pre_migration.sql`
|
||||
|
||||
Rollback entrypoint:
|
||||
1. Restore app files from `/opt/proxpanel-backups/<timestamp>/app/`.
|
||||
2. Restore env file from `/opt/proxpanel-backups/<timestamp>/env.production.bak`.
|
||||
3. Recreate containers with prior source:
|
||||
- `docker compose --env-file /opt/proxpanel/.env.production -f /opt/proxpanel/infra/deploy/docker-compose.production.yml up -d --build`
|
||||
4. Restore DB if required:
|
||||
- `cat /opt/proxpanel-backups/<timestamp>/db_pre_migration.sql | docker exec -i proxpanel-postgres psql -U proxpanel -d proxpanel`
|
||||
|
||||
## 3) Deploy Order (Strict)
|
||||
|
||||
1. Upload release bundle to server.
|
||||
2. Extract bundle to `/opt/proxpanel` (keep `.env.production`).
|
||||
3. Build and start containers:
|
||||
- `docker compose --env-file .env.production -f infra/deploy/docker-compose.production.yml up -d --build`
|
||||
4. Wait for DB + backend readiness.
|
||||
5. Apply schema in this order:
|
||||
- `npm run prisma:deploy`
|
||||
- If no migration path or deploy failure, fallback once to `npm run prisma:push`
|
||||
6. Run seed:
|
||||
- `npm run prisma:seed`
|
||||
7. Health checks:
|
||||
- API health endpoint
|
||||
- Auth login flow
|
||||
- Key pages load
|
||||
8. Validate Proxmox sync endpoint with admin token.
|
||||
|
||||
## 4) HTTPS + Edge Routing
|
||||
|
||||
1. Nginx site config should proxy:
|
||||
- `my.votcloud.com` -> `http://127.0.0.1:<FRONTEND_PORT>`
|
||||
2. Issue cert:
|
||||
- `certbot --nginx -d my.votcloud.com --non-interactive --agree-tos -m <ops-email> --redirect`
|
||||
3. Verify:
|
||||
- `curl -I https://my.votcloud.com`
|
||||
- Certificate auto-renew timer active.
|
||||
|
||||
## 5) Post-Deploy Validation
|
||||
|
||||
1. `GET /api/health` returns `200`.
|
||||
2. Admin login works.
|
||||
3. RBAC page loads users/roles without crash.
|
||||
4. Audit logs page renders with no blank state error.
|
||||
5. Profile page can list sessions.
|
||||
6. System Management page can load branding/policy/CMS data.
|
||||
7. Proxmox sync returns success (or actionable credential/SSL error message).
|
||||
|
||||
## 6) Backup Hardening (Offsite + Alerts)
|
||||
|
||||
1. Configure `/opt/proxpanel/.backup.env`:
|
||||
- `OFFSITE_BACKUP_ENABLED=true`
|
||||
- `OFFSITE_S3_BUCKET`, `OFFSITE_S3_ACCESS_KEY_ID`, `OFFSITE_S3_SECRET_ACCESS_KEY`
|
||||
- `OFFSITE_S3_ENDPOINT_URL` (required for Wasabi/B2 S3)
|
||||
- `BACKUP_ALERT_WEBHOOK_URL` and/or `BACKUP_ALERT_EMAIL_WEBHOOK_URL`
|
||||
2. Apply cron wiring:
|
||||
- `sudo bash /opt/proxpanel/infra/deploy/configure-db-backup-cron.sh --run-now`
|
||||
3. Validate offsite object upload:
|
||||
- `aws s3 ls s3://<bucket>/<prefix>/<timestamp>/`
|
||||
4. Validate restore-test success and alert pipeline:
|
||||
- force a controlled failure and verify webhook/email delivery
|
||||
|
||||
## 7) Incident Rollback Criteria
|
||||
|
||||
Rollback immediately if any of the following persists > 10 minutes:
|
||||
|
||||
1. API health not green.
|
||||
2. Migration errors with broken runtime schema.
|
||||
3. Login failures for seeded admin.
|
||||
4. Persistent 5xx from `/api/proxmox/sync`.
|
||||
5. TLS provisioning failure with site unreachable.
|
||||
89
infra/deploy/SECRET_ROTATION_CHECKLIST.md
Normal file
89
infra/deploy/SECRET_ROTATION_CHECKLIST.md
Normal file
@@ -0,0 +1,89 @@
|
||||
# Secret Rotation Checklist (Production)
|
||||
|
||||
Target: `my.votcloud.com`
|
||||
Host: `102.69.243.167`
|
||||
|
||||
## Scope
|
||||
|
||||
Rotate the following regularly (monthly/quarterly or after any incident):
|
||||
|
||||
1. `JWT_SECRET`
|
||||
2. `JWT_REFRESH_SECRET`
|
||||
3. `POSTGRES_PASSWORD`
|
||||
4. `ADMIN_PASSWORD`
|
||||
|
||||
Recommended:
|
||||
|
||||
1. `BACKUP_ENCRYPTION_KEY` (with controlled key migration plan)
|
||||
2. Proxmox API token secret
|
||||
3. Payment/webhook secrets
|
||||
|
||||
Enterprise hardening:
|
||||
|
||||
1. Keep one grace window for webhook secret rotation (`*_previous`) to avoid dropped payment events during provider cutover.
|
||||
2. Validate new Proxmox token directly against `/api2/json/version` before applying it in panel settings.
|
||||
|
||||
## Runbook (Safe Order)
|
||||
|
||||
1. Create timestamped app/env/db backup.
|
||||
2. Rotate env secrets in `.env.production`.
|
||||
3. Apply DB password rotation (`ALTER USER ... WITH PASSWORD ...`).
|
||||
4. Restart app stack with new env.
|
||||
5. Re-seed admin (`npm run prisma:seed`) to sync rotated admin password.
|
||||
6. Revoke all active sessions (`AuthSession`) to invalidate old sessions.
|
||||
7. Verify:
|
||||
- `GET /api/health`
|
||||
- Admin login
|
||||
- Core pages (`/rbac`, `/profile`, `/system`, `/audit-logs`)
|
||||
8. Save secure summary with new admin credentials under `/root/`.
|
||||
|
||||
## Automation Script
|
||||
|
||||
Use:
|
||||
|
||||
```bash
|
||||
sudo bash /opt/proxpanel/infra/deploy/rotate-production-secrets.sh
|
||||
```
|
||||
|
||||
Script guarantees:
|
||||
|
||||
1. Backup directory created in `/opt/proxpanel-backups/<timestamp>-secret-rotation/`
|
||||
2. DB pre-rotation dump created
|
||||
3. Post-rotation health + login verified
|
||||
4. Summary written to `/root/proxpanel-secret-rotation-<timestamp>.txt`
|
||||
|
||||
For integration secrets (Proxmox + payment/webhook + alerting endpoints), use:
|
||||
|
||||
```bash
|
||||
sudo bash /opt/proxpanel/infra/deploy/rotate-integration-secrets.sh \
|
||||
--proxmox-token-secret 'new_token_secret' \
|
||||
--paystack-secret 'new_paystack_secret' \
|
||||
--flutterwave-webhook-hash 'new_hash'
|
||||
```
|
||||
|
||||
After external provider cutover is confirmed, clear grace secrets:
|
||||
|
||||
```bash
|
||||
sudo bash /opt/proxpanel/infra/deploy/rotate-integration-secrets.sh \
|
||||
--finalize-payment-webhook-grace
|
||||
```
|
||||
|
||||
## Rollback Plan
|
||||
|
||||
If post-rotation checks fail:
|
||||
|
||||
1. Restore `.env.production` from backup.
|
||||
2. Restore previous app files if needed.
|
||||
3. Restore DB dump if schema/state corruption occurred.
|
||||
4. Recreate containers:
|
||||
- `docker compose --env-file .env.production -f infra/deploy/docker-compose.production.yml up -d --build`
|
||||
|
||||
## Audit Trail
|
||||
|
||||
Store:
|
||||
|
||||
1. Rotation timestamp
|
||||
2. Operator identity
|
||||
3. Backup directory used
|
||||
4. Health verification evidence
|
||||
5. Any rollback events
|
||||
133
infra/deploy/configure-db-backup-cron.sh
Executable file
133
infra/deploy/configure-db-backup-cron.sh
Executable file
@@ -0,0 +1,133 @@
|
||||
#!/usr/bin/env bash
# configure-db-backup-cron.sh — wire up the daily encrypted-DB-backup and
# restore-test cron jobs for ProxPanel, and bootstrap /opt/proxpanel/.backup.env
# with a generated encryption key plus safe defaults. Must run as root.
#
# Fix over previous revision: arguments are parsed in a proper loop, so extra
# or misspelled arguments after the first are rejected instead of silently
# ignored (e.g. `--run-now --helpp` previously ran without complaint).
set -Eeuo pipefail

APP_DIR="${APP_DIR:-/opt/proxpanel}"
SECRET_FILE="${SECRET_FILE:-$APP_DIR/.backup.env}"
CRON_FILE="${CRON_FILE:-/etc/cron.d/proxpanel-db-backup}"
BACKUP_LOG="${BACKUP_LOG:-/var/log/proxpanel-db-backup.log}"
RESTORE_LOG="${RESTORE_LOG:-/var/log/proxpanel-db-restore-test.log}"

usage() {
  cat <<'EOF'
Usage:
  sudo bash infra/deploy/configure-db-backup-cron.sh [--run-now]

Default schedule (UTC):
  - 02:15 daily: encrypted DB backup + optional offsite replication
  - 02:45 daily: restore test against latest encrypted backup

Alerting:
  - backup/restore failures dispatch webhook/email alerts if configured in /opt/proxpanel/.backup.env
EOF
}

# Timestamped (UTC) stdout logging.
log() {
  printf '[%s] %s\n' "$(date -u +'%Y-%m-%d %H:%M:%S UTC')" "$*"
}

# Print an error to stderr and abort.
die() {
  printf '[ERROR] %s\n' "$*" >&2
  exit 1
}

# 512-bit random hex key used for backup encryption.
random_key() {
  openssl rand -hex 64 | tr -d '\n'
}

# Append KEY=VALUE to the secret file only when KEY is not already present,
# so re-running this script never clobbers operator-configured values.
ensure_secret_setting() {
  local key="$1"
  local value="$2"
  if ! grep -q "^${key}=" "$SECRET_FILE"; then
    printf '%s=%s\n' "$key" "$value" >>"$SECRET_FILE"
  fi
}

main() {
  local run_now="false"
  # Proper argument loop: rejects unknown/extra arguments instead of only
  # inspecting $1 and ignoring everything after it.
  while [[ $# -gt 0 ]]; do
    case "$1" in
      --run-now)
        run_now="true"
        shift
        ;;
      -h|--help)
        usage
        exit 0
        ;;
      *)
        die "Unknown argument: $1"
        ;;
    esac
  done

  [[ "${EUID:-$(id -u)}" -eq 0 ]] || die "Run as root (or with sudo)."
  [[ -d "$APP_DIR" ]] || die "Missing app dir: $APP_DIR"

  if [[ ! -f "$SECRET_FILE" ]]; then
    log "Creating $SECRET_FILE"
    # Restrictive umask so the secret file is never briefly world-readable.
    umask 077
    cat >"$SECRET_FILE" <<EOF
BACKUP_ENCRYPTION_KEY=$(random_key)
OFFSITE_BACKUP_ENABLED=false
OFFSITE_S3_BUCKET=
OFFSITE_S3_REGION=us-east-1
OFFSITE_S3_PREFIX=proxpanel/db
OFFSITE_S3_ENDPOINT_URL=
OFFSITE_S3_ACCESS_KEY_ID=
OFFSITE_S3_SECRET_ACCESS_KEY=
OFFSITE_S3_SESSION_TOKEN=
OFFSITE_S3_SSE=
OFFSITE_REPLICA_RETENTION_DAYS=30
BACKUP_ALERT_WEBHOOK_URL=
BACKUP_ALERT_EMAIL_WEBHOOK_URL=
BACKUP_ALERT_EMAIL_TO=
BACKUP_ALERT_SUBJECT_PREFIX=[ProxPanel_Backup]
BACKUP_ALERT_SEND_SUCCESS=false
EOF
  fi
  chmod 600 "$SECRET_FILE"

  # Backfill any keys introduced after the secret file was first created.
  ensure_secret_setting "OFFSITE_BACKUP_ENABLED" "false"
  ensure_secret_setting "OFFSITE_S3_BUCKET" ""
  ensure_secret_setting "OFFSITE_S3_REGION" "us-east-1"
  ensure_secret_setting "OFFSITE_S3_PREFIX" "proxpanel/db"
  ensure_secret_setting "OFFSITE_S3_ENDPOINT_URL" ""
  ensure_secret_setting "OFFSITE_S3_ACCESS_KEY_ID" ""
  ensure_secret_setting "OFFSITE_S3_SECRET_ACCESS_KEY" ""
  ensure_secret_setting "OFFSITE_S3_SESSION_TOKEN" ""
  ensure_secret_setting "OFFSITE_S3_SSE" ""
  ensure_secret_setting "OFFSITE_REPLICA_RETENTION_DAYS" "30"
  ensure_secret_setting "BACKUP_ALERT_WEBHOOK_URL" ""
  ensure_secret_setting "BACKUP_ALERT_EMAIL_WEBHOOK_URL" ""
  ensure_secret_setting "BACKUP_ALERT_EMAIL_TO" ""
  ensure_secret_setting "BACKUP_ALERT_SUBJECT_PREFIX" "[ProxPanel_Backup]"
  ensure_secret_setting "BACKUP_ALERT_SEND_SUCCESS" "false"

  log "Making scripts executable"
  chmod +x \
    "$APP_DIR/infra/deploy/db-backup-encrypted.sh" \
    "$APP_DIR/infra/deploy/db-restore-test.sh" \
    "$APP_DIR/infra/deploy/db-backup-replicate-offsite.sh" \
    "$APP_DIR/infra/deploy/notify-backup-alert.sh" \
    "$APP_DIR/infra/deploy/db-backup-job.sh" \
    "$APP_DIR/infra/deploy/db-restore-test-job.sh"

  log "Installing cron schedule at $CRON_FILE"
  cat >"$CRON_FILE" <<'EOF'
SHELL=/bin/bash
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin

# Encrypted PostgreSQL backup every day at 02:15 UTC
15 2 * * * root APP_DIR=/opt/proxpanel /opt/proxpanel/infra/deploy/db-backup-job.sh >> /var/log/proxpanel-db-backup.log 2>&1

# Restore test every day at 02:45 UTC
45 2 * * * root APP_DIR=/opt/proxpanel /opt/proxpanel/infra/deploy/db-restore-test-job.sh >> /var/log/proxpanel-db-restore-test.log 2>&1
EOF
  chmod 644 "$CRON_FILE"

  # Pre-create log files with group-readable-only permissions.
  touch "$BACKUP_LOG" "$RESTORE_LOG"
  chmod 640 "$BACKUP_LOG" "$RESTORE_LOG"

  if [[ "$run_now" == "true" ]]; then
    log "Running immediate backup + restore test"
    APP_DIR="$APP_DIR" "$APP_DIR/infra/deploy/db-backup-job.sh"
    APP_DIR="$APP_DIR" "$APP_DIR/infra/deploy/db-restore-test-job.sh"
  fi

  log "DB backup/restore cron configured successfully."
}

main "$@"
|
||||
74
infra/deploy/db-backup-encrypted.sh
Executable file
74
infra/deploy/db-backup-encrypted.sh
Executable file
@@ -0,0 +1,74 @@
|
||||
#!/usr/bin/env bash
# db-backup-encrypted.sh — produce an encrypted, checksummed dump of the
# production PostgreSQL database under ${BACKUP_ROOT}/<timestamp>/.
#
# Fix over previous revision: the dump is streamed from pg_dump directly into
# `openssl enc`, so the plaintext SQL never touches disk. The old flow wrote a
# cleartext file, encrypted it, then deleted it — a crash between the dump and
# the `rm -f` left a plaintext copy of the database on disk.
set -Eeuo pipefail

APP_DIR="${APP_DIR:-/opt/proxpanel}"
ENV_FILE="${ENV_FILE:-$APP_DIR/.env.production}"
SECRET_FILE="${SECRET_FILE:-$APP_DIR/.backup.env}"
BACKUP_ROOT="${BACKUP_ROOT:-/opt/proxpanel-backups/daily}"
RETENTION_DAYS="${RETENTION_DAYS:-14}"

# Timestamped (UTC) stdout logging.
log() {
  printf '[%s] %s\n' "$(date -u +'%Y-%m-%d %H:%M:%S UTC')" "$*"
}

# Print an error to stderr and abort.
die() {
  printf '[ERROR] %s\n' "$*" >&2
  exit 1
}

require_file() {
  [[ -f "$1" ]] || die "Missing required file: $1"
}

require_command() {
  command -v "$1" >/dev/null 2>&1 || die "Missing required command: $1"
}

main() {
  require_command docker
  require_command openssl
  require_command sha256sum
  require_file "$ENV_FILE"
  require_file "$SECRET_FILE"

  # shellcheck disable=SC1090
  source "$ENV_FILE"
  # shellcheck disable=SC1090
  source "$SECRET_FILE"

  [[ -n "${BACKUP_ENCRYPTION_KEY:-}" ]] || die "BACKUP_ENCRYPTION_KEY is empty in $SECRET_FILE"
  # Exported so openssl can read the key from the environment (-pass env:),
  # keeping it off the command line and out of `ps` output.
  export BACKUP_ENCRYPTION_KEY
  [[ -n "${POSTGRES_USER:-}" ]] || die "POSTGRES_USER missing in $ENV_FILE"
  [[ -n "${POSTGRES_DB:-}" ]] || die "POSTGRES_DB missing in $ENV_FILE"

  local ts backup_dir encrypted_sql
  ts="$(date -u +%Y%m%d-%H%M%S)"
  backup_dir="${BACKUP_ROOT}/${ts}"
  encrypted_sql="${backup_dir}/proxpanel.sql.enc"

  mkdir -p "$backup_dir"
  chmod 700 "$backup_dir"

  log "Dumping and encrypting database (plaintext never touches disk)"
  # `set -o pipefail` (above) makes a pg_dump failure abort the script even
  # though it sits on the left side of the pipe.
  docker exec proxpanel-postgres pg_dump -U "$POSTGRES_USER" -d "$POSTGRES_DB" --clean --if-exists \
    | openssl enc -aes-256-cbc -salt -pbkdf2 -iter 200000 \
        -out "$encrypted_sql" \
        -pass env:BACKUP_ENCRYPTION_KEY

  log "Writing checksum"
  sha256sum "$encrypted_sql" >"${encrypted_sql}.sha256"

  chmod 600 "$encrypted_sql" "${encrypted_sql}.sha256"

  log "Applying retention policy (${RETENTION_DAYS} days)"
  # Drop whole timestamped backup directories older than the retention window.
  find "$BACKUP_ROOT" -mindepth 1 -maxdepth 1 -type d -mtime +"$RETENTION_DAYS" -exec rm -rf {} +

  log "Encrypted backup created: $encrypted_sql"
}

main "$@"
|
||||
48
infra/deploy/db-backup-job.sh
Normal file
48
infra/deploy/db-backup-job.sh
Normal file
@@ -0,0 +1,48 @@
|
||||
#!/usr/bin/env bash
# db-backup-job.sh — cron entry point: run the encrypted DB backup, replicate
# it offsite, and dispatch an alert on failure (and, when opted in via
# BACKUP_ALERT_SEND_SUCCESS, on success as well).
set -Eeuo pipefail

APP_DIR="${APP_DIR:-/opt/proxpanel}"
SECRET_FILE="${SECRET_FILE:-$APP_DIR/.backup.env}"

BACKUP_SCRIPT="${APP_DIR}/infra/deploy/db-backup-encrypted.sh"
REPLICATE_SCRIPT="${APP_DIR}/infra/deploy/db-backup-replicate-offsite.sh"
NOTIFY_SCRIPT="${APP_DIR}/infra/deploy/notify-backup-alert.sh"

# ERR-trap handler: send a critical alert naming the failing line number.
report_failure() {
  local failed_line="$1"
  local alert_text="Daily backup job failed (line ${failed_line}) on host $(hostname -f 2>/dev/null || hostname)"
  APP_DIR="$APP_DIR" "$NOTIFY_SCRIPT" \
    --event backup_failed \
    --severity critical \
    --status failed \
    --source db-backup-job \
    --message "$alert_text" \
    --context-json "{\"line\":${failed_line}}"
}

# Succeeds (exit 0) only when .backup.env opts in to success notifications.
success_alerts_enabled() {
  local flag="false"
  if [[ -f "$SECRET_FILE" ]]; then
    # shellcheck disable=SC1090
    source "$SECRET_FILE"
    flag="${BACKUP_ALERT_SEND_SUCCESS:-false}"
  fi
  [[ "$flag" == "true" ]]
}

main() {
  trap 'report_failure $LINENO' ERR

  APP_DIR="$APP_DIR" "$BACKUP_SCRIPT"
  APP_DIR="$APP_DIR" "$REPLICATE_SCRIPT"

  if success_alerts_enabled; then
    APP_DIR="$APP_DIR" "$NOTIFY_SCRIPT" \
      --event backup_success \
      --severity info \
      --status ok \
      --source db-backup-job \
      --message "Daily backup + offsite replication completed successfully"
  fi

  trap - ERR
}

main "$@"
|
||||
163
infra/deploy/db-backup-replicate-offsite.sh
Normal file
163
infra/deploy/db-backup-replicate-offsite.sh
Normal file
@@ -0,0 +1,163 @@
|
||||
#!/usr/bin/env bash
# db-backup-replicate-offsite.sh — copy the newest encrypted DB backup (plus
# its checksum) to an S3-compatible bucket, verify the objects landed, and
# prune remote copies older than the configured retention window.
set -Eeuo pipefail

APP_DIR="${APP_DIR:-/opt/proxpanel}"
SECRET_FILE="${SECRET_FILE:-$APP_DIR/.backup.env}"
BACKUP_ROOT="${BACKUP_ROOT:-/opt/proxpanel-backups/daily}"

# Shared extra arguments for every `aws` call (e.g. --endpoint-url for
# Wasabi/B2); populated once in main() after the secret file is sourced.
ENDPOINT_ARGS=()

usage() {
  cat <<'EOF'
Usage:
  APP_DIR=/opt/proxpanel /opt/proxpanel/infra/deploy/db-backup-replicate-offsite.sh [--backup-dir <dir>]

Reads replication settings from /opt/proxpanel/.backup.env.
Required keys for S3-compatible replication:
  OFFSITE_BACKUP_ENABLED=true
  OFFSITE_S3_BUCKET=<bucket-name>
  OFFSITE_S3_ACCESS_KEY_ID=<access-key>
  OFFSITE_S3_SECRET_ACCESS_KEY=<secret-key>

Optional:
  OFFSITE_S3_REGION=us-east-1
  OFFSITE_S3_PREFIX=proxpanel/db
  OFFSITE_S3_ENDPOINT_URL=https://s3.us-west-1.wasabisys.com
  OFFSITE_S3_SSE=AES256
  OFFSITE_REPLICA_RETENTION_DAYS=30
EOF
}

# Timestamped (UTC) stdout logging.
log() {
  printf '[%s] %s\n' "$(date -u +'%Y-%m-%d %H:%M:%S UTC')" "$*"
}

# Print an error to stderr and abort.
die() {
  printf '[ERROR] %s\n' "$*" >&2
  exit 1
}

require_file() {
  [[ -f "$1" ]] || die "Missing required file: $1"
}

require_command() {
  command -v "$1" >/dev/null 2>&1 || die "Missing required command: $1"
}

# Newest timestamped directory under BACKUP_ROOT; for the YYYYmmdd-HHMMSS
# naming scheme, lexicographic order equals chronological order.
newest_backup_dir() {
  find "$BACKUP_ROOT" -mindepth 1 -maxdepth 1 -type d | sort | tail -n 1
}

# Delete remote backup prefixes older than OFFSITE_REPLICA_RETENTION_DAYS.
# No-op when retention is unset, non-numeric, or zero.
prune_remote_retention() {
  local target_bucket="$1"
  local target_prefix="$2"

  local keep_days="${OFFSITE_REPLICA_RETENTION_DAYS:-}"
  [[ "$keep_days" =~ ^[0-9]+$ && "$keep_days" -gt 0 ]] || return 0

  local cutoff_stamp
  cutoff_stamp="$(date -u -d "-${keep_days} days" +%Y%m%d-%H%M%S)"
  log "Applying offsite retention policy (${keep_days} days; cutoff=${cutoff_stamp})"

  local remote_listing candidate
  remote_listing="$(aws "${ENDPOINT_ARGS[@]}" s3 ls "s3://${target_bucket}/${target_prefix}/" || true)"

  while IFS= read -r candidate; do
    [[ -n "$candidate" ]] || continue
    # Timestamp prefixes sort lexicographically, so a string comparison
    # against the cutoff is a correct age test.
    if [[ "$candidate" < "$cutoff_stamp" ]]; then
      log "Pruning remote backup prefix ${candidate}"
      aws "${ENDPOINT_ARGS[@]}" s3 rm "s3://${target_bucket}/${target_prefix}/${candidate}/" --recursive >/dev/null
    fi
  done < <(printf '%s\n' "$remote_listing" | awk '/PRE [0-9]{8}-[0-9]{6}\// {print $2}' | tr -d '/')
}

main() {
  local cli_backup_dir=""

  while [[ $# -gt 0 ]]; do
    case "$1" in
      --backup-dir)
        cli_backup_dir="${2:-}"
        shift 2
        ;;
      -h|--help)
        usage
        exit 0
        ;;
      *)
        die "Unknown argument: $1"
        ;;
    esac
  done

  require_file "$SECRET_FILE"
  # shellcheck disable=SC1090
  source "$SECRET_FILE"

  if [[ "${OFFSITE_BACKUP_ENABLED:-false}" != "true" ]]; then
    log "Offsite replication disabled (OFFSITE_BACKUP_ENABLED != true)."
    exit 0
  fi

  require_command aws

  [[ -n "${OFFSITE_S3_BUCKET:-}" ]] || die "OFFSITE_S3_BUCKET is required."
  [[ -n "${OFFSITE_S3_ACCESS_KEY_ID:-}" ]] || die "OFFSITE_S3_ACCESS_KEY_ID is required."
  [[ -n "${OFFSITE_S3_SECRET_ACCESS_KEY:-}" ]] || die "OFFSITE_S3_SECRET_ACCESS_KEY is required."

  # Credentials go to the aws CLI through its standard environment variables.
  export AWS_ACCESS_KEY_ID="$OFFSITE_S3_ACCESS_KEY_ID"
  export AWS_SECRET_ACCESS_KEY="$OFFSITE_S3_SECRET_ACCESS_KEY"
  if [[ -n "${OFFSITE_S3_SESSION_TOKEN:-}" ]]; then
    export AWS_SESSION_TOKEN="$OFFSITE_S3_SESSION_TOKEN"
  fi
  export AWS_DEFAULT_REGION="${OFFSITE_S3_REGION:-us-east-1}"

  if [[ -n "${OFFSITE_S3_ENDPOINT_URL:-}" ]]; then
    ENDPOINT_ARGS=(--endpoint-url "$OFFSITE_S3_ENDPOINT_URL")
  fi

  local source_dir
  if [[ -n "$cli_backup_dir" ]]; then
    source_dir="$cli_backup_dir"
  else
    source_dir="$(newest_backup_dir)"
  fi
  [[ -n "$source_dir" && -d "$source_dir" ]] || die "Unable to locate backup directory."

  local payload checksum stamp remote_prefix destination
  payload="${source_dir}/proxpanel.sql.enc"
  checksum="${payload}.sha256"
  require_file "$payload"
  require_file "$checksum"

  stamp="$(basename "$source_dir")"
  remote_prefix="${OFFSITE_S3_PREFIX:-proxpanel/db}"
  destination="s3://${OFFSITE_S3_BUCKET}/${remote_prefix}/${stamp}/"

  local sse_args=()
  if [[ -n "${OFFSITE_S3_SSE:-}" ]]; then
    sse_args=(--sse "$OFFSITE_S3_SSE")
  fi

  log "Replicating encrypted backup to ${destination}"
  aws "${ENDPOINT_ARGS[@]}" s3 cp "$payload" "${destination}proxpanel.sql.enc" "${sse_args[@]}" --only-show-errors
  aws "${ENDPOINT_ARGS[@]}" s3 cp "$checksum" "${destination}proxpanel.sql.enc.sha256" "${sse_args[@]}" --only-show-errors

  log "Verifying offsite object presence"
  aws "${ENDPOINT_ARGS[@]}" s3 ls "${destination}proxpanel.sql.enc" >/dev/null
  aws "${ENDPOINT_ARGS[@]}" s3 ls "${destination}proxpanel.sql.enc.sha256" >/dev/null

  prune_remote_retention "$OFFSITE_S3_BUCKET" "$remote_prefix"
  log "Offsite replication completed successfully."
}

main "$@"
|
||||
46
infra/deploy/db-restore-test-job.sh
Normal file
46
infra/deploy/db-restore-test-job.sh
Normal file
@@ -0,0 +1,46 @@
|
||||
#!/usr/bin/env bash
# db-restore-test-job.sh — cron entry point: run the daily restore test and
# dispatch an alert on failure (and, when opted in via
# BACKUP_ALERT_SEND_SUCCESS, on success as well).
set -Eeuo pipefail

APP_DIR="${APP_DIR:-/opt/proxpanel}"
SECRET_FILE="${SECRET_FILE:-$APP_DIR/.backup.env}"

RESTORE_SCRIPT="${APP_DIR}/infra/deploy/db-restore-test.sh"
NOTIFY_SCRIPT="${APP_DIR}/infra/deploy/notify-backup-alert.sh"

# ERR-trap handler: send a critical alert naming the failing line number.
report_failure() {
  local failed_line="$1"
  local alert_text="Daily restore test failed (line ${failed_line}) on host $(hostname -f 2>/dev/null || hostname)"
  APP_DIR="$APP_DIR" "$NOTIFY_SCRIPT" \
    --event restore_test_failed \
    --severity critical \
    --status failed \
    --source db-restore-test-job \
    --message "$alert_text" \
    --context-json "{\"line\":${failed_line}}"
}

# Succeeds (exit 0) only when .backup.env opts in to success notifications.
success_alerts_enabled() {
  local flag="false"
  if [[ -f "$SECRET_FILE" ]]; then
    # shellcheck disable=SC1090
    source "$SECRET_FILE"
    flag="${BACKUP_ALERT_SEND_SUCCESS:-false}"
  fi
  [[ "$flag" == "true" ]]
}

main() {
  trap 'report_failure $LINENO' ERR

  APP_DIR="$APP_DIR" "$RESTORE_SCRIPT"

  if success_alerts_enabled; then
    APP_DIR="$APP_DIR" "$NOTIFY_SCRIPT" \
      --event restore_test_success \
      --severity info \
      --status ok \
      --source db-restore-test-job \
      --message "Daily restore test completed successfully"
  fi

  trap - ERR
}

main "$@"
|
||||
114
infra/deploy/db-restore-test.sh
Executable file
114
infra/deploy/db-restore-test.sh
Executable file
@@ -0,0 +1,114 @@
|
||||
#!/usr/bin/env bash
# db-restore-test.sh — prove the newest encrypted backup is actually
# restorable: verify its checksum, decrypt it, load it into a throwaway
# postgres container, and sanity-check the resulting schema.
#
# Fixes over previous revision:
#   - the decrypted SQL is fed to psql via stdin redirection instead of a
#     useless `cat | docker exec` pipeline;
#   - the decrypted plaintext dump is chmod 600 as defense in depth (the 700
#     TMP_ROOT directory already blocks other users, but the file itself was
#     created with default permissions).
set -Eeuo pipefail

APP_DIR="${APP_DIR:-/opt/proxpanel}"
SECRET_FILE="${SECRET_FILE:-$APP_DIR/.backup.env}"
BACKUP_ROOT="${BACKUP_ROOT:-/opt/proxpanel-backups/daily}"
TMP_ROOT="${TMP_ROOT:-/tmp/proxpanel-restore-test}"
TEST_CONTAINER="${TEST_CONTAINER:-proxpanel-restore-test}"
PG_IMAGE="${PG_IMAGE:-postgres:16-alpine}"
PG_USER="${PG_USER:-proxpanel}"
PG_PASSWORD="${PG_PASSWORD:-restoretestpass}"
PG_DB="${PG_DB:-proxpanel_restore}"

# Always remove the throwaway container and the decrypted SQL, even on failure.
cleanup() {
  docker rm -f "$TEST_CONTAINER" >/dev/null 2>&1 || true
  rm -rf "$TMP_ROOT"
}
trap cleanup EXIT

# Timestamped (UTC) stdout logging.
log() {
  printf '[%s] %s\n' "$(date -u +'%Y-%m-%d %H:%M:%S UTC')" "$*"
}

# Print an error to stderr and abort.
die() {
  printf '[ERROR] %s\n' "$*" >&2
  exit 1
}

require_file() {
  [[ -f "$1" ]] || die "Missing required file: $1"
}

require_command() {
  command -v "$1" >/dev/null 2>&1 || die "Missing required command: $1"
}

# Newest encrypted dump across all timestamped backup directories;
# lexicographic sort matches chronological order for the naming scheme.
find_latest_encrypted_backup() {
  find "$BACKUP_ROOT" -mindepth 2 -maxdepth 2 -type f -name 'proxpanel.sql.enc' | sort | tail -n 1
}

# Poll pg_isready for up to 60 seconds before giving up.
wait_pg_ready() {
  local tries=60
  local i
  for ((i=1; i<=tries; i++)); do
    if docker exec "$TEST_CONTAINER" pg_isready -U "$PG_USER" -d "$PG_DB" >/dev/null 2>&1; then
      return
    fi
    sleep 1
  done
  die "Restore test postgres did not become ready."
}

main() {
  require_command docker
  require_command openssl
  require_command sha256sum
  require_file "$SECRET_FILE"

  # shellcheck disable=SC1090
  source "$SECRET_FILE"
  [[ -n "${BACKUP_ENCRYPTION_KEY:-}" ]] || die "BACKUP_ENCRYPTION_KEY is empty in $SECRET_FILE"
  # Exported so openssl can read the key from the environment (-pass env:).
  export BACKUP_ENCRYPTION_KEY

  local encrypted_backup checksum_file latest_dir decrypted_sql
  encrypted_backup="$(find_latest_encrypted_backup)"
  [[ -n "$encrypted_backup" ]] || die "No encrypted backup found in $BACKUP_ROOT"
  checksum_file="${encrypted_backup}.sha256"
  require_file "$checksum_file"

  latest_dir="$(dirname "$encrypted_backup")"
  mkdir -p "$TMP_ROOT"
  chmod 700 "$TMP_ROOT"
  decrypted_sql="${TMP_ROOT}/restore.sql"

  log "Verifying checksum for $encrypted_backup"
  # sha256sum -c resolves the recorded filename relative to the backup dir.
  (cd "$latest_dir" && sha256sum -c "$(basename "$checksum_file")")

  log "Decrypting latest backup"
  openssl enc -d -aes-256-cbc -pbkdf2 -iter 200000 \
    -in "$encrypted_backup" \
    -out "$decrypted_sql" \
    -pass env:BACKUP_ENCRYPTION_KEY
  chmod 600 "$decrypted_sql"

  log "Starting isolated restore-test postgres container"
  docker rm -f "$TEST_CONTAINER" >/dev/null 2>&1 || true
  docker run -d --name "$TEST_CONTAINER" \
    -e POSTGRES_USER="$PG_USER" \
    -e POSTGRES_PASSWORD="$PG_PASSWORD" \
    -e POSTGRES_DB="$PG_DB" \
    "$PG_IMAGE" >/dev/null

  wait_pg_ready

  log "Applying restored SQL into test DB"
  # stdin redirection instead of `cat file | docker exec -i ...`.
  docker exec -i "$TEST_CONTAINER" psql -U "$PG_USER" -d "$PG_DB" <"$decrypted_sql" >/dev/null

  log "Running restore sanity checks"
  local table_count required_table_count
  table_count="$(
    docker exec "$TEST_CONTAINER" psql -U "$PG_USER" -d "$PG_DB" -Atc \
      "select count(*) from information_schema.tables where table_schema='public';"
  )"
  required_table_count="$(
    docker exec "$TEST_CONTAINER" psql -U "$PG_USER" -d "$PG_DB" -Atc \
      "select count(*) from information_schema.tables where table_schema='public' and table_name in ('User','Tenant','AuditLog');"
  )"

  [[ "${table_count:-0}" -ge 10 ]] || die "Restore sanity check failed (unexpected table count: $table_count)"
  [[ "${required_table_count:-0}" -eq 3 ]] || die "Restore sanity check failed (required tables missing)"

  log "Restore test passed (tables=$table_count)"
}

main "$@"
|
||||
76
infra/deploy/docker-compose.production.yml
Normal file
76
infra/deploy/docker-compose.production.yml
Normal file
@@ -0,0 +1,76 @@
|
||||
# Production stack: postgres + API backend + nginx-served frontend.
# Runtime secrets and port overrides come from .env.production.
services:
  postgres:
    image: postgres:16-alpine
    container_name: proxpanel-postgres
    restart: unless-stopped
    environment:
      POSTGRES_USER: ${POSTGRES_USER:-proxpanel}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_DB: ${POSTGRES_DB:-proxpanel}
    volumes:
      - postgres_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-proxpanel} -d ${POSTGRES_DB:-proxpanel}"]
      interval: 10s
      timeout: 5s
      retries: 10

  backend:
    build:
      context: ../../backend
    container_name: proxpanel-backend
    restart: unless-stopped
    depends_on:
      postgres:
        condition: service_healthy
    environment:
      NODE_ENV: production
      PORT: "8080"
      DATABASE_URL: postgresql://${POSTGRES_USER:-proxpanel}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB:-proxpanel}
      JWT_SECRET: ${JWT_SECRET}
      JWT_REFRESH_SECRET: ${JWT_REFRESH_SECRET}
      JWT_EXPIRES_IN: ${JWT_EXPIRES_IN:-15m}
      JWT_REFRESH_EXPIRES_IN: ${JWT_REFRESH_EXPIRES_IN:-30d}
      CORS_ORIGIN: ${CORS_ORIGIN}
      RATE_LIMIT_WINDOW_MS: ${RATE_LIMIT_WINDOW_MS:-60000}
      RATE_LIMIT_MAX: ${RATE_LIMIT_MAX:-600}
      AUTH_RATE_LIMIT_WINDOW_MS: ${AUTH_RATE_LIMIT_WINDOW_MS:-60000}
      AUTH_RATE_LIMIT_MAX: ${AUTH_RATE_LIMIT_MAX:-20}
      ENABLE_SCHEDULER: ${ENABLE_SCHEDULER:-true}
      # Cron defaults are quoted so the `*`-heavy expressions can never be
      # mis-parsed by generic YAML tooling; the values are unchanged.
      BILLING_CRON: "${BILLING_CRON:-0 * * * *}"
      BACKUP_CRON: "${BACKUP_CRON:-*/15 * * * *}"
      POWER_SCHEDULE_CRON: "${POWER_SCHEDULE_CRON:-* * * * *}"
      MONITORING_CRON: "${MONITORING_CRON:-*/5 * * * *}"
      PROXMOX_TIMEOUT_MS: ${PROXMOX_TIMEOUT_MS:-15000}
      ADMIN_EMAIL: ${ADMIN_EMAIL}
      ADMIN_PASSWORD: ${ADMIN_PASSWORD}
    expose:
      - "8080"
    # Bound to loopback only; the host nginx/edge proxy terminates public
    # traffic and forwards to this port.
    ports:
      - "127.0.0.1:${BACKEND_PORT:-8080}:8080"
    healthcheck:
      test: ["CMD-SHELL", "wget -q -O - http://localhost:8080/api/health >/dev/null 2>&1 || exit 1"]
      interval: 15s
      timeout: 5s
      retries: 10

  frontend:
    build:
      context: ../../
      args:
        VITE_API_BASE_URL: ""
    container_name: proxpanel-frontend
    restart: unless-stopped
    depends_on:
      backend:
        condition: service_healthy
    # Loopback-only bind, same as the backend.
    ports:
      - "127.0.0.1:${FRONTEND_PORT:-80}:80"
    healthcheck:
      test: ["CMD-SHELL", "nginx -t >/dev/null 2>&1 || exit 1"]
      interval: 15s
      timeout: 5s
      retries: 10

volumes:
  postgres_data:
|
||||
105
infra/deploy/git-pat-sync.sh
Normal file
105
infra/deploy/git-pat-sync.sh
Normal file
@@ -0,0 +1,105 @@
|
||||
#!/usr/bin/env bash
# git-pat-sync.sh — pull the latest code using PAT auth without persisting
# password credentials on disk.
#
# Security fix over previous revision: the Authorization header is passed to
# git through the GIT_CONFIG_COUNT / GIT_CONFIG_KEY_0 / GIT_CONFIG_VALUE_0
# environment variables (Git >= 2.31) instead of `git -c http.extraHeader=...`.
# The old form put the base64-encoded PAT on the git command line, where it
# was visible to every local user via `ps`/ /proc for the duration of the
# fetch/pull.
#
# Required env vars:
#   GIT_USERNAME
#   GIT_PAT
#
# Example:
#   export GIT_USERNAME="austindebest"
#   export GIT_PAT="***"
#   bash infra/deploy/git-pat-sync.sh --repo-dir /opt/proxpanel --branch main
set -Eeuo pipefail

REPO_DIR="/opt/proxpanel"
BRANCH="main"
REMOTE="origin"
REPO_URL=""

# Print an error to stderr and abort.
die() {
  printf '\n[ERROR] %s\n' "$*" >&2
  exit 1
}

usage() {
  cat <<'EOF'
Usage:
  bash infra/deploy/git-pat-sync.sh [options]

Options:
  --repo-dir <path>   Repository directory (default: /opt/proxpanel)
  --branch <name>     Branch to pull (default: main)
  --remote <name>     Remote to use (default: origin)
  --repo-url <url>    Optional URL to set on remote before sync
  -h, --help          Show this help

Required environment variables:
  GIT_USERNAME   Git username
  GIT_PAT        Personal Access Token
EOF
}

parse_args() {
  while [[ $# -gt 0 ]]; do
    case "$1" in
      --repo-dir)
        REPO_DIR="${2:-}"
        shift 2
        ;;
      --branch)
        BRANCH="${2:-}"
        shift 2
        ;;
      --remote)
        REMOTE="${2:-}"
        shift 2
        ;;
      --repo-url)
        REPO_URL="${2:-}"
        shift 2
        ;;
      -h|--help)
        usage
        exit 0
        ;;
      *)
        die "Unknown argument: $1"
        ;;
    esac
  done
}

require_env() {
  [[ -n "${GIT_USERNAME:-}" ]] || die "GIT_USERNAME is required."
  [[ -n "${GIT_PAT:-}" ]] || die "GIT_PAT is required."
}

# Emit "Authorization: Basic <base64(user:pat)>" for HTTP basic auth.
build_auth_header() {
  local pair
  pair="${GIT_USERNAME}:${GIT_PAT}"
  printf 'Authorization: Basic %s' "$(printf '%s' "${pair}" | base64 | tr -d '\n')"
}

sync_repo() {
  [[ -d "${REPO_DIR}/.git" ]] || die "Not a git repo: ${REPO_DIR}"

  if [[ -n "${REPO_URL}" ]]; then
    git -C "${REPO_DIR}" remote set-url "${REMOTE}" "${REPO_URL}"
  fi

  # Hand the auth header to git via its config environment variables so the
  # secret never appears on the command line (requires Git >= 2.31).
  export GIT_CONFIG_COUNT=1
  export GIT_CONFIG_KEY_0="http.extraHeader"
  GIT_CONFIG_VALUE_0="$(build_auth_header)"
  export GIT_CONFIG_VALUE_0

  git -C "${REPO_DIR}" fetch "${REMOTE}" --prune
  git -C "${REPO_DIR}" checkout "${BRANCH}"
  git -C "${REPO_DIR}" pull --ff-only "${REMOTE}" "${BRANCH}"

  # Scrub the credential from this shell's environment once done.
  unset GIT_CONFIG_COUNT GIT_CONFIG_KEY_0 GIT_CONFIG_VALUE_0
}

main() {
  parse_args "$@"
  require_env
  sync_repo
  echo "Repository synced successfully: ${REPO_DIR} (${BRANCH})"
}

main "$@"
|
||||
345
infra/deploy/install-proxpanel.sh
Normal file
345
infra/deploy/install-proxpanel.sh
Normal file
@@ -0,0 +1,345 @@
|
||||
#!/usr/bin/env bash
|
||||
set -Eeuo pipefail
|
||||
|
||||
APP_DIR="/opt/proxpanel"
|
||||
REPO_URL=""
|
||||
BRANCH="main"
|
||||
PUBLIC_URL=""
|
||||
ADMIN_EMAIL="admin@proxpanel.local"
|
||||
ADMIN_PASSWORD=""
|
||||
POSTGRES_PASSWORD=""
|
||||
FRONTEND_PORT="80"
|
||||
BACKEND_PORT="8080"
|
||||
CONFIGURE_UFW="false"
|
||||
|
||||
log() {
|
||||
printf '\n[%s] %s\n' "$(date +'%Y-%m-%d %H:%M:%S')" "$*"
|
||||
}
|
||||
|
||||
die() {
|
||||
printf '\n[ERROR] %s\n' "$*" >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
usage() {
|
||||
cat <<'EOF'
|
||||
Usage:
|
||||
bash infra/deploy/install-proxpanel.sh [options]
|
||||
|
||||
Options:
|
||||
--repo-url <url> Git repository URL (required if app is not already in /opt/proxpanel)
|
||||
--branch <name> Git branch to deploy (default: main)
|
||||
--app-dir <path> Deployment directory (default: /opt/proxpanel)
|
||||
--public-url <url> Public base URL (example: http://102.69.243.167)
|
||||
--admin-email <email> Initial admin email (default: admin@proxpanel.local)
|
||||
--admin-password <pass> Initial admin password (auto-generated if omitted)
|
||||
--postgres-password <pass> Postgres password (auto-generated if omitted)
|
||||
--frontend-port <port> Public frontend port (default: 80)
|
||||
--backend-port <port> Local backend bind port (default: 8080 on 127.0.0.1)
|
||||
--configure-ufw Allow OpenSSH + frontend port via UFW (if available)
|
||||
-h, --help Show this help
|
||||
|
||||
Examples:
|
||||
bash infra/deploy/install-proxpanel.sh \
|
||||
--repo-url https://github.com/your-org/proxpanel.git \
|
||||
--branch main \
|
||||
--public-url http://102.69.243.167 \
|
||||
--admin-email admin@yourdomain.com
|
||||
EOF
|
||||
}
|
||||
|
||||
random_secret() {
|
||||
openssl rand -base64 72 | tr -d '\n'
|
||||
}
|
||||
|
||||
random_db_password() {
|
||||
# URL-safe hex string to avoid DATABASE_URL parsing issues.
|
||||
openssl rand -hex 32 | tr -d '\n'
|
||||
}
|
||||
|
||||
parse_args() {
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--repo-url)
|
||||
REPO_URL="${2:-}"
|
||||
shift 2
|
||||
;;
|
||||
--branch)
|
||||
BRANCH="${2:-}"
|
||||
shift 2
|
||||
;;
|
||||
--app-dir)
|
||||
APP_DIR="${2:-}"
|
||||
shift 2
|
||||
;;
|
||||
--public-url)
|
||||
PUBLIC_URL="${2:-}"
|
||||
shift 2
|
||||
;;
|
||||
--admin-email)
|
||||
ADMIN_EMAIL="${2:-}"
|
||||
shift 2
|
||||
;;
|
||||
--admin-password)
|
||||
ADMIN_PASSWORD="${2:-}"
|
||||
shift 2
|
||||
;;
|
||||
--postgres-password)
|
||||
POSTGRES_PASSWORD="${2:-}"
|
||||
shift 2
|
||||
;;
|
||||
--frontend-port)
|
||||
FRONTEND_PORT="${2:-}"
|
||||
shift 2
|
||||
;;
|
||||
--backend-port)
|
||||
BACKEND_PORT="${2:-}"
|
||||
shift 2
|
||||
;;
|
||||
--configure-ufw)
|
||||
CONFIGURE_UFW="true"
|
||||
shift
|
||||
;;
|
||||
-h|--help)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
die "Unknown argument: $1"
|
||||
;;
|
||||
esac
|
||||
done
|
||||
}
|
||||
|
||||
ensure_root() {
|
||||
if [[ "${EUID:-$(id -u)}" -ne 0 ]]; then
|
||||
die "Run as root (or with sudo)."
|
||||
fi
|
||||
}
|
||||
|
||||
install_prereqs() {
  # Install base OS packages the installer depends on: curl/git for fetching,
  # openssl for secret generation, jq for JSON parsing, rsync for local-source
  # copies. Debian/Ubuntu only (uses apt-get).
  log "Installing OS prerequisites..."
  # Suppress interactive dpkg/apt prompts during unattended install.
  export DEBIAN_FRONTEND=noninteractive
  apt-get update -y
  # NOTE(review): "awscli" is not used anywhere in this script and the package
  # is unavailable on some newer Ubuntu releases — confirm it is needed.
  apt-get install -y ca-certificates curl git openssl jq rsync awscli
}
|
||||
|
||||
install_docker_if_needed() {
  # Ensure Docker Engine is installed and running, and that the
  # `docker compose` plugin is available; dies otherwise.
  if ! command -v docker >/dev/null 2>&1; then
    log "Installing Docker..."
    # Official convenience installer from get.docker.com.
    curl -fsSL https://get.docker.com | sh
  else
    log "Docker already installed."
  fi

  # Enable at boot (best-effort) and make sure the daemon is up right now.
  systemctl enable docker >/dev/null 2>&1 || true
  systemctl start docker

  if ! docker compose version >/dev/null 2>&1; then
    die "Docker Compose plugin is required but not available."
  fi
}
|
||||
|
||||
sync_source() {
  # Materialize the application source in APP_DIR, trying three strategies:
  #   1) APP_DIR is already a git checkout  -> fetch + fast-forward BRANCH
  #   2) --repo-url was supplied            -> fresh single-branch clone
  #   3) script run from a project checkout -> rsync the working tree over
  if [[ -d "${APP_DIR}/.git" ]]; then
    log "Updating existing repository in ${APP_DIR}..."
    git -C "${APP_DIR}" fetch --all --prune
    git -C "${APP_DIR}" checkout "${BRANCH}"
    # --ff-only: never create merge commits on a deployment host.
    git -C "${APP_DIR}" pull --ff-only origin "${BRANCH}"
    return
  fi

  if [[ -n "${REPO_URL}" ]]; then
    log "Cloning repository into ${APP_DIR}..."
    mkdir -p "$(dirname "${APP_DIR}")"
    git clone --branch "${BRANCH}" --single-branch "${REPO_URL}" "${APP_DIR}"
    return
  fi

  # Heuristic: current directory looks like the project root.
  if [[ -f "./package.json" && -d "./backend" && -d "./infra" ]]; then
    log "No repo URL provided; copying current directory into ${APP_DIR}..."
    mkdir -p "${APP_DIR}"
    # --delete keeps APP_DIR an exact mirror; VCS metadata and dependency
    # trees are rebuilt inside the containers, so they are excluded here.
    rsync -a --delete --exclude .git --exclude node_modules --exclude backend/node_modules ./ "${APP_DIR}/"
    return
  fi

  die "Could not determine source. Provide --repo-url or run this script from project root."
}
|
||||
|
||||
validate_project_layout() {
  # Fail fast when the synced tree is missing files the deployment requires.
  if [[ ! -f "${APP_DIR}/infra/deploy/docker-compose.production.yml" ]]; then
    die "Missing infra/deploy/docker-compose.production.yml"
  fi
  if [[ ! -f "${APP_DIR}/backend/Dockerfile" ]]; then
    die "Missing backend/Dockerfile"
  fi
  if [[ ! -f "${APP_DIR}/Dockerfile" ]]; then
    die "Missing frontend Dockerfile"
  fi
}
|
||||
|
||||
infer_public_url() {
  # Default PUBLIC_URL to http://<first local IP> when the caller did not set it.
  if [[ -n "${PUBLIC_URL}" ]]; then
    return
  fi

  local ip
  # `hostname -I` can fail (or not exist) on some systems; if this script runs
  # under `set -e -o pipefail` that would abort the whole install, so tolerate
  # failure and fall back to loopback instead.
  ip="$(hostname -I 2>/dev/null | awk '{print $1}')" || ip=""
  [[ -n "${ip}" ]] || ip="127.0.0.1"
  PUBLIC_URL="http://${ip}"
}
|
||||
|
||||
write_env_file() {
  # Generate ${APP_DIR}/.env.production with all runtime configuration.
  # Secrets (admin password, DB password, JWT secrets) are auto-generated
  # unless supplied on the command line.
  # The appended "A9!" guarantees upper/digit/symbol characters are present.
  [[ -n "${ADMIN_PASSWORD}" ]] || ADMIN_PASSWORD="$(openssl rand -base64 18 | tr -d '\n' | tr '/+' 'ab')A9!"
  [[ -n "${POSTGRES_PASSWORD}" ]] || POSTGRES_PASSWORD="$(random_db_password)"

  local jwt_secret jwt_refresh_secret env_file
  jwt_secret="$(random_secret)"
  jwt_refresh_secret="$(random_secret)"
  env_file="${APP_DIR}/.env.production"

  log "Writing production env file..."
  # Create the secrets file root-only (umask 077), but restore the previous
  # umask afterwards: the original unconditional `umask 077` leaked into every
  # later step (docker builds, summary files), silently tightening their modes.
  local previous_umask
  previous_umask="$(umask)"
  umask 077
  cat > "${env_file}" <<EOF
POSTGRES_USER=proxpanel
POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
POSTGRES_DB=proxpanel

JWT_SECRET=${jwt_secret}
JWT_REFRESH_SECRET=${jwt_refresh_secret}
JWT_EXPIRES_IN=15m
JWT_REFRESH_EXPIRES_IN=30d

CORS_ORIGIN=${PUBLIC_URL}
ADMIN_EMAIL=${ADMIN_EMAIL}
ADMIN_PASSWORD=${ADMIN_PASSWORD}

ENABLE_SCHEDULER=true
RATE_LIMIT_WINDOW_MS=60000
RATE_LIMIT_MAX=600
AUTH_RATE_LIMIT_WINDOW_MS=60000
AUTH_RATE_LIMIT_MAX=20
PROXMOX_TIMEOUT_MS=15000

FRONTEND_PORT=${FRONTEND_PORT}
BACKEND_PORT=${BACKEND_PORT}
EOF
  umask "${previous_umask}"
}
|
||||
|
||||
deploy_stack() {
  # Build images and (re)start the production compose stack in APP_DIR.
  log "Building and starting production stack..."
  cd "${APP_DIR}"
  # Pull is best-effort (`|| true`): registries may be unreachable; the
  # subsequent build can still proceed from local cache.
  docker compose --env-file .env.production -f infra/deploy/docker-compose.production.yml pull || true
  docker compose --env-file .env.production -f infra/deploy/docker-compose.production.yml up -d --build
}
|
||||
|
||||
wait_for_health() {
  # Poll the local backend health endpoint (60 tries, 3s apart = ~3 minutes)
  # and die if it never comes up.
  log "Waiting for API health..."
  local attempts_left=60
  while (( attempts_left > 0 )); do
    if curl -fsS "http://127.0.0.1:${BACKEND_PORT}/api/health" >/dev/null 2>&1; then
      log "Backend is healthy."
      return
    fi
    sleep 3
    (( attempts_left-- ))
  done
  die "Backend health check failed."
}
|
||||
|
||||
apply_database_schema() {
  # Apply the Prisma schema inside the running backend container.
  # Prefers migration deploy; falls back to schema push when no migrations
  # exist (or deploy fails).
  log "Applying database schema..."
  cd "${APP_DIR}"

  # -T: no TTY allocation, required for non-interactive `compose exec`.
  if docker compose --env-file .env.production -f infra/deploy/docker-compose.production.yml exec -T backend npm run prisma:deploy; then
    log "Schema migration deploy completed."
    return
  fi

  log "prisma:deploy failed or no migrations found. Falling back to prisma:push..."
  docker compose --env-file .env.production -f infra/deploy/docker-compose.production.yml exec -T backend npm run prisma:push
  log "Schema push completed."
}
|
||||
|
||||
seed_database() {
  # Run the Prisma seed script inside the backend container. The log message
  # states the seed is idempotent, so re-running the installer is safe.
  log "Running Prisma seed (idempotent)..."
  cd "${APP_DIR}"
  docker compose --env-file .env.production -f infra/deploy/docker-compose.production.yml exec -T backend npm run prisma:seed
}
|
||||
|
||||
verify_login() {
  # End-to-end smoke test: log in through the public frontend port with the
  # seeded admin credentials and check a token pair comes back.
  log "Verifying login endpoint with seeded admin credentials..."
  local response_file status
  # Use an unpredictable temp file instead of the fixed, world-guessable
  # /tmp/proxpanel-login.json (clobber/symlink hazard on shared hosts),
  # and remove it on every exit path.
  response_file="$(mktemp)"
  status="$(curl -s -o "${response_file}" -w "%{http_code}" \
    -X POST "http://127.0.0.1:${FRONTEND_PORT}/api/auth/login" \
    -H "Content-Type: application/json" \
    -d "{\"email\":\"${ADMIN_EMAIL}\",\"password\":\"${ADMIN_PASSWORD}\"}")"

  if [[ "${status}" != "200" ]]; then
    # Surface the server's error body to aid debugging, then fail.
    cat "${response_file}" >&2 || true
    rm -f "${response_file}"
    die "Login verification failed with status ${status}."
  fi

  # jq -e: exit non-zero unless both tokens are present and truthy.
  if ! jq -e '.token and .refresh_token' "${response_file}" >/dev/null 2>&1; then
    rm -f "${response_file}"
    die "Login response missing token."
  fi
  rm -f "${response_file}"
  log "Login verification passed."
}
|
||||
|
||||
configure_ufw_if_requested() {
  # Open SSH plus the public frontend port when --configure-ufw was passed.
  # No-op when the flag is off or ufw is not installed.
  [[ "${CONFIGURE_UFW}" == "true" ]] || return 0

  if ! command -v ufw >/dev/null 2>&1; then
    log "UFW not installed; skipping firewall configuration."
    return
  fi

  log "Configuring UFW rules..."
  # Each rule is best-effort so a pre-existing rule never aborts the install.
  ufw allow OpenSSH >/dev/null 2>&1 || true
  ufw allow "${FRONTEND_PORT}/tcp" >/dev/null 2>&1 || true
  ufw --force enable >/dev/null 2>&1 || true
}
|
||||
|
||||
write_summary() {
  # Persist a human-readable install summary — including the admin password —
  # to a root-only file for later reference.
  local summary_file="/root/proxpanel-install-summary.txt"
  # Unquoted EOF: variables and the sed command substitution expand here.
  cat > "${summary_file}" <<EOF
ProxPanel production deployment completed.

Public URL: ${PUBLIC_URL}
Server IP: $(echo "${PUBLIC_URL}" | sed -E 's#^https?://##')
Admin Email: ${ADMIN_EMAIL}
Admin Password: ${ADMIN_PASSWORD}

Deployment Directory: ${APP_DIR}
Compose File: infra/deploy/docker-compose.production.yml
Env File: ${APP_DIR}/.env.production

Quick checks:
curl -fsS http://127.0.0.1:${BACKEND_PORT}/api/health
docker compose --env-file .env.production -f infra/deploy/docker-compose.production.yml ps

IMPORTANT:
1) Change admin password immediately after first login.
2) Configure Proxmox credentials in Settings -> Proxmox.
3) Add TLS/reverse-proxy (Nginx/Caddy) if exposing publicly.
EOF
  # Summary contains credentials; restrict to root only.
  chmod 600 "${summary_file}"
  log "Summary saved to ${summary_file}"
}
|
||||
|
||||
main() {
  # Orchestrate the full installation, in dependency order:
  # args -> root check -> prerequisites -> Docker -> source sync/validation ->
  # config -> stack deploy -> health gate -> schema + seed -> login smoke test
  # -> optional firewall -> summary.
  parse_args "$@"
  ensure_root
  install_prereqs
  install_docker_if_needed
  sync_source
  validate_project_layout
  infer_public_url
  write_env_file
  deploy_stack
  wait_for_health
  apply_database_schema
  seed_database
  verify_login
  configure_ufw_if_requested
  write_summary

  log "Deployment finished successfully."
  printf '\nOpen: %s\n' "${PUBLIC_URL}"
  printf 'Admin email: %s\n' "${ADMIN_EMAIL}"
  printf 'Admin password: %s\n' "${ADMIN_PASSWORD}"
}

main "$@"
|
||||
168
infra/deploy/notify-backup-alert.sh
Normal file
168
infra/deploy/notify-backup-alert.sh
Normal file
@@ -0,0 +1,168 @@
|
||||
#!/usr/bin/env bash
# notify-backup-alert.sh — dispatch backup-job alerts to webhook/email endpoints.
set -Eeuo pipefail

# Deployment layout; SECRET_FILE holds alert destinations (see usage()).
APP_DIR="${APP_DIR:-/opt/proxpanel}"
SECRET_FILE="${SECRET_FILE:-$APP_DIR/.backup.env}"

# Alert fields; may be preset via environment or overridden by CLI flags below.
EVENT="${EVENT:-}"
SEVERITY="${SEVERITY:-warning}"
STATUS="${STATUS:-failed}"
MESSAGE="${MESSAGE:-}"
SOURCE="${SOURCE:-backup-jobs}"
# Free-form JSON object attached to the payload; defaults to empty object.
CONTEXT_JSON="${CONTEXT_JSON:-{}}"
|
||||
|
||||
usage() {
  # Print CLI help. Heredoc is quoted ('EOF') so nothing inside is expanded.
  cat <<'EOF'
Usage:
APP_DIR=/opt/proxpanel /opt/proxpanel/infra/deploy/notify-backup-alert.sh \
--event <backup_failed|restore_test_failed|...> \
--severity <info|warning|critical> \
--status <ok|failed> \
--message "<human-readable-message>" \
[--source backup-cron] \
[--context-json '{"key":"value"}']

Alert destinations are read from /opt/proxpanel/.backup.env:
BACKUP_ALERT_WEBHOOK_URL=
BACKUP_ALERT_EMAIL_WEBHOOK_URL=
BACKUP_ALERT_EMAIL_TO=
BACKUP_ALERT_SUBJECT_PREFIX=[ProxPanel Backup]
EOF
}
|
||||
|
||||
log() {
  # Timestamped (UTC) log line on stdout.
  printf '[%s] %s\n' "$(date -u +'%Y-%m-%d %H:%M:%S UTC')" "$*"
}

require_command() {
  # Return 0 when $1 is on PATH; otherwise warn on stderr and return 1
  # (callers decide whether a missing tool is fatal).
  if ! command -v "$1" >/dev/null 2>&1; then
    printf '[WARN] Missing command: %s\n' "$1" >&2
    return 1
  fi
}
|
||||
|
||||
parse_args() {
  # Parse alert flags into globals. This script is best-effort (it must never
  # fail the calling backup job), so problems are warnings, not fatal errors:
  # unknown flags are skipped, and a value flag at the end of the argument list
  # no longer trips `shift 2` under `set -e` as the original code did.
  while [[ $# -gt 0 ]]; do
    case "$1" in
      --event|--severity|--status|--message|--source|--context-json)
        if [[ $# -lt 2 ]]; then
          printf '[WARN] Missing value for %s\n' "$1" >&2
          shift
          continue
        fi
        case "$1" in
          --event) EVENT="$2" ;;
          --severity) SEVERITY="$2" ;;
          --status) STATUS="$2" ;;
          --message) MESSAGE="$2" ;;
          --source) SOURCE="$2" ;;
          --context-json) CONTEXT_JSON="$2" ;;
        esac
        shift 2
        ;;
      -h|--help)
        usage
        exit 0
        ;;
      *)
        printf '[WARN] Ignoring unknown argument: %s\n' "$1" >&2
        shift
        ;;
    esac
  done
}
|
||||
|
||||
main() {
  # Build a structured alert payload and POST it to the configured webhook
  # and/or email-gateway endpoint. Missing tools or destinations degrade to
  # a no-op ("skipped") rather than failing the calling backup job.
  parse_args "$@"
  require_command curl || exit 0
  require_command jq || exit 0

  if [[ -f "$SECRET_FILE" ]]; then
    # shellcheck disable=SC1090
    source "$SECRET_FILE"
  fi

  [[ -n "$EVENT" ]] || EVENT="backup_job_event"
  [[ -n "$MESSAGE" ]] || MESSAGE="Backup alert event raised"

  # Normalize/validate caller-supplied context JSON. The two-step assignment
  # fixes the original `$(... | jq -c . || printf '{}')` form, which appended
  # '{}' to jq's partial output when the input had trailing garbage
  # (e.g. '{"a":1} junk' became the invalid string '{"a":1}{}').
  local context
  if ! context="$(printf '%s' "$CONTEXT_JSON" | jq -c '.' 2>/dev/null)"; then
    context='{}'
  fi
  # Empty input makes jq exit 0 with no output; --argjson would then fail.
  [[ -n "$context" ]] || context='{}'

  local payload now
  now="$(date -u +'%Y-%m-%dT%H:%M:%SZ')"
  payload="$(
    jq -n \
      --arg type "backup.alert" \
      --arg event "$EVENT" \
      --arg severity "$SEVERITY" \
      --arg status "$STATUS" \
      --arg message "$MESSAGE" \
      --arg source "$SOURCE" \
      --arg timestamp "$now" \
      --argjson context "$context" \
      '{
        type: $type,
        event: $event,
        severity: $severity,
        status: $status,
        source: $source,
        message: $message,
        timestamp: $timestamp,
        context: $context
      }'
  )"

  local webhook_url email_webhook email_to subject_prefix subject status_webhook status_email
  webhook_url="${BACKUP_ALERT_WEBHOOK_URL:-}"
  email_webhook="${BACKUP_ALERT_EMAIL_WEBHOOK_URL:-}"
  email_to="${BACKUP_ALERT_EMAIL_TO:-${OPS_EMAIL:-}}"
  subject_prefix="${BACKUP_ALERT_SUBJECT_PREFIX:-[ProxPanel Backup]}"
  subject="${subject_prefix} ${EVENT} (${SEVERITY})"
  status_webhook="skipped"
  status_email="skipped"

  # Generic JSON webhook (e.g. chat integrations).
  if [[ -n "$webhook_url" ]]; then
    if curl -fsS -X POST "$webhook_url" -H "Content-Type: application/json" -d "$payload" >/dev/null; then
      status_webhook="sent"
    else
      status_webhook="failed"
    fi
  fi

  # Email gateway: wrap the alert payload with to/subject/message fields.
  if [[ -n "$email_webhook" && -n "$email_to" ]]; then
    local email_payload
    email_payload="$(
      jq -n \
        --arg type "backup.alert.email" \
        --arg to "$email_to" \
        --arg subject "$subject" \
        --arg message "$MESSAGE" \
        --argjson payload "$payload" \
        '{
          type: $type,
          to: $to,
          subject: $subject,
          message: $message,
          payload: $payload
        }'
    )"

    if curl -fsS -X POST "$email_webhook" -H "Content-Type: application/json" -d "$email_payload" >/dev/null; then
      status_email="sent"
    else
      status_email="failed"
    fi
  fi

  log "Alert dispatch result: webhook=${status_webhook}, email=${status_email}"
}

main "$@"
|
||||
186
infra/deploy/rollback-blue-green.sh
Executable file
186
infra/deploy/rollback-blue-green.sh
Executable file
@@ -0,0 +1,186 @@
|
||||
#!/usr/bin/env bash
# rollback-blue-green.sh — stage a backup as a "green" stack and optionally cut over.
set -Eeuo pipefail

# Blue (live) deployment directory and backup storage root.
APP_DIR="${APP_DIR:-/opt/proxpanel}"
BACKUP_ROOT="${BACKUP_ROOT:-/opt/proxpanel-backups}"
# Green (candidate) stack: isolated directory, compose project name, and ports.
GREEN_DIR="${GREEN_DIR:-/opt/proxpanel-green}"
GREEN_PROJECT="${GREEN_PROJECT:-proxpanel_green}"
GREEN_FRONTEND_PORT="${GREEN_FRONTEND_PORT:-18081}"
GREEN_BACKEND_PORT="${GREEN_BACKEND_PORT:-18080}"
# Nginx site whose proxy_pass is rewritten during --cutover.
NGINX_SITE="${NGINX_SITE:-/etc/nginx/sites-available/proxpanel.conf}"
|
||||
|
||||
usage() {
  # Print CLI help. Heredoc is quoted ('EOF') so nothing inside is expanded.
  cat <<'EOF'
Usage:
sudo bash infra/deploy/rollback-blue-green.sh --backup-dir /opt/proxpanel-backups/<timestamp> [--cutover]

Behavior:
1) Stages backup app/env into /opt/proxpanel-green
2) Boots green stack on ports 18081/18080
3) Restores DB dump from selected backup into green postgres
4) Verifies green API health
5) If --cutover is provided, switches Nginx upstream to green frontend port

Notes:
- This script requires backup directory format created by deployment steps:
<backup-dir>/app/
<backup-dir>/env.production.bak (or .env.production.bak)
<backup-dir>/db_pre_*.sql
EOF
}
|
||||
|
||||
log() {
  # Timestamped (UTC) progress line, preceded by a blank line for readability.
  printf '\n[%s] %s\n' "$(date -u +'%Y-%m-%d %H:%M:%S UTC')" "$*"
}

die() {
  # Print an error to stderr and terminate the script with status 1.
  printf '\n[ERROR] %s\n' "$*" >&2
  exit 1
}

require_root() {
  # Rollback manipulates /opt, docker and nginx — root is mandatory.
  if [[ "${EUID:-$(id -u)}" -ne 0 ]]; then
    die "Run as root (or with sudo)."
  fi
}
|
||||
|
||||
parse_args() {
  # Parse CLI flags; initializes the BACKUP_DIR and CUTOVER globals.
  BACKUP_DIR=""
  CUTOVER="false"
  while [[ $# -gt 0 ]]; do
    case "$1" in
      --backup-dir)
        # Fail with a clear message when the value is missing; previously the
        # subsequent `shift 2` aborted with an opaque error under `set -e`.
        [[ $# -ge 2 ]] || die "Missing value for --backup-dir"
        BACKUP_DIR="$2"
        shift 2
        ;;
      --cutover)
        CUTOVER="true"
        shift
        ;;
      -h|--help)
        usage
        exit 0
        ;;
      *)
        die "Unknown argument: $1"
        ;;
    esac
  done

  [[ -n "$BACKUP_DIR" ]] || die "--backup-dir is required"
}
|
||||
|
||||
pick_db_dump() {
  # Emit the newest (lexicographically last, i.e. latest timestamp) pre-deploy
  # SQL dump directly inside BACKUP_DIR; emits nothing when none exists.
  local newest="" candidate
  while IFS= read -r candidate; do
    newest="$candidate"
  done < <(find "$BACKUP_DIR" -maxdepth 1 -type f -name 'db_pre*.sql' | sort)
  [[ -z "$newest" ]] || printf '%s\n' "$newest"
}

pick_env_backup() {
  # Print the path of the saved production env file, preferring the
  # non-hidden name; prints an empty string when neither variant exists.
  local candidate
  for candidate in "$BACKUP_DIR/env.production.bak" "$BACKUP_DIR/.env.production.bak"; do
    if [[ -f "$candidate" ]]; then
      printf '%s' "$candidate"
      return
    fi
  done
  printf ''
}
|
||||
|
||||
wait_for_green_health() {
  # Poll the green backend health endpoint (90 tries, 2s apart = ~3 minutes).
  local attempt
  for ((attempt = 1; attempt <= 90; attempt++)); do
    if curl -fsS "http://127.0.0.1:${GREEN_BACKEND_PORT}/api/health" >/dev/null 2>&1; then
      return
    fi
    sleep 2
  done
  die "Green backend health check failed."
}
|
||||
|
||||
main() {
  # Stage the selected backup as an isolated "green" stack, restore its DB
  # dump, verify health, and (with --cutover) point Nginx at green.
  require_root
  parse_args "$@"

  # --- Validate backup artifacts -------------------------------------------
  [[ -d "$BACKUP_DIR/app" ]] || die "Missing backup app directory: $BACKUP_DIR/app"
  local env_backup
  env_backup="$(pick_env_backup)"
  [[ -n "$env_backup" ]] || die "Missing env backup in $BACKUP_DIR (.env.production.bak or env.production.bak)"
  local db_dump
  db_dump="$(pick_db_dump)"
  [[ -n "$db_dump" ]] || die "Missing DB dump in backup dir: $BACKUP_DIR"

  # --- Stage the green tree --------------------------------------------------
  log "Preparing green staging directory at $GREEN_DIR"
  rm -rf "$GREEN_DIR"
  mkdir -p "$GREEN_DIR"
  rsync -a "$BACKUP_DIR/app/" "$GREEN_DIR/"
  cp "$env_backup" "$GREEN_DIR/.env.production"

  # Override ports for green stack and keep CORS aligned to production hostname.
  if grep -q '^FRONTEND_PORT=' "$GREEN_DIR/.env.production"; then
    sed -i "s/^FRONTEND_PORT=.*/FRONTEND_PORT=${GREEN_FRONTEND_PORT}/" "$GREEN_DIR/.env.production"
  else
    echo "FRONTEND_PORT=${GREEN_FRONTEND_PORT}" >> "$GREEN_DIR/.env.production"
  fi
  if grep -q '^BACKEND_PORT=' "$GREEN_DIR/.env.production"; then
    sed -i "s/^BACKEND_PORT=.*/BACKEND_PORT=${GREEN_BACKEND_PORT}/" "$GREEN_DIR/.env.production"
  else
    echo "BACKEND_PORT=${GREEN_BACKEND_PORT}" >> "$GREEN_DIR/.env.production"
  fi

  # Remove fixed container_name lines for project-isolated blue/green operation.
  local compose_green
  compose_green="$GREEN_DIR/infra/deploy/docker-compose.green.yml"
  awk '!/^[[:space:]]*container_name:/' "$GREEN_DIR/infra/deploy/docker-compose.production.yml" > "$compose_green"

  log "Stopping previous green project (if any)"
  (
    cd "$GREEN_DIR"
    docker compose -p "$GREEN_PROJECT" --env-file .env.production -f "$compose_green" down -v || true
  )

  log "Starting green stack on ${GREEN_FRONTEND_PORT}/${GREEN_BACKEND_PORT}"
  (
    cd "$GREEN_DIR"
    docker compose -p "$GREEN_PROJECT" --env-file .env.production -f "$compose_green" up -d --build
  )

  # --- Restore the database --------------------------------------------------
  log "Restoring backup DB into green postgres"
  local green_postgres_id green_pg_user green_pg_db
  green_postgres_id="$(
    cd "$GREEN_DIR" && docker compose -p "$GREEN_PROJECT" --env-file .env.production -f "$compose_green" ps -q postgres
  )"
  [[ -n "$green_postgres_id" ]] || die "Could not locate green postgres container."

  # shellcheck disable=SC1090
  source "$GREEN_DIR/.env.production"
  green_pg_user="${POSTGRES_USER:-proxpanel}"
  green_pg_db="${POSTGRES_DB:-proxpanel}"

  # Drop and recreate the schema so the restore starts from a clean slate.
  docker exec "$green_postgres_id" psql -U "$green_pg_user" -d "$green_pg_db" -v ON_ERROR_STOP=1 \
    -c 'DROP SCHEMA public CASCADE; CREATE SCHEMA public;'

  # Feed the dump straight from the file — replaces the original useless
  # `cat "$db_dump" | docker exec -i ...` pipeline, so psql's exit status
  # alone determines success.
  docker exec -i "$green_postgres_id" psql -U "$green_pg_user" -d "$green_pg_db" -v ON_ERROR_STOP=1 <"$db_dump" >/dev/null

  # --- Validate + optional cutover -------------------------------------------
  log "Validating green health"
  wait_for_green_health
  curl -fsS "http://127.0.0.1:${GREEN_BACKEND_PORT}/api/health" >/dev/null

  if [[ "$CUTOVER" == "true" ]]; then
    log "Cutting over Nginx upstream to green frontend port ${GREEN_FRONTEND_PORT}"
    [[ -f "$NGINX_SITE" ]] || die "Missing nginx site config: $NGINX_SITE"
    # Keep a timestamped copy of the site config so the cutover is reversible.
    cp "$NGINX_SITE" "${NGINX_SITE}.pre-green.$(date -u +%Y%m%d-%H%M%S).bak"
    sed -i -E "s#proxy_pass http://127\\.0\\.0\\.1:[0-9]+;#proxy_pass http://127.0.0.1:${GREEN_FRONTEND_PORT};#g" "$NGINX_SITE"
    # Validate config before reloading; `nginx -t` failing aborts via set -e.
    nginx -t
    systemctl reload nginx
    log "Nginx cutover complete."
  fi

  log "Blue/green rollback prep complete."
  printf '\nGreen frontend: http://127.0.0.1:%s\n' "$GREEN_FRONTEND_PORT"
  printf 'Green backend : http://127.0.0.1:%s/api/health\n' "$GREEN_BACKEND_PORT"
  if [[ "$CUTOVER" == "true" ]]; then
    printf 'Traffic switched to green.\n'
  else
    printf 'Traffic still on blue. Re-run with --cutover when ready.\n'
  fi
}

main "$@"
|
||||
469
infra/deploy/rotate-integration-secrets.sh
Normal file
469
infra/deploy/rotate-integration-secrets.sh
Normal file
@@ -0,0 +1,469 @@
|
||||
#!/usr/bin/env bash
# rotate-integration-secrets.sh — rotate Proxmox/payment/alerting secrets
# through the backend settings API, with preflight checks and auto-rollback.
set -Eeuo pipefail

# Deployment paths and backend endpoint (BACKEND_URL derived later if empty).
APP_DIR="${APP_DIR:-/opt/proxpanel}"
ENV_FILE="${ENV_FILE:-$APP_DIR/.env.production}"
BACKUP_ROOT="${BACKUP_ROOT:-/opt/proxpanel-backups}"
BACKEND_URL="${BACKEND_URL:-}"

# Admin credentials supplied on the CLI (fall back to ENV_FILE values).
ADMIN_EMAIL_INPUT=""
ADMIN_PASSWORD_INPUT=""

# Candidate Proxmox connection settings (empty = keep current value).
NEW_PROXMOX_HOST=""
NEW_PROXMOX_PORT=""
NEW_PROXMOX_USERNAME=""
NEW_PROXMOX_TOKEN_ID=""
NEW_PROXMOX_TOKEN_SECRET=""
NEW_PROXMOX_VERIFY_SSL=""

# Payment provider secrets; *_previous fields give webhooks a grace window.
NEW_PAYSTACK_SECRET=""
NEW_FLUTTERWAVE_SECRET=""
NEW_FLUTTERWAVE_WEBHOOK_HASH=""
FINALIZE_PAYMENT_GRACE="false"

# Alerting/notification endpoints (empty = keep current value).
NEW_MONITORING_WEBHOOK_URL=""
NEW_ALERT_WEBHOOK_URL=""
NEW_NOTIFICATION_EMAIL_WEBHOOK=""
NEW_OPS_EMAIL=""

# Runtime state populated by main(): API token, timestamp, backup directory.
API_TOKEN=""
TS=""
BACKUP_DIR=""

# Which setting groups were changed — drives rollback_if_needed() on failure.
UPDATED_PROXMOX="false"
UPDATED_PAYMENT="false"
UPDATED_NOTIFICATIONS="false"

# Pre-rotation snapshots used for rollback.
ORIGINAL_PROXMOX_JSON=""
ORIGINAL_PAYMENT_JSON=""
ORIGINAL_NOTIFICATIONS_JSON=""
ORIGINAL_NOTIFICATIONS_JSON_SANITIZED=""
|
||||
|
||||
usage() {
  # Print CLI help. Heredoc is quoted ('EOF'), so "${BACKEND_PORT}" below is
  # printed literally as documentation, not expanded.
  cat <<'EOF'
Usage:
sudo bash infra/deploy/rotate-integration-secrets.sh [options]

Options:
--backend-url <url> Backend URL (default: http://127.0.0.1:${BACKEND_PORT}/api)
--admin-email <email> Admin email (default from .env.production)
--admin-password <password> Admin password (default from .env.production)

# Proxmox token rotation (zero-downtime by preflight + post-sync validation):
--proxmox-token-secret <secret> New Proxmox token secret
--proxmox-token-id <token-id> Optional new token id
--proxmox-username <user@realm> Optional new token username
--proxmox-host <host> Optional host override
--proxmox-port <port> Optional port override
--proxmox-verify-ssl <true|false> Optional SSL verify override

# Payment/webhook secret rotation (supports grace window):
--paystack-secret <secret> New Paystack secret (stores old value as paystack_secret_previous)
--flutterwave-secret <secret> New Flutterwave secret (stores old value as flutterwave_secret_previous)
--flutterwave-webhook-hash <hash> New Flutterwave webhook hash (stores old value as flutterwave_webhook_hash_previous)
--finalize-payment-webhook-grace Clears *_previous fields after provider cutover is complete

# Alerting webhook endpoints (optional):
--monitoring-webhook-url <url>
--alert-webhook-url <url>
--notification-email-webhook <url>
--ops-email <email>

-h, --help Show help

Examples:
sudo bash infra/deploy/rotate-integration-secrets.sh \
--proxmox-token-secret 'NEW_SECRET' \
--paystack-secret 'sk_live_new' \
--flutterwave-webhook-hash 'new_hash'

sudo bash infra/deploy/rotate-integration-secrets.sh \
--finalize-payment-webhook-grace
EOF
}
|
||||
|
||||
log() {
  # Timestamped (UTC) progress line, preceded by a blank line.
  printf '\n[%s] %s\n' "$(date -u +'%Y-%m-%d %H:%M:%S UTC')" "$*"
}

die() {
  # Print an error to stderr and terminate the script with status 1.
  printf '\n[ERROR] %s\n' "$*" >&2
  exit 1
}

require_root() {
  # Rotation reads the production env file and writes backups under /opt.
  [[ "${EUID:-$(id -u)}" -eq 0 ]] || die "Run as root (or with sudo)."
}

require_command() {
  # Die unless command $1 is available on PATH.
  command -v "$1" >/dev/null 2>&1 || die "Missing required command: $1"
}

require_file() {
  # Die unless regular file $1 exists.
  [[ -f "$1" ]] || die "Missing required file: $1"
}
|
||||
|
||||
parse_args() {
  # Parse rotation options into the NEW_* / *_INPUT globals. Every value-taking
  # flag is checked for a missing value up front; previously `shift 2` on a
  # trailing flag aborted with an opaque shift error under `set -e`.
  while [[ $# -gt 0 ]]; do
    case "$1" in
      --backend-url|--admin-email|--admin-password|\
      --proxmox-host|--proxmox-port|--proxmox-username|--proxmox-token-id|--proxmox-token-secret|--proxmox-verify-ssl|\
      --paystack-secret|--flutterwave-secret|--flutterwave-webhook-hash|\
      --monitoring-webhook-url|--alert-webhook-url|--notification-email-webhook|--ops-email)
        [[ $# -ge 2 ]] || die "Missing value for $1"
        case "$1" in
          --backend-url) BACKEND_URL="$2" ;;
          --admin-email) ADMIN_EMAIL_INPUT="$2" ;;
          --admin-password) ADMIN_PASSWORD_INPUT="$2" ;;
          --proxmox-host) NEW_PROXMOX_HOST="$2" ;;
          --proxmox-port) NEW_PROXMOX_PORT="$2" ;;
          --proxmox-username) NEW_PROXMOX_USERNAME="$2" ;;
          --proxmox-token-id) NEW_PROXMOX_TOKEN_ID="$2" ;;
          --proxmox-token-secret) NEW_PROXMOX_TOKEN_SECRET="$2" ;;
          --proxmox-verify-ssl) NEW_PROXMOX_VERIFY_SSL="$2" ;;
          --paystack-secret) NEW_PAYSTACK_SECRET="$2" ;;
          --flutterwave-secret) NEW_FLUTTERWAVE_SECRET="$2" ;;
          --flutterwave-webhook-hash) NEW_FLUTTERWAVE_WEBHOOK_HASH="$2" ;;
          --monitoring-webhook-url) NEW_MONITORING_WEBHOOK_URL="$2" ;;
          --alert-webhook-url) NEW_ALERT_WEBHOOK_URL="$2" ;;
          --notification-email-webhook) NEW_NOTIFICATION_EMAIL_WEBHOOK="$2" ;;
          --ops-email) NEW_OPS_EMAIL="$2" ;;
        esac
        shift 2
        ;;
      --finalize-payment-webhook-grace)
        FINALIZE_PAYMENT_GRACE="true"
        shift
        ;;
      -h|--help)
        usage
        exit 0
        ;;
      *)
        die "Unknown argument: $1"
        ;;
    esac
  done
}
|
||||
|
||||
json_escape() {
  # Render $1 as a JSON string literal (adds quotes + escapes), e.g. a"b -> "a\"b".
  local raw="$1"
  printf '%s' "$raw" | jq --raw-input --slurp .
}

json_canonical() {
  # Compact, key-sorted form of a JSON document — stable for equality checks.
  local doc="$1"
  printf '%s' "$doc" | jq --compact-output --sort-keys .
}

sanitize_notifications_payload() {
  # Drop endpoint/email keys whose value is missing or empty so a rollback
  # PUT does not overwrite configured values with blanks.
  printf '%s' "$1" | jq '
    reduce ("monitoring_webhook_url", "alert_webhook_url", "email_gateway_url",
            "notification_email_webhook", "ops_email") as $k (.;
      if (.[$k] // "") == "" then del(.[$k]) else . end)
  '
}
|
||||
|
||||
api_get() {
  # Authenticated GET against the backend API; echoes the response body.
  # curl -f makes HTTP errors fail the call (propagates under set -e).
  local path="$1"
  curl -fsS \
    -H "Authorization: Bearer ${API_TOKEN}" \
    "${BACKEND_URL}${path}"
}

api_put() {
  # Authenticated PUT of a JSON payload; the response body is discarded and
  # the curl exit status (via -f) signals success/failure to the caller.
  local path="$1"
  local payload="$2"
  curl -fsS \
    -X PUT \
    -H "Authorization: Bearer ${API_TOKEN}" \
    -H "Content-Type: application/json" \
    -d "$payload" \
    "${BACKEND_URL}${path}" >/dev/null
}
|
||||
|
||||
preflight_proxmox_candidate() {
  # Validate the candidate Proxmox settings by calling the Proxmox /version
  # endpoint directly with the new token BEFORE anything is written to the
  # backend — a bad token fails here instead of breaking production.
  local payload_json="$1"
  local host port username token_id token_secret verify_ssl auth_header

  host="$(printf '%s' "$payload_json" | jq -r '.host // empty')"
  port="$(printf '%s' "$payload_json" | jq -r '.port // 8006')"
  username="$(printf '%s' "$payload_json" | jq -r '.username // empty')"
  token_id="$(printf '%s' "$payload_json" | jq -r '.token_id // empty')"
  token_secret="$(printf '%s' "$payload_json" | jq -r '.token_secret // empty')"
  verify_ssl="$(printf '%s' "$payload_json" | jq -r '.verify_ssl // true')"

  [[ -n "$host" && -n "$username" && -n "$token_id" && -n "$token_secret" ]] || {
    die "Candidate Proxmox settings are incomplete."
  }

  # Proxmox API token auth header format: PVEAPIToken=<user>!<tokenid>=<secret>
  auth_header="PVEAPIToken=${username}!${token_id}=${token_secret}"
  log "Preflight: validating candidate Proxmox token directly against ${host}:${port}"
  if [[ "$verify_ssl" == "false" ]]; then
    # -k: skip TLS verification (e.g. self-signed Proxmox certificates).
    curl -ksfS -H "Authorization: ${auth_header}" "https://${host}:${port}/api2/json/version" >/dev/null
  else
    curl -fsS -H "Authorization: ${auth_header}" "https://${host}:${port}/api2/json/version" >/dev/null
  fi
}
|
||||
|
||||
rollback_if_needed() {
  # Best-effort restore of the pre-rotation snapshots for every setting group
  # that was already updated. An empty API token means login never succeeded,
  # so nothing was changed and there is nothing to undo.
  [[ -n "$API_TOKEN" ]] || return 0

  if [[ "$UPDATED_NOTIFICATIONS" == "true" && -n "$ORIGINAL_NOTIFICATIONS_JSON" ]]; then
    log "Rollback: restoring notifications settings"
    api_put "/settings/notifications" "$ORIGINAL_NOTIFICATIONS_JSON_SANITIZED" || true
  fi

  if [[ "$UPDATED_PAYMENT" == "true" && -n "$ORIGINAL_PAYMENT_JSON" ]]; then
    log "Rollback: restoring payment settings"
    api_put "/settings/payment" "$ORIGINAL_PAYMENT_JSON" || true
  fi

  if [[ "$UPDATED_PROXMOX" == "true" && -n "$ORIGINAL_PROXMOX_JSON" ]]; then
    log "Rollback: restoring proxmox settings"
    api_put "/settings/proxmox" "$ORIGINAL_PROXMOX_JSON" || true
  fi
}

on_error() {
  # ERR-trap handler: report the failing line number, then try to undo any
  # partially-applied settings changes.
  local failed_line="$1"
  printf '\n[ERROR] Rotation failed at line %s. Attempting rollback where possible.\n' "$failed_line" >&2
  rollback_if_needed
}
|
||||
|
||||
build_backend_url() {
  # Derive BACKEND_URL from BACKEND_PORT (default 8080) unless the caller
  # already provided one via flag or environment.
  [[ -z "$BACKEND_URL" ]] || return 0
  BACKEND_URL="http://127.0.0.1:${BACKEND_PORT:-8080}/api"
}
|
||||
|
||||
main() {
|
||||
parse_args "$@"
|
||||
require_root
|
||||
require_command curl
|
||||
require_command jq
|
||||
require_file "$ENV_FILE"
|
||||
|
||||
# shellcheck disable=SC1090
|
||||
source "$ENV_FILE"
|
||||
build_backend_url
|
||||
|
||||
local admin_email admin_password
|
||||
admin_email="${ADMIN_EMAIL_INPUT:-${ADMIN_EMAIL:-}}"
|
||||
admin_password="${ADMIN_PASSWORD_INPUT:-${ADMIN_PASSWORD:-}}"
|
||||
|
||||
[[ -n "$admin_email" && -n "$admin_password" ]] || {
|
||||
die "Admin credentials are required (provide --admin-email/--admin-password or set ADMIN_EMAIL/ADMIN_PASSWORD in env)."
|
||||
}
|
||||
|
||||
local wants_rotation="false"
|
||||
if [[ -n "$NEW_PROXMOX_TOKEN_SECRET" || -n "$NEW_PROXMOX_TOKEN_ID" || -n "$NEW_PROXMOX_USERNAME" || -n "$NEW_PROXMOX_HOST" || -n "$NEW_PROXMOX_PORT" || -n "$NEW_PROXMOX_VERIFY_SSL" || -n "$NEW_PAYSTACK_SECRET" || -n "$NEW_FLUTTERWAVE_SECRET" || -n "$NEW_FLUTTERWAVE_WEBHOOK_HASH" || -n "$NEW_MONITORING_WEBHOOK_URL" || -n "$NEW_ALERT_WEBHOOK_URL" || -n "$NEW_NOTIFICATION_EMAIL_WEBHOOK" || -n "$NEW_OPS_EMAIL" || "$FINALIZE_PAYMENT_GRACE" == "true" ]]; then
|
||||
wants_rotation="true"
|
||||
fi
|
||||
[[ "$wants_rotation" == "true" ]] || die "No rotation inputs supplied. Run with --help for options."
|
||||
|
||||
trap 'on_error $LINENO' ERR
|
||||
|
||||
TS="$(date -u +%Y%m%d-%H%M%S)"
|
||||
BACKUP_DIR="${BACKUP_ROOT}/${TS}-integration-secret-rotation"
|
||||
mkdir -p "$BACKUP_DIR"
|
||||
chmod 700 "$BACKUP_DIR"
|
||||
|
||||
log "Authenticating against backend API"
|
||||
local login_response
|
||||
login_response="$(
|
||||
curl -fsS -X POST "${BACKEND_URL}/auth/login" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"email\":$(json_escape "$admin_email"),\"password\":$(json_escape "$admin_password")}"
|
||||
)"
|
||||
API_TOKEN="$(printf '%s' "$login_response" | jq -r '.token // empty')"
|
||||
[[ -n "$API_TOKEN" ]] || die "Failed to obtain auth token from backend login."
|
||||
|
||||
log "Fetching current integration settings"
|
||||
ORIGINAL_PROXMOX_JSON="$(api_get "/settings/proxmox")"
|
||||
ORIGINAL_PAYMENT_JSON="$(api_get "/settings/payment")"
|
||||
ORIGINAL_NOTIFICATIONS_JSON="$(api_get "/settings/notifications")"
|
||||
ORIGINAL_NOTIFICATIONS_JSON_SANITIZED="$(sanitize_notifications_payload "$ORIGINAL_NOTIFICATIONS_JSON")"
|
||||
|
||||
printf '%s\n' "$ORIGINAL_PROXMOX_JSON" >"${BACKUP_DIR}/proxmox.before.json"
|
||||
printf '%s\n' "$ORIGINAL_PAYMENT_JSON" >"${BACKUP_DIR}/payment.before.json"
|
||||
printf '%s\n' "$ORIGINAL_NOTIFICATIONS_JSON" >"${BACKUP_DIR}/notifications.before.json"
|
||||
cp "$ENV_FILE" "${BACKUP_DIR}/.env.production.bak"
|
||||
|
||||
local proxmox_payload payment_payload notifications_payload
|
||||
proxmox_payload="$ORIGINAL_PROXMOX_JSON"
|
||||
payment_payload="$ORIGINAL_PAYMENT_JSON"
|
||||
notifications_payload="$ORIGINAL_NOTIFICATIONS_JSON"
|
||||
|
||||
if [[ -n "$NEW_PROXMOX_HOST" ]]; then
|
||||
proxmox_payload="$(printf '%s' "$proxmox_payload" | jq --arg v "$NEW_PROXMOX_HOST" '.host = $v')"
|
||||
fi
|
||||
if [[ -n "$NEW_PROXMOX_PORT" ]]; then
|
||||
proxmox_payload="$(printf '%s' "$proxmox_payload" | jq --argjson v "$NEW_PROXMOX_PORT" '.port = $v')"
|
||||
fi
|
||||
if [[ -n "$NEW_PROXMOX_USERNAME" ]]; then
|
||||
proxmox_payload="$(printf '%s' "$proxmox_payload" | jq --arg v "$NEW_PROXMOX_USERNAME" '.username = $v')"
|
||||
fi
|
||||
if [[ -n "$NEW_PROXMOX_TOKEN_ID" ]]; then
|
||||
proxmox_payload="$(printf '%s' "$proxmox_payload" | jq --arg v "$NEW_PROXMOX_TOKEN_ID" '.token_id = $v')"
|
||||
fi
|
||||
if [[ -n "$NEW_PROXMOX_TOKEN_SECRET" ]]; then
|
||||
proxmox_payload="$(printf '%s' "$proxmox_payload" | jq --arg v "$NEW_PROXMOX_TOKEN_SECRET" '.token_secret = $v')"
|
||||
fi
|
||||
if [[ -n "$NEW_PROXMOX_VERIFY_SSL" ]]; then
|
||||
proxmox_payload="$(printf '%s' "$proxmox_payload" | jq --arg v "${NEW_PROXMOX_VERIFY_SSL,,}" '.verify_ssl = ($v == "true")')"
|
||||
fi
|
||||
|
||||
if [[ "$(json_canonical "$proxmox_payload")" != "$(json_canonical "$ORIGINAL_PROXMOX_JSON")" ]]; then
|
||||
preflight_proxmox_candidate "$proxmox_payload"
|
||||
log "Applying Proxmox settings rotation"
|
||||
api_put "/settings/proxmox" "$proxmox_payload"
|
||||
UPDATED_PROXMOX="true"
|
||||
|
||||
log "Post-rotation validation: Proxmox sync"
|
||||
curl -fsS -X POST "${BACKEND_URL}/proxmox/sync" -H "Authorization: Bearer ${API_TOKEN}" >/dev/null
|
||||
fi
|
||||
|
||||
if [[ -n "$NEW_PAYSTACK_SECRET" ]]; then
|
||||
payment_payload="$(
|
||||
printf '%s' "$payment_payload" | jq --arg v "$NEW_PAYSTACK_SECRET" '
|
||||
if (.paystack_secret // "") == $v then . else .paystack_secret_previous = (.paystack_secret // "") | .paystack_secret = $v end
|
||||
'
|
||||
)"
|
||||
fi
|
||||
|
||||
if [[ -n "$NEW_FLUTTERWAVE_SECRET" ]]; then
|
||||
payment_payload="$(
|
||||
printf '%s' "$payment_payload" | jq --arg v "$NEW_FLUTTERWAVE_SECRET" '
|
||||
if (.flutterwave_secret // "") == $v then . else .flutterwave_secret_previous = (.flutterwave_secret // "") | .flutterwave_secret = $v end
|
||||
'
|
||||
)"
|
||||
fi
|
||||
|
||||
if [[ -n "$NEW_FLUTTERWAVE_WEBHOOK_HASH" ]]; then
|
||||
payment_payload="$(
|
||||
printf '%s' "$payment_payload" | jq --arg v "$NEW_FLUTTERWAVE_WEBHOOK_HASH" '
|
||||
if (.flutterwave_webhook_hash // "") == $v then .
|
||||
else .flutterwave_webhook_hash_previous = (.flutterwave_webhook_hash // "") | .flutterwave_webhook_hash = $v end
|
||||
'
|
||||
)"
|
||||
fi
|
||||
|
||||
if [[ "$FINALIZE_PAYMENT_GRACE" == "true" ]]; then
|
||||
payment_payload="$(
|
||||
printf '%s' "$payment_payload" | jq '
|
||||
.paystack_secret_previous = "" |
|
||||
.flutterwave_secret_previous = "" |
|
||||
.flutterwave_webhook_hash_previous = ""
|
||||
'
|
||||
)"
|
||||
fi
|
||||
|
||||
if [[ "$(json_canonical "$payment_payload")" != "$(json_canonical "$ORIGINAL_PAYMENT_JSON")" ]]; then
|
||||
log "Applying payment/webhook rotation payload"
|
||||
api_put "/settings/payment" "$payment_payload"
|
||||
UPDATED_PAYMENT="true"
|
||||
fi
|
||||
|
||||
if [[ -n "$NEW_MONITORING_WEBHOOK_URL" ]]; then
|
||||
notifications_payload="$(printf '%s' "$notifications_payload" | jq --arg v "$NEW_MONITORING_WEBHOOK_URL" '.monitoring_webhook_url = $v')"
|
||||
fi
|
||||
if [[ -n "$NEW_ALERT_WEBHOOK_URL" ]]; then
|
||||
notifications_payload="$(printf '%s' "$notifications_payload" | jq --arg v "$NEW_ALERT_WEBHOOK_URL" '.alert_webhook_url = $v')"
|
||||
fi
|
||||
if [[ -n "$NEW_NOTIFICATION_EMAIL_WEBHOOK" ]]; then
|
||||
notifications_payload="$(printf '%s' "$notifications_payload" | jq --arg v "$NEW_NOTIFICATION_EMAIL_WEBHOOK" '.notification_email_webhook = $v')"
|
||||
fi
|
||||
if [[ -n "$NEW_OPS_EMAIL" ]]; then
|
||||
notifications_payload="$(printf '%s' "$notifications_payload" | jq --arg v "$NEW_OPS_EMAIL" '.ops_email = $v')"
|
||||
fi
|
||||
|
||||
notifications_payload="$(sanitize_notifications_payload "$notifications_payload")"
|
||||
|
||||
if [[ "$(json_canonical "$notifications_payload")" != "$(json_canonical "$ORIGINAL_NOTIFICATIONS_JSON_SANITIZED")" ]]; then
|
||||
log "Applying alerting destination updates"
|
||||
api_put "/settings/notifications" "$notifications_payload"
|
||||
UPDATED_NOTIFICATIONS="true"
|
||||
fi
|
||||
|
||||
local summary_file
|
||||
summary_file="/root/proxpanel-integration-rotation-${TS}.txt"
|
||||
cat >"$summary_file" <<EOF
|
||||
ProxPanel integration secret rotation completed at $(date -u +'%Y-%m-%d %H:%M:%S UTC')
|
||||
|
||||
Backend URL:
|
||||
${BACKEND_URL}
|
||||
|
||||
Backup directory:
|
||||
${BACKUP_DIR}
|
||||
|
||||
Changed blocks:
|
||||
Proxmox settings: ${UPDATED_PROXMOX}
|
||||
Payment/webhook settings: ${UPDATED_PAYMENT}
|
||||
Notification destinations: ${UPDATED_NOTIFICATIONS}
|
||||
|
||||
Grace mode:
|
||||
finalize_payment_webhook_grace=${FINALIZE_PAYMENT_GRACE}
|
||||
|
||||
Post checks:
|
||||
GET ${BACKEND_URL}/health
|
||||
POST ${BACKEND_URL}/proxmox/sync
|
||||
|
||||
Next:
|
||||
If webhook grace fields were populated, finalize later with:
|
||||
sudo bash ${APP_DIR}/infra/deploy/rotate-integration-secrets.sh --finalize-payment-webhook-grace
|
||||
EOF
|
||||
chmod 600 "$summary_file"
|
||||
|
||||
trap - ERR
|
||||
log "Integration secret rotation completed successfully."
|
||||
printf 'Summary: %s\n' "$summary_file"
|
||||
}
|
||||
|
||||
main "$@"
|
||||
209
infra/deploy/rotate-production-secrets.sh
Executable file
209
infra/deploy/rotate-production-secrets.sh
Executable file
@@ -0,0 +1,209 @@
|
||||
#!/usr/bin/env bash
# Rotate ProxPanel production secrets (JWT secrets, Postgres password, admin
# password) in place, with pre-rotation backups and post-rotation checks.
# Fail fast: -E propagates ERR traps, -e exits on error, -u rejects unset
# vars, -o pipefail surfaces failures inside pipelines.
set -Eeuo pipefail

# Deployment layout; each can be overridden via the environment.
APP_DIR="${APP_DIR:-/opt/proxpanel}"
ENV_FILE="${ENV_FILE:-$APP_DIR/.env.production}"
COMPOSE_FILE="${COMPOSE_FILE:-$APP_DIR/infra/deploy/docker-compose.production.yml}"
BACKUP_ROOT="${BACKUP_ROOT:-/opt/proxpanel-backups}"
|
||||
|
||||
# Print CLI help text to stdout. The heredoc delimiter is quoted ('EOF'),
# so no parameter expansion happens inside the help body.
usage() {
  cat <<'EOF'
Usage:
  sudo bash infra/deploy/rotate-production-secrets.sh

What this rotates:
  - JWT_SECRET
  - JWT_REFRESH_SECRET
  - POSTGRES_PASSWORD
  - ADMIN_PASSWORD

Safety:
  - Creates backup in /opt/proxpanel-backups/<timestamp>-secret-rotation/
  - Dumps DB before password change
  - Verifies API health and admin login after rotation
EOF
}
|
||||
|
||||
# Emit a blank-line-prefixed, UTC-timestamped progress message to stdout.
log() {
  local stamp
  stamp="$(date -u +'%Y-%m-%d %H:%M:%S UTC')"
  printf '\n[%s] %s\n' "$stamp" "$*"
}
|
||||
|
||||
# Print an error message to stderr and abort the whole script with status 1.
die() {
  >&2 printf '\n[ERROR] %s\n' "$*"
  exit 1
}
|
||||
|
||||
# Abort unless running with root privileges (EUID 0; falls back to id -u
# for shells that do not set EUID).
require_root() {
  [[ "${EUID:-$(id -u)}" -eq 0 ]] || die "Run as root (or with sudo)."
}
|
||||
|
||||
# Abort unless the given path exists as a regular file.
require_file() {
  local path="$1"
  if [[ ! -f "$path" ]]; then
    die "Missing required file: $path"
  fi
}
|
||||
|
||||
# Abort unless the named command is resolvable on PATH.
require_command() {
  local name="$1"
  if ! command -v "$name" >/dev/null 2>&1; then
    die "Missing required command: $name"
  fi
}
|
||||
|
||||
# Escape a string for safe use as the replacement text in a sed
# "s/pattern/REPLACEMENT/" expression.
#
# Escapes backslashes, '&' (sed's whole-match backreference), and '/'
# (the delimiter used at the update_env_value call site). The original
# version did not escape '/', so any value containing a slash corrupted
# the generated sed expression. Values containing newlines remain unsafe
# for a single-line substitution.
#
# $1 - raw value
# Prints the escaped value (no trailing newline) to stdout.
escape_sed_replacement() {
  local value="$1"
  value="${value//\\/\\\\}"   # backslashes first, so later escapes are not doubled
  value="${value//&/\\&}"
  value="${value//\//\\/}"    # '/' is the sed delimiter at the call site
  printf '%s' "$value"
}
|
||||
|
||||
# Set KEY=VALUE in $ENV_FILE: replace an existing "KEY=..." line in place,
# or append the assignment if the key is not present yet.
#
# $1 - env var name (used verbatim inside a grep/sed regex; callers pass
#      fixed literal keys with no regex metacharacters)
# $2 - new value
#
# NOTE(review): correctness of the sed branch depends on
# escape_sed_replacement handling the '/' delimiter used below, and values
# containing newlines would break the single-line substitution — confirm
# before reusing this with arbitrary values.
update_env_value() {
  local key="$1"
  local value="$2"
  local escaped
  escaped="$(escape_sed_replacement "$value")"

  if grep -q "^${key}=" "$ENV_FILE"; then
    # Key exists: rewrite the whole line in place.
    sed -i "s/^${key}=.*/${key}=${escaped}/" "$ENV_FILE"
  else
    # Key absent: append the raw (unescaped) value.
    printf '%s=%s\n' "$key" "$value" >>"$ENV_FILE"
  fi
}
|
||||
|
||||
# Generate a 48-character random password: base64 of 36 random bytes, with
# '/' rewritten to 'a' and '+' to 'b' so the result is shell/sed-friendly.
random_password() {
  local raw
  raw="$(openssl rand -base64 36)"
  raw="${raw//$'\n'/}"
  raw="${raw//\//a}"
  raw="${raw//+/b}"
  printf '%s' "$raw"
}
|
||||
|
||||
# Generate 64 lowercase hex characters (32 random bytes), no trailing
# newline. Command substitution strips openssl's trailing newline.
random_hex() {
  printf '%s' "$(openssl rand -hex 32)"
}
|
||||
|
||||
# Poll the backend /api/health endpoint until it answers, or give up after
# 60 attempts (2s apart, ~2 minutes) and abort. Reads BACKEND_PORT from
# the caller's environment.
wait_for_health() {
  local url="http://127.0.0.1:${BACKEND_PORT}/api/health"
  local remaining=60
  while (( remaining > 0 )); do
    if curl -fsS "$url" >/dev/null 2>&1; then
      return
    fi
    sleep 2
    remaining=$((remaining - 1))
  done
  die "Backend health check failed after secret rotation."
}
|
||||
|
||||
# Orchestrate the full rotation: back up env/app/DB, generate new secrets,
# rewrite the env file, rotate the Postgres password, restart the stack,
# re-seed the admin credential, revoke all sessions, verify login, and
# write a root-only summary file. Ordering matters throughout (e.g. the DB
# dump must happen while the old password is still valid).
main() {
  if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then
    usage
    exit 0
  fi

  require_root
  require_command docker
  require_command curl
  require_command openssl
  require_command rsync
  require_file "$ENV_FILE"
  require_file "$COMPOSE_FILE"

  # Load current production settings (ADMIN_*, POSTGRES_*, BACKEND_PORT, ...).
  # shellcheck disable=SC1090
  source "$ENV_FILE"

  local ts backup_dir compose_args
  ts="$(date -u +%Y%m%d-%H%M%S)"
  backup_dir="${BACKUP_ROOT}/${ts}-secret-rotation"
  compose_args=(--env-file "$ENV_FILE" -f "$COMPOSE_FILE")

  # Capture pre-rotation values; defaults mirror the deployment defaults.
  # NOTE(review): old_admin_email and old_admin_password are captured but
  # never referenced later in this function — confirm they are needed.
  local old_admin_email old_admin_password old_postgres_user old_postgres_db old_backend_port
  old_admin_email="${ADMIN_EMAIL:-admin@proxpanel.local}"
  old_admin_password="${ADMIN_PASSWORD:-}"
  old_postgres_user="${POSTGRES_USER:-proxpanel}"
  old_postgres_db="${POSTGRES_DB:-proxpanel}"
  old_backend_port="${BACKEND_PORT:-8080}"

  # Generate replacement secrets. JWT secrets concatenate two passwords for
  # extra length; the admin password gets an "A9!" suffix (presumably to
  # satisfy complexity rules — confirm against the backend's policy).
  local new_jwt_secret new_jwt_refresh_secret new_postgres_password new_admin_password
  new_jwt_secret="$(random_password)$(random_password)"
  new_jwt_refresh_secret="$(random_password)$(random_password)"
  new_postgres_password="$(random_hex)"
  new_admin_password="$(random_password)A9!"

  log "Creating pre-rotation backups in $backup_dir"
  mkdir -p "$backup_dir"
  cp "$ENV_FILE" "$backup_dir/.env.production.bak"
  rsync -a "$APP_DIR/" "$backup_dir/app/"
  # Dump the DB while the old credentials are still valid.
  docker exec proxpanel-postgres pg_dump -U "$old_postgres_user" -d "$old_postgres_db" >"$backup_dir/db_pre_rotation.sql"

  log "Updating env file with rotated secrets"
  update_env_value "JWT_SECRET" "$new_jwt_secret"
  update_env_value "JWT_REFRESH_SECRET" "$new_jwt_refresh_secret"
  update_env_value "POSTGRES_PASSWORD" "$new_postgres_password"
  update_env_value "ADMIN_PASSWORD" "$new_admin_password"

  # Re-load env values after edits.
  # shellcheck disable=SC1090
  source "$ENV_FILE"

  # Change the DB password using the still-running container, authenticating
  # with the old trust/credentials before the stack is restarted.
  log "Applying Postgres password rotation"
  docker exec proxpanel-postgres psql -U "$old_postgres_user" -d "$old_postgres_db" -v ON_ERROR_STOP=1 \
    -c "ALTER USER \"$old_postgres_user\" WITH PASSWORD '$new_postgres_password';"

  log "Restarting stack with new secrets"
  (
    cd "$APP_DIR"
    docker compose "${compose_args[@]}" up -d
  )

  # BACKEND_PORT may not be declared in the env file; fall back to the
  # pre-rotation value so wait_for_health has a port to poll.
  BACKEND_PORT="${BACKEND_PORT:-$old_backend_port}"
  wait_for_health

  log "Re-seeding admin credential to match rotated ADMIN_PASSWORD"
  (
    cd "$APP_DIR"
    docker compose "${compose_args[@]}" exec -T backend npm run prisma:seed
  )

  # Old JWTs are now signed with a retired secret; drop every session row so
  # clients must re-authenticate.
  log "Revoking all active auth sessions after JWT/password rotation"
  docker exec proxpanel-postgres psql -U "${POSTGRES_USER:-$old_postgres_user}" -d "${POSTGRES_DB:-$old_postgres_db}" \
    -v ON_ERROR_STOP=1 -c 'TRUNCATE TABLE "AuthSession" RESTART IDENTITY;'

  # Smoke-test a real login with the rotated credentials; expect HTTP 200.
  # NOTE(review): /tmp/proxpanel-rotate-login.json may retain an auth token
  # after the script exits — consider deleting it once the status is checked.
  log "Verifying post-rotation login"
  local login_status
  login_status="$(
    curl -sS -o /tmp/proxpanel-rotate-login.json -w "%{http_code}" \
      -X POST "http://127.0.0.1:${BACKEND_PORT}/api/auth/login" \
      -H "Content-Type: application/json" \
      -d "{\"email\":\"${ADMIN_EMAIL}\",\"password\":\"${ADMIN_PASSWORD}\"}"
  )"
  [[ "$login_status" == "200" ]] || die "Admin login failed after secret rotation (status $login_status)."

  # Write a human-readable summary. It contains the new admin password, so
  # it lives under /root and is chmod 600 below.
  local summary_file
  summary_file="/root/proxpanel-secret-rotation-${ts}.txt"
  cat >"$summary_file" <<EOF
ProxPanel secret rotation completed at $(date -u +'%Y-%m-%d %H:%M:%S UTC')

Backup directory:
  $backup_dir

Rotated secrets:
  JWT_SECRET
  JWT_REFRESH_SECRET
  POSTGRES_PASSWORD
  ADMIN_PASSWORD

Admin credentials:
  ADMIN_EMAIL=${ADMIN_EMAIL}
  ADMIN_PASSWORD=${ADMIN_PASSWORD}

Post-rotation checks:
  curl -fsS http://127.0.0.1:${BACKEND_PORT}/api/health
  curl -X POST http://127.0.0.1:${BACKEND_PORT}/api/auth/login ...

Important:
  Change ADMIN_PASSWORD again from Profile page after login.
EOF
  chmod 600 "$summary_file"

  log "Rotation complete."
  printf '\nNew admin email: %s\n' "${ADMIN_EMAIL}"
  printf 'New admin password: %s\n' "${ADMIN_PASSWORD}"
  printf 'Summary: %s\n' "$summary_file"
}

main "$@"
|
||||
29
infra/nginx/default.conf
Normal file
29
infra/nginx/default.conf
Normal file
@@ -0,0 +1,29 @@
|
||||
# ProxPanel frontend: serve the built SPA from the nginx image and proxy
# /api/ traffic to the backend container (Docker service name "backend").
server {
    listen 80;
    server_name _;
    root /usr/share/nginx/html;
    index index.html;

    # Proxy all API traffic to the backend, forwarding client-identity
    # headers; 90s read timeout accommodates slow backend operations.
    location /api/ {
        proxy_pass http://backend:8080/api/;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 90s;
    }

    # Exact-match health endpoint (takes precedence over the /api/ prefix
    # block). NOTE(review): it proxies to the same upstream with the same
    # headers minus the read timeout, so it appears redundant — confirm it
    # is intentional (e.g. reserved for health-specific tuning).
    location = /api/health {
        proxy_pass http://backend:8080/api/health;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    # SPA fallback: unknown paths are routed to index.html for client-side
    # routing.
    location / {
        try_files $uri /index.html;
    }
}
|
||||
16
jsconfig.json
Normal file
16
jsconfig.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"target": "ES2020",
|
||||
"module": "ESNext",
|
||||
"moduleResolution": "Bundler",
|
||||
"jsx": "react-jsx",
|
||||
"allowJs": true,
|
||||
"checkJs": false,
|
||||
"baseUrl": ".",
|
||||
"paths": {
|
||||
"@/*": ["src/*"]
|
||||
},
|
||||
"types": ["vite/client"]
|
||||
},
|
||||
"include": ["src", "vite.config.js"]
|
||||
}
|
||||
8290
package-lock.json
generated
Normal file
8290
package-lock.json
generated
Normal file
File diff suppressed because it is too large
Load Diff
85
package.json
Normal file
85
package.json
Normal file
@@ -0,0 +1,85 @@
|
||||
{
|
||||
"name": "proxpanel-app",
|
||||
"private": true,
|
||||
"version": "1.0.0",
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"dev": "vite",
|
||||
"dev:api": "npm --prefix backend run dev",
|
||||
"build": "vite build",
|
||||
"build:api": "npm --prefix backend run build",
|
||||
"lint": "eslint . --quiet",
|
||||
"preview": "vite preview"
|
||||
},
|
||||
"dependencies": {
|
||||
"@hookform/resolvers": "^4.1.2",
|
||||
"@radix-ui/react-accordion": "^1.2.3",
|
||||
"@radix-ui/react-alert-dialog": "^1.1.6",
|
||||
"@radix-ui/react-aspect-ratio": "^1.1.2",
|
||||
"@radix-ui/react-avatar": "^1.1.3",
|
||||
"@radix-ui/react-checkbox": "^1.1.4",
|
||||
"@radix-ui/react-collapsible": "^1.1.3",
|
||||
"@radix-ui/react-context-menu": "^2.2.6",
|
||||
"@radix-ui/react-dialog": "^1.1.6",
|
||||
"@radix-ui/react-dropdown-menu": "^2.1.6",
|
||||
"@radix-ui/react-hover-card": "^1.1.6",
|
||||
"@radix-ui/react-label": "^2.1.2",
|
||||
"@radix-ui/react-menubar": "^1.1.6",
|
||||
"@radix-ui/react-navigation-menu": "^1.2.5",
|
||||
"@radix-ui/react-popover": "^1.1.6",
|
||||
"@radix-ui/react-progress": "^1.1.2",
|
||||
"@radix-ui/react-radio-group": "^1.2.3",
|
||||
"@radix-ui/react-scroll-area": "^1.2.3",
|
||||
"@radix-ui/react-select": "^2.1.6",
|
||||
"@radix-ui/react-separator": "^1.1.2",
|
||||
"@radix-ui/react-slider": "^1.2.3",
|
||||
"@radix-ui/react-slot": "^1.1.2",
|
||||
"@radix-ui/react-switch": "^1.1.3",
|
||||
"@radix-ui/react-tabs": "^1.1.3",
|
||||
"@radix-ui/react-toast": "^1.2.2",
|
||||
"@radix-ui/react-toggle": "^1.1.2",
|
||||
"@radix-ui/react-toggle-group": "^1.1.2",
|
||||
"@radix-ui/react-tooltip": "^1.1.8",
|
||||
"@tanstack/react-query": "^5.84.1",
|
||||
"axios": "^1.9.0",
|
||||
"class-variance-authority": "^0.7.1",
|
||||
"clsx": "^2.1.1",
|
||||
"cmdk": "^1.0.0",
|
||||
"date-fns": "^3.6.0",
|
||||
"embla-carousel-react": "^8.5.2",
|
||||
"framer-motion": "^11.16.4",
|
||||
"input-otp": "^1.4.2",
|
||||
"lucide-react": "^0.475.0",
|
||||
"moment": "^2.30.1",
|
||||
"next-themes": "^0.4.4",
|
||||
"react": "^18.2.0",
|
||||
"react-day-picker": "^8.10.1",
|
||||
"react-dom": "^18.2.0",
|
||||
"react-hook-form": "^7.54.2",
|
||||
"react-resizable-panels": "^2.1.7",
|
||||
"react-router-dom": "^6.26.0",
|
||||
"recharts": "^2.15.4",
|
||||
"sonner": "^2.0.1",
|
||||
"tailwind-merge": "^3.0.2",
|
||||
"tailwindcss-animate": "^1.0.7",
|
||||
"vaul": "^1.1.2"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@eslint/js": "^9.19.0",
|
||||
"@types/node": "^22.13.5",
|
||||
"@types/react": "^18.2.66",
|
||||
"@types/react-dom": "^18.2.22",
|
||||
"@vitejs/plugin-react": "^4.3.4",
|
||||
"autoprefixer": "^10.4.20",
|
||||
"eslint": "^9.19.0",
|
||||
"eslint-plugin-react": "^7.37.4",
|
||||
"eslint-plugin-react-hooks": "^5.0.0",
|
||||
"eslint-plugin-react-refresh": "^0.4.18",
|
||||
"eslint-plugin-unused-imports": "^4.3.0",
|
||||
"globals": "^15.14.0",
|
||||
"postcss": "^8.5.3",
|
||||
"tailwindcss": "^3.4.17",
|
||||
"typescript": "^5.8.2",
|
||||
"vite": "^6.1.0"
|
||||
}
|
||||
}
|
||||
6
postcss.config.js
Normal file
6
postcss.config.js
Normal file
@@ -0,0 +1,6 @@
|
||||
// PostCSS pipeline: Tailwind CSS first, then Autoprefixer for vendor
// prefixes. Empty option objects mean each plugin uses its own defaults
// (Tailwind reads tailwind.config, Autoprefixer reads browserslist).
export default {
  plugins: {
    tailwindcss: {},
    autoprefixer: {}
  }
};
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user