chore: initialize repository with deployment baseline
This commit is contained in:
90
backend/src/app.ts
Normal file
90
backend/src/app.ts
Normal file
@@ -0,0 +1,90 @@
|
||||
import express from "express";
|
||||
import cors from "cors";
|
||||
import helmet from "helmet";
|
||||
import compression from "compression";
|
||||
import morgan from "morgan";
|
||||
import { env } from "./config/env";
|
||||
import authRoutes from "./routes/auth.routes";
|
||||
import healthRoutes from "./routes/health.routes";
|
||||
import dashboardRoutes from "./routes/dashboard.routes";
|
||||
import resourceRoutes from "./routes/resources.routes";
|
||||
import billingRoutes from "./routes/billing.routes";
|
||||
import paymentRoutes from "./routes/payment.routes";
|
||||
import proxmoxRoutes from "./routes/proxmox.routes";
|
||||
import settingsRoutes from "./routes/settings.routes";
|
||||
import operationsRoutes from "./routes/operations.routes";
|
||||
import provisioningRoutes from "./routes/provisioning.routes";
|
||||
import backupRoutes from "./routes/backup.routes";
|
||||
import networkRoutes from "./routes/network.routes";
|
||||
import monitoringRoutes from "./routes/monitoring.routes";
|
||||
import clientRoutes from "./routes/client.routes";
|
||||
import { errorHandler, notFoundHandler } from "./middleware/error-handler";
|
||||
import { createRateLimit } from "./middleware/rate-limit";
|
||||
|
||||
/**
 * Builds and wires the Express application: CORS/security middleware, body
 * parsing, rate limiting, route mounting, and centralized error handling.
 * Exported as a factory so tests can construct isolated app instances.
 */
export function createApp() {
  const app = express();
  // Honor X-Forwarded-For from exactly one proxy hop so req.ip (used as the
  // rate-limit key below) reflects the real client address.
  app.set("trust proxy", 1);

  const globalRateLimit = createRateLimit({
    windowMs: env.RATE_LIMIT_WINDOW_MS,
    max: env.RATE_LIMIT_MAX
  });
  // Stricter limiter for credential endpoints, keyed by ip + submitted email
  // so one client cannot spray many accounts from a single address.
  const authRateLimit = createRateLimit({
    windowMs: env.AUTH_RATE_LIMIT_WINDOW_MS,
    max: env.AUTH_RATE_LIMIT_MAX,
    keyGenerator: (req) => {
      const email = typeof req.body?.email === "string" ? req.body.email.toLowerCase().trim() : "";
      return `${req.ip}:${email}`;
    }
  });

  app.use(
    cors({
      // "*" reflects any origin; otherwise a comma-separated allowlist.
      origin: env.CORS_ORIGIN === "*" ? true : env.CORS_ORIGIN.split(",").map((item) => item.trim()),
      credentials: true
    })
  );
  app.use(helmet());
  app.use(compression());
  app.use(
    express.json({
      limit: "2mb",
      // Preserve the raw request body text alongside the parsed JSON.
      // NOTE(review): assumes Express.Request is augmented with a `rawBody`
      // field in a declaration file elsewhere — confirm it exists.
      verify: (req, _res, buffer) => {
        const request = req as express.Request;
        request.rawBody = buffer.toString("utf8");
      }
    })
  );
  app.use(morgan("dev"));
  // Mounted after express.json so authRateLimit's keyGenerator can read req.body.
  app.use("/api", globalRateLimit);
  app.use("/api/auth/login", authRateLimit);
  app.use("/api/auth/refresh", authRateLimit);

  // Unauthenticated landing route.
  app.get("/", (_req, res) => {
    res.json({
      name: "ProxPanel API",
      version: "1.0.0",
      docs: "/api/health"
    });
  });

  app.use("/api/health", healthRoutes);
  app.use("/api/auth", authRoutes);
  app.use("/api/dashboard", dashboardRoutes);
  app.use("/api/resources", resourceRoutes);
  app.use("/api/billing", billingRoutes);
  app.use("/api/payments", paymentRoutes);
  app.use("/api/proxmox", proxmoxRoutes);
  app.use("/api/settings", settingsRoutes);
  app.use("/api/operations", operationsRoutes);
  app.use("/api/provisioning", provisioningRoutes);
  app.use("/api/backups", backupRoutes);
  app.use("/api/network", networkRoutes);
  app.use("/api/monitoring", monitoringRoutes);
  app.use("/api/client", clientRoutes);

  // 404 for anything unmatched, then the error translator — must stay last.
  app.use(notFoundHandler);
  app.use(errorHandler);

  return app;
}
|
||||
38
backend/src/config/env.ts
Normal file
38
backend/src/config/env.ts
Normal file
@@ -0,0 +1,38 @@
|
||||
import dotenv from "dotenv";
|
||||
import { z } from "zod";
|
||||
|
||||
dotenv.config();
|
||||
|
||||
const envSchema = z.object({
|
||||
NODE_ENV: z.enum(["development", "test", "production"]).default("development"),
|
||||
PORT: z.coerce.number().default(8080),
|
||||
DATABASE_URL: z.string().min(1, "DATABASE_URL is required"),
|
||||
JWT_SECRET: z.string().min(16, "JWT_SECRET must be at least 16 characters"),
|
||||
JWT_EXPIRES_IN: z.string().default("7d"),
|
||||
JWT_REFRESH_SECRET: z.string().min(16, "JWT_REFRESH_SECRET must be at least 16 characters").optional(),
|
||||
JWT_REFRESH_EXPIRES_IN: z.string().default("30d"),
|
||||
CORS_ORIGIN: z.string().default("*"),
|
||||
RATE_LIMIT_WINDOW_MS: z.coerce.number().int().positive().default(60_000),
|
||||
RATE_LIMIT_MAX: z.coerce.number().int().positive().default(600),
|
||||
AUTH_RATE_LIMIT_WINDOW_MS: z.coerce.number().int().positive().default(60_000),
|
||||
AUTH_RATE_LIMIT_MAX: z.coerce.number().int().positive().default(20),
|
||||
SCHEDULER_LEASE_MS: z.coerce.number().int().positive().default(180_000),
|
||||
SCHEDULER_HEARTBEAT_MS: z.coerce.number().int().positive().default(30_000),
|
||||
ENABLE_SCHEDULER: z.coerce.boolean().default(true),
|
||||
BILLING_CRON: z.string().default("0 * * * *"),
|
||||
BACKUP_CRON: z.string().default("*/15 * * * *"),
|
||||
POWER_SCHEDULE_CRON: z.string().default("* * * * *"),
|
||||
MONITORING_CRON: z.string().default("*/5 * * * *"),
|
||||
PROXMOX_TIMEOUT_MS: z.coerce.number().default(15000)
|
||||
});
|
||||
|
||||
const parsed = envSchema.parse(process.env);
|
||||
|
||||
if (parsed.NODE_ENV === "production" && parsed.CORS_ORIGIN === "*") {
|
||||
throw new Error("CORS_ORIGIN cannot be '*' in production");
|
||||
}
|
||||
|
||||
export const env = {
|
||||
...parsed,
|
||||
JWT_REFRESH_SECRET: parsed.JWT_REFRESH_SECRET ?? parsed.JWT_SECRET
|
||||
};
|
||||
23
backend/src/index.ts
Normal file
23
backend/src/index.ts
Normal file
@@ -0,0 +1,23 @@
|
||||
import { createApp } from "./app";
|
||||
import { env } from "./config/env";
|
||||
import { prisma } from "./lib/prisma";
|
||||
import { startSchedulers } from "./services/scheduler.service";
|
||||
|
||||
/**
 * Startup sequence: connect to the database, start the HTTP listener, then
 * launch the background schedulers.
 */
async function bootstrap() {
  await prisma.$connect();

  const app = createApp();
  app.listen(env.PORT, () => {
    // eslint-disable-next-line no-console
    console.log(`ProxPanel API running on port ${env.PORT}`);
  });

  // Schedulers start only after the DB connection is established.
  await startSchedulers();
}

// Any startup failure tears the process down after releasing the DB pool.
bootstrap().catch(async (error) => {
  // eslint-disable-next-line no-console
  console.error("Failed to start server:", error);
  await prisma.$disconnect();
  process.exit(1);
});
|
||||
12
backend/src/lib/http-error.ts
Normal file
12
backend/src/lib/http-error.ts
Normal file
@@ -0,0 +1,12 @@
|
||||
export class HttpError extends Error {
|
||||
status: number;
|
||||
code: string;
|
||||
details?: unknown;
|
||||
|
||||
constructor(status: number, message: string, code = "HTTP_ERROR", details?: unknown) {
|
||||
super(message);
|
||||
this.status = status;
|
||||
this.code = code;
|
||||
this.details = details;
|
||||
}
|
||||
}
|
||||
48
backend/src/lib/prisma-json.ts
Normal file
48
backend/src/lib/prisma-json.ts
Normal file
@@ -0,0 +1,48 @@
|
||||
import type { Prisma } from "@prisma/client";
|
||||
|
||||
export function toPrismaJsonValue(value: unknown): Prisma.InputJsonValue {
|
||||
if (value === null) {
|
||||
return "null";
|
||||
}
|
||||
|
||||
if (typeof value === "string" || typeof value === "boolean") {
|
||||
return value;
|
||||
}
|
||||
|
||||
if (typeof value === "number") {
|
||||
return Number.isFinite(value) ? value : String(value);
|
||||
}
|
||||
|
||||
if (typeof value === "bigint") {
|
||||
return value.toString();
|
||||
}
|
||||
|
||||
if (value instanceof Date) {
|
||||
return value.toISOString();
|
||||
}
|
||||
|
||||
if (value instanceof Error) {
|
||||
return {
|
||||
name: value.name,
|
||||
message: value.message,
|
||||
stack: value.stack ?? ""
|
||||
};
|
||||
}
|
||||
|
||||
if (Array.isArray(value)) {
|
||||
return value.map((item) => toPrismaJsonValue(item));
|
||||
}
|
||||
|
||||
if (typeof value === "object") {
|
||||
const output: Record<string, Prisma.InputJsonValue> = {};
|
||||
|
||||
for (const [key, raw] of Object.entries(value as Record<string, unknown>)) {
|
||||
if (raw === undefined) continue;
|
||||
output[key] = toPrismaJsonValue(raw);
|
||||
}
|
||||
|
||||
return output;
|
||||
}
|
||||
|
||||
return String(value);
|
||||
}
|
||||
3
backend/src/lib/prisma.ts
Normal file
3
backend/src/lib/prisma.ts
Normal file
@@ -0,0 +1,3 @@
|
||||
import { PrismaClient } from "@prisma/client";

// Single shared Prisma client for the whole process; connected during
// bootstrap and disconnected on startup failure (see index.ts).
export const prisma = new PrismaClient();
|
||||
163
backend/src/middleware/auth.ts
Normal file
163
backend/src/middleware/auth.ts
Normal file
@@ -0,0 +1,163 @@
|
||||
import type { NextFunction, Request as ExpressRequest, Response } from "express";
|
||||
import jwt, { type JwtPayload, type SignOptions } from "jsonwebtoken";
|
||||
import type { Role } from "@prisma/client";
|
||||
import { env } from "../config/env";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
|
||||
// Fine-grained permission strings, formatted as "<resource>:<action>".
type Permission =
  | "vm:create"
  | "vm:read"
  | "vm:update"
  | "vm:delete"
  | "vm:start"
  | "vm:stop"
  | "node:manage"
  | "node:read"
  | "tenant:manage"
  | "tenant:read"
  | "billing:manage"
  | "billing:read"
  | "backup:manage"
  | "backup:read"
  | "rbac:manage"
  | "settings:manage"
  | "settings:read"
  | "audit:read"
  | "security:manage"
  | "security:read"
  | "user:manage"
  | "user:read";

// Static role -> permission mapping consulted by authorize().
const rolePermissions: Record<Role, Set<Permission>> = {
  // Every permission.
  SUPER_ADMIN: new Set<Permission>([
    "vm:create",
    "vm:read",
    "vm:update",
    "vm:delete",
    "vm:start",
    "vm:stop",
    "node:manage",
    "node:read",
    "tenant:manage",
    "tenant:read",
    "billing:manage",
    "billing:read",
    "backup:manage",
    "backup:read",
    "rbac:manage",
    "settings:manage",
    "settings:read",
    "audit:read",
    "security:manage",
    "security:read",
    "user:manage",
    "user:read"
  ]),
  // Full VM + backup control, read-only on platform-level resources.
  TENANT_ADMIN: new Set<Permission>([
    "vm:create",
    "vm:read",
    "vm:update",
    "vm:delete",
    "vm:start",
    "vm:stop",
    "node:read",
    "tenant:read",
    "billing:read",
    "backup:manage",
    "backup:read",
    "settings:read",
    "audit:read",
    "security:read",
    "user:read"
  ]),
  // Operational duties: node/backup/security management, VM power control.
  OPERATOR: new Set<Permission>([
    "vm:read",
    "vm:start",
    "vm:stop",
    "node:manage",
    "node:read",
    "billing:read",
    "backup:manage",
    "backup:read",
    "audit:read",
    "security:manage",
    "security:read"
  ]),
  // Read-only across the board.
  VIEWER: new Set<Permission>([
    "vm:read",
    "node:read",
    "tenant:read",
    "billing:read",
    "backup:read",
    "audit:read",
    "security:read",
    "settings:read",
    "user:read"
  ])
};
|
||||
|
||||
export function createJwtToken(payload: Express.UserToken): string {
|
||||
const expiresIn = env.JWT_EXPIRES_IN as SignOptions["expiresIn"];
|
||||
return jwt.sign(payload, env.JWT_SECRET, {
|
||||
expiresIn
|
||||
});
|
||||
}
|
||||
|
||||
export function createRefreshToken(payload: Express.UserToken): string {
|
||||
const expiresIn = env.JWT_REFRESH_EXPIRES_IN as SignOptions["expiresIn"];
|
||||
return jwt.sign(payload, env.JWT_REFRESH_SECRET, {
|
||||
expiresIn
|
||||
});
|
||||
}
|
||||
|
||||
export function verifyRefreshToken(token: string): Express.UserToken | null {
|
||||
try {
|
||||
const decoded = jwt.verify(token, env.JWT_REFRESH_SECRET) as JwtPayload & Express.UserToken;
|
||||
if (!decoded?.id || !decoded?.email || !decoded?.role) {
|
||||
return null;
|
||||
}
|
||||
return {
|
||||
id: decoded.id,
|
||||
email: decoded.email,
|
||||
role: decoded.role,
|
||||
tenant_id: decoded.tenant_id
|
||||
};
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
export function requireAuth(req: ExpressRequest, _res: Response, next: NextFunction) {
|
||||
const authHeader = req.header("authorization");
|
||||
const token = authHeader?.startsWith("Bearer ") ? authHeader.slice(7) : null;
|
||||
|
||||
if (!token) {
|
||||
return next(new HttpError(401, "Missing bearer token", "AUTH_REQUIRED"));
|
||||
}
|
||||
|
||||
try {
|
||||
const decoded = jwt.verify(token, env.JWT_SECRET) as Express.UserToken;
|
||||
req.user = decoded;
|
||||
return next();
|
||||
} catch {
|
||||
return next(new HttpError(401, "Invalid or expired token", "INVALID_TOKEN"));
|
||||
}
|
||||
}
|
||||
|
||||
export function authorize(permission: Permission) {
|
||||
return (req: ExpressRequest, _res: Response, next: NextFunction) => {
|
||||
if (!req.user) {
|
||||
return next(new HttpError(401, "Unauthenticated", "AUTH_REQUIRED"));
|
||||
}
|
||||
const allowed = rolePermissions[req.user.role]?.has(permission);
|
||||
if (!allowed) {
|
||||
return next(new HttpError(403, "Insufficient permission", "FORBIDDEN"));
|
||||
}
|
||||
return next();
|
||||
};
|
||||
}
|
||||
|
||||
export function isTenantScopedUser(req: Pick<Express.Request, "user">): boolean {
|
||||
if (!req.user) return false;
|
||||
return req.user.role === "TENANT_ADMIN" || req.user.role === "VIEWER";
|
||||
}
|
||||
54
backend/src/middleware/error-handler.ts
Normal file
54
backend/src/middleware/error-handler.ts
Normal file
@@ -0,0 +1,54 @@
|
||||
import type { NextFunction, Request, Response } from "express";
|
||||
import { Prisma } from "@prisma/client";
|
||||
import { ZodError } from "zod";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
|
||||
export function notFoundHandler(_req: Request, res: Response) {
|
||||
res.status(404).json({
|
||||
error: {
|
||||
code: "NOT_FOUND",
|
||||
message: "Resource not found"
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
export function errorHandler(error: unknown, _req: Request, res: Response, _next: NextFunction) {
|
||||
if (error instanceof HttpError) {
|
||||
return res.status(error.status).json({
|
||||
error: {
|
||||
code: error.code,
|
||||
message: error.message,
|
||||
details: error.details
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (error instanceof ZodError) {
|
||||
return res.status(400).json({
|
||||
error: {
|
||||
code: "VALIDATION_ERROR",
|
||||
message: "Payload validation failed",
|
||||
details: error.flatten()
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (error instanceof Prisma.PrismaClientKnownRequestError) {
|
||||
return res.status(400).json({
|
||||
error: {
|
||||
code: "DATABASE_ERROR",
|
||||
message: error.message,
|
||||
details: error.meta
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// eslint-disable-next-line no-console
|
||||
console.error("Unhandled error:", error);
|
||||
return res.status(500).json({
|
||||
error: {
|
||||
code: "INTERNAL_SERVER_ERROR",
|
||||
message: "An unexpected server error occurred"
|
||||
}
|
||||
});
|
||||
}
|
||||
60
backend/src/middleware/rate-limit.ts
Normal file
60
backend/src/middleware/rate-limit.ts
Normal file
@@ -0,0 +1,60 @@
|
||||
import type { NextFunction, Request, Response } from "express";
|
||||
|
||||
// Configuration for one limiter instance.
type RateLimitOptions = {
  windowMs: number; // fixed window length in ms (clamped to >= 1s by createRateLimit)
  max: number; // requests allowed per key per window (clamped to >= 1)
  keyGenerator?: (req: Request) => string; // defaults to req.ip
};

// Per-key counter for the current window.
type Bucket = {
  count: number; // requests seen in this window
  resetAt: number; // epoch ms when the window ends
};
|
||||
|
||||
export function createRateLimit(options: RateLimitOptions) {
|
||||
const windowMs = Math.max(1_000, options.windowMs);
|
||||
const max = Math.max(1, options.max);
|
||||
const buckets = new Map<string, Bucket>();
|
||||
|
||||
return (req: Request, res: Response, next: NextFunction) => {
|
||||
const key = options.keyGenerator?.(req) ?? req.ip ?? "unknown";
|
||||
const now = Date.now();
|
||||
const existing = buckets.get(key);
|
||||
|
||||
if (!existing || existing.resetAt <= now) {
|
||||
buckets.set(key, {
|
||||
count: 1,
|
||||
resetAt: now + windowMs
|
||||
});
|
||||
res.setHeader("X-RateLimit-Limit", String(max));
|
||||
res.setHeader("X-RateLimit-Remaining", String(max - 1));
|
||||
res.setHeader("X-RateLimit-Reset", String(Math.ceil((now + windowMs) / 1000)));
|
||||
return next();
|
||||
}
|
||||
|
||||
existing.count += 1;
|
||||
const remaining = Math.max(0, max - existing.count);
|
||||
res.setHeader("X-RateLimit-Limit", String(max));
|
||||
res.setHeader("X-RateLimit-Remaining", String(remaining));
|
||||
res.setHeader("X-RateLimit-Reset", String(Math.ceil(existing.resetAt / 1000)));
|
||||
|
||||
if (existing.count > max) {
|
||||
return res.status(429).json({
|
||||
error: {
|
||||
code: "RATE_LIMIT_EXCEEDED",
|
||||
message: "Too many requests. Please retry later."
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (buckets.size > 10_000) {
|
||||
for (const [bucketKey, bucketValue] of buckets.entries()) {
|
||||
if (bucketValue.resetAt <= now) {
|
||||
buckets.delete(bucketKey);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return next();
|
||||
};
|
||||
}
|
||||
123
backend/src/routes/auth.routes.ts
Normal file
123
backend/src/routes/auth.routes.ts
Normal file
@@ -0,0 +1,123 @@
|
||||
import { Router } from "express";
|
||||
import bcrypt from "bcryptjs";
|
||||
import { z } from "zod";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { createJwtToken, createRefreshToken, requireAuth, verifyRefreshToken } from "../middleware/auth";
|
||||
|
||||
const router = Router();

// Request-body contract for POST /login.
const loginSchema = z.object({
  email: z.string().email(),
  password: z.string().min(1)
});

// Request-body contract for POST /refresh.
const refreshSchema = z.object({
  refresh_token: z.string().min(1)
});
|
||||
|
||||
// POST /login — exchanges email/password for an access + refresh token pair.
router.post("/login", async (req, res, next) => {
  try {
    const payload = loginSchema.parse(req.body);
    const user = await prisma.user.findUnique({ where: { email: payload.email } });
    // Identical message for unknown, inactive, and wrong-password cases so the
    // endpoint does not reveal which accounts exist.
    if (!user || !user.is_active) {
      throw new HttpError(401, "Invalid email or password", "INVALID_CREDENTIALS");
    }
    const matched = await bcrypt.compare(payload.password, user.password_hash);
    if (!matched) {
      throw new HttpError(401, "Invalid email or password", "INVALID_CREDENTIALS");
    }

    // Record the successful login before issuing tokens.
    await prisma.user.update({
      where: { id: user.id },
      data: { last_login_at: new Date() }
    });

    // Claims embedded in both tokens; consumed later by requireAuth and
    // verifyRefreshToken.
    const userPayload = {
      id: user.id,
      email: user.email,
      role: user.role,
      tenant_id: user.tenant_id
    };
    const token = createJwtToken(userPayload);
    const refreshToken = createRefreshToken(userPayload);

    res.json({
      token,
      refresh_token: refreshToken,
      user: {
        id: user.id,
        email: user.email,
        full_name: user.full_name,
        role: user.role,
        tenant_id: user.tenant_id
      }
    });
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// POST /refresh — rotates a valid refresh token into a new token pair.
router.post("/refresh", async (req, res, next) => {
  try {
    const payload = refreshSchema.parse(req.body ?? {});
    const decoded = verifyRefreshToken(payload.refresh_token);
    if (!decoded) {
      throw new HttpError(401, "Invalid refresh token", "INVALID_REFRESH_TOKEN");
    }

    // Re-read the user so role/tenant changes and deactivations made after
    // the token was issued take effect here.
    const user = await prisma.user.findUnique({
      where: { id: decoded.id },
      select: {
        id: true,
        email: true,
        role: true,
        tenant_id: true,
        is_active: true
      }
    });
    if (!user || !user.is_active) {
      throw new HttpError(401, "Refresh token user is invalid", "INVALID_REFRESH_TOKEN");
    }

    // Fresh claims come from the DB record, not the old token.
    const userPayload = {
      id: user.id,
      email: user.email,
      role: user.role,
      tenant_id: user.tenant_id
    };
    const token = createJwtToken(userPayload);
    const refreshToken = createRefreshToken(userPayload);

    res.json({
      token,
      refresh_token: refreshToken
    });
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// GET /me — returns the authenticated user's profile.
router.get("/me", requireAuth, async (req, res, next) => {
  try {
    const user = await prisma.user.findUnique({
      where: { id: req.user!.id },
      select: {
        id: true,
        email: true,
        full_name: true,
        role: true,
        tenant_id: true,
        is_active: true,
        created_at: true
      }
    });
    if (!user) throw new HttpError(404, "User not found", "USER_NOT_FOUND");
    // A token may outlive a deactivation; re-check the flag on every call.
    if (!user.is_active) throw new HttpError(401, "User account is inactive", "USER_INACTIVE");
    res.json(user);
  } catch (error) {
    next(error);
  }
});

export default router;
|
||||
491
backend/src/routes/backup.routes.ts
Normal file
491
backend/src/routes/backup.routes.ts
Normal file
@@ -0,0 +1,491 @@
|
||||
import {
|
||||
BackupRestoreMode,
|
||||
BackupRestoreStatus,
|
||||
BackupSchedule,
|
||||
BackupSource,
|
||||
BackupStatus,
|
||||
BackupType,
|
||||
SnapshotFrequency
|
||||
} from "@prisma/client";
|
||||
import { Router } from "express";
|
||||
import { z } from "zod";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { authorize, isTenantScopedUser, requireAuth } from "../middleware/auth";
|
||||
import { logAudit } from "../services/audit.service";
|
||||
import {
|
||||
createBackup,
|
||||
createRestoreTask,
|
||||
createSnapshotJob,
|
||||
deleteBackup,
|
||||
deleteSnapshotJob,
|
||||
listBackupPolicies,
|
||||
listBackups,
|
||||
listRestoreTasks,
|
||||
listSnapshotJobs,
|
||||
runRestoreTaskNow,
|
||||
runSnapshotJobNow,
|
||||
toggleBackupProtection,
|
||||
updateSnapshotJob,
|
||||
upsertBackupPolicy
|
||||
} from "../services/backup.service";
|
||||
|
||||
const router = Router();

// Body contract for POST / (create a backup for a VM).
const createBackupSchema = z.object({
  vm_id: z.string().min(1),
  type: z.nativeEnum(BackupType).optional(),
  source: z.nativeEnum(BackupSource).optional(),
  schedule: z.nativeEnum(BackupSchedule).optional(),
  retention_days: z.number().int().positive().optional(),
  storage: z.string().optional(),
  route_key: z.string().optional(),
  is_protected: z.boolean().optional(),
  notes: z.string().optional(),
  requested_size_mb: z.number().positive().optional()
});

// Body contract for PATCH /:id/protection.
const protectionSchema = z.object({
  is_protected: z.boolean()
});

// Body contract for POST /restores.
const createRestoreSchema = z.object({
  backup_id: z.string().min(1),
  target_vm_id: z.string().optional(),
  mode: z.nativeEnum(BackupRestoreMode),
  requested_files: z.array(z.string().min(1)).optional(),
  pbs_enabled: z.boolean().optional(),
  run_immediately: z.boolean().default(true)
});

// Body contract for POST /snapshot-jobs; schedule times are UTC.
const createSnapshotSchema = z.object({
  vm_id: z.string().min(1),
  name: z.string().min(2),
  frequency: z.nativeEnum(SnapshotFrequency),
  interval: z.number().int().positive().optional(),
  day_of_week: z.number().int().min(0).max(6).optional(),
  hour_utc: z.number().int().min(0).max(23).optional(),
  minute_utc: z.number().int().min(0).max(59).optional(),
  retention: z.number().int().positive().optional(),
  enabled: z.boolean().optional()
});

// Body contract for PATCH /snapshot-jobs/:id — every field optional;
// day_of_week may be null to clear it.
const updateSnapshotSchema = z.object({
  name: z.string().min(2).optional(),
  frequency: z.nativeEnum(SnapshotFrequency).optional(),
  interval: z.number().int().positive().optional(),
  day_of_week: z.number().int().min(0).max(6).nullable().optional(),
  hour_utc: z.number().int().min(0).max(23).optional(),
  minute_utc: z.number().int().min(0).max(59).optional(),
  retention: z.number().int().positive().optional(),
  enabled: z.boolean().optional()
});

// Body contract for the backup-policy upsert endpoint (handler not in view here).
const upsertPolicySchema = z.object({
  tenant_id: z.string().optional(),
  billing_plan_id: z.string().optional(),
  max_files: z.number().int().positive().optional(),
  max_total_size_mb: z.number().positive().optional(),
  max_protected_files: z.number().int().positive().optional(),
  allow_file_restore: z.boolean().optional(),
  allow_cross_vm_restore: z.boolean().optional(),
  allow_pbs_restore: z.boolean().optional()
});
|
||||
|
||||
function parseOptionalBackupStatus(value: unknown) {
|
||||
if (typeof value !== "string") return undefined;
|
||||
const normalized = value.toUpperCase();
|
||||
return Object.values(BackupStatus).includes(normalized as BackupStatus)
|
||||
? (normalized as BackupStatus)
|
||||
: undefined;
|
||||
}
|
||||
|
||||
function parseOptionalRestoreStatus(value: unknown) {
|
||||
if (typeof value !== "string") return undefined;
|
||||
const normalized = value.toUpperCase();
|
||||
return Object.values(BackupRestoreStatus).includes(normalized as BackupRestoreStatus)
|
||||
? (normalized as BackupRestoreStatus)
|
||||
: undefined;
|
||||
}
|
||||
|
||||
async function ensureVmTenantScope(vmId: string, req: Express.Request) {
|
||||
const vm = await prisma.virtualMachine.findUnique({
|
||||
where: { id: vmId },
|
||||
select: {
|
||||
id: true,
|
||||
tenant_id: true,
|
||||
name: true
|
||||
}
|
||||
});
|
||||
|
||||
if (!vm) throw new HttpError(404, "VM not found", "VM_NOT_FOUND");
|
||||
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && vm.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
return vm;
|
||||
}
|
||||
|
||||
// Loads a backup (including its owning VM) and enforces tenant isolation for
// tenant-scoped callers. Throws 404 when missing, 403 on a scope violation.
async function ensureBackupTenantScope(backupId: string, req: Express.Request) {
  const backup = await prisma.backup.findUnique({
    where: { id: backupId },
    include: {
      vm: {
        select: {
          id: true,
          tenant_id: true,
          name: true
        }
      }
    }
  });

  if (!backup) throw new HttpError(404, "Backup not found", "BACKUP_NOT_FOUND");

  // Prefer the backup's own tenant id; fall back to the VM's owner.
  const tenantId = backup.tenant_id ?? backup.vm.tenant_id;
  if (isTenantScopedUser(req) && req.user?.tenant_id && tenantId !== req.user.tenant_id) {
    throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
  }

  return backup;
}
|
||||
|
||||
// Loads a restore task (with its source VM's tenant) and enforces tenant
// isolation for tenant-scoped callers. 404 when missing, 403 on violation.
async function ensureRestoreTaskTenantScope(taskId: string, req: Express.Request) {
  const task = await prisma.backupRestoreTask.findUnique({
    where: { id: taskId },
    include: {
      source_vm: {
        select: {
          tenant_id: true
        }
      }
    }
  });

  if (!task) throw new HttpError(404, "Restore task not found", "RESTORE_TASK_NOT_FOUND");
  if (isTenantScopedUser(req) && req.user?.tenant_id && task.source_vm.tenant_id !== req.user.tenant_id) {
    throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
  }

  return task;
}
|
||||
|
||||
// Loads a snapshot job (with its VM's tenant) and enforces tenant isolation
// for tenant-scoped callers. 404 when missing, 403 on violation.
async function ensureSnapshotJobTenantScope(jobId: string, req: Express.Request) {
  const job = await prisma.snapshotJob.findUnique({
    where: { id: jobId },
    include: {
      vm: {
        select: {
          tenant_id: true
        }
      }
    }
  });

  if (!job) throw new HttpError(404, "Snapshot job not found", "SNAPSHOT_JOB_NOT_FOUND");
  if (isTenantScopedUser(req) && req.user?.tenant_id && job.vm.tenant_id !== req.user.tenant_id) {
    throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
  }
  return job;
}
|
||||
|
||||
// GET / — lists backups with optional status/vm/paging filters; results are
// tenant-filtered for tenant-scoped users.
router.get("/", requireAuth, authorize("backup:read"), async (req, res, next) => {
  try {
    const status = parseOptionalBackupStatus(req.query.status);
    const vmId = typeof req.query.vm_id === "string" ? req.query.vm_id : undefined;
    // NOTE(review): Number("abc") yields NaN here — presumably the service
    // layer ignores non-finite paging values; confirm in listBackups.
    const limit = typeof req.query.limit === "string" ? Number(req.query.limit) : undefined;
    const offset = typeof req.query.offset === "string" ? Number(req.query.offset) : undefined;

    // Filtering by VM requires proving access to that VM first.
    if (vmId) {
      await ensureVmTenantScope(vmId, req);
    }

    const result = await listBackups({
      tenantId: isTenantScopedUser(req) ? req.user?.tenant_id ?? undefined : undefined,
      status,
      vmId,
      limit,
      offset
    });

    res.json(result);
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// POST / — creates a backup for a VM the caller may access; writes an audit entry.
router.post("/", requireAuth, authorize("backup:manage"), async (req, res, next) => {
  try {
    const payload = createBackupSchema.parse(req.body ?? {});
    await ensureVmTenantScope(payload.vm_id, req);

    // Map snake_case API fields onto the service's camelCase input.
    const backup = await createBackup({
      vmId: payload.vm_id,
      type: payload.type,
      source: payload.source,
      schedule: payload.schedule,
      retentionDays: payload.retention_days,
      storage: payload.storage,
      routeKey: payload.route_key,
      isProtected: payload.is_protected,
      notes: payload.notes,
      requestedSizeMb: payload.requested_size_mb,
      createdBy: req.user?.email
    });

    await logAudit({
      action: "backup.create",
      resource_type: "BACKUP",
      resource_id: backup.id,
      resource_name: backup.vm_name,
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: payload,
      ip_address: req.ip
    });

    res.status(201).json(backup);
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// PATCH /:id/protection — toggles a backup's deletion-protection flag.
router.patch("/:id/protection", requireAuth, authorize("backup:manage"), async (req, res, next) => {
  try {
    const payload = protectionSchema.parse(req.body ?? {});
    await ensureBackupTenantScope(req.params.id, req);

    const backup = await toggleBackupProtection(req.params.id, payload.is_protected);
    res.json(backup);
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// DELETE /:id — deletes a backup; ?force=true bypasses service-side guards
// (presumably protection — confirm in deleteBackup). Responds 204 on success.
router.delete("/:id", requireAuth, authorize("backup:manage"), async (req, res, next) => {
  try {
    await ensureBackupTenantScope(req.params.id, req);
    const force = req.query.force === "true";
    await deleteBackup(req.params.id, force);
    res.status(204).send();
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// GET /restores — lists restore tasks, tenant-filtered for scoped users.
router.get("/restores", requireAuth, authorize("backup:read"), async (req, res, next) => {
  try {
    const status = parseOptionalRestoreStatus(req.query.status);
    const limit = typeof req.query.limit === "string" ? Number(req.query.limit) : undefined;
    const offset = typeof req.query.offset === "string" ? Number(req.query.offset) : undefined;

    const result = await listRestoreTasks({
      tenantId: isTenantScopedUser(req) ? req.user?.tenant_id ?? undefined : undefined,
      status,
      limit,
      offset
    });

    res.json(result);
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// POST /restores — creates (and optionally immediately runs) a restore task.
// Both the source backup and any explicit target VM must pass tenant scoping.
router.post("/restores", requireAuth, authorize("backup:manage"), async (req, res, next) => {
  try {
    const payload = createRestoreSchema.parse(req.body ?? {});
    await ensureBackupTenantScope(payload.backup_id, req);

    if (payload.target_vm_id) {
      await ensureVmTenantScope(payload.target_vm_id, req);
    }

    const task = await createRestoreTask({
      backupId: payload.backup_id,
      targetVmId: payload.target_vm_id,
      mode: payload.mode,
      requestedFiles: payload.requested_files,
      pbsEnabled: payload.pbs_enabled,
      createdBy: req.user?.email,
      runImmediately: payload.run_immediately
    });

    await logAudit({
      action: "backup.restore.create",
      resource_type: "BACKUP",
      resource_id: payload.backup_id,
      actor_email: req.user!.email,
      actor_role: req.user!.role,
      details: payload,
      ip_address: req.ip
    });

    res.status(201).json(task);
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// POST /restores/:id/run — triggers an existing restore task immediately.
router.post("/restores/:id/run", requireAuth, authorize("backup:manage"), async (req, res, next) => {
  try {
    await ensureRestoreTaskTenantScope(req.params.id, req);
    const task = await runRestoreTaskNow(req.params.id);
    res.json(task);
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
router.get("/snapshot-jobs", requireAuth, authorize("backup:read"), async (req, res, next) => {
|
||||
try {
|
||||
const jobs = await listSnapshotJobs({
|
||||
tenantId: isTenantScopedUser(req) ? req.user?.tenant_id ?? undefined : undefined
|
||||
});
|
||||
res.json({ data: jobs });
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/snapshot-jobs", requireAuth, authorize("backup:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = createSnapshotSchema.parse(req.body ?? {});
|
||||
await ensureVmTenantScope(payload.vm_id, req);
|
||||
|
||||
const job = await createSnapshotJob({
|
||||
vmId: payload.vm_id,
|
||||
name: payload.name,
|
||||
frequency: payload.frequency,
|
||||
interval: payload.interval,
|
||||
dayOfWeek: payload.day_of_week,
|
||||
hourUtc: payload.hour_utc,
|
||||
minuteUtc: payload.minute_utc,
|
||||
retention: payload.retention,
|
||||
enabled: payload.enabled,
|
||||
createdBy: req.user?.email
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "snapshot_job.create",
|
||||
resource_type: "BACKUP",
|
||||
resource_id: job.id,
|
||||
resource_name: job.name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: payload,
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
res.status(201).json(job);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/snapshot-jobs/:id", requireAuth, authorize("backup:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = updateSnapshotSchema.parse(req.body ?? {});
|
||||
await ensureSnapshotJobTenantScope(req.params.id, req);
|
||||
|
||||
const job = await updateSnapshotJob(req.params.id, {
|
||||
name: payload.name,
|
||||
frequency: payload.frequency,
|
||||
interval: payload.interval,
|
||||
dayOfWeek: payload.day_of_week,
|
||||
hourUtc: payload.hour_utc,
|
||||
minuteUtc: payload.minute_utc,
|
||||
retention: payload.retention,
|
||||
enabled: payload.enabled
|
||||
});
|
||||
|
||||
res.json(job);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.delete("/snapshot-jobs/:id", requireAuth, authorize("backup:manage"), async (req, res, next) => {
|
||||
try {
|
||||
await ensureSnapshotJobTenantScope(req.params.id, req);
|
||||
await deleteSnapshotJob(req.params.id);
|
||||
res.status(204).send();
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/snapshot-jobs/:id/run", requireAuth, authorize("backup:manage"), async (req, res, next) => {
|
||||
try {
|
||||
await ensureSnapshotJobTenantScope(req.params.id, req);
|
||||
const result = await runSnapshotJobNow(req.params.id);
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/policies", requireAuth, authorize("backup:read"), async (_req, res, next) => {
|
||||
try {
|
||||
const all = await listBackupPolicies();
|
||||
const data =
|
||||
isTenantScopedUser(_req) && _req.user?.tenant_id
|
||||
? all.filter((item) => item.tenant_id === _req.user?.tenant_id)
|
||||
: all;
|
||||
res.json({ data });
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/policies", requireAuth, authorize("backup:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = upsertPolicySchema.parse(req.body ?? {});
|
||||
const tenantId = isTenantScopedUser(req) ? req.user?.tenant_id ?? undefined : payload.tenant_id;
|
||||
if (isTenantScopedUser(req) && payload.tenant_id && req.user?.tenant_id && payload.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
const policy = await upsertBackupPolicy({
|
||||
tenantId,
|
||||
billingPlanId: payload.billing_plan_id,
|
||||
maxFiles: payload.max_files,
|
||||
maxTotalSizeMb: payload.max_total_size_mb,
|
||||
maxProtectedFiles: payload.max_protected_files,
|
||||
allowFileRestore: payload.allow_file_restore,
|
||||
allowCrossVmRestore: payload.allow_cross_vm_restore,
|
||||
allowPbsRestore: payload.allow_pbs_restore
|
||||
});
|
||||
|
||||
res.status(201).json(policy);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/policies/:id", requireAuth, authorize("backup:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = upsertPolicySchema.parse(req.body ?? {});
|
||||
const tenantId = isTenantScopedUser(req) ? req.user?.tenant_id ?? undefined : payload.tenant_id;
|
||||
if (isTenantScopedUser(req) && payload.tenant_id && req.user?.tenant_id && payload.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
const policy = await upsertBackupPolicy({
|
||||
policyId: req.params.id,
|
||||
tenantId,
|
||||
billingPlanId: payload.billing_plan_id,
|
||||
maxFiles: payload.max_files,
|
||||
maxTotalSizeMb: payload.max_total_size_mb,
|
||||
maxProtectedFiles: payload.max_protected_files,
|
||||
allowFileRestore: payload.allow_file_restore,
|
||||
allowCrossVmRestore: payload.allow_cross_vm_restore,
|
||||
allowPbsRestore: payload.allow_pbs_restore
|
||||
});
|
||||
|
||||
res.json(policy);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
||||
46
backend/src/routes/billing.routes.ts
Normal file
46
backend/src/routes/billing.routes.ts
Normal file
@@ -0,0 +1,46 @@
|
||||
import { Router } from "express";
|
||||
import { z } from "zod";
|
||||
import { authorize, requireAuth } from "../middleware/auth";
|
||||
import { generateInvoicesFromUnbilledUsage, markInvoicePaid, meterHourlyUsage } from "../services/billing.service";
|
||||
|
||||
const router = Router();
|
||||
|
||||
router.post("/meter/hourly", requireAuth, authorize("billing:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const result = await meterHourlyUsage(req.user?.email ?? "system@proxpanel.local");
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/invoices/generate", requireAuth, authorize("billing:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const result = await generateInvoicesFromUnbilledUsage(req.user?.email ?? "system@proxpanel.local");
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
// Request body for marking an invoice as paid: which provider processed
// the payment (defaults to MANUAL) plus the provider's payment reference.
const markPaidSchema = z.object({
  payment_provider: z.enum(["PAYSTACK", "FLUTTERWAVE", "MANUAL"]).default("MANUAL"),
  payment_reference: z.string().min(2)
});
|
||||
|
||||
router.post("/invoices/:id/pay", requireAuth, authorize("billing:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = markPaidSchema.parse(req.body ?? {});
|
||||
const invoice = await markInvoicePaid(
|
||||
req.params.id,
|
||||
payload.payment_provider,
|
||||
payload.payment_reference,
|
||||
req.user?.email ?? "system@proxpanel.local"
|
||||
);
|
||||
res.json(invoice);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
||||
1247
backend/src/routes/client.routes.ts
Normal file
1247
backend/src/routes/client.routes.ts
Normal file
File diff suppressed because it is too large
Load Diff
390
backend/src/routes/dashboard.routes.ts
Normal file
390
backend/src/routes/dashboard.routes.ts
Normal file
@@ -0,0 +1,390 @@
|
||||
import { Router } from "express";
|
||||
import { IpScope, IpVersion } from "@prisma/client";
|
||||
import { authorize, isTenantScopedUser, requireAuth } from "../middleware/auth";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { subnetUtilizationDashboard } from "../services/network.service";
|
||||
|
||||
const router = Router();
|
||||
|
||||
type HeatLevel = "critical" | "warning" | "elevated" | "healthy";
|
||||
|
||||
function clampInteger(value: unknown, min: number, max: number, fallback: number) {
|
||||
if (typeof value !== "string") return fallback;
|
||||
const parsed = Number(value);
|
||||
if (!Number.isInteger(parsed)) return fallback;
|
||||
return Math.min(Math.max(parsed, min), max);
|
||||
}
|
||||
|
||||
function toUtcDayStart(date: Date) {
|
||||
return new Date(Date.UTC(date.getUTCFullYear(), date.getUTCMonth(), date.getUTCDate()));
|
||||
}
|
||||
|
||||
function toDateKey(date: Date) {
|
||||
return date.toISOString().slice(0, 10);
|
||||
}
|
||||
|
||||
function resolveHeatLevel(pressurePct: number): HeatLevel {
|
||||
if (pressurePct >= 90) return "critical";
|
||||
if (pressurePct >= 75) return "warning";
|
||||
if (pressurePct >= 60) return "elevated";
|
||||
return "healthy";
|
||||
}
|
||||
|
||||
router.get("/summary", requireAuth, authorize("vm:read"), async (req, res, next) => {
|
||||
try {
|
||||
const tenantScoped = isTenantScopedUser(req) && req.user?.tenant_id;
|
||||
const tenantWhere = tenantScoped ? { tenant_id: req.user!.tenant_id! } : {};
|
||||
|
||||
const [vmTotal, vmRunning, nodeTotal, tenantTotal, invoicePaidAgg, invoicePendingAgg] = await Promise.all([
|
||||
prisma.virtualMachine.count({ where: tenantWhere }),
|
||||
prisma.virtualMachine.count({ where: { ...tenantWhere, status: "RUNNING" } }),
|
||||
prisma.proxmoxNode.count(),
|
||||
prisma.tenant.count(),
|
||||
prisma.invoice.aggregate({
|
||||
where: { ...tenantWhere, status: "PAID" },
|
||||
_sum: { amount: true }
|
||||
}),
|
||||
prisma.invoice.aggregate({
|
||||
where: { ...tenantWhere, status: "PENDING" },
|
||||
_sum: { amount: true }
|
||||
})
|
||||
]);
|
||||
|
||||
const usage = await prisma.usageRecord.findMany({
|
||||
where: {
|
||||
...tenantWhere,
|
||||
period_start: {
|
||||
gte: new Date(Date.now() - 7 * 24 * 60 * 60 * 1000)
|
||||
}
|
||||
},
|
||||
orderBy: { period_start: "asc" }
|
||||
});
|
||||
|
||||
const hourlyRevenueMap = new Map<string, number>();
|
||||
for (const record of usage) {
|
||||
const key = new Date(record.period_start).toISOString().slice(0, 13) + ":00:00Z";
|
||||
hourlyRevenueMap.set(key, (hourlyRevenueMap.get(key) ?? 0) + Number(record.total_cost));
|
||||
}
|
||||
|
||||
const topVmMap = new Map<string, { vm_name: string; total: number }>();
|
||||
for (const record of usage) {
|
||||
const current = topVmMap.get(record.vm_id) ?? { vm_name: record.vm_name, total: 0 };
|
||||
current.total += Number(record.total_cost);
|
||||
topVmMap.set(record.vm_id, current);
|
||||
}
|
||||
|
||||
const topVms = Array.from(topVmMap.entries())
|
||||
.map(([vm_id, value]) => ({ vm_id, ...value }))
|
||||
.sort((a, b) => b.total - a.total)
|
||||
.slice(0, 5);
|
||||
|
||||
const recentVms = await prisma.virtualMachine.findMany({
|
||||
where: tenantWhere,
|
||||
orderBy: { created_at: "desc" },
|
||||
take: 8,
|
||||
select: {
|
||||
id: true,
|
||||
name: true,
|
||||
status: true,
|
||||
node: true,
|
||||
tenant_id: true,
|
||||
cpu_usage: true,
|
||||
ram_usage: true,
|
||||
disk_usage: true,
|
||||
created_at: true
|
||||
}
|
||||
});
|
||||
|
||||
res.json({
|
||||
metrics: {
|
||||
vm_total: vmTotal,
|
||||
vm_running: vmRunning,
|
||||
node_total: nodeTotal,
|
||||
tenant_total: tenantTotal,
|
||||
revenue_paid_total: Number(invoicePaidAgg._sum.amount ?? 0),
|
||||
revenue_pending_total: Number(invoicePendingAgg._sum.amount ?? 0)
|
||||
},
|
||||
hourly_revenue_7d: Array.from(hourlyRevenueMap.entries()).map(([time, value]) => ({
|
||||
time,
|
||||
value
|
||||
})),
|
||||
top_vms_by_cost: topVms,
|
||||
recent_vms: recentVms
|
||||
});
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/network-utilization", requireAuth, authorize("vm:read"), async (req, res, next) => {
|
||||
try {
|
||||
const tenantScoped = isTenantScopedUser(req) && req.user?.tenant_id;
|
||||
const selectedTenantId =
|
||||
tenantScoped && req.user?.tenant_id
|
||||
? req.user.tenant_id
|
||||
: typeof req.query.tenant_id === "string"
|
||||
? req.query.tenant_id
|
||||
: undefined;
|
||||
|
||||
const scopeQuery = typeof req.query.scope === "string" ? req.query.scope.toUpperCase() : undefined;
|
||||
const versionQuery = typeof req.query.version === "string" ? req.query.version.toUpperCase() : undefined;
|
||||
const rawVlanTag = typeof req.query.vlan_tag === "string" ? Number(req.query.vlan_tag) : undefined;
|
||||
const vlanTag = typeof rawVlanTag === "number" && Number.isInteger(rawVlanTag) ? rawVlanTag : undefined;
|
||||
|
||||
const scope = Object.values(IpScope).includes(scopeQuery as IpScope) ? (scopeQuery as IpScope) : undefined;
|
||||
const version = Object.values(IpVersion).includes(versionQuery as IpVersion) ? (versionQuery as IpVersion) : undefined;
|
||||
|
||||
const days = clampInteger(req.query.days, 7, 60, 14);
|
||||
const maxTenants = clampInteger(req.query.max_tenants, 1, 10, 5);
|
||||
|
||||
const subnetDashboard = await subnetUtilizationDashboard({
|
||||
scope,
|
||||
version,
|
||||
node_hostname: typeof req.query.node_hostname === "string" ? req.query.node_hostname : undefined,
|
||||
bridge: typeof req.query.bridge === "string" ? req.query.bridge : undefined,
|
||||
vlan_tag: vlanTag,
|
||||
tenant_id: selectedTenantId
|
||||
});
|
||||
|
||||
const heatmapCells = subnetDashboard.subnets.slice(0, 18).map((subnet, index) => ({
|
||||
rank: index + 1,
|
||||
subnet: subnet.subnet,
|
||||
scope: subnet.scope,
|
||||
version: subnet.version,
|
||||
node_hostname: subnet.node_hostname,
|
||||
bridge: subnet.bridge,
|
||||
vlan_tag: subnet.vlan_tag,
|
||||
total: subnet.total,
|
||||
assigned: subnet.assigned,
|
||||
reserved: subnet.reserved,
|
||||
available: subnet.available,
|
||||
utilization_pct: subnet.utilization_pct,
|
||||
pressure_pct: subnet.pressure_pct,
|
||||
heat_level: resolveHeatLevel(subnet.pressure_pct)
|
||||
}));
|
||||
|
||||
const heatmapSummary = subnetDashboard.subnets.reduce(
|
||||
(acc, subnet) => {
|
||||
const level = resolveHeatLevel(subnet.pressure_pct);
|
||||
acc.total_subnets += 1;
|
||||
if (level === "critical") acc.critical += 1;
|
||||
if (level === "warning") acc.warning += 1;
|
||||
if (level === "elevated") acc.elevated += 1;
|
||||
if (level === "healthy") acc.healthy += 1;
|
||||
return acc;
|
||||
},
|
||||
{
|
||||
total_subnets: 0,
|
||||
critical: 0,
|
||||
warning: 0,
|
||||
elevated: 0,
|
||||
healthy: 0
|
||||
}
|
||||
);
|
||||
|
||||
let tenantIds: string[] = [];
|
||||
if (selectedTenantId) {
|
||||
tenantIds = [selectedTenantId];
|
||||
} else {
|
||||
const groupedTenants = await prisma.ipAssignment.groupBy({
|
||||
by: ["tenant_id"],
|
||||
where: {
|
||||
is_active: true,
|
||||
tenant_id: {
|
||||
not: null
|
||||
}
|
||||
},
|
||||
_count: {
|
||||
_all: true
|
||||
},
|
||||
orderBy: {
|
||||
_count: {
|
||||
tenant_id: "desc"
|
||||
}
|
||||
},
|
||||
take: maxTenants
|
||||
});
|
||||
|
||||
tenantIds = groupedTenants.map((item) => item.tenant_id).filter((item): item is string => Boolean(item));
|
||||
}
|
||||
|
||||
if (tenantIds.length === 0) {
|
||||
return res.json({
|
||||
generated_at: new Date().toISOString(),
|
||||
subnet_heatmap: {
|
||||
summary: heatmapSummary,
|
||||
cells: heatmapCells
|
||||
},
|
||||
tenant_trends: {
|
||||
window_days: days,
|
||||
series: [],
|
||||
chart_points: []
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
const rangeEnd = new Date();
|
||||
rangeEnd.setUTCHours(23, 59, 59, 999);
|
||||
const rangeStart = toUtcDayStart(rangeEnd);
|
||||
rangeStart.setUTCDate(rangeStart.getUTCDate() - (days - 1));
|
||||
|
||||
const dayFrames = Array.from({ length: days }, (_, index) => {
|
||||
const start = new Date(rangeStart);
|
||||
start.setUTCDate(rangeStart.getUTCDate() + index);
|
||||
const end = new Date(start);
|
||||
end.setUTCHours(23, 59, 59, 999);
|
||||
return {
|
||||
key: toDateKey(start),
|
||||
end
|
||||
};
|
||||
});
|
||||
|
||||
const [tenants, quotas, assignments] = await Promise.all([
|
||||
prisma.tenant.findMany({
|
||||
where: {
|
||||
id: {
|
||||
in: tenantIds
|
||||
}
|
||||
},
|
||||
select: {
|
||||
id: true,
|
||||
name: true
|
||||
}
|
||||
}),
|
||||
prisma.tenantIpQuota.findMany({
|
||||
where: {
|
||||
tenant_id: {
|
||||
in: tenantIds
|
||||
}
|
||||
},
|
||||
select: {
|
||||
tenant_id: true,
|
||||
ipv4_limit: true,
|
||||
ipv6_limit: true,
|
||||
burst_allowed: true
|
||||
}
|
||||
}),
|
||||
prisma.ipAssignment.findMany({
|
||||
where: {
|
||||
tenant_id: {
|
||||
in: tenantIds
|
||||
},
|
||||
assigned_at: {
|
||||
lte: rangeEnd
|
||||
},
|
||||
OR: [
|
||||
{
|
||||
released_at: null
|
||||
},
|
||||
{
|
||||
released_at: {
|
||||
gte: rangeStart
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
select: {
|
||||
tenant_id: true,
|
||||
assigned_at: true,
|
||||
released_at: true,
|
||||
ip_address: {
|
||||
select: {
|
||||
version: true
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
]);
|
||||
|
||||
const tenantMap = new Map(tenants.map((tenant) => [tenant.id, tenant]));
|
||||
const quotaMap = new Map(quotas.map((quota) => [quota.tenant_id, quota]));
|
||||
const assignmentsByTenant = new Map<string, typeof assignments>();
|
||||
|
||||
for (const assignment of assignments) {
|
||||
if (!assignment.tenant_id) continue;
|
||||
if (!assignmentsByTenant.has(assignment.tenant_id)) {
|
||||
assignmentsByTenant.set(assignment.tenant_id, []);
|
||||
}
|
||||
assignmentsByTenant.get(assignment.tenant_id)!.push(assignment);
|
||||
}
|
||||
|
||||
const orderedTenantIds = tenantIds.filter((tenantId) => tenantMap.has(tenantId));
|
||||
const series = orderedTenantIds.map((tenantId) => {
|
||||
const tenant = tenantMap.get(tenantId)!;
|
||||
const quota = quotaMap.get(tenantId);
|
||||
const tenantAssignments = assignmentsByTenant.get(tenantId) ?? [];
|
||||
|
||||
const points = dayFrames.map((day) => {
|
||||
let assignedIpv4 = 0;
|
||||
let assignedIpv6 = 0;
|
||||
|
||||
for (const assignment of tenantAssignments) {
|
||||
const activeAtDayEnd =
|
||||
assignment.assigned_at <= day.end && (!assignment.released_at || assignment.released_at > day.end);
|
||||
if (!activeAtDayEnd) continue;
|
||||
if (assignment.ip_address.version === IpVersion.IPV4) assignedIpv4 += 1;
|
||||
if (assignment.ip_address.version === IpVersion.IPV6) assignedIpv6 += 1;
|
||||
}
|
||||
|
||||
const quotaPressure: number[] = [];
|
||||
if (typeof quota?.ipv4_limit === "number" && quota.ipv4_limit > 0) {
|
||||
quotaPressure.push((assignedIpv4 / quota.ipv4_limit) * 100);
|
||||
}
|
||||
if (typeof quota?.ipv6_limit === "number" && quota.ipv6_limit > 0) {
|
||||
quotaPressure.push((assignedIpv6 / quota.ipv6_limit) * 100);
|
||||
}
|
||||
|
||||
return {
|
||||
date: day.key,
|
||||
assigned_total: assignedIpv4 + assignedIpv6,
|
||||
assigned_ipv4: assignedIpv4,
|
||||
assigned_ipv6: assignedIpv6,
|
||||
quota_utilization_pct: quotaPressure.length > 0 ? Number(Math.max(...quotaPressure).toFixed(2)) : null
|
||||
};
|
||||
});
|
||||
|
||||
const lastPoint = points[points.length - 1];
|
||||
return {
|
||||
tenant_id: tenant.id,
|
||||
tenant_name: tenant.name,
|
||||
current_assigned: lastPoint?.assigned_total ?? 0,
|
||||
peak_assigned: points.reduce((peak, point) => (point.assigned_total > peak ? point.assigned_total : peak), 0),
|
||||
quota: {
|
||||
ipv4_limit: quota?.ipv4_limit ?? null,
|
||||
ipv6_limit: quota?.ipv6_limit ?? null,
|
||||
burst_allowed: quota?.burst_allowed ?? false
|
||||
},
|
||||
points
|
||||
};
|
||||
});
|
||||
|
||||
const chartPoints = dayFrames.map((day, index) => {
|
||||
const point: Record<string, string | number> = {
|
||||
date: day.key
|
||||
};
|
||||
|
||||
for (const tenant of series) {
|
||||
point[tenant.tenant_id] = tenant.points[index]?.assigned_total ?? 0;
|
||||
}
|
||||
|
||||
return point;
|
||||
});
|
||||
|
||||
return res.json({
|
||||
generated_at: new Date().toISOString(),
|
||||
subnet_heatmap: {
|
||||
summary: heatmapSummary,
|
||||
cells: heatmapCells
|
||||
},
|
||||
tenant_trends: {
|
||||
window_days: days,
|
||||
series,
|
||||
chart_points: chartPoints
|
||||
}
|
||||
});
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
||||
22
backend/src/routes/health.routes.ts
Normal file
22
backend/src/routes/health.routes.ts
Normal file
@@ -0,0 +1,22 @@
|
||||
import { Router } from "express";
|
||||
import { prisma } from "../lib/prisma";
|
||||
|
||||
const router = Router();
|
||||
|
||||
router.get("/", async (_req, res) => {
|
||||
let db = "ok";
|
||||
try {
|
||||
await prisma.$queryRaw`SELECT 1`;
|
||||
} catch {
|
||||
db = "error";
|
||||
}
|
||||
res.json({
|
||||
status: db === "ok" ? "ok" : "degraded",
|
||||
services: {
|
||||
database: db
|
||||
},
|
||||
timestamp: new Date().toISOString()
|
||||
});
|
||||
});
|
||||
|
||||
export default router;
|
||||
391
backend/src/routes/monitoring.routes.ts
Normal file
391
backend/src/routes/monitoring.routes.ts
Normal file
@@ -0,0 +1,391 @@
|
||||
import {
|
||||
AlertChannel,
|
||||
HealthCheckTargetType,
|
||||
HealthCheckType,
|
||||
MonitoringAlertStatus,
|
||||
Severity
|
||||
} from "@prisma/client";
|
||||
import { Router } from "express";
|
||||
import { z } from "zod";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { toPrismaJsonValue } from "../lib/prisma-json";
|
||||
import { authorize, isTenantScopedUser, requireAuth } from "../middleware/auth";
|
||||
import { logAudit } from "../services/audit.service";
|
||||
import {
|
||||
clusterResourceForecast,
|
||||
createAlertRule,
|
||||
createHealthCheckDefinition,
|
||||
evaluateAlertRulesNow,
|
||||
faultyDeploymentInsights,
|
||||
listAlertEvents,
|
||||
listAlertNotifications,
|
||||
listAlertRules,
|
||||
listHealthCheckResults,
|
||||
listHealthChecks,
|
||||
monitoringOverview,
|
||||
runHealthCheckNow,
|
||||
updateAlertRule,
|
||||
updateHealthCheckDefinition
|
||||
} from "../services/monitoring.service";
|
||||
|
||||
const router = Router();
|
||||
|
||||
// Request body for creating a health-check definition. All thresholds are
// optional: percent thresholds are 0-100, throughput/IO thresholds are
// non-negative, latency is in whole milliseconds. `target_type` selects
// what the check monitors; tenant/vm/node ids narrow the target.
const healthCheckSchema = z.object({
  name: z.string().min(2),
  description: z.string().optional(),
  target_type: z.nativeEnum(HealthCheckTargetType),
  check_type: z.nativeEnum(HealthCheckType).optional(),
  tenant_id: z.string().optional(),
  vm_id: z.string().optional(),
  node_id: z.string().optional(),
  // CPU / RAM / disk usage thresholds (percent of capacity).
  cpu_warn_pct: z.number().min(0).max(100).optional(),
  cpu_critical_pct: z.number().min(0).max(100).optional(),
  ram_warn_pct: z.number().min(0).max(100).optional(),
  ram_critical_pct: z.number().min(0).max(100).optional(),
  disk_warn_pct: z.number().min(0).max(100).optional(),
  disk_critical_pct: z.number().min(0).max(100).optional(),
  // Disk IO and network throughput thresholds (units defined by the
  // monitoring service — not specified here).
  disk_io_read_warn: z.number().min(0).optional(),
  disk_io_read_critical: z.number().min(0).optional(),
  disk_io_write_warn: z.number().min(0).optional(),
  disk_io_write_critical: z.number().min(0).optional(),
  network_in_warn: z.number().min(0).optional(),
  network_in_critical: z.number().min(0).optional(),
  network_out_warn: z.number().min(0).optional(),
  network_out_critical: z.number().min(0).optional(),
  // Latency thresholds in milliseconds (must be at least 1).
  latency_warn_ms: z.number().int().min(1).optional(),
  latency_critical_ms: z.number().int().min(1).optional(),
  // How often the check runs, in minutes (1 minute to 1 day).
  schedule_minutes: z.number().int().min(1).max(1440).optional(),
  enabled: z.boolean().optional(),
  metadata: z.record(z.unknown()).optional()
});
|
||||
|
||||
// Request body for creating an alert rule. A rule fires after a metric
// exceeds its threshold for `consecutive_breaches` evaluations within the
// evaluation window; `channels` selects where notifications are delivered.
const alertRuleSchema = z.object({
  name: z.string().min(2),
  description: z.string().optional(),
  tenant_id: z.string().optional(),
  vm_id: z.string().optional(),
  node_id: z.string().optional(),
  // Usage thresholds in percent of capacity.
  cpu_threshold_pct: z.number().min(0).max(100).optional(),
  ram_threshold_pct: z.number().min(0).max(100).optional(),
  disk_threshold_pct: z.number().min(0).max(100).optional(),
  // IO / network thresholds (units defined by the monitoring service).
  disk_io_read_threshold: z.number().min(0).optional(),
  disk_io_write_threshold: z.number().min(0).optional(),
  network_in_threshold: z.number().min(0).optional(),
  network_out_threshold: z.number().min(0).optional(),
  // Debounce: how many consecutive breaches trigger the alert, evaluated
  // over a window of up to one day.
  consecutive_breaches: z.number().int().min(1).max(20).optional(),
  evaluation_window_minutes: z.number().int().min(1).max(1440).optional(),
  severity: z.nativeEnum(Severity).optional(),
  channels: z.array(z.nativeEnum(AlertChannel)).optional(),
  enabled: z.boolean().optional(),
  metadata: z.record(z.unknown()).optional()
});
|
||||
|
||||
async function ensureVmTenantScope(vmId: string, req: Pick<Express.Request, "user">) {
|
||||
const vm = await prisma.virtualMachine.findUnique({
|
||||
where: { id: vmId },
|
||||
select: {
|
||||
id: true,
|
||||
tenant_id: true,
|
||||
name: true
|
||||
}
|
||||
});
|
||||
|
||||
if (!vm) {
|
||||
throw new HttpError(404, "VM not found", "VM_NOT_FOUND");
|
||||
}
|
||||
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && vm.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
return vm;
|
||||
}
|
||||
|
||||
function scopedTenantId(req: Pick<Express.Request, "user">) {
|
||||
return isTenantScopedUser(req) ? req.user?.tenant_id ?? undefined : undefined;
|
||||
}
|
||||
|
||||
function queryTenantId(req: { query?: Record<string, unknown> }) {
|
||||
return typeof req.query?.tenant_id === "string" ? req.query.tenant_id : undefined;
|
||||
}
|
||||
|
||||
router.get("/overview", requireAuth, authorize("security:read"), async (req, res, next) => {
|
||||
try {
|
||||
const data = await monitoringOverview({
|
||||
tenant_id: scopedTenantId(req)
|
||||
});
|
||||
return res.json(data);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/health-checks", requireAuth, authorize("security:read"), async (req, res, next) => {
|
||||
try {
|
||||
const data = await listHealthChecks({
|
||||
tenant_id: scopedTenantId(req) ?? queryTenantId(req),
|
||||
enabled: typeof req.query.enabled === "string" ? req.query.enabled === "true" : undefined
|
||||
});
|
||||
return res.json({ data });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/health-checks", requireAuth, authorize("security:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = healthCheckSchema.parse(req.body ?? {});
|
||||
|
||||
if (payload.vm_id) {
|
||||
await ensureVmTenantScope(payload.vm_id, req);
|
||||
}
|
||||
|
||||
const tenantId = scopedTenantId(req) ?? payload.tenant_id;
|
||||
const check = await createHealthCheckDefinition({
|
||||
...payload,
|
||||
tenant_id: tenantId,
|
||||
created_by: req.user?.email
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "monitoring.health_check.create",
|
||||
resource_type: "SECURITY",
|
||||
resource_id: check.id,
|
||||
resource_name: check.name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue(payload),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
return res.status(201).json(check);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/health-checks/:id", requireAuth, authorize("security:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = healthCheckSchema.partial().parse(req.body ?? {});
|
||||
const existing = await prisma.serverHealthCheck.findUnique({
|
||||
where: { id: req.params.id },
|
||||
select: {
|
||||
id: true,
|
||||
tenant_id: true
|
||||
}
|
||||
});
|
||||
|
||||
if (!existing) {
|
||||
throw new HttpError(404, "Health check not found", "HEALTH_CHECK_NOT_FOUND");
|
||||
}
|
||||
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && existing.tenant_id && existing.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
if (payload.vm_id) {
|
||||
await ensureVmTenantScope(payload.vm_id, req);
|
||||
}
|
||||
|
||||
const updated = await updateHealthCheckDefinition(req.params.id, {
|
||||
...payload,
|
||||
tenant_id: scopedTenantId(req) ?? payload.tenant_id
|
||||
});
|
||||
|
||||
return res.json(updated);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/health-checks/:id/run", requireAuth, authorize("security:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const existing = await prisma.serverHealthCheck.findUnique({
|
||||
where: { id: req.params.id },
|
||||
select: { id: true, tenant_id: true }
|
||||
});
|
||||
|
||||
if (!existing) {
|
||||
throw new HttpError(404, "Health check not found", "HEALTH_CHECK_NOT_FOUND");
|
||||
}
|
||||
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && existing.tenant_id && existing.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
const result = await runHealthCheckNow(existing.id);
|
||||
return res.json(result);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/health-checks/:id/results", requireAuth, authorize("security:read"), async (req, res, next) => {
|
||||
try {
|
||||
const existing = await prisma.serverHealthCheck.findUnique({
|
||||
where: { id: req.params.id },
|
||||
select: { id: true, tenant_id: true }
|
||||
});
|
||||
|
||||
if (!existing) {
|
||||
throw new HttpError(404, "Health check not found", "HEALTH_CHECK_NOT_FOUND");
|
||||
}
|
||||
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && existing.tenant_id && existing.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
const limit = typeof req.query.limit === "string" ? Number(req.query.limit) : undefined;
|
||||
const data = await listHealthCheckResults(existing.id, limit);
|
||||
return res.json({ data });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/alerts/rules", requireAuth, authorize("security:read"), async (req, res, next) => {
|
||||
try {
|
||||
const data = await listAlertRules({
|
||||
tenant_id: scopedTenantId(req) ?? queryTenantId(req),
|
||||
enabled: typeof req.query.enabled === "string" ? req.query.enabled === "true" : undefined
|
||||
});
|
||||
return res.json({ data });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/alerts/rules", requireAuth, authorize("security:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = alertRuleSchema.parse(req.body ?? {});
|
||||
|
||||
if (payload.vm_id) {
|
||||
await ensureVmTenantScope(payload.vm_id, req);
|
||||
}
|
||||
|
||||
const tenantId = scopedTenantId(req) ?? payload.tenant_id;
|
||||
const rule = await createAlertRule({
|
||||
...payload,
|
||||
tenant_id: tenantId,
|
||||
created_by: req.user?.email
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "monitoring.alert_rule.create",
|
||||
resource_type: "SECURITY",
|
||||
resource_id: rule.id,
|
||||
resource_name: rule.name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue(payload),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
return res.status(201).json(rule);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/alerts/rules/:id", requireAuth, authorize("security:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = alertRuleSchema.partial().parse(req.body ?? {});
|
||||
const existing = await prisma.monitoringAlertRule.findUnique({
|
||||
where: { id: req.params.id },
|
||||
select: {
|
||||
id: true,
|
||||
tenant_id: true
|
||||
}
|
||||
});
|
||||
|
||||
if (!existing) {
|
||||
throw new HttpError(404, "Alert rule not found", "ALERT_RULE_NOT_FOUND");
|
||||
}
|
||||
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && existing.tenant_id && existing.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
if (payload.vm_id) {
|
||||
await ensureVmTenantScope(payload.vm_id, req);
|
||||
}
|
||||
|
||||
const updated = await updateAlertRule(req.params.id, {
|
||||
...payload,
|
||||
tenant_id: scopedTenantId(req) ?? payload.tenant_id
|
||||
});
|
||||
return res.json(updated);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/alerts/events", requireAuth, authorize("security:read"), async (req, res, next) => {
|
||||
try {
|
||||
const statusRaw = typeof req.query.status === "string" ? req.query.status.toUpperCase() : undefined;
|
||||
const status = Object.values(MonitoringAlertStatus).includes(statusRaw as MonitoringAlertStatus)
|
||||
? (statusRaw as MonitoringAlertStatus)
|
||||
: undefined;
|
||||
|
||||
const limit = typeof req.query.limit === "string" ? Number(req.query.limit) : undefined;
|
||||
const data = await listAlertEvents({
|
||||
tenant_id: scopedTenantId(req) ?? queryTenantId(req),
|
||||
status,
|
||||
limit
|
||||
});
|
||||
return res.json({ data });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/alerts/notifications", requireAuth, authorize("security:read"), async (req, res, next) => {
|
||||
try {
|
||||
const limit = typeof req.query.limit === "string" ? Number(req.query.limit) : undefined;
|
||||
const data = await listAlertNotifications({
|
||||
tenant_id: scopedTenantId(req) ?? queryTenantId(req),
|
||||
limit
|
||||
});
|
||||
return res.json({ data });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/alerts/evaluate", requireAuth, authorize("security:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const result = await evaluateAlertRulesNow(scopedTenantId(req));
|
||||
return res.json(result);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/insights/faulty-deployments", requireAuth, authorize("security:read"), async (req, res, next) => {
|
||||
try {
|
||||
const days = typeof req.query.days === "string" ? Number(req.query.days) : undefined;
|
||||
const data = await faultyDeploymentInsights({
|
||||
days,
|
||||
tenant_id: scopedTenantId(req) ?? queryTenantId(req)
|
||||
});
|
||||
return res.json(data);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/insights/cluster-forecast", requireAuth, authorize("security:read"), async (req, res, next) => {
|
||||
try {
|
||||
const horizon = typeof req.query.horizon_days === "string" ? Number(req.query.horizon_days) : undefined;
|
||||
const data = await clusterResourceForecast({
|
||||
horizon_days: horizon,
|
||||
tenant_id: scopedTenantId(req) ?? queryTenantId(req)
|
||||
});
|
||||
return res.json(data);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
||||
636
backend/src/routes/network.routes.ts
Normal file
636
backend/src/routes/network.routes.ts
Normal file
@@ -0,0 +1,636 @@
|
||||
import { IpAddressStatus, IpAssignmentType, IpAllocationStrategy, IpScope, IpVersion, PrivateNetworkType } from "@prisma/client";
|
||||
import { Router } from "express";
|
||||
import { z } from "zod";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { toPrismaJsonValue } from "../lib/prisma-json";
|
||||
import { authorize, isTenantScopedUser, requireAuth } from "../middleware/auth";
|
||||
import { logAudit } from "../services/audit.service";
|
||||
import {
|
||||
assignIpToVm,
|
||||
attachPrivateNetwork,
|
||||
createPrivateNetwork,
|
||||
detachPrivateNetwork,
|
||||
importIpAddresses,
|
||||
listIpAddresses,
|
||||
listIpAssignments,
|
||||
listIpPoolPolicies,
|
||||
listIpReservedRanges,
|
||||
listPrivateNetworks,
|
||||
listTenantIpQuotas,
|
||||
returnAssignedIp,
|
||||
subnetUtilizationDashboard,
|
||||
upsertIpPoolPolicy,
|
||||
upsertTenantIpQuota,
|
||||
createIpReservedRange,
|
||||
updateIpReservedRange
|
||||
} from "../services/network.service";
|
||||
|
||||
const router = Router();
|
||||
|
||||
const ipImportSchema = z.object({
|
||||
addresses: z.array(z.string().min(2)).optional(),
|
||||
cidr_blocks: z.array(z.string().min(3)).optional(),
|
||||
scope: z.nativeEnum(IpScope).optional(),
|
||||
server: z.string().optional(),
|
||||
node_id: z.string().optional(),
|
||||
node_hostname: z.string().optional(),
|
||||
bridge: z.string().optional(),
|
||||
vlan_tag: z.number().int().min(0).max(4094).optional(),
|
||||
sdn_zone: z.string().optional(),
|
||||
gateway: z.string().optional(),
|
||||
subnet: z.string().optional(),
|
||||
tags: z.array(z.string().min(1)).optional(),
|
||||
metadata: z.record(z.unknown()).optional()
|
||||
});
|
||||
|
||||
const ipAssignSchema = z.object({
|
||||
vm_id: z.string().min(1),
|
||||
ip_address_id: z.string().optional(),
|
||||
address: z.string().optional(),
|
||||
scope: z.nativeEnum(IpScope).optional(),
|
||||
version: z.nativeEnum(IpVersion).optional(),
|
||||
assignment_type: z.nativeEnum(IpAssignmentType).default(IpAssignmentType.ADDITIONAL),
|
||||
interface_name: z.string().optional(),
|
||||
notes: z.string().optional(),
|
||||
metadata: z.record(z.unknown()).optional()
|
||||
});
|
||||
|
||||
const ipReturnSchema = z
|
||||
.object({
|
||||
assignment_id: z.string().optional(),
|
||||
ip_address_id: z.string().optional()
|
||||
})
|
||||
.refine((value) => value.assignment_id || value.ip_address_id, {
|
||||
message: "assignment_id or ip_address_id is required"
|
||||
});
|
||||
|
||||
const privateNetworkCreateSchema = z.object({
|
||||
name: z.string().min(2),
|
||||
slug: z.string().optional(),
|
||||
network_type: z.nativeEnum(PrivateNetworkType).optional(),
|
||||
cidr: z.string().min(3),
|
||||
gateway: z.string().optional(),
|
||||
bridge: z.string().optional(),
|
||||
vlan_tag: z.number().int().min(0).max(4094).optional(),
|
||||
sdn_zone: z.string().optional(),
|
||||
server: z.string().optional(),
|
||||
node_hostname: z.string().optional(),
|
||||
metadata: z.record(z.unknown()).optional()
|
||||
});
|
||||
|
||||
const privateNetworkAttachSchema = z.object({
|
||||
network_id: z.string().min(1),
|
||||
vm_id: z.string().min(1),
|
||||
interface_name: z.string().optional(),
|
||||
requested_ip: z.string().optional(),
|
||||
metadata: z.record(z.unknown()).optional()
|
||||
});
|
||||
|
||||
const tenantQuotaSchema = z.object({
|
||||
tenant_id: z.string().min(1),
|
||||
ipv4_limit: z.number().int().positive().nullable().optional(),
|
||||
ipv6_limit: z.number().int().positive().nullable().optional(),
|
||||
reserved_ipv4: z.number().int().min(0).optional(),
|
||||
reserved_ipv6: z.number().int().min(0).optional(),
|
||||
burst_allowed: z.boolean().optional(),
|
||||
burst_ipv4_limit: z.number().int().positive().nullable().optional(),
|
||||
burst_ipv6_limit: z.number().int().positive().nullable().optional(),
|
||||
is_active: z.boolean().optional(),
|
||||
metadata: z.record(z.unknown()).optional()
|
||||
});
|
||||
|
||||
const reservedRangeSchema = z.object({
|
||||
name: z.string().min(2),
|
||||
cidr: z.string().min(3),
|
||||
scope: z.nativeEnum(IpScope).optional(),
|
||||
tenant_id: z.string().optional(),
|
||||
reason: z.string().optional(),
|
||||
node_hostname: z.string().optional(),
|
||||
bridge: z.string().optional(),
|
||||
vlan_tag: z.number().int().min(0).max(4094).optional(),
|
||||
sdn_zone: z.string().optional(),
|
||||
is_active: z.boolean().optional(),
|
||||
metadata: z.record(z.unknown()).optional()
|
||||
});
|
||||
|
||||
const ipPoolPolicySchema = z.object({
|
||||
name: z.string().min(2),
|
||||
tenant_id: z.string().optional(),
|
||||
scope: z.nativeEnum(IpScope).optional(),
|
||||
version: z.nativeEnum(IpVersion).optional(),
|
||||
node_hostname: z.string().optional(),
|
||||
bridge: z.string().optional(),
|
||||
vlan_tag: z.number().int().min(0).max(4094).optional(),
|
||||
sdn_zone: z.string().optional(),
|
||||
allocation_strategy: z.nativeEnum(IpAllocationStrategy).optional(),
|
||||
enforce_quota: z.boolean().optional(),
|
||||
disallow_reserved_use: z.boolean().optional(),
|
||||
is_active: z.boolean().optional(),
|
||||
priority: z.number().int().min(1).max(1000).optional(),
|
||||
metadata: z.record(z.unknown()).optional()
|
||||
});
|
||||
|
||||
async function ensureVmTenantScope(vmId: string, req: Pick<Express.Request, "user">) {
|
||||
const vm = await prisma.virtualMachine.findUnique({
|
||||
where: { id: vmId },
|
||||
select: {
|
||||
id: true,
|
||||
tenant_id: true,
|
||||
name: true
|
||||
}
|
||||
});
|
||||
|
||||
if (!vm) {
|
||||
throw new HttpError(404, "VM not found", "VM_NOT_FOUND");
|
||||
}
|
||||
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && vm.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
return vm;
|
||||
}
|
||||
|
||||
router.get("/ip-addresses", requireAuth, authorize("node:read"), async (req, res, next) => {
|
||||
try {
|
||||
const status = typeof req.query.status === "string" ? req.query.status.toUpperCase() : undefined;
|
||||
const version = typeof req.query.version === "string" ? req.query.version.toUpperCase() : undefined;
|
||||
const scope = typeof req.query.scope === "string" ? req.query.scope.toUpperCase() : undefined;
|
||||
|
||||
const result = await listIpAddresses({
|
||||
status: Object.values(IpAddressStatus).includes(status as IpAddressStatus) ? (status as IpAddressStatus) : undefined,
|
||||
version: Object.values(IpVersion).includes(version as IpVersion) ? (version as IpVersion) : undefined,
|
||||
scope: Object.values(IpScope).includes(scope as IpScope) ? (scope as IpScope) : undefined,
|
||||
nodeHostname: typeof req.query.node_hostname === "string" ? req.query.node_hostname : undefined,
|
||||
bridge: typeof req.query.bridge === "string" ? req.query.bridge : undefined,
|
||||
vlanTag: typeof req.query.vlan_tag === "string" ? Number(req.query.vlan_tag) : undefined,
|
||||
assignedVmId: typeof req.query.assigned_vm_id === "string" ? req.query.assigned_vm_id : undefined,
|
||||
limit: typeof req.query.limit === "string" ? Number(req.query.limit) : undefined,
|
||||
offset: typeof req.query.offset === "string" ? Number(req.query.offset) : undefined
|
||||
});
|
||||
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id) {
|
||||
const tenantData = result.data.filter(
|
||||
(item) =>
|
||||
item.assigned_tenant_id === req.user?.tenant_id ||
|
||||
(item.status === IpAddressStatus.AVAILABLE && item.scope === IpScope.PRIVATE)
|
||||
);
|
||||
return res.json({
|
||||
data: tenantData,
|
||||
meta: {
|
||||
...result.meta,
|
||||
total: tenantData.length
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
return res.json(result);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/ip-addresses/import", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = ipImportSchema.parse(req.body ?? {});
|
||||
const result = await importIpAddresses({
|
||||
...payload,
|
||||
imported_by: req.user?.email
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "ip_address.import",
|
||||
resource_type: "SYSTEM",
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue({
|
||||
...payload,
|
||||
result
|
||||
}),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
return res.status(201).json(result);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/subnet-utilization", requireAuth, authorize("node:read"), async (req, res, next) => {
|
||||
try {
|
||||
const scope = typeof req.query.scope === "string" ? req.query.scope.toUpperCase() : undefined;
|
||||
const version = typeof req.query.version === "string" ? req.query.version.toUpperCase() : undefined;
|
||||
|
||||
const dashboard = await subnetUtilizationDashboard({
|
||||
scope: Object.values(IpScope).includes(scope as IpScope) ? (scope as IpScope) : undefined,
|
||||
version: Object.values(IpVersion).includes(version as IpVersion) ? (version as IpVersion) : undefined,
|
||||
node_hostname: typeof req.query.node_hostname === "string" ? req.query.node_hostname : undefined,
|
||||
bridge: typeof req.query.bridge === "string" ? req.query.bridge : undefined,
|
||||
vlan_tag: typeof req.query.vlan_tag === "string" ? Number(req.query.vlan_tag) : undefined,
|
||||
tenant_id:
|
||||
isTenantScopedUser(req) && req.user?.tenant_id
|
||||
? req.user.tenant_id
|
||||
: typeof req.query.tenant_id === "string"
|
||||
? req.query.tenant_id
|
||||
: undefined
|
||||
});
|
||||
|
||||
return res.json(dashboard);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/ip-assignments", requireAuth, authorize("vm:read"), async (req, res, next) => {
|
||||
try {
|
||||
const data = await listIpAssignments({
|
||||
vm_id: typeof req.query.vm_id === "string" ? req.query.vm_id : undefined,
|
||||
tenant_id:
|
||||
isTenantScopedUser(req) && req.user?.tenant_id
|
||||
? req.user.tenant_id
|
||||
: typeof req.query.tenant_id === "string"
|
||||
? req.query.tenant_id
|
||||
: undefined,
|
||||
active_only: req.query.active_only === "true"
|
||||
});
|
||||
return res.json({ data });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/ip-assignments", requireAuth, authorize("vm:update"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = ipAssignSchema.parse(req.body ?? {});
|
||||
await ensureVmTenantScope(payload.vm_id, req);
|
||||
|
||||
const assignment = await assignIpToVm({
|
||||
vm_id: payload.vm_id,
|
||||
ip_address_id: payload.ip_address_id,
|
||||
address: payload.address,
|
||||
scope: payload.scope,
|
||||
version: payload.version,
|
||||
assignment_type: payload.assignment_type,
|
||||
interface_name: payload.interface_name,
|
||||
notes: payload.notes,
|
||||
metadata: payload.metadata,
|
||||
actor_email: req.user?.email
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "ip_address.assign",
|
||||
resource_type: "VM",
|
||||
resource_id: payload.vm_id,
|
||||
resource_name: assignment.vm.name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue({
|
||||
assignment_id: assignment.id,
|
||||
ip_address: assignment.ip_address.address,
|
||||
cidr: assignment.ip_address.cidr,
|
||||
assignment_type: assignment.assignment_type,
|
||||
interface_name: assignment.interface_name
|
||||
}),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
return res.status(201).json(assignment);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/ip-assignments/return", requireAuth, authorize("vm:update"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = ipReturnSchema.parse(req.body ?? {});
|
||||
if (payload.assignment_id) {
|
||||
const existing = await prisma.ipAssignment.findUnique({
|
||||
where: { id: payload.assignment_id },
|
||||
include: {
|
||||
vm: {
|
||||
select: {
|
||||
id: true
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
if (!existing) throw new HttpError(404, "IP assignment not found", "IP_ASSIGNMENT_NOT_FOUND");
|
||||
await ensureVmTenantScope(existing.vm.id, req);
|
||||
}
|
||||
|
||||
const assignment = await returnAssignedIp(payload);
|
||||
|
||||
await logAudit({
|
||||
action: "ip_address.return",
|
||||
resource_type: "VM",
|
||||
resource_id: assignment.vm_id,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue({
|
||||
assignment_id: assignment.id,
|
||||
ip_address_id: assignment.ip_address_id
|
||||
}),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
return res.json(assignment);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/tenant-quotas", requireAuth, authorize("tenant:read"), async (req, res, next) => {
|
||||
try {
|
||||
const data = await listTenantIpQuotas(
|
||||
isTenantScopedUser(req) && req.user?.tenant_id ? req.user.tenant_id : typeof req.query.tenant_id === "string" ? req.query.tenant_id : undefined
|
||||
);
|
||||
return res.json({ data });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/tenant-quotas", requireAuth, authorize("tenant:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = tenantQuotaSchema.parse(req.body ?? {});
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && payload.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
const quota = await upsertTenantIpQuota({
|
||||
...payload,
|
||||
created_by: req.user?.email
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "ip_quota.upsert",
|
||||
resource_type: "TENANT",
|
||||
resource_id: quota.tenant_id,
|
||||
resource_name: quota.tenant.name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue(payload),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
return res.status(201).json(quota);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/reserved-ranges", requireAuth, authorize("node:read"), async (req, res, next) => {
|
||||
try {
|
||||
const all = await listIpReservedRanges();
|
||||
const data =
|
||||
isTenantScopedUser(req) && req.user?.tenant_id
|
||||
? all.filter((item) => !item.tenant_id || item.tenant_id === req.user?.tenant_id)
|
||||
: all;
|
||||
return res.json({ data });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/reserved-ranges", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = reservedRangeSchema.parse(req.body ?? {});
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && payload.tenant_id && payload.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
const range = await createIpReservedRange({
|
||||
...payload,
|
||||
created_by: req.user?.email
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "ip_reserved_range.create",
|
||||
resource_type: "NETWORK",
|
||||
resource_id: range.id,
|
||||
resource_name: range.name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue(payload),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
return res.status(201).json(range);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/reserved-ranges/:id", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = reservedRangeSchema.partial().parse(req.body ?? {});
|
||||
const existing = await prisma.ipReservedRange.findUnique({ where: { id: req.params.id } });
|
||||
if (!existing) throw new HttpError(404, "Reserved range not found", "RESERVED_RANGE_NOT_FOUND");
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && existing.tenant_id && existing.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
const updated = await updateIpReservedRange(req.params.id, payload);
|
||||
return res.json(updated);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/policies", requireAuth, authorize("node:read"), async (req, res, next) => {
|
||||
try {
|
||||
const all = await listIpPoolPolicies();
|
||||
const data =
|
||||
isTenantScopedUser(req) && req.user?.tenant_id
|
||||
? all.filter((item) => !item.tenant_id || item.tenant_id === req.user?.tenant_id)
|
||||
: all;
|
||||
return res.json({ data });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/policies", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = ipPoolPolicySchema.parse(req.body ?? {});
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && payload.tenant_id && payload.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
const policy = await upsertIpPoolPolicy({
|
||||
...payload,
|
||||
created_by: req.user?.email
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "ip_pool_policy.create",
|
||||
resource_type: "NETWORK",
|
||||
resource_id: policy.id,
|
||||
resource_name: policy.name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue(payload),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
return res.status(201).json(policy);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/policies/:id", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = ipPoolPolicySchema.partial().parse(req.body ?? {});
|
||||
const existing = await prisma.ipPoolPolicy.findUnique({ where: { id: req.params.id } });
|
||||
if (!existing) throw new HttpError(404, "IP pool policy not found", "IP_POOL_POLICY_NOT_FOUND");
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && existing.tenant_id && existing.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
const policy = await upsertIpPoolPolicy({
|
||||
policy_id: existing.id,
|
||||
name: payload.name ?? existing.name,
|
||||
tenant_id: payload.tenant_id ?? existing.tenant_id ?? undefined,
|
||||
scope: payload.scope ?? existing.scope ?? undefined,
|
||||
version: payload.version ?? existing.version ?? undefined,
|
||||
node_hostname: payload.node_hostname ?? existing.node_hostname ?? undefined,
|
||||
bridge: payload.bridge ?? existing.bridge ?? undefined,
|
||||
vlan_tag: payload.vlan_tag ?? existing.vlan_tag ?? undefined,
|
||||
sdn_zone: payload.sdn_zone ?? existing.sdn_zone ?? undefined,
|
||||
allocation_strategy: payload.allocation_strategy ?? existing.allocation_strategy,
|
||||
enforce_quota: payload.enforce_quota ?? existing.enforce_quota,
|
||||
disallow_reserved_use: payload.disallow_reserved_use ?? existing.disallow_reserved_use,
|
||||
is_active: payload.is_active ?? existing.is_active,
|
||||
priority: payload.priority ?? existing.priority,
|
||||
metadata: payload.metadata
|
||||
});
|
||||
|
||||
return res.json(policy);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/private-networks", requireAuth, authorize("node:read"), async (_req, res, next) => {
|
||||
try {
|
||||
const data = await listPrivateNetworks();
|
||||
return res.json({ data });
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/private-networks", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = privateNetworkCreateSchema.parse(req.body ?? {});
|
||||
const network = await createPrivateNetwork({
|
||||
name: payload.name,
|
||||
slug: payload.slug,
|
||||
network_type: payload.network_type,
|
||||
cidr: payload.cidr,
|
||||
gateway: payload.gateway,
|
||||
bridge: payload.bridge,
|
||||
vlan_tag: payload.vlan_tag,
|
||||
sdn_zone: payload.sdn_zone,
|
||||
server: payload.server,
|
||||
node_hostname: payload.node_hostname,
|
||||
metadata: payload.metadata,
|
||||
created_by: req.user?.email
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "private_network.create",
|
||||
resource_type: "NETWORK",
|
||||
resource_id: network.id,
|
||||
resource_name: network.name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue(payload),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
return res.status(201).json(network);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/private-networks/attach", requireAuth, authorize("vm:update"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = privateNetworkAttachSchema.parse(req.body ?? {});
|
||||
await ensureVmTenantScope(payload.vm_id, req);
|
||||
|
||||
const attachment = await attachPrivateNetwork({
|
||||
network_id: payload.network_id,
|
||||
vm_id: payload.vm_id,
|
||||
interface_name: payload.interface_name,
|
||||
requested_ip: payload.requested_ip,
|
||||
metadata: payload.metadata,
|
||||
actor_email: req.user?.email
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "private_network.attach",
|
||||
resource_type: "VM",
|
||||
resource_id: payload.vm_id,
|
||||
resource_name: attachment.vm.name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue({
|
||||
attachment_id: attachment.id,
|
||||
network_id: payload.network_id,
|
||||
interface_name: attachment.interface_name,
|
||||
requested_ip: payload.requested_ip
|
||||
}),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
return res.status(201).json(attachment);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/private-networks/attachments/:id/detach", requireAuth, authorize("vm:update"), async (req, res, next) => {
|
||||
try {
|
||||
const existing = await prisma.privateNetworkAttachment.findUnique({
|
||||
where: { id: req.params.id },
|
||||
select: {
|
||||
id: true,
|
||||
vm_id: true
|
||||
}
|
||||
});
|
||||
|
||||
if (!existing) throw new HttpError(404, "Private network attachment not found", "PRIVATE_NETWORK_ATTACHMENT_NOT_FOUND");
|
||||
await ensureVmTenantScope(existing.vm_id, req);
|
||||
|
||||
const attachment = await detachPrivateNetwork({
|
||||
attachment_id: req.params.id,
|
||||
actor_email: req.user?.email
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "private_network.detach",
|
||||
resource_type: "VM",
|
||||
resource_id: attachment.vm.id,
|
||||
resource_name: attachment.vm.name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue({
|
||||
attachment_id: attachment.id,
|
||||
network_id: attachment.network_id,
|
||||
interface_name: attachment.interface_name
|
||||
}),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
return res.json(attachment);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
||||
275
backend/src/routes/operations.routes.ts
Normal file
275
backend/src/routes/operations.routes.ts
Normal file
@@ -0,0 +1,275 @@
|
||||
import { OperationTaskStatus, OperationTaskType, PowerScheduleAction } from "@prisma/client";
|
||||
import { Router } from "express";
|
||||
import { z } from "zod";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { authorize, isTenantScopedUser, requireAuth } from "../middleware/auth";
|
||||
import {
|
||||
createPowerSchedule,
|
||||
deletePowerSchedule,
|
||||
executeVmPowerActionNow,
|
||||
listOperationTasks,
|
||||
operationQueueInsights,
|
||||
listPowerSchedules,
|
||||
updatePowerSchedule
|
||||
} from "../services/operations.service";
|
||||
import { logAudit } from "../services/audit.service";
|
||||
|
||||
const router = Router();
|
||||
|
||||
const scheduleCreateSchema = z.object({
|
||||
vm_id: z.string().min(1),
|
||||
action: z.nativeEnum(PowerScheduleAction),
|
||||
cron_expression: z.string().min(5),
|
||||
timezone: z.string().default("UTC")
|
||||
});
|
||||
|
||||
const scheduleUpdateSchema = z.object({
|
||||
action: z.nativeEnum(PowerScheduleAction).optional(),
|
||||
cron_expression: z.string().min(5).optional(),
|
||||
timezone: z.string().min(1).optional(),
|
||||
enabled: z.boolean().optional()
|
||||
});
|
||||
|
||||
function parseOptionalEnum<T extends Record<string, string>>(value: unknown, enumObject: T) {
|
||||
if (typeof value !== "string") return undefined;
|
||||
const candidate = value.toUpperCase();
|
||||
return Object.values(enumObject).includes(candidate as T[keyof T])
|
||||
? (candidate as T[keyof T])
|
||||
: undefined;
|
||||
}
|
||||
|
||||
async function ensureVmTenantAccess(vmId: string, req: Express.Request) {
|
||||
const vm = await prisma.virtualMachine.findUnique({
|
||||
where: { id: vmId },
|
||||
select: {
|
||||
id: true,
|
||||
name: true,
|
||||
node: true,
|
||||
tenant_id: true
|
||||
}
|
||||
});
|
||||
|
||||
if (!vm) {
|
||||
throw new HttpError(404, "VM not found", "VM_NOT_FOUND");
|
||||
}
|
||||
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && vm.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
return vm;
|
||||
}
|
||||
|
||||
router.get("/tasks", requireAuth, authorize("audit:read"), async (req, res, next) => {
|
||||
try {
|
||||
const status = parseOptionalEnum(req.query.status, OperationTaskStatus);
|
||||
const taskType = parseOptionalEnum(req.query.task_type, OperationTaskType);
|
||||
const vmId = typeof req.query.vm_id === "string" ? req.query.vm_id : undefined;
|
||||
const limit = typeof req.query.limit === "string" ? Number(req.query.limit) : undefined;
|
||||
const offset = typeof req.query.offset === "string" ? Number(req.query.offset) : undefined;
|
||||
|
||||
const result = await listOperationTasks({
|
||||
status,
|
||||
taskType,
|
||||
vmId,
|
||||
limit,
|
||||
offset,
|
||||
tenantId: isTenantScopedUser(req) ? req.user?.tenant_id : undefined
|
||||
});
|
||||
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/queue-insights", requireAuth, authorize("audit:read"), async (req, res, next) => {
|
||||
try {
|
||||
const data = await operationQueueInsights(isTenantScopedUser(req) ? req.user?.tenant_id : undefined);
|
||||
return res.json(data);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/power-schedules", requireAuth, authorize("vm:read"), async (req, res, next) => {
|
||||
try {
|
||||
const schedules = await listPowerSchedules(isTenantScopedUser(req) ? req.user?.tenant_id : undefined);
|
||||
res.json({ data: schedules });
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/power-schedules", requireAuth, authorize("vm:update"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = scheduleCreateSchema.parse(req.body ?? {});
|
||||
const vm = await ensureVmTenantAccess(payload.vm_id, req);
|
||||
|
||||
const schedule = await createPowerSchedule({
|
||||
vmId: vm.id,
|
||||
action: payload.action,
|
||||
cronExpression: payload.cron_expression,
|
||||
timezone: payload.timezone,
|
||||
createdBy: req.user?.email
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "power_schedule.create",
|
||||
resource_type: "VM",
|
||||
resource_id: vm.id,
|
||||
resource_name: vm.name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: {
|
||||
schedule_id: schedule.id,
|
||||
action: payload.action,
|
||||
cron_expression: payload.cron_expression
|
||||
},
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
res.status(201).json(schedule);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/power-schedules/:id", requireAuth, authorize("vm:update"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = scheduleUpdateSchema.parse(req.body ?? {});
|
||||
const existing = await prisma.powerSchedule.findUnique({
|
||||
where: { id: req.params.id },
|
||||
include: {
|
||||
vm: {
|
||||
select: {
|
||||
id: true,
|
||||
name: true,
|
||||
tenant_id: true
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
if (!existing) {
|
||||
throw new HttpError(404, "Power schedule not found", "POWER_SCHEDULE_NOT_FOUND");
|
||||
}
|
||||
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && existing.vm.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
const schedule = await updatePowerSchedule(existing.id, {
|
||||
action: payload.action,
|
||||
cronExpression: payload.cron_expression,
|
||||
timezone: payload.timezone,
|
||||
enabled: payload.enabled
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "power_schedule.update",
|
||||
resource_type: "VM",
|
||||
resource_id: existing.vm.id,
|
||||
resource_name: existing.vm.name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: {
|
||||
schedule_id: existing.id,
|
||||
payload
|
||||
},
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
res.json(schedule);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.delete("/power-schedules/:id", requireAuth, authorize("vm:update"), async (req, res, next) => {
|
||||
try {
|
||||
const existing = await prisma.powerSchedule.findUnique({
|
||||
where: { id: req.params.id },
|
||||
include: {
|
||||
vm: {
|
||||
select: {
|
||||
id: true,
|
||||
name: true,
|
||||
tenant_id: true
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
if (!existing) {
|
||||
throw new HttpError(404, "Power schedule not found", "POWER_SCHEDULE_NOT_FOUND");
|
||||
}
|
||||
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && existing.vm.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
await deletePowerSchedule(existing.id);
|
||||
|
||||
await logAudit({
|
||||
action: "power_schedule.delete",
|
||||
resource_type: "VM",
|
||||
resource_id: existing.vm.id,
|
||||
resource_name: existing.vm.name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: {
|
||||
schedule_id: existing.id
|
||||
},
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
res.status(204).send();
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/power-schedules/:id/run", requireAuth, authorize("vm:update"), async (req, res, next) => {
|
||||
try {
|
||||
const existing = await prisma.powerSchedule.findUnique({
|
||||
where: { id: req.params.id },
|
||||
include: {
|
||||
vm: {
|
||||
select: {
|
||||
id: true,
|
||||
name: true,
|
||||
tenant_id: true
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
if (!existing) {
|
||||
throw new HttpError(404, "Power schedule not found", "POWER_SCHEDULE_NOT_FOUND");
|
||||
}
|
||||
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && existing.vm.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
const execution = await executeVmPowerActionNow(existing.vm_id, existing.action, req.user!.email, {
|
||||
payload: {
|
||||
source: "manual_schedule_run",
|
||||
schedule_id: existing.id
|
||||
},
|
||||
scheduledFor: new Date()
|
||||
});
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
task_id: execution.task.id,
|
||||
upid: execution.upid
|
||||
});
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
||||
71
backend/src/routes/payment.routes.ts
Normal file
71
backend/src/routes/payment.routes.ts
Normal file
@@ -0,0 +1,71 @@
|
||||
import { Router } from "express";
|
||||
import { z } from "zod";
|
||||
import { authorize, requireAuth } from "../middleware/auth";
|
||||
import {
|
||||
createInvoicePaymentLink,
|
||||
handleManualInvoicePayment,
|
||||
processFlutterwaveWebhook,
|
||||
processPaystackWebhook,
|
||||
verifyFlutterwaveSignature,
|
||||
verifyPaystackSignature
|
||||
} from "../services/payment.service";
|
||||
|
||||
const router = Router();
|
||||
|
||||
const createLinkSchema = z.object({
|
||||
provider: z.enum(["paystack", "flutterwave", "manual"]).optional()
|
||||
});
|
||||
|
||||
router.post("/invoices/:id/link", requireAuth, authorize("billing:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = createLinkSchema.parse(req.body ?? {});
|
||||
const result = await createInvoicePaymentLink(req.params.id, payload.provider);
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
const manualSchema = z.object({
|
||||
payment_reference: z.string().min(2)
|
||||
});
|
||||
|
||||
router.post("/invoices/:id/manual-pay", requireAuth, authorize("billing:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = manualSchema.parse(req.body ?? {});
|
||||
const invoice = await handleManualInvoicePayment(req.params.id, payload.payment_reference, req.user?.email ?? "manual@system");
|
||||
res.json(invoice);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/webhooks/paystack", async (req, res, next) => {
|
||||
try {
|
||||
const signature = req.header("x-paystack-signature");
|
||||
const valid = await verifyPaystackSignature(signature, req.rawBody);
|
||||
if (!valid) {
|
||||
return res.status(401).json({ error: { code: "INVALID_SIGNATURE", message: "Invalid signature" } });
|
||||
}
|
||||
const result = await processPaystackWebhook(req.body);
|
||||
return res.json(result);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/webhooks/flutterwave", async (req, res, next) => {
|
||||
try {
|
||||
const signature = req.header("verif-hash");
|
||||
const valid = await verifyFlutterwaveSignature(signature);
|
||||
if (!valid) {
|
||||
return res.status(401).json({ error: { code: "INVALID_SIGNATURE", message: "Invalid signature" } });
|
||||
}
|
||||
const result = await processFlutterwaveWebhook(req.body);
|
||||
return res.json(result);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
||||
566
backend/src/routes/provisioning.routes.ts
Normal file
566
backend/src/routes/provisioning.routes.ts
Normal file
@@ -0,0 +1,566 @@
|
||||
import {
|
||||
ProductType,
|
||||
ServiceLifecycleStatus,
|
||||
TemplateType,
|
||||
VmType
|
||||
} from "@prisma/client";
|
||||
import { Router } from "express";
|
||||
import { z } from "zod";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { toPrismaJsonValue } from "../lib/prisma-json";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { authorize, isTenantScopedUser, requireAuth } from "../middleware/auth";
|
||||
import { logAudit } from "../services/audit.service";
|
||||
import {
|
||||
createApplicationGroup,
|
||||
createPlacementPolicy,
|
||||
createProvisionedService,
|
||||
createTemplate,
|
||||
createVmIdRange,
|
||||
deleteApplicationGroup,
|
||||
deletePlacementPolicy,
|
||||
deleteTemplate,
|
||||
deleteVmIdRange,
|
||||
listApplicationGroups,
|
||||
listPlacementPolicies,
|
||||
listProvisionedServices,
|
||||
listTemplates,
|
||||
listVmIdRanges,
|
||||
setApplicationGroupTemplates,
|
||||
suspendProvisionedService,
|
||||
terminateProvisionedService,
|
||||
unsuspendProvisionedService,
|
||||
updateApplicationGroup,
|
||||
updatePlacementPolicy,
|
||||
updateProvisionedServicePackage,
|
||||
updateTemplate,
|
||||
updateVmIdRange
|
||||
} from "../services/provisioning.service";
|
||||
|
||||
const router = Router();
|
||||
|
||||
const templateCreateSchema = z.object({
|
||||
name: z.string().min(2),
|
||||
slug: z.string().optional(),
|
||||
template_type: z.nativeEnum(TemplateType),
|
||||
virtualization_type: z.nativeEnum(VmType).optional(),
|
||||
source: z.string().optional(),
|
||||
description: z.string().optional(),
|
||||
default_cloud_init: z.string().optional(),
|
||||
metadata: z.record(z.unknown()).optional()
|
||||
});
|
||||
|
||||
const templateUpdateSchema = z.object({
|
||||
name: z.string().min(2).optional(),
|
||||
slug: z.string().optional(),
|
||||
source: z.string().optional(),
|
||||
description: z.string().optional(),
|
||||
default_cloud_init: z.string().optional(),
|
||||
is_active: z.boolean().optional(),
|
||||
metadata: z.record(z.unknown()).optional()
|
||||
});
|
||||
|
||||
const groupCreateSchema = z.object({
|
||||
name: z.string().min(2),
|
||||
slug: z.string().optional(),
|
||||
description: z.string().optional()
|
||||
});
|
||||
|
||||
const groupUpdateSchema = z.object({
|
||||
name: z.string().min(2).optional(),
|
||||
slug: z.string().optional(),
|
||||
description: z.string().optional(),
|
||||
is_active: z.boolean().optional()
|
||||
});
|
||||
|
||||
const groupTemplatesSchema = z.object({
|
||||
templates: z
|
||||
.array(
|
||||
z.object({
|
||||
template_id: z.string().min(1),
|
||||
priority: z.number().int().positive().optional()
|
||||
})
|
||||
)
|
||||
.default([])
|
||||
});
|
||||
|
||||
const placementPolicySchema = z.object({
|
||||
group_id: z.string().optional(),
|
||||
node_id: z.string().optional(),
|
||||
product_type: z.nativeEnum(ProductType).optional(),
|
||||
cpu_weight: z.number().int().min(0).max(1000).optional(),
|
||||
ram_weight: z.number().int().min(0).max(1000).optional(),
|
||||
disk_weight: z.number().int().min(0).max(1000).optional(),
|
||||
vm_count_weight: z.number().int().min(0).max(1000).optional(),
|
||||
max_vms: z.number().int().positive().optional(),
|
||||
min_free_ram_mb: z.number().int().positive().optional(),
|
||||
min_free_disk_gb: z.number().int().positive().optional(),
|
||||
is_active: z.boolean().optional()
|
||||
});
|
||||
|
||||
const vmidRangeCreateSchema = z.object({
|
||||
node_id: z.string().optional(),
|
||||
node_hostname: z.string().min(1),
|
||||
application_group_id: z.string().optional(),
|
||||
range_start: z.number().int().positive(),
|
||||
range_end: z.number().int().positive(),
|
||||
next_vmid: z.number().int().positive().optional()
|
||||
});
|
||||
|
||||
const vmidRangeUpdateSchema = z.object({
|
||||
range_start: z.number().int().positive().optional(),
|
||||
range_end: z.number().int().positive().optional(),
|
||||
next_vmid: z.number().int().positive().optional(),
|
||||
is_active: z.boolean().optional()
|
||||
});
|
||||
|
||||
const serviceCreateSchema = z.object({
|
||||
name: z.string().min(2),
|
||||
tenant_id: z.string().min(1),
|
||||
product_type: z.nativeEnum(ProductType).default(ProductType.VPS),
|
||||
virtualization_type: z.nativeEnum(VmType).default(VmType.QEMU),
|
||||
vm_count: z.number().int().min(1).max(20).default(1),
|
||||
target_node: z.string().optional(),
|
||||
auto_node: z.boolean().default(true),
|
||||
application_group_id: z.string().optional(),
|
||||
template_id: z.string().optional(),
|
||||
billing_plan_id: z.string().optional(),
|
||||
package_options: z.record(z.unknown()).optional()
|
||||
});
|
||||
|
||||
const serviceSuspendSchema = z.object({
|
||||
reason: z.string().optional()
|
||||
});
|
||||
|
||||
const serviceTerminateSchema = z.object({
|
||||
reason: z.string().optional(),
|
||||
hard_delete: z.boolean().default(false)
|
||||
});
|
||||
|
||||
const servicePackageSchema = z.object({
|
||||
package_options: z.record(z.unknown())
|
||||
});
|
||||
|
||||
function parseOptionalLifecycleStatus(value: unknown) {
|
||||
if (typeof value !== "string") return undefined;
|
||||
const normalized = value.toUpperCase();
|
||||
return Object.values(ServiceLifecycleStatus).includes(normalized as ServiceLifecycleStatus)
|
||||
? (normalized as ServiceLifecycleStatus)
|
||||
: undefined;
|
||||
}
|
||||
|
||||
async function ensureServiceTenantScope(serviceId: string, req: Express.Request) {
|
||||
const service = await prisma.provisionedService.findUnique({
|
||||
where: { id: serviceId },
|
||||
include: {
|
||||
vm: {
|
||||
select: {
|
||||
id: true,
|
||||
tenant_id: true,
|
||||
name: true
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
if (!service) {
|
||||
throw new HttpError(404, "Provisioned service not found", "SERVICE_NOT_FOUND");
|
||||
}
|
||||
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && service.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
return service;
|
||||
}
|
||||
|
||||
router.get("/templates", requireAuth, authorize("vm:read"), async (req, res, next) => {
|
||||
try {
|
||||
const templateType = typeof req.query.template_type === "string" ? req.query.template_type.toUpperCase() : undefined;
|
||||
const isActive =
|
||||
typeof req.query.is_active === "string"
|
||||
? req.query.is_active === "true"
|
||||
: undefined;
|
||||
|
||||
const templates = await listTemplates({
|
||||
templateType,
|
||||
isActive
|
||||
});
|
||||
|
||||
res.json({ data: templates });
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/templates", requireAuth, authorize("vm:create"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = templateCreateSchema.parse(req.body ?? {});
|
||||
const template = await createTemplate({
|
||||
name: payload.name,
|
||||
slug: payload.slug,
|
||||
templateType: payload.template_type,
|
||||
virtualizationType: payload.virtualization_type,
|
||||
source: payload.source,
|
||||
description: payload.description,
|
||||
defaultCloudInit: payload.default_cloud_init,
|
||||
metadata: payload.metadata ? toPrismaJsonValue(payload.metadata) : undefined
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "template.create",
|
||||
resource_type: "SYSTEM",
|
||||
resource_id: template.id,
|
||||
resource_name: template.name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue(payload),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
res.status(201).json(template);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/templates/:id", requireAuth, authorize("vm:update"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = templateUpdateSchema.parse(req.body ?? {});
|
||||
const template = await updateTemplate(req.params.id, {
|
||||
name: payload.name,
|
||||
slug: payload.slug,
|
||||
source: payload.source,
|
||||
description: payload.description,
|
||||
defaultCloudInit: payload.default_cloud_init,
|
||||
isActive: payload.is_active,
|
||||
metadata: payload.metadata ? toPrismaJsonValue(payload.metadata) : undefined
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "template.update",
|
||||
resource_type: "SYSTEM",
|
||||
resource_id: template.id,
|
||||
resource_name: template.name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue(payload),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
res.json(template);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.delete("/templates/:id", requireAuth, authorize("vm:delete"), async (req, res, next) => {
|
||||
try {
|
||||
await deleteTemplate(req.params.id);
|
||||
res.status(204).send();
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/application-groups", requireAuth, authorize("vm:read"), async (_req, res, next) => {
|
||||
try {
|
||||
const groups = await listApplicationGroups();
|
||||
res.json({ data: groups });
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/application-groups", requireAuth, authorize("vm:create"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = groupCreateSchema.parse(req.body ?? {});
|
||||
const group = await createApplicationGroup({
|
||||
name: payload.name,
|
||||
slug: payload.slug,
|
||||
description: payload.description
|
||||
});
|
||||
res.status(201).json(group);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/application-groups/:id", requireAuth, authorize("vm:update"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = groupUpdateSchema.parse(req.body ?? {});
|
||||
const group = await updateApplicationGroup(req.params.id, {
|
||||
name: payload.name,
|
||||
slug: payload.slug,
|
||||
description: payload.description,
|
||||
isActive: payload.is_active
|
||||
});
|
||||
res.json(group);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.delete("/application-groups/:id", requireAuth, authorize("vm:delete"), async (req, res, next) => {
|
||||
try {
|
||||
await deleteApplicationGroup(req.params.id);
|
||||
res.status(204).send();
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.put("/application-groups/:id/templates", requireAuth, authorize("vm:update"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = groupTemplatesSchema.parse(req.body ?? {});
|
||||
|
||||
const assignments = await setApplicationGroupTemplates(
|
||||
req.params.id,
|
||||
payload.templates.map((template) => ({
|
||||
templateId: template.template_id,
|
||||
priority: template.priority
|
||||
}))
|
||||
);
|
||||
|
||||
res.json({ data: assignments });
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/placement-policies", requireAuth, authorize("node:read"), async (_req, res, next) => {
|
||||
try {
|
||||
const policies = await listPlacementPolicies();
|
||||
res.json({ data: policies });
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/placement-policies", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = placementPolicySchema.parse(req.body ?? {});
|
||||
const policy = await createPlacementPolicy({
|
||||
groupId: payload.group_id,
|
||||
nodeId: payload.node_id,
|
||||
productType: payload.product_type,
|
||||
cpuWeight: payload.cpu_weight,
|
||||
ramWeight: payload.ram_weight,
|
||||
diskWeight: payload.disk_weight,
|
||||
vmCountWeight: payload.vm_count_weight,
|
||||
maxVms: payload.max_vms,
|
||||
minFreeRamMb: payload.min_free_ram_mb,
|
||||
minFreeDiskGb: payload.min_free_disk_gb
|
||||
});
|
||||
res.status(201).json(policy);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/placement-policies/:id", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = placementPolicySchema.parse(req.body ?? {});
|
||||
const policy = await updatePlacementPolicy(req.params.id, {
|
||||
cpuWeight: payload.cpu_weight,
|
||||
ramWeight: payload.ram_weight,
|
||||
diskWeight: payload.disk_weight,
|
||||
vmCountWeight: payload.vm_count_weight,
|
||||
maxVms: payload.max_vms ?? null,
|
||||
minFreeRamMb: payload.min_free_ram_mb ?? null,
|
||||
minFreeDiskGb: payload.min_free_disk_gb ?? null,
|
||||
isActive: payload.is_active
|
||||
});
|
||||
res.json(policy);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.delete("/placement-policies/:id", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
await deletePlacementPolicy(req.params.id);
|
||||
res.status(204).send();
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/vmid-ranges", requireAuth, authorize("node:read"), async (_req, res, next) => {
|
||||
try {
|
||||
const ranges = await listVmIdRanges();
|
||||
res.json({ data: ranges });
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/vmid-ranges", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = vmidRangeCreateSchema.parse(req.body ?? {});
|
||||
const range = await createVmIdRange({
|
||||
nodeId: payload.node_id,
|
||||
nodeHostname: payload.node_hostname,
|
||||
applicationGroupId: payload.application_group_id,
|
||||
rangeStart: payload.range_start,
|
||||
rangeEnd: payload.range_end,
|
||||
nextVmid: payload.next_vmid
|
||||
});
|
||||
res.status(201).json(range);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/vmid-ranges/:id", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = vmidRangeUpdateSchema.parse(req.body ?? {});
|
||||
const range = await updateVmIdRange(req.params.id, {
|
||||
rangeStart: payload.range_start,
|
||||
rangeEnd: payload.range_end,
|
||||
nextVmid: payload.next_vmid,
|
||||
isActive: payload.is_active
|
||||
});
|
||||
res.json(range);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.delete("/vmid-ranges/:id", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
await deleteVmIdRange(req.params.id);
|
||||
res.status(204).send();
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/services", requireAuth, authorize("vm:read"), async (req, res, next) => {
|
||||
try {
|
||||
const lifecycleStatus = parseOptionalLifecycleStatus(req.query.lifecycle_status);
|
||||
const limit = typeof req.query.limit === "string" ? Number(req.query.limit) : undefined;
|
||||
const offset = typeof req.query.offset === "string" ? Number(req.query.offset) : undefined;
|
||||
|
||||
const result = await listProvisionedServices({
|
||||
tenantId: isTenantScopedUser(req) ? req.user?.tenant_id ?? undefined : undefined,
|
||||
lifecycleStatus,
|
||||
limit,
|
||||
offset
|
||||
});
|
||||
|
||||
res.json(result);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/services", requireAuth, authorize("vm:create"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = serviceCreateSchema.parse(req.body ?? {});
|
||||
|
||||
if (isTenantScopedUser(req) && req.user?.tenant_id && payload.tenant_id !== req.user.tenant_id) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
|
||||
const services = await createProvisionedService({
|
||||
name: payload.name,
|
||||
tenantId: payload.tenant_id,
|
||||
productType: payload.product_type,
|
||||
virtualizationType: payload.virtualization_type,
|
||||
vmCount: payload.vm_count,
|
||||
targetNode: payload.target_node,
|
||||
autoNode: payload.auto_node,
|
||||
applicationGroupId: payload.application_group_id,
|
||||
templateId: payload.template_id,
|
||||
billingPlanId: payload.billing_plan_id,
|
||||
packageOptions: payload.package_options ? toPrismaJsonValue(payload.package_options) : undefined,
|
||||
createdBy: req.user?.email
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "service.create",
|
||||
resource_type: "VM",
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: {
|
||||
tenant_id: payload.tenant_id,
|
||||
product_type: payload.product_type,
|
||||
vm_count: payload.vm_count,
|
||||
created_services: services.map((service) => service.id)
|
||||
},
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
res.status(201).json({ data: services });
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/services/:id/suspend", requireAuth, authorize("vm:update"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = serviceSuspendSchema.parse(req.body ?? {});
|
||||
await ensureServiceTenantScope(req.params.id, req);
|
||||
|
||||
const service = await suspendProvisionedService({
|
||||
serviceId: req.params.id,
|
||||
actorEmail: req.user!.email,
|
||||
reason: payload.reason
|
||||
});
|
||||
|
||||
res.json(service);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/services/:id/unsuspend", requireAuth, authorize("vm:update"), async (req, res, next) => {
|
||||
try {
|
||||
await ensureServiceTenantScope(req.params.id, req);
|
||||
const service = await unsuspendProvisionedService({
|
||||
serviceId: req.params.id,
|
||||
actorEmail: req.user!.email
|
||||
});
|
||||
res.json(service);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/services/:id/terminate", requireAuth, authorize("vm:delete"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = serviceTerminateSchema.parse(req.body ?? {});
|
||||
await ensureServiceTenantScope(req.params.id, req);
|
||||
|
||||
const service = await terminateProvisionedService({
|
||||
serviceId: req.params.id,
|
||||
actorEmail: req.user!.email,
|
||||
reason: payload.reason,
|
||||
hardDelete: payload.hard_delete
|
||||
});
|
||||
|
||||
res.json(service);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/services/:id/package-options", requireAuth, authorize("vm:update"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = servicePackageSchema.parse(req.body ?? {});
|
||||
await ensureServiceTenantScope(req.params.id, req);
|
||||
|
||||
const service = await updateProvisionedServicePackage({
|
||||
serviceId: req.params.id,
|
||||
actorEmail: req.user!.email,
|
||||
packageOptions: toPrismaJsonValue(payload.package_options)
|
||||
});
|
||||
|
||||
res.json(service);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
||||
637
backend/src/routes/proxmox.routes.ts
Normal file
637
backend/src/routes/proxmox.routes.ts
Normal file
@@ -0,0 +1,637 @@
|
||||
import { OperationTaskType, Prisma } from "@prisma/client";
|
||||
import { Router } from "express";
|
||||
import { z } from "zod";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { authorize, requireAuth } from "../middleware/auth";
|
||||
import {
|
||||
addVmDisk,
|
||||
clusterUsageGraphs,
|
||||
deleteVm,
|
||||
migrateVm,
|
||||
nodeUsageGraphs,
|
||||
vmUsageGraphs,
|
||||
reinstallVm,
|
||||
reconfigureVmNetwork,
|
||||
restartVm,
|
||||
resumeVm,
|
||||
shutdownVm,
|
||||
startVm,
|
||||
stopVm,
|
||||
suspendVm,
|
||||
syncNodesAndVirtualMachines,
|
||||
updateVmConfiguration,
|
||||
vmConsoleTicket
|
||||
} from "../services/proxmox.service";
|
||||
import { logAudit } from "../services/audit.service";
|
||||
import {
|
||||
createOperationTask,
|
||||
markOperationTaskFailed,
|
||||
markOperationTaskRunning,
|
||||
markOperationTaskSuccess
|
||||
} from "../services/operations.service";
|
||||
|
||||
const router = Router();
|
||||
const consoleTypeSchema = z.enum(["novnc", "spice", "xterm"]);
|
||||
const graphTimeframeSchema = z.enum(["hour", "day", "week", "month", "year"]);
|
||||
|
||||
function vmRuntimeType(vm: { type: "QEMU" | "LXC" }) {
|
||||
return vm.type === "LXC" ? "lxc" : "qemu";
|
||||
}
|
||||
|
||||
function withUpid(payload: Prisma.InputJsonObject, upid?: string): Prisma.InputJsonObject {
|
||||
if (!upid) {
|
||||
return payload;
|
||||
}
|
||||
|
||||
return {
|
||||
...payload,
|
||||
upid
|
||||
};
|
||||
}
|
||||
|
||||
async function fetchVm(vmId: string) {
|
||||
const vm = await prisma.virtualMachine.findUnique({ where: { id: vmId } });
|
||||
if (!vm) {
|
||||
throw new HttpError(404, "VM not found", "VM_NOT_FOUND");
|
||||
}
|
||||
return vm;
|
||||
}
|
||||
|
||||
async function resolveConsoleProxyTarget(node: string, consoleType: "novnc" | "spice" | "xterm") {
|
||||
const setting = await prisma.setting.findUnique({
|
||||
where: {
|
||||
key: "console_proxy"
|
||||
}
|
||||
});
|
||||
|
||||
const raw = setting?.value as
|
||||
| {
|
||||
mode?: "cluster" | "per_node";
|
||||
cluster?: Record<string, unknown>;
|
||||
nodes?: Record<string, Record<string, unknown>>;
|
||||
}
|
||||
| undefined;
|
||||
|
||||
if (!raw) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
const mode = raw.mode ?? "cluster";
|
||||
if (mode === "per_node") {
|
||||
const nodeConfig = raw.nodes?.[node];
|
||||
if (nodeConfig && typeof nodeConfig[consoleType] === "string") {
|
||||
return String(nodeConfig[consoleType]);
|
||||
}
|
||||
}
|
||||
|
||||
if (raw.cluster && typeof raw.cluster[consoleType] === "string") {
|
||||
return String(raw.cluster[consoleType]);
|
||||
}
|
||||
|
||||
return undefined;
|
||||
}
|
||||
|
||||
router.post("/sync", requireAuth, authorize("node:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const task = await createOperationTask({
|
||||
taskType: OperationTaskType.SYSTEM_SYNC,
|
||||
requestedBy: req.user?.email,
|
||||
payload: { source: "manual_sync" }
|
||||
});
|
||||
|
||||
await markOperationTaskRunning(task.id);
|
||||
|
||||
try {
|
||||
const result = await syncNodesAndVirtualMachines();
|
||||
await markOperationTaskSuccess(task.id, {
|
||||
node_count: result.node_count
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "proxmox_sync",
|
||||
resource_type: "NODE",
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: {
|
||||
node_count: result.node_count,
|
||||
task_id: task.id
|
||||
},
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
res.json({
|
||||
...result,
|
||||
task_id: task.id
|
||||
});
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "Proxmox sync failed";
|
||||
await markOperationTaskFailed(task.id, message);
|
||||
throw error;
|
||||
}
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
const actionSchema = z.object({
|
||||
action: z.enum(["start", "stop", "restart", "shutdown", "suspend", "resume", "delete"])
|
||||
});
|
||||
|
||||
router.post("/vms/:id/actions/:action", requireAuth, authorize("vm:update"), async (req, res, next) => {
|
||||
try {
|
||||
const { action } = actionSchema.parse(req.params);
|
||||
const vm = await fetchVm(req.params.id);
|
||||
const type = vmRuntimeType(vm);
|
||||
|
||||
const taskType = action === "delete" ? OperationTaskType.VM_DELETE : OperationTaskType.VM_POWER;
|
||||
const task = await createOperationTask({
|
||||
taskType,
|
||||
vm: {
|
||||
id: vm.id,
|
||||
name: vm.name,
|
||||
node: vm.node
|
||||
},
|
||||
requestedBy: req.user?.email,
|
||||
payload: { action }
|
||||
});
|
||||
|
||||
await markOperationTaskRunning(task.id);
|
||||
|
||||
let upid: string | undefined;
|
||||
|
||||
try {
|
||||
if (action === "start") {
|
||||
upid = await startVm(vm.node, vm.vmid, type);
|
||||
await prisma.virtualMachine.update({ where: { id: vm.id }, data: { status: "RUNNING", proxmox_upid: upid } });
|
||||
} else if (action === "stop") {
|
||||
upid = await stopVm(vm.node, vm.vmid, type);
|
||||
await prisma.virtualMachine.update({ where: { id: vm.id }, data: { status: "STOPPED", proxmox_upid: upid } });
|
||||
} else if (action === "restart") {
|
||||
upid = await restartVm(vm.node, vm.vmid, type);
|
||||
await prisma.virtualMachine.update({ where: { id: vm.id }, data: { status: "RUNNING", proxmox_upid: upid } });
|
||||
} else if (action === "shutdown") {
|
||||
upid = await shutdownVm(vm.node, vm.vmid, type);
|
||||
await prisma.virtualMachine.update({ where: { id: vm.id }, data: { status: "STOPPED", proxmox_upid: upid } });
|
||||
} else if (action === "suspend") {
|
||||
upid = await suspendVm(vm.node, vm.vmid, type);
|
||||
await prisma.virtualMachine.update({ where: { id: vm.id }, data: { status: "PAUSED", proxmox_upid: upid } });
|
||||
} else if (action === "resume") {
|
||||
upid = await resumeVm(vm.node, vm.vmid, type);
|
||||
await prisma.virtualMachine.update({ where: { id: vm.id }, data: { status: "RUNNING", proxmox_upid: upid } });
|
||||
} else {
|
||||
upid = await deleteVm(vm.node, vm.vmid, type);
|
||||
await prisma.virtualMachine.delete({ where: { id: vm.id } });
|
||||
}
|
||||
|
||||
const taskResult = withUpid(
|
||||
{
|
||||
vm_id: vm.id,
|
||||
action
|
||||
},
|
||||
upid
|
||||
);
|
||||
|
||||
await markOperationTaskSuccess(task.id, taskResult, upid);
|
||||
|
||||
await logAudit({
|
||||
action: `vm_${action}`,
|
||||
resource_type: "VM",
|
||||
resource_id: vm.id,
|
||||
resource_name: vm.name,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: {
|
||||
...taskResult,
|
||||
task_id: task.id
|
||||
},
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
res.json({ success: true, action, upid, task_id: task.id });
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : "VM action failed";
|
||||
await markOperationTaskFailed(task.id, message);
|
||||
throw error;
|
||||
}
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
// Request body for POST /vms/:id/migrate — only the destination node is needed.
const migrateSchema = z.object({
  target_node: z.string().min(1)
});
|
||||
|
||||
// Migrate a VM to another cluster node. The operation-task lifecycle
// (create -> running -> success/failed) brackets the Proxmox call so the
// task record always reflects the final outcome.
router.post("/vms/:id/migrate", requireAuth, authorize("vm:update"), async (req, res, next) => {
  try {
    const payload = migrateSchema.parse(req.body);
    const vm = await fetchVm(req.params.id);
    const type = vmRuntimeType(vm);

    const task = await createOperationTask({
      taskType: OperationTaskType.VM_MIGRATION,
      vm: {
        id: vm.id,
        name: vm.name,
        node: vm.node
      },
      requestedBy: req.user?.email,
      payload
    });

    await markOperationTaskRunning(task.id);

    try {
      const upid = await migrateVm(vm.node, vm.vmid, payload.target_node, type);
      // Optimistically record the destination node; Proxmox performs the actual move.
      await prisma.virtualMachine.update({
        where: { id: vm.id },
        data: { node: payload.target_node, status: "MIGRATING", proxmox_upid: upid }
      });

      // `vm.node` was read before the update above, so it still holds the source node.
      const migrationResult = withUpid(
        {
          vm_id: vm.id,
          from_node: vm.node,
          target_node: payload.target_node
        },
        upid
      );

      await markOperationTaskSuccess(task.id, migrationResult, upid);
      res.json({ success: true, upid, target_node: payload.target_node, task_id: task.id });
    } catch (error) {
      const message = error instanceof Error ? error.message : "VM migrate failed";
      await markOperationTaskFailed(task.id, message);
      throw error; // rethrow so the outer catch forwards to the error middleware
    }
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// Partial VM configuration update. All fields are optional, but the refine()
// rejects an empty object. iso_image and qemu_guest_agent are only applied
// to QEMU guests by the handler below.
const configSchema = z
  .object({
    hostname: z.string().min(1).optional(),
    iso_image: z.string().min(1).optional(),
    boot_order: z.string().min(1).optional(),
    ssh_public_key: z.string().min(10).optional(),
    qemu_guest_agent: z.boolean().optional()
  })
  .refine((value) => Object.keys(value).length > 0, {
    message: "At least one configuration field is required"
  });
|
||||
|
||||
// Apply a partial configuration update to a VM. Only fields present in the
// payload are translated into Proxmox config keys; QEMU-only keys (ide2,
// agent) are skipped for non-QEMU guests.
router.patch("/vms/:id/config", requireAuth, authorize("vm:update"), async (req, res, next) => {
  try {
    const payload = configSchema.parse(req.body ?? {});
    const vm = await fetchVm(req.params.id);
    const type = vmRuntimeType(vm);

    // Map API field names onto Proxmox configuration keys.
    const config: Record<string, string | number | boolean> = {};
    if (payload.hostname) config.name = payload.hostname;
    if (payload.boot_order) config.boot = payload.boot_order;
    if (payload.ssh_public_key) config.sshkeys = payload.ssh_public_key;
    if (payload.iso_image && vm.type === "QEMU") config.ide2 = `${payload.iso_image},media=cdrom`;
    if (typeof payload.qemu_guest_agent === "boolean" && vm.type === "QEMU") {
      config.agent = payload.qemu_guest_agent ? 1 : 0;
    }

    // Task lifecycle: create -> running -> success/failed (below).
    const task = await createOperationTask({
      taskType: OperationTaskType.VM_CONFIG,
      vm: { id: vm.id, name: vm.name, node: vm.node },
      requestedBy: req.user?.email,
      payload
    });

    await markOperationTaskRunning(task.id);

    try {
      const upid = await updateVmConfiguration(vm.node, vm.vmid, type, config);
      const configResult = withUpid(
        {
          vm_id: vm.id,
          config: config as unknown as Prisma.InputJsonValue
        },
        upid
      );
      await markOperationTaskSuccess(task.id, configResult, upid);

      // Audit only after the Proxmox call succeeded; failures are captured on the task.
      await logAudit({
        action: "vm_config_update",
        resource_type: "VM",
        resource_id: vm.id,
        resource_name: vm.name,
        actor_email: req.user!.email,
        actor_role: req.user!.role,
        details: {
          config: config as unknown as Prisma.InputJsonValue,
          task_id: task.id,
          ...(upid ? { upid } : {})
        },
        ip_address: req.ip
      });

      res.json({ success: true, upid, task_id: task.id, config_applied: config });
    } catch (error) {
      const message = error instanceof Error ? error.message : "VM config update failed";
      await markOperationTaskFailed(task.id, message);
      throw error; // outer catch forwards to the error middleware
    }
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// NIC reconfiguration payload. vlan_tag is bounded 0-4094 (802.1Q range);
// ip_cidr is additionally required by the handler when ip_mode is "static".
const networkSchema = z.object({
  interface_name: z.string().optional(),
  bridge: z.string().min(1),
  vlan_tag: z.number().int().min(0).max(4094).optional(),
  rate_mbps: z.number().int().positive().optional(),
  firewall: z.boolean().optional(),
  ip_mode: z.enum(["dhcp", "static"]).default("dhcp"),
  ip_cidr: z.string().optional(),
  gateway: z.string().optional()
});
|
||||
|
||||
// Reconfigure a VM's network interface. Static addressing requires ip_cidr,
// enforced here (beyond schema validation) before any task is created.
router.patch("/vms/:id/network", requireAuth, authorize("vm:update"), async (req, res, next) => {
  try {
    const payload = networkSchema.parse(req.body ?? {});
    if (payload.ip_mode === "static" && !payload.ip_cidr) {
      throw new HttpError(400, "ip_cidr is required when ip_mode=static", "INVALID_NETWORK_PAYLOAD");
    }

    const vm = await fetchVm(req.params.id);
    const type = vmRuntimeType(vm);

    const task = await createOperationTask({
      taskType: OperationTaskType.VM_NETWORK,
      vm: { id: vm.id, name: vm.name, node: vm.node },
      requestedBy: req.user?.email,
      payload
    });

    await markOperationTaskRunning(task.id);

    try {
      // Typed against reconfigureVmNetwork's own 4th parameter to stay in sync.
      const networkInput: Parameters<typeof reconfigureVmNetwork>[3] = {
        interface_name: payload.interface_name,
        bridge: payload.bridge,
        vlan_tag: payload.vlan_tag,
        rate_mbps: payload.rate_mbps,
        firewall: payload.firewall,
        ip_mode: payload.ip_mode,
        ip_cidr: payload.ip_cidr,
        gateway: payload.gateway
      };
      const upid = await reconfigureVmNetwork(vm.node, vm.vmid, type, networkInput);
      const networkResult = withUpid(
        {
          vm_id: vm.id,
          network: payload as unknown as Prisma.InputJsonValue
        },
        upid
      );
      await markOperationTaskSuccess(task.id, networkResult, upid);
      res.json({ success: true, upid, task_id: task.id });
    } catch (error) {
      const message = error instanceof Error ? error.message : "VM network update failed";
      await markOperationTaskFailed(task.id, message);
      throw error; // outer catch forwards to the error middleware
    }
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// New virtual disk payload; bus defaults to scsi.
// mount_point is presumably container-specific — confirm against addVmDisk.
const diskSchema = z.object({
  storage: z.string().min(1),
  size_gb: z.number().int().positive(),
  bus: z.enum(["scsi", "sata", "virtio", "ide"]).default("scsi"),
  mount_point: z.string().optional()
});
|
||||
|
||||
// Attach an additional virtual disk to a VM.
router.post("/vms/:id/disks", requireAuth, authorize("vm:update"), async (req, res, next) => {
  try {
    const payload = diskSchema.parse(req.body ?? {});
    const vm = await fetchVm(req.params.id);
    const type = vmRuntimeType(vm);

    // NOTE(review): reuses VM_CONFIG as the task type — there appears to be
    // no dedicated disk task type; confirm this is intentional.
    const task = await createOperationTask({
      taskType: OperationTaskType.VM_CONFIG,
      vm: { id: vm.id, name: vm.name, node: vm.node },
      requestedBy: req.user?.email,
      payload
    });

    await markOperationTaskRunning(task.id);

    try {
      // Typed against addVmDisk's own 4th parameter to stay in sync.
      const diskInput: Parameters<typeof addVmDisk>[3] = {
        storage: payload.storage,
        size_gb: payload.size_gb,
        bus: payload.bus,
        mount_point: payload.mount_point
      };
      const upid = await addVmDisk(vm.node, vm.vmid, type, diskInput);
      const diskResult = withUpid(
        {
          vm_id: vm.id,
          disk: payload as unknown as Prisma.InputJsonValue
        },
        upid
      );
      await markOperationTaskSuccess(task.id, diskResult, upid);
      res.status(201).json({ success: true, upid, task_id: task.id });
    } catch (error) {
      const message = error instanceof Error ? error.message : "VM disk attach failed";
      await markOperationTaskFailed(task.id, message);
      throw error; // outer catch forwards to the error middleware
    }
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
// Request body for POST /vms/:id/reinstall. backup_before_reinstall triggers
// creation of a PENDING backup record before the destructive operation.
const reinstallSchema = z.object({
  backup_before_reinstall: z.boolean().default(false),
  iso_image: z.string().optional(),
  ssh_public_key: z.string().optional()
});
|
||||
|
||||
// Reinstall a VM's operating system, optionally recording a backup first.
router.post("/vms/:id/reinstall", requireAuth, authorize("vm:update"), async (req, res, next) => {
  try {
    const payload = reinstallSchema.parse(req.body ?? {});
    const vm = await fetchVm(req.params.id);
    const type = vmRuntimeType(vm);

    // Create a PENDING backup record before the destructive reinstall;
    // presumably a backup worker picks up PENDING rows — confirm against
    // the backup service.
    if (payload.backup_before_reinstall) {
      await prisma.backup.create({
        data: {
          vm_id: vm.id,
          vm_name: vm.name,
          node: vm.node,
          status: "PENDING",
          type: "FULL",
          schedule: "MANUAL",
          notes: "Auto-created before VM reinstall"
        }
      });
    }

    const task = await createOperationTask({
      taskType: OperationTaskType.VM_REINSTALL,
      vm: { id: vm.id, name: vm.name, node: vm.node },
      requestedBy: req.user?.email,
      payload
    });

    await markOperationTaskRunning(task.id);

    try {
      const upid = await reinstallVm(vm.node, vm.vmid, type, {
        iso_image: payload.iso_image,
        ssh_public_key: payload.ssh_public_key
      });

      await prisma.virtualMachine.update({
        where: { id: vm.id },
        data: {
          status: "RUNNING",
          proxmox_upid: upid ?? undefined
        }
      });

      const reinstallResult = withUpid(
        {
          vm_id: vm.id,
          reinstall: payload as unknown as Prisma.InputJsonValue
        },
        upid
      );

      await markOperationTaskSuccess(task.id, reinstallResult, upid);

      res.json({ success: true, upid, task_id: task.id });
    } catch (error) {
      const message = error instanceof Error ? error.message : "VM reinstall failed";
      await markOperationTaskFailed(task.id, message);
      throw error; // outer catch forwards to the error middleware
    }
  } catch (error) {
    next(error);
  }
});
|
||||
|
||||
router.get("/vms/:id/console", requireAuth, authorize("vm:read"), async (req, res, next) => {
|
||||
try {
|
||||
const vm = await fetchVm(req.params.id);
|
||||
const type = vmRuntimeType(vm);
|
||||
const consoleType = consoleTypeSchema.parse(
|
||||
typeof req.query.console_type === "string"
|
||||
? req.query.console_type.toLowerCase()
|
||||
: "novnc"
|
||||
);
|
||||
const ticket = await vmConsoleTicket(vm.node, vm.vmid, type, consoleType);
|
||||
const proxyTarget = await resolveConsoleProxyTarget(vm.node, consoleType);
|
||||
|
||||
res.json({
|
||||
...ticket,
|
||||
console_type: consoleType,
|
||||
proxy_target: proxyTarget ?? null
|
||||
});
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/vms/:id/usage-graphs", requireAuth, authorize("vm:read"), async (req, res, next) => {
|
||||
try {
|
||||
const vm = await fetchVm(req.params.id);
|
||||
const type = vmRuntimeType(vm);
|
||||
const timeframe = graphTimeframeSchema.parse(
|
||||
typeof req.query.timeframe === "string" ? req.query.timeframe.toLowerCase() : "day"
|
||||
);
|
||||
|
||||
const graph = await vmUsageGraphs(vm.node, vm.vmid, type, timeframe, {
|
||||
cpu_usage: vm.cpu_usage,
|
||||
ram_usage: vm.ram_usage,
|
||||
disk_usage: vm.disk_usage,
|
||||
network_in: vm.network_in,
|
||||
network_out: vm.network_out
|
||||
});
|
||||
|
||||
return res.json({
|
||||
vm_id: vm.id,
|
||||
vm_name: vm.name,
|
||||
vm_type: vm.type,
|
||||
node: vm.node,
|
||||
timeframe: graph.timeframe,
|
||||
source: graph.source,
|
||||
summary: graph.summary,
|
||||
points: graph.points
|
||||
});
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/nodes/:id/usage-graphs", requireAuth, authorize("node:read"), async (req, res, next) => {
|
||||
try {
|
||||
const node = await prisma.proxmoxNode.findFirst({
|
||||
where: {
|
||||
OR: [{ id: req.params.id }, { hostname: req.params.id }, { name: req.params.id }]
|
||||
}
|
||||
});
|
||||
|
||||
if (!node) {
|
||||
throw new HttpError(404, "Node not found", "NODE_NOT_FOUND");
|
||||
}
|
||||
|
||||
const timeframe = graphTimeframeSchema.parse(
|
||||
typeof req.query.timeframe === "string" ? req.query.timeframe.toLowerCase() : "day"
|
||||
);
|
||||
|
||||
const graph = await nodeUsageGraphs(node.hostname, timeframe, {
|
||||
cpu_usage: node.cpu_usage,
|
||||
ram_used_mb: node.ram_used_mb,
|
||||
ram_total_mb: node.ram_total_mb,
|
||||
disk_used_gb: node.disk_used_gb,
|
||||
disk_total_gb: node.disk_total_gb
|
||||
});
|
||||
|
||||
return res.json({
|
||||
node_id: node.id,
|
||||
node_name: node.name,
|
||||
node_hostname: node.hostname,
|
||||
timeframe: graph.timeframe,
|
||||
source: graph.source,
|
||||
summary: graph.summary,
|
||||
points: graph.points
|
||||
});
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/cluster/usage-graphs", requireAuth, authorize("node:read"), async (req, res, next) => {
|
||||
try {
|
||||
const timeframe = graphTimeframeSchema.parse(
|
||||
typeof req.query.timeframe === "string" ? req.query.timeframe.toLowerCase() : "day"
|
||||
);
|
||||
const graph = await clusterUsageGraphs(timeframe);
|
||||
|
||||
return res.json({
|
||||
timeframe: graph.timeframe,
|
||||
source: graph.source,
|
||||
node_count: graph.node_count,
|
||||
nodes: graph.nodes,
|
||||
summary: graph.summary,
|
||||
points: graph.points
|
||||
});
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
// Default export consumed by the app's route registration (see backend/src/app.ts).
export default router;
|
||||
723
backend/src/routes/resources.routes.ts
Normal file
723
backend/src/routes/resources.routes.ts
Normal file
@@ -0,0 +1,723 @@
|
||||
import { Router } from "express";
|
||||
import { authorize, isTenantScopedUser, requireAuth } from "../middleware/auth";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { toPrismaJsonValue } from "../lib/prisma-json";
|
||||
import { logAudit } from "../services/audit.service";
|
||||
import { prisma } from "../lib/prisma";
|
||||
// Express router on which this module's resource endpoints are registered.
const router = Router();
|
||||
|
||||
/**
 * Declarative description of one generic REST resource:
 * - model: the Prisma delegate name backing the resource (see getModel);
 * - read/create/update/deletePermission: permission string each verb requires
 *   (create/update/delete are optional — presumably verbs without one are not
 *   exposed; confirm against the route handlers);
 * - tenantScoped: whether rows are filtered by the caller's tenant
 *   (see attachTenantWhere);
 * - searchFields: string columns a free-text search matches against
 *   (see attachSearchWhere).
 */
type ResourceMeta = {
  model: string;
  readPermission: Parameters<typeof authorize>[0];
  createPermission?: Parameters<typeof authorize>[0];
  updatePermission?: Parameters<typeof authorize>[0];
  deletePermission?: Parameters<typeof authorize>[0];
  tenantScoped: boolean;
  searchFields?: string[];
};
|
||||
|
||||
/**
 * Registry of every resource exposed through the generic CRUD routes, keyed
 * by its URL slug. Each entry binds the slug to a Prisma model plus the
 * permission required per verb; see ResourceMeta for field semantics.
 */
const resourceMap: Record<string, ResourceMeta> = {
  // --- Tenancy, compute, and infrastructure ---
  tenants: {
    model: "tenant",
    readPermission: "tenant:read",
    createPermission: "tenant:manage",
    updatePermission: "tenant:manage",
    deletePermission: "tenant:manage",
    tenantScoped: false,
    searchFields: ["name", "owner_email", "slug"]
  },
  "virtual-machines": {
    model: "virtualMachine",
    readPermission: "vm:read",
    createPermission: "vm:create",
    updatePermission: "vm:update",
    deletePermission: "vm:delete",
    tenantScoped: true,
    searchFields: ["name", "ip_address", "node"]
  },
  nodes: {
    model: "proxmoxNode",
    readPermission: "node:read",
    createPermission: "node:manage",
    updatePermission: "node:manage",
    deletePermission: "node:manage",
    tenantScoped: false,
    searchFields: ["name", "hostname"]
  },
  // --- Billing ---
  "billing-plans": {
    model: "billingPlan",
    readPermission: "billing:read",
    createPermission: "billing:manage",
    updatePermission: "billing:manage",
    deletePermission: "billing:manage",
    tenantScoped: false,
    searchFields: ["name", "slug", "description"]
  },
  invoices: {
    model: "invoice",
    readPermission: "billing:read",
    createPermission: "billing:manage",
    updatePermission: "billing:manage",
    deletePermission: "billing:manage",
    tenantScoped: true,
    searchFields: ["invoice_number", "tenant_name", "payment_reference"]
  },
  "usage-records": {
    model: "usageRecord",
    readPermission: "billing:read",
    createPermission: "billing:manage",
    updatePermission: "billing:manage",
    deletePermission: "billing:manage",
    tenantScoped: true,
    searchFields: ["vm_name", "tenant_name", "plan_name"]
  },
  // --- Backups & snapshots ---
  backups: {
    model: "backup",
    readPermission: "backup:read",
    createPermission: "backup:manage",
    updatePermission: "backup:manage",
    deletePermission: "backup:manage",
    tenantScoped: true,
    searchFields: ["vm_name", "node", "storage"]
  },
  "backup-policies": {
    model: "backupPolicy",
    readPermission: "backup:read",
    createPermission: "backup:manage",
    updatePermission: "backup:manage",
    deletePermission: "backup:manage",
    tenantScoped: true
  },
  "backup-restore-tasks": {
    model: "backupRestoreTask",
    readPermission: "backup:read",
    createPermission: "backup:manage",
    updatePermission: "backup:manage",
    deletePermission: "backup:manage",
    tenantScoped: true
  },
  "snapshot-jobs": {
    model: "snapshotJob",
    readPermission: "backup:read",
    createPermission: "backup:manage",
    updatePermission: "backup:manage",
    deletePermission: "backup:manage",
    tenantScoped: true
  },
  // --- Audit & security (audit logs are read-only: no write permissions) ---
  "audit-logs": {
    model: "auditLog",
    readPermission: "audit:read",
    tenantScoped: false,
    searchFields: ["action", "resource_name", "actor_email"]
  },
  "security-events": {
    model: "securityEvent",
    readPermission: "security:read",
    createPermission: "security:manage",
    updatePermission: "security:manage",
    deletePermission: "security:manage",
    tenantScoped: false,
    searchFields: ["event_type", "source_ip", "description"]
  },
  "firewall-rules": {
    model: "firewallRule",
    readPermission: "security:read",
    createPermission: "security:manage",
    updatePermission: "security:manage",
    deletePermission: "security:manage",
    tenantScoped: false,
    searchFields: ["name", "source_ip", "destination_ip", "description"]
  },
  users: {
    model: "user",
    readPermission: "user:read",
    createPermission: "user:manage",
    updatePermission: "user:manage",
    deletePermission: "user:manage",
    tenantScoped: true,
    searchFields: ["email", "full_name"]
  },
  // --- Templates & provisioning ---
  "app-templates": {
    model: "appTemplate",
    readPermission: "vm:read",
    createPermission: "vm:create",
    updatePermission: "vm:update",
    deletePermission: "vm:delete",
    tenantScoped: false,
    searchFields: ["name", "slug", "description", "source"]
  },
  "application-groups": {
    model: "applicationGroup",
    readPermission: "vm:read",
    createPermission: "vm:create",
    updatePermission: "vm:update",
    deletePermission: "vm:delete",
    tenantScoped: false,
    searchFields: ["name", "slug", "description"]
  },
  "placement-policies": {
    model: "nodePlacementPolicy",
    readPermission: "node:read",
    createPermission: "node:manage",
    updatePermission: "node:manage",
    deletePermission: "node:manage",
    tenantScoped: false
  },
  "vmid-ranges": {
    model: "vmIdRange",
    readPermission: "node:read",
    createPermission: "node:manage",
    updatePermission: "node:manage",
    deletePermission: "node:manage",
    tenantScoped: false
  },
  "provisioned-services": {
    model: "provisionedService",
    readPermission: "vm:read",
    createPermission: "vm:create",
    updatePermission: "vm:update",
    deletePermission: "vm:delete",
    tenantScoped: true
  },
  // --- Networking & IPAM ---
  "ip-addresses": {
    model: "ipAddressPool",
    readPermission: "node:read",
    createPermission: "node:manage",
    updatePermission: "node:manage",
    deletePermission: "node:manage",
    tenantScoped: true,
    searchFields: ["address", "subnet", "node_hostname", "bridge", "sdn_zone"]
  },
  "ip-assignments": {
    model: "ipAssignment",
    readPermission: "vm:read",
    createPermission: "vm:update",
    updatePermission: "vm:update",
    deletePermission: "vm:update",
    tenantScoped: true
  },
  "private-networks": {
    model: "privateNetwork",
    readPermission: "node:read",
    createPermission: "node:manage",
    updatePermission: "node:manage",
    deletePermission: "node:manage",
    tenantScoped: false,
    searchFields: ["name", "slug", "cidr", "bridge", "sdn_zone", "node_hostname"]
  },
  "private-network-attachments": {
    model: "privateNetworkAttachment",
    readPermission: "vm:read",
    createPermission: "vm:update",
    updatePermission: "vm:update",
    deletePermission: "vm:update",
    tenantScoped: true
  },
  "tenant-ip-quotas": {
    model: "tenantIpQuota",
    readPermission: "tenant:read",
    createPermission: "tenant:manage",
    updatePermission: "tenant:manage",
    deletePermission: "tenant:manage",
    tenantScoped: true
  },
  "ip-reserved-ranges": {
    model: "ipReservedRange",
    readPermission: "node:read",
    createPermission: "node:manage",
    updatePermission: "node:manage",
    deletePermission: "node:manage",
    tenantScoped: true,
    searchFields: ["name", "cidr", "reason", "node_hostname", "bridge", "sdn_zone"]
  },
  "ip-pool-policies": {
    model: "ipPoolPolicy",
    readPermission: "node:read",
    createPermission: "node:manage",
    updatePermission: "node:manage",
    deletePermission: "node:manage",
    tenantScoped: true,
    searchFields: ["name", "node_hostname", "bridge", "sdn_zone"]
  },
  // --- Health checks & monitoring (results/notifications are read-only) ---
  "server-health-checks": {
    model: "serverHealthCheck",
    readPermission: "security:read",
    createPermission: "security:manage",
    updatePermission: "security:manage",
    deletePermission: "security:manage",
    tenantScoped: true,
    searchFields: ["name", "description"]
  },
  "server-health-check-results": {
    model: "serverHealthCheckResult",
    readPermission: "security:read",
    tenantScoped: true
  },
  "monitoring-alert-rules": {
    model: "monitoringAlertRule",
    readPermission: "security:read",
    createPermission: "security:manage",
    updatePermission: "security:manage",
    deletePermission: "security:manage",
    tenantScoped: true,
    searchFields: ["name", "description"]
  },
  "monitoring-alert-events": {
    model: "monitoringAlertEvent",
    readPermission: "security:read",
    updatePermission: "security:manage",
    tenantScoped: true,
    searchFields: ["title", "message", "metric_key"]
  },
  "monitoring-alert-notifications": {
    model: "monitoringAlertNotification",
    readPermission: "security:read",
    tenantScoped: true,
    searchFields: ["destination", "provider_message"]
  }
};
|
||||
|
||||
function toEnumUpper(value: unknown): unknown {
|
||||
if (typeof value !== "string") return value;
|
||||
return value.replace(/-/g, "_").toUpperCase();
|
||||
}
|
||||
|
||||
function normalizePayload(resource: string, input: Record<string, unknown>) {
|
||||
const data = { ...input };
|
||||
const enumFieldsByResource: Record<string, string[]> = {
|
||||
tenants: ["status", "currency", "payment_provider"],
|
||||
"virtual-machines": ["status", "type"],
|
||||
nodes: ["status"],
|
||||
"billing-plans": ["currency"],
|
||||
invoices: ["status", "currency", "payment_provider"],
|
||||
"usage-records": ["currency"],
|
||||
backups: ["status", "type", "schedule", "source"],
|
||||
"backup-restore-tasks": ["mode", "status"],
|
||||
"snapshot-jobs": ["frequency"],
|
||||
"audit-logs": ["resource_type", "severity"],
|
||||
"security-events": ["severity", "status"],
|
||||
"firewall-rules": ["direction", "action", "protocol", "applies_to"],
|
||||
users: ["role"],
|
||||
"app-templates": ["template_type", "virtualization_type"],
|
||||
"placement-policies": ["product_type"],
|
||||
"provisioned-services": ["product_type", "lifecycle_status"],
|
||||
"server-health-checks": ["target_type", "check_type"],
|
||||
"server-health-check-results": ["status", "severity"],
|
||||
"monitoring-alert-rules": ["severity"],
|
||||
"monitoring-alert-events": ["status", "severity"],
|
||||
"monitoring-alert-notifications": ["channel", "status"]
|
||||
};
|
||||
|
||||
for (const field of enumFieldsByResource[resource] ?? []) {
|
||||
if (field in data && data[field] !== undefined && data[field] !== null) {
|
||||
data[field] = toEnumUpper(data[field]);
|
||||
}
|
||||
}
|
||||
|
||||
if (resource === "billing-plans") {
|
||||
const monthly = data.price_monthly;
|
||||
if (monthly !== undefined && (data.price_hourly === undefined || data.price_hourly === null)) {
|
||||
const monthlyNumber = Number(monthly);
|
||||
data.price_hourly = Number((monthlyNumber / 720).toFixed(4));
|
||||
}
|
||||
if (typeof data.features === "string") {
|
||||
try {
|
||||
data.features = JSON.parse(data.features);
|
||||
} catch {
|
||||
data.features = [];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (resource === "tenants" && typeof data.member_emails === "string") {
|
||||
try {
|
||||
data.member_emails = JSON.parse(data.member_emails);
|
||||
} catch {
|
||||
data.member_emails = [];
|
||||
}
|
||||
}
|
||||
|
||||
if (resource === "invoices" && !data.invoice_number) {
|
||||
data.invoice_number = `INV-${Date.now()}-${Math.floor(1000 + Math.random() * 9000)}`;
|
||||
}
|
||||
|
||||
if (resource === "invoices" && data.due_date && typeof data.due_date === "string") {
|
||||
data.due_date = new Date(data.due_date);
|
||||
}
|
||||
if (resource === "invoices" && data.paid_date && typeof data.paid_date === "string") {
|
||||
data.paid_date = new Date(data.paid_date);
|
||||
}
|
||||
|
||||
return data;
|
||||
}
|
||||
|
||||
// Resolves the Prisma delegate for a resource. Prisma's generated client has
// no common supertype across model delegates, hence the `any` escape hatch.
function getModel(meta: ResourceMeta) {
  return (prisma as any)[meta.model];
}
|
||||
|
||||
function normalizeSortField(field: string) {
|
||||
const aliases: Record<string, string> = {
|
||||
created_date: "created_at",
|
||||
updated_date: "updated_at"
|
||||
};
|
||||
return aliases[field] ?? field;
|
||||
}
|
||||
|
||||
function parseOrder(sort?: string) {
|
||||
if (!sort) return { created_at: "desc" as const };
|
||||
if (sort.startsWith("-")) return { [normalizeSortField(sort.slice(1))]: "desc" as const };
|
||||
return { [normalizeSortField(sort)]: "asc" as const };
|
||||
}
|
||||
|
||||
function attachTenantWhere(req: Express.Request, meta: ResourceMeta, where: Record<string, unknown>) {
|
||||
if (!meta.tenantScoped || !isTenantScopedUser(req)) return;
|
||||
const tenantId = req.user?.tenant_id;
|
||||
if (!tenantId) return;
|
||||
|
||||
if (meta.model === "backup") {
|
||||
where.OR = [{ tenant_id: tenantId }, { vm: { tenant_id: tenantId } }];
|
||||
return;
|
||||
}
|
||||
|
||||
if (meta.model === "backupRestoreTask") {
|
||||
where.source_vm = { tenant_id: tenantId };
|
||||
return;
|
||||
}
|
||||
|
||||
if (meta.model === "snapshotJob") {
|
||||
where.vm = { tenant_id: tenantId };
|
||||
return;
|
||||
}
|
||||
|
||||
if (meta.model === "backupPolicy") {
|
||||
where.tenant_id = tenantId;
|
||||
return;
|
||||
}
|
||||
|
||||
if (meta.model === "ipAddressPool") {
|
||||
where.OR = [{ assigned_tenant_id: tenantId }, { status: "AVAILABLE", scope: "PRIVATE" }];
|
||||
return;
|
||||
}
|
||||
|
||||
if (meta.model === "ipAssignment") {
|
||||
where.tenant_id = tenantId;
|
||||
return;
|
||||
}
|
||||
|
||||
if (meta.model === "privateNetworkAttachment") {
|
||||
where.tenant_id = tenantId;
|
||||
return;
|
||||
}
|
||||
|
||||
if (meta.model === "tenantIpQuota") {
|
||||
where.tenant_id = tenantId;
|
||||
return;
|
||||
}
|
||||
|
||||
if (meta.model === "ipReservedRange" || meta.model === "ipPoolPolicy") {
|
||||
where.OR = [{ tenant_id: tenantId }, { tenant_id: null }];
|
||||
return;
|
||||
}
|
||||
|
||||
if (meta.model === "serverHealthCheck") {
|
||||
where.OR = [{ tenant_id: tenantId }, { tenant_id: null }];
|
||||
return;
|
||||
}
|
||||
|
||||
if (meta.model === "serverHealthCheckResult") {
|
||||
where.check = {
|
||||
OR: [{ tenant_id: tenantId }, { tenant_id: null }]
|
||||
};
|
||||
return;
|
||||
}
|
||||
|
||||
if (meta.model === "monitoringAlertRule" || meta.model === "monitoringAlertEvent") {
|
||||
where.OR = [{ tenant_id: tenantId }, { tenant_id: null }];
|
||||
return;
|
||||
}
|
||||
|
||||
if (meta.model === "monitoringAlertNotification") {
|
||||
where.event = {
|
||||
OR: [{ tenant_id: tenantId }, { tenant_id: null }]
|
||||
};
|
||||
return;
|
||||
}
|
||||
|
||||
where.tenant_id = tenantId;
|
||||
}
|
||||
|
||||
function attachSearchWhere(
|
||||
where: Record<string, unknown>,
|
||||
search: string,
|
||||
searchFields: string[] | undefined
|
||||
) {
|
||||
if (!search || !searchFields?.length) {
|
||||
return;
|
||||
}
|
||||
|
||||
const searchFilter = {
|
||||
OR: searchFields.map((field) => ({
|
||||
[field]: { contains: search, mode: "insensitive" }
|
||||
}))
|
||||
};
|
||||
|
||||
if (Array.isArray(where.OR)) {
|
||||
const existingOr = where.OR;
|
||||
delete where.OR;
|
||||
const existingAnd = Array.isArray(where.AND) ? where.AND : [];
|
||||
where.AND = [...existingAnd, { OR: existingOr }, searchFilter];
|
||||
return;
|
||||
}
|
||||
|
||||
if (Array.isArray(where.AND)) {
|
||||
where.AND = [...where.AND, searchFilter];
|
||||
return;
|
||||
}
|
||||
|
||||
where.AND = [searchFilter];
|
||||
}
|
||||
|
||||
async function ensureItemTenantScope(req: Express.Request, meta: ResourceMeta, item: Record<string, unknown>) {
|
||||
if (!meta.tenantScoped || !isTenantScopedUser(req) || !req.user?.tenant_id) {
|
||||
return;
|
||||
}
|
||||
|
||||
const tenantId = req.user.tenant_id;
|
||||
let ownerTenantId: string | null | undefined;
|
||||
|
||||
if (meta.model === "backup") {
|
||||
ownerTenantId = (item.tenant_id as string | null | undefined) ?? null;
|
||||
if (!ownerTenantId && typeof item.vm_id === "string") {
|
||||
const vm = await prisma.virtualMachine.findUnique({
|
||||
where: { id: item.vm_id },
|
||||
select: { tenant_id: true }
|
||||
});
|
||||
ownerTenantId = vm?.tenant_id;
|
||||
}
|
||||
} else if (meta.model === "backupRestoreTask") {
|
||||
if (typeof item.source_vm_id === "string") {
|
||||
const vm = await prisma.virtualMachine.findUnique({
|
||||
where: { id: item.source_vm_id },
|
||||
select: { tenant_id: true }
|
||||
});
|
||||
ownerTenantId = vm?.tenant_id;
|
||||
}
|
||||
} else if (meta.model === "snapshotJob") {
|
||||
if (typeof item.vm_id === "string") {
|
||||
const vm = await prisma.virtualMachine.findUnique({
|
||||
where: { id: item.vm_id },
|
||||
select: { tenant_id: true }
|
||||
});
|
||||
ownerTenantId = vm?.tenant_id;
|
||||
}
|
||||
} else if (meta.model === "ipAddressPool") {
|
||||
ownerTenantId = item.assigned_tenant_id as string | null | undefined;
|
||||
if (!ownerTenantId && item.status === "AVAILABLE" && item.scope === "PRIVATE") {
|
||||
return;
|
||||
}
|
||||
} else if (meta.model === "ipAssignment" || meta.model === "privateNetworkAttachment") {
|
||||
ownerTenantId = item.tenant_id as string | null | undefined;
|
||||
} else if (meta.model === "tenantIpQuota" || meta.model === "ipReservedRange" || meta.model === "ipPoolPolicy") {
|
||||
ownerTenantId = (item.tenant_id as string | null | undefined) ?? null;
|
||||
if (!ownerTenantId) return;
|
||||
} else if (meta.model === "serverHealthCheck") {
|
||||
ownerTenantId = (item.tenant_id as string | null | undefined) ?? null;
|
||||
if (!ownerTenantId) return;
|
||||
} else if (meta.model === "serverHealthCheckResult") {
|
||||
if (typeof item.check_id === "string") {
|
||||
const check = await prisma.serverHealthCheck.findUnique({
|
||||
where: { id: item.check_id },
|
||||
select: { tenant_id: true }
|
||||
});
|
||||
ownerTenantId = check?.tenant_id;
|
||||
if (!ownerTenantId) return;
|
||||
}
|
||||
} else if (meta.model === "monitoringAlertRule" || meta.model === "monitoringAlertEvent") {
|
||||
ownerTenantId = (item.tenant_id as string | null | undefined) ?? null;
|
||||
if (!ownerTenantId) return;
|
||||
} else if (meta.model === "monitoringAlertNotification") {
|
||||
if (typeof item.alert_event_id === "string") {
|
||||
const event = await prisma.monitoringAlertEvent.findUnique({
|
||||
where: { id: item.alert_event_id },
|
||||
select: { tenant_id: true }
|
||||
});
|
||||
ownerTenantId = event?.tenant_id;
|
||||
if (!ownerTenantId) return;
|
||||
}
|
||||
} else {
|
||||
ownerTenantId = item.tenant_id as string | null | undefined;
|
||||
}
|
||||
|
||||
if (ownerTenantId !== tenantId) {
|
||||
throw new HttpError(403, "Access denied for tenant scope", "TENANT_SCOPE_VIOLATION");
|
||||
}
|
||||
}
|
||||
|
||||
router.get("/:resource", requireAuth, async (req, res, next) => {
|
||||
try {
|
||||
const resource = req.params.resource;
|
||||
const meta = resourceMap[resource];
|
||||
if (!meta) throw new HttpError(404, "Unknown resource", "UNKNOWN_RESOURCE");
|
||||
await new Promise<void>((resolve, reject) => authorize(meta.readPermission)(req, res, (error) => (error ? reject(error) : resolve())));
|
||||
|
||||
const model = getModel(meta);
|
||||
const rawLimit = Number(req.query.limit ?? 100);
|
||||
const rawOffset = Number(req.query.offset ?? 0);
|
||||
const limit = Number.isFinite(rawLimit) && rawLimit > 0 ? Math.min(Math.floor(rawLimit), 500) : 100;
|
||||
const offset = Number.isFinite(rawOffset) && rawOffset >= 0 ? Math.floor(rawOffset) : 0;
|
||||
const where: Record<string, unknown> = {};
|
||||
|
||||
attachTenantWhere(req, meta, where);
|
||||
|
||||
if (typeof req.query.status === "string") where.status = toEnumUpper(req.query.status);
|
||||
if (typeof req.query.tenant_id === "string" && !isTenantScopedUser(req)) where.tenant_id = req.query.tenant_id;
|
||||
if (typeof req.query.vm_id === "string") where.vm_id = req.query.vm_id;
|
||||
if (typeof req.query.node === "string") where.node = req.query.node;
|
||||
|
||||
const search = typeof req.query.search === "string" ? req.query.search.trim() : "";
|
||||
attachSearchWhere(where, search, meta.searchFields);
|
||||
|
||||
const [data, total] = await Promise.all([
|
||||
model.findMany({
|
||||
where,
|
||||
orderBy: parseOrder(typeof req.query.sort === "string" ? req.query.sort : undefined),
|
||||
take: limit,
|
||||
skip: offset
|
||||
}),
|
||||
model.count({ where })
|
||||
]);
|
||||
|
||||
res.json({
|
||||
data,
|
||||
meta: { total, limit, offset }
|
||||
});
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/:resource/:id", requireAuth, async (req, res, next) => {
|
||||
try {
|
||||
const resource = req.params.resource;
|
||||
const meta = resourceMap[resource];
|
||||
if (!meta) throw new HttpError(404, "Unknown resource", "UNKNOWN_RESOURCE");
|
||||
await new Promise<void>((resolve, reject) => authorize(meta.readPermission)(req, res, (error) => (error ? reject(error) : resolve())));
|
||||
|
||||
const model = getModel(meta);
|
||||
const item = await model.findUnique({ where: { id: req.params.id } });
|
||||
if (!item) throw new HttpError(404, "Record not found", "NOT_FOUND");
|
||||
await ensureItemTenantScope(req, meta, item);
|
||||
res.json(item);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.post("/:resource", requireAuth, async (req, res, next) => {
|
||||
try {
|
||||
const resource = req.params.resource;
|
||||
const meta = resourceMap[resource];
|
||||
if (!meta) throw new HttpError(404, "Unknown resource", "UNKNOWN_RESOURCE");
|
||||
if (!meta.createPermission) throw new HttpError(405, "Resource is read-only", "READ_ONLY");
|
||||
await new Promise<void>((resolve, reject) => authorize(meta.createPermission!)(req, res, (error) => (error ? reject(error) : resolve())));
|
||||
|
||||
const model = getModel(meta);
|
||||
const payload = normalizePayload(resource, req.body ?? {});
|
||||
|
||||
if (meta.tenantScoped && isTenantScopedUser(req) && req.user?.tenant_id) {
|
||||
if (
|
||||
meta.model !== "backupRestoreTask" &&
|
||||
meta.model !== "snapshotJob"
|
||||
) {
|
||||
payload.tenant_id = req.user.tenant_id;
|
||||
}
|
||||
}
|
||||
|
||||
const created = await model.create({ data: payload });
|
||||
|
||||
await logAudit({
|
||||
action: `${resource}.create`,
|
||||
resource_type: resource === "virtual-machines" ? "VM" : "SYSTEM",
|
||||
resource_id: created.id,
|
||||
resource_name: created.name ?? created.invoice_number ?? created.id,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue({ resource, payload: created }),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
res.status(201).json(created);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.patch("/:resource/:id", requireAuth, async (req, res, next) => {
|
||||
try {
|
||||
const resource = req.params.resource;
|
||||
const meta = resourceMap[resource];
|
||||
if (!meta) throw new HttpError(404, "Unknown resource", "UNKNOWN_RESOURCE");
|
||||
if (!meta.updatePermission) throw new HttpError(405, "Resource is read-only", "READ_ONLY");
|
||||
await new Promise<void>((resolve, reject) => authorize(meta.updatePermission!)(req, res, (error) => (error ? reject(error) : resolve())));
|
||||
|
||||
const model = getModel(meta);
|
||||
const existing = await model.findUnique({ where: { id: req.params.id } });
|
||||
if (!existing) throw new HttpError(404, "Record not found", "NOT_FOUND");
|
||||
await ensureItemTenantScope(req, meta, existing);
|
||||
|
||||
const payload = normalizePayload(resource, req.body ?? {});
|
||||
const updated = await model.update({
|
||||
where: { id: req.params.id },
|
||||
data: payload
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: `${resource}.update`,
|
||||
resource_type: resource === "virtual-machines" ? "VM" : "SYSTEM",
|
||||
resource_id: updated.id,
|
||||
resource_name: updated.name ?? updated.invoice_number ?? updated.id,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue({ resource, payload }),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
res.json(updated);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.delete("/:resource/:id", requireAuth, async (req, res, next) => {
|
||||
try {
|
||||
const resource = req.params.resource;
|
||||
const meta = resourceMap[resource];
|
||||
if (!meta) throw new HttpError(404, "Unknown resource", "UNKNOWN_RESOURCE");
|
||||
if (!meta.deletePermission) throw new HttpError(405, "Resource is read-only", "READ_ONLY");
|
||||
await new Promise<void>((resolve, reject) => authorize(meta.deletePermission!)(req, res, (error) => (error ? reject(error) : resolve())));
|
||||
|
||||
const model = getModel(meta);
|
||||
const existing = await model.findUnique({ where: { id: req.params.id } });
|
||||
if (!existing) throw new HttpError(404, "Record not found", "NOT_FOUND");
|
||||
await ensureItemTenantScope(req, meta, existing);
|
||||
|
||||
await model.delete({ where: { id: req.params.id } });
|
||||
|
||||
await logAudit({
|
||||
action: `${resource}.delete`,
|
||||
resource_type: resource === "virtual-machines" ? "VM" : "SYSTEM",
|
||||
resource_id: req.params.id,
|
||||
resource_name: existing.name ?? existing.invoice_number ?? existing.id,
|
||||
actor_email: req.user!.email,
|
||||
actor_role: req.user!.role,
|
||||
details: toPrismaJsonValue({ resource }),
|
||||
ip_address: req.ip
|
||||
});
|
||||
|
||||
res.status(204).send();
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
||||
280
backend/src/routes/settings.routes.ts
Normal file
280
backend/src/routes/settings.routes.ts
Normal file
@@ -0,0 +1,280 @@
|
||||
import { Router } from "express";
|
||||
import { z } from "zod";
|
||||
import { authorize, requireAuth } from "../middleware/auth";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { getOperationsPolicy } from "../services/operations.service";
|
||||
import { getSchedulerRuntimeSnapshot, reconfigureSchedulers, schedulerDefaults } from "../services/scheduler.service";
|
||||
|
||||
// Shared Express router for every settings endpoint defined in this module.
const router = Router();
|
||||
|
||||
// Connection settings for the Proxmox VE API (token-based authentication).
const proxmoxSchema = z.object({
  host: z.string().min(1),
  port: z.number().int().positive().default(8006),
  username: z.string().min(1),
  token_id: z.string().min(1),
  token_secret: z.string().min(1),
  verify_ssl: z.boolean().default(true)
});

// Payment-gateway configuration; only the active provider's keys need values.
const paymentSchema = z.object({
  default_provider: z.enum(["paystack", "flutterwave", "manual"]).default("paystack"),
  paystack_public: z.string().optional(),
  paystack_secret: z.string().optional(),
  flutterwave_public: z.string().optional(),
  flutterwave_secret: z.string().optional(),
  flutterwave_webhook_hash: z.string().optional(),
  callback_url: z.string().optional()
});

// Backup defaults, including optional Proxmox Backup Server (PBS) target.
const backupSchema = z.object({
  default_source: z.enum(["local", "pbs", "remote"]).default("local"),
  default_storage: z.string().default("local-lvm"),
  max_restore_file_count: z.number().int().positive().default(100),
  pbs_enabled: z.boolean().default(false),
  pbs_host: z.string().optional(),
  pbs_datastore: z.string().optional(),
  pbs_namespace: z.string().optional(),
  pbs_verify_ssl: z.boolean().default(true)
});

// Console proxy endpoints for one node (or the whole cluster).
const consoleProxyNodeSchema = z.object({
  novnc: z.string().url().optional(),
  spice: z.string().url().optional(),
  xterm: z.string().url().optional()
});

// Console proxy config: one cluster-wide entry or a per-node map.
const consoleProxySchema = z.object({
  mode: z.enum(["cluster", "per_node"]).default("cluster"),
  cluster: consoleProxyNodeSchema.optional(),
  nodes: z.record(consoleProxyNodeSchema).optional()
});

// Cron expressions (5-field strings) for the background schedulers.
const schedulerSchema = z.object({
  enable_scheduler: z.boolean().optional(),
  billing_cron: z.string().min(5).optional(),
  backup_cron: z.string().min(5).optional(),
  power_schedule_cron: z.string().min(5).optional(),
  monitoring_cron: z.string().min(5).optional(),
  operation_retry_cron: z.string().min(5).optional()
});

// Retry/notification policy for failed operation tasks.
const operationsPolicySchema = z.object({
  max_retry_attempts: z.number().int().min(0).max(10).optional(),
  retry_backoff_minutes: z.number().int().min(1).max(720).optional(),
  notify_on_task_failure: z.boolean().optional(),
  notification_email: z.string().email().optional(),
  notification_webhook_url: z.string().url().optional(),
  email_gateway_url: z.string().url().optional()
});

// Global notification toggles and delivery endpoints.
const notificationsSchema = z.object({
  email_alerts: z.boolean().optional(),
  backup_alerts: z.boolean().optional(),
  billing_alerts: z.boolean().optional(),
  vm_alerts: z.boolean().optional(),
  monitoring_webhook_url: z.string().url().optional(),
  alert_webhook_url: z.string().url().optional(),
  email_gateway_url: z.string().url().optional(),
  notification_email_webhook: z.string().url().optional(),
  ops_email: z.string().email().optional()
});
|
||||
|
||||
router.get("/proxmox", requireAuth, authorize("settings:read"), async (_req, res, next) => {
|
||||
try {
|
||||
const setting = await prisma.setting.findUnique({ where: { key: "proxmox" } });
|
||||
res.json(setting?.value ?? {});
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.put("/proxmox", requireAuth, authorize("settings:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = proxmoxSchema.parse(req.body);
|
||||
const setting = await prisma.setting.upsert({
|
||||
where: { key: "proxmox" },
|
||||
update: { value: payload },
|
||||
create: { key: "proxmox", type: "PROXMOX", value: payload, is_encrypted: true }
|
||||
});
|
||||
res.json(setting.value);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/payment", requireAuth, authorize("settings:read"), async (_req, res, next) => {
|
||||
try {
|
||||
const setting = await prisma.setting.findUnique({ where: { key: "payment" } });
|
||||
res.json(setting?.value ?? {});
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.put("/payment", requireAuth, authorize("settings:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = paymentSchema.parse(req.body);
|
||||
const setting = await prisma.setting.upsert({
|
||||
where: { key: "payment" },
|
||||
update: { value: payload },
|
||||
create: { key: "payment", type: "PAYMENT", value: payload, is_encrypted: true }
|
||||
});
|
||||
res.json(setting.value);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/backup", requireAuth, authorize("settings:read"), async (_req, res, next) => {
|
||||
try {
|
||||
const setting = await prisma.setting.findUnique({ where: { key: "backup" } });
|
||||
res.json(setting?.value ?? {});
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.put("/backup", requireAuth, authorize("settings:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = backupSchema.parse(req.body);
|
||||
const setting = await prisma.setting.upsert({
|
||||
where: { key: "backup" },
|
||||
update: { value: payload },
|
||||
create: { key: "backup", type: "GENERAL", value: payload, is_encrypted: false }
|
||||
});
|
||||
res.json(setting.value);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/console-proxy", requireAuth, authorize("settings:read"), async (_req, res, next) => {
|
||||
try {
|
||||
const setting = await prisma.setting.findUnique({ where: { key: "console_proxy" } });
|
||||
res.json(
|
||||
setting?.value ?? {
|
||||
mode: "cluster",
|
||||
cluster: {},
|
||||
nodes: {}
|
||||
}
|
||||
);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.put("/console-proxy", requireAuth, authorize("settings:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = consoleProxySchema.parse(req.body);
|
||||
const setting = await prisma.setting.upsert({
|
||||
where: { key: "console_proxy" },
|
||||
update: { value: payload },
|
||||
create: { key: "console_proxy", type: "PROXMOX", value: payload, is_encrypted: false }
|
||||
});
|
||||
res.json(setting.value);
|
||||
} catch (error) {
|
||||
next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/scheduler", requireAuth, authorize("settings:read"), async (_req, res, next) => {
|
||||
try {
|
||||
const setting = await prisma.setting.findUnique({ where: { key: "scheduler" } });
|
||||
const defaults = schedulerDefaults();
|
||||
const persisted =
|
||||
setting?.value && typeof setting.value === "object" && !Array.isArray(setting.value)
|
||||
? (setting.value as Record<string, unknown>)
|
||||
: {};
|
||||
const config = {
|
||||
...defaults,
|
||||
...persisted
|
||||
};
|
||||
return res.json({
|
||||
config,
|
||||
runtime: getSchedulerRuntimeSnapshot()
|
||||
});
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.put("/scheduler", requireAuth, authorize("settings:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = schedulerSchema.parse(req.body);
|
||||
const setting = await prisma.setting.upsert({
|
||||
where: { key: "scheduler" },
|
||||
update: { value: payload },
|
||||
create: { key: "scheduler", type: "GENERAL", value: payload, is_encrypted: false }
|
||||
});
|
||||
|
||||
const runtime = await reconfigureSchedulers(payload);
|
||||
return res.json({
|
||||
config: setting.value,
|
||||
runtime
|
||||
});
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/operations-policy", requireAuth, authorize("settings:read"), async (_req, res, next) => {
|
||||
try {
|
||||
const policy = await getOperationsPolicy();
|
||||
return res.json(policy);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.put("/operations-policy", requireAuth, authorize("settings:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = operationsPolicySchema.parse(req.body);
|
||||
await prisma.setting.upsert({
|
||||
where: { key: "operations_policy" },
|
||||
update: { value: payload },
|
||||
create: { key: "operations_policy", type: "GENERAL", value: payload, is_encrypted: false }
|
||||
});
|
||||
|
||||
const policy = await getOperationsPolicy();
|
||||
return res.json(policy);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.get("/notifications", requireAuth, authorize("settings:read"), async (_req, res, next) => {
|
||||
try {
|
||||
const setting = await prisma.setting.findUnique({ where: { key: "notifications" } });
|
||||
return res.json(
|
||||
setting?.value ?? {
|
||||
email_alerts: true,
|
||||
backup_alerts: true,
|
||||
billing_alerts: true,
|
||||
vm_alerts: true,
|
||||
monitoring_webhook_url: "",
|
||||
alert_webhook_url: "",
|
||||
email_gateway_url: "",
|
||||
notification_email_webhook: "",
|
||||
ops_email: ""
|
||||
}
|
||||
);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
router.put("/notifications", requireAuth, authorize("settings:manage"), async (req, res, next) => {
|
||||
try {
|
||||
const payload = notificationsSchema.parse(req.body);
|
||||
const setting = await prisma.setting.upsert({
|
||||
where: { key: "notifications" },
|
||||
update: { value: payload },
|
||||
create: { key: "notifications", type: "EMAIL", value: payload, is_encrypted: false }
|
||||
});
|
||||
return res.json(setting.value);
|
||||
} catch (error) {
|
||||
return next(error);
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
||||
30
backend/src/services/audit.service.ts
Normal file
30
backend/src/services/audit.service.ts
Normal file
@@ -0,0 +1,30 @@
|
||||
import type { Prisma, ResourceType, Severity } from "@prisma/client";
|
||||
import { prisma } from "../lib/prisma";
|
||||
|
||||
// Shape of one audit-log entry. `severity` defaults to "INFO" in logAudit;
// all other optional fields are stored as-is (undefined → NULL).
type AuditInput = {
  action: string; // e.g. "invoice_mark_paid" or "<resource>.create"
  resource_type: ResourceType;
  resource_id?: string;
  resource_name?: string; // human-readable label for the affected record
  actor_email: string;
  actor_role?: string;
  severity?: Severity;
  details?: Prisma.InputJsonValue; // free-form JSON context for the action
  ip_address?: string;
};
|
||||
|
||||
export async function logAudit(input: AuditInput) {
|
||||
await prisma.auditLog.create({
|
||||
data: {
|
||||
action: input.action,
|
||||
resource_type: input.resource_type,
|
||||
resource_id: input.resource_id,
|
||||
resource_name: input.resource_name,
|
||||
actor_email: input.actor_email,
|
||||
actor_role: input.actor_role,
|
||||
severity: input.severity ?? "INFO",
|
||||
details: input.details,
|
||||
ip_address: input.ip_address
|
||||
}
|
||||
});
|
||||
}
|
||||
1086
backend/src/services/backup.service.ts
Normal file
1086
backend/src/services/backup.service.ts
Normal file
File diff suppressed because it is too large
Load Diff
245
backend/src/services/billing.service.ts
Normal file
245
backend/src/services/billing.service.ts
Normal file
@@ -0,0 +1,245 @@
|
||||
import { Prisma, InvoiceStatus, PaymentProvider } from "@prisma/client";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { logAudit } from "./audit.service";
|
||||
|
||||
function startOfHour(date = new Date()) {
|
||||
const d = new Date(date);
|
||||
d.setMinutes(0, 0, 0);
|
||||
return d;
|
||||
}
|
||||
|
||||
/**
 * Creates one usage record per RUNNING VM (with a billing plan) for the
 * current clock hour, charging one hour at the plan's hourly price.
 * Idempotent per hour: a VM that already has a record for this exact
 * period is skipped, so the function can run more than once per hour.
 * Writes a single audit-log entry summarizing the run.
 *
 * @param actorEmail - attributed actor in the audit log (defaults to the system account)
 * @returns count of records created and the ISO start of the metered period
 */
export async function meterHourlyUsage(actorEmail = "system@proxpanel.local") {
  // Metering window: [top of current hour, top of next hour).
  const periodStart = startOfHour();
  const periodEnd = new Date(periodStart.getTime() + 60 * 60 * 1000);

  const vms = await prisma.virtualMachine.findMany({
    where: { status: "RUNNING" },
    include: {
      tenant: true,
      billing_plan: true
    }
  });

  let created = 0;
  for (const vm of vms) {
    // VMs without a billing plan are not metered.
    if (!vm.billing_plan) continue;

    // Skip VMs already metered for this period (idempotency guard).
    // NOTE(review): findFirst+create is not atomic — concurrent runs could
    // double-insert; a unique index on (vm_id, period_start) would close this.
    const exists = await prisma.usageRecord.findFirst({
      where: {
        vm_id: vm.id,
        period_start: periodStart,
        period_end: periodEnd
      }
    });
    if (exists) continue;

    // Exactly one hour is billed per run at the plan's hourly rate.
    const hoursUsed = new Prisma.Decimal(1);
    const pricePerHour = vm.billing_plan.price_hourly;
    const totalCost = pricePerHour.mul(hoursUsed);

    await prisma.usageRecord.create({
      data: {
        vm_id: vm.id,
        vm_name: vm.name,
        tenant_id: vm.tenant_id,
        tenant_name: vm.tenant.name,
        billing_plan_id: vm.billing_plan_id ?? undefined,
        plan_name: vm.billing_plan.name,
        hours_used: hoursUsed,
        price_per_hour: pricePerHour,
        currency: vm.billing_plan.currency,
        total_cost: totalCost,
        period_start: periodStart,
        period_end: periodEnd,
        // Resource-hour snapshots for reporting (RAM converted MB -> GB).
        cpu_hours: new Prisma.Decimal(vm.cpu_cores),
        ram_gb_hours: new Prisma.Decimal(vm.ram_mb / 1024),
        disk_gb_hours: new Prisma.Decimal(vm.disk_gb)
      }
    });
    created += 1;
  }

  await logAudit({
    action: "hourly_usage_metering",
    resource_type: "BILLING",
    actor_email: actorEmail,
    severity: "INFO",
    details: { period_start: periodStart.toISOString(), created_records: created }
  });

  return { created_records: created, period_start: periodStart.toISOString() };
}
|
||||
|
||||
function invoiceNumber() {
|
||||
const rand = Math.floor(1000 + Math.random() * 9000);
|
||||
return `INV-${Date.now()}-${rand}`;
|
||||
}
|
||||
|
||||
export async function generateInvoicesFromUnbilledUsage(actorEmail = "system@proxpanel.local") {
|
||||
const usageRecords = await prisma.usageRecord.findMany({
|
||||
where: { billed: false },
|
||||
orderBy: { created_at: "asc" }
|
||||
});
|
||||
if (usageRecords.length === 0) {
|
||||
return { generated: 0, invoices: [] as Array<{ id: string; tenant_id: string; amount: string }> };
|
||||
}
|
||||
|
||||
const grouped = new Map<string, typeof usageRecords>();
|
||||
for (const item of usageRecords) {
|
||||
const key = `${item.tenant_id}:${item.currency}`;
|
||||
const current = grouped.get(key) ?? [];
|
||||
current.push(item);
|
||||
grouped.set(key, current);
|
||||
}
|
||||
|
||||
const createdInvoices: Array<{ id: string; tenant_id: string; amount: string }> = [];
|
||||
|
||||
for (const [key, records] of grouped.entries()) {
|
||||
const [tenantId] = key.split(":");
|
||||
const amount = records.reduce((sum, record) => sum.add(record.total_cost), new Prisma.Decimal(0));
|
||||
const tenant = await prisma.tenant.findUniqueOrThrow({ where: { id: tenantId } });
|
||||
|
||||
const invoice = await prisma.invoice.create({
|
||||
data: {
|
||||
invoice_number: invoiceNumber(),
|
||||
tenant_id: tenantId,
|
||||
tenant_name: tenant.name,
|
||||
status: InvoiceStatus.PENDING,
|
||||
amount,
|
||||
currency: records[0].currency,
|
||||
due_date: new Date(Date.now() + 7 * 24 * 60 * 60 * 1000),
|
||||
payment_provider: tenant.payment_provider,
|
||||
line_items: records.map((r) => ({
|
||||
usage_record_id: r.id,
|
||||
vm_name: r.vm_name,
|
||||
period_start: r.period_start,
|
||||
period_end: r.period_end,
|
||||
hours_used: r.hours_used.toString(),
|
||||
unit_price: r.price_per_hour.toString(),
|
||||
amount: r.total_cost.toString()
|
||||
}))
|
||||
}
|
||||
});
|
||||
|
||||
await prisma.usageRecord.updateMany({
|
||||
where: { id: { in: records.map((r) => r.id) } },
|
||||
data: {
|
||||
billed: true,
|
||||
invoice_id: invoice.id
|
||||
}
|
||||
});
|
||||
|
||||
createdInvoices.push({
|
||||
id: invoice.id,
|
||||
tenant_id: invoice.tenant_id,
|
||||
amount: invoice.amount.toString()
|
||||
});
|
||||
}
|
||||
|
||||
await logAudit({
|
||||
action: "invoice_batch_generation",
|
||||
resource_type: "BILLING",
|
||||
actor_email: actorEmail,
|
||||
severity: "INFO",
|
||||
details: {
|
||||
generated_invoices: createdInvoices.length
|
||||
}
|
||||
});
|
||||
|
||||
return { generated: createdInvoices.length, invoices: createdInvoices };
|
||||
}
|
||||
|
||||
export async function markInvoicePaid(
|
||||
invoiceId: string,
|
||||
paymentProvider: PaymentProvider,
|
||||
paymentReference: string,
|
||||
actorEmail: string
|
||||
) {
|
||||
const invoice = await prisma.invoice.update({
|
||||
where: { id: invoiceId },
|
||||
data: {
|
||||
status: "PAID",
|
||||
paid_date: new Date(),
|
||||
payment_provider: paymentProvider,
|
||||
payment_reference: paymentReference
|
||||
}
|
||||
});
|
||||
|
||||
await logAudit({
|
||||
action: "invoice_mark_paid",
|
||||
resource_type: "INVOICE",
|
||||
resource_id: invoice.id,
|
||||
resource_name: invoice.invoice_number,
|
||||
actor_email: actorEmail,
|
||||
severity: "INFO",
|
||||
details: { payment_provider: paymentProvider, payment_reference: paymentReference }
|
||||
});
|
||||
|
||||
return invoice;
|
||||
}
|
||||
|
||||
export async function updateOverdueInvoices(actorEmail = "system@proxpanel.local") {
|
||||
const result = await prisma.invoice.updateMany({
|
||||
where: {
|
||||
status: "PENDING",
|
||||
due_date: { lt: new Date() }
|
||||
},
|
||||
data: { status: "OVERDUE" }
|
||||
});
|
||||
|
||||
if (result.count > 0) {
|
||||
await logAudit({
|
||||
action: "invoice_overdue_scan",
|
||||
resource_type: "BILLING",
|
||||
actor_email: actorEmail,
|
||||
severity: "WARNING",
|
||||
details: { marked_overdue: result.count }
|
||||
});
|
||||
}
|
||||
|
||||
return result.count;
|
||||
}
|
||||
|
||||
function nextRunDate(schedule: "DAILY" | "WEEKLY" | "MONTHLY" | "MANUAL") {
|
||||
const now = new Date();
|
||||
if (schedule === "DAILY") return new Date(now.getTime() + 24 * 60 * 60 * 1000);
|
||||
if (schedule === "WEEKLY") return new Date(now.getTime() + 7 * 24 * 60 * 60 * 1000);
|
||||
if (schedule === "MONTHLY") return new Date(now.getTime() + 30 * 24 * 60 * 60 * 1000);
|
||||
return null;
|
||||
}
|
||||
|
||||
export async function processBackupSchedule(actorEmail = "system@proxpanel.local") {
|
||||
const now = new Date();
|
||||
const dueBackups = await prisma.backup.findMany({
|
||||
where: {
|
||||
schedule: { not: "MANUAL" },
|
||||
next_run_at: { lte: now },
|
||||
status: { in: ["PENDING", "COMPLETED", "FAILED"] }
|
||||
}
|
||||
});
|
||||
|
||||
for (const backup of dueBackups) {
|
||||
const nextRunAt = nextRunDate(backup.schedule);
|
||||
await prisma.backup.update({
|
||||
where: { id: backup.id },
|
||||
data: {
|
||||
status: "PENDING",
|
||||
started_at: null,
|
||||
completed_at: null,
|
||||
next_run_at: nextRunAt
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (dueBackups.length > 0) {
|
||||
await logAudit({
|
||||
action: "backup_scheduler_run",
|
||||
resource_type: "BACKUP",
|
||||
actor_email: actorEmail,
|
||||
severity: "INFO",
|
||||
details: { queued_backups: dueBackups.length }
|
||||
});
|
||||
}
|
||||
|
||||
return dueBackups.length;
|
||||
}
|
||||
1454
backend/src/services/monitoring.service.ts
Normal file
1454
backend/src/services/monitoring.service.ts
Normal file
File diff suppressed because it is too large
Load Diff
1402
backend/src/services/network.service.ts
Normal file
1402
backend/src/services/network.service.ts
Normal file
File diff suppressed because it is too large
Load Diff
954
backend/src/services/operations.service.ts
Normal file
954
backend/src/services/operations.service.ts
Normal file
@@ -0,0 +1,954 @@
|
||||
import {
|
||||
OperationTaskStatus,
|
||||
OperationTaskType,
|
||||
PowerScheduleAction,
|
||||
Prisma,
|
||||
VmStatus
|
||||
} from "@prisma/client";
|
||||
import axios from "axios";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { restartVm, shutdownVm, startVm, stopVm } from "./proxmox.service";
|
||||
|
||||
// Input for creating an operation-task record; optional VM context ties the
// task to a specific machine.
type TaskCreateInput = {
  taskType: OperationTaskType;
  requestedBy?: string;
  vm?: {
    id: string;
    name: string;
    node: string;
  };
  payload?: Prisma.InputJsonValue;
  scheduledFor?: Date | null; // null/absent = run immediately
  status?: OperationTaskStatus;
};

// Filters and pagination for listing operation tasks; tenantId null means
// no tenant restriction.
type TaskListInput = {
  status?: OperationTaskStatus;
  taskType?: OperationTaskType;
  vmId?: string;
  limit?: number;
  offset?: number;
  tenantId?: string | null;
};

// Payload for creating a recurring VM power schedule (5-field cron).
type PowerScheduleCreateInput = {
  vmId: string;
  action: PowerScheduleAction;
  cronExpression: string;
  timezone?: string;
  createdBy?: string;
};

// Partial update for an existing power schedule.
type PowerScheduleUpdateInput = {
  action?: PowerScheduleAction;
  cronExpression?: string;
  timezone?: string;
  enabled?: boolean;
};

// Optional scheduling/payload overrides when executing a power action.
type ExecutePowerOptions = {
  scheduledFor?: Date | null;
  payload?: Prisma.InputJsonValue;
};

// Effective retry/notification policy applied to failed operation tasks.
export type OperationsPolicy = {
  max_retry_attempts: number;
  retry_backoff_minutes: number;
  notify_on_task_failure: boolean;
  notification_email: string | null;
  notification_webhook_url: string | null;
  email_gateway_url: string | null;
};

// Fallback policy used when no "operations_policy" setting has been saved.
const DEFAULT_OPERATIONS_POLICY: OperationsPolicy = {
  max_retry_attempts: 2,
  retry_backoff_minutes: 10,
  notify_on_task_failure: true,
  notification_email: null,
  notification_webhook_url: null,
  email_gateway_url: null
};
|
||||
|
||||
function numberRange(min: number, max: number) {
|
||||
return Array.from({ length: max - min + 1 }, (_, idx) => min + idx);
|
||||
}
|
||||
|
||||
function parseSingleToken(token: string, min: number, max: number): number[] {
|
||||
if (token === "*") {
|
||||
return numberRange(min, max);
|
||||
}
|
||||
|
||||
if (token.includes("/")) {
|
||||
const [baseToken, stepToken] = token.split("/");
|
||||
const step = Number(stepToken);
|
||||
if (!Number.isInteger(step) || step <= 0) {
|
||||
throw new Error(`Invalid cron step: ${token}`);
|
||||
}
|
||||
|
||||
const baseValues = parseSingleToken(baseToken, min, max);
|
||||
const startValue = Math.min(...baseValues);
|
||||
return baseValues.filter((value) => (value - startValue) % step === 0);
|
||||
}
|
||||
|
||||
if (token.includes("-")) {
|
||||
const [startToken, endToken] = token.split("-");
|
||||
const start = Number(startToken);
|
||||
const end = Number(endToken);
|
||||
if (!Number.isInteger(start) || !Number.isInteger(end) || start > end) {
|
||||
throw new Error(`Invalid cron range: ${token}`);
|
||||
}
|
||||
if (start < min || end > max) {
|
||||
throw new Error(`Cron range out of bounds: ${token}`);
|
||||
}
|
||||
return numberRange(start, end);
|
||||
}
|
||||
|
||||
const value = Number(token);
|
||||
if (!Number.isInteger(value) || value < min || value > max) {
|
||||
throw new Error(`Invalid cron value: ${token}`);
|
||||
}
|
||||
return [value];
|
||||
}
|
||||
|
||||
function parseCronField(field: string, min: number, max: number): Set<number> {
|
||||
const values = new Set<number>();
|
||||
for (const rawToken of field.split(",")) {
|
||||
const token = rawToken.trim();
|
||||
if (!token) continue;
|
||||
for (const value of parseSingleToken(token, min, max)) {
|
||||
values.add(value);
|
||||
}
|
||||
}
|
||||
if (values.size === 0) {
|
||||
throw new Error(`Invalid cron field: ${field}`);
|
||||
}
|
||||
return values;
|
||||
}
|
||||
|
||||
function parseCronExpression(expression: string) {
|
||||
const parts = expression.trim().split(/\s+/);
|
||||
if (parts.length !== 5) {
|
||||
throw new Error("Cron expression must contain exactly 5 fields");
|
||||
}
|
||||
|
||||
return {
|
||||
minute: parseCronField(parts[0], 0, 59),
|
||||
hour: parseCronField(parts[1], 0, 23),
|
||||
dayOfMonth: parseCronField(parts[2], 1, 31),
|
||||
month: parseCronField(parts[3], 1, 12),
|
||||
dayOfWeek: parseCronField(parts[4], 0, 6)
|
||||
};
|
||||
}
|
||||
|
||||
function cronMatchesParsed(date: Date, parsed: ReturnType<typeof parseCronExpression>) {
|
||||
return (
|
||||
parsed.minute.has(date.getMinutes()) &&
|
||||
parsed.hour.has(date.getHours()) &&
|
||||
parsed.dayOfMonth.has(date.getDate()) &&
|
||||
parsed.month.has(date.getMonth() + 1) &&
|
||||
parsed.dayOfWeek.has(date.getDay())
|
||||
);
|
||||
}
|
||||
|
||||
export function nextRunAt(cronExpression: string, fromDate = new Date()): Date | null {
|
||||
const parsed = parseCronExpression(cronExpression);
|
||||
const base = new Date(fromDate);
|
||||
base.setSeconds(0, 0);
|
||||
|
||||
const maxChecks = 60 * 24 * 365;
|
||||
for (let index = 1; index <= maxChecks; index += 1) {
|
||||
const candidate = new Date(base.getTime() + index * 60 * 1000);
|
||||
if (cronMatchesParsed(candidate, parsed)) {
|
||||
return candidate;
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
export function validateCronExpression(cronExpression: string) {
|
||||
parseCronExpression(cronExpression);
|
||||
}
|
||||
|
||||
export async function createOperationTask(input: TaskCreateInput) {
|
||||
return prisma.operationTask.create({
|
||||
data: {
|
||||
task_type: input.taskType,
|
||||
status: input.status ?? OperationTaskStatus.QUEUED,
|
||||
vm_id: input.vm?.id,
|
||||
vm_name: input.vm?.name,
|
||||
node: input.vm?.node,
|
||||
requested_by: input.requestedBy,
|
||||
payload: input.payload,
|
||||
scheduled_for: input.scheduledFor ?? undefined
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
export async function markOperationTaskRunning(taskId: string) {
|
||||
return prisma.operationTask.update({
|
||||
where: { id: taskId },
|
||||
data: {
|
||||
status: OperationTaskStatus.RUNNING,
|
||||
started_at: new Date(),
|
||||
error_message: null
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
export async function markOperationTaskSuccess(taskId: string, result?: Prisma.InputJsonValue, proxmoxUpid?: string) {
|
||||
return prisma.operationTask.update({
|
||||
where: { id: taskId },
|
||||
data: {
|
||||
status: OperationTaskStatus.SUCCESS,
|
||||
result,
|
||||
proxmox_upid: proxmoxUpid,
|
||||
completed_at: new Date()
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
export async function markOperationTaskFailed(taskId: string, errorMessage: string) {
|
||||
return prisma.operationTask.update({
|
||||
where: { id: taskId },
|
||||
data: {
|
||||
status: OperationTaskStatus.FAILED,
|
||||
error_message: errorMessage,
|
||||
completed_at: new Date()
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
function asPlainObject(value: Prisma.JsonValue | Prisma.InputJsonValue | null | undefined): Record<string, unknown> {
|
||||
if (!value || typeof value !== "object" || Array.isArray(value)) return {};
|
||||
return value as Record<string, unknown>;
|
||||
}
|
||||
|
||||
function toPowerAction(value: unknown): PowerScheduleAction | null {
|
||||
if (typeof value !== "string") return null;
|
||||
const candidate = value.toUpperCase();
|
||||
return Object.values(PowerScheduleAction).includes(candidate as PowerScheduleAction)
|
||||
? (candidate as PowerScheduleAction)
|
||||
: null;
|
||||
}
|
||||
|
||||
function asStringOrNull(value: unknown) {
|
||||
return typeof value === "string" && value.trim().length > 0 ? value.trim() : null;
|
||||
}
|
||||
|
||||
function addMinutes(date: Date, minutes: number) {
|
||||
const copy = new Date(date);
|
||||
copy.setMinutes(copy.getMinutes() + minutes);
|
||||
return copy;
|
||||
}
|
||||
|
||||
/**
 * Resolve the effective operations policy from the "operations_policy" and
 * "notifications" settings rows, falling back field-by-field to
 * DEFAULT_OPERATIONS_POLICY when values are missing or out of range.
 *
 * Bounds: max_retry_attempts is kept in [0, 10]; retry_backoff_minutes in
 * [1, 720]. Notification destinations fall back from the operations policy
 * to equivalents in the shared notifications setting.
 */
export async function getOperationsPolicy(): Promise<OperationsPolicy> {
  // Fetch both settings rows in parallel; either may be absent.
  const [setting, notificationsSetting] = await Promise.all([
    prisma.setting.findUnique({
      where: { key: "operations_policy" },
      select: { value: true }
    }),
    prisma.setting.findUnique({
      where: { key: "notifications" },
      select: { value: true }
    })
  ]);

  // Only plain JSON objects are usable; arrays/primitives degrade to {}.
  const value =
    setting?.value && typeof setting.value === "object" && !Array.isArray(setting.value)
      ? (setting.value as Record<string, unknown>)
      : {};
  const notificationsValue =
    notificationsSetting?.value && typeof notificationsSetting.value === "object" && !Array.isArray(notificationsSetting.value)
      ? (notificationsSetting.value as Record<string, unknown>)
      : {};

  const maxRetryAttemptsRaw = Number(value.max_retry_attempts);
  const retryBackoffRaw = Number(value.retry_backoff_minutes);

  return {
    // Non-integer / negative values fall back to the default; capped at 10.
    max_retry_attempts:
      Number.isInteger(maxRetryAttemptsRaw) && maxRetryAttemptsRaw >= 0
        ? Math.min(maxRetryAttemptsRaw, 10)
        : DEFAULT_OPERATIONS_POLICY.max_retry_attempts,
    // Backoff must be at least 1 minute; capped at 720 (12 hours).
    retry_backoff_minutes:
      Number.isInteger(retryBackoffRaw) && retryBackoffRaw >= 1
        ? Math.min(retryBackoffRaw, 720)
        : DEFAULT_OPERATIONS_POLICY.retry_backoff_minutes,
    notify_on_task_failure:
      typeof value.notify_on_task_failure === "boolean"
        ? value.notify_on_task_failure
        : DEFAULT_OPERATIONS_POLICY.notify_on_task_failure,
    // Destination fallback chain: explicit policy value first, then the
    // shared notifications setting's equivalents.
    notification_email: asStringOrNull(value.notification_email) ?? asStringOrNull(notificationsValue.ops_email),
    notification_webhook_url:
      asStringOrNull(value.notification_webhook_url) ??
      asStringOrNull(notificationsValue.monitoring_webhook_url) ??
      asStringOrNull(notificationsValue.alert_webhook_url),
    email_gateway_url:
      asStringOrNull(value.email_gateway_url) ??
      asStringOrNull(notificationsValue.email_gateway_url) ??
      asStringOrNull(notificationsValue.notification_email_webhook)
  };
}
|
||||
|
||||
/**
 * Fan out failure notifications for a terminally failed operation task.
 *
 * Sends, best-effort and independently:
 *  - a JSON webhook POST when a webhook URL is configured, and
 *  - an email-gateway POST when both a gateway URL and a destination
 *    email address are configured.
 * Every attempt (SENT or FAILED) is recorded as an audit-log row; dispatch
 * errors are captured per channel and never thrown to the caller.
 *
 * @param input.task   Denormalized task fields used to build the event payload.
 * @param input.policy Resolved operations policy providing the destinations.
 * @param input.stage  Why the task is terminal: retries exhausted vs. a task
 *                     type that is never retried.
 */
async function dispatchTaskFailureNotifications(input: {
  task: {
    id: string;
    task_type: OperationTaskType;
    vm_name: string | null;
    vm_id: string | null;
    node: string | null;
    retry_count: number;
    error_message: string | null;
    created_at: Date;
    completed_at: Date | null;
    requested_by: string | null;
  };
  policy: OperationsPolicy;
  stage: "retry_exhausted" | "non_retryable";
}) {
  const destinationEmail = input.policy.notification_email;
  const emailGatewayUrl = input.policy.email_gateway_url;
  const webhookUrl = input.policy.notification_webhook_url;
  // Shared event body posted to both channels.
  const eventPayload = {
    type: "operations.task_failure",
    stage: input.stage,
    task_id: input.task.id,
    task_type: input.task.task_type,
    vm_id: input.task.vm_id,
    vm_name: input.task.vm_name,
    node: input.task.node,
    retry_count: input.task.retry_count,
    error_message: input.task.error_message,
    created_at: input.task.created_at.toISOString(),
    completed_at: input.task.completed_at?.toISOString() ?? null,
    requested_by: input.task.requested_by
  };

  // Per-channel dispatch outcomes, later written to the audit log.
  const notifications: Array<{
    channel: "WEBHOOK" | "EMAIL";
    destination: string | null;
    status: "SENT" | "FAILED";
    provider_message: string;
    sent_at: Date | null;
  }> = [];

  if (webhookUrl) {
    try {
      const response = await axios.post(webhookUrl, eventPayload, { timeout: 10_000 });
      notifications.push({
        channel: "WEBHOOK",
        destination: webhookUrl,
        status: "SENT",
        provider_message: `HTTP ${response.status}`,
        sent_at: new Date()
      });
    } catch (error) {
      const message = error instanceof Error ? error.message : "Webhook dispatch failed";
      notifications.push({
        channel: "WEBHOOK",
        destination: webhookUrl,
        status: "FAILED",
        // Truncate so oversized provider errors fit the audit record.
        provider_message: message.slice(0, 240),
        sent_at: null
      });
    }
  }

  // Email requires both a gateway endpoint and a destination address.
  if (emailGatewayUrl && destinationEmail) {
    try {
      const response = await axios.post(
        emailGatewayUrl,
        {
          type: "operations.task_failure.email",
          to: destinationEmail,
          subject: `[Task Failure] ${input.task.task_type} ${input.task.vm_name ?? input.task.vm_id ?? ""}`.trim(),
          message: input.task.error_message ?? "Operation task failed",
          payload: eventPayload
        },
        { timeout: 10_000 }
      );
      notifications.push({
        channel: "EMAIL",
        destination: destinationEmail,
        status: "SENT",
        provider_message: `HTTP ${response.status}`,
        sent_at: new Date()
      });
    } catch (error) {
      const message = error instanceof Error ? error.message : "Email dispatch failed";
      notifications.push({
        channel: "EMAIL",
        destination: destinationEmail,
        status: "FAILED",
        provider_message: message.slice(0, 240),
        sent_at: null
      });
    }
  }

  // One audit row per attempted channel; ERROR severity when dispatch failed.
  if (notifications.length > 0) {
    await prisma.auditLog.createMany({
      data: notifications.map((notification) => ({
        action: "operations.task_failure_notification",
        resource_type: "SYSTEM",
        resource_id: input.task.id,
        resource_name: input.task.vm_name ?? input.task.id,
        actor_email: "system@proxpanel.local",
        actor_role: "SYSTEM",
        severity: notification.status === "FAILED" ? "ERROR" : "INFO",
        details: {
          channel: notification.channel,
          destination: notification.destination,
          dispatch_status: notification.status,
          provider_message: notification.provider_message,
          task_id: input.task.id,
          stage: input.stage
        }
      }))
    });
  }
}
|
||||
|
||||
/**
 * Central failure path for an operation task.
 *
 * VM_POWER tasks with remaining retry budget are moved to RETRYING with
 * scheduled_for set retry_backoff_minutes in the future; anything else is
 * marked FAILED, and failure notifications are dispatched when the policy
 * enables them.
 *
 * @returns A discriminated result: "missing" (task not found), "retrying"
 *          (with next_retry_at), or "failed".
 */
async function handleOperationTaskFailure(taskId: string, errorMessage: string) {
  const policy = await getOperationsPolicy();
  const existing = await prisma.operationTask.findUnique({ where: { id: taskId } });

  if (!existing) {
    return { status: "missing" as const, retry_scheduled: false };
  }

  // Only VM power actions are retried, and only while under the policy cap.
  const canRetry =
    existing.task_type === OperationTaskType.VM_POWER &&
    existing.retry_count < policy.max_retry_attempts &&
    policy.max_retry_attempts > 0;

  if (canRetry) {
    const nextRetryAt = addMinutes(new Date(), policy.retry_backoff_minutes);
    await prisma.operationTask.update({
      where: { id: existing.id },
      data: {
        status: OperationTaskStatus.RETRYING,
        error_message: errorMessage,
        completed_at: new Date(),
        retry_count: existing.retry_count + 1,
        // The retry worker picks the task up once scheduled_for elapses.
        scheduled_for: nextRetryAt
      }
    });
    return { status: "retrying" as const, retry_scheduled: true, next_retry_at: nextRetryAt };
  }

  // Terminal failure: clear scheduled_for so workers stop considering it.
  const failed = await prisma.operationTask.update({
    where: { id: existing.id },
    data: {
      status: OperationTaskStatus.FAILED,
      error_message: errorMessage,
      completed_at: new Date(),
      scheduled_for: null
    }
  });

  if (policy.notify_on_task_failure) {
    await dispatchTaskFailureNotifications({
      task: failed,
      policy,
      stage: existing.task_type === OperationTaskType.VM_POWER ? "retry_exhausted" : "non_retryable"
    });
  }

  return { status: "failed" as const, retry_scheduled: false };
}
|
||||
|
||||
/**
 * Paginated listing of operation tasks with optional status/type/VM/tenant
 * filters, plus a per-status queue summary.
 *
 * limit is clamped to [1, 200] (default 50); offset to >= 0 (default 0).
 * Note: the queue summary respects only the tenant filter, not the other
 * filters.
 */
export async function listOperationTasks(input: TaskListInput) {
  const where: Prisma.OperationTaskWhereInput = {};

  if (input.status) where.status = input.status;
  if (input.taskType) where.task_type = input.taskType;
  if (input.vmId) where.vm_id = input.vmId;
  if (input.tenantId) {
    // Tenant scoping goes through the task's VM relation.
    where.vm = { tenant_id: input.tenantId };
  }

  const limit = Math.min(Math.max(input.limit ?? 50, 1), 200);
  const offset = Math.max(input.offset ?? 0, 0);

  // Page of rows and the filtered total, fetched in parallel.
  const [data, total] = await Promise.all([
    prisma.operationTask.findMany({
      where,
      include: {
        vm: {
          select: {
            id: true,
            name: true,
            tenant_id: true,
            node: true
          }
        }
      },
      orderBy: { created_at: "desc" },
      take: limit,
      skip: offset
    }),
    prisma.operationTask.count({ where })
  ]);

  // Counts per status across the (tenant-scoped) queue.
  const queue = await prisma.operationTask.groupBy({
    by: ["status"],
    _count: { status: true },
    where: input.tenantId ? { vm: { tenant_id: input.tenantId } } : undefined
  });

  return {
    data,
    meta: {
      total,
      limit,
      offset,
      // Flatten groupBy buckets into { STATUS: count }.
      queue_summary: queue.reduce<Record<string, number>>((acc, item) => {
        acc[item.status] = item._count.status;
        return acc;
      }, {})
    }
  };
}
|
||||
|
||||
function vmStatusFromPowerAction(action: PowerScheduleAction): VmStatus {
|
||||
if (action === PowerScheduleAction.START || action === PowerScheduleAction.RESTART) {
|
||||
return VmStatus.RUNNING;
|
||||
}
|
||||
return VmStatus.STOPPED;
|
||||
}
|
||||
|
||||
async function fetchVmForAction(vmId: string) {
|
||||
const vm = await prisma.virtualMachine.findUnique({
|
||||
where: { id: vmId },
|
||||
select: {
|
||||
id: true,
|
||||
name: true,
|
||||
node: true,
|
||||
vmid: true,
|
||||
type: true,
|
||||
tenant_id: true
|
||||
}
|
||||
});
|
||||
|
||||
if (!vm) {
|
||||
throw new HttpError(404, "VM not found", "VM_NOT_FOUND");
|
||||
}
|
||||
|
||||
return vm;
|
||||
}
|
||||
|
||||
async function runPowerAction(vm: Awaited<ReturnType<typeof fetchVmForAction>>, action: PowerScheduleAction) {
|
||||
const type = vm.type === "LXC" ? "lxc" : "qemu";
|
||||
|
||||
if (action === PowerScheduleAction.START) {
|
||||
return startVm(vm.node, vm.vmid, type);
|
||||
}
|
||||
|
||||
if (action === PowerScheduleAction.STOP) {
|
||||
return stopVm(vm.node, vm.vmid, type);
|
||||
}
|
||||
|
||||
if (action === PowerScheduleAction.RESTART) {
|
||||
return restartVm(vm.node, vm.vmid, type);
|
||||
}
|
||||
|
||||
return shutdownVm(vm.node, vm.vmid, type);
|
||||
}
|
||||
|
||||
/**
 * Execute a power action against a VM immediately, tracked as an operation task.
 *
 * Flow: create a VM_POWER task -> mark it RUNNING -> call Proxmox -> update
 * the VM's status optimistically and record the UPID -> mark the task SUCCESS.
 * On error the failure is routed through handleOperationTaskFailure (which may
 * schedule a retry) and the original error is rethrown to the caller.
 *
 * @param options.scheduledFor Original trigger time when invoked by a schedule.
 * @param options.payload      Extra JSON merged into the task payload; the
 *                             action and vm_id keys are always written last.
 * @returns The completed task row and the Proxmox UPID (if any).
 */
export async function executeVmPowerActionNow(
  vmId: string,
  action: PowerScheduleAction,
  actorEmail: string,
  options?: ExecutePowerOptions
) {
  const vm = await fetchVmForAction(vmId); // throws 404 when the VM is unknown
  const rawPayload = asPlainObject(options?.payload ?? null);
  // action/vm_id are spread last so caller-supplied keys cannot override them.
  const taskPayload: Prisma.InputJsonObject = {
    ...rawPayload,
    action,
    vm_id: vm.id
  };

  const task = await createOperationTask({
    taskType: OperationTaskType.VM_POWER,
    vm: {
      id: vm.id,
      name: vm.name,
      node: vm.node
    },
    requestedBy: actorEmail,
    payload: taskPayload,
    scheduledFor: options?.scheduledFor
  });

  await markOperationTaskRunning(task.id);

  try {
    const upid = await runPowerAction(vm, action);
    // Optimistic status update: assume the action reaches its target state.
    await prisma.virtualMachine.update({
      where: { id: vm.id },
      data: {
        status: vmStatusFromPowerAction(action),
        proxmox_upid: upid ?? undefined
      }
    });

    // Only include `upid` in the result payload when Proxmox returned one.
    const resultPayload: Prisma.InputJsonObject = upid
      ? {
          vm_id: vm.id,
          action,
          upid
        }
      : {
          vm_id: vm.id,
          action
        };

    const updatedTask = await markOperationTaskSuccess(task.id, resultPayload, upid ?? undefined);
    return { task: updatedTask, upid };
  } catch (error) {
    const message = error instanceof Error ? error.message : "Unknown power action error";
    // May mark the task RETRYING or FAILED depending on policy.
    await handleOperationTaskFailure(task.id, message);
    throw error;
  }
}
|
||||
|
||||
export async function listPowerSchedules(tenantId?: string | null) {
|
||||
const where: Prisma.PowerScheduleWhereInput = tenantId
|
||||
? {
|
||||
vm: {
|
||||
tenant_id: tenantId
|
||||
}
|
||||
}
|
||||
: {};
|
||||
|
||||
return prisma.powerSchedule.findMany({
|
||||
where,
|
||||
include: {
|
||||
vm: {
|
||||
select: {
|
||||
id: true,
|
||||
name: true,
|
||||
node: true,
|
||||
tenant_id: true,
|
||||
status: true
|
||||
}
|
||||
}
|
||||
},
|
||||
orderBy: [
|
||||
{ enabled: "desc" },
|
||||
{ next_run_at: "asc" },
|
||||
{ created_at: "desc" }
|
||||
]
|
||||
});
|
||||
}
|
||||
|
||||
export async function createPowerSchedule(input: PowerScheduleCreateInput) {
|
||||
validateCronExpression(input.cronExpression);
|
||||
const vm = await fetchVmForAction(input.vmId);
|
||||
|
||||
const nextRun = nextRunAt(input.cronExpression, new Date());
|
||||
|
||||
return prisma.powerSchedule.create({
|
||||
data: {
|
||||
vm_id: vm.id,
|
||||
action: input.action,
|
||||
cron_expression: input.cronExpression,
|
||||
timezone: input.timezone ?? "UTC",
|
||||
next_run_at: nextRun,
|
||||
created_by: input.createdBy
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
 * Partially update a power schedule. Only supplied fields change; the cron
 * expression is validated when provided. next_run_at is recomputed from the
 * effective (new or existing) cron expression, or cleared when the schedule
 * ends up disabled.
 * @throws HttpError 404 for an unknown schedule id; Error on invalid cron.
 */
export async function updatePowerSchedule(scheduleId: string, input: PowerScheduleUpdateInput) {
  const existing = await prisma.powerSchedule.findUnique({ where: { id: scheduleId } });
  if (!existing) {
    throw new HttpError(404, "Power schedule not found", "POWER_SCHEDULE_NOT_FOUND");
  }

  if (input.cronExpression) {
    validateCronExpression(input.cronExpression);
  }

  // Effective values after the patch, used only to derive next_run_at.
  const cronExpression = input.cronExpression ?? existing.cron_expression;
  const enabled = input.enabled ?? existing.enabled;
  const nextRun = enabled ? nextRunAt(cronExpression, new Date()) : null;

  // Undefined fields are skipped by Prisma, leaving stored values untouched.
  return prisma.powerSchedule.update({
    where: { id: scheduleId },
    data: {
      action: input.action,
      cron_expression: input.cronExpression,
      timezone: input.timezone,
      enabled: input.enabled,
      next_run_at: nextRun
    }
  });
}
|
||||
|
||||
export async function deletePowerSchedule(scheduleId: string) {
|
||||
return prisma.powerSchedule.delete({ where: { id: scheduleId } });
|
||||
}
|
||||
|
||||
/**
 * Scheduler tick: find enabled power schedules whose next_run_at has elapsed
 * and execute each at most once.
 *
 * A conditional updateMany acts as an optimistic claim (compare-and-set on
 * enabled + next_run_at) so concurrent workers cannot double-fire the same
 * schedule; losers of the race count as "skipped". Claiming also advances
 * next_run_at, or disables the schedule when the cron has no future match.
 * Execution failures are handled by the per-task retry machinery, so they
 * are only counted here.
 *
 * @returns Counters: scanned, executed, failed, skipped.
 */
export async function processDuePowerSchedules(actorEmail = "system@proxpanel.local") {
  const now = new Date();
  const dueSchedules = await prisma.powerSchedule.findMany({
    where: {
      enabled: true,
      next_run_at: {
        lte: now
      }
    },
    include: {
      vm: {
        select: {
          id: true,
          name: true,
          node: true,
          vmid: true,
          type: true
        }
      }
    },
    orderBy: {
      next_run_at: "asc"
    },
    take: 100 // bound the work done per tick
  });

  let executed = 0;
  let failed = 0;
  let skipped = 0;

  for (const schedule of dueSchedules) {
    const nextRun = nextRunAt(schedule.cron_expression, now);
    // Claim: only succeeds if the row is still enabled and still due.
    const claim = await prisma.powerSchedule.updateMany({
      where: {
        id: schedule.id,
        enabled: true,
        next_run_at: {
          lte: now
        }
      },
      data: {
        last_run_at: now,
        next_run_at: nextRun,
        // No future occurrence -> disable so the schedule stops firing.
        enabled: nextRun ? schedule.enabled : false
      }
    });

    if (claim.count === 0) {
      // Another worker claimed it (or it was disabled) since the scan.
      skipped += 1;
      continue;
    }

    const payload: Prisma.InputJsonValue = {
      source: "power_schedule",
      schedule_id: schedule.id,
      action: schedule.action
    };

    try {
      await executeVmPowerActionNow(schedule.vm_id, schedule.action, actorEmail, {
        payload,
        scheduledFor: schedule.next_run_at
      });
      executed += 1;
    } catch {
      // Failure is already recorded on the operation task; just count it.
      failed += 1;
    }
  }

  return {
    scanned: dueSchedules.length,
    executed,
    failed,
    skipped
  };
}
|
||||
|
||||
/**
 * Retry worker tick: re-run VM_POWER tasks in RETRYING state whose
 * scheduled_for backoff has elapsed.
 *
 * Each task is claimed via a conditional updateMany (RETRYING -> RUNNING) so
 * concurrent workers cannot execute the same retry twice. The power action is
 * reconstructed from the stored task payload; tasks without a usable
 * vm_id/action are failed as non-retryable. Failures route back through
 * handleOperationTaskFailure, which may schedule another retry. A summary
 * audit-log row is written whenever anything happened in the cycle.
 *
 * @returns Counters: scanned, executed, succeeded, failed, rescheduled,
 *          invalid_payload, skipped.
 */
export async function processDueOperationRetries(actorEmail = "system@proxpanel.local") {
  const now = new Date();
  const dueRetries = await prisma.operationTask.findMany({
    where: {
      task_type: OperationTaskType.VM_POWER,
      status: OperationTaskStatus.RETRYING,
      scheduled_for: {
        lte: now
      }
    },
    orderBy: { scheduled_for: "asc" },
    take: 100 // bound the work done per tick
  });

  let executed = 0;
  let succeeded = 0;
  let failed = 0;
  let rescheduled = 0;
  let invalidPayload = 0;
  let skipped = 0;

  for (const task of dueRetries) {
    const claimedAt = new Date();
    // Optimistic claim: flip RETRYING -> RUNNING only if still due.
    const claim = await prisma.operationTask.updateMany({
      where: {
        id: task.id,
        task_type: OperationTaskType.VM_POWER,
        status: OperationTaskStatus.RETRYING,
        scheduled_for: {
          lte: now
        }
      },
      data: {
        status: OperationTaskStatus.RUNNING,
        started_at: claimedAt,
        error_message: null,
        completed_at: null
      }
    });

    if (claim.count === 0) {
      // Lost the race to another worker, or the task changed state.
      skipped += 1;
      continue;
    }

    executed += 1;
    // Rebuild the power action from the stored task payload.
    const payload = asPlainObject(task.payload as Prisma.JsonValue | null);
    const action = toPowerAction(payload.action);

    if (!task.vm_id || !action) {
      invalidPayload += 1;
      await handleOperationTaskFailure(task.id, "Retry payload missing actionable power action");
      continue;
    }

    try {
      const vm = await fetchVmForAction(task.vm_id);
      const upid = await runPowerAction(vm, action);
      // Optimistic VM status update, mirroring executeVmPowerActionNow.
      await prisma.virtualMachine.update({
        where: { id: vm.id },
        data: {
          status: vmStatusFromPowerAction(action),
          proxmox_upid: upid ?? undefined
        }
      });

      // Only include `upid` when Proxmox returned one.
      const resultPayload: Prisma.InputJsonObject = upid
        ? {
            retry_of_task: task.id,
            vm_id: vm.id,
            action,
            upid
          }
        : {
            retry_of_task: task.id,
            vm_id: vm.id,
            action
          };

      await markOperationTaskSuccess(task.id, resultPayload, upid ?? undefined);
      succeeded += 1;
    } catch (error) {
      const message = error instanceof Error ? error.message : "Retry power action failed";
      const failureResult = await handleOperationTaskFailure(task.id, message);
      failed += 1;
      if (failureResult.retry_scheduled) {
        rescheduled += 1;
      }
    }
  }

  // Audit-log a summary of the cycle when there was anything to report.
  if (dueRetries.length > 0 || failed > 0 || rescheduled > 0) {
    await prisma.auditLog.create({
      data: {
        action: "operations.retry_cycle",
        resource_type: "SYSTEM",
        resource_name: "Operation Retry Worker",
        actor_email: actorEmail,
        actor_role: "SYSTEM",
        severity: failed > 0 ? "WARNING" : "INFO",
        details: {
          scanned: dueRetries.length,
          executed,
          succeeded,
          failed,
          rescheduled,
          invalid_payload: invalidPayload,
          skipped
        }
      }
    });
  }

  return {
    scanned: dueRetries.length,
    executed,
    succeeded,
    failed,
    rescheduled,
    invalid_payload: invalidPayload,
    skipped
  };
}
|
||||
|
||||
/**
 * Snapshot of operations-queue health, optionally scoped to one tenant.
 *
 * Gathers, in parallel: task counts per status; QUEUED tasks older than 15
 * minutes ("stale"); FAILED tasks completed in the last 24h; RETRYING tasks
 * whose backoff has elapsed; and enabled power schedules already due.
 */
export async function operationQueueInsights(tenantId?: string | null) {
  const now = new Date();
  const staleThreshold = addMinutes(now, -15);
  const dayAgo = addMinutes(now, -24 * 60);

  // Tenant scoping goes through the task's VM relation.
  const tenantWhere: Prisma.OperationTaskWhereInput = tenantId ? { vm: { tenant_id: tenantId } } : {};

  const [statusBuckets, staleQueued, failed24h, dueRetries, powerSchedulesDue] = await Promise.all([
    prisma.operationTask.groupBy({
      by: ["status"],
      _count: { status: true },
      where: tenantWhere
    }),
    prisma.operationTask.count({
      where: {
        ...tenantWhere,
        status: OperationTaskStatus.QUEUED,
        created_at: { lte: staleThreshold }
      }
    }),
    prisma.operationTask.count({
      where: {
        ...tenantWhere,
        status: OperationTaskStatus.FAILED,
        completed_at: { gte: dayAgo }
      }
    }),
    prisma.operationTask.count({
      where: {
        ...tenantWhere,
        status: OperationTaskStatus.RETRYING,
        scheduled_for: { lte: now }
      }
    }),
    prisma.powerSchedule.count({
      where: {
        enabled: true,
        next_run_at: { lte: now },
        ...(tenantId ? { vm: { tenant_id: tenantId } } : {})
      }
    })
  ]);

  // Flatten groupBy buckets into { STATUS: count }.
  const queueSummary = statusBuckets.reduce<Record<string, number>>((acc, bucket) => {
    acc[bucket.status] = bucket._count.status;
    return acc;
  }, {});

  return {
    generated_at: now.toISOString(),
    queue_summary: queueSummary,
    stale_queued_tasks: staleQueued,
    failed_tasks_24h: failed24h,
    due_retries: dueRetries,
    due_power_schedules: powerSchedulesDue
  };
}
|
||||
182
backend/src/services/payment.service.ts
Normal file
182
backend/src/services/payment.service.ts
Normal file
@@ -0,0 +1,182 @@
|
||||
import axios from "axios";
|
||||
import crypto from "crypto";
|
||||
import { PaymentProvider } from "@prisma/client";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { HttpError } from "../lib/http-error";
|
||||
import { markInvoicePaid } from "./billing.service";
|
||||
|
||||
/**
 * Shape of the JSON blob stored under the "payment" settings key.
 * All fields are optional; an absent key means that provider is unconfigured.
 */
type PaymentSettings = {
  /** Provider used when a payment link request names none. */
  default_provider?: "paystack" | "flutterwave" | "manual";
  /** Paystack API key pair. */
  paystack_public?: string;
  paystack_secret?: string;
  /** Flutterwave API key pair. */
  flutterwave_public?: string;
  flutterwave_secret?: string;
  /** Value compared against the Flutterwave webhook signature header. */
  flutterwave_webhook_hash?: string;
  /** URL the provider redirects to after checkout. */
  callback_url?: string;
};
|
||||
|
||||
async function getPaymentSettings(): Promise<PaymentSettings> {
|
||||
const setting = await prisma.setting.findUnique({
|
||||
where: { key: "payment" }
|
||||
});
|
||||
return (setting?.value as PaymentSettings) ?? {};
|
||||
}
|
||||
|
||||
function normalizeProvider(provider: string | undefined, fallback: string): PaymentProvider {
|
||||
const value = (provider ?? fallback).toLowerCase();
|
||||
if (value === "paystack") return PaymentProvider.PAYSTACK;
|
||||
if (value === "flutterwave") return PaymentProvider.FLUTTERWAVE;
|
||||
return PaymentProvider.MANUAL;
|
||||
}
|
||||
|
||||
/**
 * Create (or refresh) an online payment link for an invoice via Paystack or
 * Flutterwave.
 *
 * Picks the provider from `requestedProvider`, falling back to the configured
 * default. Reuses the invoice's existing payment reference when present so a
 * regenerated link stays tied to the same transaction; otherwise derives a
 * new "PAY-<invoice_number>-<timestamp>" reference. The invoice row is
 * updated with the provider, reference and hosted payment URL before
 * returning.
 *
 * @throws HttpError 404 when the invoice is unknown; 400 for the MANUAL
 *         provider or a missing provider secret key.
 * @returns { provider, payment_url, reference }
 */
export async function createInvoicePaymentLink(invoiceId: string, requestedProvider?: string) {
  const invoice = await prisma.invoice.findUnique({
    where: { id: invoiceId },
    include: { tenant: true }
  });
  if (!invoice) {
    throw new HttpError(404, "Invoice not found", "INVOICE_NOT_FOUND");
  }

  const settings = await getPaymentSettings();
  const provider = normalizeProvider(requestedProvider, settings.default_provider ?? "manual");
  if (provider === PaymentProvider.MANUAL) {
    throw new HttpError(400, "Manual payment provider cannot generate online links", "MANUAL_PROVIDER");
  }

  // Keep the existing reference if one was already issued for this invoice.
  const reference = invoice.payment_reference ?? `PAY-${invoice.invoice_number}-${Date.now()}`;

  if (provider === PaymentProvider.PAYSTACK) {
    if (!settings.paystack_secret) {
      throw new HttpError(400, "Paystack secret key is missing", "PAYSTACK_CONFIG_MISSING");
    }
    const response = await axios.post(
      "https://api.paystack.co/transaction/initialize",
      {
        email: invoice.tenant.owner_email,
        // Amount is converted to the minor currency unit (x100) for Paystack.
        amount: Math.round(Number(invoice.amount) * 100),
        reference,
        currency: invoice.currency,
        callback_url: settings.callback_url,
        metadata: {
          invoice_id: invoice.id,
          tenant_id: invoice.tenant_id
        }
      },
      {
        headers: {
          Authorization: `Bearer ${settings.paystack_secret}`,
          "Content-Type": "application/json"
        }
      }
    );

    const paymentUrl = response.data?.data?.authorization_url as string | undefined;
    await prisma.invoice.update({
      where: { id: invoice.id },
      data: {
        status: "PENDING",
        payment_provider: provider,
        payment_reference: reference,
        payment_url: paymentUrl
      }
    });
    return { provider: "paystack", payment_url: paymentUrl, reference };
  }

  // Flutterwave path (the only remaining provider at this point).
  if (!settings.flutterwave_secret) {
    throw new HttpError(400, "Flutterwave secret key is missing", "FLUTTERWAVE_CONFIG_MISSING");
  }
  const response = await axios.post(
    "https://api.flutterwave.com/v3/payments",
    {
      tx_ref: reference,
      // Flutterwave takes the amount in major units, unlike Paystack.
      amount: Number(invoice.amount),
      currency: invoice.currency,
      redirect_url: settings.callback_url,
      customer: {
        email: invoice.tenant.owner_email,
        name: invoice.tenant.name
      },
      customizations: {
        title: "ProxPanel Invoice Payment",
        description: `Invoice ${invoice.invoice_number}`
      },
      meta: {
        invoice_id: invoice.id,
        tenant_id: invoice.tenant_id
      }
    },
    {
      headers: {
        Authorization: `Bearer ${settings.flutterwave_secret}`,
        "Content-Type": "application/json"
      }
    }
  );
  const paymentUrl = response.data?.data?.link as string | undefined;
  await prisma.invoice.update({
    where: { id: invoice.id },
    data: {
      status: "PENDING",
      payment_provider: provider,
      payment_reference: reference,
      payment_url: paymentUrl
    }
  });
  return { provider: "flutterwave", payment_url: paymentUrl, reference };
}
|
||||
|
||||
/**
 * Record an offline/manual payment: marks the invoice PAID with provider
 * MANUAL and the operator-supplied reference, attributed to actorEmail.
 */
export async function handleManualInvoicePayment(invoiceId: string, reference: string, actorEmail: string) {
  return markInvoicePaid(invoiceId, PaymentProvider.MANUAL, reference, actorEmail);
}
|
||||
|
||||
export async function verifyPaystackSignature(signature: string | undefined, rawBody: string | undefined) {
|
||||
if (!signature || !rawBody) return false;
|
||||
const settings = await getPaymentSettings();
|
||||
if (!settings.paystack_secret) return false;
|
||||
const expected = crypto
|
||||
.createHmac("sha512", settings.paystack_secret)
|
||||
.update(rawBody)
|
||||
.digest("hex");
|
||||
return expected === signature;
|
||||
}
|
||||
|
||||
export async function verifyFlutterwaveSignature(signature: string | undefined) {
|
||||
const settings = await getPaymentSettings();
|
||||
if (!settings.flutterwave_webhook_hash) return false;
|
||||
return settings.flutterwave_webhook_hash === signature;
|
||||
}
|
||||
|
||||
export async function processPaystackWebhook(payload: any) {
|
||||
if (payload?.event !== "charge.success") return { handled: false };
|
||||
const reference = payload?.data?.reference as string | undefined;
|
||||
if (!reference) return { handled: false };
|
||||
|
||||
const invoice = await prisma.invoice.findFirst({
|
||||
where: { payment_reference: reference }
|
||||
});
|
||||
if (!invoice) return { handled: false };
|
||||
|
||||
if (invoice.status !== "PAID") {
|
||||
await markInvoicePaid(invoice.id, PaymentProvider.PAYSTACK, reference, "webhook@paystack");
|
||||
}
|
||||
return { handled: true, invoice_id: invoice.id };
|
||||
}
|
||||
|
||||
export async function processFlutterwaveWebhook(payload: any) {
|
||||
const status = payload?.status?.toLowerCase();
|
||||
if (status !== "successful") return { handled: false };
|
||||
const reference = (payload?.txRef ?? payload?.tx_ref) as string | undefined;
|
||||
if (!reference) return { handled: false };
|
||||
|
||||
const invoice = await prisma.invoice.findFirst({
|
||||
where: { payment_reference: reference }
|
||||
});
|
||||
if (!invoice) return { handled: false };
|
||||
|
||||
if (invoice.status !== "PAID") {
|
||||
await markInvoicePaid(invoice.id, PaymentProvider.FLUTTERWAVE, reference, "webhook@flutterwave");
|
||||
}
|
||||
return { handled: true, invoice_id: invoice.id };
|
||||
}
|
||||
1123
backend/src/services/provisioning.service.ts
Normal file
1123
backend/src/services/provisioning.service.ts
Normal file
File diff suppressed because it is too large
Load Diff
1451
backend/src/services/proxmox.service.ts
Normal file
1451
backend/src/services/proxmox.service.ts
Normal file
File diff suppressed because it is too large
Load Diff
495
backend/src/services/scheduler.service.ts
Normal file
495
backend/src/services/scheduler.service.ts
Normal file
@@ -0,0 +1,495 @@
|
||||
import cron, { type ScheduledTask } from "node-cron";
|
||||
import os from "os";
|
||||
import { SettingType } from "@prisma/client";
|
||||
import { env } from "../config/env";
|
||||
import { prisma } from "../lib/prisma";
|
||||
import { meterHourlyUsage, generateInvoicesFromUnbilledUsage, processBackupSchedule, updateOverdueInvoices } from "./billing.service";
|
||||
import { processDuePowerSchedules, processDueOperationRetries } from "./operations.service";
|
||||
import { processDueSnapshotJobs, processPendingBackups } from "./backup.service";
|
||||
import { evaluateAlertRulesNow, processDueHealthChecks } from "./monitoring.service";
|
||||
|
||||
/** Runtime-tunable scheduler configuration (persisted under the "scheduler" setting key). */
export type SchedulerConfig = {
  enable_scheduler: boolean;
  billing_cron: string;
  backup_cron: string;
  power_schedule_cron: string;
  monitoring_cron: string;
  operation_retry_cron: string;
};

/** Identifiers for the background workers managed by this module. */
type WorkerKey = "billing" | "backup" | "power" | "monitoring" | "operation_retry";
/** Lifecycle states reported per worker in the runtime snapshot. */
type WorkerStatus = "disabled" | "scheduled" | "running" | "success" | "failed";

/** Last-known execution state for a single worker. */
type WorkerState = {
  worker: WorkerKey;
  cron: string;
  status: WorkerStatus;
  last_run_at: string | null; // ISO timestamp of the last run; null before the first run
  last_duration_ms: number | null;
  last_message: string | null; // summary string from the last successful run (or skip reason)
  last_error: string | null; // error message from the last failed run
};

/**
 * JSON payload stored in the settings table to implement a per-worker,
 * cross-instance lease (distributed lock). All timestamps are ISO strings.
 */
type SchedulerLeasePayload = {
  owner_id: string; // instance id of the holder; emptied on release
  lease_until: string; // expiry; other instances may take over after this
  acquired_at: string;
  heartbeat_at: string;
  worker: WorkerKey;
};

/** In-memory snapshot of the active config plus per-worker state. */
type SchedulerState = {
  started_at: string | null;
  config: SchedulerConfig;
  workers: Record<WorkerKey, WorkerState>;
};
|
||||
|
||||
// Defaults come from the environment; operation_retry has no env knob here,
// so it defaults to every 5 minutes.
const DEFAULT_SCHEDULER_CONFIG: SchedulerConfig = {
  enable_scheduler: env.ENABLE_SCHEDULER,
  billing_cron: env.BILLING_CRON,
  backup_cron: env.BACKUP_CRON,
  power_schedule_cron: env.POWER_SCHEDULE_CRON,
  monitoring_cron: env.MONITORING_CRON,
  operation_retry_cron: "*/5 * * * *"
};
|
||||
|
||||
let scheduledJobs: Partial<Record<WorkerKey, ScheduledTask>> = {};
|
||||
const activeWorkerRuns = new Set<WorkerKey>();
|
||||
const schedulerInstanceId = `${os.hostname()}:${process.pid}:${Math.random().toString(36).slice(2, 10)}`;
|
||||
|
||||
const schedulerState: SchedulerState = {
|
||||
started_at: null,
|
||||
config: DEFAULT_SCHEDULER_CONFIG,
|
||||
workers: {
|
||||
billing: {
|
||||
worker: "billing",
|
||||
cron: DEFAULT_SCHEDULER_CONFIG.billing_cron,
|
||||
status: DEFAULT_SCHEDULER_CONFIG.enable_scheduler ? "scheduled" : "disabled",
|
||||
last_run_at: null,
|
||||
last_duration_ms: null,
|
||||
last_message: null,
|
||||
last_error: null
|
||||
},
|
||||
backup: {
|
||||
worker: "backup",
|
||||
cron: DEFAULT_SCHEDULER_CONFIG.backup_cron,
|
||||
status: DEFAULT_SCHEDULER_CONFIG.enable_scheduler ? "scheduled" : "disabled",
|
||||
last_run_at: null,
|
||||
last_duration_ms: null,
|
||||
last_message: null,
|
||||
last_error: null
|
||||
},
|
||||
power: {
|
||||
worker: "power",
|
||||
cron: DEFAULT_SCHEDULER_CONFIG.power_schedule_cron,
|
||||
status: DEFAULT_SCHEDULER_CONFIG.enable_scheduler ? "scheduled" : "disabled",
|
||||
last_run_at: null,
|
||||
last_duration_ms: null,
|
||||
last_message: null,
|
||||
last_error: null
|
||||
},
|
||||
monitoring: {
|
||||
worker: "monitoring",
|
||||
cron: DEFAULT_SCHEDULER_CONFIG.monitoring_cron,
|
||||
status: DEFAULT_SCHEDULER_CONFIG.enable_scheduler ? "scheduled" : "disabled",
|
||||
last_run_at: null,
|
||||
last_duration_ms: null,
|
||||
last_message: null,
|
||||
last_error: null
|
||||
},
|
||||
operation_retry: {
|
||||
worker: "operation_retry",
|
||||
cron: DEFAULT_SCHEDULER_CONFIG.operation_retry_cron,
|
||||
status: DEFAULT_SCHEDULER_CONFIG.enable_scheduler ? "scheduled" : "disabled",
|
||||
last_run_at: null,
|
||||
last_duration_ms: null,
|
||||
last_message: null,
|
||||
last_error: null
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
function normalizeCronExpression(value: unknown, fallback: string) {
|
||||
if (typeof value !== "string") return fallback;
|
||||
const trimmed = value.trim();
|
||||
if (trimmed.length === 0) return fallback;
|
||||
return cron.validate(trimmed) ? trimmed : fallback;
|
||||
}
|
||||
|
||||
function normalizeSchedulerConfig(raw?: unknown): SchedulerConfig {
|
||||
const record = raw && typeof raw === "object" && !Array.isArray(raw) ? (raw as Record<string, unknown>) : {};
|
||||
|
||||
const enabled =
|
||||
typeof record.enable_scheduler === "boolean" ? record.enable_scheduler : DEFAULT_SCHEDULER_CONFIG.enable_scheduler;
|
||||
|
||||
return {
|
||||
enable_scheduler: enabled,
|
||||
billing_cron: normalizeCronExpression(record.billing_cron, DEFAULT_SCHEDULER_CONFIG.billing_cron),
|
||||
backup_cron: normalizeCronExpression(record.backup_cron, DEFAULT_SCHEDULER_CONFIG.backup_cron),
|
||||
power_schedule_cron: normalizeCronExpression(record.power_schedule_cron, DEFAULT_SCHEDULER_CONFIG.power_schedule_cron),
|
||||
monitoring_cron: normalizeCronExpression(record.monitoring_cron, DEFAULT_SCHEDULER_CONFIG.monitoring_cron),
|
||||
operation_retry_cron: normalizeCronExpression(record.operation_retry_cron, DEFAULT_SCHEDULER_CONFIG.operation_retry_cron)
|
||||
};
|
||||
}
|
||||
|
||||
function lockSettingKey(worker: WorkerKey) {
|
||||
return `scheduler_lock:${worker}`;
|
||||
}
|
||||
|
||||
function nextLeaseDeadline(from = new Date()) {
|
||||
return new Date(from.getTime() + env.SCHEDULER_LEASE_MS);
|
||||
}
|
||||
|
||||
function parseLeasePayload(value: unknown): SchedulerLeasePayload | null {
|
||||
if (!value || typeof value !== "object" || Array.isArray(value)) return null;
|
||||
const record = value as Record<string, unknown>;
|
||||
if (
|
||||
typeof record.owner_id !== "string" ||
|
||||
typeof record.lease_until !== "string" ||
|
||||
typeof record.acquired_at !== "string" ||
|
||||
typeof record.heartbeat_at !== "string" ||
|
||||
typeof record.worker !== "string"
|
||||
) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return {
|
||||
owner_id: record.owner_id,
|
||||
lease_until: record.lease_until,
|
||||
acquired_at: record.acquired_at,
|
||||
heartbeat_at: record.heartbeat_at,
|
||||
worker: record.worker as WorkerKey
|
||||
};
|
||||
}
|
||||
|
||||
function leasePayload(worker: WorkerKey, now = new Date(), acquiredAt?: string): SchedulerLeasePayload {
|
||||
return {
|
||||
owner_id: schedulerInstanceId,
|
||||
lease_until: nextLeaseDeadline(now).toISOString(),
|
||||
acquired_at: acquiredAt ?? now.toISOString(),
|
||||
heartbeat_at: now.toISOString(),
|
||||
worker
|
||||
};
|
||||
}
|
||||
|
||||
/**
 * Attempts to acquire the distributed, settings-table-backed lease for a
 * worker so only one scheduler instance runs it at a time.
 *
 * Strategy:
 *  - No lease row yet: create one owned by this instance. The settings key
 *    is the lookup key, so a concurrent create throws and we return false
 *    (lost the race).
 *  - Row held by a *different* instance whose lease has not expired: false.
 *  - Otherwise (our own stale lease, an expired lease, or an unparseable
 *    payload): take over via updateMany matched on `updated_at` — an
 *    optimistic-concurrency check so exactly one contender wins.
 *
 * @returns true when this instance now owns the lease.
 */
async function acquireWorkerLease(worker: WorkerKey) {
  const now = new Date();
  const key = lockSettingKey(worker);
  const existing = await prisma.setting.findUnique({
    where: { key },
    select: {
      id: true,
      value: true,
      updated_at: true
    }
  });

  if (!existing) {
    try {
      await prisma.setting.create({
        data: {
          key,
          type: SettingType.GENERAL,
          is_encrypted: false,
          value: leasePayload(worker, now)
        }
      });
      return true;
    } catch {
      // Create failed (most plausibly a concurrent insert on the same key):
      // another instance got the lease first.
      return false;
    }
  }

  const parsed = parseLeasePayload(existing.value);
  // Unparseable payloads yield leaseUntilMs = 0, i.e. treated as expired.
  const leaseUntilMs = parsed ? Date.parse(parsed.lease_until) : 0;
  const activeOwner =
    parsed &&
    parsed.owner_id &&
    parsed.owner_id !== schedulerInstanceId &&
    Number.isFinite(leaseUntilMs) &&
    leaseUntilMs > now.getTime();

  if (activeOwner) {
    // Someone else holds a live lease; back off.
    return false;
  }

  const updated = await prisma.setting.updateMany({
    where: {
      id: existing.id,
      updated_at: existing.updated_at
    },
    data: {
      value: leasePayload(worker, now, parsed?.acquired_at)
    }
  });

  // count === 1: our optimistic update won. count === 0: a concurrent
  // writer touched the row first (updated_at no longer matched).
  return updated.count === 1;
}
|
||||
|
||||
async function renewWorkerLease(worker: WorkerKey) {
|
||||
const now = new Date();
|
||||
const key = lockSettingKey(worker);
|
||||
const existing = await prisma.setting.findUnique({
|
||||
where: { key },
|
||||
select: {
|
||||
id: true,
|
||||
value: true,
|
||||
updated_at: true
|
||||
}
|
||||
});
|
||||
|
||||
if (!existing) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const parsed = parseLeasePayload(existing.value);
|
||||
if (!parsed || parsed.owner_id !== schedulerInstanceId) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const updated = await prisma.setting.updateMany({
|
||||
where: {
|
||||
id: existing.id,
|
||||
updated_at: existing.updated_at
|
||||
},
|
||||
data: {
|
||||
value: leasePayload(worker, now, parsed.acquired_at)
|
||||
}
|
||||
});
|
||||
|
||||
return updated.count === 1;
|
||||
}
|
||||
|
||||
/**
 * Voluntarily releases this instance's lease after a run completes.
 *
 * Instead of deleting the row, writes a "tombstone" payload with an empty
 * owner and an already-expired lease_until so the next acquire succeeds
 * immediately. No-ops when the lease row is missing or owned by another
 * instance. Best-effort: the updateMany result is intentionally ignored —
 * losing the optimistic-concurrency race just means another writer already
 * took or cleared the lease.
 */
async function releaseWorkerLease(worker: WorkerKey) {
  const key = lockSettingKey(worker);
  const existing = await prisma.setting.findUnique({
    where: { key },
    select: {
      id: true,
      value: true,
      updated_at: true
    }
  });

  if (!existing) {
    return;
  }

  const parsed = parseLeasePayload(existing.value);
  if (!parsed || parsed.owner_id !== schedulerInstanceId) {
    // Not ours to release.
    return;
  }

  const now = new Date();
  // One second in the past guarantees the stored lease reads as expired.
  const leaseExpired = new Date(now.getTime() - 1000).toISOString();
  await prisma.setting.updateMany({
    where: {
      id: existing.id,
      updated_at: existing.updated_at
    },
    data: {
      value: {
        ...parsed,
        owner_id: "",
        lease_until: leaseExpired,
        heartbeat_at: now.toISOString()
      }
    }
  });
}
|
||||
|
||||
function stopAllScheduledJobs() {
|
||||
const entries = Object.entries(scheduledJobs) as Array<[WorkerKey, ScheduledTask]>;
|
||||
for (const [, task] of entries) {
|
||||
try {
|
||||
task.stop();
|
||||
task.destroy();
|
||||
} catch {
|
||||
task.stop();
|
||||
}
|
||||
}
|
||||
scheduledJobs = {};
|
||||
}
|
||||
|
||||
function setWorkerDisabledState(config: SchedulerConfig) {
|
||||
schedulerState.workers.billing = {
|
||||
...schedulerState.workers.billing,
|
||||
cron: config.billing_cron,
|
||||
status: "disabled"
|
||||
};
|
||||
schedulerState.workers.backup = {
|
||||
...schedulerState.workers.backup,
|
||||
cron: config.backup_cron,
|
||||
status: "disabled"
|
||||
};
|
||||
schedulerState.workers.power = {
|
||||
...schedulerState.workers.power,
|
||||
cron: config.power_schedule_cron,
|
||||
status: "disabled"
|
||||
};
|
||||
schedulerState.workers.monitoring = {
|
||||
...schedulerState.workers.monitoring,
|
||||
cron: config.monitoring_cron,
|
||||
status: "disabled"
|
||||
};
|
||||
schedulerState.workers.operation_retry = {
|
||||
...schedulerState.workers.operation_retry,
|
||||
cron: config.operation_retry_cron,
|
||||
status: "disabled"
|
||||
};
|
||||
}
|
||||
|
||||
/**
 * Executes one worker run guarded by both an in-process overlap check and
 * the cross-instance lease, updating the runtime snapshot throughout.
 *
 * Flow: skip if already running in this process → skip if the lease is
 * held elsewhere → mark "running" and heartbeat the lease on an interval
 * while `execute` runs → record success/failure with duration → always
 * clear the heartbeat, the in-process flag, and release the lease.
 */
async function runWorker(worker: WorkerKey, execute: () => Promise<string>) {
  if (activeWorkerRuns.has(worker)) {
    // Same-process overlap: leave the worker "scheduled" and note the skip.
    schedulerState.workers[worker] = {
      ...schedulerState.workers[worker],
      status: "scheduled",
      last_message: "Skipped: worker already running in this process"
    };
    return;
  }

  const acquired = await acquireWorkerLease(worker);
  if (!acquired) {
    // Another scheduler instance holds the lease.
    schedulerState.workers[worker] = {
      ...schedulerState.workers[worker],
      status: "scheduled",
      last_message: "Skipped: lease held by another scheduler instance"
    };
    return;
  }

  activeWorkerRuns.add(worker);
  const startedAt = Date.now();
  schedulerState.workers[worker] = {
    ...schedulerState.workers[worker],
    status: "running",
    last_error: null
  };

  // Renew no slower than half the lease window (so the lease cannot lapse
  // mid-run) and no faster than once per second.
  const heartbeatEveryMs = Math.max(1_000, Math.min(env.SCHEDULER_HEARTBEAT_MS, Math.floor(env.SCHEDULER_LEASE_MS / 2)));
  const heartbeat = setInterval(() => {
    // Fire-and-forget: a failed renewal only risks a takeover, not a crash.
    void renewWorkerLease(worker);
  }, heartbeatEveryMs);

  try {
    const message = await execute();
    schedulerState.workers[worker] = {
      ...schedulerState.workers[worker],
      status: "success",
      last_run_at: new Date().toISOString(),
      last_duration_ms: Date.now() - startedAt,
      last_message: message,
      last_error: null
    };
  } catch (error) {
    const message = error instanceof Error ? error.message : "Unknown scheduler error";
    schedulerState.workers[worker] = {
      ...schedulerState.workers[worker],
      status: "failed",
      last_run_at: new Date().toISOString(),
      last_duration_ms: Date.now() - startedAt,
      last_error: message
    };
  } finally {
    clearInterval(heartbeat);
    activeWorkerRuns.delete(worker);
    await releaseWorkerLease(worker);
  }
}
|
||||
|
||||
function registerWorker(worker: WorkerKey, cronExpression: string, execute: () => Promise<string>) {
|
||||
schedulerState.workers[worker] = {
|
||||
...schedulerState.workers[worker],
|
||||
cron: cronExpression,
|
||||
status: "scheduled",
|
||||
last_error: null
|
||||
};
|
||||
|
||||
const task = cron.schedule(cronExpression, () => {
|
||||
void runWorker(worker, execute);
|
||||
});
|
||||
|
||||
scheduledJobs[worker] = task;
|
||||
}
|
||||
|
||||
async function readSchedulerConfigSetting() {
|
||||
const setting = await prisma.setting.findUnique({
|
||||
where: { key: "scheduler" },
|
||||
select: { value: true }
|
||||
});
|
||||
return normalizeSchedulerConfig(setting?.value);
|
||||
}
|
||||
|
||||
/** Records the active config and stamps the (re)start time on the snapshot. */
function applyRuntimeConfig(config: SchedulerConfig) {
  schedulerState.config = config;
  schedulerState.started_at = new Date().toISOString();
}
|
||||
|
||||
/**
 * (Re)builds all cron workers from the given config, or from the persisted
 * setting when none is passed. Existing tasks are always torn down first;
 * when the scheduler is disabled the workers are merely marked "disabled".
 *
 * @returns the runtime snapshot after (re)configuration.
 */
export async function configureSchedulers(config?: SchedulerConfig) {
  const resolvedConfig = config ?? (await readSchedulerConfigSetting());
  applyRuntimeConfig(resolvedConfig);

  stopAllScheduledJobs();

  if (!resolvedConfig.enable_scheduler) {
    setWorkerDisabledState(resolvedConfig);
    return getSchedulerRuntimeSnapshot();
  }

  // Billing: meter hourly usage, roll unbilled usage into invoices, then
  // flag overdue invoices.
  registerWorker("billing", resolvedConfig.billing_cron, async () => {
    await meterHourlyUsage();
    await generateInvoicesFromUnbilledUsage();
    await updateOverdueInvoices();
    return "Billing cycle completed";
  });

  // Backups: queue due backups, drain the pending-backup queue, then run
  // due snapshot jobs.
  registerWorker("backup", resolvedConfig.backup_cron, async () => {
    const queued = await processBackupSchedule();
    const backupResult = await processPendingBackups();
    const snapshotResult = await processDueSnapshotJobs();
    return `Backup queue=${queued}, backups_completed=${backupResult.completed}, backups_skipped=${backupResult.skipped}, snapshot_scanned=${snapshotResult.scanned}, snapshot_executed=${snapshotResult.executed}, snapshot_failed=${snapshotResult.failed}, snapshot_pruned=${snapshotResult.pruned}, snapshot_skipped=${snapshotResult.skipped}`;
  });

  // Power: execute due power (start/stop) schedules.
  registerWorker("power", resolvedConfig.power_schedule_cron, async () => {
    const result = await processDuePowerSchedules();
    return `Power schedules scanned=${result.scanned}, executed=${result.executed}, failed=${result.failed}, skipped=${result.skipped}`;
  });

  // Monitoring: run due health checks, then evaluate alert rules.
  registerWorker("monitoring", resolvedConfig.monitoring_cron, async () => {
    const checkResult = await processDueHealthChecks();
    const alertResult = await evaluateAlertRulesNow();
    return `Checks scanned=${checkResult.scanned}, executed=${checkResult.executed}, failed=${checkResult.failed}, skipped=${checkResult.skipped}; alerts evaluated=${alertResult.evaluated}, triggered=${alertResult.triggered}, resolved=${alertResult.resolved}`;
  });

  // Operation retries: process operations whose retry time has come due.
  registerWorker("operation_retry", resolvedConfig.operation_retry_cron, async () => {
    const retryResult = await processDueOperationRetries();
    return `Retry tasks scanned=${retryResult.scanned}, executed=${retryResult.executed}, succeeded=${retryResult.succeeded}, failed=${retryResult.failed}, rescheduled=${retryResult.rescheduled}, invalid_payload=${retryResult.invalid_payload}, skipped=${retryResult.skipped}`;
  });

  return getSchedulerRuntimeSnapshot();
}
|
||||
|
||||
/** Boot-time entry point: load the persisted config and start all cron workers. */
export async function startSchedulers() {
  await configureSchedulers();
}
|
||||
|
||||
export async function reconfigureSchedulers(config?: Partial<SchedulerConfig>) {
|
||||
const persisted = await readSchedulerConfigSetting();
|
||||
const merged = normalizeSchedulerConfig({
|
||||
...persisted,
|
||||
...(config ?? {})
|
||||
});
|
||||
return configureSchedulers(merged);
|
||||
}
|
||||
|
||||
export function getSchedulerRuntimeSnapshot() {
|
||||
return {
|
||||
generated_at: new Date().toISOString(),
|
||||
...schedulerState
|
||||
};
|
||||
}
|
||||
|
||||
export function schedulerDefaults() {
|
||||
return { ...DEFAULT_SCHEDULER_CONFIG };
|
||||
}
|
||||
20
backend/src/tests/operations.test.ts
Normal file
20
backend/src/tests/operations.test.ts
Normal file
@@ -0,0 +1,20 @@
|
||||
import test from "node:test";
import assert from "node:assert/strict";
import { nextRunAt, validateCronExpression } from "../services/operations.service";

// nextRunAt must schedule strictly after the supplied base time.
test("nextRunAt returns a future date for a valid cron expression", () => {
  const base = new Date("2026-01-01T00:00:00.000Z");
  const next = nextRunAt("*/5 * * * *", base);
  assert.ok(next instanceof Date);
  assert.ok(next.getTime() > base.getTime());
});

// Standard 5-field expressions (steps, ranges, lists) are accepted.
test("validateCronExpression accepts valid expressions", () => {
  assert.doesNotThrow(() => validateCronExpression("0 * * * *"));
  assert.doesNotThrow(() => validateCronExpression("*/10 1-23 * * 1,3,5"));
});

// Garbage and 6-field (seconds-precision) expressions are rejected.
test("validateCronExpression rejects invalid expressions", () => {
  assert.throws(() => validateCronExpression("invalid-cron"));
  assert.throws(() => validateCronExpression("* * * * * *"));
});
|
||||
19
backend/src/types/express.d.ts
vendored
Normal file
19
backend/src/types/express.d.ts
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
import type { Role } from "@prisma/client";

declare global {
  namespace Express {
    // Authenticated-user claims attached to a request.
    interface UserToken {
      id: string;
      email: string;
      role: Role;
      tenant_id?: string | null; // absent/null presumably for non-tenant (platform) users — verify against auth code
    }

    // Augments Express's Request with fields populated by app middleware.
    interface Request {
      user?: UserToken; // NOTE(review): presumably set by the auth middleware after token verification — confirm
      rawBody?: string; // raw request body, used for webhook signature verification (e.g. Paystack HMAC)
    }
  }
}

export {};
|
||||
Reference in New Issue
Block a user