proper src structure, dockerfile, entrypoint

This commit is contained in:
jarek
2025-12-29 08:40:11 +01:00
parent e536388a7a
commit ab8743bdae
556 changed files with 1390 additions and 0 deletions

View File

@@ -1,43 +0,0 @@
/**
* Audit Events Broadcasting
*
* Server-side event emitter for broadcasting audit log entries to connected SSE clients.
*/
import { EventEmitter } from 'events';
import type { AuditLogCreateData } from './db';
export interface AuditEventData extends AuditLogCreateData {
id: number;
timestamp: string;
}
// Create a singleton event emitter for audit events
class AuditEventEmitter extends EventEmitter {
constructor() {
super();
// Allow many listeners (one per connected SSE client)
this.setMaxListeners(1000);
}
emit(event: 'audit', data: AuditEventData): boolean {
return super.emit(event, data);
}
on(event: 'audit', listener: (data: AuditEventData) => void): this {
return super.on(event, listener);
}
off(event: 'audit', listener: (data: AuditEventData) => void): this {
return super.off(event, listener);
}
}
export const auditEvents = new AuditEventEmitter();
/**
* Broadcast a new audit event to all connected clients
*/
export function broadcastAuditEvent(data: AuditEventData): void {
auditEvents.emit('audit', data);
}

View File

@@ -1,307 +0,0 @@
/**
* Audit Logging Helper
*
* Provides easy-to-use functions for logging audit events from API endpoints.
* This is an Enterprise-only feature.
*/
import type { RequestEvent } from '@sveltejs/kit';
import { isEnterprise } from './license';
import { logAuditEvent, type AuditAction, type AuditEntityType, type AuditLogCreateData } from './db';
import { authorize } from './authorize';
export interface AuditContext {
userId?: number | null;
username: string;
ipAddress?: string | null;
userAgent?: string | null;
}
/**
* Extract audit context from a request event
*/
export async function getAuditContext(event: RequestEvent): Promise<AuditContext> {
const auth = await authorize(event.cookies);
// Get IP address from various headers (proxied requests)
const forwardedFor = event.request.headers.get('x-forwarded-for');
const realIp = event.request.headers.get('x-real-ip');
let ipAddress = forwardedFor?.split(',')[0]?.trim() || realIp || event.getClientAddress?.() || null;
// Convert IPv6 loopback to more readable format
if (ipAddress === '::1' || ipAddress === '::ffff:127.0.0.1') {
ipAddress = '127.0.0.1';
} else if (ipAddress?.startsWith('::ffff:')) {
// Strip IPv6 prefix from IPv4-mapped addresses
ipAddress = ipAddress.substring(7);
}
// Get user agent
const userAgent = event.request.headers.get('user-agent') || null;
return {
userId: auth.user?.id ?? null,
username: auth.user?.username ?? 'anonymous',
ipAddress,
userAgent
};
}
/**
* Log an audit event (only logs if Enterprise license is active)
*/
export async function audit(
event: RequestEvent,
action: AuditAction,
entityType: AuditEntityType,
options: {
entityId?: string | null;
entityName?: string | null;
environmentId?: number | null;
description?: string | null;
details?: any | null;
} = {}
): Promise<void> {
// Only log if enterprise
if (!(await isEnterprise())) return;
const ctx = await getAuditContext(event);
const data: AuditLogCreateData = {
userId: ctx.userId,
username: ctx.username,
action,
entityType: entityType,
entityId: options.entityId ?? null,
entityName: options.entityName ?? null,
environmentId: options.environmentId ?? null,
description: options.description ?? null,
details: options.details ?? null,
ipAddress: ctx.ipAddress ?? null,
userAgent: ctx.userAgent ?? null
};
try {
await logAuditEvent(data);
} catch (error) {
// Don't let audit logging errors break the main operation
console.error('Failed to log audit event:', error);
}
}
/**
* Helper for container actions
*/
export async function auditContainer(
event: RequestEvent,
action: AuditAction,
containerId: string,
containerName: string,
environmentId?: number | null,
details?: any
): Promise<void> {
await audit(event, action, 'container', {
entityId: containerId,
entityName: containerName,
environmentId,
description: `Container ${containerName} ${action}`,
details
});
}
/**
* Helper for image actions
*/
export async function auditImage(
event: RequestEvent,
action: AuditAction,
imageId: string,
imageName: string,
environmentId?: number | null,
details?: any
): Promise<void> {
await audit(event, action, 'image', {
entityId: imageId,
entityName: imageName,
environmentId,
description: `Image ${imageName} ${action}`,
details
});
}
/**
* Helper for stack actions
*/
export async function auditStack(
event: RequestEvent,
action: AuditAction,
stackName: string,
environmentId?: number | null,
details?: any
): Promise<void> {
await audit(event, action, 'stack', {
entityId: stackName,
entityName: stackName,
environmentId,
description: `Stack ${stackName} ${action}`,
details
});
}
/**
* Helper for volume actions
*/
export async function auditVolume(
event: RequestEvent,
action: AuditAction,
volumeId: string,
volumeName: string,
environmentId?: number | null,
details?: any
): Promise<void> {
await audit(event, action, 'volume', {
entityId: volumeId,
entityName: volumeName,
environmentId,
description: `Volume ${volumeName} ${action}`,
details
});
}
/**
* Helper for network actions
*/
export async function auditNetwork(
event: RequestEvent,
action: AuditAction,
networkId: string,
networkName: string,
environmentId?: number | null,
details?: any
): Promise<void> {
await audit(event, action, 'network', {
entityId: networkId,
entityName: networkName,
environmentId,
description: `Network ${networkName} ${action}`,
details
});
}
/**
* Helper for user actions
*/
export async function auditUser(
event: RequestEvent,
action: AuditAction,
userId: number,
username: string,
details?: any
): Promise<void> {
await audit(event, action, 'user', {
entityId: String(userId),
entityName: username,
description: `User ${username} ${action}`,
details
});
}
/**
* Helper for settings actions
*/
export async function auditSettings(
event: RequestEvent,
action: AuditAction,
settingName: string,
details?: any
): Promise<void> {
await audit(event, action, 'settings', {
entityId: settingName,
entityName: settingName,
description: `Settings ${settingName} ${action}`,
details
});
}
/**
* Helper for environment actions
*/
export async function auditEnvironment(
event: RequestEvent,
action: AuditAction,
environmentId: number,
environmentName: string,
details?: any
): Promise<void> {
await audit(event, action, 'environment', {
entityId: String(environmentId),
entityName: environmentName,
environmentId,
description: `Environment ${environmentName} ${action}`,
details
});
}
/**
* Helper for registry actions
*/
export async function auditRegistry(
event: RequestEvent,
action: AuditAction,
registryId: number,
registryName: string,
details?: any
): Promise<void> {
await audit(event, action, 'registry', {
entityId: String(registryId),
entityName: registryName,
description: `Registry ${registryName} ${action}`,
details
});
}
/**
* Helper for auth actions (login/logout)
*/
export async function auditAuth(
event: RequestEvent,
action: 'login' | 'logout',
username: string,
details?: any
): Promise<void> {
// For login/logout, we want to log even without a session
if (!(await isEnterprise())) return;
const forwardedFor = event.request.headers.get('x-forwarded-for');
const realIp = event.request.headers.get('x-real-ip');
let ipAddress = forwardedFor?.split(',')[0]?.trim() || realIp || event.getClientAddress?.() || null;
// Convert IPv6 loopback to more readable format
if (ipAddress === '::1' || ipAddress === '::ffff:127.0.0.1') {
ipAddress = '127.0.0.1';
} else if (ipAddress?.startsWith('::ffff:')) {
ipAddress = ipAddress.substring(7);
}
const userAgent = event.request.headers.get('user-agent') || null;
const data: AuditLogCreateData = {
userId: null, // Will be set from details if available
username,
action,
entityType: 'user',
entityId: null,
entityName: username,
environmentId: null,
description: `User ${username} ${action}`,
details,
ipAddress: ipAddress,
userAgent: userAgent
};
try {
await logAuditEvent(data);
} catch (error) {
console.error('Failed to log audit event:', error);
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,256 +0,0 @@
/**
* Centralized Authorization Service
*
* This module provides a unified interface for all authorization checks in the application.
* It consolidates the authorization logic that was previously scattered across API endpoints.
*
* Feature Access Model:
* - Free Edition: SSO/OIDC + local users, all authenticated users have full access
* - Enterprise Edition: LDAP, MFA, RBAC with fine-grained permissions
*
* Usage:
* import { authorize } from '$lib/server/authorize';
*
* // In API handler:
* const auth = authorize(cookies);
*
* // Check authentication only
* if (!auth.isAuthenticated) {
* return json({ error: 'Authentication required' }, { status: 401 });
* }
*
* // Check specific permission
* if (!await auth.can('settings', 'edit')) {
* return json({ error: 'Permission denied' }, { status: 403 });
* }
*
* // Check permission in environment context
* if (!await auth.canAccessEnvironment(envId)) {
* return json({ error: 'Access denied' }, { status: 403 });
* }
*
* // Require enterprise license
* if (!auth.isEnterprise) {
* return json({ error: 'Enterprise license required' }, { status: 403 });
* }
*/
import type { Cookies } from '@sveltejs/kit';
import type { Permissions } from './db';
import { getUserAccessibleEnvironments, userCanAccessEnvironment, userHasAdminRole } from './db';
import { validateSession, isAuthEnabled, checkPermission, type AuthenticatedUser } from './auth';
import { isEnterprise } from './license';
export interface AuthorizationContext {
/** Whether authentication is enabled globally */
authEnabled: boolean;
/** Whether the request is authenticated (has valid session) */
isAuthenticated: boolean;
/** The authenticated user, if any */
user: AuthenticatedUser | null;
/** Whether the user has admin privileges */
isAdmin: boolean;
/** Whether an enterprise license is active */
isEnterprise: boolean;
/**
* Check if the user has a specific permission.
* In free edition, all authenticated users have full access.
* In enterprise edition, checks RBAC permissions.
* @param environmentId - Optional: check permission in context of specific environment
*/
can: (resource: keyof Permissions, action: string, environmentId?: number) => Promise<boolean>;
/**
* Check if user can access a specific environment.
* Returns true if user has any role that applies to this environment.
*/
canAccessEnvironment: (environmentId: number) => Promise<boolean>;
/**
* Get list of environment IDs the user can access.
* Returns null if user has access to ALL environments.
* Returns empty array if user has no access.
*/
getAccessibleEnvironmentIds: () => Promise<number[] | null>;
/**
* Check if user can manage other users.
* Returns true if:
* - Auth is disabled (initial setup)
* - User is admin
* - Free edition (all users have full access)
* - Enterprise edition with users permission
*/
canManageUsers: () => Promise<boolean>;
/**
* Check if user can manage settings (OIDC, LDAP configs, etc).
* Returns true if:
* - Auth is disabled (initial setup)
* - User is authenticated and (free edition or has settings permission)
*/
canManageSettings: () => Promise<boolean>;
/**
* Check if user can view audit logs.
* Audit logs are an enterprise-only feature.
* Returns true if:
* - Enterprise license is active AND
* - (User is admin OR has audit_logs view permission)
*/
canViewAuditLog: () => Promise<boolean>;
}
/**
* Create an authorization context from cookies.
* This is the main entry point for authorization checks.
*/
export async function authorize(cookies: Cookies): Promise<AuthorizationContext> {
const authEnabled = await isAuthEnabled();
const enterprise = await isEnterprise();
const user = authEnabled ? await validateSession(cookies) : null;
// Determine admin status:
// - Free edition: all authenticated users are effectively admins (full access)
// - Enterprise edition: check if user has Admin role assigned
let isAdmin = false;
if (user) {
if (!enterprise) {
// Free edition: everyone is admin
isAdmin = true;
} else {
// Enterprise: check for Admin role assignment
isAdmin = await userHasAdminRole(user.id);
}
}
const ctx: AuthorizationContext = {
authEnabled,
isAuthenticated: !!user,
user,
isAdmin,
isEnterprise: enterprise,
async can(resource: keyof Permissions, action: string, environmentId?: number): Promise<boolean> {
// If auth is disabled, allow everything (initial setup)
if (!authEnabled) return true;
// Must be authenticated
if (!user) return false;
// Use the existing checkPermission which already handles free vs enterprise
// Pass environmentId for environment-scoped permission checks
return checkPermission(user, resource, action, environmentId);
},
async canAccessEnvironment(environmentId: number): Promise<boolean> {
// If auth is disabled, allow everything (initial setup)
if (!authEnabled) return true;
// Must be authenticated
if (!user) return false;
// Admins can access all environments
if (user.isAdmin) return true;
// In free edition, all authenticated users have full access
if (!enterprise) return true;
// In enterprise, check if user has any role for this environment
return userCanAccessEnvironment(user.id, environmentId);
},
async getAccessibleEnvironmentIds(): Promise<number[] | null> {
// If auth is disabled, return null (all environments)
if (!authEnabled) return null;
// Must be authenticated
if (!user) return [];
// Admins can access all environments
if (user.isAdmin) return null;
// In free edition, all authenticated users have full access
if (!enterprise) return null;
// In enterprise, get accessible environment IDs
return getUserAccessibleEnvironments(user.id);
},
async canManageUsers(): Promise<boolean> {
// If auth is disabled, allow (initial setup when no users exist)
if (!authEnabled) return true;
// Must be authenticated
if (!user) return false;
// Admins can always manage users
if (user.isAdmin) return true;
// In free edition, all authenticated users have full access
if (!enterprise) return true;
// In enterprise, check RBAC
return checkPermission(user, 'users', 'create');
},
async canManageSettings(): Promise<boolean> {
// If auth is disabled, allow (initial setup)
if (!authEnabled) return true;
// Must be authenticated
if (!user) return false;
// In free edition, all authenticated users have full access
if (!enterprise) return true;
// In enterprise, check RBAC
return checkPermission(user, 'settings', 'edit');
},
async canViewAuditLog(): Promise<boolean> {
// Audit logs are enterprise-only
if (!enterprise) return false;
// If auth is disabled, allow access (enterprise-only protection is enough)
if (!authEnabled) return true;
// Must be authenticated
if (!user) return false;
// Admins can always view audit logs
if (user.isAdmin) return true;
// Check for audit_logs permission
return checkPermission(user, 'audit_logs' as keyof Permissions, 'view');
}
};
return ctx;
}
/**
* Helper to create a standard 401 response
*/
export function unauthorized() {
return { error: 'Authentication required', status: 401 };
}
/**
* Helper to create a standard 403 response
*/
export function forbidden(reason: string = 'Permission denied') {
return { error: reason, status: 403 };
}
/**
* Helper to create enterprise required response
*/
export function enterpriseRequired() {
return { error: 'Enterprise license required', status: 403 };
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,175 +0,0 @@
/**
* Database Connection Module
*
* Provides a unified database connection using Bun's SQL API.
* Supports both SQLite (default) and PostgreSQL (via DATABASE_URL).
*/
import { SQL } from 'bun';
import { existsSync, mkdirSync, readFileSync } from 'node:fs';
import { join, dirname } from 'node:path';
import { fileURLToPath } from 'node:url';
const __dirname = dirname(fileURLToPath(import.meta.url));
// Database configuration
const databaseUrl = process.env.DATABASE_URL;
const dataDir = process.env.DATA_DIR || './data';
// Detect database type
export const isPostgres = databaseUrl && (databaseUrl.startsWith('postgres://') || databaseUrl.startsWith('postgresql://'));
export const isSqlite = !isPostgres;
/**
* Read a SQL file from the appropriate sql directory.
*/
function readSql(filename: string): string {
const sqlDir = isPostgres ? 'postgres' : 'sqlite';
return readFileSync(join(__dirname, sqlDir, 'sql', filename), 'utf-8');
}
/**
* Validate PostgreSQL connection URL format.
*/
function validatePostgresUrl(url: string): void {
try {
const parsed = new URL(url);
if (parsed.protocol !== 'postgres:' && parsed.protocol !== 'postgresql:') {
exitWithError(`Invalid protocol "${parsed.protocol}". Expected "postgres:" or "postgresql:"`, url);
}
if (!parsed.hostname) {
exitWithError('Missing hostname in DATABASE_URL', url);
}
if (!parsed.pathname || parsed.pathname === '/') {
exitWithError('Missing database name in DATABASE_URL', url);
}
} catch {
exitWithError('Invalid URL format', url);
}
}
/**
* Print connection error and exit.
*/
function exitWithError(error: string, url?: string): never {
console.error('\n' + '='.repeat(70));
console.error('DATABASE CONNECTION ERROR');
console.error('='.repeat(70));
console.error(`\nError: ${error}`);
if (url) {
try {
const parsed = new URL(url);
if (parsed.password) parsed.password = '***';
console.error(`\nProvided URL: ${parsed.toString()}`);
} catch {
console.error(`\nProvided URL: ${url.replace(/:[^:@]+@/, ':***@')}`);
}
}
console.error('\n' + '-'.repeat(70));
console.error('DATABASE_URL format:');
console.error('-'.repeat(70));
console.error('\n postgres://USER:PASSWORD@HOST:PORT/DATABASE');
console.error('\nExamples:');
console.error(' postgres://dockhand:secret@localhost:5432/dockhand');
console.error(' postgres://admin:p4ssw0rd@192.168.1.100:5432/dockhand');
console.error(' postgresql://user:pass@db.example.com/mydb?sslmode=require');
console.error('\n' + '-'.repeat(70));
console.error('To use SQLite instead, remove the DATABASE_URL environment variable.');
console.error('='.repeat(70) + '\n');
process.exit(1);
}
/**
* Create the database connection.
*/
function createConnection(): SQL {
if (isPostgres) {
// Validate PostgreSQL URL
validatePostgresUrl(databaseUrl!);
console.log('Connecting to PostgreSQL database...');
try {
const sql = new SQL(databaseUrl!);
return sql;
} catch (error) {
const message = error instanceof Error ? error.message : String(error);
exitWithError(`Failed to connect to PostgreSQL: ${message}`, databaseUrl);
}
} else {
// SQLite: Ensure db directory exists
const dbDir = join(dataDir, 'db');
if (!existsSync(dbDir)) {
mkdirSync(dbDir, { recursive: true });
}
const dbPath = join(dbDir, 'dockhand.db');
console.log(`Using SQLite database at: ${dbPath}`);
const sql = new SQL(`sqlite://${dbPath}`);
// Enable WAL mode for better performance
sql.run('PRAGMA journal_mode = WAL');
return sql;
}
}
/**
* Initialize the database schema.
*/
async function initializeSchema(sql: SQL): Promise<void> {
try {
// Create schema (tables)
await sql.run(readSql('schema.sql'));
// Create indexes
await sql.run(readSql('indexes.sql'));
// Insert seed data
await sql.run(readSql('seed.sql'));
// Update system roles
await sql.run(readSql('system-roles.sql'));
// Run maintenance
await sql.run(readSql('maintenance.sql'));
console.log(`Database initialized successfully (${isPostgres ? 'PostgreSQL' : 'SQLite'})`);
} catch (error) {
const message = error instanceof Error ? error.message : String(error);
console.error('Failed to initialize database schema:', message);
throw error;
}
}
// Create and export the database connection
export const sql = createConnection();
// Initialize schema (runs async but we handle it)
initializeSchema(sql).catch((error) => {
console.error('Database initialization failed:', error);
process.exit(1);
});
/**
* Helper to convert SQLite integer booleans to JS booleans.
* PostgreSQL returns actual booleans, SQLite returns 0/1.
*/
export function toBool(value: any): boolean {
if (typeof value === 'boolean') return value;
return Boolean(value);
}
/**
* Helper to convert JS boolean to database value.
* PostgreSQL uses boolean, SQLite uses 0/1.
*/
export function fromBool(value: boolean): boolean | number {
return isPostgres ? value : (value ? 1 : 0);
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,567 +0,0 @@
/**
* Drizzle ORM Schema for Dockhand
*
* This schema supports both SQLite and PostgreSQL through Drizzle's
* database-agnostic schema definitions.
*/
import {
sqliteTable,
text,
integer,
real,
primaryKey,
unique,
index
} from 'drizzle-orm/sqlite-core';
import { sql } from 'drizzle-orm';
// =============================================================================
// CORE TABLES
// =============================================================================
export const environments = sqliteTable('environments', {
id: integer('id').primaryKey({ autoIncrement: true }),
name: text('name').notNull().unique(),
host: text('host'),
port: integer('port').default(2375),
protocol: text('protocol').default('http'),
tlsCa: text('tls_ca'),
tlsCert: text('tls_cert'),
tlsKey: text('tls_key'),
tlsSkipVerify: integer('tls_skip_verify', { mode: 'boolean' }).default(false),
icon: text('icon').default('globe'),
collectActivity: integer('collect_activity', { mode: 'boolean' }).default(true),
collectMetrics: integer('collect_metrics', { mode: 'boolean' }).default(true),
highlightChanges: integer('highlight_changes', { mode: 'boolean' }).default(true),
labels: text('labels'), // JSON array of label strings for categorization
// Connection settings
connectionType: text('connection_type').default('socket'), // 'socket' | 'direct' | 'hawser-standard' | 'hawser-edge'
socketPath: text('socket_path').default('/var/run/docker.sock'), // Unix socket path for 'socket' connection type
hawserToken: text('hawser_token'), // Plain-text token for hawser-standard auth
hawserLastSeen: text('hawser_last_seen'),
hawserAgentId: text('hawser_agent_id'),
hawserAgentName: text('hawser_agent_name'),
hawserVersion: text('hawser_version'),
hawserCapabilities: text('hawser_capabilities'), // JSON array: ["compose", "exec", "metrics"]
createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`),
updatedAt: text('updated_at').default(sql`CURRENT_TIMESTAMP`)
});
export const hawserTokens = sqliteTable('hawser_tokens', {
id: integer('id').primaryKey({ autoIncrement: true }),
token: text('token').notNull().unique(), // Hashed token
tokenPrefix: text('token_prefix').notNull(), // First 8 chars for identification
name: text('name').notNull(),
environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'cascade' }),
isActive: integer('is_active', { mode: 'boolean' }).default(true),
lastUsed: text('last_used'),
createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`),
expiresAt: text('expires_at')
});
export const registries = sqliteTable('registries', {
id: integer('id').primaryKey({ autoIncrement: true }),
name: text('name').notNull().unique(),
url: text('url').notNull(),
username: text('username'),
password: text('password'),
isDefault: integer('is_default', { mode: 'boolean' }).default(false),
createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`),
updatedAt: text('updated_at').default(sql`CURRENT_TIMESTAMP`)
});
export const settings = sqliteTable('settings', {
key: text('key').primaryKey(),
value: text('value').notNull(),
updatedAt: text('updated_at').default(sql`CURRENT_TIMESTAMP`)
});
// =============================================================================
// EVENT TRACKING TABLES
// =============================================================================
export const stackEvents = sqliteTable('stack_events', {
id: integer('id').primaryKey({ autoIncrement: true }),
environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'cascade' }),
stackName: text('stack_name').notNull(),
eventType: text('event_type').notNull(),
timestamp: text('timestamp').default(sql`CURRENT_TIMESTAMP`),
metadata: text('metadata')
});
export const hostMetrics = sqliteTable('host_metrics', {
id: integer('id').primaryKey({ autoIncrement: true }),
environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'cascade' }),
cpuPercent: real('cpu_percent').notNull(),
memoryPercent: real('memory_percent').notNull(),
memoryUsed: integer('memory_used'),
memoryTotal: integer('memory_total'),
timestamp: text('timestamp').default(sql`CURRENT_TIMESTAMP`)
}, (table) => ({
envTimestampIdx: index('host_metrics_env_timestamp_idx').on(table.environmentId, table.timestamp)
}));
// =============================================================================
// CONFIGURATION TABLES
// =============================================================================
export const configSets = sqliteTable('config_sets', {
id: integer('id').primaryKey({ autoIncrement: true }),
name: text('name').notNull().unique(),
description: text('description'),
envVars: text('env_vars'),
labels: text('labels'),
ports: text('ports'),
volumes: text('volumes'),
networkMode: text('network_mode').default('bridge'),
restartPolicy: text('restart_policy').default('no'),
createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`),
updatedAt: text('updated_at').default(sql`CURRENT_TIMESTAMP`)
});
export const autoUpdateSettings = sqliteTable('auto_update_settings', {
id: integer('id').primaryKey({ autoIncrement: true }),
environmentId: integer('environment_id').references(() => environments.id),
containerName: text('container_name').notNull(),
enabled: integer('enabled', { mode: 'boolean' }).default(false),
scheduleType: text('schedule_type').default('daily'),
cronExpression: text('cron_expression'),
vulnerabilityCriteria: text('vulnerability_criteria').default('never'), // 'never' | 'any' | 'critical_high' | 'critical' | 'more_than_current'
lastChecked: text('last_checked'),
lastUpdated: text('last_updated'),
createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`),
updatedAt: text('updated_at').default(sql`CURRENT_TIMESTAMP`)
}, (table) => ({
envContainerUnique: unique().on(table.environmentId, table.containerName)
}));
export const notificationSettings = sqliteTable('notification_settings', {
id: integer('id').primaryKey({ autoIncrement: true }),
type: text('type').notNull(),
name: text('name').notNull(),
enabled: integer('enabled', { mode: 'boolean' }).default(true),
config: text('config').notNull(),
eventTypes: text('event_types'),
createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`),
updatedAt: text('updated_at').default(sql`CURRENT_TIMESTAMP`)
});
export const environmentNotifications = sqliteTable('environment_notifications', {
id: integer('id').primaryKey({ autoIncrement: true }),
environmentId: integer('environment_id').notNull().references(() => environments.id, { onDelete: 'cascade' }),
notificationId: integer('notification_id').notNull().references(() => notificationSettings.id, { onDelete: 'cascade' }),
enabled: integer('enabled', { mode: 'boolean' }).default(true),
eventTypes: text('event_types'),
createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`),
updatedAt: text('updated_at').default(sql`CURRENT_TIMESTAMP`)
}, (table) => ({
envNotifUnique: unique().on(table.environmentId, table.notificationId)
}));
// =============================================================================
// AUTHENTICATION TABLES
// =============================================================================
export const authSettings = sqliteTable('auth_settings', {
id: integer('id').primaryKey({ autoIncrement: true }),
authEnabled: integer('auth_enabled', { mode: 'boolean' }).default(false),
defaultProvider: text('default_provider').default('local'),
sessionTimeout: integer('session_timeout').default(86400),
createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`),
updatedAt: text('updated_at').default(sql`CURRENT_TIMESTAMP`)
});
export const users = sqliteTable('users', {
id: integer('id').primaryKey({ autoIncrement: true }),
username: text('username').notNull().unique(),
email: text('email'),
passwordHash: text('password_hash').notNull(),
displayName: text('display_name'),
avatar: text('avatar'),
authProvider: text('auth_provider').default('local'), // e.g., 'local', 'oidc:Keycloak', 'ldap:AD'
mfaEnabled: integer('mfa_enabled', { mode: 'boolean' }).default(false),
mfaSecret: text('mfa_secret'),
isActive: integer('is_active', { mode: 'boolean' }).default(true),
lastLogin: text('last_login'),
createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`),
updatedAt: text('updated_at').default(sql`CURRENT_TIMESTAMP`)
});
export const sessions = sqliteTable('sessions', {
id: text('id').primaryKey(),
userId: integer('user_id').notNull().references(() => users.id, { onDelete: 'cascade' }),
provider: text('provider').notNull(),
expiresAt: text('expires_at').notNull(),
createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`)
}, (table) => ({
userIdIdx: index('sessions_user_id_idx').on(table.userId),
expiresAtIdx: index('sessions_expires_at_idx').on(table.expiresAt)
}));
export const ldapConfig = sqliteTable('ldap_config', {
id: integer('id').primaryKey({ autoIncrement: true }),
name: text('name').notNull(),
enabled: integer('enabled', { mode: 'boolean' }).default(false),
serverUrl: text('server_url').notNull(),
bindDn: text('bind_dn'),
bindPassword: text('bind_password'),
baseDn: text('base_dn').notNull(),
userFilter: text('user_filter').default('(uid={{username}})'),
usernameAttribute: text('username_attribute').default('uid'),
emailAttribute: text('email_attribute').default('mail'),
displayNameAttribute: text('display_name_attribute').default('cn'),
groupBaseDn: text('group_base_dn'),
groupFilter: text('group_filter'),
adminGroup: text('admin_group'),
roleMappings: text('role_mappings'), // JSON: [{ groupDn: string, roleId: number }]
tlsEnabled: integer('tls_enabled', { mode: 'boolean' }).default(false),
tlsCa: text('tls_ca'),
createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`),
updatedAt: text('updated_at').default(sql`CURRENT_TIMESTAMP`)
});
export const oidcConfig = sqliteTable('oidc_config', {
id: integer('id').primaryKey({ autoIncrement: true }),
name: text('name').notNull(),
enabled: integer('enabled', { mode: 'boolean' }).default(false),
issuerUrl: text('issuer_url').notNull(),
clientId: text('client_id').notNull(),
clientSecret: text('client_secret').notNull(),
redirectUri: text('redirect_uri').notNull(),
scopes: text('scopes').default('openid profile email'),
usernameClaim: text('username_claim').default('preferred_username'),
emailClaim: text('email_claim').default('email'),
displayNameClaim: text('display_name_claim').default('name'),
adminClaim: text('admin_claim'),
adminValue: text('admin_value'),
roleMappingsClaim: text('role_mappings_claim').default('groups'),
roleMappings: text('role_mappings'),
createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`),
updatedAt: text('updated_at').default(sql`CURRENT_TIMESTAMP`)
});
// =============================================================================
// ROLE-BASED ACCESS CONTROL TABLES
// =============================================================================
/**
 * Role definitions. `permissions` holds a serialized permission set
 * (presumably JSON, matching other serialized columns in this schema);
 * `environmentIds` restricts the role to specific environments.
 */
export const roles = sqliteTable('roles', {
	id: integer('id').primaryKey({ autoIncrement: true }),
	name: text('name').notNull().unique(),
	description: text('description'),
	isSystem: integer('is_system', { mode: 'boolean' }).default(false), // built-in roles — presumably protected from edits/deletion; verify in API layer
	permissions: text('permissions').notNull(),
	environmentIds: text('environment_ids'), // JSON array of env IDs, null = all environments
	createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`),
	updatedAt: text('updated_at').default(sql`CURRENT_TIMESTAMP`)
});
/**
 * User-to-role assignments, optionally scoped to a single environment
 * (environmentId null = global assignment). Rows cascade away when the
 * referenced user, role or environment is deleted.
 */
export const userRoles = sqliteTable('user_roles', {
	id: integer('id').primaryKey({ autoIncrement: true }),
	userId: integer('user_id').notNull().references(() => users.id, { onDelete: 'cascade' }),
	roleId: integer('role_id').notNull().references(() => roles.id, { onDelete: 'cascade' }),
	environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'cascade' }),
	createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`)
}, (table) => ({
	// A user may hold a given role at most once per environment scope
	userRoleEnvUnique: unique().on(table.userId, table.roleId, table.environmentId)
}));
// =============================================================================
// GIT INTEGRATION TABLES
// =============================================================================
/**
 * Reusable credentials for accessing Git remotes.
 * `authType` selects which of the credential columns apply
 * (username/password vs. SSH key material); defaults to 'none'.
 */
export const gitCredentials = sqliteTable('git_credentials', {
	id: integer('id').primaryKey({ autoIncrement: true }),
	name: text('name').notNull().unique(),
	authType: text('auth_type').notNull().default('none'),
	username: text('username'),
	// NOTE(review): password / SSH key / passphrase appear to be stored in plain text — confirm encryption-at-rest expectations
	password: text('password'),
	sshPrivateKey: text('ssh_private_key'),
	sshPassphrase: text('ssh_passphrase'),
	createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`),
	updatedAt: text('updated_at').default(sql`CURRENT_TIMESTAMP`)
});
/**
 * Tracked Git repositories and their sync/auto-update configuration.
 * `lastSync` / `lastCommit` / `syncStatus` / `syncError` record the outcome
 * of the most recent sync attempt.
 */
export const gitRepositories = sqliteTable('git_repositories', {
	id: integer('id').primaryKey({ autoIncrement: true }),
	name: text('name').notNull().unique(),
	url: text('url').notNull(),
	branch: text('branch').default('main'),
	credentialId: integer('credential_id').references(() => gitCredentials.id, { onDelete: 'set null' }),
	composePath: text('compose_path').default('docker-compose.yml'),
	environmentId: integer('environment_id'), // NOTE(review): no FK reference here, unlike gitStacks.environmentId — confirm this is intentional
	autoUpdate: integer('auto_update', { mode: 'boolean' }).default(false),
	autoUpdateSchedule: text('auto_update_schedule').default('daily'),
	autoUpdateCron: text('auto_update_cron').default('0 3 * * *'), // daily at 03:00 by default
	webhookEnabled: integer('webhook_enabled', { mode: 'boolean' }).default(false),
	webhookSecret: text('webhook_secret'),
	lastSync: text('last_sync'),
	lastCommit: text('last_commit'),
	syncStatus: text('sync_status').default('pending'),
	syncError: text('sync_error'),
	createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`),
	updatedAt: text('updated_at').default(sql`CURRENT_TIMESTAMP`)
});
/**
 * Per-stack deployments sourced from a Git repository, keyed by
 * (stackName, environmentId). Carries its own sync/auto-update settings
 * independent of the parent repository's.
 */
export const gitStacks = sqliteTable('git_stacks', {
	id: integer('id').primaryKey({ autoIncrement: true }),
	stackName: text('stack_name').notNull(),
	environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'cascade' }),
	repositoryId: integer('repository_id').notNull().references(() => gitRepositories.id, { onDelete: 'cascade' }),
	composePath: text('compose_path').default('docker-compose.yml'),
	envFilePath: text('env_file_path'), // Path to .env file in repository (e.g., ".env", "config/.env.prod")
	autoUpdate: integer('auto_update', { mode: 'boolean' }).default(false),
	autoUpdateSchedule: text('auto_update_schedule').default('daily'),
	autoUpdateCron: text('auto_update_cron').default('0 3 * * *'),
	webhookEnabled: integer('webhook_enabled', { mode: 'boolean' }).default(false),
	webhookSecret: text('webhook_secret'),
	lastSync: text('last_sync'),
	lastCommit: text('last_commit'),
	syncStatus: text('sync_status').default('pending'),
	syncError: text('sync_error'),
	createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`),
	updatedAt: text('updated_at').default(sql`CURRENT_TIMESTAMP`)
}, (table) => ({
	// One git-stack record per stack name within an environment
	stackEnvUnique: unique().on(table.stackName, table.environmentId)
}));
/**
 * Records where each stack originates from ('internal' by default, or a
 * Git repository/stack). FK targets are nulled rather than cascaded so a
 * stack survives deletion of its Git source.
 */
export const stackSources = sqliteTable('stack_sources', {
	id: integer('id').primaryKey({ autoIncrement: true }),
	stackName: text('stack_name').notNull(),
	environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'cascade' }),
	sourceType: text('source_type').notNull().default('internal'),
	gitRepositoryId: integer('git_repository_id').references(() => gitRepositories.id, { onDelete: 'set null' }),
	gitStackId: integer('git_stack_id').references(() => gitStacks.id, { onDelete: 'set null' }),
	createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`),
	updatedAt: text('updated_at').default(sql`CURRENT_TIMESTAMP`)
}, (table) => ({
	// One source record per stack name within an environment
	stackSourceEnvUnique: unique().on(table.stackName, table.environmentId)
}));
/**
 * Key/value environment variables attached to a stack, unique per
 * (stackName, environmentId, key). `isSecret` flags values that should be
 * masked — presumably in the UI/API; verify where it is enforced.
 */
export const stackEnvironmentVariables = sqliteTable('stack_environment_variables', {
	id: integer('id').primaryKey({ autoIncrement: true }),
	stackName: text('stack_name').notNull(),
	environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'cascade' }),
	key: text('key').notNull(),
	value: text('value').notNull(),
	isSecret: integer('is_secret', { mode: 'boolean' }).default(false),
	createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`),
	updatedAt: text('updated_at').default(sql`CURRENT_TIMESTAMP`)
}, (table) => ({
	stackEnvVarUnique: unique().on(table.stackName, table.environmentId, table.key)
}));
// =============================================================================
// SECURITY TABLES
// =============================================================================
/**
 * Results of image vulnerability scans. Per-severity counts are stored
 * denormalized for cheap listing; full findings live in the serialized
 * `vulnerabilities` column. `error` is set when a scan fails.
 */
export const vulnerabilityScans = sqliteTable('vulnerability_scans', {
	id: integer('id').primaryKey({ autoIncrement: true }),
	environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'cascade' }),
	imageId: text('image_id').notNull(),
	imageName: text('image_name').notNull(),
	scanner: text('scanner').notNull(), // which scanner produced the result (e.g. tool name)
	scannedAt: text('scanned_at').notNull(),
	scanDuration: integer('scan_duration'), // units not stated here — presumably ms or s; confirm against writer
	criticalCount: integer('critical_count').default(0),
	highCount: integer('high_count').default(0),
	mediumCount: integer('medium_count').default(0),
	lowCount: integer('low_count').default(0),
	negligibleCount: integer('negligible_count').default(0),
	unknownCount: integer('unknown_count').default(0),
	vulnerabilities: text('vulnerabilities'),
	error: text('error'),
	createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`)
}, (table) => ({
	// Lookup of scans by (environment, image)
	envImageIdx: index('vulnerability_scans_env_image_idx').on(table.environmentId, table.imageId)
}));
// =============================================================================
// AUDIT LOGGING TABLES
// =============================================================================
/**
 * Append-only audit trail of user actions. `username` is denormalized and
 * `userId` is nulled on user deletion, so entries remain attributable
 * after the account is gone. Indexed for per-user and time-range queries.
 */
export const auditLogs = sqliteTable('audit_logs', {
	id: integer('id').primaryKey({ autoIncrement: true }),
	userId: integer('user_id').references(() => users.id, { onDelete: 'set null' }),
	username: text('username').notNull(),
	action: text('action').notNull(),
	entityType: text('entity_type').notNull(),
	entityId: text('entity_id'),
	entityName: text('entity_name'),
	environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'set null' }),
	description: text('description'),
	details: text('details'), // serialized extra context — presumably JSON; verify against writer
	ipAddress: text('ip_address'),
	userAgent: text('user_agent'),
	createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`)
}, (table) => ({
	userIdIdx: index('audit_logs_user_id_idx').on(table.userId),
	createdAtIdx: index('audit_logs_created_at_idx').on(table.createdAt)
}));
// =============================================================================
// CONTAINER ACTIVITY TABLES
// =============================================================================
/**
 * Stream of container lifecycle events captured per environment.
 * `actorAttributes` holds serialized event attributes — presumably the
 * Docker event actor attributes; confirm against the collector.
 */
export const containerEvents = sqliteTable('container_events', {
	id: integer('id').primaryKey({ autoIncrement: true }),
	environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'cascade' }),
	containerId: text('container_id').notNull(),
	containerName: text('container_name'),
	image: text('image'),
	action: text('action').notNull(),
	actorAttributes: text('actor_attributes'),
	timestamp: text('timestamp').notNull(), // event time, distinct from row insertion time (createdAt)
	createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`)
}, (table) => ({
	// Time-range queries scoped to an environment
	envTimestampIdx: index('container_events_env_timestamp_idx').on(table.environmentId, table.timestamp)
}));
// =============================================================================
// SCHEDULE EXECUTION TABLES
// =============================================================================
/**
 * Execution history for scheduled jobs (container updates, git stack
 * syncs, system cleanup). One row per run, recording trigger, timing,
 * outcome and captured logs.
 */
export const scheduleExecutions = sqliteTable('schedule_executions', {
	id: integer('id').primaryKey({ autoIncrement: true }),
	// Link to the scheduled job
	scheduleType: text('schedule_type').notNull(), // 'container_update' | 'git_stack_sync' | 'system_cleanup'
	scheduleId: integer('schedule_id').notNull(), // ID in autoUpdateSettings or gitStacks, or 0 for system jobs
	environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'cascade' }),
	// What ran
	entityName: text('entity_name').notNull(), // container name or stack name
	// When and how
	triggeredBy: text('triggered_by').notNull(), // 'cron' | 'webhook' | 'manual'
	triggeredAt: text('triggered_at').notNull(),
	startedAt: text('started_at'),
	completedAt: text('completed_at'),
	duration: integer('duration'), // milliseconds
	// Result
	status: text('status').notNull(), // 'queued' | 'running' | 'success' | 'failed' | 'skipped'
	errorMessage: text('error_message'),
	// Details
	details: text('details'), // JSON with execution details
	logs: text('logs'), // Execution logs/output
	createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`)
}, (table) => ({
	// Look up the run history for one schedule
	typeIdIdx: index('schedule_executions_type_id_idx').on(table.scheduleType, table.scheduleId)
}));
// =============================================================================
// PENDING CONTAINER UPDATES TABLE
// =============================================================================
/**
 * Containers detected as having an image update available, one row per
 * (environment, container). `checkedAt` records when the check last ran.
 */
export const pendingContainerUpdates = sqliteTable('pending_container_updates', {
	id: integer('id').primaryKey({ autoIncrement: true }),
	environmentId: integer('environment_id').notNull().references(() => environments.id, { onDelete: 'cascade' }),
	containerId: text('container_id').notNull(),
	containerName: text('container_name').notNull(),
	currentImage: text('current_image').notNull(),
	checkedAt: text('checked_at').default(sql`CURRENT_TIMESTAMP`),
	createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`)
}, (table) => ({
	// At most one pending-update row per container per environment
	envContainerUnique: unique().on(table.environmentId, table.containerId)
}));
// =============================================================================
// USER PREFERENCES TABLE (unified key-value store)
// =============================================================================
/**
 * Key-value preference store, scoped by user (null = shared) and
 * environment (null = global). Unique on (userId, environmentId, key) so
 * each scope holds one value per key.
 */
export const userPreferences = sqliteTable('user_preferences', {
	id: integer('id').primaryKey({ autoIncrement: true }),
	userId: integer('user_id').references(() => users.id, { onDelete: 'cascade' }), // NULL = shared (free edition), set = per-user (enterprise)
	environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'cascade' }), // NULL for global prefs
	key: text('key').notNull(), // e.g., 'dashboard_layout', 'logs_favorites'
	value: text('value').notNull(), // JSON value
	createdAt: text('created_at').default(sql`CURRENT_TIMESTAMP`),
	updatedAt: text('updated_at').default(sql`CURRENT_TIMESTAMP`)
}, (table) => [
	unique().on(table.userId, table.environmentId, table.key)
]);
// =============================================================================
// TYPE EXPORTS
// =============================================================================
// Row (select) and insert types inferred from the table definitions above.
// `X` is the shape of a selected row; `NewX` is the shape accepted by insert
// (columns with defaults become optional). Keep this list in sync when
// adding or removing tables.
export type Environment = typeof environments.$inferSelect;
export type NewEnvironment = typeof environments.$inferInsert;
export type Registry = typeof registries.$inferSelect;
export type NewRegistry = typeof registries.$inferInsert;
export type HawserToken = typeof hawserTokens.$inferSelect;
export type NewHawserToken = typeof hawserTokens.$inferInsert;
export type Setting = typeof settings.$inferSelect;
export type NewSetting = typeof settings.$inferInsert;
export type User = typeof users.$inferSelect;
export type NewUser = typeof users.$inferInsert;
export type Session = typeof sessions.$inferSelect;
export type NewSession = typeof sessions.$inferInsert;
export type Role = typeof roles.$inferSelect;
export type NewRole = typeof roles.$inferInsert;
export type UserRole = typeof userRoles.$inferSelect;
export type NewUserRole = typeof userRoles.$inferInsert;
export type OidcConfig = typeof oidcConfig.$inferSelect;
export type NewOidcConfig = typeof oidcConfig.$inferInsert;
export type LdapConfig = typeof ldapConfig.$inferSelect;
export type NewLdapConfig = typeof ldapConfig.$inferInsert;
export type AuthSetting = typeof authSettings.$inferSelect;
export type NewAuthSetting = typeof authSettings.$inferInsert;
export type ConfigSet = typeof configSets.$inferSelect;
export type NewConfigSet = typeof configSets.$inferInsert;
export type NotificationSetting = typeof notificationSettings.$inferSelect;
export type NewNotificationSetting = typeof notificationSettings.$inferInsert;
export type EnvironmentNotification = typeof environmentNotifications.$inferSelect;
export type NewEnvironmentNotification = typeof environmentNotifications.$inferInsert;
export type GitCredential = typeof gitCredentials.$inferSelect;
export type NewGitCredential = typeof gitCredentials.$inferInsert;
export type GitRepository = typeof gitRepositories.$inferSelect;
export type NewGitRepository = typeof gitRepositories.$inferInsert;
export type GitStack = typeof gitStacks.$inferSelect;
export type NewGitStack = typeof gitStacks.$inferInsert;
export type StackSource = typeof stackSources.$inferSelect;
export type NewStackSource = typeof stackSources.$inferInsert;
export type VulnerabilityScan = typeof vulnerabilityScans.$inferSelect;
export type NewVulnerabilityScan = typeof vulnerabilityScans.$inferInsert;
export type AuditLog = typeof auditLogs.$inferSelect;
export type NewAuditLog = typeof auditLogs.$inferInsert;
export type ContainerEvent = typeof containerEvents.$inferSelect;
export type NewContainerEvent = typeof containerEvents.$inferInsert;
export type HostMetric = typeof hostMetrics.$inferSelect;
export type NewHostMetric = typeof hostMetrics.$inferInsert;
export type StackEvent = typeof stackEvents.$inferSelect;
export type NewStackEvent = typeof stackEvents.$inferInsert;
export type AutoUpdateSetting = typeof autoUpdateSettings.$inferSelect;
export type NewAutoUpdateSetting = typeof autoUpdateSettings.$inferInsert;
export type UserPreference = typeof userPreferences.$inferSelect;
export type NewUserPreference = typeof userPreferences.$inferInsert;
export type ScheduleExecution = typeof scheduleExecutions.$inferSelect;
export type NewScheduleExecution = typeof scheduleExecutions.$inferInsert;
export type StackEnvironmentVariable = typeof stackEnvironmentVariables.$inferSelect;
export type NewStackEnvironmentVariable = typeof stackEnvironmentVariables.$inferInsert;
export type PendingContainerUpdate = typeof pendingContainerUpdates.$inferSelect;
export type NewPendingContainerUpdate = typeof pendingContainerUpdates.$inferInsert;

View File

@@ -1,482 +0,0 @@
/**
* Drizzle ORM Schema for Dockhand - PostgreSQL Version
*
* This schema is used for PostgreSQL migrations and is a mirror of the SQLite schema
* with PostgreSQL-specific types and syntax.
*/
import {
pgTable,
text,
integer,
serial,
boolean,
doublePrecision,
bigint,
timestamp,
unique,
index
} from 'drizzle-orm/pg-core';
import { sql } from 'drizzle-orm';
// =============================================================================
// CORE TABLES
// =============================================================================
/**
 * Docker environments (hosts/agents) managed by the application.
 * PostgreSQL mirror of the SQLite `environments` table: boolean/timestamp
 * columns use native pg types instead of integer/text.
 */
export const environments = pgTable('environments', {
	id: serial('id').primaryKey(),
	name: text('name').notNull().unique(),
	host: text('host'),
	port: integer('port').default(2375),
	protocol: text('protocol').default('http'),
	// TLS material for direct connections
	tlsCa: text('tls_ca'),
	tlsCert: text('tls_cert'),
	tlsKey: text('tls_key'),
	tlsSkipVerify: boolean('tls_skip_verify').default(false),
	icon: text('icon').default('globe'),
	// Per-environment data collection toggles
	collectActivity: boolean('collect_activity').default(true),
	collectMetrics: boolean('collect_metrics').default(true),
	highlightChanges: boolean('highlight_changes').default(true),
	labels: text('labels'), // JSON array of label strings for categorization
	// Connection settings
	connectionType: text('connection_type').default('socket'), // 'socket' | 'direct' | 'hawser-standard' | 'hawser-edge'
	socketPath: text('socket_path').default('/var/run/docker.sock'), // Unix socket path for 'socket' connection type
	hawserToken: text('hawser_token'), // Plain-text token for hawser-standard auth
	hawserLastSeen: timestamp('hawser_last_seen', { mode: 'string' }),
	hawserAgentId: text('hawser_agent_id'),
	hawserAgentName: text('hawser_agent_name'),
	hawserVersion: text('hawser_version'),
	hawserCapabilities: text('hawser_capabilities'), // JSON array: ["compose", "exec", "metrics"]
	createdAt: timestamp('created_at', { mode: 'string' }).defaultNow(),
	updatedAt: timestamp('updated_at', { mode: 'string' }).defaultNow()
});
/**
 * API tokens for hawser agents. Only the hash is stored; `tokenPrefix`
 * keeps the first characters for identification in the UI.
 */
export const hawserTokens = pgTable('hawser_tokens', {
	id: serial('id').primaryKey(),
	token: text('token').notNull().unique(), // Hashed token
	tokenPrefix: text('token_prefix').notNull(), // First 8 chars for identification
	name: text('name').notNull(),
	environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'cascade' }),
	isActive: boolean('is_active').default(true),
	lastUsed: timestamp('last_used', { mode: 'string' }),
	createdAt: timestamp('created_at', { mode: 'string' }).defaultNow(),
	expiresAt: timestamp('expires_at', { mode: 'string' }) // null — presumably means no expiry; verify in auth layer
});
/**
 * Container registry credentials. At most one should be the default —
 * presumably enforced by the application, not the schema; verify.
 */
export const registries = pgTable('registries', {
	id: serial('id').primaryKey(),
	name: text('name').notNull().unique(),
	url: text('url').notNull(),
	username: text('username'),
	password: text('password'), // NOTE(review): appears stored in plain text — confirm encryption-at-rest expectations
	isDefault: boolean('is_default').default(false),
	createdAt: timestamp('created_at', { mode: 'string' }).defaultNow(),
	updatedAt: timestamp('updated_at', { mode: 'string' }).defaultNow()
});
/**
 * Simple application-wide key/value settings store.
 */
export const settings = pgTable('settings', {
	key: text('key').primaryKey(),
	value: text('value').notNull(),
	updatedAt: timestamp('updated_at', { mode: 'string' }).defaultNow()
});
// =============================================================================
// EVENT TRACKING TABLES
// =============================================================================
/**
 * Stack lifecycle events per environment (PostgreSQL mirror of the
 * SQLite table). `metadata` holds serialized extra context.
 */
export const stackEvents = pgTable('stack_events', {
	id: serial('id').primaryKey(),
	environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'cascade' }),
	stackName: text('stack_name').notNull(),
	eventType: text('event_type').notNull(),
	timestamp: timestamp('timestamp', { mode: 'string' }).defaultNow(),
	metadata: text('metadata')
});
/**
 * Host-level CPU/memory samples per environment. Memory byte counts use
 * bigint to avoid 32-bit overflow on large hosts.
 */
export const hostMetrics = pgTable('host_metrics', {
	id: serial('id').primaryKey(),
	environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'cascade' }),
	cpuPercent: doublePrecision('cpu_percent').notNull(),
	memoryPercent: doublePrecision('memory_percent').notNull(),
	memoryUsed: bigint('memory_used', { mode: 'number' }),
	memoryTotal: bigint('memory_total', { mode: 'number' }),
	timestamp: timestamp('timestamp', { mode: 'string' }).defaultNow()
}, (table) => ({
	// Time-range queries scoped to an environment
	envTimestampIdx: index('host_metrics_env_timestamp_idx').on(table.environmentId, table.timestamp)
}));
// =============================================================================
// CONFIGURATION TABLES
// =============================================================================
/**
 * Named, reusable container configuration presets (env vars, labels,
 * ports, volumes, network/restart policy). List-like columns are stored
 * serialized as text.
 */
export const configSets = pgTable('config_sets', {
	id: serial('id').primaryKey(),
	name: text('name').notNull().unique(),
	description: text('description'),
	envVars: text('env_vars'),
	labels: text('labels'),
	ports: text('ports'),
	volumes: text('volumes'),
	networkMode: text('network_mode').default('bridge'),
	restartPolicy: text('restart_policy').default('no'),
	createdAt: timestamp('created_at', { mode: 'string' }).defaultNow(),
	updatedAt: timestamp('updated_at', { mode: 'string' }).defaultNow()
});
/**
 * Per-container auto-update configuration, unique per
 * (environment, container name).
 */
export const autoUpdateSettings = pgTable('auto_update_settings', {
	id: serial('id').primaryKey(),
	environmentId: integer('environment_id').references(() => environments.id), // NOTE(review): no onDelete here, unlike most env FKs in this schema — confirm intentional
	containerName: text('container_name').notNull(),
	enabled: boolean('enabled').default(false),
	scheduleType: text('schedule_type').default('daily'),
	cronExpression: text('cron_expression'),
	vulnerabilityCriteria: text('vulnerability_criteria').default('never'), // 'never' | 'any' | 'critical_high' | 'critical' | 'more_than_current'
	lastChecked: timestamp('last_checked', { mode: 'string' }),
	lastUpdated: timestamp('last_updated', { mode: 'string' }),
	createdAt: timestamp('created_at', { mode: 'string' }).defaultNow(),
	updatedAt: timestamp('updated_at', { mode: 'string' }).defaultNow()
}, (table) => ({
	envContainerUnique: unique().on(table.environmentId, table.containerName)
}));
/**
 * Notification channel definitions. `config` holds the serialized
 * channel-specific settings; `eventTypes` filters which events fire it.
 */
export const notificationSettings = pgTable('notification_settings', {
	id: serial('id').primaryKey(),
	type: text('type').notNull(),
	name: text('name').notNull(),
	enabled: boolean('enabled').default(true),
	config: text('config').notNull(),
	eventTypes: text('event_types'),
	createdAt: timestamp('created_at', { mode: 'string' }).defaultNow(),
	updatedAt: timestamp('updated_at', { mode: 'string' }).defaultNow()
});
/**
 * Per-environment overrides linking environments to notification
 * channels, unique per (environment, notification) pair.
 */
export const environmentNotifications = pgTable('environment_notifications', {
	id: serial('id').primaryKey(),
	environmentId: integer('environment_id').notNull().references(() => environments.id, { onDelete: 'cascade' }),
	notificationId: integer('notification_id').notNull().references(() => notificationSettings.id, { onDelete: 'cascade' }),
	enabled: boolean('enabled').default(true),
	eventTypes: text('event_types'),
	createdAt: timestamp('created_at', { mode: 'string' }).defaultNow(),
	updatedAt: timestamp('updated_at', { mode: 'string' }).defaultNow()
}, (table) => ({
	envNotifUnique: unique().on(table.environmentId, table.notificationId)
}));
// =============================================================================
// AUTHENTICATION TABLES
// =============================================================================
/**
 * Global authentication settings (enabled flag, default provider,
 * session timeout in seconds — 86400 = 24h).
 */
export const authSettings = pgTable('auth_settings', {
	id: serial('id').primaryKey(),
	authEnabled: boolean('auth_enabled').default(false),
	defaultProvider: text('default_provider').default('local'),
	sessionTimeout: integer('session_timeout').default(86400),
	createdAt: timestamp('created_at', { mode: 'string' }).defaultNow(),
	updatedAt: timestamp('updated_at', { mode: 'string' }).defaultNow()
});
/**
 * User accounts for local and external (OIDC/LDAP) authentication.
 */
export const users = pgTable('users', {
	id: serial('id').primaryKey(),
	username: text('username').notNull().unique(),
	email: text('email'),
	passwordHash: text('password_hash').notNull(),
	displayName: text('display_name'),
	avatar: text('avatar'),
	authProvider: text('auth_provider').default('local'), // e.g., 'local', 'oidc:Keycloak', 'ldap:AD'
	mfaEnabled: boolean('mfa_enabled').default(false),
	mfaSecret: text('mfa_secret'), // NOTE(review): TOTP-style secret appears stored in plain text — confirm encryption-at-rest expectations
	isActive: boolean('is_active').default(true),
	lastLogin: timestamp('last_login', { mode: 'string' }),
	createdAt: timestamp('created_at', { mode: 'string' }).defaultNow(),
	updatedAt: timestamp('updated_at', { mode: 'string' }).defaultNow()
});
/**
 * Login sessions. `id` is the session token (text PK); rows cascade away
 * with their user. Indexed for per-user lookup and expiry sweeps.
 */
export const sessions = pgTable('sessions', {
	id: text('id').primaryKey(),
	userId: integer('user_id').notNull().references(() => users.id, { onDelete: 'cascade' }),
	provider: text('provider').notNull(),
	expiresAt: timestamp('expires_at', { mode: 'string' }).notNull(),
	createdAt: timestamp('created_at', { mode: 'string' }).defaultNow()
}, (table) => ({
	userIdIdx: index('sessions_user_id_idx').on(table.userId),
	expiresAtIdx: index('sessions_expires_at_idx').on(table.expiresAt)
}));
/**
 * LDAP directory configuration: server/bind settings, user and group
 * search parameters, and group-to-role mappings.
 */
export const ldapConfig = pgTable('ldap_config', {
	id: serial('id').primaryKey(),
	name: text('name').notNull(),
	enabled: boolean('enabled').default(false),
	serverUrl: text('server_url').notNull(),
	bindDn: text('bind_dn'),
	bindPassword: text('bind_password'), // NOTE(review): appears stored in plain text — confirm encryption-at-rest expectations
	baseDn: text('base_dn').notNull(),
	userFilter: text('user_filter').default('(uid={{username}})'), // {{username}} is substituted at search time — presumably; verify in LDAP client code
	usernameAttribute: text('username_attribute').default('uid'),
	emailAttribute: text('email_attribute').default('mail'),
	displayNameAttribute: text('display_name_attribute').default('cn'),
	groupBaseDn: text('group_base_dn'),
	groupFilter: text('group_filter'),
	adminGroup: text('admin_group'),
	roleMappings: text('role_mappings'), // JSON: [{ groupDn: string, roleId: number }]
	tlsEnabled: boolean('tls_enabled').default(false),
	tlsCa: text('tls_ca'),
	createdAt: timestamp('created_at', { mode: 'string' }).defaultNow(),
	updatedAt: timestamp('updated_at', { mode: 'string' }).defaultNow()
});
/**
 * OIDC provider configuration: issuer/client credentials and the claim
 * names used to map tokens onto local user fields and roles.
 */
export const oidcConfig = pgTable('oidc_config', {
	id: serial('id').primaryKey(),
	name: text('name').notNull(),
	enabled: boolean('enabled').default(false),
	issuerUrl: text('issuer_url').notNull(),
	clientId: text('client_id').notNull(),
	clientSecret: text('client_secret').notNull(), // NOTE(review): appears stored in plain text — confirm encryption-at-rest expectations
	redirectUri: text('redirect_uri').notNull(),
	scopes: text('scopes').default('openid profile email'),
	usernameClaim: text('username_claim').default('preferred_username'),
	emailClaim: text('email_claim').default('email'),
	displayNameClaim: text('display_name_claim').default('name'),
	adminClaim: text('admin_claim'),
	adminValue: text('admin_value'),
	roleMappingsClaim: text('role_mappings_claim').default('groups'),
	roleMappings: text('role_mappings'),
	createdAt: timestamp('created_at', { mode: 'string' }).defaultNow(),
	updatedAt: timestamp('updated_at', { mode: 'string' }).defaultNow()
});
// =============================================================================
// ROLE-BASED ACCESS CONTROL TABLES
// =============================================================================
/**
 * Role definitions (PostgreSQL mirror). `permissions` is a serialized
 * permission set; `environmentIds` restricts the role to specific
 * environments (null = all environments).
 */
export const roles = pgTable('roles', {
	id: serial('id').primaryKey(),
	name: text('name').notNull().unique(),
	description: text('description'),
	isSystem: boolean('is_system').default(false), // built-in roles — presumably protected from edits/deletion; verify in API layer
	permissions: text('permissions').notNull(),
	environmentIds: text('environment_ids'), // JSON array of env IDs, null = all environments
	createdAt: timestamp('created_at', { mode: 'string' }).defaultNow(),
	updatedAt: timestamp('updated_at', { mode: 'string' }).defaultNow()
});
/**
 * User-to-role assignments, optionally scoped to an environment;
 * unique per (user, role, environment scope).
 */
export const userRoles = pgTable('user_roles', {
	id: serial('id').primaryKey(),
	userId: integer('user_id').notNull().references(() => users.id, { onDelete: 'cascade' }),
	roleId: integer('role_id').notNull().references(() => roles.id, { onDelete: 'cascade' }),
	environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'cascade' }),
	createdAt: timestamp('created_at', { mode: 'string' }).defaultNow()
}, (table) => ({
	userRoleEnvUnique: unique().on(table.userId, table.roleId, table.environmentId)
}));
// =============================================================================
// GIT INTEGRATION TABLES
// =============================================================================
/**
 * Reusable Git remote credentials. `authType` selects which credential
 * columns apply (username/password vs. SSH key material).
 */
export const gitCredentials = pgTable('git_credentials', {
	id: serial('id').primaryKey(),
	name: text('name').notNull().unique(),
	authType: text('auth_type').notNull().default('none'),
	username: text('username'),
	// NOTE(review): password / SSH key / passphrase appear stored in plain text — confirm encryption-at-rest expectations
	password: text('password'),
	sshPrivateKey: text('ssh_private_key'),
	sshPassphrase: text('ssh_passphrase'),
	createdAt: timestamp('created_at', { mode: 'string' }).defaultNow(),
	updatedAt: timestamp('updated_at', { mode: 'string' }).defaultNow()
});
/**
 * Tracked Git repositories with sync and auto-update configuration;
 * last-sync outcome is recorded in lastSync/lastCommit/syncStatus/syncError.
 */
export const gitRepositories = pgTable('git_repositories', {
	id: serial('id').primaryKey(),
	name: text('name').notNull().unique(),
	url: text('url').notNull(),
	branch: text('branch').default('main'),
	credentialId: integer('credential_id').references(() => gitCredentials.id, { onDelete: 'set null' }),
	composePath: text('compose_path').default('docker-compose.yml'),
	environmentId: integer('environment_id'), // NOTE(review): no FK reference here, unlike gitStacks.environmentId — confirm intentional
	autoUpdate: boolean('auto_update').default(false),
	autoUpdateSchedule: text('auto_update_schedule').default('daily'),
	autoUpdateCron: text('auto_update_cron').default('0 3 * * *'), // daily at 03:00 by default
	webhookEnabled: boolean('webhook_enabled').default(false),
	webhookSecret: text('webhook_secret'),
	lastSync: timestamp('last_sync', { mode: 'string' }),
	lastCommit: text('last_commit'),
	syncStatus: text('sync_status').default('pending'),
	syncError: text('sync_error'),
	createdAt: timestamp('created_at', { mode: 'string' }).defaultNow(),
	updatedAt: timestamp('updated_at', { mode: 'string' }).defaultNow()
});
/**
 * Per-stack deployments sourced from a Git repository, unique per
 * (stackName, environmentId), with independent sync/auto-update settings.
 */
export const gitStacks = pgTable('git_stacks', {
	id: serial('id').primaryKey(),
	stackName: text('stack_name').notNull(),
	environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'cascade' }),
	repositoryId: integer('repository_id').notNull().references(() => gitRepositories.id, { onDelete: 'cascade' }),
	composePath: text('compose_path').default('docker-compose.yml'),
	envFilePath: text('env_file_path'), // Path to .env file in repository (e.g., ".env", "config/.env.prod")
	autoUpdate: boolean('auto_update').default(false),
	autoUpdateSchedule: text('auto_update_schedule').default('daily'),
	autoUpdateCron: text('auto_update_cron').default('0 3 * * *'),
	webhookEnabled: boolean('webhook_enabled').default(false),
	webhookSecret: text('webhook_secret'),
	lastSync: timestamp('last_sync', { mode: 'string' }),
	lastCommit: text('last_commit'),
	syncStatus: text('sync_status').default('pending'),
	syncError: text('sync_error'),
	createdAt: timestamp('created_at', { mode: 'string' }).defaultNow(),
	updatedAt: timestamp('updated_at', { mode: 'string' }).defaultNow()
}, (table) => ({
	stackEnvUnique: unique().on(table.stackName, table.environmentId)
}));
/**
 * Records where each stack originates from ('internal' by default, or a
 * Git repository/stack). Git FKs null out on delete so the stack record
 * survives removal of its Git source.
 */
export const stackSources = pgTable('stack_sources', {
	id: serial('id').primaryKey(),
	stackName: text('stack_name').notNull(),
	environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'cascade' }),
	sourceType: text('source_type').notNull().default('internal'),
	gitRepositoryId: integer('git_repository_id').references(() => gitRepositories.id, { onDelete: 'set null' }),
	gitStackId: integer('git_stack_id').references(() => gitStacks.id, { onDelete: 'set null' }),
	createdAt: timestamp('created_at', { mode: 'string' }).defaultNow(),
	updatedAt: timestamp('updated_at', { mode: 'string' }).defaultNow()
}, (table) => ({
	stackSourceEnvUnique: unique().on(table.stackName, table.environmentId)
}));
/**
 * Key/value environment variables attached to a stack, unique per
 * (stackName, environmentId, key). `isSecret` marks values to be masked —
 * presumably in the UI/API; verify where it is enforced.
 */
export const stackEnvironmentVariables = pgTable('stack_environment_variables', {
	id: serial('id').primaryKey(),
	stackName: text('stack_name').notNull(),
	environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'cascade' }),
	key: text('key').notNull(),
	value: text('value').notNull(),
	isSecret: boolean('is_secret').default(false),
	createdAt: timestamp('created_at', { mode: 'string' }).defaultNow(),
	updatedAt: timestamp('updated_at', { mode: 'string' }).defaultNow()
}, (table) => ({
	stackEnvVarUnique: unique().on(table.stackName, table.environmentId, table.key)
}));
// =============================================================================
// SECURITY TABLES
// =============================================================================
/**
 * Image vulnerability scan results (PostgreSQL mirror). Per-severity
 * counts are denormalized for cheap listing; full findings are serialized
 * in `vulnerabilities`, and `error` is set when a scan fails.
 */
export const vulnerabilityScans = pgTable('vulnerability_scans', {
	id: serial('id').primaryKey(),
	environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'cascade' }),
	imageId: text('image_id').notNull(),
	imageName: text('image_name').notNull(),
	scanner: text('scanner').notNull(),
	scannedAt: timestamp('scanned_at', { mode: 'string' }).notNull(),
	scanDuration: integer('scan_duration'), // units not stated here — presumably ms or s; confirm against writer
	criticalCount: integer('critical_count').default(0),
	highCount: integer('high_count').default(0),
	mediumCount: integer('medium_count').default(0),
	lowCount: integer('low_count').default(0),
	negligibleCount: integer('negligible_count').default(0),
	unknownCount: integer('unknown_count').default(0),
	vulnerabilities: text('vulnerabilities'),
	error: text('error'),
	createdAt: timestamp('created_at', { mode: 'string' }).defaultNow()
}, (table) => ({
	envImageIdx: index('vulnerability_scans_env_image_idx').on(table.environmentId, table.imageId)
}));
// =============================================================================
// AUDIT LOGGING TABLES
// =============================================================================
/**
 * Append-only audit trail of user actions (PostgreSQL mirror).
 * `username` is denormalized and `userId` is nulled on user deletion so
 * entries remain attributable after the account is gone.
 */
export const auditLogs = pgTable('audit_logs', {
	id: serial('id').primaryKey(),
	userId: integer('user_id').references(() => users.id, { onDelete: 'set null' }),
	username: text('username').notNull(),
	action: text('action').notNull(),
	entityType: text('entity_type').notNull(),
	entityId: text('entity_id'),
	entityName: text('entity_name'),
	environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'set null' }),
	description: text('description'),
	details: text('details'), // serialized extra context — presumably JSON; verify against writer
	ipAddress: text('ip_address'),
	userAgent: text('user_agent'),
	createdAt: timestamp('created_at', { mode: 'string' }).defaultNow()
}, (table) => ({
	userIdIdx: index('audit_logs_user_id_idx').on(table.userId),
	createdAtIdx: index('audit_logs_created_at_idx').on(table.createdAt)
}));
// =============================================================================
// CONTAINER ACTIVITY TABLES
// =============================================================================
// Docker container lifecycle events recorded per environment. `timestamp` is
// the event time as reported by the source; `createdAt` is row insertion time.
export const containerEvents = pgTable('container_events', {
  id: serial('id').primaryKey(),
  environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'cascade' }),
  containerId: text('container_id').notNull(),
  containerName: text('container_name'),
  image: text('image'),
  action: text('action').notNull(), // Docker event action, e.g. 'start', 'stop', 'die'
  actorAttributes: text('actor_attributes'), // serialized event actor attributes — presumably JSON; confirm
  timestamp: timestamp('timestamp', { mode: 'string' }).notNull(),
  createdAt: timestamp('created_at', { mode: 'string' }).defaultNow()
}, (table) => ({
  // Supports per-environment, time-ordered activity feed queries.
  envTimestampIdx: index('container_events_env_timestamp_idx').on(table.environmentId, table.timestamp)
}));
// =============================================================================
// SCHEDULE EXECUTION TABLES
// =============================================================================
// Execution history for scheduled jobs. scheduleId points into a different
// table depending on scheduleType, so no foreign key is declared for it.
export const scheduleExecutions = pgTable('schedule_executions', {
  id: serial('id').primaryKey(),
  // Link to the scheduled job
  scheduleType: text('schedule_type').notNull(), // 'container_update' | 'git_stack_sync' | 'system_cleanup'
  scheduleId: integer('schedule_id').notNull(), // ID in autoUpdateSettings or gitStacks, or 0 for system jobs
  environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'cascade' }),
  // What ran
  entityName: text('entity_name').notNull(), // container name or stack name
  // When and how
  triggeredBy: text('triggered_by').notNull(), // 'cron' | 'webhook' | 'manual'
  triggeredAt: timestamp('triggered_at', { mode: 'string' }).notNull(),
  startedAt: timestamp('started_at', { mode: 'string' }), // null while still queued
  completedAt: timestamp('completed_at', { mode: 'string' }), // null until the run finishes
  duration: integer('duration'), // milliseconds
  // Result
  status: text('status').notNull(), // 'queued' | 'running' | 'success' | 'failed' | 'skipped'
  errorMessage: text('error_message'),
  // Details
  details: text('details'), // JSON with execution details
  logs: text('logs'), // Execution logs/output
  createdAt: timestamp('created_at', { mode: 'string' }).defaultNow()
}, (table) => ({
  // Supports listing the run history of a single scheduled job.
  typeIdIdx: index('schedule_executions_type_id_idx').on(table.scheduleType, table.scheduleId)
}));
// =============================================================================
// PENDING CONTAINER UPDATES TABLE
// =============================================================================
// Containers for which a newer image was detected but not yet applied.
// (environmentId, containerId) is unique: at most one pending row per container.
export const pendingContainerUpdates = pgTable('pending_container_updates', {
  id: serial('id').primaryKey(),
  environmentId: integer('environment_id').notNull().references(() => environments.id, { onDelete: 'cascade' }),
  containerId: text('container_id').notNull(),
  containerName: text('container_name').notNull(),
  currentImage: text('current_image').notNull(),
  checkedAt: timestamp('checked_at', { mode: 'string' }).defaultNow(), // when the update check last confirmed this entry
  createdAt: timestamp('created_at', { mode: 'string' }).defaultNow()
}, (table) => ({
  envContainerUnique: unique().on(table.environmentId, table.containerId)
}));
// =============================================================================
// USER PREFERENCES TABLE (unified key-value store)
// =============================================================================
// Key-value preference store scoped by (userId, environmentId, key):
// a NULL userId means the preference is shared across all users (free edition);
// a NULL environmentId means the preference is global rather than per-environment.
export const userPreferences = pgTable('user_preferences', {
  id: serial('id').primaryKey(),
  userId: integer('user_id').references(() => users.id, { onDelete: 'cascade' }), // NULL = shared (free edition), set = per-user (enterprise)
  environmentId: integer('environment_id').references(() => environments.id, { onDelete: 'cascade' }), // NULL for global prefs
  key: text('key').notNull(), // e.g., 'dashboard_layout', 'logs_favorites'
  value: text('value').notNull(), // JSON value
  createdAt: timestamp('created_at', { mode: 'string' }).defaultNow(),
  updatedAt: timestamp('updated_at', { mode: 'string' }).defaultNow()
}, (table) => [
  // One value per (user, environment, key) combination.
  unique().on(table.userId, table.environmentId, table.key)
]);

File diff suppressed because it is too large Load Diff

View File

@@ -1,18 +0,0 @@
/**
* Container Event Emitter
*
* Shared EventEmitter for broadcasting container events to SSE clients.
* Events are emitted by the subprocess-manager when it receives them from the event-subprocess.
*/
import { EventEmitter } from 'node:events';

/**
 * Shared emitter that fans container events out to connected SSE clients.
 *
 * Producers: subprocess-manager.ts (relays events received from the
 * event-subprocess over IPC).
 * Consumers: api/activity/events/+server.ts (forwards each event to its
 * SSE response stream).
 */
export const containerEventEmitter = new EventEmitter();

// Node's default cap of 10 listeners would trigger MaxListenersExceededWarning
// once a handful of dashboards are open; allow up to 100 concurrent clients.
const MAX_SSE_LISTENERS = 100;
containerEventEmitter.setMaxListeners(MAX_SSE_LISTENERS);

File diff suppressed because it is too large Load Diff

View File

@@ -1,945 +0,0 @@
/**
* Hawser Edge Connection Manager
*
* Manages WebSocket connections from Hawser agents running in Edge mode.
* Handles request/response correlation, heartbeat tracking, and metrics collection.
*/
import { db, hawserTokens, environments, eq } from './db/drizzle.js';
import { logContainerEvent, saveHostMetric, type ContainerEventAction } from './db.js';
import { containerEventEmitter } from './event-collector.js';
import { sendEnvironmentNotification } from './notifications.js';
// Protocol constants
export const HAWSER_PROTOCOL_VERSION = '1.0';
// Message types (matching Hawser agent protocol)
// Values are the literal strings sent over the wire; keep in sync with the agent.
export const MessageType = {
  HELLO: 'hello',           // agent -> server: initial handshake (HelloMessage)
  WELCOME: 'welcome',       // server -> agent: handshake accepted
  REQUEST: 'request',       // server -> agent: proxied API request
  RESPONSE: 'response',     // agent -> server: single reply to a request
  STREAM: 'stream',         // agent -> server: one chunk of a streaming reply
  STREAM_END: 'stream_end', // either direction: stream finished or cancelled
  METRICS: 'metrics',       // agent -> server: periodic host metrics sample
  PING: 'ping',             // heartbeat
  PONG: 'pong',             // heartbeat reply
  ERROR: 'error'            // error report
} as const;
// Active edge connections mapped by environment ID
export interface EdgeConnection {
  ws: WebSocket;
  environmentId: number;
  agentId: string;
  agentName: string;
  agentVersion: string;
  dockerVersion: string;
  hostname: string;
  capabilities: string[];
  connectedAt: Date;
  lastHeartbeat: Date; // refreshed by handleHeartbeat(); read by the stale-connection reaper
  pendingRequests: Map<string, PendingRequest>; // in-flight request/response pairs, keyed by requestId
  pendingStreamRequests: Map<string, PendingStreamRequest>; // open streams, keyed by requestId
  lastMetrics?: {
    // most recent metrics sample from the agent (set by handleEdgeMetrics)
    uptime?: number;
    cpuUsage?: number;
    memoryTotal?: number;
    memoryUsed?: number;
  };
}
// A request awaiting a single 'response' message from the agent.
interface PendingRequest {
  resolve: (response: EdgeResponse) => void;
  reject: (error: Error) => void;
  timeout: NodeJS.Timeout; // must be cleared when the request settles
}
// Callbacks for a streaming request ('stream' / 'stream_end' messages).
interface PendingStreamRequest {
  onData: (data: string, stream?: 'stdout' | 'stderr') => void;
  onEnd: (reason?: string) => void;
  onError: (error: string) => void;
}
export interface EdgeResponse {
  statusCode: number;
  headers: Record<string, string>;
  body: string | Uint8Array;
  isBinary?: boolean; // true when body holds raw bytes (e.g. a buffered stream)
}
// Global map of active connections (stored in globalThis for dev mode sharing with vite.config.ts)
declare global {
  var __hawserEdgeConnections: Map<number, EdgeConnection> | undefined;
  var __hawserSendMessage: ((envId: number, message: string) => boolean) | undefined;
  var __hawserHandleContainerEvent: ((envId: number, event: ContainerEventMessage['event']) => Promise<void>) | undefined;
  var __hawserHandleMetrics: ((envId: number, metrics: MetricsMessage['metrics']) => Promise<void>) | undefined;
}
export const edgeConnections: Map<number, EdgeConnection> =
  globalThis.__hawserEdgeConnections ?? (globalThis.__hawserEdgeConnections = new Map());
// Cleanup interval for stale connections (check every 30 seconds)
let cleanupInterval: NodeJS.Timeout | null = null;
/**
 * Start the periodic reaper that drops Edge connections whose agent has
 * stopped sending heartbeats.
 *
 * Runs every 30s; a connection is considered dead after 90s without a
 * heartbeat (three missed intervals). All in-flight requests on a dead
 * connection are rejected before the socket is closed. Idempotent: calling
 * it again while the reaper is running is a no-op.
 */
export function initializeEdgeManager(): void {
  if (cleanupInterval) return;
  const STALE_AFTER_MS = 90 * 1000; // 90 seconds (3 missed heartbeats)
  const CHECK_EVERY_MS = 30000;
  cleanupInterval = setInterval(() => {
    const now = Date.now();
    for (const [envId, conn] of edgeConnections) {
      if (now - conn.lastHeartbeat.getTime() <= STALE_AFTER_MS) continue;
      const pendingCount = conn.pendingRequests.size;
      const streamCount = conn.pendingStreamRequests.size;
      console.log(
        `[Hawser] Connection timeout for environment ${envId}. ` +
        `Rejecting ${pendingCount} pending requests and ${streamCount} stream requests.`
      );
      // Fail every in-flight request (and disarm its timer) before closing.
      for (const [requestId, pending] of conn.pendingRequests) {
        console.log(`[Hawser] Rejecting pending request ${requestId} due to connection timeout`);
        clearTimeout(pending.timeout);
        pending.reject(new Error('Connection timeout'));
      }
      for (const [requestId, pending] of conn.pendingStreamRequests) {
        console.log(`[Hawser] Ending stream request ${requestId} due to connection timeout`);
        pending.onEnd?.('Connection timeout');
      }
      conn.pendingRequests.clear();
      conn.pendingStreamRequests.clear();
      conn.ws.close(1001, 'Connection timeout');
      edgeConnections.delete(envId);
      // Fire-and-forget DB write recording the disconnect.
      updateEnvironmentStatus(envId, null);
    }
  }, CHECK_EVERY_MS);
}
/**
 * Shut down the edge manager: stop the stale-connection reaper and close
 * every active agent socket.
 */
export function stopEdgeManager(): void {
  if (cleanupInterval !== null) {
    clearInterval(cleanupInterval);
    cleanupInterval = null;
  }
  // Tell every connected agent the server is going away.
  for (const conn of edgeConnections.values()) {
    conn.ws.close(1001, 'Server shutdown');
  }
  edgeConnections.clear();
}
/**
 * Handle a container event reported by an Edge agent.
 *
 * Persists the event, broadcasts it to SSE subscribers, then fires an
 * environment notification whose severity reflects the action. Errors are
 * logged and swallowed so a bad event can never break the connection loop.
 */
export async function handleEdgeContainerEvent(
  environmentId: number,
  event: ContainerEventMessage['event']
): Promise<void> {
  try {
    console.log(`[Hawser] Container event from env ${environmentId}: ${event.action} ${event.containerName || event.containerId}`);
    // Persist first so the SSE broadcast carries the stored row.
    const savedEvent = await logContainerEvent({
      environmentId,
      containerId: event.containerId,
      containerName: event.containerName || null,
      image: event.image || null,
      action: event.action as ContainerEventAction,
      actorAttributes: event.actorAttributes || null,
      timestamp: event.timestamp
    });
    // Broadcast to SSE clients.
    containerEventEmitter.emit('event', savedEvent);
    // Human-readable labels for the notification text.
    const actionLabel = event.action.charAt(0).toUpperCase() + event.action.slice(1);
    const containerLabel = event.containerName || event.containerId.substring(0, 12);
    // Severity lookup (replaces nested ternaries); unlisted actions are 'info'.
    const severityByAction: Record<string, 'success' | 'error' | 'warning'> = {
      die: 'error',
      kill: 'error',
      oom: 'error',
      stop: 'warning',
      start: 'success'
    };
    const notificationType = severityByAction[event.action] ?? 'info';
    await sendEnvironmentNotification(environmentId, event.action as ContainerEventAction, {
      title: `Container ${actionLabel}`,
      message: `Container "${containerLabel}" ${event.action}${event.image ? ` (${event.image})` : ''}`,
      type: notificationType as 'success' | 'error' | 'warning' | 'info'
    }, event.image);
  } catch (error) {
    console.error('[Hawser] Error handling container event:', error);
  }
}
// Expose for the dev-server WebSocket bridge (patch-build.ts) to call.
globalThis.__hawserHandleContainerEvent = handleEdgeContainerEvent;
/**
 * Handle a metrics sample from an Edge agent.
 *
 * Caches the latest values on the live connection (for quick reads such as
 * uptime) and persists normalized CPU/memory percentages for the dashboard
 * graphs. Errors are logged and swallowed.
 */
export async function handleEdgeMetrics(
  environmentId: number,
  metrics: MetricsMessage['metrics']
): Promise<void> {
  try {
    // Keep the most recent sample on the connection record for cheap access.
    const connection = edgeConnections.get(environmentId);
    if (connection) {
      const { uptime, cpuUsage, memoryTotal, memoryUsed } = metrics;
      connection.lastMetrics = { uptime, cpuUsage, memoryTotal, memoryUsed };
    }
    // The agent reports CPU as a raw percentage summed across all cores;
    // divide by core count so the stored value is per-host 0-100.
    const cpuPercent = metrics.cpuCores > 0 ? metrics.cpuUsage / metrics.cpuCores : metrics.cpuUsage;
    const memoryPercent = metrics.memoryTotal > 0
      ? (metrics.memoryUsed / metrics.memoryTotal) * 100
      : 0;
    await saveHostMetric(cpuPercent, memoryPercent, metrics.memoryUsed, metrics.memoryTotal, environmentId);
  } catch (error) {
    console.error('[Hawser] Error saving metrics:', error);
  }
}
// Expose for the dev-server WebSocket bridge to call.
globalThis.__hawserHandleMetrics = handleEdgeMetrics;
/**
 * Validate a Hawser token.
 *
 * Compares the presented plaintext token against every active token hash
 * (tokens are stored only as Argon2id hashes, so each candidate must be
 * verified). Tokens whose `expiresAt` has passed are rejected — previously
 * expiration was stored by generateHawserToken but never enforced here, so
 * expired tokens remained valid forever.
 *
 * @returns `{ valid: true, environmentId, tokenId }` on a match, otherwise `{ valid: false }`.
 */
export async function validateHawserToken(
  token: string
): Promise<{ valid: boolean; environmentId?: number; tokenId?: number }> {
  // Get all active tokens
  const tokens = await db.select().from(hawserTokens).where(eq(hawserTokens.isActive, true));
  const now = new Date();
  // Check each token (tokens are hashed)
  for (const t of tokens) {
    // Enforce expiry: skip tokens whose expiresAt timestamp has passed.
    if (t.expiresAt && new Date(t.expiresAt) < now) {
      continue;
    }
    try {
      const isValid = await Bun.password.verify(token, t.token);
      if (isValid) {
        // Update last used timestamp
        await db
          .update(hawserTokens)
          .set({ lastUsed: new Date().toISOString() })
          .where(eq(hawserTokens.id, t.id));
        return {
          valid: true,
          environmentId: t.environmentId ?? undefined,
          tokenId: t.id
        };
      }
    } catch {
      // Invalid hash, continue checking
    }
  }
  return { valid: false };
}
/**
 * Create (or re-key) the Hawser token for an environment.
 *
 * Any live Edge connection for the environment is closed first so the agent
 * is forced to reconnect with the new credential. Only an Argon2id hash is
 * persisted; the plaintext token is returned exactly once.
 *
 * @param rawToken - Optional pre-generated token (base64url string). If not provided, generates a new one.
 */
export async function generateHawserToken(
  name: string,
  environmentId: number,
  expiresAt?: string,
  rawToken?: string
): Promise<{ token: string; tokenId: number }> {
  // Drop any existing connection so the agent must re-authenticate.
  const existingConnection = edgeConnections.get(environmentId);
  if (existingConnection) {
    console.log(`[Hawser] Closing existing connection for env ${environmentId} due to new token generation`);
    existingConnection.ws.close(1000, 'Token regenerated');
    edgeConnections.delete(environmentId);
  }
  // Use the supplied token (already base64url) or mint 32 random bytes (256 bits).
  let token: string;
  if (rawToken) {
    token = rawToken;
  } else {
    const tokenBytes = new Uint8Array(32);
    crypto.getRandomValues(tokenBytes);
    token = Buffer.from(tokenBytes).toString('base64url');
  }
  // Store only an Argon2id hash of the token (via Bun's built-in hasher).
  const hashedToken = await Bun.password.hash(token, {
    algorithm: 'argon2id',
    memoryCost: 19456,
    timeCost: 2
  });
  // Keep a short prefix so the token can be identified in listings.
  const tokenPrefix = token.substring(0, 8);
  const inserted = await db
    .insert(hawserTokens)
    .values({
      token: hashedToken,
      tokenPrefix,
      name,
      environmentId,
      isActive: true,
      expiresAt
    })
    .returning({ id: hawserTokens.id });
  // The plaintext is only ever returned here (shown once), never stored.
  return { token, tokenId: inserted[0].id };
}
/**
 * Revoke a Hawser token
 *
 * Soft-deactivates the token (sets isActive = false); the row is kept.
 * NOTE(review): an already-connected agent is not disconnected here — confirm
 * whether callers also close the Edge connection.
 */
export async function revokeHawserToken(tokenId: number): Promise<void> {
  await db.update(hawserTokens).set({ isActive: false }).where(eq(hawserTokens.id, tokenId));
}
/**
 * Close an Edge connection and clean up pending requests.
 * Called when an environment is deleted.
 *
 * Order matters: pending requests are rejected (with their timeout timers
 * cleared) and streams ended BEFORE the socket is closed, so waiting callers
 * get a precise "Environment deleted" error rather than a generic close.
 */
export function closeEdgeConnection(environmentId: number): void {
  const connection = edgeConnections.get(environmentId);
  if (!connection) {
    console.log(`[Hawser] No Edge connection to close for environment ${environmentId}`);
    return;
  }
  const pendingCount = connection.pendingRequests.size;
  const streamCount = connection.pendingStreamRequests.size;
  console.log(
    `[Hawser] Closing Edge connection for deleted environment ${environmentId}. ` +
    `Rejecting ${pendingCount} pending requests and ${streamCount} stream requests.`
  );
  // Reject all pending requests
  for (const [requestId, pending] of connection.pendingRequests) {
    console.log(`[Hawser] Rejecting pending request ${requestId} due to environment deletion`);
    clearTimeout(pending.timeout);
    pending.reject(new Error('Environment deleted'));
  }
  for (const [requestId, pending] of connection.pendingStreamRequests) {
    console.log(`[Hawser] Ending stream request ${requestId} due to environment deletion`);
    pending.onEnd?.('Environment deleted');
  }
  connection.pendingRequests.clear();
  connection.pendingStreamRequests.clear();
  // Close the WebSocket (may throw if the socket is already gone)
  try {
    connection.ws.close(1000, 'Environment deleted');
  } catch (e) {
    console.error(`[Hawser] Error closing WebSocket for environment ${environmentId}:`, e);
  }
  edgeConnections.delete(environmentId);
  console.log(`[Hawser] Edge connection closed for environment ${environmentId}`);
}
/**
 * Handle a new edge connection from a Hawser agent.
 *
 * If an agent is already registered for this environment, the old connection
 * is torn down first: its pending requests are rejected — with their timeout
 * timers cleared — and its streams ended before the old socket is closed.
 *
 * @param ws - The freshly authenticated agent WebSocket.
 * @param environmentId - Environment this agent serves.
 * @param hello - The agent's HELLO handshake payload.
 * @returns The registered connection record.
 */
export function handleEdgeConnection(
  ws: WebSocket,
  environmentId: number,
  hello: HelloMessage
): EdgeConnection {
  // Check if there's already a connection for this environment
  const existing = edgeConnections.get(environmentId);
  if (existing) {
    const pendingCount = existing.pendingRequests.size;
    const streamCount = existing.pendingStreamRequests.size;
    console.log(
      `[Hawser] Replacing existing connection for environment ${environmentId}. ` +
      `Rejecting ${pendingCount} pending requests and ${streamCount} stream requests.`
    );
    // Reject all pending requests before closing. FIX: clear each timeout
    // timer as well (the timeout-reaper and closeEdgeConnection paths already
    // do this); otherwise the stale timer would still fire later.
    for (const [requestId, pending] of existing.pendingRequests) {
      console.log(`[Hawser] Rejecting pending request ${requestId} due to connection replacement`);
      clearTimeout(pending.timeout);
      pending.reject(new Error('Connection replaced by new agent'));
    }
    for (const [requestId, pending] of existing.pendingStreamRequests) {
      console.log(`[Hawser] Ending stream request ${requestId} due to connection replacement`);
      pending.onEnd?.('Connection replaced by new agent');
    }
    existing.pendingRequests.clear();
    existing.pendingStreamRequests.clear();
    existing.ws.close(1000, 'Replaced by new connection');
  }
  const connection: EdgeConnection = {
    ws,
    environmentId,
    agentId: hello.agentId,
    agentName: hello.agentName,
    agentVersion: hello.version,
    dockerVersion: hello.dockerVersion,
    hostname: hello.hostname,
    capabilities: hello.capabilities,
    connectedAt: new Date(),
    lastHeartbeat: new Date(),
    pendingRequests: new Map(),
    pendingStreamRequests: new Map()
  };
  edgeConnections.set(environmentId, connection);
  // Update environment record (fire-and-forget DB write)
  updateEnvironmentStatus(environmentId, connection);
  return connection;
}
/**
 * Persist the environment's Hawser status to the database.
 *
 * With a live connection the agent identity fields are refreshed as well;
 * on disconnect (connection === null) only the last-seen/updated timestamps
 * are advanced.
 */
async function updateEnvironmentStatus(
  environmentId: number,
  connection: EdgeConnection | null
): Promise<void> {
  const now = new Date().toISOString();
  // Base change set applied in both cases.
  const changes: Record<string, unknown> = {
    hawserLastSeen: now,
    updatedAt: now
  };
  if (connection) {
    // Refresh the agent identity columns while the agent is connected.
    changes.hawserAgentId = connection.agentId;
    changes.hawserAgentName = connection.agentName;
    changes.hawserVersion = connection.agentVersion;
    changes.hawserCapabilities = JSON.stringify(connection.capabilities);
  }
  await db.update(environments).set(changes).where(eq(environments.id, environmentId));
}
/**
 * Send a request to a Hawser agent and wait for response
 *
 * Correlates the request and reply via a generated requestId. For
 * `streaming = true` the agent answers with a sequence of 'stream' messages
 * instead of a single 'response'; those base64 chunks are buffered and
 * resolved as one binary body when the stream ends. The request is ALSO
 * registered in pendingRequests, because even a streaming path may come back
 * as a plain 'response' (e.g. an error).
 *
 * @param environmentId - Target environment; must have a live Edge connection.
 * @param method - HTTP method to proxy to the agent.
 * @param path - API path on the agent side.
 * @param body - JSON-serializable request body.
 * @param headers - Extra headers forwarded to the agent.
 * @param streaming - Whether the agent is expected to stream the reply.
 * @param timeout - Milliseconds before the request rejects (default 30s).
 * @throws Error when no agent is connected, on timeout, or on send failure.
 */
export async function sendEdgeRequest(
  environmentId: number,
  method: string,
  path: string,
  body?: unknown,
  headers?: Record<string, string>,
  streaming = false,
  timeout = 30000
): Promise<EdgeResponse> {
  const connection = edgeConnections.get(environmentId);
  if (!connection) {
    throw new Error('Edge agent not connected');
  }
  const requestId = crypto.randomUUID();
  return new Promise((resolve, reject) => {
    // Timer that fails the request and removes both registrations.
    const timeoutHandle = setTimeout(() => {
      connection.pendingRequests.delete(requestId);
      if (streaming) {
        connection.pendingStreamRequests.delete(requestId);
      }
      reject(new Error('Request timeout'));
    }, timeout);
    // For streaming requests, the Go agent sends 'stream' messages instead of a single 'response'.
    // We need to register a stream handler that collects all data and resolves when complete.
    if (streaming) {
      // Initialize pendingStreamRequests if not present (dev mode HMR safety)
      if (!connection.pendingStreamRequests) {
        connection.pendingStreamRequests = new Map();
      }
      const chunks: Buffer[] = [];
      connection.pendingStreamRequests.set(requestId, {
        onData: (data: string, stream?: 'stdout' | 'stderr') => {
          // Data is base64 encoded from Go agent
          try {
            const decoded = Buffer.from(data, 'base64');
            chunks.push(decoded);
          } catch {
            // If not base64, use as-is
            chunks.push(Buffer.from(data));
          }
        },
        onEnd: (reason?: string) => {
          clearTimeout(timeoutHandle);
          connection.pendingRequests.delete(requestId);
          connection.pendingStreamRequests.delete(requestId);
          // Combine all chunks and return as response
          const combined = Buffer.concat(chunks);
          resolve({
            statusCode: 200,
            headers: {},
            body: combined,
            isBinary: true
          });
        },
        onError: (error: string) => {
          clearTimeout(timeoutHandle);
          connection.pendingRequests.delete(requestId);
          connection.pendingStreamRequests.delete(requestId);
          reject(new Error(error));
        }
      });
    }
    // Also register in pendingRequests in case the agent sends a 'response' instead of 'stream'
    // (e.g., for error responses or non-streaming paths)
    connection.pendingRequests.set(requestId, {
      resolve: (response: EdgeResponse) => {
        clearTimeout(timeoutHandle);
        if (streaming) {
          connection.pendingStreamRequests.delete(requestId);
        }
        resolve(response);
      },
      reject: (error: Error) => {
        clearTimeout(timeoutHandle);
        if (streaming) {
          connection.pendingStreamRequests.delete(requestId);
        }
        reject(error);
      },
      timeout: timeoutHandle
    });
    const message: RequestMessage = {
      type: MessageType.REQUEST,
      requestId,
      method,
      path,
      headers: headers || {},
      body: body, // Body is already an object, will be serialized by JSON.stringify(message)
      streaming
    };
    const messageStr = JSON.stringify(message);
    // In dev mode, use the global send function from vite.config.ts
    // In production, use the WebSocket directly
    if (globalThis.__hawserSendMessage) {
      const sent = globalThis.__hawserSendMessage(environmentId, messageStr);
      if (!sent) {
        // Send failed: undo both registrations and disarm the timer.
        connection.pendingRequests.delete(requestId);
        if (streaming) {
          connection.pendingStreamRequests.delete(requestId);
        }
        clearTimeout(timeoutHandle);
        reject(new Error('Failed to send message'));
      }
    } else {
      try {
        connection.ws.send(messageStr);
      } catch (sendError) {
        console.error(`[Hawser Edge] Error sending message:`, sendError);
        // Send failed: undo both registrations and disarm the timer.
        connection.pendingRequests.delete(requestId);
        if (streaming) {
          connection.pendingStreamRequests.delete(requestId);
        }
        clearTimeout(timeoutHandle);
        reject(sendError as Error);
      }
    }
  });
}
/**
 * Send a streaming request to a Hawser agent
 * Returns a cancel function to stop the stream
 *
 * Unlike sendEdgeRequest, chunks are delivered to the caller's callbacks as
 * they arrive rather than buffered. There is no timeout: the stream stays
 * open until the agent ends it, an error occurs, or cancel() is invoked.
 * On any setup failure the returned requestId is '' and cancel is a no-op.
 */
export function sendEdgeStreamRequest(
  environmentId: number,
  method: string,
  path: string,
  callbacks: {
    onData: (data: string, stream?: 'stdout' | 'stderr') => void;
    onEnd: (reason?: string) => void;
    onError: (error: string) => void;
  },
  body?: unknown,
  headers?: Record<string, string>
): { requestId: string; cancel: () => void } {
  const connection = edgeConnections.get(environmentId);
  if (!connection) {
    callbacks.onError('Edge agent not connected');
    return { requestId: '', cancel: () => {} };
  }
  const requestId = crypto.randomUUID();
  // Initialize pendingStreamRequests if not present (can happen in dev mode due to HMR)
  if (!connection.pendingStreamRequests) {
    connection.pendingStreamRequests = new Map();
  }
  connection.pendingStreamRequests.set(requestId, {
    onData: callbacks.onData,
    onEnd: callbacks.onEnd,
    onError: callbacks.onError
  });
  const message: RequestMessage = {
    type: MessageType.REQUEST,
    requestId,
    method,
    path,
    headers: headers || {},
    body: body, // Body is already an object, will be serialized by JSON.stringify(message)
    streaming: true
  };
  const messageStr = JSON.stringify(message);
  // In dev mode, use the global send function from vite.config.ts
  // In production, use the WebSocket directly
  if (globalThis.__hawserSendMessage) {
    const sent = globalThis.__hawserSendMessage(environmentId, messageStr);
    if (!sent) {
      // Setup failed: deregister before reporting the error.
      connection.pendingStreamRequests.delete(requestId);
      callbacks.onError('Failed to send message');
      return { requestId: '', cancel: () => {} };
    }
  } else {
    try {
      connection.ws.send(messageStr);
    } catch (sendError) {
      console.error(`[Hawser Edge] Error sending streaming message:`, sendError);
      connection.pendingStreamRequests.delete(requestId);
      callbacks.onError(sendError instanceof Error ? sendError.message : String(sendError));
      return { requestId: '', cancel: () => {} };
    }
  }
  return {
    requestId,
    cancel: () => {
      // Deregister locally, then ask the agent to stop producing.
      connection.pendingStreamRequests.delete(requestId);
      // Send stream_end message to agent to stop the stream
      const cancelMessage: StreamEndMessage = {
        type: 'stream_end',
        requestId,
        reason: 'cancelled'
      };
      try {
        connection.ws.send(JSON.stringify(cancelMessage));
      } catch {
        // Connection may already be closed, ignore
      }
    }
  };
}
/**
* Handle incoming stream data from Hawser agent
*/
export function handleEdgeStreamData(environmentId: number, message: StreamMessage): void {
const connection = edgeConnections.get(environmentId);
if (!connection) {
console.warn(`[Hawser] Stream data for unknown environment ${environmentId}, requestId=${message.requestId}`);
return;
}
const pending = connection.pendingStreamRequests.get(message.requestId);
if (!pending) {
console.warn(`[Hawser] Stream data for unknown request ${message.requestId} on env ${environmentId}`);
return;
}
pending.onData(message.data, message.stream);
}
/**
* Handle stream end from Hawser agent
*/
export function handleEdgeStreamEnd(environmentId: number, message: StreamEndMessage): void {
const connection = edgeConnections.get(environmentId);
if (!connection) {
console.warn(`[Hawser] Stream end for unknown environment ${environmentId}, requestId=${message.requestId}`);
return;
}
const pending = connection.pendingStreamRequests.get(message.requestId);
if (!pending) {
console.warn(`[Hawser] Stream end for unknown request ${message.requestId} on env ${environmentId}`);
return;
}
connection.pendingStreamRequests.delete(message.requestId);
pending.onEnd(message.reason);
}
/**
* Handle incoming response from Hawser agent
*/
export function handleEdgeResponse(environmentId: number, response: ResponseMessage): void {
const connection = edgeConnections.get(environmentId);
if (!connection) {
console.warn(`[Hawser] Response for unknown environment ${environmentId}, requestId=${response.requestId}`);
return;
}
const pending = connection.pendingRequests.get(response.requestId);
if (!pending) {
console.warn(`[Hawser] Response for unknown request ${response.requestId} on env ${environmentId}`);
return;
}
clearTimeout(pending.timeout);
connection.pendingRequests.delete(response.requestId);
pending.resolve({
statusCode: response.statusCode,
headers: response.headers || {},
body: response.body || '',
isBinary: response.isBinary || false
});
}
/**
* Handle heartbeat from agent
*/
export function handleHeartbeat(environmentId: number): void {
const connection = edgeConnections.get(environmentId);
if (connection) {
connection.lastHeartbeat = new Date();
}
}
/**
* Handle connection close
*/
export function handleDisconnect(environmentId: number): void {
const connection = edgeConnections.get(environmentId);
if (connection) {
// Reject all pending requests
for (const [, pending] of connection.pendingRequests) {
clearTimeout(pending.timeout);
pending.reject(new Error('Connection closed'));
}
// End all pending stream requests
for (const [, pending] of connection.pendingStreamRequests) {
pending.onEnd('Connection closed');
}
edgeConnections.delete(environmentId);
updateEnvironmentStatus(environmentId, null);
}
}
/**
 * Whether an Edge agent socket is currently registered for the environment.
 */
export function isEdgeConnected(environmentId: number): boolean {
  return edgeConnections.get(environmentId) !== undefined;
}
/**
 * Get connection info for an environment
 * @returns The live connection record, or undefined when no agent is connected.
 */
export function getEdgeConnectionInfo(environmentId: number): EdgeConnection | undefined {
  return edgeConnections.get(environmentId);
}
/**
 * Get all active connections
 *
 * NOTE: returns the live internal Map (not a copy); callers must not mutate it.
 */
export function getAllEdgeConnections(): Map<number, EdgeConnection> {
  return edgeConnections;
}
// Message type definitions
// Wire-level envelopes exchanged with Hawser agents; field names and literal
// `type` tags must match the agent's protocol.

// agent -> server: initial handshake (validated via token before WELCOME).
export interface HelloMessage {
  type: 'hello';
  version: string;
  agentId: string;
  agentName: string;
  token: string;
  dockerVersion: string;
  hostname: string;
  capabilities: string[];
}
// server -> agent: handshake accepted; tells the agent its environment.
export interface WelcomeMessage {
  type: 'welcome';
  environmentId: number;
  message?: string;
}
// server -> agent: proxied API request, correlated by requestId.
export interface RequestMessage {
  type: 'request';
  requestId: string;
  method: string;
  path: string;
  headers?: Record<string, string>;
  body?: unknown; // JSON-serializable object, will be serialized when message is stringified
  streaming?: boolean;
}
// agent -> server: single reply to a RequestMessage.
export interface ResponseMessage {
  type: 'response';
  requestId: string;
  statusCode: number;
  headers?: Record<string, string>;
  body?: string;
  isBinary?: boolean;
}
// agent -> server: one chunk of a streaming reply (data is base64-encoded).
export interface StreamMessage {
  type: 'stream';
  requestId: string;
  data: string;
  stream?: 'stdout' | 'stderr';
}
// either direction: stream finished (agent) or cancelled (server).
export interface StreamEndMessage {
  type: 'stream_end';
  requestId: string;
  reason?: string;
}
// agent -> server: periodic host metrics sample.
export interface MetricsMessage {
  type: 'metrics';
  timestamp: number;
  metrics: {
    cpuUsage: number; // raw percentage summed across all cores (see handleEdgeMetrics)
    cpuCores: number;
    memoryTotal: number;
    memoryUsed: number;
    memoryFree: number;
    diskTotal: number;
    diskUsed: number;
    diskFree: number;
    networkRxBytes: number;
    networkTxBytes: number;
    uptime: number;
  };
}
// either direction: error report, optionally tied to a request.
export interface ErrorMessage {
  type: 'error';
  requestId?: string;
  error: string;
  code?: string;
}
// Exec message types for bidirectional terminal
export interface ExecStartMessage {
  type: 'exec_start';
  execId: string;
  containerId: string;
  cmd: string;
  user: string;
  cols: number;
  rows: number;
}
export interface ExecReadyMessage {
  type: 'exec_ready';
  execId: string;
}
export interface ExecInputMessage {
  type: 'exec_input';
  execId: string;
  data: string; // Base64-encoded
}
export interface ExecOutputMessage {
  type: 'exec_output';
  execId: string;
  data: string; // Base64-encoded
}
export interface ExecResizeMessage {
  type: 'exec_resize';
  execId: string;
  cols: number;
  rows: number;
}
export interface ExecEndMessage {
  type: 'exec_end';
  execId: string;
  reason?: string;
}
// agent -> server: a Docker container lifecycle event (see handleEdgeContainerEvent).
export interface ContainerEventMessage {
  type: 'container_event';
  event: {
    containerId: string;
    containerName?: string;
    image?: string;
    action: string;
    actorAttributes?: Record<string, string>;
    timestamp: string;
  };
}
// Discriminated union over the `type` tag of every protocol message.
export type HawserMessage =
  | HelloMessage
  | WelcomeMessage
  | RequestMessage
  | ResponseMessage
  | StreamMessage
  | StreamEndMessage
  | MetricsMessage
  | ErrorMessage
  | ExecStartMessage
  | ExecReadyMessage
  | ExecInputMessage
  | ExecOutputMessage
  | ExecResizeMessage
  | ExecEndMessage
  | ContainerEventMessage
  | { type: 'ping'; timestamp: number }
  | { type: 'pong'; timestamp: number };

View File

@@ -1,253 +0,0 @@
import crypto from 'node:crypto';
import os from 'node:os';
import { getSetting, setSetting } from './db';
import { sendEventNotification } from './notifications';
// RSA Public Key for license verification
// This key can only VERIFY signatures, not create them
// The private key is kept secret and used only for license generation
const LICENSE_PUBLIC_KEY = `-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoGJOObrKQyOPrDC+xSVh
Cq5WeUQqwvAl2xEoI5iOhJtHIvnlxayc2UKt9D5WVWS0dgzi41L7VD2OjTayrbL8
RxPXYh0EfMtnKoJZyFwN1XdlYk8yUjs2TRXnrw8Y+riuMjFWgUHmWUQTA7yBnJG6
9efCMUDREHwGglPIKhTstQfSqi2fNO1GCgY1W7JCMnE8CCpwLGvLodbWFUe1CwT0
OgRZRNWPljc/cX5DLSaB1RXFUnBM4O9YalNCNOR3HvEV/8HULFtDpZT0ZwRbC3K3
R8GFY97lrqADuWVaEdRRYdr402eAcd4DnRT62OjpEllNbRI3U5Wyj6EmYm3Cmc9Q
GwIDAQAB
-----END PUBLIC KEY-----`;

export type LicenseType = 'enterprise' | 'smb';

/** Decoded contents of a license key's payload segment. */
export interface LicensePayload {
  name: string;           // licensee name
  host: string;           // bound hostname; '*' = any host, '*.x' = subdomain wildcard
  issued: string;
  expires: string | null; // null = no expiration
  type: LicenseType;
  v?: number; // Version: 2 = RSA signed
}

/** Result of validating a license key. */
export interface LicenseStatus {
  valid: boolean;
  active: boolean;
  payload?: LicensePayload; // present whenever the payload could be decoded
  error?: string;
}

/** Shape of the license record persisted in settings. */
export interface StoredLicense {
  name: string;
  key: string;
  activated_at: string;
}

/**
 * Validates a license key using RSA-SHA256 signature verification.
 *
 * A license key is `<base64url payload>.<base64 signature>`. The signature is
 * verified against the embedded public key, then the decoded payload's
 * expiration and host binding are enforced.
 */
export function validateLicense(licenseKey: string, currentHost?: string): LicenseStatus {
  try {
    // Strip all whitespace/newlines so pasted keys still validate.
    const normalized = licenseKey.replace(/\s+/g, '');
    const segments = normalized.split('.');
    if (segments.length !== 2) {
      return { valid: false, active: false, error: 'Invalid license format' };
    }
    const [payloadBase64, signature] = segments;
    // The signature covers the (still-encoded) payload segment.
    const verifier = crypto.createVerify('RSA-SHA256');
    verifier.update(payloadBase64);
    if (!verifier.verify(LICENSE_PUBLIC_KEY, signature, 'base64url')) {
      return { valid: false, active: false, error: 'Invalid license signature' };
    }
    const payload: LicensePayload = JSON.parse(
      Buffer.from(payloadBase64, 'base64url').toString()
    );
    // Expiration check (null expires = perpetual license).
    if (payload.expires && new Date(payload.expires) < new Date()) {
      return { valid: false, active: false, error: 'License has expired', payload };
    }
    // Host binding: '*' matches anything; '*.example.com' matches subdomains.
    const hostToCheck = currentHost || os.hostname();
    if (payload.host !== '*') {
      const exactMatch = payload.host === hostToCheck;
      const wildcardMatch =
        payload.host.startsWith('*.') && hostToCheck.endsWith(payload.host.slice(1));
      if (!exactMatch && !wildcardMatch) {
        return {
          valid: false,
          active: false,
          error: `License is not valid for this host (${hostToCheck})`,
          payload
        };
      }
    }
    return { valid: true, active: true, payload };
  } catch (error) {
    return {
      valid: false,
      active: false,
      error: `License validation failed: ${error instanceof Error ? error.message : 'Unknown error'}`
    };
  }
}
/**
 * Gets the currently stored license.
 * Reads the 'enterprise_license' setting; resolves null when none is stored.
 */
export async function getStoredLicense(): Promise<StoredLicense | null> {
  const stored = await getSetting('enterprise_license');
  return stored;
}
/**
 * Stores and activates a license.
 *
 * Verifies signature, expiry and host binding (via validateLicense with the
 * Docker-aware hostname), checks the licensee name matches, then persists
 * the cleaned key under the 'enterprise_license' setting.
 */
export async function activateLicense(
  name: string,
  key: string
): Promise<{ success: boolean; error?: string; license?: StoredLicense }> {
  // Strip whitespace/newlines from pasted keys.
  const cleanKey = key.replace(/\s+/g, '');
  const trimmedName = name.trim();

  const validation = validateLicense(cleanKey, getHostname());
  if (!validation.valid) {
    return { success: false, error: validation.error };
  }

  // The licensee name entered by the user must match the signed payload.
  if (validation.payload && validation.payload.name !== trimmedName) {
    return {
      success: false,
      error: `License name mismatch. Expected "${validation.payload.name}", got "${trimmedName}"`
    };
  }

  const license: StoredLicense = {
    name: trimmedName,
    key: cleanKey,
    activated_at: new Date().toISOString()
  };
  await setSetting('enterprise_license', license);
  return { success: true, license };
}
/**
 * Removes the current license
 *
 * Clears the stored 'enterprise_license' setting; always resolves true.
 */
export async function deactivateLicense(): Promise<boolean> {
  await setSetting('enterprise_license', null);
  return true;
}
/**
 * Checks if the current installation has an active enterprise license.
 * Re-validates the stored key against the current hostname on every call.
 */
export async function isEnterprise(): Promise<boolean> {
  const stored = await getStoredLicense();
  if (!stored?.key) return false;
  const result = validateLicense(stored.key, getHostname());
  // Only true for enterprise licenses (SMB does not unlock enterprise features)
  return result.valid && result.active && result.payload?.type === 'enterprise';
}
/**
 * Gets the license type if a valid license is active, null otherwise.
 */
export async function getLicenseType(): Promise<LicenseType | null> {
  const stored = await getStoredLicense();
  if (!stored?.key) return null;
  const result = validateLicense(stored.key, getHostname());
  return result.valid && result.active && result.payload ? result.payload.type : null;
}
/**
 * Gets the full license status including validation details and, when
 * present, the stored license record itself.
 */
export async function getLicenseStatus(): Promise<LicenseStatus & { stored?: StoredLicense }> {
  const stored = await getStoredLicense();
  if (!stored?.key) {
    return { valid: false, active: false };
  }
  return { ...validateLicense(stored.key, getHostname()), stored };
}
/**
 * Gets the current hostname for license validation.
 *
 * In Docker: DOCKHAND_HOSTNAME is set by the entrypoint script from Docker API.
 * Outside Docker: Falls back to os.hostname().
 */
export function getHostname(): string {
  const dockerHostname = process.env.DOCKHAND_HOSTNAME;
  return dockerHostname ? dockerHostname : os.hostname();
}
// Track when we last sent a license expiring notification
// (epoch ms of the last send; null until the first one goes out; resets on restart)
let lastLicenseExpiryNotification: number | null = null;
const LICENSE_EXPIRY_NOTIFICATION_COOLDOWN = 86400000; // 24 hours between notifications
const LICENSE_EXPIRY_WARNING_DAYS = 30; // Warn when license expires within 30 days
/**
 * Check if the enterprise license is expiring soon and send notification
 * Call this periodically (e.g., on startup and daily)
 *
 * No-op when there is no active license or no expiry date. Warnings are
 * rate-limited by module-level cooldown state; errors are logged, never thrown.
 */
export async function checkLicenseExpiry(): Promise<void> {
  try {
    const status = await getLicenseStatus();
    // Only check if we have an active license with an expiry date
    if (!status.valid || !status.active || !status.payload?.expires) {
      return;
    }
    const expiryDate = new Date(status.payload.expires);
    const now = new Date();
    // Ceil so a license expiring later today still counts as 1 day remaining.
    const daysUntilExpiry = Math.ceil((expiryDate.getTime() - now.getTime()) / (1000 * 60 * 60 * 24));
    // Check if expiring within warning threshold
    if (daysUntilExpiry > 0 && daysUntilExpiry <= LICENSE_EXPIRY_WARNING_DAYS) {
      // Check cooldown to avoid spamming (module-level state, resets on restart)
      if (lastLicenseExpiryNotification && Date.now() - lastLicenseExpiryNotification < LICENSE_EXPIRY_NOTIFICATION_COOLDOWN) {
        return;
      }
      const licenseTypeName = status.payload.type === 'enterprise' ? 'Enterprise' : 'SMB';
      console.log(`[License] ${licenseTypeName} license expiring in ${daysUntilExpiry} days`);
      await sendEventNotification('license_expiring', {
        title: 'License expiring soon',
        message: `Your ${licenseTypeName} license expires in ${daysUntilExpiry} day${daysUntilExpiry === 1 ? '' : 's'} (${expiryDate.toLocaleDateString()}). Contact support to renew.`,
        type: 'warning'
      });
      // Record the send time after the notification attempt completes.
      lastLicenseExpiryNotification = Date.now();
    }
  } catch (error) {
    console.error('[License] Failed to check license expiry:', error);
  }
}

View File

@@ -1,271 +0,0 @@
import { saveHostMetric, getEnvironments, getEnvSetting } from './db';
import { listContainers, getContainerStats, getDockerInfo, getDiskUsage } from './docker';
import { sendEventNotification } from './notifications';
import os from 'node:os';
const COLLECT_INTERVAL = 10000; // 10 seconds
const DISK_CHECK_INTERVAL = 300000; // 5 minutes
const DEFAULT_DISK_THRESHOLD = 80; // 80% threshold for disk warnings
// Interval handles; null while the collector is stopped.
let collectorInterval: ReturnType<typeof setInterval> | null = null;
let diskCheckInterval: ReturnType<typeof setInterval> | null = null;
// Track last disk warning sent per environment to avoid spamming
const lastDiskWarning: Map<number, number> = new Map(); // env id -> epoch ms of last warning
const DISK_WARNING_COOLDOWN = 3600000; // 1 hour between warnings
/**
 * Collect metrics for a single environment
 *
 * Aggregates per-container CPU/memory stats into one host-level sample and
 * persists it via saveHostMetric. Failures are logged and swallowed so one
 * offline environment cannot break the collection sweep.
 */
async function collectEnvMetrics(env: { id: number; name: string; collectMetrics?: boolean }) {
  try {
    // Skip environments where metrics collection is disabled
    if (env.collectMetrics === false) {
      return;
    }
    // Get running containers
    const containers = await listContainers(false, env.id); // Only running
    let totalCpuPercent = 0;
    let totalMemUsed = 0;
    // Get stats for each running container (concurrently; see Promise.all below)
    const statsPromises = containers.map(async (container) => {
      try {
        const stats = await getContainerStats(container.id, env.id) as any;
        // Calculate CPU percentage from the delta between the current and
        // previous sample, as provided by the Docker stats payload.
        const cpuDelta = stats.cpu_stats.cpu_usage.total_usage - stats.precpu_stats.cpu_usage.total_usage;
        const systemDelta = stats.cpu_stats.system_cpu_usage - stats.precpu_stats.system_cpu_usage;
        const cpuCount = stats.cpu_stats.online_cpus || os.cpus().length;
        let cpuPercent = 0;
        if (systemDelta > 0 && cpuDelta > 0) {
          cpuPercent = (cpuDelta / systemDelta) * cpuCount * 100;
        }
        // Get container memory usage
        const memUsage = stats.memory_stats?.usage || 0;
        const memCache = stats.memory_stats?.stats?.cache || 0;
        // Subtract cache from usage to get actual memory used by the container
        const actualMemUsed = memUsage - memCache;
        return { cpu: cpuPercent, mem: actualMemUsed > 0 ? actualMemUsed : memUsage };
      } catch {
        // Container may have exited mid-collection; count it as zero usage.
        return { cpu: 0, mem: 0 };
      }
    });
    const statsResults = await Promise.all(statsPromises);
    totalCpuPercent = statsResults.reduce((sum, v) => sum + v.cpu, 0);
    totalMemUsed = statsResults.reduce((sum, v) => sum + v.mem, 0);
    // Get host total memory from Docker info (this is the remote host's memory)
    const info = await getDockerInfo(env.id) as any;
    const memTotal = info.MemTotal || os.totalmem();
    // Calculate memory percentage based on container usage vs host total
    const memPercent = memTotal > 0 ? (totalMemUsed / memTotal) * 100 : 0;
    // Normalize CPU by number of cores from the remote host
    // (local os.cpus() fallback may be wrong for remote hosts — best effort)
    const cpuCount = info.NCPU || os.cpus().length;
    const normalizedCpu = totalCpuPercent / cpuCount;
    // Save to database
    await saveHostMetric(
      normalizedCpu,
      memPercent,
      totalMemUsed,
      memTotal,
      env.id
    );
  } catch (error) {
    // Skip this environment if it fails (might be offline)
    console.error(`Failed to collect metrics for ${env.name}:`, error);
  }
}
/**
 * Collect host metrics for every environment with metrics enabled.
 * Environments are processed concurrently; per-environment failures are
 * handled inside collectEnvMetrics and do not abort the sweep.
 */
async function collectMetrics() {
  try {
    const environments = await getEnvironments();
    const targets = environments.filter((env) => env.collectMetrics !== false);
    await Promise.all(targets.map((env) => collectEnvMetrics(env)));
  } catch (error) {
    console.error('Metrics collection error:', error);
  }
}
/**
 * Check disk space for a single environment
 *
 * Sums Docker-reported usage (images, containers, volumes, build cache) and
 * sends a 'disk_space_warning' notification when usage crosses the threshold.
 * A per-environment cooldown prevents repeated warnings; errors are logged.
 */
async function checkEnvDiskSpace(env: { id: number; name: string; collectMetrics?: boolean }) {
  try {
    // Skip environments where metrics collection is disabled
    if (env.collectMetrics === false) {
      return;
    }
    // Check if we're in cooldown for this environment
    const lastWarningTime = lastDiskWarning.get(env.id);
    if (lastWarningTime && Date.now() - lastWarningTime < DISK_WARNING_COOLDOWN) {
      return; // Skip this environment, still in cooldown
    }
    // Get Docker disk usage data
    const diskData = await getDiskUsage(env.id) as any;
    if (!diskData) return;
    // Calculate total Docker disk usage using reduce for cleaner code
    let totalUsed = 0;
    if (diskData.Images) {
      totalUsed += diskData.Images.reduce((sum: number, img: any) => sum + (img.Size || 0), 0);
    }
    if (diskData.Containers) {
      totalUsed += diskData.Containers.reduce((sum: number, c: any) => sum + (c.SizeRw || 0), 0);
    }
    if (diskData.Volumes) {
      totalUsed += diskData.Volumes.reduce((sum: number, v: any) => sum + (v.UsageData?.Size || 0), 0);
    }
    if (diskData.BuildCache) {
      totalUsed += diskData.BuildCache.reduce((sum: number, bc: any) => sum + (bc.Size || 0), 0);
    }
    // Get Docker root filesystem info from Docker info
    const info = await getDockerInfo(env.id) as any;
    const driverStatus = info?.DriverStatus;
    // Try to find "Data Space Total" from driver status
    // NOTE(review): only some storage drivers report this pair — confirm per driver
    let dataSpaceTotal = 0;
    let diskPercentUsed = 0;
    if (driverStatus) {
      for (const [key, value] of driverStatus) {
        if (key === 'Data Space Total' && typeof value === 'string') {
          dataSpaceTotal = parseSize(value);
          break;
        }
      }
    }
    // If we found total disk space, calculate percentage
    if (dataSpaceTotal > 0) {
      diskPercentUsed = (totalUsed / dataSpaceTotal) * 100;
    } else {
      // Fallback: just report absolute usage if we can't determine percentage
      // (fixed 50 GB trip point since no percentage threshold is computable)
      const GB = 1024 * 1024 * 1024;
      if (totalUsed > 50 * GB) {
        await sendEventNotification('disk_space_warning', {
          title: 'High Docker disk usage',
          message: `Environment "${env.name}" is using ${formatSize(totalUsed)} of Docker disk space`,
          type: 'warning'
        }, env.id);
        lastDiskWarning.set(env.id, Date.now());
      }
      return;
    }
    // Check against threshold (per-environment setting, default 80%)
    const threshold = await getEnvSetting('disk_warning_threshold', env.id) || DEFAULT_DISK_THRESHOLD;
    if (diskPercentUsed >= threshold) {
      console.log(`[Metrics] Docker disk usage for ${env.name}: ${diskPercentUsed.toFixed(1)}% (threshold: ${threshold}%)`);
      await sendEventNotification('disk_space_warning', {
        title: 'Disk space warning',
        message: `Environment "${env.name}" Docker disk usage is at ${diskPercentUsed.toFixed(1)}% (${formatSize(totalUsed)} used)`,
        type: 'warning'
      }, env.id);
      lastDiskWarning.set(env.id, Date.now());
    }
  } catch (error) {
    // Skip this environment if it fails
    console.error(`Failed to check disk space for ${env.name}:`, error);
  }
}
/**
* Check Docker disk usage and send warnings if above threshold
*/
async function checkDiskSpace() {
try {
const environments = await getEnvironments();
// Filter enabled environments and check disk space in parallel
const enabledEnvs = environments.filter(env => env.collectMetrics !== false);
// Process all environments in parallel for better performance
await Promise.all(enabledEnvs.map(env => checkEnvDiskSpace(env)));
} catch (error) {
console.error('Disk space check error:', error);
}
}
/**
 * Parse size string like "107.4GB" to bytes
 *
 * Accepts decimal units (KB/MB/GB/TB), binary units (KiB/MiB/GiB/TiB) and
 * bare bytes ("512B"), case-insensitively and with optional whitespace
 * before the unit. Both unit families map to 1024-based multipliers, which
 * matches the original decimal-unit behavior. Returns 0 for unparseable input.
 */
function parseSize(sizeStr: string): number {
  const units: Record<string, number> = {
    'B': 1,
    'KB': 1024,
    'MB': 1024 * 1024,
    'GB': 1024 * 1024 * 1024,
    'TB': 1024 * 1024 * 1024 * 1024
  };
  // Optional 'i' accepts binary-unit spellings like "2.5 GiB".
  const match = sizeStr.match(/^([\d.]+)\s*([KMGT]i?B|B)$/i);
  if (!match) return 0;
  const value = parseFloat(match[1]);
  // Normalize "GiB" -> "GB" so one lookup table covers both spellings.
  const unit = match[2].toUpperCase().replace('I', '');
  return value * (units[unit] || 1);
}
/**
 * Format bytes to human readable string
 *
 * Divides by 1024 until the value drops below one unit (capped at TB),
 * then renders with one decimal place, e.g. 1536 -> "1.5 KB".
 */
function formatSize(bytes: number): string {
  const units = ['B', 'KB', 'MB', 'GB', 'TB'];
  let idx = 0;
  let value = bytes;
  for (; value >= 1024 && idx < units.length - 1; idx++) {
    value /= 1024;
  }
  return `${value.toFixed(1)} ${units[idx]}`;
}
/**
 * Start periodic host-metric collection and disk-space monitoring.
 * Idempotent: calling again while the collector is running is a no-op.
 */
export function startMetricsCollector() {
  if (collectorInterval) return; // Already running
  console.log('Starting server-side metrics collector (every 10s)');
  // Take an immediate sample, then sample on a fixed interval.
  collectMetrics();
  collectorInterval = setInterval(collectMetrics, COLLECT_INTERVAL);
  console.log('Starting disk space monitoring (every 5 minutes)');
  // Same pattern for the (less frequent) disk-space check.
  checkDiskSpace();
  diskCheckInterval = setInterval(checkDiskSpace, DISK_CHECK_INTERVAL);
}
/**
 * Stop metric collection and disk monitoring, clearing the timers and the
 * per-environment disk-warning cooldown state.
 */
export function stopMetricsCollector() {
  if (collectorInterval !== null) {
    clearInterval(collectorInterval);
    collectorInterval = null;
  }
  if (diskCheckInterval !== null) {
    clearInterval(diskCheckInterval);
    diskCheckInterval = null;
  }
  lastDiskWarning.clear();
  console.log('Metrics collector stopped');
}

View File

@@ -1,499 +0,0 @@
import nodemailer from 'nodemailer';
import {
getEnabledNotificationSettings,
getEnabledEnvironmentNotifications,
getEnvironment,
type NotificationSettingData,
type SmtpConfig,
type AppriseConfig,
type NotificationEventType
} from './db';
/** Content of a single outgoing notification, plus optional environment context. */
export interface NotificationPayload {
  title: string;
  message: string;
  type?: 'info' | 'success' | 'warning' | 'error'; // drives per-channel color/priority
  environmentId?: number;
  environmentName?: string; // appended to titles/subjects when present
}
// Send notification via SMTP
// Returns true on success; failures are logged and reported as false.
async function sendSmtpNotification(config: SmtpConfig, payload: NotificationPayload): Promise<boolean> {
  try {
    const transporter = nodemailer.createTransport({
      host: config.host,
      port: config.port,
      secure: config.secure,
      auth: config.username ? {
        user: config.username,
        pass: config.password
      } : undefined
    });
    // Escape notification text before interpolating into the HTML body:
    // titles/messages can carry container names or log fragments containing
    // markup characters, which would otherwise inject HTML into the email.
    const escapeHtml = (text: string): string =>
      text
        .replace(/&/g, '&amp;')
        .replace(/</g, '&lt;')
        .replace(/>/g, '&gt;')
        .replace(/"/g, '&quot;');
    const envBadge = payload.environmentName
      ? `<span style="display: inline-block; background: #3b82f6; color: white; padding: 2px 8px; border-radius: 4px; font-size: 12px; margin-left: 8px;">${escapeHtml(payload.environmentName)}</span>`
      : '';
    const envText = payload.environmentName ? ` [${payload.environmentName}]` : '';
    const html = `
      <div style="font-family: sans-serif; padding: 20px;">
        <h2 style="margin: 0 0 10px 0;">${escapeHtml(payload.title)}${envBadge}</h2>
        <p style="margin: 0; white-space: pre-wrap;">${escapeHtml(payload.message)}</p>
        <hr style="margin: 20px 0; border: none; border-top: 1px solid #eee;">
        <p style="margin: 0; font-size: 12px; color: #666;">Sent by Dockhand</p>
      </div>
    `;
    await transporter.sendMail({
      from: config.from_name ? `"${config.from_name}" <${config.from_email}>` : config.from_email,
      to: config.to_emails.join(', '),
      subject: `[Dockhand]${envText} ${payload.title}`,
      text: `${payload.title}${envText}\n\n${payload.message}`,
      html
    });
    return true;
  } catch (error) {
    console.error('[Notifications] SMTP send failed:', error);
    return false;
  }
}
// Parse Apprise URL and send notification
// Attempts every configured URL; returns false if any delivery failed.
async function sendAppriseNotification(config: AppriseConfig, payload: NotificationPayload): Promise<boolean> {
  let allDelivered = true;
  for (const url of config.urls) {
    try {
      if (!(await sendToAppriseUrl(url, payload))) {
        allDelivered = false;
      }
    } catch (error) {
      console.error(`[Notifications] Failed to send to ${url}:`, error);
      allDelivered = false;
    }
  }
  return allDelivered;
}
// Send to a single Apprise URL
// Dispatches on the URL scheme to the matching service-specific sender.
async function sendToAppriseUrl(url: string, payload: NotificationPayload): Promise<boolean> {
  try {
    // Extract protocol from Apprise URL format (protocol://...)
    // Note: Can't use new URL() because custom schemes like 'tgram://' are not valid URLs
    const protocolMatch = url.match(/^([a-z]+):\/\//i);
    if (!protocolMatch) {
      console.error('[Notifications] Invalid Apprise URL format - missing protocol:', url);
      return false;
    }
    const protocol = protocolMatch[1].toLowerCase();
    // The trailing 's' variants denote TLS and share the same handler.
    const handlers: Record<string, (u: string, p: NotificationPayload) => Promise<boolean>> = {
      discord: sendDiscord,
      discords: sendDiscord,
      slack: sendSlack,
      slacks: sendSlack,
      tgram: sendTelegram,
      gotify: sendGotify,
      gotifys: sendGotify,
      ntfy: sendNtfy,
      ntfys: sendNtfy,
      pushover: sendPushover,
      json: sendGenericWebhook,
      jsons: sendGenericWebhook
    };
    const handler = handlers[protocol];
    if (!handler) {
      console.warn(`[Notifications] Unsupported Apprise protocol: ${protocol}`);
      return false;
    }
    return await handler(url, payload);
  } catch (error) {
    console.error('[Notifications] Failed to parse Apprise URL:', error);
    return false;
  }
}
// Discord webhook
async function sendDiscord(appriseUrl: string, payload: NotificationPayload): Promise<boolean> {
  // discord://webhook_id/webhook_token or discords://...
  const webhookUrl = appriseUrl.replace(/^discords?:\/\//, 'https://discord.com/api/webhooks/');
  const title = payload.environmentName ? `${payload.title} [${payload.environmentName}]` : payload.title;
  // Embed accent color by severity; default (info/unset) is blue.
  const colors: Record<string, number> = { error: 0xff0000, warning: 0xffaa00, success: 0x00ff00 };
  const embed: Record<string, unknown> = {
    title,
    description: payload.message,
    color: colors[payload.type ?? ''] ?? 0x0099ff
  };
  if (payload.environmentName) {
    embed.footer = { text: `Environment: ${payload.environmentName}` };
  }
  const response = await fetch(webhookUrl, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ embeds: [embed] })
  });
  return response.ok;
}
// Slack webhook
async function sendSlack(appriseUrl: string, payload: NotificationPayload): Promise<boolean> {
  // Accepts either a full hooks.slack.com webhook URL (possibly behind the
  // slack:// scheme) or the Apprise slack://token_a/token_b/token_c form.
  let url: string;
  if (appriseUrl.includes('hooks.slack.com')) {
    url = appriseUrl.replace(/^slacks?:\/\//, 'https://');
  } else {
    url = `https://hooks.slack.com/services/${appriseUrl.replace(/^slacks?:\/\//, '')}`;
  }
  const envTag = payload.environmentName ? ` \`${payload.environmentName}\`` : '';
  const response = await fetch(url, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      text: `*${payload.title}*${envTag}\n${payload.message}`
    })
  });
  return response.ok;
}
// Telegram
async function sendTelegram(appriseUrl: string, payload: NotificationPayload): Promise<boolean> {
  // tgram://bot_token/chat_id
  const parsed = appriseUrl.match(/^tgram:\/\/([^/]+)\/(.+)/);
  if (!parsed) {
    console.error('[Notifications] Invalid Telegram URL format. Expected: tgram://bot_token/chat_id');
    return false;
  }
  const botToken = parsed[1];
  const chatId = parsed[2];
  const endpoint = `https://api.telegram.org/bot${botToken}/sendMessage`;
  // Brackets are escaped because the message is sent with Markdown parsing.
  const envTag = payload.environmentName ? ` \\[${payload.environmentName}\\]` : '';
  try {
    const response = await fetch(endpoint, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        chat_id: chatId,
        text: `*${payload.title}*${envTag}\n${payload.message}`,
        parse_mode: 'Markdown'
      })
    });
    if (response.ok) return true;
    const errorData = await response.json().catch(() => ({}));
    console.error('[Notifications] Telegram API error:', response.status, errorData);
    return false;
  } catch (error) {
    console.error('[Notifications] Telegram send failed:', error);
    return false;
  }
}
// Gotify
async function sendGotify(appriseUrl: string, payload: NotificationPayload): Promise<boolean> {
  // gotify://hostname/token or gotifys://hostname/token (TLS)
  const parsed = appriseUrl.match(/^gotifys?:\/\/([^/]+)\/(.+)/);
  if (!parsed) return false;
  const scheme = appriseUrl.startsWith('gotifys') ? 'https' : 'http';
  const endpoint = `${scheme}://${parsed[1]}/message?token=${parsed[2]}`;
  // Gotify priority by severity; info/unset maps to 2.
  const priorities: Record<string, number> = { error: 8, warning: 5 };
  const response = await fetch(endpoint, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      title: payload.title,
      message: payload.message,
      priority: priorities[payload.type ?? ''] ?? 2
    })
  });
  return response.ok;
}
// ntfy
async function sendNtfy(appriseUrl: string, payload: NotificationPayload): Promise<boolean> {
  // ntfy://topic (ntfy.sh) or ntfys://hostname/topic (custom server)
  const secure = appriseUrl.startsWith('ntfys');
  const target = appriseUrl.replace(/^ntfys?:\/\//, '');
  const endpoint = target.includes('/')
    ? `${secure ? 'https' : 'http'}://${target}` // custom server with topic path
    : `https://ntfy.sh/${target}`; // bare topic on the default ntfy.sh
  const response = await fetch(endpoint, {
    method: 'POST',
    headers: {
      'Title': payload.title,
      'Priority': payload.type === 'error' ? '5' : payload.type === 'warning' ? '4' : '3',
      'Tags': payload.type || 'info'
    },
    body: payload.message
  });
  return response.ok;
}
// Pushover
async function sendPushover(appriseUrl: string, payload: NotificationPayload): Promise<boolean> {
  // pushover://user_key/api_token
  const parsed = appriseUrl.match(/^pushover:\/\/([^/]+)\/(.+)/);
  if (!parsed) return false;
  const response = await fetch('https://api.pushover.net/1/messages.json', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      token: parsed[2], // api_token
      user: parsed[1], // user_key
      title: payload.title,
      message: payload.message,
      priority: payload.type === 'error' ? 1 : 0
    })
  });
  return response.ok;
}
// Generic JSON webhook
async function sendGenericWebhook(appriseUrl: string, payload: NotificationPayload): Promise<boolean> {
  // json://hostname/path or jsons://hostname/path (TLS)
  const scheme = appriseUrl.startsWith('jsons') ? 'https://' : 'http://';
  const endpoint = appriseUrl.replace(/^jsons?:\/\//, scheme);
  const response = await fetch(endpoint, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      title: payload.title,
      message: payload.message,
      type: payload.type || 'info',
      timestamp: new Date().toISOString()
    })
  });
  return response.ok;
}
// Send notification to all enabled channels
// Returns per-channel outcomes plus an overall success flag.
export async function sendNotification(payload: NotificationPayload): Promise<{ success: boolean; results: { name: string; success: boolean }[] }> {
  const settings = await getEnabledNotificationSettings();
  const results: { name: string; success: boolean }[] = [];
  for (const setting of settings) {
    let delivered = false;
    switch (setting.type) {
      case 'smtp':
        delivered = await sendSmtpNotification(setting.config as SmtpConfig, payload);
        break;
      case 'apprise':
        delivered = await sendAppriseNotification(setting.config as AppriseConfig, payload);
        break;
    }
    results.push({ name: setting.name, success: delivered });
  }
  return { success: results.every((r) => r.success), results };
}
// Test a specific notification setting
// Sends a fixed test message through the given channel configuration.
export async function testNotification(setting: NotificationSettingData): Promise<boolean> {
  const payload: NotificationPayload = {
    title: 'Dockhand Test Notification',
    message: 'This is a test notification from Dockhand. If you receive this, your notification settings are configured correctly.',
    type: 'info'
  };
  switch (setting.type) {
    case 'smtp':
      return sendSmtpNotification(setting.config as SmtpConfig, payload);
    case 'apprise':
      return sendAppriseNotification(setting.config as AppriseConfig, payload);
    default:
      return false;
  }
}
// Map Docker action to notification event type
// Returns null for actions that are not notifiable.
function mapActionToEventType(action: string): NotificationEventType | null {
  const actionEventMap: Record<string, NotificationEventType> = {
    start: 'container_started',
    stop: 'container_stopped',
    restart: 'container_restarted',
    die: 'container_exited',
    kill: 'container_exited',
    oom: 'container_oom',
    'health_status: unhealthy': 'container_unhealthy',
    pull: 'image_pulled'
  };
  return actionEventMap[action] ?? null;
}
// Scanner image patterns to exclude from notifications
const SCANNER_IMAGE_PATTERNS = [
  'anchore/grype',
  'aquasec/trivy',
  'ghcr.io/anchore/grype',
  'ghcr.io/aquasecurity/trivy'
];
// True when the image matches a known vulnerability-scanner image
// (case-insensitive substring match); null/undefined images never match.
function isScannerContainer(image: string | null | undefined): boolean {
  if (!image) return false;
  const needle = image.toLowerCase();
  for (const pattern of SCANNER_IMAGE_PATTERNS) {
    if (needle.includes(pattern.toLowerCase())) return true;
  }
  return false;
}
// Send notification for an environment-specific event
// Maps the Docker action to an event type, resolves the environment and its
// subscribed channels, and delivers to each. Returns how many sends succeeded.
export async function sendEnvironmentNotification(
  environmentId: number,
  action: string,
  payload: Omit<NotificationPayload, 'environmentId' | 'environmentName'>,
  image?: string | null
): Promise<{ success: boolean; sent: number }> {
  const eventType = mapActionToEventType(action);
  if (!eventType) {
    // Not a notifiable event type
    return { success: true, sent: 0 };
  }
  // Skip scanner containers (Trivy, Grype) up front — their lifecycle events
  // are never notified, so we avoid the environment/channel DB lookups entirely.
  if (isScannerContainer(image)) {
    return { success: true, sent: 0 };
  }
  // Get environment name
  const env = await getEnvironment(environmentId);
  if (!env) {
    return { success: false, sent: 0 };
  }
  // Get enabled notification channels for this environment and event type
  const envNotifications = await getEnabledEnvironmentNotifications(environmentId, eventType);
  if (envNotifications.length === 0) {
    return { success: true, sent: 0 };
  }
  const enrichedPayload: NotificationPayload = {
    ...payload,
    environmentId,
    environmentName: env.name
  };
  let sent = 0;
  let allSuccess = true;
  for (const notif of envNotifications) {
    try {
      let success = false;
      if (notif.channelType === 'smtp') {
        success = await sendSmtpNotification(notif.config as SmtpConfig, enrichedPayload);
      } else if (notif.channelType === 'apprise') {
        success = await sendAppriseNotification(notif.config as AppriseConfig, enrichedPayload);
      }
      if (success) sent++;
      else allSuccess = false;
    } catch (error) {
      console.error(`[Notifications] Failed to send to channel ${notif.channelName}:`, error);
      allSuccess = false;
    }
  }
  return { success: allSuccess, sent };
}
// Send notification for a specific event type (not mapped from Docker action)
// Used for auto-update, git sync, vulnerability, and system events
//
// With environmentId: delivers to channels subscribed to that environment and
// event type, and tags the payload with the environment name.
// Without: delivers to globally enabled channels subscribed to the event type.
export async function sendEventNotification(
  eventType: NotificationEventType,
  payload: NotificationPayload,
  environmentId?: number
): Promise<{ success: boolean; sent: number }> {
  // Get environment name if provided
  let enrichedPayload = { ...payload };
  if (environmentId) {
    const env = await getEnvironment(environmentId);
    if (env) {
      enrichedPayload.environmentId = environmentId;
      enrichedPayload.environmentName = env.name;
    }
  }
  // Get enabled notification channels for this event type
  let channels: Array<{
    channel_type: 'smtp' | 'apprise';
    channel_name: string;
    config: SmtpConfig | AppriseConfig;
  }> = [];
  if (environmentId) {
    // Environment-specific: get channels subscribed to this env and event type
    const envNotifications = await getEnabledEnvironmentNotifications(environmentId, eventType);
    channels = envNotifications
      .filter(n => n.channelType && n.channelName)
      .map(n => ({
        channel_type: n.channelType!,
        channel_name: n.channelName!,
        config: n.config
      }));
  } else {
    // System-wide: get all globally enabled channels that subscribe to this event type
    const globalSettings = await getEnabledNotificationSettings();
    channels = globalSettings
      .filter(s => s.eventTypes?.includes(eventType))
      .map(s => ({
        channel_type: s.type,
        channel_name: s.name,
        config: s.config
      }));
  }
  if (channels.length === 0) {
    // No subscribers is not an error — success with zero deliveries.
    return { success: true, sent: 0 };
  }
  let sent = 0;
  let allSuccess = true;
  for (const channel of channels) {
    try {
      let success = false;
      if (channel.channel_type === 'smtp') {
        success = await sendSmtpNotification(channel.config as SmtpConfig, enrichedPayload);
      } else if (channel.channel_type === 'apprise') {
        success = await sendAppriseNotification(channel.config as AppriseConfig, enrichedPayload);
      }
      if (success) sent++;
      else allSuccess = false;
    } catch (error) {
      console.error(`[Notifications] Failed to send to channel ${channel.channel_name}:`, error);
      allSuccess = false;
    }
  }
  return { success: allSuccess, sent };
}

View File

@@ -1,829 +0,0 @@
// Vulnerability Scanner Service
// Supports Grype and Trivy scanners
// Uses long-running containers for faster subsequent scans (cached vulnerability databases)
import {
listImages,
pullImage,
createVolume,
listVolumes,
removeVolume,
runContainer,
runContainerWithStreaming,
inspectImage
} from './docker';
import { getEnvironment, getEnvSetting, getSetting } from './db';
import { sendEventNotification } from './notifications';
// Configured vulnerability scanner for an environment ('both' runs Grype and Trivy).
export type ScannerType = 'none' | 'grype' | 'trivy' | 'both';
/**
 * Send vulnerability notifications based on scan results.
 * Sends the most severe notification type based on found vulnerabilities.
 */
export async function sendVulnerabilityNotifications(
  imageName: string,
  summary: VulnerabilitySeverity,
  envId?: number
): Promise<void> {
  const totalVulns =
    summary.critical + summary.high + summary.medium +
    summary.low + summary.negligible + summary.unknown;
  if (totalVulns === 0) return; // clean image — nothing to report

  // Users can subscribe per severity, so every applicable level is sent.
  if (summary.critical > 0) {
    await sendEventNotification('vulnerability_critical', {
      title: 'Critical vulnerabilities found',
      message: `Image "${imageName}" has ${summary.critical} critical vulnerabilities (${totalVulns} total)`,
      type: 'error'
    }, envId);
  }
  if (summary.high > 0) {
    await sendEventNotification('vulnerability_high', {
      title: 'High severity vulnerabilities found',
      message: `Image "${imageName}" has ${summary.high} high severity vulnerabilities (${totalVulns} total)`,
      type: 'warning'
    }, envId);
  }
  // 'vulnerability_any' fires only when there are medium/low/negligible
  // findings and no critical/high ones, to avoid notification spam.
  if (summary.critical === 0 && summary.high === 0 && totalVulns > 0) {
    await sendEventNotification('vulnerability_any', {
      title: 'Vulnerabilities found',
      message: `Image "${imageName}" has ${totalVulns} vulnerabilities (medium: ${summary.medium}, low: ${summary.low})`,
      type: 'info'
    }, envId);
  }
}
// Volume names for scanner database caching
// (persistent volumes keep the vulnerability DBs warm between scans)
const GRYPE_VOLUME_NAME = 'dockhand-grype-db';
const TRIVY_VOLUME_NAME = 'dockhand-trivy-db';
// Track running scanner instances to detect concurrent scans
const runningScanners = new Map<string, number>(); // key: "grype" or "trivy", value: count
// Default CLI arguments for scanners (image name is substituted for {image})
export const DEFAULT_GRYPE_ARGS = '-o json -v {image}';
export const DEFAULT_TRIVY_ARGS = 'image --format json {image}';
/** Vulnerability counts bucketed by severity level. */
export interface VulnerabilitySeverity {
  critical: number;
  high: number;
  medium: number;
  low: number;
  negligible: number;
  unknown: number;
}
/** A single finding reported by Grype or Trivy. */
export interface Vulnerability {
  id: string; // e.g. CVE identifier — presumably; depends on scanner output
  severity: string;
  package: string;
  version: string;
  fixedVersion?: string;
  description?: string;
  link?: string;
  scanner: 'grype' | 'trivy'; // which scanner produced this finding
}
/** Complete result of scanning one image with one scanner. */
export interface ScanResult {
  imageId: string;
  imageName: string;
  scanner: 'grype' | 'trivy';
  scannedAt: string;
  vulnerabilities: Vulnerability[];
  summary: VulnerabilitySeverity;
  scanDuration: number; // duration units not visible here — confirm against producer
  error?: string;
}
/** Progress event streamed to the client while a scan runs. */
export interface ScanProgress {
  stage: 'checking' | 'pulling-scanner' | 'scanning' | 'parsing' | 'complete' | 'error';
  message: string;
  scanner?: 'grype' | 'trivy';
  progress?: number;
  result?: ScanResult;
  results?: ScanResult[]; // All scanner results when using 'both'
  error?: string;
  output?: string; // Line of scanner output
}
/**
 * Resolve the globally-stored scanner CLI arguments, falling back to the
 * hardcoded defaults when no setting has been saved.
 */
export async function getGlobalScannerDefaults(): Promise<{
  grypeArgs: string;
  trivyArgs: string;
}> {
  const [storedGrypeArgs, storedTrivyArgs] = await Promise.all([
    getSetting('default_grype_args'),
    getSetting('default_trivy_args')
  ]);
  return {
    grypeArgs: storedGrypeArgs ?? DEFAULT_GRYPE_ARGS,
    trivyArgs: storedTrivyArgs ?? DEFAULT_TRIVY_ARGS
  };
}
/**
 * Scanner configuration for an environment. The scanner *type* is stored
 * per-environment; the CLI args are always the global defaults.
 */
export async function getScannerSettings(envId?: number): Promise<{
  scanner: ScannerType;
  grypeArgs: string;
  trivyArgs: string;
}> {
  const [globalDefaults, envScanner] = await Promise.all([
    getGlobalScannerDefaults(),
    getEnvSetting('vulnerability_scanner', envId)
  ]);
  // No stored scanner type means scanning is disabled.
  return { scanner: envScanner || 'none', ...globalDefaults };
}
/**
 * Like getScannerSettings, but reuses pre-fetched global CLI-arg defaults to
 * avoid redundant DB calls; only the per-environment scanner type is looked up.
 */
export async function getScannerSettingsWithDefaults(
  envId: number | undefined,
  globalDefaults: { grypeArgs: string; trivyArgs: string }
): Promise<{
  scanner: ScannerType;
  grypeArgs: string;
  trivyArgs: string;
}> {
  const envScanner = await getEnvSetting('vulnerability_scanner', envId);
  return { scanner: envScanner || 'none', ...globalDefaults };
}
// Split a CLI argument string into tokens, substituting the {image}
// placeholder. Single- or double-quoted segments keep embedded spaces; the
// quote characters themselves are dropped. Runs of spaces outside quotes are
// collapsed (no empty tokens).
function parseCliArgs(argsString: string, imageName: string): string[] {
  const expanded = argsString.replace(/\{image\}/g, imageName);
  const tokens: string[] = [];
  let buffer = '';
  let activeQuote: string | null = null; // the quote char we are inside, if any
  for (const ch of expanded) {
    if (activeQuote === null && (ch === '"' || ch === "'")) {
      activeQuote = ch; // opening quote - not part of the token
    } else if (activeQuote === ch) {
      activeQuote = null; // matching closing quote
    } else if (ch === ' ' && activeQuote === null) {
      if (buffer) {
        tokens.push(buffer);
        buffer = '';
      }
    } else {
      buffer += ch;
    }
  }
  if (buffer) {
    tokens.push(buffer);
  }
  return tokens;
}
// Report whether an image matching the scanner's repository is already
// present locally (the :tag suffix is ignored for the comparison).
// Any listing failure is treated as "not available".
async function isScannerImageAvailable(scannerImage: string, envId?: number): Promise<boolean> {
  const repo = scannerImage.split(':')[0];
  try {
    const localImages = await listImages(envId);
    return localImages.some((img) => img.tags?.some((tag: string) => tag.includes(repo)));
  } catch {
    return false;
  }
}
// Make sure the scanner image exists locally, pulling it when missing.
// Resolves true when the image is ready, false when the pull failed.
async function ensureScannerImage(
  scannerImage: string,
  envId?: number,
  onProgress?: (progress: ScanProgress) => void
): Promise<boolean> {
  if (await isScannerImageAvailable(scannerImage, envId)) {
    return true;
  }
  onProgress?.({
    stage: 'pulling-scanner',
    message: `Pulling scanner image ${scannerImage}...`
  });
  try {
    await pullImage(scannerImage, undefined, envId);
    return true;
  } catch (error) {
    console.error(`Failed to pull scanner image ${scannerImage}:`, error);
    return false;
  }
}
// Parse Grype JSON output into normalized findings plus per-severity counts.
// Throws when the output is not valid JSON (e.g. the configured CLI args are
// missing "-o json", or grype printed an error message instead of a report).
function parseGrypeOutput(output: string): { vulnerabilities: Vulnerability[]; summary: VulnerabilitySeverity } {
  const summary: VulnerabilitySeverity = {
    critical: 0,
    high: 0,
    medium: 0,
    low: 0,
    negligible: 0,
    unknown: 0
  };
  const vulnerabilities: Vulnerability[] = [];
  // Severities with their own bucket; anything else counts as unknown.
  const countable: Array<keyof VulnerabilitySeverity> = ['critical', 'high', 'medium', 'low', 'negligible'];
  console.log('[Grype] Raw output length:', output.length);
  console.log('[Grype] Output starts with:', output.slice(0, 200));
  try {
    const data = JSON.parse(output);
    console.log('[Grype] Parsed JSON, matches count:', data.matches?.length || 0);
    for (const match of data.matches || []) {
      const severity = (match.vulnerability?.severity || 'Unknown').toLowerCase();
      vulnerabilities.push({
        id: match.vulnerability?.id || 'Unknown',
        severity: severity,
        package: match.artifact?.name || 'Unknown',
        version: match.artifact?.version || 'Unknown',
        fixedVersion: match.vulnerability?.fix?.versions?.[0],
        description: match.vulnerability?.description,
        link: match.vulnerability?.dataSource,
        scanner: 'grype'
      });
      summary[countable.find((s) => s === severity) ?? 'unknown']++;
    }
  } catch (error) {
    console.error('[Grype] Failed to parse output:', error);
    console.error('[Grype] Output was:', output.slice(0, 500));
    // A non-JSON first line is usually a CLI error message - surface it.
    const firstLine = output.split('\n')[0].trim();
    if (firstLine && !firstLine.startsWith('{')) {
      throw new Error(`Scanner output error: ${firstLine}`);
    }
    throw new Error('Failed to parse scanner output - ensure CLI args include "-o json"');
  }
  console.log('[Grype] Parsed vulnerabilities:', vulnerabilities.length);
  return { vulnerabilities, summary };
}
// Parse Trivy JSON output into normalized findings plus per-severity counts.
// Throws when the output is not valid JSON (e.g. the configured CLI args are
// missing "--format json", or trivy printed an error message instead).
function parseTrivyOutput(output: string): { vulnerabilities: Vulnerability[]; summary: VulnerabilitySeverity } {
  const summary: VulnerabilitySeverity = {
    critical: 0,
    high: 0,
    medium: 0,
    low: 0,
    negligible: 0,
    unknown: 0
  };
  const vulnerabilities: Vulnerability[] = [];
  // Severities with their own bucket; anything else counts as unknown.
  const countable: Array<keyof VulnerabilitySeverity> = ['critical', 'high', 'medium', 'low', 'negligible'];
  try {
    const data = JSON.parse(output);
    for (const result of data.Results || []) {
      for (const v of result.Vulnerabilities || []) {
        const severity = (v.Severity || 'Unknown').toLowerCase();
        vulnerabilities.push({
          id: v.VulnerabilityID || 'Unknown',
          severity: severity,
          package: v.PkgName || 'Unknown',
          version: v.InstalledVersion || 'Unknown',
          fixedVersion: v.FixedVersion,
          description: v.Description,
          link: v.PrimaryURL || v.References?.[0],
          scanner: 'trivy'
        });
        summary[countable.find((s) => s === severity) ?? 'unknown']++;
      }
    }
  } catch (error) {
    console.error('[Trivy] Failed to parse output:', error);
    console.error('[Trivy] Output was:', output.slice(0, 500));
    // A non-JSON first line is usually a CLI error message - surface it.
    const firstLine = output.split('\n')[0].trim();
    if (firstLine && !firstLine.startsWith('{')) {
      throw new Error(`Scanner output error: ${firstLine}`);
    }
    throw new Error('Failed to parse scanner output - ensure CLI args include "--format json"');
  }
  return { vulnerabilities, summary };
}
/**
 * Resolve an image reference to its content-addressable SHA256 id so scan
 * results can be cached against the exact image rather than a mutable tag.
 * Falls back to the given name when the image cannot be inspected.
 */
async function getImageSha(imageName: string, envId?: number): Promise<string> {
  try {
    // Only the Id field is read; narrow to just that shape instead of `any`.
    const imageInfo = (await inspectImage(imageName, envId)) as { Id?: string };
    // The Id field contains the full sha256:... hash
    return imageInfo.Id || imageName;
  } catch {
    // If we can't inspect the image, fall back to the name
    return imageName;
  }
}
// Create the named scanner-DB cache volume when it does not exist yet;
// otherwise reuse the existing one.
async function ensureVolume(volumeName: string, envId?: number): Promise<void> {
  const existing = await listVolumes(envId);
  if (existing.some((v) => v.name === volumeName)) {
    console.log(`[Scanner] Using existing database volume: ${volumeName}`);
    return;
  }
  console.log(`[Scanner] Creating database volume: ${volumeName}`);
  await createVolume({ name: volumeName }, envId);
}
/**
 * Run a scanner in a throwaway container, with the vulnerability database
 * cached in a named volume so repeat scans skip the DB download.
 *
 * Concurrent scans of the same scanner type get a unique cache directory so
 * they do not fight over the scanner's DB lock.
 * Returns the container's stdout; stderr lines stream through onOutput.
 */
async function runScannerContainer(
  scannerImage: string,
  scannerType: 'grype' | 'trivy',
  imageName: string,
  cmd: string[],
  envId?: number,
  onOutput?: (line: string) => void
): Promise<string> {
  // Ensure database cache volume exists
  const volumeName = scannerType === 'grype' ? GRYPE_VOLUME_NAME : TRIVY_VOLUME_NAME;
  await ensureVolume(volumeName, envId);
  // If another scanner of the same type is already running, use a unique
  // cache subdirectory to avoid DB lock conflicts.
  const currentCount = runningScanners.get(scannerType) || 0;
  const scanId = currentCount > 0 ? `${Date.now()}-${Math.random().toString(36).slice(2, 8)}` : '';
  // Increment running counter (decremented in the finally block below).
  runningScanners.set(scannerType, currentCount + 1);
  // The volume is always mounted at the scanner's base cache path. The unique
  // concurrent-scan directory must live *inside* that mount point - a sibling
  // path like `/cache/grype-123` would land on the container's ephemeral
  // filesystem and bypass the cache entirely.
  const basePath = scannerType === 'grype' ? '/cache/grype' : '/cache/trivy';
  const dbPath = scanId ? `${basePath}/scan-${scanId}` : basePath;
  const binds = [
    '/var/run/docker.sock:/var/run/docker.sock:ro',
    `${volumeName}:${basePath}` // Always mount to base path
  ];
  // Point the scanner at the chosen cache directory via its env variable.
  const envVars = scannerType === 'grype'
    ? [`GRYPE_DB_CACHE_DIR=${dbPath}`]
    : [`TRIVY_CACHE_DIR=${dbPath}`];
  if (scanId) {
    console.log(`[Scanner] Concurrent scan detected - using unique cache dir: ${dbPath}`);
  }
  console.log(`[Scanner] Running ${scannerType} with volume ${volumeName} mounted at ${basePath}`);
  try {
    // Run the scanner container
    const output = await runContainerWithStreaming({
      image: scannerImage,
      cmd,
      binds,
      env: envVars,
      name: `dockhand-${scannerType}-${Date.now()}`,
      envId,
      onStderr: (data) => {
        // Forward each non-empty stderr line for real-time progress output.
        for (const line of data.split('\n')) {
          if (line.trim()) {
            onOutput?.(line);
          }
        }
      }
    });
    return output;
  } finally {
    // Decrement running counter, removing the entry when it reaches zero.
    const newCount = (runningScanners.get(scannerType) || 1) - 1;
    if (newCount <= 0) {
      runningScanners.delete(scannerType);
    } else {
      runningScanners.set(scannerType, newCount);
    }
  }
}
/**
 * Scan an image with Anchore Grype, streaming progress through the optional
 * callback. Resolves with the parsed scan result, or rethrows after emitting
 * an 'error' progress event.
 */
export async function scanWithGrype(
  imageName: string,
  envId?: number,
  onProgress?: (progress: ScanProgress) => void
): Promise<ScanResult> {
  const startedAt = Date.now();
  const scannerImage = 'anchore/grype:latest';
  const { grypeArgs } = await getScannerSettings(envId);
  const emit = (p: ScanProgress) => onProgress?.(p);
  emit({
    stage: 'checking',
    message: 'Checking Grype scanner availability...',
    scanner: 'grype'
  });
  // Pull the scanner image if it is not present locally.
  if (!(await ensureScannerImage(scannerImage, envId, onProgress))) {
    throw new Error('Failed to get Grype scanner image. Please ensure Docker can pull images.');
  }
  emit({
    stage: 'scanning',
    message: `Scanning ${imageName} with Grype...`,
    scanner: 'grype',
    progress: 30
  });
  try {
    // Build the command line from the configured CLI args.
    const cmd = parseCliArgs(grypeArgs, imageName);
    const output = await runScannerContainer(scannerImage, 'grype', imageName, cmd, envId, (line) =>
      emit({
        stage: 'scanning',
        message: `Scanning ${imageName} with Grype...`,
        scanner: 'grype',
        progress: 50,
        output: line
      })
    );
    emit({
      stage: 'parsing',
      message: 'Parsing scan results...',
      scanner: 'grype',
      progress: 80
    });
    const { vulnerabilities, summary } = parseGrypeOutput(output);
    // Cache against the immutable SHA256 id rather than the mutable tag.
    const imageId = await getImageSha(imageName, envId);
    const result: ScanResult = {
      imageId,
      imageName,
      scanner: 'grype',
      scannedAt: new Date().toISOString(),
      vulnerabilities,
      summary,
      scanDuration: Date.now() - startedAt
    };
    emit({
      stage: 'complete',
      message: 'Grype scan complete',
      scanner: 'grype',
      progress: 100,
      result
    });
    return result;
  } catch (error) {
    const errorMsg = error instanceof Error ? error.message : String(error);
    emit({
      stage: 'error',
      message: `Grype scan failed: ${errorMsg}`,
      scanner: 'grype',
      error: errorMsg
    });
    throw error;
  }
}
/**
 * Scan an image with Aqua Trivy, streaming progress through the optional
 * callback. Resolves with the parsed scan result, or rethrows after emitting
 * an 'error' progress event.
 */
export async function scanWithTrivy(
  imageName: string,
  envId?: number,
  onProgress?: (progress: ScanProgress) => void
): Promise<ScanResult> {
  const startedAt = Date.now();
  const scannerImage = 'aquasec/trivy:latest';
  const { trivyArgs } = await getScannerSettings(envId);
  const emit = (p: ScanProgress) => onProgress?.(p);
  emit({
    stage: 'checking',
    message: 'Checking Trivy scanner availability...',
    scanner: 'trivy'
  });
  // Pull the scanner image if it is not present locally.
  if (!(await ensureScannerImage(scannerImage, envId, onProgress))) {
    throw new Error('Failed to get Trivy scanner image. Please ensure Docker can pull images.');
  }
  emit({
    stage: 'scanning',
    message: `Scanning ${imageName} with Trivy...`,
    scanner: 'trivy',
    progress: 30
  });
  try {
    // Build the command line from the configured CLI args.
    const cmd = parseCliArgs(trivyArgs, imageName);
    const output = await runScannerContainer(scannerImage, 'trivy', imageName, cmd, envId, (line) =>
      emit({
        stage: 'scanning',
        message: `Scanning ${imageName} with Trivy...`,
        scanner: 'trivy',
        progress: 50,
        output: line
      })
    );
    emit({
      stage: 'parsing',
      message: 'Parsing scan results...',
      scanner: 'trivy',
      progress: 80
    });
    const { vulnerabilities, summary } = parseTrivyOutput(output);
    // Cache against the immutable SHA256 id rather than the mutable tag.
    const imageId = await getImageSha(imageName, envId);
    const result: ScanResult = {
      imageId,
      imageName,
      scanner: 'trivy',
      scannedAt: new Date().toISOString(),
      vulnerabilities,
      summary,
      scanDuration: Date.now() - startedAt
    };
    emit({
      stage: 'complete',
      message: 'Trivy scan complete',
      scanner: 'trivy',
      progress: 100,
      result
    });
    return result;
  } catch (error) {
    const errorMsg = error instanceof Error ? error.message : String(error);
    emit({
      stage: 'error',
      message: `Trivy scan failed: ${errorMsg}`,
      scanner: 'trivy',
      error: errorMsg
    });
    throw error;
  }
}
/**
 * Scan an image with the configured scanner(s) for the environment.
 * With 'both', a single scanner failure is tolerated as long as the other
 * succeeds; with a single scanner, its failure is rethrown.
 * Vulnerability notifications are fired in the background on success.
 */
export async function scanImage(
  imageName: string,
  envId?: number,
  onProgress?: (progress: ScanProgress) => void,
  forceScannerType?: ScannerType
): Promise<ScanResult[]> {
  const { scanner } = await getScannerSettings(envId);
  const effectiveType = forceScannerType || scanner;
  if (effectiveType === 'none') {
    return [];
  }
  const results: ScanResult[] = [];
  const failures: Error[] = [];
  const runners: Array<['grype' | 'trivy', string, typeof scanWithGrype]> = [
    ['grype', 'Grype', scanWithGrype],
    ['trivy', 'Trivy', scanWithTrivy]
  ];
  for (const [type, label, run] of runners) {
    if (effectiveType !== type && effectiveType !== 'both') continue;
    try {
      results.push(await run(imageName, envId, onProgress));
    } catch (error) {
      console.error(`${label} scan failed:`, error);
      failures.push(error instanceof Error ? error : new Error(String(error)));
      // Only 'both' tolerates a single scanner failing.
      if (effectiveType === type) throw error;
    }
  }
  if (effectiveType === 'both' && results.length === 0 && failures.length > 0) {
    throw new Error(`All scanners failed: ${failures.map((e) => e.message).join('; ')}`);
  }
  if (results.length > 0) {
    // With 'both', report the worst case per severity across scanners.
    const maxOf = (key: keyof VulnerabilitySeverity) =>
      Math.max(...results.map((r) => r.summary[key]));
    const combinedSummary: VulnerabilitySeverity = {
      critical: maxOf('critical'),
      high: maxOf('high'),
      medium: maxOf('medium'),
      low: maxOf('low'),
      negligible: maxOf('negligible'),
      unknown: maxOf('unknown')
    };
    // Send notifications (async, don't block return)
    sendVulnerabilityNotifications(imageName, combinedSummary, envId).catch((err) => {
      console.error('[Scanner] Failed to send vulnerability notifications:', err);
    });
  }
  return results;
}
/**
 * Report whether each supported scanner's image is already present locally.
 */
export async function checkScannerAvailability(envId?: number): Promise<{
  grype: boolean;
  trivy: boolean;
}> {
  const [grype, trivy] = await Promise.all([
    isScannerImageAvailable('anchore/grype', envId),
    isScannerImageAvailable('aquasec/trivy', envId)
  ]);
  return { grype, trivy };
}
// Determine an installed scanner's semantic version by running its version
// command in a short-lived container. Returns null when the scanner image is
// not present locally or the version cannot be parsed from the output.
async function getScannerVersion(
  scannerType: 'grype' | 'trivy',
  envId?: number
): Promise<string | null> {
  try {
    const scannerImage = scannerType === 'grype' ? 'anchore/grype:latest' : 'aquasec/trivy:latest';
    const repo = scannerImage.split(':')[0];
    // Bail out early when the image was never pulled.
    const images = await listImages(envId);
    const hasImage = images.some((img) => img.tags?.some((tag: string) => tag.includes(repo)));
    if (!hasImage) return null;
    // Grype uses `version`, trivy uses `--version`.
    const versionCmd = scannerType === 'grype' ? ['version'] : ['--version'];
    const { stdout, stderr } = await runContainer({
      image: scannerImage,
      cmd: versionCmd,
      name: `dockhand-${scannerType}-version-${Date.now()}`,
      envId
    });
    const output = stdout || stderr;
    // Accept several output layouts:
    //   Grype: "grype 0.74.0" or "Application: grype\nVersion: 0.86.1"
    //   Trivy: "Version: 0.48.0" or just "0.48.0"
    const versionMatch = output.match(/(?:grype|trivy|Version:?\s*)?([\d]+\.[\d]+\.[\d]+)/i);
    if (!versionMatch) {
      console.error(`Could not parse ${scannerType} version from output:`, output.substring(0, 200));
      return null;
    }
    return versionMatch[1];
  } catch (error) {
    console.error(`Failed to get ${scannerType} version:`, error);
    return null;
  }
}
/**
 * Versions of both supported scanners; null for a scanner that is not
 * installed locally or whose version could not be determined.
 */
export async function getScannerVersions(envId?: number): Promise<{
  grype: string | null;
  trivy: string | null;
}> {
  const [grype, trivy] = await Promise.all([
    getScannerVersion('grype', envId),
    getScannerVersion('trivy', envId)
  ]);
  return { grype, trivy };
}
/**
 * Check whether newer scanner images exist. Currently only the local digest
 * is reported; comparing against the registry would require a pull or a
 * registry API call, so hasUpdate is always false here.
 */
export async function checkScannerUpdates(envId?: number): Promise<{
  grype: { hasUpdate: boolean; localDigest?: string; remoteDigest?: string };
  trivy: { hasUpdate: boolean; localDigest?: string; remoteDigest?: string };
}> {
  const emptyStatus = () => ({
    hasUpdate: false,
    localDigest: undefined as string | undefined,
    remoteDigest: undefined as string | undefined
  });
  const result = { grype: emptyStatus(), trivy: emptyStatus() };
  try {
    const images = await listImages(envId);
    const targets = [
      ['grype', 'anchore/grype:latest'],
      ['trivy', 'aquasec/trivy:latest']
    ] as const;
    for (const [scanner, imageName] of targets) {
      try {
        const localImage = images.find((img) => img.tags?.includes(imageName));
        if (localImage) {
          // Short 12-char digest; assumes ids carry a 'sha256:' prefix -
          // NOTE(review): confirm for all container runtimes.
          result[scanner].localDigest = localImage.id?.substring(7, 19);
          // Remote digest checking would require pulling or using registry API
          // For simplicity, we just note that checking for updates requires a pull
          result[scanner].hasUpdate = false;
        }
      } catch (error) {
        console.error(`Failed to check updates for ${scanner}:`, error);
      }
    }
  } catch (error) {
    console.error('Failed to check scanner updates:', error);
  }
  return result;
}
/**
 * Remove the scanner database volumes, discarding the cached vulnerability
 * databases. Volumes that do not exist (or cannot be removed) are skipped
 * silently; the next scan simply recreates them.
 *
 * Note: the previous outer try/catch was unreachable - every awaited call is
 * already inside the inner catch - so it has been removed.
 */
export async function cleanupScannerVolumes(envId?: number): Promise<void> {
  for (const volumeName of [GRYPE_VOLUME_NAME, TRIVY_VOLUME_NAME]) {
    try {
      await removeVolume(volumeName, true, envId); // force-remove
      console.log(`[Scanner] Removed volume: ${volumeName}`);
    } catch {
      // Volume might not exist or may be busy - ignore and continue.
    }
  }
}

View File

@@ -1,632 +0,0 @@
/**
* Unified Scheduler Service
*
* Manages all scheduled tasks using croner with automatic job lifecycle:
* - System cleanup jobs (static cron schedules)
* - Container auto-updates (dynamic schedules from database)
* - Git stack auto-sync (dynamic schedules from database)
*
* All execution logic is in separate task files for clean architecture.
*/
import { Cron } from 'croner';
import {
getEnabledAutoUpdateSettings,
getEnabledAutoUpdateGitStacks,
getAutoUpdateSettingById,
getGitStack,
getScheduleCleanupCron,
getEventCleanupCron,
getScheduleRetentionDays,
getEventRetentionDays,
getScheduleCleanupEnabled,
getEventCleanupEnabled,
getEnvironments,
getEnvUpdateCheckSettings,
getAllEnvUpdateCheckSettings,
getEnvironment,
getEnvironmentTimezone,
getDefaultTimezone
} from '../db';
import {
cleanupStaleVolumeHelpers,
cleanupExpiredVolumeHelpers
} from '../docker';
// Import task execution functions
import { runContainerUpdate } from './tasks/container-update';
import { runGitStackSync } from './tasks/git-stack-sync';
import { runEnvUpdateCheckJob } from './tasks/env-update-check';
import {
runScheduleCleanupJob,
runEventCleanupJob,
runVolumeHelperCleanupJob,
SYSTEM_SCHEDULE_CLEANUP_ID,
SYSTEM_EVENT_CLEANUP_ID,
SYSTEM_VOLUME_HELPER_CLEANUP_ID
} from './tasks/system-cleanup';
// Store all active cron jobs
// Dynamically registered jobs, keyed by "<type>-<scheduleId>".
const activeJobs: Map<string, Cron> = new Map();
// System cleanup jobs
// (static schedules, created in startScheduler and rebuilt by refreshSystemJobs).
let cleanupJob: Cron | null = null;
let eventCleanupJob: Cron | null = null;
let volumeHelperCleanupJob: Cron | null = null;
// Scheduler state
// Guards against double start/stop.
let isRunning = false;
/**
 * Start the unified scheduler service.
 * Registers all schedules with croner for automatic execution. Idempotent:
 * calling it while already running is a no-op.
 */
export async function startScheduler(): Promise<void> {
  if (isRunning) {
    console.log('[Scheduler] Already running');
    return;
  }
  console.log('[Scheduler] Starting scheduler service...');
  isRunning = true;
  // Load the static cron expressions and the fallback timezone.
  const scheduleCleanupCron = await getScheduleCleanupCron();
  const eventCleanupCron = await getEventCleanupCron();
  const defaultTimezone = await getDefaultTimezone();
  const cronOpts = { timezone: defaultTimezone };
  // Static system cleanup jobs.
  cleanupJob = new Cron(scheduleCleanupCron, cronOpts, () => runScheduleCleanupJob());
  eventCleanupJob = new Cron(eventCleanupCron, cronOpts, () => runEventCleanupJob());
  // Pre-bundle the cleanup helpers so the job doesn't need dynamic imports;
  // the stale-helper cleanup needs the current environment list.
  const volumeCleanupFns = {
    cleanupStaleVolumeHelpers: async () => {
      const envs = await getEnvironments();
      await cleanupStaleVolumeHelpers(envs);
    },
    cleanupExpiredVolumeHelpers
  };
  // Expired browse containers are reaped every 30 minutes.
  volumeHelperCleanupJob = new Cron('*/30 * * * *', cronOpts, () =>
    runVolumeHelperCleanupJob('cron', volumeCleanupFns)
  );
  // Also reap immediately so stale containers from a previous run disappear.
  runVolumeHelperCleanupJob('startup', volumeCleanupFns).catch((err) => {
    console.error('[Scheduler] Error during startup volume helper cleanup:', err);
  });
  console.log(`[Scheduler] System schedule cleanup: ${scheduleCleanupCron} [${defaultTimezone}]`);
  console.log(`[Scheduler] System event cleanup: ${eventCleanupCron} [${defaultTimezone}]`);
  console.log(`[Scheduler] Volume helper cleanup: every 30 minutes [${defaultTimezone}]`);
  // Register all dynamic schedules from database
  await refreshAllSchedules();
  console.log('[Scheduler] Service started');
}
/**
 * Stop the scheduler service and cleanup all jobs.
 * Idempotent: a no-op when the scheduler is not running.
 */
export function stopScheduler(): void {
  if (!isRunning) return;
  console.log('[Scheduler] Stopping scheduler...');
  isRunning = false;
  // Tear down the static system jobs.
  cleanupJob?.stop();
  cleanupJob = null;
  eventCleanupJob?.stop();
  eventCleanupJob = null;
  volumeHelperCleanupJob?.stop();
  volumeHelperCleanupJob = null;
  // Tear down every dynamically registered job.
  for (const job of activeJobs.values()) {
    job.stop();
  }
  activeJobs.clear();
  console.log('[Scheduler] Service stopped');
}
/**
 * Refresh all dynamic schedules from database.
 * Called on startup and optionally for recovery. Each source of schedules is
 * loaded independently so one failing query does not block the others.
 */
export async function refreshAllSchedules(): Promise<void> {
  console.log('[Scheduler] Refreshing all schedules...');
  // Drop every currently registered dynamic job before re-reading the DB.
  for (const job of activeJobs.values()) {
    job.stop();
  }
  activeJobs.clear();
  let containerCount = 0;
  let gitStackCount = 0;
  let envUpdateCheckCount = 0;
  // Container auto-update schedules.
  try {
    for (const setting of await getEnabledAutoUpdateSettings()) {
      if (!setting.cronExpression) continue;
      if (await registerSchedule(setting.id, 'container_update', setting.environmentId)) {
        containerCount++;
      }
    }
  } catch (error) {
    console.error('[Scheduler] Error loading container schedules:', error);
  }
  // Git stack auto-sync schedules.
  try {
    for (const stack of await getEnabledAutoUpdateGitStacks()) {
      if (!stack.autoUpdateCron) continue;
      if (await registerSchedule(stack.id, 'git_stack_sync', stack.environmentId)) {
        gitStackCount++;
      }
    }
  } catch (error) {
    console.error('[Scheduler] Error loading git stack schedules:', error);
  }
  // Per-environment update-check schedules.
  try {
    for (const { envId, settings } of await getAllEnvUpdateCheckSettings()) {
      if (!settings.enabled || !settings.cron) continue;
      if (await registerSchedule(envId, 'env_update_check', envId)) {
        envUpdateCheckCount++;
      }
    }
  } catch (error) {
    console.error('[Scheduler] Error loading env update check schedules:', error);
  }
  console.log(`[Scheduler] Registered ${containerCount} container schedules, ${gitStackCount} git stack schedules, ${envUpdateCheckCount} env update check schedules`);
}
/**
 * Register or update a schedule with automatic croner execution.
 * Idempotent - can be called multiple times safely.
 *
 * Looks up the schedule's cron expression and enabled flag in the database,
 * stops any previously registered job for the same key, and (when enabled)
 * creates a new Cron job in the environment's timezone. Returns true when a
 * job was created, false when the schedule is missing, disabled, has no cron
 * expression, or registration failed.
 */
export async function registerSchedule(
  scheduleId: number,
  type: 'container_update' | 'git_stack_sync' | 'env_update_check',
  environmentId: number | null
): Promise<boolean> {
  // Jobs are keyed by type + id so the same numeric id can exist per type.
  const key = `${type}-${scheduleId}`;
  try {
    // Unregister existing job if present
    unregisterSchedule(scheduleId, type);
    // Fetch schedule data from database
    let cronExpression: string | null = null;
    let entityName: string | null = null;
    let enabled = false;
    if (type === 'container_update') {
      const setting = await getAutoUpdateSettingById(scheduleId);
      if (!setting) return false;
      cronExpression = setting.cronExpression;
      entityName = setting.containerName;
      enabled = setting.enabled;
    } else if (type === 'git_stack_sync') {
      const stack = await getGitStack(scheduleId);
      if (!stack) return false;
      cronExpression = stack.autoUpdateCron;
      entityName = stack.stackName;
      enabled = stack.autoUpdate;
    } else if (type === 'env_update_check') {
      // For env update checks, scheduleId doubles as the environment id.
      const config = await getEnvUpdateCheckSettings(scheduleId);
      if (!config) return false;
      const env = await getEnvironment(scheduleId);
      if (!env) return false;
      cronExpression = config.cron;
      entityName = `Update: ${env.name}`;
      enabled = config.enabled;
    }
    // Don't create job if disabled or no cron expression
    if (!enabled || !cronExpression) {
      return false;
    }
    // Get timezone for this environment
    const timezone = environmentId ? await getEnvironmentTimezone(environmentId) : 'UTC';
    // Create new Cron instance with timezone
    const job = new Cron(cronExpression, { timezone }, async () => {
      // Defensive check: verify schedule still exists and is enabled
      // (the DB row may have been deleted or disabled since registration).
      if (type === 'container_update') {
        const setting = await getAutoUpdateSettingById(scheduleId);
        if (!setting || !setting.enabled) return;
        await runContainerUpdate(scheduleId, setting.containerName, environmentId, 'cron');
      } else if (type === 'git_stack_sync') {
        const stack = await getGitStack(scheduleId);
        if (!stack || !stack.autoUpdate) return;
        await runGitStackSync(scheduleId, stack.stackName, environmentId, 'cron');
      } else if (type === 'env_update_check') {
        const config = await getEnvUpdateCheckSettings(scheduleId);
        if (!config || !config.enabled) return;
        await runEnvUpdateCheckJob(scheduleId, 'cron');
      }
    });
    // Store in active jobs map
    activeJobs.set(key, job);
    console.log(`[Scheduler] Registered ${type} schedule ${scheduleId} (${entityName}): ${cronExpression} [${timezone}]`);
    return true;
  } catch (error: any) {
    // Invalid cron expressions (croner throws on construction) land here too.
    console.error(`[Scheduler] Failed to register ${type} schedule ${scheduleId}:`, error.message);
    return false;
  }
}
/**
 * Unregister a schedule and stop its croner job.
 * Idempotent - safe to call even if not registered.
 */
export function unregisterSchedule(
  scheduleId: number,
  type: 'container_update' | 'git_stack_sync' | 'env_update_check'
): void {
  const key = `${type}-${scheduleId}`;
  const existing = activeJobs.get(key);
  if (!existing) return;
  existing.stop();
  activeJobs.delete(key);
  console.log(`[Scheduler] Unregistered ${type} schedule ${scheduleId}`);
}
/**
 * Refresh all schedules for a specific environment.
 * Called when an environment's timezone changes to re-register jobs with the new timezone.
 */
export async function refreshSchedulesForEnvironment(environmentId: number): Promise<void> {
  console.log(`[Scheduler] Refreshing schedules for environment ${environmentId} (timezone changed)`);
  let refreshedCount = 0;
  // Container auto-update schedules scoped to this environment.
  try {
    for (const setting of await getEnabledAutoUpdateSettings()) {
      if (setting.environmentId !== environmentId || !setting.cronExpression) continue;
      if (await registerSchedule(setting.id, 'container_update', setting.environmentId)) {
        refreshedCount++;
      }
    }
  } catch (error) {
    console.error('[Scheduler] Error refreshing container schedules:', error);
  }
  // Git stack auto-sync schedules scoped to this environment.
  try {
    for (const stack of await getEnabledAutoUpdateGitStacks()) {
      if (stack.environmentId !== environmentId || !stack.autoUpdateCron) continue;
      if (await registerSchedule(stack.id, 'git_stack_sync', stack.environmentId)) {
        refreshedCount++;
      }
    }
  } catch (error) {
    console.error('[Scheduler] Error refreshing git stack schedules:', error);
  }
  // This environment's update-check schedule, if configured and enabled.
  try {
    const config = await getEnvUpdateCheckSettings(environmentId);
    if (config && config.enabled && config.cron) {
      if (await registerSchedule(environmentId, 'env_update_check', environmentId)) {
        refreshedCount++;
      }
    }
  } catch (error) {
    console.error('[Scheduler] Error refreshing env update check schedule:', error);
  }
  console.log(`[Scheduler] Refreshed ${refreshedCount} schedules for environment ${environmentId}`);
}
/**
 * Refresh system cleanup jobs with the new default timezone.
 * Called when the default timezone setting changes.
 */
export async function refreshSystemJobs(): Promise<void> {
  console.log('[Scheduler] Refreshing system jobs (default timezone changed)');
  // Re-read the current cron expressions and the new default timezone.
  const scheduleCleanupCron = await getScheduleCleanupCron();
  const eventCleanupCron = await getEventCleanupCron();
  const defaultTimezone = await getDefaultTimezone();
  // Cleanup helpers bundled for the volume-helper job.
  const volumeCleanupFns = {
    cleanupStaleVolumeHelpers: async () => {
      const envs = await getEnvironments();
      await cleanupStaleVolumeHelpers(envs);
    },
    cleanupExpiredVolumeHelpers
  };
  // Stop the old jobs before re-creating them with the new timezone.
  cleanupJob?.stop();
  eventCleanupJob?.stop();
  volumeHelperCleanupJob?.stop();
  const cronOpts = { timezone: defaultTimezone };
  cleanupJob = new Cron(scheduleCleanupCron, cronOpts, () => runScheduleCleanupJob());
  eventCleanupJob = new Cron(eventCleanupCron, cronOpts, () => runEventCleanupJob());
  volumeHelperCleanupJob = new Cron('*/30 * * * *', cronOpts, () =>
    runVolumeHelperCleanupJob('cron', volumeCleanupFns)
  );
  console.log(`[Scheduler] System schedule cleanup: ${scheduleCleanupCron} [${defaultTimezone}]`);
  console.log(`[Scheduler] System event cleanup: ${eventCleanupCron} [${defaultTimezone}]`);
  console.log(`[Scheduler] Volume helper cleanup: every 30 minutes [${defaultTimezone}]`);
}
// =============================================================================
// MANUAL TRIGGER FUNCTIONS (for API endpoints)
// =============================================================================
/**
 * Manually trigger a container update.
 *
 * Validates the auto-update setting exists, then starts the update in the
 * background and returns immediately; progress and errors are recorded by
 * the update job itself via schedule executions.
 *
 * @param settingId - ID of the auto-update setting to run
 * @returns success flag, or an error message when the setting is missing or lookup fails
 */
export async function triggerContainerUpdate(settingId: number): Promise<{ success: boolean; executionId?: number; error?: string }> {
  try {
    const setting = await getAutoUpdateSettingById(settingId);
    if (!setting) {
      return { success: false, error: 'Auto-update setting not found' };
    }
    // Fire-and-forget: runContainerUpdate handles its own errors and logging.
    void runContainerUpdate(settingId, setting.containerName, setting.environmentId, 'manual');
    return { success: true };
  } catch (error: unknown) {
    // Surface a readable message even for non-Error throwables.
    return { success: false, error: error instanceof Error ? error.message : String(error) };
  }
}
/**
 * Manually trigger a git stack sync.
 *
 * Validates the git stack exists, then starts the sync in the background
 * and returns immediately; the sync job records its own execution status.
 *
 * @param stackId - ID of the git stack to sync
 * @returns success flag, or an error message when the stack is missing or lookup fails
 */
export async function triggerGitStackSync(stackId: number): Promise<{ success: boolean; executionId?: number; error?: string }> {
  try {
    const stack = await getGitStack(stackId);
    if (!stack) {
      return { success: false, error: 'Git stack not found' };
    }
    // Fire-and-forget: runGitStackSync handles its own errors and logging.
    void runGitStackSync(stackId, stack.stackName, stack.environmentId, 'manual');
    return { success: true };
  } catch (error: unknown) {
    // Surface a readable message even for non-Error throwables.
    return { success: false, error: error instanceof Error ? error.message : String(error) };
  }
}
/**
 * Trigger git stack sync from webhook (called from webhook endpoint).
 *
 * Identical to the manual trigger except the execution is attributed to
 * the 'webhook' trigger source.
 *
 * @param stackId - ID of the git stack to sync
 * @returns success flag, or an error message when the stack is missing or lookup fails
 */
export async function triggerGitStackSyncFromWebhook(stackId: number): Promise<{ success: boolean; executionId?: number; error?: string }> {
  try {
    const stack = await getGitStack(stackId);
    if (!stack) {
      return { success: false, error: 'Git stack not found' };
    }
    // Fire-and-forget: runGitStackSync handles its own errors and logging.
    void runGitStackSync(stackId, stack.stackName, stack.environmentId, 'webhook');
    return { success: true };
  } catch (error: unknown) {
    // Surface a readable message even for non-Error throwables.
    return { success: false, error: error instanceof Error ? error.message : String(error) };
  }
}
/**
 * Manually trigger an environment update check.
 *
 * Validates that update-check settings and the environment exist, then
 * starts the check in the background and returns immediately.
 *
 * @param environmentId - Environment whose containers should be checked for image updates
 * @returns success flag, or an error message when settings/environment are missing
 */
export async function triggerEnvUpdateCheck(environmentId: number): Promise<{ success: boolean; executionId?: number; error?: string }> {
  try {
    const config = await getEnvUpdateCheckSettings(environmentId);
    if (!config) {
      return { success: false, error: 'Update check settings not found for this environment' };
    }
    const env = await getEnvironment(environmentId);
    if (!env) {
      return { success: false, error: 'Environment not found' };
    }
    // Fire-and-forget: the job records its own execution status and logs.
    void runEnvUpdateCheckJob(environmentId, 'manual');
    return { success: true };
  } catch (error: unknown) {
    // Surface a readable message even for non-Error throwables.
    return { success: false, error: error instanceof Error ? error.message : String(error) };
  }
}
/**
 * Manually trigger a system job (schedule cleanup, event cleanup, etc.).
 *
 * Accepts either the numeric system ID (stringified) or the job's slug.
 * The job runs in the background; this returns immediately.
 *
 * @param jobId - Stringified SYSTEM_*_ID constant or slug ('schedule-cleanup', 'event-cleanup', 'volume-helper-cleanup')
 * @returns success flag, or an error for an unrecognized job ID
 */
export async function triggerSystemJob(jobId: string): Promise<{ success: boolean; executionId?: number; error?: string }> {
  try {
    if (jobId === String(SYSTEM_SCHEDULE_CLEANUP_ID) || jobId === 'schedule-cleanup') {
      // Fire-and-forget: the job records its own execution status.
      void runScheduleCleanupJob('manual');
      return { success: true };
    }
    if (jobId === String(SYSTEM_EVENT_CLEANUP_ID) || jobId === 'event-cleanup') {
      void runEventCleanupJob('manual');
      return { success: true };
    }
    if (jobId === String(SYSTEM_VOLUME_HELPER_CLEANUP_ID) || jobId === 'volume-helper-cleanup') {
      // Wrap to pre-fetch environments (avoids dynamic import in production)
      const wrappedCleanupStale = async () => {
        const envs = await getEnvironments();
        await cleanupStaleVolumeHelpers(envs);
      };
      void runVolumeHelperCleanupJob('manual', {
        cleanupStaleVolumeHelpers: wrappedCleanupStale,
        cleanupExpiredVolumeHelpers
      });
      return { success: true };
    }
    return { success: false, error: 'Unknown system job ID' };
  } catch (error: unknown) {
    // Surface a readable message even for non-Error throwables.
    return { success: false, error: error instanceof Error ? error.message : String(error) };
  }
}
// =============================================================================
// UTILITY FUNCTIONS
// =============================================================================
/**
 * Compute the next run time for a cron expression.
 *
 * @param cronExpression - The cron expression
 * @param timezone - Optional IANA timezone (e.g. 'Europe/Warsaw'); local timezone when omitted
 * @returns the next scheduled Date, or null if the expression is invalid
 */
export function getNextRun(cronExpression: string, timezone?: string): Date | null {
  let job: Cron | undefined;
  try {
    job = timezone ? new Cron(cronExpression, { timezone }) : new Cron(cronExpression);
    return job.nextRun();
  } catch {
    return null;
  } finally {
    // Always stop the throwaway job so nothing stays scheduled.
    job?.stop();
  }
}
/**
 * Validate a cron expression.
 *
 * @param cronExpression - Expression to check
 * @returns true when the expression parses, false otherwise
 */
export function isValidCron(cronExpression: string): boolean {
  try {
    // Constructing a Cron validates the expression; stop it immediately
    // so the throwaway instance never stays scheduled.
    new Cron(cronExpression).stop();
    return true;
  } catch {
    return false;
  }
}
/**
 * Get system schedules info for the API.
 *
 * Returns descriptors for the three built-in maintenance jobs (schedule
 * cleanup, event cleanup, volume helper cleanup) including their cron
 * expressions and next run time (null when the job is disabled).
 */
export async function getSystemSchedules(): Promise<SystemScheduleInfo[]> {
  // All six settings are independent — load them concurrently.
  const [
    scheduleRetention,
    eventRetention,
    scheduleCleanupCron,
    eventCleanupCron,
    scheduleCleanupEnabled,
    eventCleanupEnabled
  ] = await Promise.all([
    getScheduleRetentionDays(),
    getEventRetentionDays(),
    getScheduleCleanupCron(),
    getEventCleanupCron(),
    getScheduleCleanupEnabled(),
    getEventCleanupEnabled()
  ]);
  return [
    {
      id: SYSTEM_SCHEDULE_CLEANUP_ID,
      type: 'system_cleanup' as const,
      name: 'Schedule execution cleanup',
      description: `Removes execution logs older than ${scheduleRetention} days`,
      cronExpression: scheduleCleanupCron,
      nextRun: scheduleCleanupEnabled ? getNextRun(scheduleCleanupCron)?.toISOString() ?? null : null,
      isSystem: true,
      enabled: scheduleCleanupEnabled
    },
    {
      id: SYSTEM_EVENT_CLEANUP_ID,
      type: 'system_cleanup' as const,
      name: 'Container event cleanup',
      description: `Removes container events older than ${eventRetention} days`,
      cronExpression: eventCleanupCron,
      nextRun: eventCleanupEnabled ? getNextRun(eventCleanupCron)?.toISOString() ?? null : null,
      isSystem: true,
      enabled: eventCleanupEnabled
    },
    {
      id: SYSTEM_VOLUME_HELPER_CLEANUP_ID,
      type: 'system_cleanup' as const,
      name: 'Volume helper cleanup',
      description: 'Cleans up temporary volume browser containers',
      cronExpression: '*/30 * * * *',
      nextRun: getNextRun('*/30 * * * *')?.toISOString() ?? null,
      isSystem: true,
      enabled: true
    }
  ];
}
/**
 * Descriptor for a built-in system maintenance schedule, as returned by
 * getSystemSchedules().
 */
export interface SystemScheduleInfo {
  // Stable numeric ID (one of the SYSTEM_*_ID constants).
  id: number;
  // Always 'system_cleanup' for built-in jobs.
  type: 'system_cleanup';
  // Human-readable job name.
  name: string;
  // One-line description shown in the UI.
  description: string;
  // Cron expression the job runs on.
  cronExpression: string;
  // ISO timestamp of the next run, or null when the job is disabled or the expression is invalid.
  nextRun: string | null;
  // Built-in jobs are always system jobs.
  isSystem: true;
  // Whether the job is currently enabled.
  enabled: boolean;
}

View File

@@ -1,575 +0,0 @@
/**
* Container Auto-Update Task
*
* Handles automatic container updates with vulnerability scanning.
*/
import type { ScheduleTrigger, VulnerabilityCriteria } from '../../db';
import {
getAutoUpdateSettingById,
updateAutoUpdateLastChecked,
updateAutoUpdateLastUpdated,
createScheduleExecution,
updateScheduleExecution,
appendScheduleExecutionLog,
saveVulnerabilityScan,
getCombinedScanForImage
} from '../../db';
import {
pullImage,
listContainers,
inspectContainer,
createContainer,
stopContainer,
removeContainer,
checkImageUpdateAvailable,
getTempImageTag,
isDigestBasedImage,
getImageIdByTag,
removeTempImage,
tagImage
} from '../../docker';
import { getScannerSettings, scanImage, type ScanResult, type VulnerabilitySeverity } from '../../scanner';
import { sendEventNotification } from '../../notifications';
import { parseImageNameAndTag, shouldBlockUpdate, combineScanSummaries, isDockhandContainer } from './update-utils';
/**
 * Execute a container auto-update.
 *
 * Flow: records a schedule execution, checks the registry for a newer
 * image (no pull), then — when a scanner is configured — pulls the new
 * image behind a temporary tag ("safe pull"), scans it, and only
 * re-tags/recreates the container if the vulnerability criteria pass.
 * Without a scanner it does a plain pull + recreate.
 *
 * @param settingId - Auto-update setting row driving this update
 * @param containerName - Name of the container to update
 * @param environmentId - Docker environment the container lives in (null/undefined = default)
 * @param triggeredBy - What initiated this run (e.g. 'cron', 'manual')
 */
export async function runContainerUpdate(
  settingId: number,
  containerName: string,
  environmentId: number | null | undefined,
  triggeredBy: ScheduleTrigger
): Promise<void> {
  const envId = environmentId ?? undefined;
  const startTime = Date.now();
  // Create execution record
  const execution = await createScheduleExecution({
    scheduleType: 'container_update',
    scheduleId: settingId,
    environmentId: environmentId ?? null,
    entityName: containerName,
    triggeredBy,
    status: 'running'
  });
  await updateScheduleExecution(execution.id, {
    startedAt: new Date().toISOString()
  });
  // Logs to both the console and the execution record.
  // Note: appendScheduleExecutionLog is intentionally not awaited (fire-and-forget).
  const log = (message: string) => {
    console.log(`[Auto-update] ${message}`);
    appendScheduleExecutionLog(execution.id, `[${new Date().toISOString()}] ${message}`);
  };
  try {
    log(`Checking container: ${containerName}`);
    await updateAutoUpdateLastChecked(containerName, envId);
    // Find the container
    const containers = await listContainers(true, envId);
    const container = containers.find(c => c.name === containerName);
    if (!container) {
      log(`Container not found: ${containerName}`);
      await updateScheduleExecution(execution.id, {
        status: 'failed',
        completedAt: new Date().toISOString(),
        duration: Date.now() - startTime,
        errorMessage: 'Container not found'
      });
      return;
    }
    // Get the full container config to extract the image name (tag)
    const inspectData = await inspectContainer(container.id, envId) as any;
    const imageNameFromConfig = inspectData.Config?.Image;
    if (!imageNameFromConfig) {
      log(`Could not determine image name from container config`);
      await updateScheduleExecution(execution.id, {
        status: 'failed',
        completedAt: new Date().toISOString(),
        duration: Date.now() - startTime,
        errorMessage: 'Could not determine image name'
      });
      return;
    }
    // Prevent Dockhand from updating itself
    if (isDockhandContainer(imageNameFromConfig)) {
      log(`Skipping Dockhand container - cannot auto-update self`);
      await updateScheduleExecution(execution.id, {
        status: 'skipped',
        completedAt: new Date().toISOString(),
        duration: Date.now() - startTime,
        details: { reason: 'Cannot auto-update Dockhand itself' }
      });
      return;
    }
    // Get the actual image ID from inspect data
    const currentImageId = inspectData.Image;
    log(`Container is using image: ${imageNameFromConfig}`);
    log(`Current image ID: ${currentImageId?.substring(0, 19)}`);
    // Get scanner and schedule settings early to determine scan strategy
    const [scannerSettings, updateSetting] = await Promise.all([
      getScannerSettings(envId),
      getAutoUpdateSettingById(settingId)
    ]);
    const vulnerabilityCriteria = (updateSetting?.vulnerabilityCriteria || 'never') as VulnerabilityCriteria;
    // Scan if scanning is enabled (scanner !== 'none')
    // The vulnerabilityCriteria only controls whether to BLOCK updates, not whether to SCAN
    const shouldScan = scannerSettings.scanner !== 'none';
    // =============================================================================
    // SAFE UPDATE FLOW
    // =============================================================================
    // 1. Registry check (no pull) - determine if update is available
    // 2. If scanning enabled:
    //    a. Pull new image (overwrites original tag temporarily)
    //    b. Get new image ID
    //    c. SAFETY: Restore original tag to point to OLD image
    //    d. Tag new image with temp suffix for scanning
    //    e. Scan temp image
    //    f. If blocked: remove temp image, original tag still safe
    //    g. If approved: re-tag to original and proceed
    // 3. If no scanning: simple pull and update
    // =============================================================================
    // Step 1: Check for update using registry check (no pull)
    log(`Checking registry for updates: ${imageNameFromConfig}`);
    const registryCheck = await checkImageUpdateAvailable(imageNameFromConfig, currentImageId, envId);
    // Handle local images or registry errors
    if (registryCheck.isLocalImage) {
      log(`Local image detected - skipping (auto-update requires registry)`);
      await updateScheduleExecution(execution.id, {
        status: 'skipped',
        completedAt: new Date().toISOString(),
        duration: Date.now() - startTime,
        details: { reason: 'Local image - no registry available' }
      });
      return;
    }
    if (registryCheck.error) {
      log(`Registry check error: ${registryCheck.error}`);
      // Don't fail on transient errors, just skip this run
      await updateScheduleExecution(execution.id, {
        status: 'skipped',
        completedAt: new Date().toISOString(),
        duration: Date.now() - startTime,
        details: { reason: `Registry check failed: ${registryCheck.error}` }
      });
      return;
    }
    if (!registryCheck.hasUpdate) {
      log(`Already up-to-date: ${containerName} is running the latest version`);
      await updateScheduleExecution(execution.id, {
        status: 'skipped',
        completedAt: new Date().toISOString(),
        duration: Date.now() - startTime,
        details: { reason: 'Already up-to-date' }
      });
      return;
    }
    log(`Update available! Registry digest: ${registryCheck.registryDigest?.substring(0, 19) || 'unknown'}`);
    // Variables for scan results
    let scanResults: ScanResult[] | undefined;
    let scanSummary: VulnerabilitySeverity | undefined;
    let newImageId: string | null = null;
    const newDigest = registryCheck.registryDigest;
    // Step 2: Safe pull with temp tag protection (if scanning enabled)
    if (shouldScan) {
      log(`Safe-pull enabled (scanner: ${scannerSettings.scanner}, criteria: ${vulnerabilityCriteria})`);
      // Check if this is a digest-based image (can't use temp tags)
      if (isDigestBasedImage(imageNameFromConfig)) {
        log(`Digest-based image detected - temp tag protection not available`);
        // Fall through to simple flow
      } else {
        const tempTag = getTempImageTag(imageNameFromConfig);
        log(`Using temp tag for safe pull: ${tempTag}`);
        try {
          // Step 2a: Pull new image (overwrites original tag)
          log(`Pulling new image: ${imageNameFromConfig}`);
          await pullImage(imageNameFromConfig, undefined, envId);
          // Step 2b: Get new image ID
          newImageId = await getImageIdByTag(imageNameFromConfig, envId);
          if (!newImageId) {
            throw new Error('Failed to get new image ID after pull');
          }
          log(`New image pulled: ${newImageId.substring(0, 19)}`);
          // Step 2c: SAFETY - Restore original tag to OLD image
          log(`Restoring original tag to current safe image...`);
          const [oldRepo, oldTag] = parseImageNameAndTag(imageNameFromConfig);
          await tagImage(currentImageId, oldRepo, oldTag, envId);
          log(`Original tag ${imageNameFromConfig} restored to safe image`);
          // Step 2d: Tag new image with temp suffix
          const [tempRepo, tempTagName] = parseImageNameAndTag(tempTag);
          await tagImage(newImageId, tempRepo, tempTagName, envId);
          log(`New image tagged as: ${tempTag}`);
          // Step 2e: Scan temp image
          log(`Scanning new image for vulnerabilities...`);
          try {
            scanResults = await scanImage(tempTag, envId, (progress) => {
              const scannerTag = progress.scanner ? `[${progress.scanner}]` : '[scan]';
              if (progress.message) {
                log(`${scannerTag} ${progress.message}`);
              }
              if (progress.output) {
                log(`${scannerTag} ${progress.output}`);
              }
            });
            if (scanResults.length > 0) {
              scanSummary = combineScanSummaries(scanResults);
              log(`Scan result: ${scanSummary.critical} critical, ${scanSummary.high} high, ${scanSummary.medium} medium, ${scanSummary.low} low`);
              // Save scan results
              for (const result of scanResults) {
                try {
                  await saveVulnerabilityScan({
                    environmentId: envId ?? null,
                    imageId: newImageId,
                    imageName: result.imageName,
                    scanner: result.scanner,
                    scannedAt: result.scannedAt,
                    scanDuration: result.scanDuration,
                    criticalCount: result.summary.critical,
                    highCount: result.summary.high,
                    mediumCount: result.summary.medium,
                    lowCount: result.summary.low,
                    negligibleCount: result.summary.negligible,
                    unknownCount: result.summary.unknown,
                    vulnerabilities: result.vulnerabilities,
                    error: result.error ?? null
                  });
                } catch (saveError: any) {
                  // Persisting scan results is best-effort; a failure must not block the update.
                  log(`Warning: Could not save scan results: ${saveError.message}`);
                }
              }
              // Handle 'more_than_current' criteria
              let currentScanSummary: VulnerabilitySeverity | undefined;
              if (vulnerabilityCriteria === 'more_than_current') {
                log(`Looking up cached scan for current image...`);
                try {
                  const cachedScan = await getCombinedScanForImage(currentImageId, envId ?? null);
                  if (cachedScan) {
                    currentScanSummary = cachedScan;
                    log(`Cached scan: ${currentScanSummary.critical} critical, ${currentScanSummary.high} high`);
                  } else {
                    log(`No cached scan found, scanning current image...`);
                    const currentScanResults = await scanImage(currentImageId, envId, (progress) => {
                      const tag = progress.scanner ? `[${progress.scanner}]` : '[scan]';
                      if (progress.message) log(`${tag} ${progress.message}`);
                    });
                    if (currentScanResults.length > 0) {
                      currentScanSummary = combineScanSummaries(currentScanResults);
                      log(`Current image: ${currentScanSummary.critical} critical, ${currentScanSummary.high} high`);
                      // Save for future use
                      for (const result of currentScanResults) {
                        try {
                          await saveVulnerabilityScan({
                            environmentId: envId ?? null,
                            imageId: currentImageId,
                            imageName: result.imageName,
                            scanner: result.scanner,
                            scannedAt: result.scannedAt,
                            scanDuration: result.scanDuration,
                            criticalCount: result.summary.critical,
                            highCount: result.summary.high,
                            mediumCount: result.summary.medium,
                            lowCount: result.summary.low,
                            negligibleCount: result.summary.negligible,
                            unknownCount: result.summary.unknown,
                            vulnerabilities: result.vulnerabilities,
                            error: result.error ?? null
                          });
                        } catch { /* ignore */ }
                      }
                    }
                  }
                } catch (cacheError: any) {
                  // If the baseline scan fails, currentScanSummary stays undefined and
                  // shouldBlockUpdate decides without a baseline.
                  log(`Warning: Could not get current scan: ${cacheError.message}`);
                }
              }
              // Check if update should be blocked
              const { blocked, reason } = shouldBlockUpdate(vulnerabilityCriteria, scanSummary, currentScanSummary);
              if (blocked) {
                // Step 2f: BLOCKED - Remove temp image, original tag is safe
                log(`UPDATE BLOCKED: ${reason}`);
                log(`Removing blocked image: ${tempTag}`);
                await removeTempImage(newImageId, envId);
                log(`Blocked image removed - container will continue using safe image`);
                await updateScheduleExecution(execution.id, {
                  status: 'skipped',
                  completedAt: new Date().toISOString(),
                  duration: Date.now() - startTime,
                  details: {
                    mode: 'auto_update',
                    reason: 'vulnerabilities_found',
                    blockReason: reason,
                    vulnerabilityCriteria,
                    summary: { checked: 1, updated: 0, blocked: 1, failed: 0 },
                    containers: [{
                      name: containerName,
                      status: 'blocked',
                      blockReason: reason,
                      scannerResults: scanResults.map(r => ({
                        scanner: r.scanner,
                        critical: r.summary.critical,
                        high: r.summary.high,
                        medium: r.summary.medium,
                        low: r.summary.low,
                        negligible: r.summary.negligible,
                        unknown: r.summary.unknown
                      }))
                    }],
                    scanResult: {
                      summary: scanSummary,
                      scanners: scanResults.map(r => r.scanner),
                      scannedAt: scanResults[0]?.scannedAt,
                      scannerResults: scanResults.map(r => ({
                        scanner: r.scanner,
                        critical: r.summary.critical,
                        high: r.summary.high,
                        medium: r.summary.medium,
                        low: r.summary.low,
                        negligible: r.summary.negligible,
                        unknown: r.summary.unknown
                      }))
                    }
                  }
                });
                await sendEventNotification('auto_update_blocked', {
                  title: 'Auto-update blocked',
                  message: `Container "${containerName}" update blocked: ${reason}`,
                  type: 'warning'
                }, envId);
                return;
              }
              log(`Scan passed vulnerability criteria`);
            }
          } catch (scanError: any) {
            // Scan failure - cleanup temp image and fail
            log(`Scan failed: ${scanError.message}`);
            log(`Removing temp image due to scan failure...`);
            await removeTempImage(newImageId, envId);
            await updateScheduleExecution(execution.id, {
              status: 'failed',
              completedAt: new Date().toISOString(),
              duration: Date.now() - startTime,
              errorMessage: `Vulnerability scan failed: ${scanError.message}`
            });
            return;
          }
          // Step 2g: APPROVED - Re-tag to original for update
          log(`Re-tagging approved image to: ${imageNameFromConfig}`);
          await tagImage(newImageId, oldRepo, oldTag, envId);
          log(`Image ready for update`);
          // Clean up temp tag (optional, image will be removed when container is recreated)
          // NOTE(review): other call sites pass an image ID to removeTempImage; here a
          // tag reference is passed — confirm removeTempImage accepts both.
          try {
            await removeTempImage(tempTag, envId);
          } catch { /* ignore cleanup errors */ }
        } catch (pullError: any) {
          log(`Safe-pull failed: ${pullError.message}`);
          await updateScheduleExecution(execution.id, {
            status: 'failed',
            completedAt: new Date().toISOString(),
            duration: Date.now() - startTime,
            errorMessage: `Failed to pull image: ${pullError.message}`
          });
          return;
        }
      }
    } else {
      // No scanning - simple pull
      log(`Pulling update (no vulnerability scan)...`);
      try {
        await pullImage(imageNameFromConfig, undefined, envId);
        log(`Image pulled successfully`);
      } catch (pullError: any) {
        log(`Pull failed: ${pullError.message}`);
        await updateScheduleExecution(execution.id, {
          status: 'failed',
          completedAt: new Date().toISOString(),
          duration: Date.now() - startTime,
          errorMessage: `Failed to pull image: ${pullError.message}`
        });
        return;
      }
    }
    log(`Proceeding with container recreation...`);
    const success = await recreateContainer(containerName, envId, log);
    if (success) {
      await updateAutoUpdateLastUpdated(containerName, envId);
      log(`Successfully updated container: ${containerName}`);
      await updateScheduleExecution(execution.id, {
        status: 'success',
        completedAt: new Date().toISOString(),
        duration: Date.now() - startTime,
        details: {
          mode: 'auto_update',
          newDigest,
          vulnerabilityCriteria,
          summary: { checked: 1, updated: 1, blocked: 0, failed: 0 },
          containers: [{
            name: containerName,
            status: 'updated',
            scannerResults: scanResults?.map(r => ({
              scanner: r.scanner,
              critical: r.summary.critical,
              high: r.summary.high,
              medium: r.summary.medium,
              low: r.summary.low,
              negligible: r.summary.negligible,
              unknown: r.summary.unknown
            }))
          }],
          scanResult: scanSummary ? {
            summary: scanSummary,
            scanners: scanResults?.map(r => r.scanner) || [],
            scannedAt: scanResults?.[0]?.scannedAt,
            scannerResults: scanResults?.map(r => ({
              scanner: r.scanner,
              critical: r.summary.critical,
              high: r.summary.high,
              medium: r.summary.medium,
              low: r.summary.low,
              negligible: r.summary.negligible,
              unknown: r.summary.unknown
            })) || []
          } : undefined
        }
      });
      // Send notification for successful update
      await sendEventNotification('auto_update_success', {
        title: 'Container auto-updated',
        message: `Container "${containerName}" was updated to a new image version`,
        type: 'success'
      }, envId);
    } else {
      throw new Error('Failed to recreate container');
    }
  } catch (error: any) {
    log(`Error: ${error.message}`);
    await updateScheduleExecution(execution.id, {
      status: 'failed',
      completedAt: new Date().toISOString(),
      duration: Date.now() - startTime,
      errorMessage: error.message
    });
    // Send notification for failed update
    await sendEventNotification('auto_update_failed', {
      title: 'Auto-update failed',
      message: `Container "${containerName}" auto-update failed: ${error.message}`,
      type: 'error'
    }, envId);
  }
}
// =============================================================================
// HELPER FUNCTIONS
// =============================================================================
/**
 * Stop, remove and re-create a container under the same name so it picks
 * up the image currently bound to its tag (assumed already pulled/re-tagged).
 *
 * Preserves: image tag, first host-port binding per container port, volume
 * binds, env vars, labels, cmd, restart policy and network mode.
 * NOTE(review): other settings (entrypoint, additional networks, resource
 * limits, capabilities, healthcheck, user) do not appear to be carried
 * over — confirm createContainer covers everything callers rely on.
 *
 * @param containerName - Name of the container to recreate
 * @param envId - Optional environment ID
 * @param log - Optional progress logger
 * @returns true on success; false on any failure (error is logged, not thrown)
 */
async function recreateContainer(
  containerName: string,
  envId?: number,
  log?: (msg: string) => void
): Promise<boolean> {
  try {
    // Find the container by name
    const containers = await listContainers(true, envId);
    const container = containers.find(c => c.name === containerName);
    if (!container) {
      log?.(`Container not found: ${containerName}`);
      return false;
    }
    // Get full container config
    const inspectData = await inspectContainer(container.id, envId) as any;
    const wasRunning = inspectData.State.Running;
    const config = inspectData.Config;
    const hostConfig = inspectData.HostConfig;
    log?.(`Recreating container: ${containerName} (was running: ${wasRunning})`);
    // Stop container if running
    if (wasRunning) {
      log?.('Stopping container...');
      await stopContainer(container.id, envId);
    }
    // Remove old container
    log?.('Removing old container...');
    await removeContainer(container.id, true, envId);
    // Prepare port bindings
    // Only the first host binding per container port is preserved.
    const ports: { [key: string]: { HostPort: string } } = {};
    if (hostConfig.PortBindings) {
      for (const [containerPort, bindings] of Object.entries(hostConfig.PortBindings)) {
        if (bindings && (bindings as any[]).length > 0) {
          ports[containerPort] = { HostPort: (bindings as any[])[0].HostPort || '' };
        }
      }
    }
    // Create new container
    log?.('Creating new container...');
    const newContainer = await createContainer({
      name: containerName,
      image: config.Image,
      ports,
      volumeBinds: hostConfig.Binds || [],
      env: config.Env || [],
      labels: config.Labels || {},
      cmd: config.Cmd || undefined,
      restartPolicy: hostConfig.RestartPolicy?.Name || 'no',
      networkMode: hostConfig.NetworkMode || undefined
    }, envId);
    // Start if was running
    if (wasRunning) {
      log?.('Starting new container...');
      await newContainer.start();
    }
    log?.('Container recreated successfully');
    return true;
  } catch (error: any) {
    // Failures leave the system in whatever state was reached (old container
    // may already be removed); caller decides how to report.
    log?.(`Failed to recreate container: ${error.message}`);
    return false;
  }
}

View File

@@ -1,509 +0,0 @@
/**
* Environment Update Check Task
*
* Checks all containers in an environment for available image updates.
* Can optionally auto-update containers when updates are found.
*/
import type { ScheduleTrigger, VulnerabilityCriteria } from '../../db';
import {
getEnvUpdateCheckSettings,
getEnvironment,
createScheduleExecution,
updateScheduleExecution,
appendScheduleExecutionLog,
saveVulnerabilityScan,
clearPendingContainerUpdates,
addPendingContainerUpdate,
removePendingContainerUpdate
} from '../../db';
import {
listContainers,
inspectContainer,
checkImageUpdateAvailable,
pullImage,
stopContainer,
removeContainer,
createContainer,
getTempImageTag,
isDigestBasedImage,
getImageIdByTag,
removeTempImage,
tagImage
} from '../../docker';
import { sendEventNotification } from '../../notifications';
import { getScannerSettings, scanImage, type VulnerabilitySeverity } from '../../scanner';
import { parseImageNameAndTag, shouldBlockUpdate, combineScanSummaries, isDockhandContainer } from './update-utils';
/**
 * Describes one container for which a newer image digest was found in the
 * registry during an environment update check.
 */
interface UpdateInfo {
  // Docker container ID.
  containerId: string;
  // Container name (used for logging and pending-update records).
  containerName: string;
  // Image reference (repo:tag) from the container's config.
  imageName: string;
  // Image ID the container is currently running.
  currentImageId: string;
  // Digest of the currently running image, when resolvable.
  currentDigest?: string;
  // Digest advertised by the registry for the tag.
  newDigest?: string;
}
// Track running update checks to prevent concurrent execution
const runningUpdateChecks = new Set<number>();
/**
* Execute environment update check job.
* @param environmentId - The environment ID to check
* @param triggeredBy - What triggered this execution
*/
export async function runEnvUpdateCheckJob(
environmentId: number,
triggeredBy: ScheduleTrigger = 'cron'
): Promise<void> {
// Prevent concurrent execution for the same environment
if (runningUpdateChecks.has(environmentId)) {
console.log(`[EnvUpdateCheck] Environment ${environmentId} update check already running, skipping`);
return;
}
runningUpdateChecks.add(environmentId);
const startTime = Date.now();
try {
// Get environment info
const env = await getEnvironment(environmentId);
if (!env) {
console.error(`[EnvUpdateCheck] Environment ${environmentId} not found`);
return;
}
// Get settings
const config = await getEnvUpdateCheckSettings(environmentId);
if (!config) {
console.error(`[EnvUpdateCheck] No settings found for environment ${environmentId}`);
return;
}
// Create execution record
const execution = await createScheduleExecution({
scheduleType: 'env_update_check',
scheduleId: environmentId,
environmentId,
entityName: `Update: ${env.name}`,
triggeredBy,
status: 'running'
});
await updateScheduleExecution(execution.id, {
startedAt: new Date().toISOString()
});
const log = async (message: string) => {
console.log(`[EnvUpdateCheck] ${message}`);
await appendScheduleExecutionLog(execution.id, `[${new Date().toISOString()}] ${message}`);
};
try {
await log(`Starting update check for environment: ${env.name}`);
await log(`Auto-update mode: ${config.autoUpdate ? 'ON' : 'OFF'}`);
// Clear pending updates at the start - we'll re-add as we discover updates
await clearPendingContainerUpdates(environmentId);
// Get all containers in this environment
const containers = await listContainers(true, environmentId);
await log(`Found ${containers.length} containers`);
const updatesAvailable: UpdateInfo[] = [];
let checkedCount = 0;
let errorCount = 0;
// Check each container for updates
for (const container of containers) {
try {
const inspectData = await inspectContainer(container.id, environmentId) as any;
const imageName = inspectData.Config?.Image;
const currentImageId = inspectData.Image;
if (!imageName) {
await log(` [${container.name}] Skipping - no image name found`);
continue;
}
checkedCount++;
await log(` Checking: ${container.name} (${imageName})`);
const result = await checkImageUpdateAvailable(imageName, currentImageId, environmentId);
if (result.isLocalImage) {
await log(` Local image - skipping update check`);
continue;
}
if (result.error) {
await log(` Error: ${result.error}`);
errorCount++;
continue;
}
if (result.hasUpdate) {
updatesAvailable.push({
containerId: container.id,
containerName: container.name,
imageName,
currentImageId,
currentDigest: result.currentDigest,
newDigest: result.registryDigest
});
// Add to pending table immediately - will be removed on successful update
await addPendingContainerUpdate(environmentId, container.id, container.name, imageName);
await log(` UPDATE AVAILABLE`);
await log(` Current: ${result.currentDigest?.substring(0, 24) || 'unknown'}...`);
await log(` New: ${result.registryDigest?.substring(0, 24) || 'unknown'}...`);
} else {
await log(` Up to date`);
}
} catch (err: any) {
await log(` [${container.name}] Error: ${err.message}`);
errorCount++;
}
}
// Summary
await log('');
await log('=== SUMMARY ===');
await log(`Total containers: ${containers.length}`);
await log(`Checked: ${checkedCount}`);
await log(`Updates available: ${updatesAvailable.length}`);
await log(`Errors: ${errorCount}`);
if (updatesAvailable.length === 0) {
await log('All containers are up to date');
// Pending updates already cleared at start, nothing to add
await updateScheduleExecution(execution.id, {
status: 'success',
completedAt: new Date().toISOString(),
duration: Date.now() - startTime,
details: {
updatesFound: 0,
containersChecked: checkedCount,
errors: errorCount
}
});
return;
}
// Build notification message with details
const updateList = updatesAvailable
.map(u => {
const currentShort = u.currentDigest?.substring(0, 12) || 'unknown';
const newShort = u.newDigest?.substring(0, 12) || 'unknown';
return `- ${u.containerName} (${u.imageName})\n ${currentShort}... -> ${newShort}...`;
})
.join('\n');
if (config.autoUpdate) {
// Auto-update mode: actually update the containers with safe-pull flow
await log('');
await log('=== AUTO-UPDATE MODE ===');
// Get scanner settings and vulnerability criteria
const scannerSettings = await getScannerSettings(environmentId);
const vulnerabilityCriteria = (config.vulnerabilityCriteria || 'never') as VulnerabilityCriteria;
// Scan if scanning is enabled (scanner !== 'none')
// The vulnerabilityCriteria only controls whether to BLOCK updates, not whether to SCAN
const shouldScan = scannerSettings.scanner !== 'none';
await log(`Vulnerability criteria: ${vulnerabilityCriteria}`);
if (shouldScan) {
await log(`Scanner: ${scannerSettings.scanner} (scan enabled)`);
}
await log(`Updating ${updatesAvailable.length} containers...`);
let successCount = 0;
let failCount = 0;
let blockedCount = 0;
const updatedContainers: string[] = [];
const failedContainers: string[] = [];
const blockedContainers: { name: string; reason: string; scannerResults?: { scanner: string; critical: number; high: number; medium: number; low: number }[] }[] = [];
for (const update of updatesAvailable) {
// Skip Dockhand container - cannot update itself
if (isDockhandContainer(update.imageName)) {
await log(`\n[${update.containerName}] Skipping - cannot auto-update Dockhand itself`);
continue;
}
try {
await log(`\nUpdating: ${update.containerName}`);
// Get full container config
const inspectData = await inspectContainer(update.containerId, environmentId) as any;
const wasRunning = inspectData.State.Running;
const containerConfig = inspectData.Config;
const hostConfig = inspectData.HostConfig;
// SAFE-PULL FLOW
if (shouldScan && !isDigestBasedImage(update.imageName)) {
const tempTag = getTempImageTag(update.imageName);
await log(` Safe-pull with temp tag: ${tempTag}`);
// Step 1: Pull new image
await log(` Pulling ${update.imageName}...`);
await pullImage(update.imageName, () => {}, environmentId);
// Step 2: Get new image ID
const newImageId = await getImageIdByTag(update.imageName, environmentId);
if (!newImageId) {
throw new Error('Failed to get new image ID after pull');
}
await log(` New image: ${newImageId.substring(0, 19)}`);
// Step 3: SAFETY - Restore original tag to old image
const [oldRepo, oldTag] = parseImageNameAndTag(update.imageName);
await tagImage(update.currentImageId, oldRepo, oldTag, environmentId);
await log(` Restored original tag to safe image`);
// Step 4: Tag new image with temp suffix
const [tempRepo, tempTagName] = parseImageNameAndTag(tempTag);
await tagImage(newImageId, tempRepo, tempTagName, environmentId);
// Step 5: Scan temp image
await log(` Scanning for vulnerabilities...`);
let scanBlocked = false;
let blockReason = '';
let currentScannerResults: { scanner: string; critical: number; high: number; medium: number; low: number }[] = [];
// Collect scan logs to log after scan completes
const scanLogs: string[] = [];
try {
const scanResults = await scanImage(tempTag, environmentId, (progress) => {
if (progress.message) {
scanLogs.push(` [${progress.scanner || 'scan'}] ${progress.message}`);
}
});
// Log collected scan messages
for (const scanLog of scanLogs) {
await log(scanLog);
}
if (scanResults.length > 0) {
const scanSummary = combineScanSummaries(scanResults);
await log(` Scan: ${scanSummary.critical} critical, ${scanSummary.high} high, ${scanSummary.medium} medium, ${scanSummary.low} low`);
// Capture per-scanner results for blocking info
currentScannerResults = scanResults.map(r => ({
scanner: r.scanner,
critical: r.summary.critical,
high: r.summary.high,
medium: r.summary.medium,
low: r.summary.low
}));
// Save scan results
for (const result of scanResults) {
try {
await saveVulnerabilityScan({
environmentId,
imageId: newImageId,
imageName: result.imageName,
scanner: result.scanner,
scannedAt: result.scannedAt,
scanDuration: result.scanDuration,
criticalCount: result.summary.critical,
highCount: result.summary.high,
mediumCount: result.summary.medium,
lowCount: result.summary.low,
negligibleCount: result.summary.negligible,
unknownCount: result.summary.unknown,
vulnerabilities: result.vulnerabilities,
error: result.error ?? null
});
} catch { /* ignore save errors */ }
}
// Check if blocked
const { blocked, reason } = shouldBlockUpdate(vulnerabilityCriteria, scanSummary, undefined);
if (blocked) {
scanBlocked = true;
blockReason = reason;
}
}
} catch (scanErr: any) {
await log(` Scan failed: ${scanErr.message}`);
scanBlocked = true;
blockReason = `Scan failed: ${scanErr.message}`;
}
if (scanBlocked) {
// BLOCKED - Remove temp image
await log(` UPDATE BLOCKED: ${blockReason}`);
await removeTempImage(newImageId, environmentId);
await log(` Removed blocked image - container stays safe`);
blockedCount++;
blockedContainers.push({
name: update.containerName,
reason: blockReason,
scannerResults: currentScannerResults.length > 0 ? currentScannerResults : undefined
});
continue;
}
// APPROVED - Re-tag to original
await log(` Scan passed, re-tagging...`);
await tagImage(newImageId, oldRepo, oldTag, environmentId);
try {
await removeTempImage(tempTag, environmentId);
} catch { /* ignore cleanup errors */ }
} else {
// Simple pull (no scanning or digest-based image)
await log(` Pulling ${update.imageName}...`);
await pullImage(update.imageName, () => {}, environmentId);
}
// Stop container if running
if (wasRunning) {
await log(` Stopping...`);
await stopContainer(update.containerId, environmentId);
}
// Remove old container
await log(` Removing old container...`);
await removeContainer(update.containerId, true, environmentId);
// Prepare port bindings
const ports: { [key: string]: { HostPort: string } } = {};
if (hostConfig.PortBindings) {
for (const [containerPort, bindings] of Object.entries(hostConfig.PortBindings)) {
if (bindings && (bindings as any[]).length > 0) {
ports[containerPort] = { HostPort: (bindings as any[])[0].HostPort || '' };
}
}
}
// Create new container
await log(` Creating new container...`);
const newContainer = await createContainer({
name: update.containerName,
image: update.imageName,
ports,
volumeBinds: hostConfig.Binds || [],
env: containerConfig.Env || [],
labels: containerConfig.Labels || {},
cmd: containerConfig.Cmd || undefined,
restartPolicy: hostConfig.RestartPolicy?.Name || 'no',
networkMode: hostConfig.NetworkMode || undefined
}, environmentId);
// Start if was running
if (wasRunning) {
await log(` Starting...`);
await newContainer.start();
}
await log(` Updated successfully`);
successCount++;
updatedContainers.push(update.containerName);
// Remove from pending table - successfully updated
await removePendingContainerUpdate(environmentId, update.containerId);
} catch (err: any) {
await log(` FAILED: ${err.message}`);
failCount++;
failedContainers.push(update.containerName);
}
}
await log('');
await log(`=== UPDATE COMPLETE ===`);
await log(`Updated: ${successCount}`);
await log(`Blocked: ${blockedCount}`);
await log(`Failed: ${failCount}`);
// Send notifications
if (blockedCount > 0) {
await sendEventNotification('auto_update_blocked', {
title: `${blockedCount} update(s) blocked in ${env.name}`,
message: blockedContainers.map(c => `- ${c.name}: ${c.reason}`).join('\n'),
type: 'warning'
}, environmentId);
}
const notificationMessage = successCount > 0
? `Updated ${successCount} container(s) in ${env.name}:\n${updatedContainers.map(c => `- ${c}`).join('\n')}${blockedCount > 0 ? `\n\nBlocked (${blockedCount}):\n${blockedContainers.map(c => `- ${c.name}`).join('\n')}` : ''}${failCount > 0 ? `\n\nFailed (${failCount}):\n${failedContainers.map(c => `- ${c}`).join('\n')}` : ''}`
: blockedCount > 0 ? `All updates blocked in ${env.name}` : `Update failed for all containers in ${env.name}`;
await sendEventNotification('batch_update_success', {
title: successCount > 0 ? `Containers updated in ${env.name}` : blockedCount > 0 ? `Updates blocked in ${env.name}` : `Container updates failed in ${env.name}`,
message: notificationMessage,
type: successCount > 0 && failCount === 0 && blockedCount === 0 ? 'success' : successCount > 0 ? 'warning' : 'error'
}, environmentId);
// Blocked/failed containers stay in pending table (successfully updated ones were removed)
await updateScheduleExecution(execution.id, {
status: failCount > 0 && successCount === 0 && blockedCount === 0 ? 'failed' : 'success',
completedAt: new Date().toISOString(),
duration: Date.now() - startTime,
details: {
mode: 'auto_update',
updatesFound: updatesAvailable.length,
containersChecked: checkedCount,
errors: errorCount,
autoUpdate: true,
vulnerabilityCriteria,
summary: { checked: checkedCount, updated: successCount, blocked: blockedCount, failed: failCount },
containers: [
...updatedContainers.map(name => ({ name, status: 'updated' as const })),
...blockedContainers.map(c => ({ name: c.name, status: 'blocked' as const, blockReason: c.reason, scannerResults: c.scannerResults })),
...failedContainers.map(name => ({ name, status: 'failed' as const }))
],
updated: successCount,
blocked: blockedCount,
failed: failCount,
blockedContainers
}
});
} else {
// Check-only mode: just send notification
await log('');
await log('Check-only mode - sending notification about available updates');
// Pending updates already added as we discovered them
await sendEventNotification('updates_detected', {
title: `Container updates available in ${env.name}`,
message: `${updatesAvailable.length} update(s) available:\n${updateList}`,
type: 'info'
}, environmentId);
await updateScheduleExecution(execution.id, {
status: 'success',
completedAt: new Date().toISOString(),
duration: Date.now() - startTime,
details: {
mode: 'notify_only',
updatesFound: updatesAvailable.length,
containersChecked: checkedCount,
errors: errorCount,
autoUpdate: false,
summary: { checked: checkedCount, updated: 0, blocked: 0, failed: 0 },
containers: updatesAvailable.map(u => ({
name: u.containerName,
status: 'checked' as const,
imageName: u.imageName,
currentDigest: u.currentDigest,
newDigest: u.newDigest
}))
}
});
}
} catch (error: any) {
await log(`Error: ${error.message}`);
await updateScheduleExecution(execution.id, {
status: 'failed',
completedAt: new Date().toISOString(),
duration: Date.now() - startTime,
errorMessage: error.message
});
}
} finally {
runningUpdateChecks.delete(environmentId);
}
}

View File

@@ -1,102 +0,0 @@
/**
* Git Stack Auto-Sync Task
*
* Handles automatic syncing and deploying of git-based compose stacks.
*/
import type { ScheduleTrigger } from '../../db';
import {
createScheduleExecution,
updateScheduleExecution,
appendScheduleExecutionLog
} from '../../db';
import { deployGitStack } from '../../git';
import { sendEventNotification } from '../../notifications';
/**
 * Execute a git stack sync.
 *
 * Creates a schedule-execution record, deploys the git-based stack via
 * deployGitStack (which skips the redeploy when no changes are detected),
 * sends the matching success/skipped/failed notification, and finalizes
 * the execution record with status, duration, and output.
 *
 * @param stackId - ID of the git-based compose stack to sync
 * @param stackName - Human-readable stack name used in logs and notifications
 * @param environmentId - Target environment ID, if any
 * @param triggeredBy - What triggered this run (e.g. cron or manual)
 */
export async function runGitStackSync(
  stackId: number,
  stackName: string,
  environmentId: number | null | undefined,
  triggeredBy: ScheduleTrigger
): Promise<void> {
  const startTime = Date.now();
  // Create execution record up-front so the run is visible while in progress
  const execution = await createScheduleExecution({
    scheduleType: 'git_stack_sync',
    scheduleId: stackId,
    environmentId: environmentId ?? null,
    entityName: stackName,
    triggeredBy,
    status: 'running'
  });
  await updateScheduleExecution(execution.id, {
    startedAt: new Date().toISOString()
  });
  // Await the DB append so log lines are persisted in order. Previously the
  // append was fire-and-forget, unlike the other scheduled tasks which all
  // await their execution logs.
  const log = async (message: string) => {
    console.log(`[Git-sync] ${message}`);
    await appendScheduleExecutionLog(execution.id, `[${new Date().toISOString()}] ${message}`);
  };
  try {
    await log(`Starting sync for stack: ${stackName}`);
    // Deploy the git stack (only if there are changes)
    const result = await deployGitStack(stackId, { force: false });
    const envId = environmentId ?? undefined;
    if (!result.success) {
      throw new Error(result.error || 'Deployment failed');
    }
    if (result.skipped) {
      await log(`No changes detected for stack: ${stackName}, skipping redeploy`);
      // Send notification for skipped sync
      await sendEventNotification('git_sync_skipped', {
        title: 'Git sync skipped',
        message: `Stack "${stackName}" sync skipped: no changes detected`,
        type: 'info'
      }, envId);
    } else {
      await log(`Successfully deployed stack: ${stackName}`);
      // Send notification for successful sync
      await sendEventNotification('git_sync_success', {
        title: 'Git stack deployed',
        message: `Stack "${stackName}" was synced and deployed successfully`,
        type: 'success'
      }, envId);
    }
    if (result.output) await log(result.output);
    await updateScheduleExecution(execution.id, {
      status: result.skipped ? 'skipped' : 'success',
      completedAt: new Date().toISOString(),
      duration: Date.now() - startTime,
      details: { output: result.output }
    });
  } catch (error: any) {
    await log(`Error: ${error.message}`);
    await updateScheduleExecution(execution.id, {
      status: 'failed',
      completedAt: new Date().toISOString(),
      duration: Date.now() - startTime,
      errorMessage: error.message
    });
    // Send notification for failed sync
    const envId = environmentId ?? undefined;
    await sendEventNotification('git_sync_failed', {
      title: 'Git sync failed',
      message: `Stack "${stackName}" sync failed: ${error.message}`,
      type: 'error'
    }, envId);
  }
}

View File

@@ -1,202 +0,0 @@
/**
* System Cleanup Tasks
*
* Handles system cleanup jobs (schedule executions, container events).
*/
import type { ScheduleTrigger } from '../../db';
import {
getScheduleRetentionDays,
cleanupOldExecutions,
getEventRetentionDays,
getScheduleCleanupEnabled,
getEventCleanupEnabled,
createScheduleExecution,
updateScheduleExecution,
appendScheduleExecutionLog
} from '../../db';
// System job IDs
// Fixed scheduleId values for the built-in system cleanup executions.
// These jobs are not user-created schedules, so they use well-known IDs
// when recorded via createScheduleExecution({ scheduleType: 'system_cleanup', ... }).
export const SYSTEM_SCHEDULE_CLEANUP_ID = 1;
export const SYSTEM_EVENT_CLEANUP_ID = 2;
export const SYSTEM_VOLUME_HELPER_CLEANUP_ID = 3;
/**
 * Execute schedule execution cleanup job.
 *
 * Removes schedule-execution records older than the configured retention
 * window. Cron-triggered runs are skipped entirely when the cleanup setting
 * is disabled; manually triggered runs always execute.
 */
export async function runScheduleCleanupJob(triggeredBy: ScheduleTrigger = 'cron'): Promise<void> {
  // Honor the enable/disable setting only for cron runs; manual runs bypass it.
  if (triggeredBy === 'cron' && !(await getScheduleCleanupEnabled())) {
    return;
  }
  const startTime = Date.now();
  // Record the run so it appears in the execution history.
  const execution = await createScheduleExecution({
    scheduleType: 'system_cleanup',
    scheduleId: SYSTEM_SCHEDULE_CLEANUP_ID,
    environmentId: null,
    entityName: 'Schedule execution cleanup',
    triggeredBy,
    status: 'running'
  });
  await updateScheduleExecution(execution.id, { startedAt: new Date().toISOString() });
  // Mirror every log line to the console and the persisted execution log.
  const log = async (message: string) => {
    console.log(`[Schedule Cleanup] ${message}`);
    await appendScheduleExecutionLog(execution.id, `[${new Date().toISOString()}] ${message}`);
  };
  try {
    const retentionDays = await getScheduleRetentionDays();
    await log(`Starting cleanup with ${retentionDays} day retention`);
    await cleanupOldExecutions(retentionDays);
    await log('Cleanup completed successfully');
    await updateScheduleExecution(execution.id, {
      status: 'success',
      completedAt: new Date().toISOString(),
      duration: Date.now() - startTime,
      details: { retentionDays }
    });
  } catch (error: any) {
    await log(`Error: ${error.message}`);
    await updateScheduleExecution(execution.id, {
      status: 'failed',
      completedAt: new Date().toISOString(),
      duration: Date.now() - startTime,
      errorMessage: error.message
    });
  }
}
/**
 * Execute event cleanup job.
 *
 * Deletes container events older than the configured retention window.
 * Cron-triggered runs respect the enable/disable setting; manually
 * triggered runs always execute.
 */
export async function runEventCleanupJob(triggeredBy: ScheduleTrigger = 'cron'): Promise<void> {
  // Honor the enable/disable setting only for cron runs; manual runs bypass it.
  if (triggeredBy === 'cron' && !(await getEventCleanupEnabled())) {
    return;
  }
  const startTime = Date.now();
  // Record the run so it appears in the execution history.
  const execution = await createScheduleExecution({
    scheduleType: 'system_cleanup',
    scheduleId: SYSTEM_EVENT_CLEANUP_ID,
    environmentId: null,
    entityName: 'Container event cleanup',
    triggeredBy,
    status: 'running'
  });
  await updateScheduleExecution(execution.id, { startedAt: new Date().toISOString() });
  // Mirror every log line to the console and the persisted execution log.
  const log = async (message: string) => {
    console.log(`[Event Cleanup] ${message}`);
    await appendScheduleExecutionLog(execution.id, `[${new Date().toISOString()}] ${message}`);
  };
  try {
    // Lazily import to match the original module-loading behavior.
    const { deleteOldContainerEvents } = await import('../../db');
    const retentionDays = await getEventRetentionDays();
    await log(`Starting cleanup of events older than ${retentionDays} days`);
    const deleted = await deleteOldContainerEvents(retentionDays);
    await log(`Removed ${deleted} old container events`);
    await updateScheduleExecution(execution.id, {
      status: 'success',
      completedAt: new Date().toISOString(),
      duration: Date.now() - startTime,
      details: { deletedCount: deleted, retentionDays }
    });
  } catch (error: any) {
    await log(`Error: ${error.message}`);
    await updateScheduleExecution(execution.id, {
      status: 'failed',
      completedAt: new Date().toISOString(),
      duration: Date.now() - startTime,
      errorMessage: error.message
    });
  }
}
/**
 * Execute volume helper cleanup job.
 *
 * Removes stale and expired dockhand-browse-* helper containers used for
 * volume browsing.
 *
 * @param triggeredBy - What triggered this execution
 * @param cleanupFns - Optional cleanup functions (passed from the scheduler's
 *   static imports to avoid dynamic import issues in production builds)
 */
export async function runVolumeHelperCleanupJob(
  triggeredBy: ScheduleTrigger = 'cron',
  cleanupFns?: {
    cleanupStaleVolumeHelpers: () => Promise<void>;
    cleanupExpiredVolumeHelpers: () => Promise<void>;
  }
): Promise<void> {
  const startTime = Date.now();
  // Record the run so it appears in the execution history.
  const execution = await createScheduleExecution({
    scheduleType: 'system_cleanup',
    scheduleId: SYSTEM_VOLUME_HELPER_CLEANUP_ID,
    environmentId: null,
    entityName: 'Volume helper cleanup',
    triggeredBy,
    status: 'running'
  });
  await updateScheduleExecution(execution.id, { startedAt: new Date().toISOString() });
  // Mirror every log line to the console and the persisted execution log.
  const log = async (message: string) => {
    console.log(`[Volume Helper Cleanup] ${message}`);
    await appendScheduleExecutionLog(execution.id, `[${new Date().toISOString()}] ${message}`);
  };
  try {
    await log('Starting cleanup of stale and expired volume helper containers');
    if (cleanupFns) {
      // Preferred path: statically imported functions supplied by the scheduler.
      await cleanupFns.cleanupStaleVolumeHelpers();
      await cleanupFns.cleanupExpiredVolumeHelpers();
    } else {
      // Fallback to dynamic import (may not work in production)
      const { runVolumeHelperCleanup } = await import('../../db');
      await runVolumeHelperCleanup();
    }
    await log('Cleanup completed successfully');
    await updateScheduleExecution(execution.id, {
      status: 'success',
      completedAt: new Date().toISOString(),
      duration: Date.now() - startTime
    });
  } catch (error: any) {
    await log(`Error: ${error.message}`);
    await updateScheduleExecution(execution.id, {
      status: 'failed',
      completedAt: new Date().toISOString(),
      duration: Date.now() - startTime,
      errorMessage: error.message
    });
  }
}

View File

@@ -1,114 +0,0 @@
/**
* Shared utilities for container and environment auto-update tasks.
*/
import type { VulnerabilityCriteria } from '../../db';
import type { VulnerabilitySeverity } from '../../scanner';
/**
 * Split a full image reference into [repository, tag].
 *
 * Handles various formats:
 * - nginx → ["nginx", "latest"]
 * - nginx:1.25 → ["nginx", "1.25"]
 * - registry.example.com:5000/myimage:v1 → ["registry.example.com:5000/myimage", "v1"]
 * - nginx:latest-dockhand-pending → ["nginx", "latest-dockhand-pending"]
 * Digest references (…@sha256:…) are returned whole with an empty tag.
 */
export function parseImageNameAndTag(imageName: string): [string, string] {
  // Digest-pinned references have no tag component to split off.
  if (imageName.includes('@sha256:')) {
    return [imageName, ''];
  }
  const colonIdx = imageName.lastIndexOf(':');
  // No colon at all → implicit "latest" tag.
  if (colonIdx < 0) {
    return [imageName, 'latest'];
  }
  const candidateTag = imageName.slice(colonIdx + 1);
  // A slash after the last colon means the colon belonged to a registry
  // port (registry:5000/image), so there is still no explicit tag.
  return candidateTag.includes('/')
    ? [imageName, 'latest']
    : [imageName.slice(0, colonIdx), candidateTag];
}
/**
 * Determine if an update should be blocked based on vulnerability criteria.
 *
 * @param criteria - Configured blocking policy
 * @param newScanSummary - Severity counts for the candidate image
 * @param currentScanSummary - Severity counts for the currently running image
 *   (only consulted by the 'more_than_current' policy)
 * @returns blocked flag plus a human-readable reason (empty when allowed)
 */
export function shouldBlockUpdate(
  criteria: VulnerabilityCriteria,
  newScanSummary: VulnerabilitySeverity,
  currentScanSummary?: VulnerabilitySeverity
): { blocked: boolean; reason: string } {
  // Only critical/high/medium/low count toward the totals (negligible and
  // unknown are ignored, as in the original policy).
  const countVulns = (s: VulnerabilitySeverity) => s.critical + s.high + s.medium + s.low;
  const totalVulns = countVulns(newScanSummary);
  if (criteria === 'any' && totalVulns > 0) {
    return {
      blocked: true,
      reason: `Found ${totalVulns} vulnerabilities (${newScanSummary.critical} critical, ${newScanSummary.high} high, ${newScanSummary.medium} medium, ${newScanSummary.low} low)`
    };
  }
  if (criteria === 'critical_high' && (newScanSummary.critical > 0 || newScanSummary.high > 0)) {
    return {
      blocked: true,
      reason: `Found ${newScanSummary.critical} critical and ${newScanSummary.high} high severity vulnerabilities`
    };
  }
  if (criteria === 'critical' && newScanSummary.critical > 0) {
    return {
      blocked: true,
      reason: `Found ${newScanSummary.critical} critical vulnerabilities`
    };
  }
  if (criteria === 'more_than_current' && currentScanSummary) {
    const currentTotal = countVulns(currentScanSummary);
    if (totalVulns > currentTotal) {
      return {
        blocked: true,
        reason: `New image has ${totalVulns} vulnerabilities vs ${currentTotal} in current image`
      };
    }
  }
  // 'never' (and any unrecognized criteria) always allows the update.
  return { blocked: false, reason: '' };
}
/**
 * Check whether an image reference points at the Dockhand application itself.
 * Used to prevent Dockhand from auto-updating its own container.
 */
export function isDockhandContainer(imageName: string): boolean {
  // Case-insensitive substring match on the published repository name.
  const normalized = imageName.toLowerCase();
  return normalized.includes('fnsys/dockhand');
}
/**
 * Merge per-scanner summaries into a single worst-case summary by taking
 * the maximum count reported for each severity level.
 */
export function combineScanSummaries(results: { summary: VulnerabilitySeverity }[]): VulnerabilitySeverity {
  const combined: VulnerabilitySeverity = { critical: 0, high: 0, medium: 0, low: 0, negligible: 0, unknown: 0 };
  for (const { summary } of results) {
    combined.critical = Math.max(combined.critical, summary.critical);
    combined.high = Math.max(combined.high, summary.high);
    combined.medium = Math.max(combined.medium, summary.medium);
    combined.low = Math.max(combined.low, summary.low);
    combined.negligible = Math.max(combined.negligible, summary.negligible);
    combined.unknown = Math.max(combined.unknown, summary.unknown);
  }
  return combined;
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,593 +0,0 @@
/**
* Subprocess Manager
*
* Manages background subprocesses for metrics and event collection using Bun.spawn.
* Provides crash recovery, graceful shutdown, and IPC message routing.
*/
import { Subprocess } from 'bun';
import { saveHostMetric, logContainerEvent, type ContainerEventAction } from './db';
import { sendEventNotification, sendEnvironmentNotification } from './notifications';
import { containerEventEmitter } from './event-collector';
import path from 'node:path';
import { fileURLToPath } from 'node:url';
import { existsSync } from 'node:fs';
// Resolve this module's own directory (works under both Vite and Bun)
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);

/**
 * Resolve the entry script for a named subprocess.
 *
 * Production (Docker container) ships pre-bundled JS at
 * /app/subprocesses/<name>.js (built by scripts/build-subprocesses.ts);
 * in development we fall back to the raw TS source next to this file.
 */
function getSubprocessPath(name: string): string {
  const bundledPath = `/app/subprocesses/${name}.js`;
  // Prefer the production bundle when it exists on disk.
  if (existsSync(bundledPath)) {
    return bundledPath;
  }
  return path.join(__dirname, 'subprocesses', `${name}.ts`);
}
// IPC Message Types (Subprocess → Main)
/** Host resource sample reported by the metrics subprocess. */
export interface MetricMessage {
  type: 'metric';
  envId: number;
  cpu: number;
  memPercent: number;
  memUsed: number;
  memTotal: number;
}
/** Disk-usage warning raised by the metrics subprocess for one environment. */
export interface DiskWarningMessage {
  type: 'disk_warning';
  envId: number;
  envName: string;
  message: string;
  diskPercent?: number;
}
/** A Docker container event, optionally with a pre-built user notification. */
export interface ContainerEventMessage {
  type: 'container_event';
  event: {
    environmentId: number;
    containerId: string;
    containerName: string | null;
    image: string | null;
    action: ContainerEventAction;
    actorAttributes: Record<string, string> | null;
    timestamp: string;
  };
  // Present only when the event should also notify users.
  notification?: {
    action: ContainerEventAction;
    title: string;
    message: string;
    notificationType: 'success' | 'error' | 'warning' | 'info';
    image?: string;
  };
}
/** Environment reachability change (online/offline) from the event subprocess. */
export interface EnvStatusMessage {
  type: 'env_status';
  envId: number;
  envName: string;
  online: boolean;
  error?: string;
}
/** Sent once after a subprocess finishes its startup. */
export interface ReadyMessage {
  type: 'ready';
}
/** Non-fatal error report from a subprocess. */
export interface ErrorMessage {
  type: 'error';
  message: string;
}
/** Union of all messages a subprocess may send to the main process. */
export type SubprocessMessage =
  | MetricMessage
  | DiskWarningMessage
  | ContainerEventMessage
  | EnvStatusMessage
  | ReadyMessage
  | ErrorMessage;
// IPC Message Types (Main → Subprocess)
/** Asks a subprocess to re-read the environment list from the database. */
export interface RefreshEnvironmentsCommand {
  type: 'refresh_environments';
}
/** Asks a subprocess to exit gracefully. */
export interface ShutdownCommand {
  type: 'shutdown';
}
/** Union of all commands the main process may send to a subprocess. */
export type MainProcessCommand = RefreshEnvironmentsCommand | ShutdownCommand;
// Subprocess configuration
/** Static spawn/restart configuration for one managed subprocess. */
interface SubprocessConfig {
  name: string;
  scriptPath: string;
  restartDelayMs: number;
  maxRestarts: number;
}
// Subprocess state
/** Mutable runtime state tracked per managed subprocess. */
interface SubprocessState {
  process: Subprocess<'ignore', 'inherit', 'inherit'> | null;
  restartCount: number;
  lastRestartTime: number;
  isShuttingDown: boolean;
}
/**
 * Supervises the metrics- and event-collection subprocesses.
 *
 * Responsibilities:
 * - spawn each subprocess via Bun.spawn with IPC wiring
 * - route IPC messages to the database, notification, and SSE layers
 * - restart crashed subprocesses with exponential backoff (bounded by maxRestarts)
 * - shut both down gracefully (IPC 'shutdown' first, then force kill)
 */
class SubprocessManager {
  // Per-subprocess runtime state (handle, restart bookkeeping, shutdown flag)
  private metricsState: SubprocessState = {
    process: null,
    restartCount: 0,
    lastRestartTime: 0,
    isShuttingDown: false
  };
  private eventsState: SubprocessState = {
    process: null,
    restartCount: 0,
    lastRestartTime: 0,
    isShuttingDown: false
  };
  private readonly metricsConfig: SubprocessConfig = {
    name: 'metrics-subprocess',
    scriptPath: getSubprocessPath('metrics-subprocess'),
    restartDelayMs: 5000,
    maxRestarts: 10
  };
  private readonly eventsConfig: SubprocessConfig = {
    name: 'event-subprocess',
    scriptPath: getSubprocessPath('event-subprocess'),
    restartDelayMs: 5000,
    maxRestarts: 10
  };
  /**
   * Start all subprocesses
   */
  async start(): Promise<void> {
    console.log('[SubprocessManager] Starting background subprocesses...');
    await this.startMetricsSubprocess();
    await this.startEventsSubprocess();
    console.log('[SubprocessManager] All subprocesses started');
  }
  /**
   * Stop all subprocesses gracefully: request shutdown over IPC, give them
   * one second, then force-kill whatever is still running.
   */
  async stop(): Promise<void> {
    console.log('[SubprocessManager] Stopping background subprocesses...');
    this.metricsState.isShuttingDown = true;
    this.eventsState.isShuttingDown = true;
    // Send shutdown commands
    this.sendToMetrics({ type: 'shutdown' });
    this.sendToEvents({ type: 'shutdown' });
    // Wait a bit for graceful shutdown
    await new Promise((resolve) => setTimeout(resolve, 1000));
    // Force kill if still running
    if (this.metricsState.process) {
      this.metricsState.process.kill();
      this.metricsState.process = null;
    }
    if (this.eventsState.process) {
      this.eventsState.process.kill();
      this.eventsState.process = null;
    }
    console.log('[SubprocessManager] All subprocesses stopped');
  }
  /**
   * Notify subprocesses to refresh their environment list
   */
  refreshEnvironments(): void {
    this.sendToMetrics({ type: 'refresh_environments' });
    this.sendToEvents({ type: 'refresh_environments' });
  }
  /**
   * Start the metrics collection subprocess
   */
  private async startMetricsSubprocess(): Promise<void> {
    if (this.metricsState.isShuttingDown) return;
    try {
      console.log(`[SubprocessManager] Starting ${this.metricsConfig.name}...`);
      const proc = Bun.spawn(['bun', 'run', this.metricsConfig.scriptPath], {
        stdio: ['inherit', 'inherit', 'inherit'],
        // SKIP_MIGRATIONS: the main process is responsible for DB migrations
        env: { ...process.env, SKIP_MIGRATIONS: '1' },
        ipc: (message) => this.handleMetricsMessage(message as SubprocessMessage),
        onExit: (_proc, exitCode, signalCode) => {
          this.handleMetricsExit(exitCode, signalCode);
        }
      });
      this.metricsState.process = proc;
      // NOTE(review): resetting the restart counter on every successful spawn
      // means a crash-after-start loop never exhausts maxRestarts — confirm
      // whether that is intended.
      this.metricsState.restartCount = 0;
      console.log(`[SubprocessManager] ${this.metricsConfig.name} started (PID: ${proc.pid})`);
    } catch (error) {
      const msg = error instanceof Error ? error.message : String(error);
      console.error(`[SubprocessManager] Failed to start ${this.metricsConfig.name}: ${msg}`);
      this.scheduleMetricsRestart();
    }
  }
  /**
   * Start the event collection subprocess
   */
  private async startEventsSubprocess(): Promise<void> {
    if (this.eventsState.isShuttingDown) return;
    try {
      console.log(`[SubprocessManager] Starting ${this.eventsConfig.name}...`);
      const proc = Bun.spawn(['bun', 'run', this.eventsConfig.scriptPath], {
        stdio: ['inherit', 'inherit', 'inherit'],
        env: { ...process.env, SKIP_MIGRATIONS: '1' },
        ipc: (message) => this.handleEventsMessage(message as SubprocessMessage),
        onExit: (_proc, exitCode, signalCode) => {
          this.handleEventsExit(exitCode, signalCode);
        }
      });
      this.eventsState.process = proc;
      this.eventsState.restartCount = 0;
      console.log(`[SubprocessManager] ${this.eventsConfig.name} started (PID: ${proc.pid})`);
    } catch (error) {
      const msg = error instanceof Error ? error.message : String(error);
      console.error(`[SubprocessManager] Failed to start ${this.eventsConfig.name}: ${msg}`);
      this.scheduleEventsRestart();
    }
  }
  /**
   * Handle IPC messages from metrics subprocess
   */
  private async handleMetricsMessage(message: SubprocessMessage): Promise<void> {
    try {
      switch (message.type) {
        case 'ready':
          console.log(`[SubprocessManager] ${this.metricsConfig.name} is ready`);
          break;
        case 'metric':
          // Save metric to database
          await saveHostMetric(
            message.cpu,
            message.memPercent,
            message.memUsed,
            message.memTotal,
            message.envId
          );
          break;
        case 'disk_warning':
          // Send disk warning notification
          await sendEventNotification(
            'disk_space_warning',
            {
              title: message.diskPercent ? 'Disk space warning' : 'High Docker disk usage',
              message: message.message,
              type: 'warning'
            },
            message.envId
          );
          break;
        case 'error':
          console.error(`[SubprocessManager] ${this.metricsConfig.name} error:`, message.message);
          break;
      }
    } catch (error) {
      const msg = error instanceof Error ? error.message : String(error);
      console.error(`[SubprocessManager] Error handling metrics message: ${msg}`);
    }
  }
  /**
   * Handle IPC messages from events subprocess
   */
  private async handleEventsMessage(message: SubprocessMessage): Promise<void> {
    try {
      switch (message.type) {
        case 'ready':
          console.log(`[SubprocessManager] ${this.eventsConfig.name} is ready`);
          break;
        // Braces scope the `const` declarations to this case
        // (no-case-declarations: lexical declarations in an unbraced case
        // leak into the whole switch scope).
        case 'container_event': {
          // Save event to database
          const savedEvent = await logContainerEvent(message.event);
          // Broadcast to SSE clients
          containerEventEmitter.emit('event', savedEvent);
          // Send notification if provided (fire-and-forget, errors logged)
          if (message.notification) {
            const { action, title, message: notifMessage, notificationType, image } = message.notification;
            sendEnvironmentNotification(message.event.environmentId, action, {
              title,
              message: notifMessage,
              type: notificationType
            }, image).catch((err) => {
              console.error('[SubprocessManager] Failed to send notification:', err);
            });
          }
          break;
        }
        case 'env_status':
          // Broadcast to dashboard via containerEventEmitter
          containerEventEmitter.emit('env_status', {
            envId: message.envId,
            envName: message.envName,
            online: message.online,
            error: message.error
          });
          // Send environment status notification
          if (message.online) {
            await sendEventNotification(
              'environment_online',
              {
                title: 'Environment online',
                message: `Environment "${message.envName}" is now reachable`,
                type: 'success'
              },
              message.envId
            ).catch((err) => {
              console.error('[SubprocessManager] Failed to send online notification:', err);
            });
          } else {
            await sendEventNotification(
              'environment_offline',
              {
                title: 'Environment offline',
                message: `Environment "${message.envName}" is unreachable${message.error ? `: ${message.error}` : ''}`,
                type: 'error'
              },
              message.envId
            ).catch((err) => {
              console.error('[SubprocessManager] Failed to send offline notification:', err);
            });
          }
          break;
        case 'error':
          console.error(`[SubprocessManager] ${this.eventsConfig.name} error:`, message.message);
          break;
      }
    } catch (error) {
      const msg = error instanceof Error ? error.message : String(error);
      console.error(`[SubprocessManager] Error handling events message: ${msg}`);
    }
  }
  /**
   * Handle metrics subprocess exit
   */
  private handleMetricsExit(exitCode: number | null, signalCode: string | null): void {
    if (this.metricsState.isShuttingDown) {
      console.log(`[SubprocessManager] ${this.metricsConfig.name} stopped`);
      return;
    }
    console.error(
      `[SubprocessManager] ${this.metricsConfig.name} exited unexpectedly (code: ${exitCode}, signal: ${signalCode})`
    );
    this.metricsState.process = null;
    this.scheduleMetricsRestart();
  }
  /**
   * Handle events subprocess exit
   */
  private handleEventsExit(exitCode: number | null, signalCode: string | null): void {
    if (this.eventsState.isShuttingDown) {
      console.log(`[SubprocessManager] ${this.eventsConfig.name} stopped`);
      return;
    }
    console.error(
      `[SubprocessManager] ${this.eventsConfig.name} exited unexpectedly (code: ${exitCode}, signal: ${signalCode})`
    );
    this.eventsState.process = null;
    this.scheduleEventsRestart();
  }
  /**
   * Schedule metrics subprocess restart with exponential backoff
   */
  private scheduleMetricsRestart(): void {
    if (this.metricsState.isShuttingDown) return;
    if (this.metricsState.restartCount >= this.metricsConfig.maxRestarts) {
      console.error(
        `[SubprocessManager] ${this.metricsConfig.name} exceeded max restarts (${this.metricsConfig.maxRestarts}), giving up`
      );
      return;
    }
    // Exponential backoff: delay doubles with each consecutive failure
    const delay = this.metricsConfig.restartDelayMs * Math.pow(2, this.metricsState.restartCount);
    this.metricsState.restartCount++;
    console.log(
      `[SubprocessManager] Restarting ${this.metricsConfig.name} in ${delay}ms (attempt ${this.metricsState.restartCount}/${this.metricsConfig.maxRestarts})`
    );
    setTimeout(() => {
      this.startMetricsSubprocess();
    }, delay);
  }
  /**
   * Schedule events subprocess restart with exponential backoff
   */
  private scheduleEventsRestart(): void {
    if (this.eventsState.isShuttingDown) return;
    if (this.eventsState.restartCount >= this.eventsConfig.maxRestarts) {
      console.error(
        `[SubprocessManager] ${this.eventsConfig.name} exceeded max restarts (${this.eventsConfig.maxRestarts}), giving up`
      );
      return;
    }
    const delay = this.eventsConfig.restartDelayMs * Math.pow(2, this.eventsState.restartCount);
    this.eventsState.restartCount++;
    console.log(
      `[SubprocessManager] Restarting ${this.eventsConfig.name} in ${delay}ms (attempt ${this.eventsState.restartCount}/${this.eventsConfig.maxRestarts})`
    );
    setTimeout(() => {
      this.startEventsSubprocess();
    }, delay);
  }
  /**
   * Send command to metrics subprocess (no-op if not running)
   */
  private sendToMetrics(command: MainProcessCommand): void {
    if (this.metricsState.process) {
      try {
        this.metricsState.process.send(command);
      } catch (error) {
        const msg = error instanceof Error ? error.message : String(error);
        console.error(`[SubprocessManager] Failed to send to metrics subprocess: ${msg}`);
      }
    }
  }
  /**
   * Send command to events subprocess (no-op if not running)
   */
  private sendToEvents(command: MainProcessCommand): void {
    if (this.eventsState.process) {
      try {
        this.eventsState.process.send(command);
      } catch (error) {
        const msg = error instanceof Error ? error.message : String(error);
        console.error(`[SubprocessManager] Failed to send to events subprocess: ${msg}`);
      }
    }
  }
  /**
   * Get metrics subprocess PID (for HMR cleanup)
   */
  getMetricsPid(): number | null {
    return this.metricsState.process?.pid ?? null;
  }
  /**
   * Get events subprocess PID (for HMR cleanup)
   */
  getEventsPid(): number | null {
    return this.eventsState.process?.pid ?? null;
  }
}
// Module-level singleton manager instance.
let manager: SubprocessManager | null = null;

// Subprocess PIDs are stashed on globalThis so they survive HMR module
// reloads in dev mode; a fresh module instance can then clean up orphans.
const GLOBAL_KEY = '__dockhand_subprocess_pids__';

interface SubprocessPids {
  metrics: number | null;
  events: number | null;
}

/** Read the PIDs recorded by a previous module instance (if any). */
function getStoredPids(): SubprocessPids {
  const stored = (globalThis as any)[GLOBAL_KEY] as SubprocessPids | undefined;
  return stored || { metrics: null, events: null };
}

/** Record subprocess PIDs so a future HMR reload can clean them up. */
function setStoredPids(pids: SubprocessPids): void {
  (globalThis as any)[GLOBAL_KEY] = pids;
}
/**
 * Terminate subprocesses left behind by a previous HMR reload.
 * PIDs come from globalThis storage; already-dead processes are ignored.
 */
function killOrphanedProcesses(): void {
  const pids = getStoredPids();
  const targets: Array<[number | null, string]> = [
    [pids.metrics, 'metrics'],
    [pids.events, 'events']
  ];
  for (const [pid, label] of targets) {
    if (!pid) continue;
    try {
      process.kill(pid, 'SIGTERM');
      console.log(`[SubprocessManager] Killed orphaned ${label} process (PID: ${pid})`);
    } catch {
      // Process already dead, ignore
    }
  }
  setStoredPids({ metrics: null, events: null });
}
/**
 * Start the background subprocesses (metrics + events collectors).
 * Idempotent: a second call while a manager already exists is a no-op.
 */
export async function startSubprocesses(): Promise<void> {
  // Reap anything left over from a previous HMR reload first.
  killOrphanedProcesses();
  if (manager) {
    console.warn('[SubprocessManager] Subprocesses already started');
    return;
  }
  const instance = new SubprocessManager();
  manager = instance;
  await instance.start();
  // Remember child PIDs so a future reload can clean them up.
  setStoredPids({
    metrics: instance.getMetricsPid(),
    events: instance.getEventsPid()
  });
}
/**
 * Stop the background subprocesses (if running) and clear the stored
 * PID record so no stale cleanup happens later.
 */
export async function stopSubprocesses(): Promise<void> {
  if (manager !== null) {
    await manager.stop();
    manager = null;
  }
  setStoredPids({ metrics: null, events: null });
}
/**
 * Ask both subprocesses to reload the environment list.
 * No-op when the subprocesses are not running.
 */
export function refreshSubprocessEnvironments(): void {
  manager?.refreshEnvironments();
}

View File

@@ -1,446 +0,0 @@
/**
* Event Collection Subprocess
*
* Runs as a separate process via Bun.spawn to collect Docker container events
* without blocking the main HTTP thread.
*
* Communication with main process via IPC (process.send).
*/
import { getEnvironments, type ContainerEventAction } from '../db';
import { getDockerEvents } from '../docker';
import type { MainProcessCommand } from '../subprocess-manager';
// Reconnection settings for the Docker event stream.
const RECONNECT_DELAY = 5000; // initial delay: 5 seconds
const MAX_RECONNECT_DELAY = 60000; // backoff cap: 1 minute max
// Track environment online status for notifications.
// Only send notifications on status CHANGES, not on every reconnect attempt.
const environmentOnlineStatus: Map<number, boolean> = new Map();
// Active collectors keyed by environment id; aborting a controller stops its stream.
const collectors: Map<number, AbortController> = new Map();
// Recent event cache for deduplication (key: envId-timeNano-containerId-action).
// Values are Date.now() timestamps of when the event was first seen.
const recentEvents: Map<string, number> = new Map();
const DEDUP_WINDOW_MS = 5000; // 5 second window for deduplication
const CACHE_CLEANUP_INTERVAL_MS = 30000; // Clean up cache every 30 seconds
let cacheCleanupInterval: ReturnType<typeof setInterval> | null = null;
let isShuttingDown = false; // set once during shutdown; checked by all loops
// Docker container actions recorded as activity; everything else is ignored.
const CONTAINER_ACTIONS: ContainerEventAction[] = [
  'create',
  'start',
  'stop',
  'die',
  'kill',
  'restart',
  'pause',
  'unpause',
  'destroy',
  'rename',
  'update',
  'oom',
  'health_status'
];
// Images of vulnerability-scanner helper containers that should not
// generate activity events (matched case-insensitively as substrings).
const SCANNER_IMAGE_PATTERNS = [
  'anchore/grype',
  'aquasec/trivy',
  'ghcr.io/anchore/grype',
  'ghcr.io/aquasecurity/trivy'
];

// Name prefixes of internal Dockhand helper containers to exclude.
const EXCLUDED_CONTAINER_PREFIXES = ['dockhand-browse-'];

/**
 * Forward a message to the main process over IPC.
 * Silently does nothing when no IPC channel is available.
 */
function send(message: any): void {
  process.send?.(message);
}

/** True when the image looks like a known vulnerability-scanner image. */
function isScannerContainer(image: string | null | undefined): boolean {
  if (!image) return false;
  const needle = image.toLowerCase();
  for (const pattern of SCANNER_IMAGE_PATTERNS) {
    if (needle.includes(pattern.toLowerCase())) return true;
  }
  return false;
}

/** True when the container name marks an internal Dockhand helper container. */
function isExcludedContainer(containerName: string | null | undefined): boolean {
  if (!containerName) return false;
  for (const prefix of EXCLUDED_CONTAINER_PREFIXES) {
    if (containerName.startsWith(prefix)) return true;
  }
  return false;
}
/**
 * Record an environment's online/offline state and notify the main process
 * when the state flips. The very first observation and repeated identical
 * states produce no notification.
 */
function updateEnvironmentStatus(
  envId: number,
  envName: string,
  isOnline: boolean,
  errorMessage?: string
) {
  const previous = environmentOnlineStatus.get(envId);
  const changed = previous !== undefined && previous !== isOnline;
  environmentOnlineStatus.set(envId, isOnline);
  if (changed) {
    send({
      type: 'env_status',
      envId,
      envName,
      online: isOnline,
      error: errorMessage
    });
  }
}
/**
 * Shape of a single JSON line from the Docker events stream.
 * Only the fields this subprocess reads are modelled here.
 */
interface DockerEvent {
  // Event category, e.g. 'container' (all other types are ignored)
  Type: string;
  // Docker action string, e.g. 'start' or 'health_status: healthy'
  Action: string;
  Actor: {
    // Object id (the container id for container events)
    ID: string;
    // Metadata such as container name and image
    Attributes: Record<string, string>;
  };
  // Unix timestamp in seconds
  time: number;
  // Unix timestamp in nanoseconds (used for dedup keys and ISO conversion)
  timeNano: number;
}
/**
 * Drop deduplication-cache entries older than the dedup window.
 */
function cleanupRecentEvents() {
  const cutoff = Date.now() - DEDUP_WINDOW_MS;
  for (const [key, timestamp] of recentEvents.entries()) {
    if (timestamp < cutoff) {
      recentEvents.delete(key);
    }
  }
}
/**
 * Handle one parsed Docker event: filter, deduplicate, and forward it to
 * the main process for DB persistence, SSE broadcast, and notifications.
 */
function processEvent(event: DockerEvent, envId: number) {
  if (event.Type !== 'container') return;

  // Docker reports e.g. 'health_status: healthy'; keep only the action part.
  const action = event.Action.split(':')[0] as ContainerEventAction;
  if (!CONTAINER_ACTIONS.includes(action)) return;

  const containerId = event.Actor?.ID;
  if (!containerId) return;
  const containerName = event.Actor?.Attributes?.name;
  const image = event.Actor?.Attributes?.image;

  // Ignore scanner helpers (Trivy/Grype) and internal Dockhand containers.
  if (isScannerContainer(image) || isExcludedContainer(containerName)) return;

  // Drop duplicates of an event we have already forwarded recently.
  const dedupKey = `${envId}-${event.timeNano}-${containerId}-${action}`;
  if (recentEvents.has(dedupKey)) return;
  recentEvents.set(dedupKey, Date.now());
  // Prune opportunistically when the cache grows large.
  if (recentEvents.size > 200) {
    cleanupRecentEvents();
  }

  // Docker gives nanoseconds; convert to an ISO-8601 string.
  const timestamp = new Date(Math.floor(event.timeNano / 1000000)).toISOString();

  const actionLabel = action.charAt(0).toUpperCase() + action.slice(1);
  const containerLabel = containerName || containerId.substring(0, 12);
  let notificationType;
  if (action === 'die' || action === 'kill' || action === 'oom') {
    notificationType = 'error';
  } else if (action === 'stop') {
    notificationType = 'warning';
  } else if (action === 'start') {
    notificationType = 'success';
  } else {
    notificationType = 'info';
  }

  // Hand off to the main process for DB save and SSE broadcast.
  send({
    type: 'container_event',
    event: {
      environmentId: envId,
      containerId: containerId,
      containerName: containerName || null,
      image: image || null,
      action,
      actorAttributes: event.Actor?.Attributes || null,
      timestamp
    },
    notification: {
      action,
      title: `Container ${actionLabel}`,
      message: `Container "${containerLabel}" ${action}${image ? ` (${image})` : ''}`,
      notificationType,
      image
    }
  });
}
/**
 * Start (and keep alive) the Docker event stream for one environment.
 *
 * Maintains a single connection per environment and reconnects with
 * exponential backoff (RECONNECT_DELAY doubling up to MAX_RECONNECT_DELAY)
 * whenever the stream ends or errors. Cancellation happens via the
 * AbortController stored in `collectors`.
 */
async function startEnvironmentCollector(envId: number, envName: string) {
  // Stop existing collector if any (makes restarts idempotent)
  stopEnvironmentCollector(envId);
  const controller = new AbortController();
  collectors.set(envId, controller);
  let reconnectDelay = RECONNECT_DELAY;
  const connect = async () => {
    if (controller.signal.aborted || isShuttingDown) return;
    let reader: ReadableStreamDefaultReader<Uint8Array> | null = null;
    try {
      console.log(
        `[EventSubprocess] Connecting to Docker events for ${envName} (env ${envId})...`
      );
      const eventStream = await getDockerEvents({ type: ['container'] }, envId);
      if (!eventStream) {
        console.error(`[EventSubprocess] Failed to get event stream for ${envName}`);
        updateEnvironmentStatus(envId, envName, false, 'Failed to connect to Docker');
        scheduleReconnect();
        return;
      }
      // Reset reconnect delay on successful connection
      reconnectDelay = RECONNECT_DELAY;
      console.log(`[EventSubprocess] Connected to Docker events for ${envName}`);
      updateEnvironmentStatus(envId, envName, true);
      reader = eventStream.getReader();
      const decoder = new TextDecoder();
      // Docker sends newline-delimited JSON; buffer partial lines between reads.
      let buffer = '';
      try {
        while (!controller.signal.aborted && !isShuttingDown) {
          const { done, value } = await reader.read();
          if (done) break;
          buffer += decoder.decode(value, { stream: true });
          const lines = buffer.split('\n');
          // Keep the trailing partial line in the buffer for the next read.
          buffer = lines.pop() || '';
          for (const line of lines) {
            if (line.trim()) {
              try {
                const event = JSON.parse(line) as DockerEvent;
                processEvent(event, envId);
              } catch {
                // Ignore parse errors for partial chunks
              }
            }
          }
        }
      } catch (error: any) {
        // Mid-stream failure: report it unless we are the ones aborting.
        if (!controller.signal.aborted && !isShuttingDown) {
          if (error.name !== 'AbortError') {
            console.error(`[EventSubprocess] Stream error for ${envName}:`, error.message);
            updateEnvironmentStatus(envId, envName, false, error.message);
          }
        }
      } finally {
        if (reader) {
          try {
            reader.releaseLock();
          } catch {
            // Reader already released or stream closed - ignore
          }
        }
      }
      // Connection closed, reconnect
      if (!controller.signal.aborted && !isShuttingDown) {
        scheduleReconnect();
      }
    } catch (error: any) {
      // Outer catch: connection setup failed (e.g. getDockerEvents threw).
      if (reader) {
        try {
          reader.releaseLock();
        } catch {
          // Reader already released or stream closed - ignore
        }
      }
      if (!controller.signal.aborted && !isShuttingDown && error.name !== 'AbortError') {
        console.error(`[EventSubprocess] Connection error for ${envName}:`, error.message);
        updateEnvironmentStatus(envId, envName, false, error.message);
      }
      if (!controller.signal.aborted && !isShuttingDown) {
        scheduleReconnect();
      }
    }
  };
  const scheduleReconnect = () => {
    if (controller.signal.aborted || isShuttingDown) return;
    console.log(`[EventSubprocess] Reconnecting to ${envName} in ${reconnectDelay / 1000}s...`);
    setTimeout(() => {
      if (!controller.signal.aborted && !isShuttingDown) {
        connect();
      }
    }, reconnectDelay);
    // Exponential backoff
    reconnectDelay = Math.min(reconnectDelay * 2, MAX_RECONNECT_DELAY);
  };
  // Start the connection
  connect();
}
/**
 * Abort and forget the collector for one environment, clearing its cached
 * online status so a future start does not emit a stale change event.
 */
function stopEnvironmentCollector(envId: number) {
  const controller = collectors.get(envId);
  if (!controller) return;
  controller.abort();
  collectors.delete(envId);
  environmentOnlineStatus.delete(envId);
}
/**
 * Reconcile running collectors with the environments table: stop collectors
 * for environments that were removed or had activity collection disabled,
 * and start collectors for newly enabled ones. Hawser Edge environments are
 * always skipped here (the main process handles them).
 */
async function refreshEventCollectors() {
  if (isShuttingDown) return;
  try {
    const environments = await getEnvironments();
    const shouldCollect = (e: any) =>
      e.collectActivity && e.connectionType !== 'hawser-edge';
    const activeEnvIds = new Set(environments.filter(shouldCollect).map((e) => e.id));
    // Tear down collectors that are no longer wanted.
    for (const envId of [...collectors.keys()]) {
      if (!activeEnvIds.has(envId)) {
        console.log(`[EventSubprocess] Stopping collector for environment ${envId}`);
        stopEnvironmentCollector(envId);
      }
    }
    // Bring up collectors that are wanted but missing.
    for (const env of environments) {
      if (shouldCollect(env) && !collectors.has(env.id)) {
        startEnvironmentCollector(env.id, env.name);
      }
    }
  } catch (error) {
    console.error('[EventSubprocess] Failed to refresh collectors:', error);
    send({ type: 'error', message: `Failed to refresh collectors: ${error}` });
  }
}
/**
 * Dispatch an IPC command received from the main process.
 */
function handleCommand(command: MainProcessCommand): void {
  if (command.type === 'refresh_environments') {
    console.log('[EventSubprocess] Refreshing environments...');
    refreshEventCollectors();
  } else if (command.type === 'shutdown') {
    console.log('[EventSubprocess] Shutdown requested');
    shutdown();
  }
}
/**
 * Stop all collectors, timers, and caches, then exit the process.
 */
function shutdown(): void {
  isShuttingDown = true;
  // Stop the periodic cache cleanup timer.
  if (cacheCleanupInterval !== null) {
    clearInterval(cacheCleanupInterval);
    cacheCleanupInterval = null;
  }
  // Abort every environment collector.
  for (const envId of [...collectors.keys()]) {
    stopEnvironmentCollector(envId);
  }
  // Drop the deduplication cache.
  recentEvents.clear();
  console.log('[EventSubprocess] Stopped');
  process.exit(0);
}
/**
 * Boot the event subprocess: start per-environment collectors, schedule the
 * dedup-cache cleanup, wire up IPC and signal handlers, then report
 * readiness to the parent process.
 */
async function start(): Promise<void> {
  console.log('[EventSubprocess] Starting container event collection...');
  await refreshEventCollectors();
  // Periodically prune the dedup cache so it cannot grow unbounded.
  cacheCleanupInterval = setInterval(cleanupRecentEvents, CACHE_CLEANUP_INTERVAL_MS);
  console.log('[EventSubprocess] Started deduplication cache cleanup (every 30s)');
  // Commands arrive from the main process over IPC.
  process.on('message', (message: MainProcessCommand) => handleCommand(message));
  // Shut down cleanly on termination signals.
  process.on('SIGTERM', shutdown);
  process.on('SIGINT', shutdown);
  // Tell the parent we are up.
  send({ type: 'ready' });
  console.log('[EventSubprocess] Started successfully');
}

// Kick off the subprocess entry point.
start();

View File

@@ -1,419 +0,0 @@
/**
* Metrics Collection Subprocess
*
* Runs as a separate process via Bun.spawn to collect CPU/memory metrics
* and check disk space without blocking the main HTTP thread.
*
* Communication with main process via IPC (process.send).
*/
import { getEnvironments, getEnvSetting } from '../db';
import { listContainers, getContainerStats, getDockerInfo, getDiskUsage } from '../docker';
import os from 'node:os';
import type { MainProcessCommand } from '../subprocess-manager';
// Collection cadence and per-environment safety limits.
const COLLECT_INTERVAL = 10000; // 10 seconds
const DISK_CHECK_INTERVAL = 300000; // 5 minutes
const DEFAULT_DISK_THRESHOLD = 80; // 80% threshold for disk warnings
const ENV_METRICS_TIMEOUT = 15000; // 15 seconds timeout per environment for metrics
const ENV_DISK_TIMEOUT = 20000; // 20 seconds timeout per environment for disk checks
/**
 * Race a promise against a timeout.
 *
 * Resolves with the promise's value (or propagates its rejection) when it
 * settles within `ms`; otherwise resolves with `fallback`. Unlike a bare
 * Promise.race, the internal timer is cleared as soon as the promise
 * settles, so a long `ms` cannot leave a pending timer keeping the event
 * loop alive after the work is already done.
 *
 * @param promise - the work to wait for
 * @param ms - timeout in milliseconds
 * @param fallback - value resolved when the timeout elapses first
 */
function withTimeout<T>(promise: Promise<T>, ms: number, fallback: T): Promise<T> {
  return new Promise<T>((resolve, reject) => {
    const timer = setTimeout(() => resolve(fallback), ms);
    promise.then(
      (value) => {
        clearTimeout(timer);
        resolve(value);
      },
      (error) => {
        clearTimeout(timer);
        reject(error);
      }
    );
  });
}
// Track last disk warning sent per environment to avoid spamming
const lastDiskWarning: Map<number, number> = new Map();
const DISK_WARNING_COOLDOWN = 3600000; // 1 hour between warnings
// Interval handles; null while the collector is not running.
let collectInterval: ReturnType<typeof setInterval> | null = null;
let diskCheckInterval: ReturnType<typeof setInterval> | null = null;
let isShuttingDown = false; // set once by shutdown(); checked by collection loops
/**
 * Forward a message to the main process over IPC.
 * Silently does nothing when no IPC channel is available.
 */
function send(message: any): void {
  process.send?.(message);
}
/**
 * Collect CPU and memory metrics for one environment and forward a single
 * aggregated `metric` IPC message to the main process.
 *
 * CPU is the sum of per-container usage deltas normalized by the Docker
 * host's core count; memory is the sum of per-container usage compared to
 * the host's MemTotal. Disabled and Hawser Edge environments are skipped.
 * Failures are logged and swallowed — the host may simply be offline.
 */
async function collectEnvMetrics(env: { id: number; name: string; host?: string; socketPath?: string; collectMetrics?: boolean; connectionType?: string }) {
  try {
    // Skip environments where metrics collection is disabled
    if (env.collectMetrics === false) {
      return;
    }
    // Skip Hawser Edge environments (handled by main process)
    if (env.connectionType === 'hawser-edge') {
      return;
    }
    // Get running containers
    const containers = await listContainers(false, env.id); // Only running
    let totalCpuPercent = 0;
    let totalContainerMemUsed = 0;
    // Query stats for all running containers in parallel.
    const statsPromises = containers.map(async (container) => {
      try {
        const stats = (await getContainerStats(container.id, env.id)) as any;
        // CPU %: container usage delta over system delta, scaled by core count.
        const cpuDelta =
          stats.cpu_stats.cpu_usage.total_usage - stats.precpu_stats.cpu_usage.total_usage;
        const systemDelta =
          stats.cpu_stats.system_cpu_usage - stats.precpu_stats.system_cpu_usage;
        const cpuCount = stats.cpu_stats.online_cpus || os.cpus().length;
        let cpuPercent = 0;
        if (systemDelta > 0 && cpuDelta > 0) {
          cpuPercent = (cpuDelta / systemDelta) * cpuCount * 100;
        }
        // Subtract page cache from reported usage for "actual" memory.
        // NOTE(review): `memory_stats.stats.cache` is a cgroup-v1 field; on
        // cgroup-v2 hosts it is typically absent, so this falls back to raw
        // usage — confirm that is acceptable.
        const memUsage = stats.memory_stats?.usage || 0;
        const memCache = stats.memory_stats?.stats?.cache || 0;
        const actualMemUsed = memUsage - memCache;
        return { cpuPercent, memUsage: actualMemUsed > 0 ? actualMemUsed : memUsage };
      } catch {
        // A single failing container (e.g. it just exited) must not sink
        // the whole environment's sample.
        return { cpuPercent: 0, memUsage: 0 };
      }
    });
    const statsResults = await Promise.all(statsPromises);
    totalCpuPercent = statsResults.reduce((sum, r) => sum + r.cpuPercent, 0);
    totalContainerMemUsed = statsResults.reduce((sum, r) => sum + r.memUsage, 0);
    // Get host memory info from Docker (falls back to this machine's RAM)
    const info = (await getDockerInfo(env.id)) as any;
    const memTotal = info?.MemTotal || os.totalmem();
    // Calculate memory: sum of all container memory vs host total
    const memUsed = totalContainerMemUsed;
    const memPercent = memTotal > 0 ? (memUsed / memTotal) * 100 : 0;
    // Normalize CPU by number of cores from the Docker host
    const cpuCount = info?.NCPU || os.cpus().length;
    const normalizedCpu = totalCpuPercent / cpuCount;
    // Validate values - skip if any are NaN, Infinity, or negative
    const finalCpu = Number.isFinite(normalizedCpu) && normalizedCpu >= 0 ? normalizedCpu : 0;
    const finalMemPercent = Number.isFinite(memPercent) && memPercent >= 0 ? memPercent : 0;
    const finalMemUsed = Number.isFinite(memUsed) && memUsed >= 0 ? memUsed : 0;
    const finalMemTotal = Number.isFinite(memTotal) && memTotal > 0 ? memTotal : 0;
    // Only send if we have valid memory total (otherwise metrics are meaningless)
    if (finalMemTotal > 0) {
      send({
        type: 'metric',
        envId: env.id,
        cpu: finalCpu,
        memPercent: finalMemPercent,
        memUsed: finalMemUsed,
        memTotal: finalMemTotal
      });
    }
  } catch (error) {
    // Skip this environment if it fails (might be offline)
    console.error(`[MetricsSubprocess] Failed to collect metrics for ${env.name}:`, error);
  }
}
/**
 * One collection cycle: sample metrics for every enabled environment in
 * parallel, bounding each environment by ENV_METRICS_TIMEOUT so a slow or
 * offline host cannot stall the whole cycle.
 */
async function collectMetrics() {
  if (isShuttingDown) return;
  try {
    const environments = await getEnvironments();
    const enabledEnvs = environments.filter((env) => env.collectMetrics !== false);
    // allSettled + per-env timeout: one bad environment never blocks the rest.
    // A fulfilled value of null marks a timeout; the env name marks success.
    const results = await Promise.allSettled(
      enabledEnvs.map((env) =>
        withTimeout(collectEnvMetrics(env).then(() => env.name), ENV_METRICS_TIMEOUT, null)
      )
    );
    results.forEach((result, index) => {
      if (result.status === 'rejected') {
        console.warn(`[MetricsSubprocess] Environment "${enabledEnvs[index].name}" metrics failed:`, result.reason);
      } else if (result.value === null) {
        console.warn(`[MetricsSubprocess] Environment "${enabledEnvs[index].name}" metrics timed out after ${ENV_METRICS_TIMEOUT}ms`);
      }
    });
  } catch (error) {
    console.error('[MetricsSubprocess] Metrics collection error:', error);
    send({ type: 'error', message: `Metrics collection error: ${error}` });
  }
}
/**
 * Parse a human-readable size such as "107.4GB" into bytes.
 * Unknown or malformed input yields 0.
 */
function parseSize(sizeStr: string): number {
  const match = sizeStr.match(/^([\d.]+)\s*([KMGT]?B)$/i);
  if (!match) return 0;
  const value = parseFloat(match[1]);
  let multiplier = 1;
  switch (match[2].toUpperCase()) {
    case 'KB':
      multiplier = 1024;
      break;
    case 'MB':
      multiplier = 1024 * 1024;
      break;
    case 'GB':
      multiplier = 1024 * 1024 * 1024;
      break;
    case 'TB':
      multiplier = 1024 * 1024 * 1024 * 1024;
      break;
  }
  return value * multiplier;
}
/**
 * Render a byte count as a human-readable string, e.g. 1536 -> "1.5 KB".
 */
function formatSize(bytes: number): string {
  const units = ['B', 'KB', 'MB', 'GB', 'TB'];
  let value = bytes;
  let i = 0;
  // Divide down until the value fits the unit (or we run out of units).
  for (; value >= 1024 && i < units.length - 1; i++) {
    value /= 1024;
  }
  return `${value.toFixed(1)} ${units[i]}`;
}
/**
 * Check Docker disk usage for one environment and emit a `disk_warning`
 * IPC message when usage crosses the configured threshold.
 *
 * Total usage sums image sizes, container writable layers, volume sizes,
 * and build cache from the Docker disk-usage data. Capacity comes from the
 * storage driver's "Data Space Total" field when the driver reports it;
 * otherwise an absolute fallback (warn above 50 GB used) applies. Warnings
 * are rate-limited per environment via DISK_WARNING_COOLDOWN.
 */
async function checkEnvDiskSpace(env: { id: number; name: string; collectMetrics?: boolean; connectionType?: string }) {
  try {
    // Skip environments where metrics collection is disabled
    if (env.collectMetrics === false) {
      return;
    }
    // Skip Hawser Edge environments (handled by main process)
    if (env.connectionType === 'hawser-edge') {
      return;
    }
    // Check if we're in cooldown for this environment
    const lastWarningTime = lastDiskWarning.get(env.id);
    if (lastWarningTime && Date.now() - lastWarningTime < DISK_WARNING_COOLDOWN) {
      return; // Skip this environment, still in cooldown
    }
    // Get Docker disk usage data
    const diskData = (await getDiskUsage(env.id)) as any;
    if (!diskData) return;
    // Sum each usage category; any of them may be absent in the response.
    let totalUsed = 0;
    if (diskData.Images) {
      totalUsed += diskData.Images.reduce((sum: number, img: any) => sum + (img.Size || 0), 0);
    }
    if (diskData.Containers) {
      totalUsed += diskData.Containers.reduce((sum: number, c: any) => sum + (c.SizeRw || 0), 0);
    }
    if (diskData.Volumes) {
      totalUsed += diskData.Volumes.reduce(
        (sum: number, v: any) => sum + (v.UsageData?.Size || 0),
        0
      );
    }
    if (diskData.BuildCache) {
      totalUsed += diskData.BuildCache.reduce((sum: number, bc: any) => sum + (bc.Size || 0), 0);
    }
    // Get Docker root filesystem info from Docker info
    const info = (await getDockerInfo(env.id)) as any;
    const driverStatus = info?.DriverStatus;
    // DriverStatus is iterated as [key, value] pairs; only some storage
    // drivers report a "Data Space Total" entry.
    let dataSpaceTotal = 0;
    let diskPercentUsed = 0;
    if (driverStatus) {
      for (const [key, value] of driverStatus) {
        if (key === 'Data Space Total' && typeof value === 'string') {
          dataSpaceTotal = parseSize(value);
          break;
        }
      }
    }
    // If we found total disk space, calculate percentage
    if (dataSpaceTotal > 0) {
      diskPercentUsed = (totalUsed / dataSpaceTotal) * 100;
    } else {
      // Fallback: just report absolute usage if we can't determine percentage
      const GB = 1024 * 1024 * 1024;
      if (totalUsed > 50 * GB) {
        send({
          type: 'disk_warning',
          envId: env.id,
          envName: env.name,
          message: `Environment "${env.name}" is using ${formatSize(totalUsed)} of Docker disk space`
        });
        lastDiskWarning.set(env.id, Date.now());
      }
      return;
    }
    // Check against threshold (per-environment setting, with a default)
    const threshold =
      (await getEnvSetting('disk_warning_threshold', env.id)) || DEFAULT_DISK_THRESHOLD;
    if (diskPercentUsed >= threshold) {
      console.log(
        `[MetricsSubprocess] Docker disk usage for ${env.name}: ${diskPercentUsed.toFixed(1)}% (threshold: ${threshold}%)`
      );
      send({
        type: 'disk_warning',
        envId: env.id,
        envName: env.name,
        message: `Environment "${env.name}" Docker disk usage is at ${diskPercentUsed.toFixed(1)}% (${formatSize(totalUsed)} used)`,
        diskPercent: diskPercentUsed
      });
      lastDiskWarning.set(env.id, Date.now());
    }
  } catch (error) {
    // Skip this environment if it fails
    console.error(`[MetricsSubprocess] Failed to check disk space for ${env.name}:`, error);
  }
}
/**
 * One disk-check cycle: inspect every enabled environment in parallel,
 * bounding each by ENV_DISK_TIMEOUT so a slow host cannot stall the cycle.
 */
async function checkDiskSpace() {
  if (isShuttingDown) return;
  try {
    const environments = await getEnvironments();
    const enabledEnvs = environments.filter((env) => env.collectMetrics !== false);
    // allSettled + per-env timeout: one bad environment never blocks the rest.
    // A fulfilled value of null marks a timeout; the env name marks success.
    const results = await Promise.allSettled(
      enabledEnvs.map((env) =>
        withTimeout(checkEnvDiskSpace(env).then(() => env.name), ENV_DISK_TIMEOUT, null)
      )
    );
    results.forEach((result, index) => {
      if (result.status === 'rejected') {
        console.warn(`[MetricsSubprocess] Environment "${enabledEnvs[index].name}" disk check failed:`, result.reason);
      } else if (result.value === null) {
        console.warn(`[MetricsSubprocess] Environment "${enabledEnvs[index].name}" disk check timed out after ${ENV_DISK_TIMEOUT}ms`);
      }
    });
  } catch (error) {
    console.error('[MetricsSubprocess] Disk space check error:', error);
    send({ type: 'error', message: `Disk space check error: ${error}` });
  }
}
/**
 * Dispatch an IPC command from the main process. `refresh_environments`
 * needs no immediate action: the next collection cycle re-reads the
 * environment list anyway.
 */
function handleCommand(command: MainProcessCommand): void {
  if (command.type === 'refresh_environments') {
    console.log('[MetricsSubprocess] Refreshing environments...');
  } else if (command.type === 'shutdown') {
    console.log('[MetricsSubprocess] Shutdown requested');
    shutdown();
  }
}
/**
 * Cancel both collection timers, clear warning state, and exit the process.
 */
function shutdown(): void {
  isShuttingDown = true;
  for (const handle of [collectInterval, diskCheckInterval]) {
    if (handle) clearInterval(handle);
  }
  collectInterval = null;
  diskCheckInterval = null;
  lastDiskWarning.clear();
  console.log('[MetricsSubprocess] Stopped');
  process.exit(0);
}
/**
 * Boot the metrics subprocess: begin periodic metric and disk-space
 * collection, wire up IPC and signal handlers, and report readiness to
 * the parent process.
 */
function start(): void {
  console.log('[MetricsSubprocess] Starting metrics collection (every 10s)...');
  collectMetrics(); // take an immediate first sample
  collectInterval = setInterval(collectMetrics, COLLECT_INTERVAL);
  console.log('[MetricsSubprocess] Starting disk space monitoring (every 5 minutes)');
  checkDiskSpace(); // Initial check
  diskCheckInterval = setInterval(checkDiskSpace, DISK_CHECK_INTERVAL);
  // Commands arrive from the main process over IPC.
  process.on('message', (message: MainProcessCommand) => handleCommand(message));
  // Shut down cleanly on termination signals.
  process.on('SIGTERM', shutdown);
  process.on('SIGINT', shutdown);
  // Tell the parent we are up.
  send({ type: 'ready' });
  console.log('[MetricsSubprocess] Started successfully');
}

// Kick off the subprocess entry point.
start();

View File

@@ -1,15 +0,0 @@
// Wall-clock moment the server started, or null until setServerStartTime()
// is first called.
let serverStartTime: number | null = null;

/**
 * Record the server start time. Subsequent calls are no-ops, so the first
 * caller wins and uptime is measured from the earliest start.
 */
export function setServerStartTime(): void {
  if (serverStartTime !== null) return;
  serverStartTime = Date.now();
}

/**
 * Whole seconds elapsed since the recorded start (0 when never started).
 */
export function getServerUptime(): number {
  if (serverStartTime === null) return 0;
  const elapsedMs = Date.now() - serverStartTime;
  return Math.floor(elapsedMs / 1000);
}