mirror of
https://github.com/khoaliber/dockhand.git
synced 2026-03-05 05:39:04 +00:00
Initial commit
This commit is contained in:
575
lib/server/scheduler/tasks/container-update.ts
Normal file
575
lib/server/scheduler/tasks/container-update.ts
Normal file
@@ -0,0 +1,575 @@
|
||||
/**
|
||||
* Container Auto-Update Task
|
||||
*
|
||||
* Handles automatic container updates with vulnerability scanning.
|
||||
*/
|
||||
|
||||
import type { ScheduleTrigger, VulnerabilityCriteria } from '../../db';
|
||||
import {
|
||||
getAutoUpdateSettingById,
|
||||
updateAutoUpdateLastChecked,
|
||||
updateAutoUpdateLastUpdated,
|
||||
createScheduleExecution,
|
||||
updateScheduleExecution,
|
||||
appendScheduleExecutionLog,
|
||||
saveVulnerabilityScan,
|
||||
getCombinedScanForImage
|
||||
} from '../../db';
|
||||
import {
|
||||
pullImage,
|
||||
listContainers,
|
||||
inspectContainer,
|
||||
createContainer,
|
||||
stopContainer,
|
||||
removeContainer,
|
||||
checkImageUpdateAvailable,
|
||||
getTempImageTag,
|
||||
isDigestBasedImage,
|
||||
getImageIdByTag,
|
||||
removeTempImage,
|
||||
tagImage
|
||||
} from '../../docker';
|
||||
import { getScannerSettings, scanImage, type ScanResult, type VulnerabilitySeverity } from '../../scanner';
|
||||
import { sendEventNotification } from '../../notifications';
|
||||
import { parseImageNameAndTag, shouldBlockUpdate, combineScanSummaries, isDockhandContainer } from './update-utils';
|
||||
|
||||
/**
|
||||
* Execute a container auto-update.
|
||||
*/
|
||||
export async function runContainerUpdate(
|
||||
settingId: number,
|
||||
containerName: string,
|
||||
environmentId: number | null | undefined,
|
||||
triggeredBy: ScheduleTrigger
|
||||
): Promise<void> {
|
||||
const envId = environmentId ?? undefined;
|
||||
const startTime = Date.now();
|
||||
|
||||
// Create execution record
|
||||
const execution = await createScheduleExecution({
|
||||
scheduleType: 'container_update',
|
||||
scheduleId: settingId,
|
||||
environmentId: environmentId ?? null,
|
||||
entityName: containerName,
|
||||
triggeredBy,
|
||||
status: 'running'
|
||||
});
|
||||
|
||||
await updateScheduleExecution(execution.id, {
|
||||
startedAt: new Date().toISOString()
|
||||
});
|
||||
|
||||
const log = (message: string) => {
|
||||
console.log(`[Auto-update] ${message}`);
|
||||
appendScheduleExecutionLog(execution.id, `[${new Date().toISOString()}] ${message}`);
|
||||
};
|
||||
|
||||
try {
|
||||
log(`Checking container: ${containerName}`);
|
||||
await updateAutoUpdateLastChecked(containerName, envId);
|
||||
|
||||
// Find the container
|
||||
const containers = await listContainers(true, envId);
|
||||
const container = containers.find(c => c.name === containerName);
|
||||
|
||||
if (!container) {
|
||||
log(`Container not found: ${containerName}`);
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: 'failed',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
errorMessage: 'Container not found'
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Get the full container config to extract the image name (tag)
|
||||
const inspectData = await inspectContainer(container.id, envId) as any;
|
||||
const imageNameFromConfig = inspectData.Config?.Image;
|
||||
|
||||
if (!imageNameFromConfig) {
|
||||
log(`Could not determine image name from container config`);
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: 'failed',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
errorMessage: 'Could not determine image name'
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Prevent Dockhand from updating itself
|
||||
if (isDockhandContainer(imageNameFromConfig)) {
|
||||
log(`Skipping Dockhand container - cannot auto-update self`);
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: 'skipped',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
details: { reason: 'Cannot auto-update Dockhand itself' }
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Get the actual image ID from inspect data
|
||||
const currentImageId = inspectData.Image;
|
||||
|
||||
log(`Container is using image: ${imageNameFromConfig}`);
|
||||
log(`Current image ID: ${currentImageId?.substring(0, 19)}`);
|
||||
|
||||
// Get scanner and schedule settings early to determine scan strategy
|
||||
const [scannerSettings, updateSetting] = await Promise.all([
|
||||
getScannerSettings(envId),
|
||||
getAutoUpdateSettingById(settingId)
|
||||
]);
|
||||
|
||||
const vulnerabilityCriteria = (updateSetting?.vulnerabilityCriteria || 'never') as VulnerabilityCriteria;
|
||||
// Scan if scanning is enabled (scanner !== 'none')
|
||||
// The vulnerabilityCriteria only controls whether to BLOCK updates, not whether to SCAN
|
||||
const shouldScan = scannerSettings.scanner !== 'none';
|
||||
|
||||
// =============================================================================
|
||||
// SAFE UPDATE FLOW
|
||||
// =============================================================================
|
||||
// 1. Registry check (no pull) - determine if update is available
|
||||
// 2. If scanning enabled:
|
||||
// a. Pull new image (overwrites original tag temporarily)
|
||||
// b. Get new image ID
|
||||
// c. SAFETY: Restore original tag to point to OLD image
|
||||
// d. Tag new image with temp suffix for scanning
|
||||
// e. Scan temp image
|
||||
// f. If blocked: remove temp image, original tag still safe
|
||||
// g. If approved: re-tag to original and proceed
|
||||
// 3. If no scanning: simple pull and update
|
||||
// =============================================================================
|
||||
|
||||
// Step 1: Check for update using registry check (no pull)
|
||||
log(`Checking registry for updates: ${imageNameFromConfig}`);
|
||||
const registryCheck = await checkImageUpdateAvailable(imageNameFromConfig, currentImageId, envId);
|
||||
|
||||
// Handle local images or registry errors
|
||||
if (registryCheck.isLocalImage) {
|
||||
log(`Local image detected - skipping (auto-update requires registry)`);
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: 'skipped',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
details: { reason: 'Local image - no registry available' }
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
if (registryCheck.error) {
|
||||
log(`Registry check error: ${registryCheck.error}`);
|
||||
// Don't fail on transient errors, just skip this run
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: 'skipped',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
details: { reason: `Registry check failed: ${registryCheck.error}` }
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
if (!registryCheck.hasUpdate) {
|
||||
log(`Already up-to-date: ${containerName} is running the latest version`);
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: 'skipped',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
details: { reason: 'Already up-to-date' }
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
log(`Update available! Registry digest: ${registryCheck.registryDigest?.substring(0, 19) || 'unknown'}`);
|
||||
|
||||
// Variables for scan results
|
||||
let scanResults: ScanResult[] | undefined;
|
||||
let scanSummary: VulnerabilitySeverity | undefined;
|
||||
let newImageId: string | null = null;
|
||||
const newDigest = registryCheck.registryDigest;
|
||||
|
||||
// Step 2: Safe pull with temp tag protection (if scanning enabled)
|
||||
if (shouldScan) {
|
||||
log(`Safe-pull enabled (scanner: ${scannerSettings.scanner}, criteria: ${vulnerabilityCriteria})`);
|
||||
|
||||
// Check if this is a digest-based image (can't use temp tags)
|
||||
if (isDigestBasedImage(imageNameFromConfig)) {
|
||||
log(`Digest-based image detected - temp tag protection not available`);
|
||||
// Fall through to simple flow
|
||||
} else {
|
||||
const tempTag = getTempImageTag(imageNameFromConfig);
|
||||
log(`Using temp tag for safe pull: ${tempTag}`);
|
||||
|
||||
try {
|
||||
// Step 2a: Pull new image (overwrites original tag)
|
||||
log(`Pulling new image: ${imageNameFromConfig}`);
|
||||
await pullImage(imageNameFromConfig, undefined, envId);
|
||||
|
||||
// Step 2b: Get new image ID
|
||||
newImageId = await getImageIdByTag(imageNameFromConfig, envId);
|
||||
if (!newImageId) {
|
||||
throw new Error('Failed to get new image ID after pull');
|
||||
}
|
||||
log(`New image pulled: ${newImageId.substring(0, 19)}`);
|
||||
|
||||
// Step 2c: SAFETY - Restore original tag to OLD image
|
||||
log(`Restoring original tag to current safe image...`);
|
||||
const [oldRepo, oldTag] = parseImageNameAndTag(imageNameFromConfig);
|
||||
await tagImage(currentImageId, oldRepo, oldTag, envId);
|
||||
log(`Original tag ${imageNameFromConfig} restored to safe image`);
|
||||
|
||||
// Step 2d: Tag new image with temp suffix
|
||||
const [tempRepo, tempTagName] = parseImageNameAndTag(tempTag);
|
||||
await tagImage(newImageId, tempRepo, tempTagName, envId);
|
||||
log(`New image tagged as: ${tempTag}`);
|
||||
|
||||
// Step 2e: Scan temp image
|
||||
log(`Scanning new image for vulnerabilities...`);
|
||||
try {
|
||||
scanResults = await scanImage(tempTag, envId, (progress) => {
|
||||
const scannerTag = progress.scanner ? `[${progress.scanner}]` : '[scan]';
|
||||
if (progress.message) {
|
||||
log(`${scannerTag} ${progress.message}`);
|
||||
}
|
||||
if (progress.output) {
|
||||
log(`${scannerTag} ${progress.output}`);
|
||||
}
|
||||
});
|
||||
|
||||
if (scanResults.length > 0) {
|
||||
scanSummary = combineScanSummaries(scanResults);
|
||||
log(`Scan result: ${scanSummary.critical} critical, ${scanSummary.high} high, ${scanSummary.medium} medium, ${scanSummary.low} low`);
|
||||
|
||||
// Save scan results
|
||||
for (const result of scanResults) {
|
||||
try {
|
||||
await saveVulnerabilityScan({
|
||||
environmentId: envId ?? null,
|
||||
imageId: newImageId,
|
||||
imageName: result.imageName,
|
||||
scanner: result.scanner,
|
||||
scannedAt: result.scannedAt,
|
||||
scanDuration: result.scanDuration,
|
||||
criticalCount: result.summary.critical,
|
||||
highCount: result.summary.high,
|
||||
mediumCount: result.summary.medium,
|
||||
lowCount: result.summary.low,
|
||||
negligibleCount: result.summary.negligible,
|
||||
unknownCount: result.summary.unknown,
|
||||
vulnerabilities: result.vulnerabilities,
|
||||
error: result.error ?? null
|
||||
});
|
||||
} catch (saveError: any) {
|
||||
log(`Warning: Could not save scan results: ${saveError.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Handle 'more_than_current' criteria
|
||||
let currentScanSummary: VulnerabilitySeverity | undefined;
|
||||
if (vulnerabilityCriteria === 'more_than_current') {
|
||||
log(`Looking up cached scan for current image...`);
|
||||
try {
|
||||
const cachedScan = await getCombinedScanForImage(currentImageId, envId ?? null);
|
||||
if (cachedScan) {
|
||||
currentScanSummary = cachedScan;
|
||||
log(`Cached scan: ${currentScanSummary.critical} critical, ${currentScanSummary.high} high`);
|
||||
} else {
|
||||
log(`No cached scan found, scanning current image...`);
|
||||
const currentScanResults = await scanImage(currentImageId, envId, (progress) => {
|
||||
const tag = progress.scanner ? `[${progress.scanner}]` : '[scan]';
|
||||
if (progress.message) log(`${tag} ${progress.message}`);
|
||||
});
|
||||
if (currentScanResults.length > 0) {
|
||||
currentScanSummary = combineScanSummaries(currentScanResults);
|
||||
log(`Current image: ${currentScanSummary.critical} critical, ${currentScanSummary.high} high`);
|
||||
// Save for future use
|
||||
for (const result of currentScanResults) {
|
||||
try {
|
||||
await saveVulnerabilityScan({
|
||||
environmentId: envId ?? null,
|
||||
imageId: currentImageId,
|
||||
imageName: result.imageName,
|
||||
scanner: result.scanner,
|
||||
scannedAt: result.scannedAt,
|
||||
scanDuration: result.scanDuration,
|
||||
criticalCount: result.summary.critical,
|
||||
highCount: result.summary.high,
|
||||
mediumCount: result.summary.medium,
|
||||
lowCount: result.summary.low,
|
||||
negligibleCount: result.summary.negligible,
|
||||
unknownCount: result.summary.unknown,
|
||||
vulnerabilities: result.vulnerabilities,
|
||||
error: result.error ?? null
|
||||
});
|
||||
} catch { /* ignore */ }
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (cacheError: any) {
|
||||
log(`Warning: Could not get current scan: ${cacheError.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Check if update should be blocked
|
||||
const { blocked, reason } = shouldBlockUpdate(vulnerabilityCriteria, scanSummary, currentScanSummary);
|
||||
|
||||
if (blocked) {
|
||||
// Step 2f: BLOCKED - Remove temp image, original tag is safe
|
||||
log(`UPDATE BLOCKED: ${reason}`);
|
||||
log(`Removing blocked image: ${tempTag}`);
|
||||
await removeTempImage(newImageId, envId);
|
||||
log(`Blocked image removed - container will continue using safe image`);
|
||||
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: 'skipped',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
details: {
|
||||
mode: 'auto_update',
|
||||
reason: 'vulnerabilities_found',
|
||||
blockReason: reason,
|
||||
vulnerabilityCriteria,
|
||||
summary: { checked: 1, updated: 0, blocked: 1, failed: 0 },
|
||||
containers: [{
|
||||
name: containerName,
|
||||
status: 'blocked',
|
||||
blockReason: reason,
|
||||
scannerResults: scanResults.map(r => ({
|
||||
scanner: r.scanner,
|
||||
critical: r.summary.critical,
|
||||
high: r.summary.high,
|
||||
medium: r.summary.medium,
|
||||
low: r.summary.low,
|
||||
negligible: r.summary.negligible,
|
||||
unknown: r.summary.unknown
|
||||
}))
|
||||
}],
|
||||
scanResult: {
|
||||
summary: scanSummary,
|
||||
scanners: scanResults.map(r => r.scanner),
|
||||
scannedAt: scanResults[0]?.scannedAt,
|
||||
scannerResults: scanResults.map(r => ({
|
||||
scanner: r.scanner,
|
||||
critical: r.summary.critical,
|
||||
high: r.summary.high,
|
||||
medium: r.summary.medium,
|
||||
low: r.summary.low,
|
||||
negligible: r.summary.negligible,
|
||||
unknown: r.summary.unknown
|
||||
}))
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
await sendEventNotification('auto_update_blocked', {
|
||||
title: 'Auto-update blocked',
|
||||
message: `Container "${containerName}" update blocked: ${reason}`,
|
||||
type: 'warning'
|
||||
}, envId);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
log(`Scan passed vulnerability criteria`);
|
||||
}
|
||||
} catch (scanError: any) {
|
||||
// Scan failure - cleanup temp image and fail
|
||||
log(`Scan failed: ${scanError.message}`);
|
||||
log(`Removing temp image due to scan failure...`);
|
||||
await removeTempImage(newImageId, envId);
|
||||
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: 'failed',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
errorMessage: `Vulnerability scan failed: ${scanError.message}`
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Step 2g: APPROVED - Re-tag to original for update
|
||||
log(`Re-tagging approved image to: ${imageNameFromConfig}`);
|
||||
await tagImage(newImageId, oldRepo, oldTag, envId);
|
||||
log(`Image ready for update`);
|
||||
|
||||
// Clean up temp tag (optional, image will be removed when container is recreated)
|
||||
try {
|
||||
await removeTempImage(tempTag, envId);
|
||||
} catch { /* ignore cleanup errors */ }
|
||||
|
||||
} catch (pullError: any) {
|
||||
log(`Safe-pull failed: ${pullError.message}`);
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: 'failed',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
errorMessage: `Failed to pull image: ${pullError.message}`
|
||||
});
|
||||
return;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// No scanning - simple pull
|
||||
log(`Pulling update (no vulnerability scan)...`);
|
||||
try {
|
||||
await pullImage(imageNameFromConfig, undefined, envId);
|
||||
log(`Image pulled successfully`);
|
||||
} catch (pullError: any) {
|
||||
log(`Pull failed: ${pullError.message}`);
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: 'failed',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
errorMessage: `Failed to pull image: ${pullError.message}`
|
||||
});
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
log(`Proceeding with container recreation...`);
|
||||
const success = await recreateContainer(containerName, envId, log);
|
||||
|
||||
if (success) {
|
||||
await updateAutoUpdateLastUpdated(containerName, envId);
|
||||
log(`Successfully updated container: ${containerName}`);
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: 'success',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
details: {
|
||||
mode: 'auto_update',
|
||||
newDigest,
|
||||
vulnerabilityCriteria,
|
||||
summary: { checked: 1, updated: 1, blocked: 0, failed: 0 },
|
||||
containers: [{
|
||||
name: containerName,
|
||||
status: 'updated',
|
||||
scannerResults: scanResults?.map(r => ({
|
||||
scanner: r.scanner,
|
||||
critical: r.summary.critical,
|
||||
high: r.summary.high,
|
||||
medium: r.summary.medium,
|
||||
low: r.summary.low,
|
||||
negligible: r.summary.negligible,
|
||||
unknown: r.summary.unknown
|
||||
}))
|
||||
}],
|
||||
scanResult: scanSummary ? {
|
||||
summary: scanSummary,
|
||||
scanners: scanResults?.map(r => r.scanner) || [],
|
||||
scannedAt: scanResults?.[0]?.scannedAt,
|
||||
scannerResults: scanResults?.map(r => ({
|
||||
scanner: r.scanner,
|
||||
critical: r.summary.critical,
|
||||
high: r.summary.high,
|
||||
medium: r.summary.medium,
|
||||
low: r.summary.low,
|
||||
negligible: r.summary.negligible,
|
||||
unknown: r.summary.unknown
|
||||
})) || []
|
||||
} : undefined
|
||||
}
|
||||
});
|
||||
|
||||
// Send notification for successful update
|
||||
await sendEventNotification('auto_update_success', {
|
||||
title: 'Container auto-updated',
|
||||
message: `Container "${containerName}" was updated to a new image version`,
|
||||
type: 'success'
|
||||
}, envId);
|
||||
} else {
|
||||
throw new Error('Failed to recreate container');
|
||||
}
|
||||
} catch (error: any) {
|
||||
log(`Error: ${error.message}`);
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: 'failed',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
errorMessage: error.message
|
||||
});
|
||||
|
||||
// Send notification for failed update
|
||||
await sendEventNotification('auto_update_failed', {
|
||||
title: 'Auto-update failed',
|
||||
message: `Container "${containerName}" auto-update failed: ${error.message}`,
|
||||
type: 'error'
|
||||
}, envId);
|
||||
}
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// HELPER FUNCTIONS
|
||||
// =============================================================================
|
||||
|
||||
async function recreateContainer(
|
||||
containerName: string,
|
||||
envId?: number,
|
||||
log?: (msg: string) => void
|
||||
): Promise<boolean> {
|
||||
try {
|
||||
// Find the container by name
|
||||
const containers = await listContainers(true, envId);
|
||||
const container = containers.find(c => c.name === containerName);
|
||||
|
||||
if (!container) {
|
||||
log?.(`Container not found: ${containerName}`);
|
||||
return false;
|
||||
}
|
||||
|
||||
// Get full container config
|
||||
const inspectData = await inspectContainer(container.id, envId) as any;
|
||||
const wasRunning = inspectData.State.Running;
|
||||
const config = inspectData.Config;
|
||||
const hostConfig = inspectData.HostConfig;
|
||||
|
||||
log?.(`Recreating container: ${containerName} (was running: ${wasRunning})`);
|
||||
|
||||
// Stop container if running
|
||||
if (wasRunning) {
|
||||
log?.('Stopping container...');
|
||||
await stopContainer(container.id, envId);
|
||||
}
|
||||
|
||||
// Remove old container
|
||||
log?.('Removing old container...');
|
||||
await removeContainer(container.id, true, envId);
|
||||
|
||||
// Prepare port bindings
|
||||
const ports: { [key: string]: { HostPort: string } } = {};
|
||||
if (hostConfig.PortBindings) {
|
||||
for (const [containerPort, bindings] of Object.entries(hostConfig.PortBindings)) {
|
||||
if (bindings && (bindings as any[]).length > 0) {
|
||||
ports[containerPort] = { HostPort: (bindings as any[])[0].HostPort || '' };
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create new container
|
||||
log?.('Creating new container...');
|
||||
const newContainer = await createContainer({
|
||||
name: containerName,
|
||||
image: config.Image,
|
||||
ports,
|
||||
volumeBinds: hostConfig.Binds || [],
|
||||
env: config.Env || [],
|
||||
labels: config.Labels || {},
|
||||
cmd: config.Cmd || undefined,
|
||||
restartPolicy: hostConfig.RestartPolicy?.Name || 'no',
|
||||
networkMode: hostConfig.NetworkMode || undefined
|
||||
}, envId);
|
||||
|
||||
// Start if was running
|
||||
if (wasRunning) {
|
||||
log?.('Starting new container...');
|
||||
await newContainer.start();
|
||||
}
|
||||
|
||||
log?.('Container recreated successfully');
|
||||
return true;
|
||||
} catch (error: any) {
|
||||
log?.(`Failed to recreate container: ${error.message}`);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
509
lib/server/scheduler/tasks/env-update-check.ts
Normal file
509
lib/server/scheduler/tasks/env-update-check.ts
Normal file
@@ -0,0 +1,509 @@
|
||||
/**
|
||||
* Environment Update Check Task
|
||||
*
|
||||
* Checks all containers in an environment for available image updates.
|
||||
* Can optionally auto-update containers when updates are found.
|
||||
*/
|
||||
|
||||
import type { ScheduleTrigger, VulnerabilityCriteria } from '../../db';
|
||||
import {
|
||||
getEnvUpdateCheckSettings,
|
||||
getEnvironment,
|
||||
createScheduleExecution,
|
||||
updateScheduleExecution,
|
||||
appendScheduleExecutionLog,
|
||||
saveVulnerabilityScan,
|
||||
clearPendingContainerUpdates,
|
||||
addPendingContainerUpdate,
|
||||
removePendingContainerUpdate
|
||||
} from '../../db';
|
||||
import {
|
||||
listContainers,
|
||||
inspectContainer,
|
||||
checkImageUpdateAvailable,
|
||||
pullImage,
|
||||
stopContainer,
|
||||
removeContainer,
|
||||
createContainer,
|
||||
getTempImageTag,
|
||||
isDigestBasedImage,
|
||||
getImageIdByTag,
|
||||
removeTempImage,
|
||||
tagImage
|
||||
} from '../../docker';
|
||||
import { sendEventNotification } from '../../notifications';
|
||||
import { getScannerSettings, scanImage, type VulnerabilitySeverity } from '../../scanner';
|
||||
import { parseImageNameAndTag, shouldBlockUpdate, combineScanSummaries, isDockhandContainer } from './update-utils';
|
||||
|
||||
/**
 * One container for which the registry reports a newer image than the one
 * it is currently running. Collected during the check phase and consumed
 * by the optional auto-update phase.
 */
interface UpdateInfo {
  // Docker container ID (as returned by listContainers)
  containerId: string;
  // Human-readable container name
  containerName: string;
  // Image reference from the container's Config.Image (e.g. repo:tag)
  imageName: string;
  // Image ID the container is currently running (from inspect)
  currentImageId: string;
  // Digest of the local image, when the registry check could determine it
  currentDigest?: string;
  // Digest the registry reports for the tag, when available
  newDigest?: string;
}

// Track running update checks to prevent concurrent execution
// (keyed by environment ID; presumably removed when a check completes —
// the removal site is outside this chunk, TODO confirm).
const runningUpdateChecks = new Set<number>();
|
||||
|
||||
/**
|
||||
* Execute environment update check job.
|
||||
* @param environmentId - The environment ID to check
|
||||
* @param triggeredBy - What triggered this execution
|
||||
*/
|
||||
export async function runEnvUpdateCheckJob(
|
||||
environmentId: number,
|
||||
triggeredBy: ScheduleTrigger = 'cron'
|
||||
): Promise<void> {
|
||||
// Prevent concurrent execution for the same environment
|
||||
if (runningUpdateChecks.has(environmentId)) {
|
||||
console.log(`[EnvUpdateCheck] Environment ${environmentId} update check already running, skipping`);
|
||||
return;
|
||||
}
|
||||
|
||||
runningUpdateChecks.add(environmentId);
|
||||
const startTime = Date.now();
|
||||
|
||||
try {
|
||||
// Get environment info
|
||||
const env = await getEnvironment(environmentId);
|
||||
if (!env) {
|
||||
console.error(`[EnvUpdateCheck] Environment ${environmentId} not found`);
|
||||
return;
|
||||
}
|
||||
|
||||
// Get settings
|
||||
const config = await getEnvUpdateCheckSettings(environmentId);
|
||||
if (!config) {
|
||||
console.error(`[EnvUpdateCheck] No settings found for environment ${environmentId}`);
|
||||
return;
|
||||
}
|
||||
|
||||
// Create execution record
|
||||
const execution = await createScheduleExecution({
|
||||
scheduleType: 'env_update_check',
|
||||
scheduleId: environmentId,
|
||||
environmentId,
|
||||
entityName: `Update: ${env.name}`,
|
||||
triggeredBy,
|
||||
status: 'running'
|
||||
});
|
||||
|
||||
await updateScheduleExecution(execution.id, {
|
||||
startedAt: new Date().toISOString()
|
||||
});
|
||||
|
||||
const log = async (message: string) => {
|
||||
console.log(`[EnvUpdateCheck] ${message}`);
|
||||
await appendScheduleExecutionLog(execution.id, `[${new Date().toISOString()}] ${message}`);
|
||||
};
|
||||
|
||||
try {
|
||||
await log(`Starting update check for environment: ${env.name}`);
|
||||
await log(`Auto-update mode: ${config.autoUpdate ? 'ON' : 'OFF'}`);
|
||||
|
||||
// Clear pending updates at the start - we'll re-add as we discover updates
|
||||
await clearPendingContainerUpdates(environmentId);
|
||||
|
||||
// Get all containers in this environment
|
||||
const containers = await listContainers(true, environmentId);
|
||||
await log(`Found ${containers.length} containers`);
|
||||
|
||||
const updatesAvailable: UpdateInfo[] = [];
|
||||
let checkedCount = 0;
|
||||
let errorCount = 0;
|
||||
|
||||
// Check each container for updates
|
||||
for (const container of containers) {
|
||||
try {
|
||||
const inspectData = await inspectContainer(container.id, environmentId) as any;
|
||||
const imageName = inspectData.Config?.Image;
|
||||
const currentImageId = inspectData.Image;
|
||||
|
||||
if (!imageName) {
|
||||
await log(` [${container.name}] Skipping - no image name found`);
|
||||
continue;
|
||||
}
|
||||
|
||||
checkedCount++;
|
||||
await log(` Checking: ${container.name} (${imageName})`);
|
||||
|
||||
const result = await checkImageUpdateAvailable(imageName, currentImageId, environmentId);
|
||||
|
||||
if (result.isLocalImage) {
|
||||
await log(` Local image - skipping update check`);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (result.error) {
|
||||
await log(` Error: ${result.error}`);
|
||||
errorCount++;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (result.hasUpdate) {
|
||||
updatesAvailable.push({
|
||||
containerId: container.id,
|
||||
containerName: container.name,
|
||||
imageName,
|
||||
currentImageId,
|
||||
currentDigest: result.currentDigest,
|
||||
newDigest: result.registryDigest
|
||||
});
|
||||
// Add to pending table immediately - will be removed on successful update
|
||||
await addPendingContainerUpdate(environmentId, container.id, container.name, imageName);
|
||||
await log(` UPDATE AVAILABLE`);
|
||||
await log(` Current: ${result.currentDigest?.substring(0, 24) || 'unknown'}...`);
|
||||
await log(` New: ${result.registryDigest?.substring(0, 24) || 'unknown'}...`);
|
||||
} else {
|
||||
await log(` Up to date`);
|
||||
}
|
||||
} catch (err: any) {
|
||||
await log(` [${container.name}] Error: ${err.message}`);
|
||||
errorCount++;
|
||||
}
|
||||
}
|
||||
|
||||
// Summary
|
||||
await log('');
|
||||
await log('=== SUMMARY ===');
|
||||
await log(`Total containers: ${containers.length}`);
|
||||
await log(`Checked: ${checkedCount}`);
|
||||
await log(`Updates available: ${updatesAvailable.length}`);
|
||||
await log(`Errors: ${errorCount}`);
|
||||
|
||||
if (updatesAvailable.length === 0) {
|
||||
await log('All containers are up to date');
|
||||
// Pending updates already cleared at start, nothing to add
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: 'success',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
details: {
|
||||
updatesFound: 0,
|
||||
containersChecked: checkedCount,
|
||||
errors: errorCount
|
||||
}
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Build notification message with details
|
||||
const updateList = updatesAvailable
|
||||
.map(u => {
|
||||
const currentShort = u.currentDigest?.substring(0, 12) || 'unknown';
|
||||
const newShort = u.newDigest?.substring(0, 12) || 'unknown';
|
||||
return `- ${u.containerName} (${u.imageName})\n ${currentShort}... -> ${newShort}...`;
|
||||
})
|
||||
.join('\n');
|
||||
|
||||
if (config.autoUpdate) {
|
||||
// Auto-update mode: actually update the containers with safe-pull flow
|
||||
await log('');
|
||||
await log('=== AUTO-UPDATE MODE ===');
|
||||
|
||||
// Get scanner settings and vulnerability criteria
|
||||
const scannerSettings = await getScannerSettings(environmentId);
|
||||
const vulnerabilityCriteria = (config.vulnerabilityCriteria || 'never') as VulnerabilityCriteria;
|
||||
// Scan if scanning is enabled (scanner !== 'none')
|
||||
// The vulnerabilityCriteria only controls whether to BLOCK updates, not whether to SCAN
|
||||
const shouldScan = scannerSettings.scanner !== 'none';
|
||||
|
||||
await log(`Vulnerability criteria: ${vulnerabilityCriteria}`);
|
||||
if (shouldScan) {
|
||||
await log(`Scanner: ${scannerSettings.scanner} (scan enabled)`);
|
||||
}
|
||||
await log(`Updating ${updatesAvailable.length} containers...`);
|
||||
|
||||
let successCount = 0;
|
||||
let failCount = 0;
|
||||
let blockedCount = 0;
|
||||
const updatedContainers: string[] = [];
|
||||
const failedContainers: string[] = [];
|
||||
const blockedContainers: { name: string; reason: string; scannerResults?: { scanner: string; critical: number; high: number; medium: number; low: number }[] }[] = [];
|
||||
|
||||
for (const update of updatesAvailable) {
|
||||
// Skip Dockhand container - cannot update itself
|
||||
if (isDockhandContainer(update.imageName)) {
|
||||
await log(`\n[${update.containerName}] Skipping - cannot auto-update Dockhand itself`);
|
||||
continue;
|
||||
}
|
||||
|
||||
try {
|
||||
await log(`\nUpdating: ${update.containerName}`);
|
||||
|
||||
// Get full container config
|
||||
const inspectData = await inspectContainer(update.containerId, environmentId) as any;
|
||||
const wasRunning = inspectData.State.Running;
|
||||
const containerConfig = inspectData.Config;
|
||||
const hostConfig = inspectData.HostConfig;
|
||||
|
||||
// SAFE-PULL FLOW
|
||||
if (shouldScan && !isDigestBasedImage(update.imageName)) {
|
||||
const tempTag = getTempImageTag(update.imageName);
|
||||
await log(` Safe-pull with temp tag: ${tempTag}`);
|
||||
|
||||
// Step 1: Pull new image
|
||||
await log(` Pulling ${update.imageName}...`);
|
||||
await pullImage(update.imageName, () => {}, environmentId);
|
||||
|
||||
// Step 2: Get new image ID
|
||||
const newImageId = await getImageIdByTag(update.imageName, environmentId);
|
||||
if (!newImageId) {
|
||||
throw new Error('Failed to get new image ID after pull');
|
||||
}
|
||||
await log(` New image: ${newImageId.substring(0, 19)}`);
|
||||
|
||||
// Step 3: SAFETY - Restore original tag to old image
|
||||
const [oldRepo, oldTag] = parseImageNameAndTag(update.imageName);
|
||||
await tagImage(update.currentImageId, oldRepo, oldTag, environmentId);
|
||||
await log(` Restored original tag to safe image`);
|
||||
|
||||
// Step 4: Tag new image with temp suffix
|
||||
const [tempRepo, tempTagName] = parseImageNameAndTag(tempTag);
|
||||
await tagImage(newImageId, tempRepo, tempTagName, environmentId);
|
||||
|
||||
// Step 5: Scan temp image
|
||||
await log(` Scanning for vulnerabilities...`);
|
||||
let scanBlocked = false;
|
||||
let blockReason = '';
|
||||
let currentScannerResults: { scanner: string; critical: number; high: number; medium: number; low: number }[] = [];
|
||||
|
||||
// Collect scan logs to log after scan completes
|
||||
const scanLogs: string[] = [];
|
||||
|
||||
try {
|
||||
const scanResults = await scanImage(tempTag, environmentId, (progress) => {
|
||||
if (progress.message) {
|
||||
scanLogs.push(` [${progress.scanner || 'scan'}] ${progress.message}`);
|
||||
}
|
||||
});
|
||||
|
||||
// Log collected scan messages
|
||||
for (const scanLog of scanLogs) {
|
||||
await log(scanLog);
|
||||
}
|
||||
|
||||
if (scanResults.length > 0) {
|
||||
const scanSummary = combineScanSummaries(scanResults);
|
||||
await log(` Scan: ${scanSummary.critical} critical, ${scanSummary.high} high, ${scanSummary.medium} medium, ${scanSummary.low} low`);
|
||||
|
||||
// Capture per-scanner results for blocking info
|
||||
currentScannerResults = scanResults.map(r => ({
|
||||
scanner: r.scanner,
|
||||
critical: r.summary.critical,
|
||||
high: r.summary.high,
|
||||
medium: r.summary.medium,
|
||||
low: r.summary.low
|
||||
}));
|
||||
|
||||
// Save scan results
|
||||
for (const result of scanResults) {
|
||||
try {
|
||||
await saveVulnerabilityScan({
|
||||
environmentId,
|
||||
imageId: newImageId,
|
||||
imageName: result.imageName,
|
||||
scanner: result.scanner,
|
||||
scannedAt: result.scannedAt,
|
||||
scanDuration: result.scanDuration,
|
||||
criticalCount: result.summary.critical,
|
||||
highCount: result.summary.high,
|
||||
mediumCount: result.summary.medium,
|
||||
lowCount: result.summary.low,
|
||||
negligibleCount: result.summary.negligible,
|
||||
unknownCount: result.summary.unknown,
|
||||
vulnerabilities: result.vulnerabilities,
|
||||
error: result.error ?? null
|
||||
});
|
||||
} catch { /* ignore save errors */ }
|
||||
}
|
||||
|
||||
// Check if blocked
|
||||
const { blocked, reason } = shouldBlockUpdate(vulnerabilityCriteria, scanSummary, undefined);
|
||||
if (blocked) {
|
||||
scanBlocked = true;
|
||||
blockReason = reason;
|
||||
}
|
||||
}
|
||||
} catch (scanErr: any) {
|
||||
await log(` Scan failed: ${scanErr.message}`);
|
||||
scanBlocked = true;
|
||||
blockReason = `Scan failed: ${scanErr.message}`;
|
||||
}
|
||||
|
||||
if (scanBlocked) {
|
||||
// BLOCKED - Remove temp image
|
||||
await log(` UPDATE BLOCKED: ${blockReason}`);
|
||||
await removeTempImage(newImageId, environmentId);
|
||||
await log(` Removed blocked image - container stays safe`);
|
||||
blockedCount++;
|
||||
blockedContainers.push({
|
||||
name: update.containerName,
|
||||
reason: blockReason,
|
||||
scannerResults: currentScannerResults.length > 0 ? currentScannerResults : undefined
|
||||
});
|
||||
continue;
|
||||
}
|
||||
|
||||
// APPROVED - Re-tag to original
|
||||
await log(` Scan passed, re-tagging...`);
|
||||
await tagImage(newImageId, oldRepo, oldTag, environmentId);
|
||||
try {
|
||||
await removeTempImage(tempTag, environmentId);
|
||||
} catch { /* ignore cleanup errors */ }
|
||||
} else {
|
||||
// Simple pull (no scanning or digest-based image)
|
||||
await log(` Pulling ${update.imageName}...`);
|
||||
await pullImage(update.imageName, () => {}, environmentId);
|
||||
}
|
||||
|
||||
// Stop container if running
|
||||
if (wasRunning) {
|
||||
await log(` Stopping...`);
|
||||
await stopContainer(update.containerId, environmentId);
|
||||
}
|
||||
|
||||
// Remove old container
|
||||
await log(` Removing old container...`);
|
||||
await removeContainer(update.containerId, true, environmentId);
|
||||
|
||||
// Prepare port bindings
|
||||
const ports: { [key: string]: { HostPort: string } } = {};
|
||||
if (hostConfig.PortBindings) {
|
||||
for (const [containerPort, bindings] of Object.entries(hostConfig.PortBindings)) {
|
||||
if (bindings && (bindings as any[]).length > 0) {
|
||||
ports[containerPort] = { HostPort: (bindings as any[])[0].HostPort || '' };
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create new container
|
||||
await log(` Creating new container...`);
|
||||
const newContainer = await createContainer({
|
||||
name: update.containerName,
|
||||
image: update.imageName,
|
||||
ports,
|
||||
volumeBinds: hostConfig.Binds || [],
|
||||
env: containerConfig.Env || [],
|
||||
labels: containerConfig.Labels || {},
|
||||
cmd: containerConfig.Cmd || undefined,
|
||||
restartPolicy: hostConfig.RestartPolicy?.Name || 'no',
|
||||
networkMode: hostConfig.NetworkMode || undefined
|
||||
}, environmentId);
|
||||
|
||||
// Start if was running
|
||||
if (wasRunning) {
|
||||
await log(` Starting...`);
|
||||
await newContainer.start();
|
||||
}
|
||||
|
||||
await log(` Updated successfully`);
|
||||
successCount++;
|
||||
updatedContainers.push(update.containerName);
|
||||
// Remove from pending table - successfully updated
|
||||
await removePendingContainerUpdate(environmentId, update.containerId);
|
||||
} catch (err: any) {
|
||||
await log(` FAILED: ${err.message}`);
|
||||
failCount++;
|
||||
failedContainers.push(update.containerName);
|
||||
}
|
||||
}
|
||||
|
||||
await log('');
|
||||
await log(`=== UPDATE COMPLETE ===`);
|
||||
await log(`Updated: ${successCount}`);
|
||||
await log(`Blocked: ${blockedCount}`);
|
||||
await log(`Failed: ${failCount}`);
|
||||
|
||||
// Send notifications
|
||||
if (blockedCount > 0) {
|
||||
await sendEventNotification('auto_update_blocked', {
|
||||
title: `${blockedCount} update(s) blocked in ${env.name}`,
|
||||
message: blockedContainers.map(c => `- ${c.name}: ${c.reason}`).join('\n'),
|
||||
type: 'warning'
|
||||
}, environmentId);
|
||||
}
|
||||
|
||||
const notificationMessage = successCount > 0
|
||||
? `Updated ${successCount} container(s) in ${env.name}:\n${updatedContainers.map(c => `- ${c}`).join('\n')}${blockedCount > 0 ? `\n\nBlocked (${blockedCount}):\n${blockedContainers.map(c => `- ${c.name}`).join('\n')}` : ''}${failCount > 0 ? `\n\nFailed (${failCount}):\n${failedContainers.map(c => `- ${c}`).join('\n')}` : ''}`
|
||||
: blockedCount > 0 ? `All updates blocked in ${env.name}` : `Update failed for all containers in ${env.name}`;
|
||||
|
||||
await sendEventNotification('batch_update_success', {
|
||||
title: successCount > 0 ? `Containers updated in ${env.name}` : blockedCount > 0 ? `Updates blocked in ${env.name}` : `Container updates failed in ${env.name}`,
|
||||
message: notificationMessage,
|
||||
type: successCount > 0 && failCount === 0 && blockedCount === 0 ? 'success' : successCount > 0 ? 'warning' : 'error'
|
||||
}, environmentId);
|
||||
|
||||
// Blocked/failed containers stay in pending table (successfully updated ones were removed)
|
||||
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: failCount > 0 && successCount === 0 && blockedCount === 0 ? 'failed' : 'success',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
details: {
|
||||
mode: 'auto_update',
|
||||
updatesFound: updatesAvailable.length,
|
||||
containersChecked: checkedCount,
|
||||
errors: errorCount,
|
||||
autoUpdate: true,
|
||||
vulnerabilityCriteria,
|
||||
summary: { checked: checkedCount, updated: successCount, blocked: blockedCount, failed: failCount },
|
||||
containers: [
|
||||
...updatedContainers.map(name => ({ name, status: 'updated' as const })),
|
||||
...blockedContainers.map(c => ({ name: c.name, status: 'blocked' as const, blockReason: c.reason, scannerResults: c.scannerResults })),
|
||||
...failedContainers.map(name => ({ name, status: 'failed' as const }))
|
||||
],
|
||||
updated: successCount,
|
||||
blocked: blockedCount,
|
||||
failed: failCount,
|
||||
blockedContainers
|
||||
}
|
||||
});
|
||||
} else {
|
||||
// Check-only mode: just send notification
|
||||
await log('');
|
||||
await log('Check-only mode - sending notification about available updates');
|
||||
// Pending updates already added as we discovered them
|
||||
|
||||
await sendEventNotification('updates_detected', {
|
||||
title: `Container updates available in ${env.name}`,
|
||||
message: `${updatesAvailable.length} update(s) available:\n${updateList}`,
|
||||
type: 'info'
|
||||
}, environmentId);
|
||||
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: 'success',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
details: {
|
||||
mode: 'notify_only',
|
||||
updatesFound: updatesAvailable.length,
|
||||
containersChecked: checkedCount,
|
||||
errors: errorCount,
|
||||
autoUpdate: false,
|
||||
summary: { checked: checkedCount, updated: 0, blocked: 0, failed: 0 },
|
||||
containers: updatesAvailable.map(u => ({
|
||||
name: u.containerName,
|
||||
status: 'checked' as const,
|
||||
imageName: u.imageName,
|
||||
currentDigest: u.currentDigest,
|
||||
newDigest: u.newDigest
|
||||
}))
|
||||
}
|
||||
});
|
||||
}
|
||||
} catch (error: any) {
|
||||
await log(`Error: ${error.message}`);
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: 'failed',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
errorMessage: error.message
|
||||
});
|
||||
}
|
||||
} finally {
|
||||
runningUpdateChecks.delete(environmentId);
|
||||
}
|
||||
}
|
||||
102
lib/server/scheduler/tasks/git-stack-sync.ts
Normal file
102
lib/server/scheduler/tasks/git-stack-sync.ts
Normal file
@@ -0,0 +1,102 @@
|
||||
/**
|
||||
* Git Stack Auto-Sync Task
|
||||
*
|
||||
* Handles automatic syncing and deploying of git-based compose stacks.
|
||||
*/
|
||||
|
||||
import type { ScheduleTrigger } from '../../db';
|
||||
import {
|
||||
createScheduleExecution,
|
||||
updateScheduleExecution,
|
||||
appendScheduleExecutionLog
|
||||
} from '../../db';
|
||||
import { deployGitStack } from '../../git';
|
||||
import { sendEventNotification } from '../../notifications';
|
||||
|
||||
/**
|
||||
* Execute a git stack sync.
|
||||
*/
|
||||
export async function runGitStackSync(
|
||||
stackId: number,
|
||||
stackName: string,
|
||||
environmentId: number | null | undefined,
|
||||
triggeredBy: ScheduleTrigger
|
||||
): Promise<void> {
|
||||
const startTime = Date.now();
|
||||
|
||||
// Create execution record
|
||||
const execution = await createScheduleExecution({
|
||||
scheduleType: 'git_stack_sync',
|
||||
scheduleId: stackId,
|
||||
environmentId: environmentId ?? null,
|
||||
entityName: stackName,
|
||||
triggeredBy,
|
||||
status: 'running'
|
||||
});
|
||||
|
||||
await updateScheduleExecution(execution.id, {
|
||||
startedAt: new Date().toISOString()
|
||||
});
|
||||
|
||||
const log = (message: string) => {
|
||||
console.log(`[Git-sync] ${message}`);
|
||||
appendScheduleExecutionLog(execution.id, `[${new Date().toISOString()}] ${message}`);
|
||||
};
|
||||
|
||||
try {
|
||||
log(`Starting sync for stack: ${stackName}`);
|
||||
|
||||
// Deploy the git stack (only if there are changes)
|
||||
const result = await deployGitStack(stackId, { force: false });
|
||||
|
||||
const envId = environmentId ?? undefined;
|
||||
|
||||
if (result.success) {
|
||||
if (result.skipped) {
|
||||
log(`No changes detected for stack: ${stackName}, skipping redeploy`);
|
||||
|
||||
// Send notification for skipped sync
|
||||
await sendEventNotification('git_sync_skipped', {
|
||||
title: 'Git sync skipped',
|
||||
message: `Stack "${stackName}" sync skipped: no changes detected`,
|
||||
type: 'info'
|
||||
}, envId);
|
||||
} else {
|
||||
log(`Successfully deployed stack: ${stackName}`);
|
||||
|
||||
// Send notification for successful sync
|
||||
await sendEventNotification('git_sync_success', {
|
||||
title: 'Git stack deployed',
|
||||
message: `Stack "${stackName}" was synced and deployed successfully`,
|
||||
type: 'success'
|
||||
}, envId);
|
||||
}
|
||||
if (result.output) log(result.output);
|
||||
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: result.skipped ? 'skipped' : 'success',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
details: { output: result.output }
|
||||
});
|
||||
} else {
|
||||
throw new Error(result.error || 'Deployment failed');
|
||||
}
|
||||
} catch (error: any) {
|
||||
log(`Error: ${error.message}`);
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: 'failed',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
errorMessage: error.message
|
||||
});
|
||||
|
||||
// Send notification for failed sync
|
||||
const envId = environmentId ?? undefined;
|
||||
await sendEventNotification('git_sync_failed', {
|
||||
title: 'Git sync failed',
|
||||
message: `Stack "${stackName}" sync failed: ${error.message}`,
|
||||
type: 'error'
|
||||
}, envId);
|
||||
}
|
||||
}
|
||||
202
lib/server/scheduler/tasks/system-cleanup.ts
Normal file
202
lib/server/scheduler/tasks/system-cleanup.ts
Normal file
@@ -0,0 +1,202 @@
|
||||
/**
|
||||
* System Cleanup Tasks
|
||||
*
|
||||
* Handles system cleanup jobs (schedule executions, container events).
|
||||
*/
|
||||
|
||||
import type { ScheduleTrigger } from '../../db';
import {
  appendScheduleExecutionLog,
  cleanupOldExecutions,
  createScheduleExecution,
  deleteOldContainerEvents,
  getEventCleanupEnabled,
  getEventRetentionDays,
  getScheduleCleanupEnabled,
  getScheduleRetentionDays,
  updateScheduleExecution
} from '../../db';
|
||||
|
||||
// System job IDs
// Fixed scheduleId values identifying the built-in cleanup jobs in schedule
// execution records (other schedule types use entity IDs, e.g. stack IDs).
export const SYSTEM_SCHEDULE_CLEANUP_ID = 1;
export const SYSTEM_EVENT_CLEANUP_ID = 2;
export const SYSTEM_VOLUME_HELPER_CLEANUP_ID = 3;
|
||||
|
||||
/**
|
||||
* Execute schedule execution cleanup job.
|
||||
*/
|
||||
export async function runScheduleCleanupJob(triggeredBy: ScheduleTrigger = 'cron'): Promise<void> {
|
||||
// Check if cleanup is enabled (skip check if manually triggered)
|
||||
if (triggeredBy === 'cron') {
|
||||
const enabled = await getScheduleCleanupEnabled();
|
||||
if (!enabled) {
|
||||
return; // Skip execution if disabled
|
||||
}
|
||||
}
|
||||
|
||||
const startTime = Date.now();
|
||||
|
||||
// Create execution record
|
||||
const execution = await createScheduleExecution({
|
||||
scheduleType: 'system_cleanup',
|
||||
scheduleId: SYSTEM_SCHEDULE_CLEANUP_ID,
|
||||
environmentId: null,
|
||||
entityName: 'Schedule execution cleanup',
|
||||
triggeredBy,
|
||||
status: 'running'
|
||||
});
|
||||
|
||||
await updateScheduleExecution(execution.id, {
|
||||
startedAt: new Date().toISOString()
|
||||
});
|
||||
|
||||
const log = async (message: string) => {
|
||||
console.log(`[Schedule Cleanup] ${message}`);
|
||||
await appendScheduleExecutionLog(execution.id, `[${new Date().toISOString()}] ${message}`);
|
||||
};
|
||||
|
||||
try {
|
||||
const retentionDays = await getScheduleRetentionDays();
|
||||
await log(`Starting cleanup with ${retentionDays} day retention`);
|
||||
|
||||
await cleanupOldExecutions(retentionDays);
|
||||
|
||||
await log('Cleanup completed successfully');
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: 'success',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
details: { retentionDays }
|
||||
});
|
||||
} catch (error: any) {
|
||||
await log(`Error: ${error.message}`);
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: 'failed',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
errorMessage: error.message
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute event cleanup job.
|
||||
*/
|
||||
export async function runEventCleanupJob(triggeredBy: ScheduleTrigger = 'cron'): Promise<void> {
|
||||
// Check if cleanup is enabled (skip check if manually triggered)
|
||||
if (triggeredBy === 'cron') {
|
||||
const enabled = await getEventCleanupEnabled();
|
||||
if (!enabled) {
|
||||
return; // Skip execution if disabled
|
||||
}
|
||||
}
|
||||
|
||||
const startTime = Date.now();
|
||||
|
||||
// Create execution record
|
||||
const execution = await createScheduleExecution({
|
||||
scheduleType: 'system_cleanup',
|
||||
scheduleId: SYSTEM_EVENT_CLEANUP_ID,
|
||||
environmentId: null,
|
||||
entityName: 'Container event cleanup',
|
||||
triggeredBy,
|
||||
status: 'running'
|
||||
});
|
||||
|
||||
await updateScheduleExecution(execution.id, {
|
||||
startedAt: new Date().toISOString()
|
||||
});
|
||||
|
||||
const log = async (message: string) => {
|
||||
console.log(`[Event Cleanup] ${message}`);
|
||||
await appendScheduleExecutionLog(execution.id, `[${new Date().toISOString()}] ${message}`);
|
||||
};
|
||||
|
||||
try {
|
||||
const { deleteOldContainerEvents } = await import('../../db');
|
||||
const retentionDays = await getEventRetentionDays();
|
||||
|
||||
await log(`Starting cleanup of events older than ${retentionDays} days`);
|
||||
|
||||
const deleted = await deleteOldContainerEvents(retentionDays);
|
||||
|
||||
await log(`Removed ${deleted} old container events`);
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: 'success',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
details: { deletedCount: deleted, retentionDays }
|
||||
});
|
||||
} catch (error: any) {
|
||||
await log(`Error: ${error.message}`);
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: 'failed',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
errorMessage: error.message
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute volume helper cleanup job.
|
||||
* Cleans up stale dockhand-browse-* containers used for volume browsing.
|
||||
* @param triggeredBy - What triggered this execution
|
||||
* @param cleanupFns - Optional cleanup functions (passed from scheduler to avoid dynamic import issues)
|
||||
*/
|
||||
export async function runVolumeHelperCleanupJob(
|
||||
triggeredBy: ScheduleTrigger = 'cron',
|
||||
cleanupFns?: {
|
||||
cleanupStaleVolumeHelpers: () => Promise<void>;
|
||||
cleanupExpiredVolumeHelpers: () => Promise<void>;
|
||||
}
|
||||
): Promise<void> {
|
||||
const startTime = Date.now();
|
||||
|
||||
// Create execution record
|
||||
const execution = await createScheduleExecution({
|
||||
scheduleType: 'system_cleanup',
|
||||
scheduleId: SYSTEM_VOLUME_HELPER_CLEANUP_ID,
|
||||
environmentId: null,
|
||||
entityName: 'Volume helper cleanup',
|
||||
triggeredBy,
|
||||
status: 'running'
|
||||
});
|
||||
|
||||
await updateScheduleExecution(execution.id, {
|
||||
startedAt: new Date().toISOString()
|
||||
});
|
||||
|
||||
const log = async (message: string) => {
|
||||
console.log(`[Volume Helper Cleanup] ${message}`);
|
||||
await appendScheduleExecutionLog(execution.id, `[${new Date().toISOString()}] ${message}`);
|
||||
};
|
||||
|
||||
try {
|
||||
await log('Starting cleanup of stale and expired volume helper containers');
|
||||
|
||||
if (cleanupFns) {
|
||||
// Use provided functions (from scheduler static imports)
|
||||
await cleanupFns.cleanupStaleVolumeHelpers();
|
||||
await cleanupFns.cleanupExpiredVolumeHelpers();
|
||||
} else {
|
||||
// Fallback to dynamic import (may not work in production)
|
||||
const { runVolumeHelperCleanup } = await import('../../db');
|
||||
await runVolumeHelperCleanup();
|
||||
}
|
||||
|
||||
await log('Cleanup completed successfully');
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: 'success',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime
|
||||
});
|
||||
} catch (error: any) {
|
||||
await log(`Error: ${error.message}`);
|
||||
await updateScheduleExecution(execution.id, {
|
||||
status: 'failed',
|
||||
completedAt: new Date().toISOString(),
|
||||
duration: Date.now() - startTime,
|
||||
errorMessage: error.message
|
||||
});
|
||||
}
|
||||
}
|
||||
114
lib/server/scheduler/tasks/update-utils.ts
Normal file
114
lib/server/scheduler/tasks/update-utils.ts
Normal file
@@ -0,0 +1,114 @@
|
||||
/**
|
||||
* Shared utilities for container and environment auto-update tasks.
|
||||
*/
|
||||
|
||||
import type { VulnerabilityCriteria } from '../../db';
|
||||
import type { VulnerabilitySeverity } from '../../scanner';
|
||||
|
||||
/**
|
||||
* Parse image name and tag from a full image reference.
|
||||
* Handles various formats:
|
||||
* - nginx → ["nginx", "latest"]
|
||||
* - nginx:1.25 → ["nginx", "1.25"]
|
||||
* - registry.example.com:5000/myimage:v1 → ["registry.example.com:5000/myimage", "v1"]
|
||||
* - nginx:latest-dockhand-pending → ["nginx", "latest-dockhand-pending"]
|
||||
*/
|
||||
export function parseImageNameAndTag(imageName: string): [string, string] {
|
||||
// Handle digest-based images (return as-is with empty tag)
|
||||
if (imageName.includes('@sha256:')) {
|
||||
return [imageName, ''];
|
||||
}
|
||||
|
||||
// Find the last colon that's part of the tag (not part of registry port)
|
||||
const lastColon = imageName.lastIndexOf(':');
|
||||
if (lastColon === -1) {
|
||||
return [imageName, 'latest'];
|
||||
}
|
||||
|
||||
// Check if this colon is part of a registry port
|
||||
// Registry ports appear before a slash: registry:5000/image
|
||||
const afterColon = imageName.substring(lastColon + 1);
|
||||
if (afterColon.includes('/')) {
|
||||
// The colon is part of the registry, not the tag
|
||||
return [imageName, 'latest'];
|
||||
}
|
||||
|
||||
// The colon separates repo from tag
|
||||
return [imageName.substring(0, lastColon), afterColon];
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine if an update should be blocked based on vulnerability criteria.
|
||||
*/
|
||||
export function shouldBlockUpdate(
|
||||
criteria: VulnerabilityCriteria,
|
||||
newScanSummary: VulnerabilitySeverity,
|
||||
currentScanSummary?: VulnerabilitySeverity
|
||||
): { blocked: boolean; reason: string } {
|
||||
const totalVulns = newScanSummary.critical + newScanSummary.high + newScanSummary.medium + newScanSummary.low;
|
||||
|
||||
switch (criteria) {
|
||||
case 'any':
|
||||
if (totalVulns > 0) {
|
||||
return {
|
||||
blocked: true,
|
||||
reason: `Found ${totalVulns} vulnerabilities (${newScanSummary.critical} critical, ${newScanSummary.high} high, ${newScanSummary.medium} medium, ${newScanSummary.low} low)`
|
||||
};
|
||||
}
|
||||
break;
|
||||
case 'critical_high':
|
||||
if (newScanSummary.critical > 0 || newScanSummary.high > 0) {
|
||||
return {
|
||||
blocked: true,
|
||||
reason: `Found ${newScanSummary.critical} critical and ${newScanSummary.high} high severity vulnerabilities`
|
||||
};
|
||||
}
|
||||
break;
|
||||
case 'critical':
|
||||
if (newScanSummary.critical > 0) {
|
||||
return {
|
||||
blocked: true,
|
||||
reason: `Found ${newScanSummary.critical} critical vulnerabilities`
|
||||
};
|
||||
}
|
||||
break;
|
||||
case 'more_than_current':
|
||||
if (currentScanSummary) {
|
||||
const currentTotal = currentScanSummary.critical + currentScanSummary.high + currentScanSummary.medium + currentScanSummary.low;
|
||||
if (totalVulns > currentTotal) {
|
||||
return {
|
||||
blocked: true,
|
||||
reason: `New image has ${totalVulns} vulnerabilities vs ${currentTotal} in current image`
|
||||
};
|
||||
}
|
||||
}
|
||||
break;
|
||||
case 'never':
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return { blocked: false, reason: '' };
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a container is the Dockhand application itself.
|
||||
* Used to prevent Dockhand from updating its own container.
|
||||
*/
|
||||
export function isDockhandContainer(imageName: string): boolean {
|
||||
return imageName.toLowerCase().includes('fnsys/dockhand');
|
||||
}
|
||||
|
||||
/**
|
||||
* Combine multiple scan summaries by taking the maximum of each severity level.
|
||||
*/
|
||||
export function combineScanSummaries(results: { summary: VulnerabilitySeverity }[]): VulnerabilitySeverity {
|
||||
return results.reduce((acc, result) => ({
|
||||
critical: Math.max(acc.critical, result.summary.critical),
|
||||
high: Math.max(acc.high, result.summary.high),
|
||||
medium: Math.max(acc.medium, result.summary.medium),
|
||||
low: Math.max(acc.low, result.summary.low),
|
||||
negligible: Math.max(acc.negligible, result.summary.negligible),
|
||||
unknown: Math.max(acc.unknown, result.summary.unknown)
|
||||
}), { critical: 0, high: 0, medium: 0, low: 0, negligible: 0, unknown: 0 });
|
||||
}
|
||||
Reference in New Issue
Block a user