+import fs from 'node:fs/promises'
+import path from 'node:path'
+import { execa } from 'execa'
+import { DeleteObjectsCommand, ListObjectsV2Command, PutObjectCommand } from '@aws-sdk/client-s3'
+
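+// Retention window: S3 backups dated more than this many days before today are removed by the cleanup step below.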
+const BACKUP_RETENTION_DAYS = 3
+
+export default defineTask({
+  meta: {
+    name: 'db:backup',
+    description: 'Dump database and upload to S3 daily',
+  },
+  async run() {
+    const now = new Date()
+    const timestamp = now.toISOString().split('T')[0] // YYYY-MM-DD
+    const backupFileName = `backup-${timestamp}.sql.gz`
+    const localBackupPath = path.join('/tmp', backupFileName)
+
+    // S3 Environment Variables
+    const s3Bucket = process.env.AWS_S3_BUCKET
+    const s3Region = process.env.AWS_S3_REGION
+    const s3AccessKey = process.env.AWS_S3_ACCESS_KEY
+    const s3SecretKey = process.env.AWS_S3_SECRET_ACCESS_KEY
+
+    if (!s3Bucket || !s3Region || !s3AccessKey || !s3SecretKey) {
+      console.error('S3 environment variables are not fully configured. Skipping backup.')
+      return { result: 'Error: Missing S3 environment variables' }
+    }
+
+    // Database Environment Variables
+    const postgresUrl = process.env.POSTGRES_URL
+    const dbHost = process.env.POSTGRES_HOST
+    const dbPort = process.env.POSTGRES_PORT
+    const dbUser = process.env.POSTGRES_USER
+    const dbPassword = process.env.POSTGRES_PASSWORD
+    const dbName = process.env.POSTGRES_DB
+
+    let pgDumpBaseCommand: string
+    const pgDumpExecaOptions: { shell: true, env: Record<string, string> } = { shell: true, env: {} }
+
+    if (postgresUrl) {
+      // Escape single quotes in the URL for shell safety, though pg_dump expects a raw URL.
+      // The primary concern is the shell interpreting the quotes, not pg_dump itself.
+      const escapedPostgresUrl = postgresUrl.replace(/'/g, '\'\\\'\'')
+      pgDumpBaseCommand = `pg_dump --dbname='${escapedPostgresUrl}' --format=c`
+      // PGPASSWORD is not typically set when the password is in the connection string for pg_dump.
+      console.log('Using POSTGRES_URL for database connection.')
+    }
+    else if (dbHost && dbPort && dbUser && dbPassword && dbName) {
+      pgDumpBaseCommand = `pg_dump --host=${dbHost} --port=${dbPort} --username=${dbUser} --dbname=${dbName} --format=c --no-password`
+      pgDumpExecaOptions.env.PGPASSWORD = dbPassword
+      console.log('Using individual POSTGRES_HOST/USER/DB variables for database connection.')
+    }
+    else {
+      console.error('Database connection environment variables are not fully configured. Provide POSTGRES_URL or all of POSTGRES_HOST, POSTGRES_PORT, POSTGRES_USER, POSTGRES_PASSWORD, POSTGRES_DB. Skipping backup.')
+      return { result: 'Error: Missing database environment variables' }
+    }
+
+    // pg_dump writes to stdout, which is then piped through gzip into the local backup file
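+    // Note: in a plain `sh -c` pipeline the exit status is gzip's rather than pg_dump's, so a failed
+    // dump may go undetected unless the shell supports `set -o pipefail`. Also, `--format=c` (custom
+    // format) output is already compressed and is restored with pg_restore, so the `.sql.gz` name is loose.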
+    const fullBackupCommand = `${pgDumpBaseCommand} | gzip > ${localBackupPath}`
+
+    try {
+      console.log(`Starting database backup to ${localBackupPath}...`)
+      // Execute the piped command using shell
+      await execa(fullBackupCommand, pgDumpExecaOptions)
+      console.log('Database dump and compression successful.')
+
+      const fileContent = await fs.readFile(localBackupPath)
+      const s3Key = `backups/database/${backupFileName}`
+
+      console.log(`Uploading backup to S3 bucket ${s3Bucket} with key ${s3Key}...`)
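+      // getS3Client() is assumed to be a project utility (e.g. an auto-imported Nitro server helper)
+      // that builds an S3Client from the AWS_S3_* variables validated above; it is not defined in this file.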
+      const s3Client = getS3Client()
+
+      await s3Client.send(new PutObjectCommand({
+        Bucket: s3Bucket,
+        Key: s3Key,
+        Body: fileContent,
+        ContentType: 'application/gzip', // Specify content type for gzipped file
+      }))
+      console.log('Backup uploaded to S3 successfully.')
+
+      // Implement 3-day retention policy
+      console.log('Applying 3-day retention policy...')
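+      // A single ListObjectsV2 call returns at most 1000 keys; with one backup per day under this prefix
+      // that is more than enough, otherwise pagination via ContinuationToken would be needed.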
+      const listCommand = new ListObjectsV2Command({
+        Bucket: s3Bucket,
+        Prefix: 'backups/database/',
+      })
+      const listedObjects = await s3Client.send(listCommand)
+
+      if (listedObjects.Contents && listedObjects.Contents.length > 0) {
+        const cutoffDate = new Date(now)
+        cutoffDate.setDate(now.getDate() - BACKUP_RETENTION_DAYS) // Backups dated more than BACKUP_RETENTION_DAYS days before today are deleted
+
+        const objectsToDelete = listedObjects.Contents.filter((obj) => {
+          if (!obj.Key)
+            return false
+          const match = obj.Key.match(/backup-(\d{4}-\d{2}-\d{2})\.sql\.gz$/)
+          if (match && match[1]) {
+            const backupDate = new Date(match[1])
+            // Ensure comparison is date-only by setting hours to 0
+            backupDate.setHours(0, 0, 0, 0)
+            const comparisonCutoff = new Date(cutoffDate)
+            comparisonCutoff.setHours(0, 0, 0, 0)
+            return backupDate < comparisonCutoff
+          }
+          return false
+        })
+
+        if (objectsToDelete.length > 0) {
+          const deleteParams = {
+            Bucket: s3Bucket,
+            Delete: {
+              Objects: objectsToDelete.map(obj => ({ Key: obj.Key })),
+              Quiet: false,
+            },
+          }
+          const deleteResult = await s3Client.send(new DeleteObjectsCommand(deleteParams))
+          if (deleteResult.Deleted && deleteResult.Deleted.length > 0) {
+            console.log(`Successfully deleted ${deleteResult.Deleted.length} old backup(s): ${deleteResult.Deleted.map(d => d.Key).join(', ')}`)
+          }
+          if (deleteResult.Errors && deleteResult.Errors.length > 0) {
+            deleteResult.Errors.forEach(err => console.error(`Error deleting S3 object ${err.Key}: ${err.Message}`))
+          }
+        }
+        else {
+          console.log('No old backups found to delete.')
+        }
+      }
+      else {
+        console.log('No backups found in S3 to apply retention policy.')
+      }
+
+      return { result: 'Success', backupPath: s3Key }
+    }
+    catch (error: any) {
+      console.error('Database backup or S3 upload failed:', error.message)
+      if (error.stderr) {
+        console.error('pg_dump stderr:', error.stderr)
+      }
+      if (error.stdout) {
+        console.error('pg_dump stdout:', error.stdout)
+      }
+      return { result: 'Error', error: error.message }
+    }
+    finally {
+      try {
+        await fs.unlink(localBackupPath)
+        console.log(`Cleaned up local backup file: ${localBackupPath}`)
+      }
+      catch (cleanupError: any) {
+        // Log if cleanup fails but don't let it mask the primary error
+        console.warn(`Failed to clean up local backup file ${localBackupPath}:`, cleanupError.message)
+      }
+    }
+  },
+})
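
The task description says the dump should run daily, and the defineTask/auto-import style suggests a Nitro (or Nuxt) server task, so the schedule would presumably live in the Nitro config rather than in this file. A minimal sketch, assuming Nitro's experimental scheduled-tasks support and a nitro.config.ts at the project root; the cron expression is only an example:

import { defineNitroConfig } from 'nitropack/config'

export default defineNitroConfig({
  // tasks are experimental in Nitro and must be enabled explicitly
  experimental: { tasks: true },
  scheduledTasks: {
    // run the db:backup task once a day at 03:00 (example schedule)
    '0 3 * * *': ['db:backup'],
  },
})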