diff --git a/ecosystem-sqlite.json b/ecosystem-sqlite.json index b695b0fe1a..9bb195fdd2 100644 --- a/ecosystem-sqlite.json +++ b/ecosystem-sqlite.json @@ -10,6 +10,17 @@ "env_production": { "NODE_ENV": "production" } + }, + { + "name": "sqlite-bree", + "script": "sqlite-bree.js", + "exec_mode": "fork", + "wait_ready": true, + "instances": "1", + "pmx": false, + "env_production": { + "NODE_ENV": "production" + } } ], "deploy": { diff --git a/jobs/sqlite-cleanup.js b/jobs/sqlite-cleanup.js new file mode 100644 index 0000000000..3b9c91b530 --- /dev/null +++ b/jobs/sqlite-cleanup.js @@ -0,0 +1,122 @@ +/** + * Copyright (c) Forward Email LLC + * SPDX-License-Identifier: BUSL-1.1 + */ + +// eslint-disable-next-line import/no-unassigned-import +require('#config/env'); + +const fs = require('node:fs'); +const path = require('node:path'); +const process = require('node:process'); +const { parentPort } = require('node:worker_threads'); + +// eslint-disable-next-line import/no-unassigned-import +require('#config/mongoose'); + +const Graceful = require('@ladjs/graceful'); +const Redis = require('@ladjs/redis'); +const mongoose = require('mongoose'); +const ms = require('ms'); +const parseErr = require('parse-err'); +const sharedConfig = require('@ladjs/shared-config'); + +const config = require('#config'); +const emailHelper = require('#helpers/email'); +const logger = require('#helpers/logger'); +const setupMongoose = require('#helpers/setup-mongoose'); + +const breeSharedConfig = sharedConfig('BREE'); +const client = new Redis(breeSharedConfig.redis, logger); + +const graceful = new Graceful({ + mongooses: [mongoose], + redisClients: [client], + logger +}); + +// store boolean if the job is cancelled +let isCancelled = false; + +// handle cancellation (this is a very simple example) +if (parentPort) + parentPort.once('message', (message) => { + // + // TODO: once we can manipulate concurrency option to p-map + // we could make it `Number.MAX_VALUE` here to speed cancellation 
up + // + // + if (message === 'cancel') { + isCancelled = true; + } + }); + +graceful.listen(); + +// +// find all files that end with: +// - `-backup.sqlite` +// - `-backup-wal.sqlite` +// - `-backup-shm.sqlite` +// +// +const AFFIXES = ['-backup', '-backup-wal', '-backup-shm']; + +(async () => { + await setupMongoose(logger); + + try { + if (isCancelled) return; + + const dirents = await fs.promises.readdir('/mnt', { + withFileTypes: true + }); + + for (const dirent of dirents) { + if (!dirent.isDirectory()) continue; + // eslint-disable-next-line no-await-in-loop + const files = await fs.promises.readdir(path.join('/mnt', dirent.name), { + withFileTypes: true + }); + for (const file of files) { + if (!file.isFile()) continue; + if (path.extname(file.name) !== '.sqlite') continue; + const basename = path.basename(file.name, path.extname(file.name)); + for (const affix of AFFIXES) { + if (!basename.endsWith(affix)) continue; + const filePath = path.join('/mnt', dirent.name, file.name); + // eslint-disable-next-line no-await-in-loop + const stat = await fs.promises.stat(filePath); + if (!stat.isFile()) continue; // safeguard + // delete any backups that are 4h+ old + if (stat.mtimeMs && stat.mtimeMs <= Date.now() - ms('4h')) { + // eslint-disable-next-line no-await-in-loop + await fs.promises.unlink(filePath); + } + + break; + } + } + } + } catch (err) { + await logger.error(err); + + await emailHelper({ + template: 'alert', + message: { + to: config.email.message.from, + subject: 'SQLite cleanup had an error' + }, + locals: { + message: `
${JSON.stringify(
+          parseErr(err),
+          null,
+          2
+        )}
` + } + }); + } + + if (parentPort) parentPort.postMessage('done'); + else process.exit(0); +})(); diff --git a/smtp-bree.js b/smtp-bree.js index 629820f81e..74c1707455 100644 --- a/smtp-bree.js +++ b/smtp-bree.js @@ -18,8 +18,8 @@ const bree = new Bree({ // // this is a long running job, but we attempt to restart it // every 30s in case errors (e.g. uncaught exception edge case causes `process.exit()`) - // this job has built-in setInterval for every 10m to unlock emails in queue - // and we will also retry deferred emails and put them back into the queue + // this job is recursive and calls its main function to keep itself running (and sending emails) + // (for putting emails back into queue that are frozen or need to be retried, see jobs/unlock-emails) // name: 'send-emails', interval: '30s', diff --git a/sqlite-bree.js b/sqlite-bree.js new file mode 100644 index 0000000000..7eeb8ac19e --- /dev/null +++ b/sqlite-bree.js @@ -0,0 +1,42 @@ +/** + * Copyright (c) Forward Email LLC + * SPDX-License-Identifier: BUSL-1.1 + */ + +// eslint-disable-next-line import/no-unassigned-import +require('#config/env'); + +const Bree = require('bree'); +const Graceful = require('@ladjs/graceful'); + +const logger = require('#helpers/logger'); + +const bree = new Bree({ + logger, + jobs: [ + { + // + // this is a long running job, but we attempt to restart it + // every 30s in case errors (e.g. uncaught exception edge case causes `process.exit()`) + // this job cleans up any backup artifacts from 4h+ ago for 'rekey' and 'backup' sqlite-server cases + // (e.g. the server stopped mid-backup or an error occurred, e.g. 
ran out of memory) + // and in an attempt to save on disk storage, we will run `fs.unlink` on each of these files + // + name: 'sqlite-cleanup', + interval: '30s', + timeout: 0 + } + ] +}); + +const graceful = new Graceful({ + brees: [bree], + logger +}); +graceful.listen(); + +(async () => { + await bree.start(); +})(); + +logger.info('SQLite bree started', { hide_meta: true }); diff --git a/sqlite-server.js b/sqlite-server.js index 94c1acc2ac..6cded189b9 100644 --- a/sqlite-server.js +++ b/sqlite-server.js @@ -977,7 +977,7 @@ async function parsePayload(data, ws) { // create backup const tmp = path.join( path.dirname(storagePath), - `${payload.id}.sqlite` + `${payload.id}-backup.sqlite` ); const results = await db.backup(tmp); let backup = true; @@ -1106,7 +1106,10 @@ async function parsePayload(data, ws) { storage_location: payload.session.user.storage_location }); const diskSpace = await checkDiskSpace(storagePath); - tmp = path.join(path.dirname(storagePath), `${payload.id}.sqlite`); + tmp = path.join( + path.dirname(storagePath), + `${payload.id}-backup.sqlite` + ); // const stats = await fs.promises.stat(storagePath);