Explorar o código

Merge pull request #2721 from NginxProxyManager/docker-user-group

Docker users and groups, refactor configuration
jc21 hai 2 anos
pai
achega
065c2dac42
Modificáronse 36 ficheiros con 926 adicións e 786 borrados
  1. 6 6
      Jenkinsfile
  2. 3 1
      README.md
  3. 4 3
      backend/app.js
  4. 20 26
      backend/db.js
  5. 0 87
      backend/index.js
  6. 30 18
      backend/internal/certificate.js
  7. 16 16
      backend/internal/nginx.js
  8. 184 0
      backend/lib/config.js
  9. 1 1
      backend/lib/validator/index.js
  10. 2 2
      backend/models/now_helper.js
  11. 14 20
      backend/models/token.js
  12. 2 3
      backend/package.json
  13. 7 66
      backend/setup.js
  14. 320 287
      backend/yarn.lock
  15. 6 10
      docker/Dockerfile
  16. 20 18
      docker/docker-compose.ci.yml
  17. 18 12
      docker/docker-compose.dev.yml
  18. 29 0
      docker/rootfs/bin/common.sh
  19. 0 46
      docker/rootfs/bin/handle-ipv6-setting
  20. 2 3
      docker/rootfs/etc/nginx/nginx.conf
  21. 7 4
      docker/rootfs/etc/s6-overlay/s6-rc.d/backend/run
  22. 8 2
      docker/rootfs/etc/s6-overlay/s6-rc.d/frontend/run
  23. 5 2
      docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/run
  24. 18 0
      docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/00-all.sh
  25. 25 0
      docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/10-npmuser.sh
  26. 41 0
      docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/20-paths.sh
  27. 21 0
      docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/30-ownership.sh
  28. 17 0
      docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/40-dynamic.sh
  29. 36 0
      docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/50-ipv6.sh
  30. 30 0
      docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/60-secrets.sh
  31. 17 0
      docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/90-banner.sh
  32. 0 93
      docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/script.sh
  33. 1 1
      docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/up
  34. 6 7
      docs/advanced-config/README.md
  35. 9 52
      docs/setup/README.md
  36. 1 0
      docs/upgrading/README.md

+ 6 - 6
Jenkinsfile

@@ -14,9 +14,9 @@ pipeline {
 		ansiColor('xterm')
 	}
 	environment {
-		IMAGE                      = "nginx-proxy-manager"
+		IMAGE                      = 'nginx-proxy-manager'
 		BUILD_VERSION              = getVersion()
-		MAJOR_VERSION              = "2"
+		MAJOR_VERSION              = '2'
 		BRANCH_LOWER               = "${BRANCH_NAME.toLowerCase().replaceAll('/', '-')}"
 		COMPOSE_PROJECT_NAME       = "npm_${BRANCH_LOWER}_${BUILD_NUMBER}"
 		COMPOSE_FILE               = 'docker/docker-compose.ci.yml'
@@ -102,8 +102,8 @@ pipeline {
 				always {
 					// Dumps to analyze later
 					sh 'mkdir -p debug'
-					sh 'docker-compose logs fullstack-sqlite | gzip > debug/docker_fullstack_sqlite.log.gz'
-					sh 'docker-compose logs db | gzip > debug/docker_db.log.gz'
+					sh 'docker-compose logs fullstack-sqlite > debug/docker_fullstack_sqlite.log'
+					sh 'docker-compose logs db > debug/docker_db.log'
 					// Cypress videos and screenshot artifacts
 					dir(path: 'test/results') {
 						archiveArtifacts allowEmptyArchive: true, artifacts: '**/*', excludes: '**/*.xml'
@@ -128,8 +128,8 @@ pipeline {
 				always {
 					// Dumps to analyze later
 					sh 'mkdir -p debug'
-					sh 'docker-compose logs fullstack-mysql | gzip > debug/docker_fullstack_mysql.log.gz'
-					sh 'docker-compose logs db | gzip > debug/docker_db.log.gz'
+					sh 'docker-compose logs fullstack-mysql > debug/docker_fullstack_mysql.log'
+					sh 'docker-compose logs db > debug/docker_db.log'
 					// Cypress videos and screenshot artifacts
 					dir(path: 'test/results') {
 						archiveArtifacts allowEmptyArchive: true, artifacts: '**/*', excludes: '**/*.xml'

+ 3 - 1
README.md

@@ -56,7 +56,7 @@ I won't go in to too much detail here but here are the basics for someone new to
 2. Create a docker-compose.yml file similar to this:
 
 ```yml
-version: '3'
+version: '3.8'
 services:
   app:
     image: 'jc21/nginx-proxy-manager:latest'
@@ -70,6 +70,8 @@ services:
       - ./letsencrypt:/etc/letsencrypt
 ```
 
+This is the bare minimum configuration required. See the [documentation](https://nginxproxymanager.com/setup/) for more.
+
 3. Bring up your stack by running
 
 ```bash

+ 4 - 3
backend/app.js

@@ -2,6 +2,7 @@ const express     = require('express');
 const bodyParser  = require('body-parser');
 const fileUpload  = require('express-fileupload');
 const compression = require('compression');
+const config      = require('./lib/config');
 const log         = require('./logger').express;
 
 /**
@@ -24,7 +25,7 @@ app.enable('trust proxy', ['loopback', 'linklocal', 'uniquelocal']);
 app.enable('strict routing');
 
 // pretty print JSON when not live
-if (process.env.NODE_ENV !== 'production') {
+if (config.debug()) {
 	app.set('json spaces', 2);
 }
 
@@ -65,7 +66,7 @@ app.use(function (err, req, res, next) {
 		}
 	};
 
-	if (process.env.NODE_ENV === 'development' || (req.baseUrl + req.path).includes('nginx/certificates')) {
+	if (config.debug() || (req.baseUrl + req.path).includes('nginx/certificates')) {
 		payload.debug = {
 			stack:    typeof err.stack !== 'undefined' && err.stack ? err.stack.split('\n') : null,
 			previous: err.previous
@@ -74,7 +75,7 @@ app.use(function (err, req, res, next) {
 
 	// Not every error is worth logging - but this is good for now until it gets annoying.
 	if (typeof err.stack !== 'undefined' && err.stack) {
-		if (process.env.NODE_ENV === 'development' || process.env.DEBUG) {
+		if (config.debug()) {
 			log.debug(err.stack);
 		} else if (typeof err.public == 'undefined' || !err.public) {
 			log.warn(err.message);

+ 20 - 26
backend/db.js

@@ -1,33 +1,27 @@
-const config = require('config');
+const config = require('./lib/config');
 
 if (!config.has('database')) {
-	throw new Error('Database config does not exist! Please read the instructions: https://github.com/jc21/nginx-proxy-manager/blob/master/doc/INSTALL.md');
+	throw new Error('Database config does not exist! Please read the instructions: https://nginxproxymanager.com/setup/');
 }
 
 function generateDbConfig() {
-	if (config.database.engine === 'knex-native') {
-		return config.database.knex;
-	} else
-		return {
-			client:     config.database.engine,
-			connection: {
-				host:     config.database.host,
-				user:     config.database.user,
-				password: config.database.password,
-				database: config.database.name,
-				port:     config.database.port
-			},
-			migrations: {
-				tableName: 'migrations'
-			}
-		};
+	const cfg = config.get('database');
+	if (cfg.engine === 'knex-native') {
+		return cfg.knex;
+	}
+	return {
+		client:     cfg.engine,
+		connection: {
+			host:     cfg.host,
+			user:     cfg.user,
+			password: cfg.password,
+			database: cfg.name,
+			port:     cfg.port
+		},
+		migrations: {
+			tableName: 'migrations'
+		}
+	};
 }
 
-
-let data = generateDbConfig();
-
-if (typeof config.database.version !== 'undefined') {
-	data.version = config.database.version;
-}
-
-module.exports = require('knex')(data);
+module.exports = require('knex')(generateDbConfig());

+ 0 - 87
backend/index.js

@@ -3,9 +3,6 @@
 const logger = require('./logger').global;
 
 async function appStart () {
-	// Create config file db settings if environment variables have been set
-	await createDbConfigFromEnvironment();
-
 	const migrate             = require('./migrate');
 	const setup               = require('./setup');
 	const app                 = require('./app');
@@ -42,90 +39,6 @@ async function appStart () {
 		});
 }
 
-async function createDbConfigFromEnvironment() {
-	return new Promise((resolve, reject) => {
-		const envMysqlHost = process.env.DB_MYSQL_HOST || null;
-		const envMysqlPort = process.env.DB_MYSQL_PORT || null;
-		const envMysqlUser = process.env.DB_MYSQL_USER || null;
-		const envMysqlName = process.env.DB_MYSQL_NAME || null;
-		let envSqliteFile  = process.env.DB_SQLITE_FILE || null;
-
-		const fs       = require('fs');
-		const filename = (process.env.NODE_CONFIG_DIR || './config') + '/' + (process.env.NODE_ENV || 'default') + '.json';
-		let configData = {};
-
-		try {
-			configData = require(filename);
-		} catch (err) {
-			// do nothing
-		}
-
-		if (configData.database && configData.database.engine && !configData.database.fromEnv) {
-			logger.info('Manual db configuration already exists, skipping config creation from environment variables');
-			resolve();
-			return;
-		}
-
-		if ((!envMysqlHost || !envMysqlPort || !envMysqlUser || !envMysqlName) && !envSqliteFile){
-			envSqliteFile = '/data/database.sqlite';
-			logger.info(`No valid environment variables for database provided, using default SQLite file '${envSqliteFile}'`);
-		}
-
-		if (envMysqlHost && envMysqlPort && envMysqlUser && envMysqlName) {
-			const newConfig = {
-				fromEnv:  true,
-				engine:   'mysql',
-				host:     envMysqlHost,
-				port:     envMysqlPort,
-				user:     envMysqlUser,
-				password: process.env.DB_MYSQL_PASSWORD,
-				name:     envMysqlName,
-			};
-
-			if (JSON.stringify(configData.database) === JSON.stringify(newConfig)) {
-				// Config is unchanged, skip overwrite
-				resolve();
-				return;
-			}
-
-			logger.info('Generating MySQL knex configuration from environment variables');
-			configData.database = newConfig;
-
-		} else {
-			const newConfig = {
-				fromEnv: true,
-				engine:  'knex-native',
-				knex:    {
-					client:     'sqlite3',
-					connection: {
-						filename: envSqliteFile
-					},
-					useNullAsDefault: true
-				}
-			};
-			if (JSON.stringify(configData.database) === JSON.stringify(newConfig)) {
-				// Config is unchanged, skip overwrite
-				resolve();
-				return;
-			}
-
-			logger.info('Generating SQLite knex configuration');
-			configData.database = newConfig;
-		}
-
-		// Write config
-		fs.writeFile(filename, JSON.stringify(configData, null, 2), (err) => {
-			if (err) {
-				logger.error('Could not write db config to config file: ' + filename);
-				reject(err);
-			} else {
-				logger.debug('Wrote db configuration to config file: ' + filename);
-				resolve();
-			}
-		});
-	});
-}
-
 try {
 	appStart();
 } catch (err) {

+ 30 - 18
backend/internal/certificate.js

@@ -1,22 +1,24 @@
-const _                  = require('lodash');
-const fs                 = require('fs');
-const https              = require('https');
-const tempWrite          = require('temp-write');
-const moment             = require('moment');
-const logger             = require('../logger').ssl;
-const error              = require('../lib/error');
-const utils              = require('../lib/utils');
-const certificateModel   = require('../models/certificate');
-const dnsPlugins         = require('../global/certbot-dns-plugins');
-const internalAuditLog   = require('./audit-log');
-const internalNginx      = require('./nginx');
-const internalHost       = require('./host');
-const letsencryptStaging = process.env.NODE_ENV !== 'production';
+const _                = require('lodash');
+const fs               = require('fs');
+const https            = require('https');
+const tempWrite        = require('temp-write');
+const moment           = require('moment');
+const logger           = require('../logger').ssl;
+const config           = require('../lib/config');
+const error            = require('../lib/error');
+const utils            = require('../lib/utils');
+const certificateModel = require('../models/certificate');
+const dnsPlugins       = require('../global/certbot-dns-plugins');
+const internalAuditLog = require('./audit-log');
+const internalNginx    = require('./nginx');
+const internalHost     = require('./host');
+const archiver         = require('archiver');
+const path             = require('path');
+const { isArray }      = require('lodash');
+
+const letsencryptStaging = config.useLetsencryptStaging();
 const letsencryptConfig  = '/etc/letsencrypt.ini';
 const certbotCommand     = 'certbot';
-const archiver           = require('archiver');
-const path               = require('path');
-const { isArray }        = require('lodash');
 
 function omissions() {
 	return ['is_deleted'];
@@ -46,6 +48,8 @@ const internalCertificate = {
 
 			const cmd = certbotCommand + ' renew --non-interactive --quiet ' +
 				'--config "' + letsencryptConfig + '" ' +
+				'--work-dir "/tmp/letsencrypt-lib" ' +
+				'--logs-dir "/tmp/letsencrypt-log" ' +
 				'--preferred-challenges "dns,http" ' +
 				'--disable-hook-validation ' +
 				(letsencryptStaging ? '--staging' : '');
@@ -833,6 +837,8 @@ const internalCertificate = {
 
 		const cmd = certbotCommand + ' certonly ' +
 			'--config "' + letsencryptConfig + '" ' +
+			'--work-dir "/tmp/letsencrypt-lib" ' +
+			'--logs-dir "/tmp/letsencrypt-log" ' +
 			'--cert-name "npm-' + certificate.id + '" ' +
 			'--agree-tos ' +
 			'--authenticator webroot ' +
@@ -871,13 +877,15 @@ const internalCertificate = {
 		const escapedCredentials = certificate.meta.dns_provider_credentials.replaceAll('\'', '\\\'').replaceAll('\\', '\\\\');
 		const credentialsCmd     = 'mkdir -p /etc/letsencrypt/credentials 2> /dev/null; echo \'' + escapedCredentials + '\' > \'' + credentialsLocation + '\' && chmod 600 \'' + credentialsLocation + '\'';
 		// we call `. /opt/certbot/bin/activate` (`.` is alternative to `source` in dash) to access certbot venv
-		let prepareCmd = '. /opt/certbot/bin/activate && pip install ' + dns_plugin.package_name + (dns_plugin.version_requirement || '') + ' ' + dns_plugin.dependencies + ' && deactivate';
+		const prepareCmd = '. /opt/certbot/bin/activate && pip install --no-cache-dir --user ' + dns_plugin.package_name + (dns_plugin.version_requirement || '') + ' ' + dns_plugin.dependencies + ' && deactivate';
 
 		// Whether the plugin has a --<name>-credentials argument
 		const hasConfigArg = certificate.meta.dns_provider !== 'route53';
 
 		let mainCmd = certbotCommand + ' certonly ' +
 			'--config "' + letsencryptConfig + '" ' +
+			'--work-dir "/tmp/letsencrypt-lib" ' +
+			'--logs-dir "/tmp/letsencrypt-log" ' +
 			'--cert-name "npm-' + certificate.id + '" ' +
 			'--agree-tos ' +
 			'--email "' + certificate.meta.letsencrypt_email + '" ' +
@@ -974,6 +982,8 @@ const internalCertificate = {
 
 		const cmd = certbotCommand + ' renew --force-renewal ' +
 			'--config "' + letsencryptConfig + '" ' +
+			'--work-dir "/tmp/letsencrypt-lib" ' +
+			'--logs-dir "/tmp/letsencrypt-log" ' +
 			'--cert-name "npm-' + certificate.id + '" ' +
 			'--preferred-challenges "dns,http" ' +
 			'--no-random-sleep-on-renew ' +
@@ -1004,6 +1014,8 @@ const internalCertificate = {
 
 		let mainCmd = certbotCommand + ' renew ' +
 			'--config "' + letsencryptConfig + '" ' +
+			'--work-dir "/tmp/letsencrypt-lib" ' +
+			'--logs-dir "/tmp/letsencrypt-log" ' +
 			'--cert-name "npm-' + certificate.id + '" ' +
 			'--disable-hook-validation ' +
 			'--no-random-sleep-on-renew ' +

+ 16 - 16
backend/internal/nginx.js

@@ -1,9 +1,9 @@
-const _          = require('lodash');
-const fs         = require('fs');
-const logger     = require('../logger').nginx;
-const utils      = require('../lib/utils');
-const error      = require('../lib/error');
-const debug_mode = process.env.NODE_ENV !== 'production' || !!process.env.DEBUG;
+const _      = require('lodash');
+const fs     = require('fs');
+const logger = require('../logger').nginx;
+const config = require('../lib/config');
+const utils  = require('../lib/utils');
+const error  = require('../lib/error');
 
 const internalNginx = {
 
@@ -65,7 +65,7 @@ const internalNginx = {
 							}
 						});
 
-						if (debug_mode) {
+						if (config.debug()) {
 							logger.error('Nginx test failed:', valid_lines.join('\n'));
 						}
 
@@ -101,7 +101,7 @@ const internalNginx = {
 	 * @returns {Promise}
 	 */
 	test: () => {
-		if (debug_mode) {
+		if (config.debug()) {
 			logger.info('Testing Nginx configuration');
 		}
 
@@ -184,7 +184,7 @@ const internalNginx = {
 	generateConfig: (host_type, host) => {
 		const nice_host_type = internalNginx.getFileFriendlyHostType(host_type);
 
-		if (debug_mode) {
+		if (config.debug()) {
 			logger.info('Generating ' + nice_host_type + ' Config:', JSON.stringify(host, null, 2));
 		}
 
@@ -239,7 +239,7 @@ const internalNginx = {
 					.then((config_text) => {
 						fs.writeFileSync(filename, config_text, {encoding: 'utf8'});
 
-						if (debug_mode) {
+						if (config.debug()) {
 							logger.success('Wrote config:', filename, config_text);
 						}
 
@@ -249,7 +249,7 @@ const internalNginx = {
 						resolve(true);
 					})
 					.catch((err) => {
-						if (debug_mode) {
+						if (config.debug()) {
 							logger.warn('Could not write ' + filename + ':', err.message);
 						}
 
@@ -268,7 +268,7 @@ const internalNginx = {
 	 * @returns {Promise}
 	 */
 	generateLetsEncryptRequestConfig: (certificate) => {
-		if (debug_mode) {
+		if (config.debug()) {
 			logger.info('Generating LetsEncrypt Request Config:', certificate);
 		}
 
@@ -292,14 +292,14 @@ const internalNginx = {
 				.then((config_text) => {
 					fs.writeFileSync(filename, config_text, {encoding: 'utf8'});
 
-					if (debug_mode) {
+					if (config.debug()) {
 						logger.success('Wrote config:', filename, config_text);
 					}
 
 					resolve(true);
 				})
 				.catch((err) => {
-					if (debug_mode) {
+					if (config.debug()) {
 						logger.warn('Could not write ' + filename + ':', err.message);
 					}
 
@@ -416,8 +416,8 @@ const internalNginx = {
 	 * @param   {string}  config
 	 * @returns {boolean}
 	 */
-	advancedConfigHasDefaultLocation: function (config) {
-		return !!config.match(/^(?:.*;)?\s*?location\s*?\/\s*?{/im);
+	advancedConfigHasDefaultLocation: function (cfg) {
+		return !!cfg.match(/^(?:.*;)?\s*?location\s*?\/\s*?{/im);
 	},
 
 	/**

+ 184 - 0
backend/lib/config.js

@@ -0,0 +1,184 @@
+const fs      = require('fs');
+const NodeRSA = require('node-rsa');
+const logger  = require('../logger').global;
+
+const keysFile = '/data/keys.json';
+
+let instance = null;
+
+// 1. Load from config file first (not recommended anymore)
+// 2. Use config env variables next
+const configure = () => {
+	const filename = (process.env.NODE_CONFIG_DIR || './config') + '/' + (process.env.NODE_ENV || 'default') + '.json';
+	if (fs.existsSync(filename)) {
+		let configData;
+		try {
+			configData = require(filename);
+		} catch (err) {
+			// do nothing
+		}
+
+		if (configData && configData.database) {
+			logger.info(`Using configuration from file: ${filename}`);
+			instance      = configData;
+			instance.keys = getKeys();
+			return;
+		}
+	}
+
+	const envMysqlHost = process.env.DB_MYSQL_HOST || null;
+	const envMysqlUser = process.env.DB_MYSQL_USER || null;
+	const envMysqlName = process.env.DB_MYSQL_NAME || null;
+	if (envMysqlHost && envMysqlUser && envMysqlName) {
+		// we have enough mysql creds to go with mysql
+		logger.info('Using MySQL configuration');
+		instance = {
+			database: {
+				engine:   'mysql',
+				host:     envMysqlHost,
+				port:     process.env.DB_MYSQL_PORT || 3306,
+				user:     envMysqlUser,
+				password: process.env.DB_MYSQL_PASSWORD,
+				name:     envMysqlName,
+			},
+			keys: getKeys(),
+		};
+		return;
+	}
+
+	const envSqliteFile = process.env.DB_SQLITE_FILE || '/data/database.sqlite';
+	logger.info(`Using Sqlite: ${envSqliteFile}`);
+	instance = {
+		database: {
+			engine: 'knex-native',
+			knex:   {
+				client:     'sqlite3',
+				connection: {
+					filename: envSqliteFile
+				},
+				useNullAsDefault: true
+			}
+		},
+		keys: getKeys(),
+	};
+};
+
+const getKeys = () => {
+	// Get keys from file
+	if (!fs.existsSync(keysFile)) {
+		generateKeys();
+	} else if (process.env.DEBUG) {
+		logger.info('Keys file exists OK');
+	}
+	try {
+		return require(keysFile);
+	} catch (err) {
+		logger.error('Could not read JWT key pair from config file: ' + keysFile, err);
+		process.exit(1);
+	}
+};
+
+const generateKeys = () => {
+	logger.info('Creating a new JWT key pair...');
+	// Now create the keys and save them in the config.
+	const key = new NodeRSA({ b: 2048 });
+	key.generateKeyPair();
+
+	const keys = {
+		key: key.exportKey('private').toString(),
+		pub: key.exportKey('public').toString(),
+	};
+
+	// Write keys config
+	try {
+		fs.writeFileSync(keysFile, JSON.stringify(keys, null, 2));
+	} catch (err) {
+		logger.error('Could not write JWT key pair to config file: ' + keysFile + ': ' + err.message);
+		process.exit(1);
+	}
+	logger.info('Wrote JWT key pair to config file: ' + keysFile);
+};
+
+module.exports = {
+
+	/**
+	 *
+	 * @param   {string}  key   ie: 'database' or 'database.engine'
+	 * @returns {boolean}
+	 */
+	has: function(key) {
+		instance === null && configure();
+		const keys = key.split('.');
+		let level  = instance;
+		let has    = true;
+		keys.forEach((keyItem) =>{
+			if (typeof level[keyItem] === 'undefined') {
+				has = false;
+			} else {
+				level = level[keyItem];
+			}
+		});
+
+		return has;
+	},
+
+	/**
+	 * Gets a specific key from the top level
+	 *
+	 * @param {string} key
+	 * @returns {*}
+	 */
+	get: function (key) {
+		instance === null && configure();
+		if (key && typeof instance[key] !== 'undefined') {
+			return instance[key];
+		}
+		return instance;
+	},
+
+	/**
+	 * Is this a sqlite configuration?
+	 *
+	 * @returns {boolean}
+	 */
+	isSqlite: function () {
+		instance === null && configure();
+		return instance.database.knex && instance.database.knex.client === 'sqlite3';
+	},
+
+	/**
+	 * Are we running in debug mode?
+	 *
+	 * @returns {boolean}
+	 */
+	debug: function () {
+		return !!process.env.DEBUG;
+	},
+
+	/**
+	 * Returns a public key
+	 *
+	 * @returns {string}
+	 */
+	getPublicKey: function () {
+		instance === null && configure();
+		return instance.keys.pub;
+	},
+
+	/**
+	 * Returns a private key
+	 *
+	 * @returns {string}
+	 */
+	getPrivateKey: function () {
+		instance === null && configure();
+		return instance.keys.key;
+	},
+
+	/**
+	 * @returns {boolean}
+	 */
+	useLetsencryptStaging: function () {
+		return !!process.env.LE_STAGING;
+	}
+};

+ 1 - 1
backend/lib/validator/index.js

@@ -5,7 +5,7 @@ const definitions = require('../../schema/definitions.json');
 RegExp.prototype.toJSON = RegExp.prototype.toString;
 
 const ajv = require('ajv')({
-	verbose:     true, //process.env.NODE_ENV === 'development',
+	verbose:     true,
 	allErrors:   true,
 	format:      'full',  // strict regexes for format checks
 	coerceTypes: true,

+ 2 - 2
backend/models/now_helper.js

@@ -1,11 +1,11 @@
 const db     = require('../db');
-const config = require('config');
+const config = require('../lib/config');
 const Model  = require('objection').Model;
 
 Model.knex(db);
 
 module.exports = function () {
-	if (config.database.knex && config.database.knex.client === 'sqlite3') {
+	if (config.isSqlite()) {
 		// eslint-disable-next-line
 		return Model.raw("datetime('now','localtime')");
 	}

+ 14 - 20
backend/models/token.js

@@ -6,44 +6,36 @@
 const _      = require('lodash');
 const jwt    = require('jsonwebtoken');
 const crypto = require('crypto');
+const config = require('../lib/config');
 const error  = require('../lib/error');
+const logger = require('../logger').global;
 const ALGO   = 'RS256';
 
-let public_key  = null;
-let private_key = null;
-
-function checkJWTKeyPair() {
-	if (!public_key || !private_key) {
-		let config  = require('config');
-		public_key  = config.get('jwt.pub');
-		private_key = config.get('jwt.key');
-	}
-}
-
 module.exports = function () {
 
 	let token_data = {};
 
-	let self = {
+	const self = {
 		/**
 		 * @param {Object}  payload
 		 * @returns {Promise}
 		 */
 		create: (payload) => {
+			if (!config.getPrivateKey()) {
+				logger.error('Private key is empty!');
+			}
 			// sign with RSA SHA256
-			let options = {
+			const options = {
 				algorithm: ALGO,
 				expiresIn: payload.expiresIn || '1d'
 			};
 
 			payload.jti = crypto.randomBytes(12)
 				.toString('base64')
-				.substr(-8);
-
-			checkJWTKeyPair();
+				.slice(-8);
 
 			return new Promise((resolve, reject) => {
-				jwt.sign(payload, private_key, options, (err, token) => {
+				jwt.sign(payload, config.getPrivateKey(), options, (err, token) => {
 					if (err) {
 						reject(err);
 					} else {
@@ -62,13 +54,15 @@ module.exports = function () {
 		 * @returns {Promise}
 		 */
 		load: function (token) {
+			if (!config.getPublicKey()) {
+				logger.error('Public key is empty!');
+			}
 			return new Promise((resolve, reject) => {
-				checkJWTKeyPair();
 				try {
 					if (!token || token === null || token === 'null') {
 						reject(new error.AuthError('Empty token'));
 					} else {
-						jwt.verify(token, public_key, {ignoreExpiration: false, algorithms: [ALGO]}, (err, result) => {
+						jwt.verify(token, config.getPublicKey(), {ignoreExpiration: false, algorithms: [ALGO]}, (err, result) => {
 							if (err) {
 
 								if (err.name === 'TokenExpiredError') {
@@ -132,7 +126,7 @@ module.exports = function () {
 		 * @returns {Integer}
 		 */
 		getUserId: (default_value) => {
-			let attrs = self.get('attrs');
+			const attrs = self.get('attrs');
 			if (attrs && typeof attrs.id !== 'undefined' && attrs.id) {
 				return attrs.id;
 			}

+ 2 - 3
backend/package.json

@@ -10,7 +10,6 @@
 		"bcrypt": "^5.0.0",
 		"body-parser": "^1.19.0",
 		"compression": "^1.7.4",
-		"config": "^3.3.1",
 		"express": "^4.17.3",
 		"express-fileupload": "^1.1.9",
 		"gravatar": "^1.8.0",
@@ -22,7 +21,6 @@
 		"moment": "^2.29.4",
 		"mysql": "^2.18.1",
 		"node-rsa": "^1.0.8",
-		"nodemon": "^2.0.2",
 		"objection": "3.0.1",
 		"path": "^0.12.7",
 		"signale": "1.4.0",
@@ -36,8 +34,9 @@
 	"author": "Jamie Curnow <[email protected]>",
 	"license": "MIT",
 	"devDependencies": {
-		"eslint": "^6.8.0",
+		"eslint": "^8.36.0",
 		"eslint-plugin-align-assignments": "^1.1.2",
+		"nodemon": "^2.0.2",
 		"prettier": "^2.0.4"
 	}
 }

+ 7 - 66
backend/setup.js

@@ -1,6 +1,4 @@
-const fs                  = require('fs');
-const NodeRSA             = require('node-rsa');
-const config              = require('config');
+const config              = require('./lib/config');
 const logger              = require('./logger').setup;
 const certificateModel    = require('./models/certificate');
 const userModel           = require('./models/user');
@@ -9,62 +7,6 @@ const utils               = require('./lib/utils');
 const authModel           = require('./models/auth');
 const settingModel        = require('./models/setting');
 const dns_plugins         = require('./global/certbot-dns-plugins');
-const debug_mode          = process.env.NODE_ENV !== 'production' || !!process.env.DEBUG;
-
-/**
- * Creates a new JWT RSA Keypair if not alread set on the config
- *
- * @returns {Promise}
- */
-const setupJwt = () => {
-	return new Promise((resolve, reject) => {
-		// Now go and check if the jwt gpg keys have been created and if not, create them
-		if (!config.has('jwt') || !config.has('jwt.key') || !config.has('jwt.pub')) {
-			logger.info('Creating a new JWT key pair...');
-
-			// jwt keys are not configured properly
-			const filename  = config.util.getEnv('NODE_CONFIG_DIR') + '/' + (config.util.getEnv('NODE_ENV') || 'default') + '.json';
-			let config_data = {};
-
-			try {
-				config_data = require(filename);
-			} catch (err) {
-				// do nothing
-				if (debug_mode) {
-					logger.debug(filename + ' config file could not be required');
-				}
-			}
-
-			// Now create the keys and save them in the config.
-			let key = new NodeRSA({ b: 2048 });
-			key.generateKeyPair();
-
-			config_data.jwt = {
-				key: key.exportKey('private').toString(),
-				pub: key.exportKey('public').toString(),
-			};
-
-			// Write config
-			fs.writeFile(filename, JSON.stringify(config_data, null, 2), (err) => {
-				if (err) {
-					logger.error('Could not write JWT key pair to config file: ' + filename);
-					reject(err);
-				} else {
-					logger.info('Wrote JWT key pair to config file: ' + filename);
-					delete require.cache[require.resolve('config')];
-					resolve();
-				}
-			});
-		} else {
-			// JWT key pair exists
-			if (debug_mode) {
-				logger.debug('JWT Keypair already exists');
-			}
-
-			resolve();
-		}
-	});
-};
 
 /**
  * Creates a default admin users if one doesn't already exist in the database
@@ -119,8 +61,8 @@ const setupDefaultUser = () => {
 					.then(() => {
 						logger.info('Initial admin setup completed');
 					});
-			} else if (debug_mode) {
-				logger.debug('Admin user setup not required');
+			} else if (config.debug()) {
+				logger.info('Admin user setup not required');
 			}
 		});
 };
@@ -151,8 +93,8 @@ const setupDefaultSettings = () => {
 						logger.info('Default settings added');
 					});
 			}
-			if (debug_mode) {
-				logger.debug('Default setting setup not required');
+			if (config.debug()) {
+				logger.info('Default setting setup not required');
 			}
 		});
 };
@@ -189,7 +131,7 @@ const setupCertbotPlugins = () => {
 				});
 
 				if (plugins.length) {
-					const install_cmd = '. /opt/certbot/bin/activate && pip install ' + plugins.join(' ') + ' && deactivate';
+					const install_cmd = '. /opt/certbot/bin/activate && pip install --no-cache-dir --user ' + plugins.join(' ') + ' && deactivate';
 					promises.push(utils.exec(install_cmd));
 				}
 
@@ -225,8 +167,7 @@ const setupLogrotation = () => {
 };
 
 module.exports = function () {
-	return setupJwt()
-		.then(setupDefaultUser)
+	return setupDefaultUser()
 		.then(setupDefaultSettings)
 		.then(setupCertbotPlugins)
 		.then(setupLogrotation);

A diferenza do arquivo foi suprimida porque é demasiado grande
+ 320 - 287
backend/yarn.lock


+ 6 - 10
docker/Dockerfile

@@ -35,21 +35,17 @@ COPY frontend/dist /app/frontend
 COPY global        /app/global
 
 WORKDIR /app
-RUN yarn install
+RUN yarn install \
+	&& yarn cache clean
 
 # add late to limit cache-busting by modifications
 COPY docker/rootfs /
 
 # Remove frontend service not required for prod, dev nginx config as well
-RUN rm -rf /etc/services.d/frontend /etc/nginx/conf.d/dev.conf
-
-# Change permission of logrotate config file
-RUN chmod 644 /etc/logrotate.d/nginx-proxy-manager
-
-# fix for pip installs
-# https://github.com/NginxProxyManager/nginx-proxy-manager/issues/1769
-RUN pip uninstall --yes setuptools \
-	&& pip install "setuptools==58.0.0"
+RUN rm -rf /etc/services.d/frontend /etc/nginx/conf.d/dev.conf \
+	&& chmod 644 /etc/logrotate.d/nginx-proxy-manager \
+	&& pip uninstall --yes setuptools \
+	&& pip install --no-cache-dir "setuptools==58.0.0"
 
 VOLUME [ "/data", "/etc/letsencrypt" ]
 ENTRYPOINT [ "/init" ]

+ 20 - 18
docker/docker-compose.ci.yml

@@ -1,17 +1,18 @@
 # WARNING: This is a CI docker-compose file used for building and testing of the entire app, it should not be used for production.
-version: "3"
+version: '3.8'
 services:
 
   fullstack-mysql:
-    image: ${IMAGE}:ci-${BUILD_NUMBER}
+    image: "${IMAGE}:ci-${BUILD_NUMBER}"
     environment:
-      NODE_ENV: "development"
+      DEBUG: 'true'
+      LE_STAGING: 'true'
       FORCE_COLOR: 1
-      DB_MYSQL_HOST: "db"
-      DB_MYSQL_PORT: 3306
-      DB_MYSQL_USER: "npm"
-      DB_MYSQL_PASSWORD: "npm"
-      DB_MYSQL_NAME: "npm"
+      DB_MYSQL_HOST: 'db'
+      DB_MYSQL_PORT: '3306'
+      DB_MYSQL_USER: 'npm'
+      DB_MYSQL_PASSWORD: 'npm'
+      DB_MYSQL_NAME: 'npm'
     volumes:
       - npm_data:/data
     expose:
@@ -26,11 +27,12 @@ services:
       timeout: 3s
 
   fullstack-sqlite:
-    image: ${IMAGE}:ci-${BUILD_NUMBER}
+    image: "${IMAGE}:ci-${BUILD_NUMBER}"
     environment:
-      NODE_ENV: "development"
+      DEBUG: 'true'
+      LE_STAGING: 'true'
       FORCE_COLOR: 1
-      DB_SQLITE_FILE: "/data/database.sqlite"
+      DB_SQLITE_FILE: '/data/mydb.sqlite'
     volumes:
       - npm_data:/data
     expose:
@@ -45,26 +47,26 @@ services:
   db:
     image: jc21/mariadb-aria
     environment:
-      MYSQL_ROOT_PASSWORD: "npm"
-      MYSQL_DATABASE: "npm"
-      MYSQL_USER: "npm"
-      MYSQL_PASSWORD: "npm"
+      MYSQL_ROOT_PASSWORD: 'npm'
+      MYSQL_DATABASE: 'npm'
+      MYSQL_USER: 'npm'
+      MYSQL_PASSWORD: 'npm'
     volumes:
       - db_data:/var/lib/mysql
 
   cypress-mysql:
-    image: ${IMAGE}-cypress:ci-${BUILD_NUMBER}
+    image: "${IMAGE}-cypress:ci-${BUILD_NUMBER}"
     build:
       context: ../test/
       dockerfile: cypress/Dockerfile
     environment:
-      CYPRESS_baseUrl: "http://fullstack-mysql:81"
+      CYPRESS_baseUrl: 'http://fullstack-mysql:81'
     volumes:
       - cypress-logs:/results
     command: cypress run --browser chrome --config-file=${CYPRESS_CONFIG:-cypress/config/ci.json}
 
   cypress-sqlite:
-    image: ${IMAGE}-cypress:ci-${BUILD_NUMBER}
+    image: "${IMAGE}-cypress:ci-${BUILD_NUMBER}"
     build:
       context: ../test/
       dockerfile: cypress/Dockerfile

+ 18 - 12
docker/docker-compose.dev.yml

@@ -1,6 +1,7 @@
 # WARNING: This is a DEVELOPMENT docker-compose file, it should not be used for production.
-version: "3.5"
+version: '3.8'
 services:
+
   npm:
     image: nginxproxymanager:dev
     container_name: npm_core
@@ -14,14 +15,19 @@ services:
     networks:
       - nginx_proxy_manager
     environment:
-      NODE_ENV: "development"
+      PUID: 1000
+      PGID: 1000
       FORCE_COLOR: 1
-      DEVELOPMENT: "true"
-      DB_MYSQL_HOST: "db"
-      DB_MYSQL_PORT: 3306
-      DB_MYSQL_USER: "npm"
-      DB_MYSQL_PASSWORD: "npm"
-      DB_MYSQL_NAME: "npm"
+      # specifically for dev:
+      DEBUG: 'true'
+      DEVELOPMENT: 'true'
+      LE_STAGING: 'true'
+      # db:
+      DB_MYSQL_HOST: 'db'
+      DB_MYSQL_PORT: '3306'
+      DB_MYSQL_USER: 'npm'
+      DB_MYSQL_PASSWORD: 'npm'
+      DB_MYSQL_NAME: 'npm'
       # DB_SQLITE_FILE: "/data/database.sqlite"
       # DISABLE_IPV6: "true"
     volumes:
@@ -42,10 +48,10 @@ services:
     networks:
       - nginx_proxy_manager
     environment:
-      MYSQL_ROOT_PASSWORD: "npm"
-      MYSQL_DATABASE: "npm"
-      MYSQL_USER: "npm"
-      MYSQL_PASSWORD: "npm"
+      MYSQL_ROOT_PASSWORD: 'npm'
+      MYSQL_DATABASE: 'npm'
+      MYSQL_USER: 'npm'
+      MYSQL_PASSWORD: 'npm'
     volumes:
       - db_data:/var/lib/mysql
 

+ 29 - 0
docker/rootfs/bin/common.sh

@@ -0,0 +1,29 @@
+#!/bin/bash
+
+set -e
+
+CYAN='\E[1;36m'
+BLUE='\E[1;34m'
+YELLOW='\E[1;33m'
+RED='\E[1;31m'
+RESET='\E[0m'
+export CYAN BLUE YELLOW RED RESET
+
+log_info () {
+	echo -e "${BLUE}❯ ${CYAN}$1${RESET}"
+}
+
+log_error () {
+	echo -e "${RED}❯ $1${RESET}"
+}
+
+# The `run` file will only execute 1 line so this helps keep things
+# logically separated
+
+log_fatal () {
+	echo -e "${RED}--------------------------------------${RESET}"
+	echo -e "${RED}ERROR: $1${RESET}"
+	echo -e "${RED}--------------------------------------${RESET}"
+	/run/s6/basedir/bin/halt
+	exit 1
+}

+ 0 - 46
docker/rootfs/bin/handle-ipv6-setting

@@ -1,46 +0,0 @@
-#!/bin/bash
-
-# This command reads the `DISABLE_IPV6` env var and will either enable
-# or disable ipv6 in all nginx configs based on this setting.
-
-# Lowercase
-DISABLE_IPV6=$(echo "${DISABLE_IPV6:-}" | tr '[:upper:]' '[:lower:]')
-
-CYAN='\E[1;36m'
-BLUE='\E[1;34m'
-YELLOW='\E[1;33m'
-RED='\E[1;31m'
-RESET='\E[0m'
-
-FOLDER=$1
-if [ "$FOLDER" == "" ]; then
-	echo -e "${RED}❯ $0 requires a absolute folder path as the first argument!${RESET}"
-	echo -e "${YELLOW}  ie: $0 /data/nginx${RESET}"
-	exit 1
-fi
-
-FILES=$(find "$FOLDER" -type f -name "*.conf")
-if [ "$DISABLE_IPV6" == "true" ] || [ "$DISABLE_IPV6" == "on" ] || [ "$DISABLE_IPV6" == "1" ] || [ "$DISABLE_IPV6" == "yes" ]; then
-	# IPV6 is disabled
-	echo "Disabling IPV6 in hosts"
-	echo -e "${BLUE}❯ ${CYAN}Disabling IPV6 in hosts: ${YELLOW}${FOLDER}${RESET}"
-
-	# Iterate over configs and run the regex
-	for FILE in $FILES
-	do
-		echo -e "  ${BLUE}❯ ${YELLOW}${FILE}${RESET}"
-		sed -E -i 's/^([^#]*)listen \[::\]/\1#listen [::]/g' "$FILE"
-	done
-
-else
-	# IPV6 is enabled
-	echo -e "${BLUE}❯ ${CYAN}Enabling IPV6 in hosts: ${YELLOW}${FOLDER}${RESET}"
-
-	# Iterate over configs and run the regex
-	for FILE in $FILES
-	do
-		echo -e "  ${BLUE}❯ ${YELLOW}${FILE}${RESET}"
-		sed -E -i 's/^(\s*)#listen \[::\]/\1listen [::]/g' "$FILE"
-	done
-
-fi

+ 2 - 3
docker/rootfs/etc/nginx/nginx.conf

@@ -1,7 +1,6 @@
 # run nginx in foreground
 daemon off;
-
-user root;
+pid /run/nginx/nginx.pid;
 
 # Set number of worker processes automatically based on number of CPU cores.
 worker_processes auto;
@@ -57,7 +56,7 @@ http {
 	}
 
 	# Real IP Determination
-	
+
 	# Local subnets:
 	set_real_ip_from 10.0.0.0/8;
 	set_real_ip_from 172.16.0.0/12; # Includes Docker subnet

+ 7 - 4
docker/rootfs/etc/s6-overlay/s6-rc.d/backend/run

@@ -3,17 +3,20 @@
 
 set -e
 
-echo "❯ Starting backend ..."
+. /bin/common.sh
+
+log_info 'Starting backend ...'
+
 if [ "$DEVELOPMENT" == "true" ]; then
 	cd /app || exit 1
 	# If yarn install fails: add --verbose --network-concurrency 1
-	yarn install
-	node --max_old_space_size=250 --abort_on_uncaught_exception node_modules/nodemon/bin/nodemon.js
+	s6-setuidgid npmuser yarn install
+	exec s6-setuidgid npmuser bash -c 'export HOME=/tmp/npmuserhome;node --max_old_space_size=250 --abort_on_uncaught_exception node_modules/nodemon/bin/nodemon.js'
 else
 	cd /app || exit 1
 	while :
 	do
-		node --abort_on_uncaught_exception --max_old_space_size=250 index.js
+		s6-setuidgid npmuser bash -c 'export HOME=/tmp/npmuserhome;node --abort_on_uncaught_exception --max_old_space_size=250 index.js'
 		sleep 1
 	done
 fi

+ 8 - 2
docker/rootfs/etc/s6-overlay/s6-rc.d/frontend/run

@@ -6,10 +6,16 @@ set -e
 # This service is DEVELOPMENT only.
 
 if [ "$DEVELOPMENT" == "true" ]; then
+	. /bin/common.sh
 	cd /app/frontend || exit 1
+	log_info 'Starting frontend ...'
+	HOME=/tmp/npmuserhome
+	export HOME
+	mkdir -p /app/frontend/dist
+	chown -R npmuser:npmuser /app/frontend/dist
 	# If yarn install fails: add --verbose --network-concurrency 1
-	yarn install
-	yarn watch
+	s6-setuidgid npmuser yarn install
+	exec s6-setuidgid npmuser yarn watch
 else
 	exit 0
 fi

+ 5 - 2
docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/run

@@ -3,5 +3,8 @@
 
 set -e
 
-echo "❯ Starting nginx ..."
-exec nginx
+. /bin/common.sh
+
+log_info 'Starting nginx ...'
+
+exec s6-setuidgid npmuser nginx

+ 18 - 0
docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/00-all.sh

@@ -0,0 +1,18 @@
+#!/command/with-contenv bash
+# shellcheck shell=bash
+
+set -e
+
+. /bin/common.sh
+
+if [ "$(id -u)" != "0" ]; then
+	log_fatal "This docker container must be run as root, do not specify a user.\nYou can specify PUID and PGID env vars to run processes as that user and group after initialization."
+fi
+
+. /etc/s6-overlay/s6-rc.d/prepare/10-npmuser.sh
+. /etc/s6-overlay/s6-rc.d/prepare/20-paths.sh
+. /etc/s6-overlay/s6-rc.d/prepare/30-ownership.sh
+. /etc/s6-overlay/s6-rc.d/prepare/40-dynamic.sh
+. /etc/s6-overlay/s6-rc.d/prepare/50-ipv6.sh
+. /etc/s6-overlay/s6-rc.d/prepare/60-secrets.sh
+. /etc/s6-overlay/s6-rc.d/prepare/90-banner.sh

+ 25 - 0
docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/10-npmuser.sh

@@ -0,0 +1,25 @@
+#!/command/with-contenv bash
+# shellcheck shell=bash
+
+set -e
+
+PUID=${PUID:-911}
+PGID=${PGID:-911}
+
+log_info 'Configuring npmuser ...'
+
+groupmod -g 1000 users || exit 1
+
+if id -u npmuser >/dev/null 2>&1; then
+	# user already exists
+	usermod -u "${PUID}" npmuser || exit 1
+else
+	# Add npmuser user
+	useradd -u "${PUID}" -U -d /tmp/npmuserhome -s /bin/false npmuser || exit 1
+fi
+
+usermod -G users npmuser || exit 1
+groupmod -o -g "${PGID}" npmuser || exit 1
+# Home for npmuser
+mkdir -p /tmp/npmuserhome
+chown -R npmuser:npmuser /tmp/npmuserhome

+ 41 - 0
docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/20-paths.sh

@@ -0,0 +1,41 @@
+#!/command/with-contenv bash
+# shellcheck shell=bash
+
+set -e
+
+log_info 'Checking paths ...'
+
+# Ensure /data is mounted
+if [ ! -d '/data' ]; then
+	log_fatal '/data is not mounted! Check your docker configuration.'
+fi
+# Ensure /etc/letsencrypt is mounted
+if [ ! -d '/etc/letsencrypt' ]; then
+	log_fatal '/etc/letsencrypt is not mounted! Check your docker configuration.'
+fi
+
+# Create required folders
+mkdir -p \
+	/data/nginx \
+	/data/custom_ssl \
+	/data/logs \
+	/data/access \
+	/data/nginx/default_host \
+	/data/nginx/default_www \
+	/data/nginx/proxy_host \
+	/data/nginx/redirection_host \
+	/data/nginx/stream \
+	/data/nginx/dead_host \
+	/data/nginx/temp \
+	/data/letsencrypt-acme-challenge \
+	/run/nginx \
+	/tmp/nginx/body \
+	/var/log/nginx \
+	/var/lib/nginx/cache/public \
+	/var/lib/nginx/cache/private \
+	/var/cache/nginx/proxy_temp
+
+touch /var/log/nginx/error.log || true
+chmod 777 /var/log/nginx/error.log || true
+chmod -R 777 /var/cache/nginx || true
+chmod 644 /etc/logrotate.d/nginx-proxy-manager

+ 21 - 0
docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/30-ownership.sh

@@ -0,0 +1,21 @@
+#!/command/with-contenv bash
+# shellcheck shell=bash
+
+set -e
+
+log_info 'Setting ownership ...'
+
+# root
+chown root /tmp/nginx
+
+# npmuser
+chown -R npmuser:npmuser \
+	/data \
+	/etc/letsencrypt \
+	/etc/nginx \
+	/run/nginx \
+	/tmp/nginx \
+	/var/cache/nginx \
+	/var/lib/logrotate \
+	/var/lib/nginx \
+	/var/log/nginx

+ 17 - 0
docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/40-dynamic.sh

@@ -0,0 +1,17 @@
+#!/command/with-contenv bash
+# shellcheck shell=bash
+
+set -e
+
+log_info 'Dynamic resolvers ...'
+
+DISABLE_IPV6=$(echo "${DISABLE_IPV6:-}" | tr '[:upper:]' '[:lower:]')
+
+# Dynamically generate resolvers file, if resolver is IPv6, enclose in `[]`
+# thanks @tfmm
+if [ "$DISABLE_IPV6" == "true" ] || [ "$DISABLE_IPV6" == "on" ] || [ "$DISABLE_IPV6" == "1" ] || [ "$DISABLE_IPV6" == "yes" ];
+then
+	echo resolver "$(awk 'BEGIN{ORS=" "} $1=="nameserver" { sub(/%.*$/,"",$2); print ($2 ~ ":")? "["$2"]": $2}' /etc/resolv.conf) ipv6=off valid=10s;" > /etc/nginx/conf.d/include/resolvers.conf
+else
+	echo resolver "$(awk 'BEGIN{ORS=" "} $1=="nameserver" { sub(/%.*$/,"",$2); print ($2 ~ ":")? "["$2"]": $2}' /etc/resolv.conf) valid=10s;" > /etc/nginx/conf.d/include/resolvers.conf
+fi

+ 36 - 0
docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/50-ipv6.sh

@@ -0,0 +1,36 @@
+#!/bin/bash
+
+# This command reads the `DISABLE_IPV6` env var and will either enable
+# or disable ipv6 in all nginx configs based on this setting.
+
+log_info 'IPv6 ...'
+
+# Lowercase
+DISABLE_IPV6=$(echo "${DISABLE_IPV6:-}" | tr '[:upper:]' '[:lower:]')
+
+process_folder () {
+	FILES=$(find "$1" -type f -name "*.conf")
+	SED_REGEX=
+
+	if [ "$DISABLE_IPV6" == "true" ] || [ "$DISABLE_IPV6" == "on" ] || [ "$DISABLE_IPV6" == "1" ] || [ "$DISABLE_IPV6" == "yes" ]; then
+		# IPV6 is disabled
+		echo "Disabling IPV6 in hosts in: $1"
+		SED_REGEX='s/^([^#]*)listen \[::\]/\1#listen [::]/g'
+	else
+		# IPV6 is enabled
+		echo "Enabling IPV6 in hosts in: $1"
+		SED_REGEX='s/^(\s*)#listen \[::\]/\1listen [::]/g'
+	fi
+
+	for FILE in $FILES
+	do
+		echo "- ${FILE}"
+		sed -E -i "$SED_REGEX" "$FILE"
+	done
+
+	# ensure the files are still owned by the npmuser
+	chown -R npmuser:npmuser "$1"
+}
+
+process_folder /etc/nginx/conf.d
+process_folder /data/nginx

+ 30 - 0
docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/60-secrets.sh

@@ -0,0 +1,30 @@
+#!/command/with-contenv bash
+# shellcheck shell=bash
+
+set -e
+
+# in s6, environmental variables are written as text files for s6 to monitor
+# search through full-path filenames for files ending in "__FILE"
+log_info 'Docker secrets ...'
+
+for FILENAME in $(find /var/run/s6/container_environment/ | grep "__FILE$"); do
+	echo "[secret-init] Evaluating ${FILENAME##*/} ..."
+
+	# set SECRETFILE to the contents of the full-path textfile
+	SECRETFILE=$(cat "${FILENAME}")
+	# if SECRETFILE exists / is not null
+	if [[ -f "${SECRETFILE}" ]]; then
+		# strip the appended "__FILE" from environmental variable name ...
+		STRIPFILE=$(echo "${FILENAME}" | sed "s/__FILE$//")
+		# echo "[secret-init] Set STRIPFILE to ${STRIPFILE}"  # DEBUG - rm for prod!
+
+		# ... and set value to contents of secretfile
+		# since s6 uses text files, this is effectively "export ..."
+		printf '%s' "$(cat "${SECRETFILE}")" > "${STRIPFILE}"
+		# echo "[secret-init] Set ${STRIPFILE##*/} to $(cat ${STRIPFILE})"  # DEBUG - rm for prod!"
+		echo "Success: ${STRIPFILE##*/} set from ${FILENAME##*/}"
+
+	else
+		echo "Cannot find secret in ${FILENAME}"
+	fi
+done

+ 17 - 0
docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/90-banner.sh

@@ -0,0 +1,17 @@
+#!/command/with-contenv bash
+# shellcheck shell=bash
+
+set -e
+
+echo
+echo "-------------------------------------
+ _   _ ____  __  __
+| \ | |  _ \|  \/  |
+|  \| | |_) | |\/| |
+| |\  |  __/| |  | |
+|_| \_|_|   |_|  |_|
+-------------------------------------
+User UID: $(id -u npmuser)
+User GID: $(id -g npmuser)
+-------------------------------------
+"

+ 0 - 93
docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/script.sh

@@ -1,93 +0,0 @@
-#!/command/with-contenv bash
-# shellcheck shell=bash
-
-set -e
-
-DATA_PATH=/data
-
-# Ensure /data is mounted
-if [ ! -d "$DATA_PATH" ]; then
-	echo '--------------------------------------'
-	echo "ERROR: $DATA_PATH is not mounted! Check your docker configuration."
-	echo '--------------------------------------'
-	/run/s6/basedir/bin/halt
-	exit 1
-fi
-
-echo "❯ Checking folder structure ..."
-
-# Create required folders
-mkdir -p /tmp/nginx/body \
-	/run/nginx \
-	/var/log/nginx \
-	/data/nginx \
-	/data/custom_ssl \
-	/data/logs \
-	/data/access \
-	/data/nginx/default_host \
-	/data/nginx/default_www \
-	/data/nginx/proxy_host \
-	/data/nginx/redirection_host \
-	/data/nginx/stream \
-	/data/nginx/dead_host \
-	/data/nginx/temp \
-	/var/lib/nginx/cache/public \
-	/var/lib/nginx/cache/private \
-	/var/cache/nginx/proxy_temp \
-	/data/letsencrypt-acme-challenge
-
-touch /var/log/nginx/error.log && chmod 777 /var/log/nginx/error.log && chmod -R 777 /var/cache/nginx
-chown root /tmp/nginx
-
-# Dynamically generate resolvers file, if resolver is IPv6, enclose in `[]`
-# thanks @tfmm
-if [ "$DISABLE_IPV6" == "true" ] || [ "$DISABLE_IPV6" == "on" ] || [ "$DISABLE_IPV6" == "1" ] || [ "$DISABLE_IPV6" == "yes" ];
-then
-	echo resolver "$(awk 'BEGIN{ORS=" "} $1=="nameserver" { sub(/%.*$/,"",$2); print ($2 ~ ":")? "["$2"]": $2}' /etc/resolv.conf) ipv6=off valid=10s;" > /etc/nginx/conf.d/include/resolvers.conf
-else
-	echo resolver "$(awk 'BEGIN{ORS=" "} $1=="nameserver" { sub(/%.*$/,"",$2); print ($2 ~ ":")? "["$2"]": $2}' /etc/resolv.conf) valid=10s;" > /etc/nginx/conf.d/include/resolvers.conf
-fi
-
-echo "Changing ownership of /data/logs to $(id -u):$(id -g)"
-chown -R "$(id -u):$(id -g)" /data/logs
-
-# Handle IPV6 settings
-/bin/handle-ipv6-setting /etc/nginx/conf.d
-/bin/handle-ipv6-setting /data/nginx
-
-# ref: https://github.com/linuxserver/docker-baseimage-alpine/blob/master/root/etc/cont-init.d/01-envfile
-
-# in s6, environmental variables are written as text files for s6 to monitor
-# search through full-path filenames for files ending in "__FILE"
-echo "❯ Secrets-init ..."
-for FILENAME in $(find /var/run/s6/container_environment/ | grep "__FILE$"); do
-	echo "[secret-init] Evaluating ${FILENAME##*/} ..."
-
-	# set SECRETFILE to the contents of the full-path textfile
-	SECRETFILE=$(cat "${FILENAME}")
-	# if SECRETFILE exists / is not null
-	if [[ -f "${SECRETFILE}" ]]; then
-		# strip the appended "__FILE" from environmental variable name ...
-		STRIPFILE=$(echo "${FILENAME}" | sed "s/__FILE//g")
-		# echo "[secret-init] Set STRIPFILE to ${STRIPFILE}"  # DEBUG - rm for prod!
-
-		# ... and set value to contents of secretfile
-		# since s6 uses text files, this is effectively "export ..."
-		printf $(cat "${SECRETFILE}") > "${STRIPFILE}"
-		# echo "[secret-init] Set ${STRIPFILE##*/} to $(cat ${STRIPFILE})"  # DEBUG - rm for prod!"
-		echo "[secret-init] Success! ${STRIPFILE##*/} set from ${FILENAME##*/}"
-
-	else
-		echo "[secret-init] cannot find secret in ${FILENAME}"
-	fi
-done
-
-echo
-echo "-------------------------------------
- _   _ ____  __  __
-| \ | |  _ \|  \/  |
-|  \| | |_) | |\/| |
-| |\  |  __/| |  | |
-|_| \_|_|   |_|  |_|
--------------------------------------
-"

+ 1 - 1
docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/up

@@ -1,2 +1,2 @@
 # shellcheck shell=bash
-/etc/s6-overlay/s6-rc.d/prepare/script.sh
+/etc/s6-overlay/s6-rc.d/prepare/00-all.sh

+ 6 - 7
docs/advanced-config/README.md

@@ -25,7 +25,7 @@ networks:
 Let's look at a Portainer example:
 
 ```yml
-version: '3'
+version: '3.8'
 services:
 
   portainer:
@@ -60,14 +60,14 @@ healthcheck:
   timeout: 3s
 ```
 
-## Docker Secrets
+## Docker File Secrets
 
-This image supports the use of Docker secrets to import from file and keep sensitive usernames or passwords from being passed or preserved in plaintext.
+This image supports the use of Docker secrets to import from files and keep sensitive usernames or passwords from being passed or preserved in plaintext.
 
 You can set any environment variable from a file by appending `__FILE` (double-underscore FILE) to the environmental variable name.
 
 ```yml
-version: "3.7"
+version: '3.8'
 
 secrets:
   # Secrets are single-line text files where the sole content is the secret
@@ -96,9 +96,7 @@ services:
       # DB_MYSQL_PASSWORD: "npm"  # use secret instead
       DB_MYSQL_PASSWORD__FILE: /run/secrets/MYSQL_PWD
       DB_MYSQL_NAME: "npm"
-      # If you would rather use Sqlite uncomment this
-      # and remove all DB_MYSQL_* lines above
-      # DB_SQLITE_FILE: "/data/database.sqlite"
+      # If you would rather use Sqlite, remove all DB_MYSQL_* lines above
       # Uncomment this if IPv6 is not enabled on your host
       # DISABLE_IPV6: 'true'
     volumes:
@@ -108,6 +106,7 @@ services:
       - MYSQL_PWD
     depends_on:
       - db
+
   db:
     image: jc21/mariadb-aria
     restart: unless-stopped

+ 9 - 52
docs/setup/README.md

@@ -5,7 +5,7 @@
 Create a `docker-compose.yml` file:
 
 ```yml
-version: "3"
+version: '3.8'
 services:
   app:
     image: 'jc21/nginx-proxy-manager:latest'
@@ -20,7 +20,7 @@ services:
 
     # Uncomment the next line if you uncomment anything in the section
     # environment:
-      # Uncomment this if you want to change the location of 
+      # Uncomment this if you want to change the location of
       # the SQLite DB file within the container
       # DB_SQLITE_FILE: "/data/database.sqlite"
 
@@ -51,7 +51,7 @@ are going to use.
 Here is an example of what your `docker-compose.yml` will look like when using a MariaDB container:
 
 ```yml
-version: "3"
+version: '3.8'
 services:
   app:
     image: 'jc21/nginx-proxy-manager:latest'
@@ -64,6 +64,10 @@ services:
       # Add any other Stream port you want to expose
       # - '21:21' # FTP
     environment:
+      # Unix user and group IDs, optional
+      PUID: 1000
+      PGID: 1000
+      # Mysql/Maria connection parameters:
       DB_MYSQL_HOST: "db"
       DB_MYSQL_PORT: 3306
       DB_MYSQL_USER: "npm"
@@ -118,13 +122,12 @@ Please note that the `jc21/mariadb-aria:latest` image might have some problems o
 
 After the app is running for the first time, the following will happen:
 
-1. The database will initialize with table structures
-2. GPG keys will be generated and saved in the configuration file
+1. GPG keys will be generated and saved in the data folder
+2. The database will initialize with table structures
 3. A default admin user will be created
 
 This process can take a couple of minutes depending on your machine.
 
-
 ## Default Administrator User
 
 ```
@@ -134,49 +137,3 @@ Password: changeme
 
 Immediately after logging in with this default user you will be asked to modify your details and change your password.
 
-## Configuration File
-
-::: warning
-
-This section is meant for advanced users
-
-:::
-
-If you would like more control over the database settings you can define a custom config JSON file.
-
-
-Here's an example for `sqlite` configuration as it is generated from the environment variables:
-
-```json
-{
-  "database": {
-    "engine": "knex-native",
-    "knex": {
-      "client": "sqlite3",
-      "connection": {
-        "filename": "/data/database.sqlite"
-      },
-      "useNullAsDefault": true
-    }
-  }
-}
-```
-
-You can modify the `knex` object with your custom configuration, but note that not all knex clients might be installed in the image.
-
-Once you've created your configuration file you can mount it to `/app/config/production.json` inside you container using:
-
-```
-[...]
-services:
-  app:
-    image: 'jc21/nginx-proxy-manager:latest'
-    [...]
-    volumes:
-      - ./config.json:/app/config/production.json
-      [...]
-[...]
-```
-
-**Note:** After the first run of the application, the config file will be altered to include generated encryption keys unique to your installation.
-These keys affect the login and session management of the application. If these keys change for any reason, all users will be logged out.

+ 1 - 0
docs/upgrading/README.md

@@ -9,3 +9,4 @@ This project will automatically update any databases or other requirements so yo
 any crazy instructions. These steps above will pull the latest updates and recreate the docker
 containers.
 
+See the [list of releases](https://github.com/NginxProxyManager/nginx-proxy-manager/releases) for any upgrade steps specific to each release.

Algúns arquivos non se mostraron porque demasiados arquivos cambiaron neste cambio