Procházet zdrojové kódy

Merge pull request #4261 from NginxProxyManager/develop

v2.12.2
jc21 před 10 měsíci
rodič
revize
b4f49969d6
49 změnil soubory, kde provedl 769 přidání a 147 odebrání
  1. 1 1
      .version
  2. 46 2
      Jenkinsfile
  3. 1 1
      README.md
  4. 10 4
      backend/internal/access-list.js
  5. 5 4
      backend/internal/audit-log.js
  6. 4 3
      backend/internal/dead-host.js
  7. 7 6
      backend/internal/host.js
  8. 4 3
      backend/internal/proxy-host.js
  9. 4 3
      backend/internal/redirection-host.js
  10. 7 6
      backend/internal/stream.js
  11. 5 3
      backend/internal/token.js
  12. 48 5
      backend/lib/config.js
  13. 13 1
      backend/lib/helpers.js
  14. 3 0
      backend/models/redirection_host.js
  15. 1 0
      backend/package.json
  16. 3 1
      backend/schema/components/stream-object.json
  17. 1 2
      backend/schema/paths/nginx/access-lists/listID/put.json
  18. 8 8
      backend/setup.js
  19. 2 2
      backend/templates/_access.conf
  20. 7 2
      backend/templates/_listen.conf
  21. 1 5
      backend/templates/_location.conf
  22. 2 0
      backend/templates/dead_host.conf
  23. 91 3
      backend/yarn.lock
  24. 8 0
      docker/ci.env
  25. binární
      docker/ci/postgres/authentik.sql.gz
  26. 2 1
      docker/dev/Dockerfile
  27. 78 0
      docker/docker-compose.ci.postgres.yml
  28. 105 16
      docker/docker-compose.dev.yml
  29. 1 1
      docker/rootfs/etc/nginx/conf.d/include/assets.conf
  30. 1 3
      docs/src/advanced-config/index.md
  31. 57 5
      docs/src/setup/index.md
  32. 1 0
      docs/src/third-party/index.md
  33. 3 3
      docs/yarn.lock
  34. 2 2
      frontend/js/app/nginx/access/list/item.ejs
  35. 2 2
      frontend/js/app/nginx/certificates/list/item.ejs
  36. 2 2
      frontend/js/app/nginx/dead/list/item.ejs
  37. 2 2
      frontend/js/app/nginx/proxy/list/item.ejs
  38. 2 2
      frontend/js/app/nginx/redirection/list/item.ejs
  39. 2 2
      frontend/js/app/nginx/stream/list/item.ejs
  40. 12 12
      frontend/js/app/user/form.ejs
  41. 1 1
      frontend/js/app/user/form.js
  42. 3 3
      frontend/yarn.lock
  43. 36 12
      global/certbot-dns-plugins.json
  44. 1 1
      scripts/.common.sh
  45. 2 0
      scripts/ci/fulltest-cypress
  46. 5 6
      scripts/start-dev
  47. 64 0
      test/cypress/e2e/api/Ldap.cy.js
  48. 97 0
      test/cypress/e2e/api/OAuth.cy.js
  49. 6 6
      test/yarn.lock

+ 1 - 1
.version

@@ -1 +1 @@
-2.12.1
+2.12.2

+ 46 - 2
Jenkinsfile

@@ -43,7 +43,7 @@ pipeline {
 					steps {
 						script {
 							// Defaults to the Branch name, which is applies to all branches AND pr's
-							buildxPushTags = "-t docker.io/jc21/${IMAGE}:github-${BRANCH_LOWER}"
+							buildxPushTags = "-t docker.io/nginxproxymanager/${IMAGE}-dev:${BRANCH_LOWER}"
 						}
 					}
 				}
@@ -167,6 +167,44 @@ pipeline {
 				}
 			}
 		}
+		stage('Test Postgres') {
+			environment {
+				COMPOSE_PROJECT_NAME = "npm_${BRANCH_LOWER}_${BUILD_NUMBER}_postgres"
+				COMPOSE_FILE         = 'docker/docker-compose.ci.yml:docker/docker-compose.ci.postgres.yml'
+			}
+			when {
+				not {
+					equals expected: 'UNSTABLE', actual: currentBuild.result
+				}
+			}
+			steps {
+				sh 'rm -rf ./test/results/junit/*'
+				sh './scripts/ci/fulltest-cypress'
+			}
+			post {
+				always {
+					// Dumps to analyze later
+					sh 'mkdir -p debug/postgres'
+					sh 'docker logs $(docker-compose ps --all -q fullstack) > debug/postgres/docker_fullstack.log 2>&1'
+					sh 'docker logs $(docker-compose ps --all -q stepca) > debug/postgres/docker_stepca.log 2>&1'
+					sh 'docker logs $(docker-compose ps --all -q pdns) > debug/postgres/docker_pdns.log 2>&1'
+					sh 'docker logs $(docker-compose ps --all -q pdns-db) > debug/postgres/docker_pdns-db.log 2>&1'
+					sh 'docker logs $(docker-compose ps --all -q dnsrouter) > debug/postgres/docker_dnsrouter.log 2>&1'
+					sh 'docker logs $(docker-compose ps --all -q db-postgres) > debug/postgres/docker_db-postgres.log 2>&1'
+					sh 'docker logs $(docker-compose ps --all -q authentik) > debug/postgres/docker_authentik.log 2>&1'
+					sh 'docker logs $(docker-compose ps --all -q authentik-redis) > debug/postgres/docker_authentik-redis.log 2>&1'
+					sh 'docker logs $(docker-compose ps --all -q authentik-ldap) > debug/postgres/docker_authentik-ldap.log 2>&1'
+
+					junit 'test/results/junit/*'
+					sh 'docker-compose down --remove-orphans --volumes -t 30 || true'
+				}
+				unstable {
+					dir(path: 'testing/results') {
+						archiveArtifacts(allowEmptyArchive: true, artifacts: '**/*', excludes: '**/*.xml')
+					}
+				}
+			}
+		}
 		stage('MultiArch Build') {
 			when {
 				not {
@@ -203,7 +241,13 @@ pipeline {
 					}
 					steps {
 						script {
-							npmGithubPrComment("Docker Image for build ${BUILD_NUMBER} is available on [DockerHub](https://cloud.docker.com/repository/docker/jc21/${IMAGE}) as `jc21/${IMAGE}:github-${BRANCH_LOWER}`\n\n**Note:** ensure you backup your NPM instance before testing this PR image! Especially if this PR contains database changes.", true)
+							npmGithubPrComment("""Docker Image for build ${BUILD_NUMBER} is available on
+[DockerHub](https://cloud.docker.com/repository/docker/nginxproxymanager/${IMAGE}-dev)
+as `nginxproxymanager/${IMAGE}-dev:${BRANCH_LOWER}`
+
+**Note:** ensure you backup your NPM instance before testing this image! Especially if there are database changes
+**Note:** this is a different docker image namespace than the official image
+""", true)
 						}
 					}
 				}

+ 1 - 1
README.md

@@ -1,7 +1,7 @@
 <p align="center">
 	<img src="https://nginxproxymanager.com/github.png">
 	<br><br>
-	<img src="https://img.shields.io/badge/version-2.12.1-green.svg?style=for-the-badge">
+	<img src="https://img.shields.io/badge/version-2.12.2-green.svg?style=for-the-badge">
 	<a href="https://hub.docker.com/repository/docker/jc21/nginx-proxy-manager">
 		<img src="https://img.shields.io/docker/stars/jc21/nginx-proxy-manager.svg?style=for-the-badge">
 	</a>

+ 10 - 4
backend/internal/access-list.js

@@ -81,7 +81,7 @@ const internalAccessList = {
 
 				return internalAccessList.build(row)
 					.then(() => {
-						if (row.proxy_host_count) {
+						if (parseInt(row.proxy_host_count, 10)) {
 							return internalNginx.bulkGenerateConfigs('proxy_host', row.proxy_hosts);
 						}
 					})
@@ -223,7 +223,7 @@ const internalAccessList = {
 			.then((row) => {
 				return internalAccessList.build(row)
 					.then(() => {
-						if (row.proxy_host_count) {
+						if (parseInt(row.proxy_host_count, 10)) {
 							return internalNginx.bulkGenerateConfigs('proxy_host', row.proxy_hosts);
 						}
 					}).then(internalNginx.reload)
@@ -252,7 +252,10 @@ const internalAccessList = {
 				let query = accessListModel
 					.query()
 					.select('access_list.*', accessListModel.raw('COUNT(proxy_host.id) as proxy_host_count'))
-					.joinRaw('LEFT JOIN `proxy_host` ON `proxy_host`.`access_list_id` = `access_list`.`id` AND `proxy_host`.`is_deleted` = 0')
+					.leftJoin('proxy_host', function() {
+						this.on('proxy_host.access_list_id', '=', 'access_list.id')
+							.andOn('proxy_host.is_deleted', '=', 0);
+					})
 					.where('access_list.is_deleted', 0)
 					.andWhere('access_list.id', data.id)
 					.allowGraph('[owner,items,clients,proxy_hosts.[certificate,access_list.[clients,items]]]')
@@ -373,7 +376,10 @@ const internalAccessList = {
 				let query = accessListModel
 					.query()
 					.select('access_list.*', accessListModel.raw('COUNT(proxy_host.id) as proxy_host_count'))
-					.joinRaw('LEFT JOIN `proxy_host` ON `proxy_host`.`access_list_id` = `access_list`.`id` AND `proxy_host`.`is_deleted` = 0')
+					.leftJoin('proxy_host', function() {
+						this.on('proxy_host.access_list_id', '=', 'access_list.id')
+							.andOn('proxy_host.is_deleted', '=', 0);
+					})
 					.where('access_list.is_deleted', 0)
 					.groupBy('access_list.id')
 					.allowGraph('[owner,items,clients]')

+ 5 - 4
backend/internal/audit-log.js

@@ -1,5 +1,6 @@
-const error         = require('../lib/error');
-const auditLogModel = require('../models/audit-log');
+const error            = require('../lib/error');
+const auditLogModel    = require('../models/audit-log');
+const {castJsonIfNeed} = require('../lib/helpers');
 
 const internalAuditLog = {
 
@@ -22,9 +23,9 @@ const internalAuditLog = {
 					.allowGraph('[user]');
 
 				// Query is used for searching
-				if (typeof search_query === 'string') {
+				if (typeof search_query === 'string' && search_query.length > 0) {
 					query.where(function () {
-						this.where('meta', 'like', '%' + search_query + '%');
+						this.where(castJsonIfNeed('meta'), 'like', '%' + search_query + '%');
 					});
 				}
 

+ 4 - 3
backend/internal/dead-host.js

@@ -6,6 +6,7 @@ const internalHost        = require('./host');
 const internalNginx       = require('./nginx');
 const internalAuditLog    = require('./audit-log');
 const internalCertificate = require('./certificate');
+const {castJsonIfNeed}    = require('../lib/helpers');
 
 function omissions () {
 	return ['is_deleted'];
@@ -409,16 +410,16 @@ const internalDeadHost = {
 					.where('is_deleted', 0)
 					.groupBy('id')
 					.allowGraph('[owner,certificate]')
-					.orderBy('domain_names', 'ASC');
+					.orderBy(castJsonIfNeed('domain_names'), 'ASC');
 
 				if (access_data.permission_visibility !== 'all') {
 					query.andWhere('owner_user_id', access.token.getUserId(1));
 				}
 
 				// Query is used for searching
-				if (typeof search_query === 'string') {
+				if (typeof search_query === 'string' && search_query.length > 0) {
 					query.where(function () {
-						this.where('domain_names', 'like', '%' + search_query + '%');
+						this.where(castJsonIfNeed('domain_names'), 'like', '%' + search_query + '%');
 					});
 				}
 

+ 7 - 6
backend/internal/host.js

@@ -2,6 +2,7 @@ const _                    = require('lodash');
 const proxyHostModel       = require('../models/proxy_host');
 const redirectionHostModel = require('../models/redirection_host');
 const deadHostModel        = require('../models/dead_host');
+const {castJsonIfNeed}     = require('../lib/helpers');
 
 const internalHost = {
 
@@ -17,7 +18,7 @@ const internalHost = {
 	cleanSslHstsData: function (data, existing_data) {
 		existing_data = existing_data === undefined ? {} : existing_data;
 
-		let combined_data = _.assign({}, existing_data, data);
+		const combined_data = _.assign({}, existing_data, data);
 
 		if (!combined_data.certificate_id) {
 			combined_data.ssl_forced    = false;
@@ -73,7 +74,7 @@ const internalHost = {
 	 * @returns {Promise}
 	 */
 	getHostsWithDomains: function (domain_names) {
-		let promises = [
+		const promises = [
 			proxyHostModel
 				.query()
 				.where('is_deleted', 0),
@@ -125,19 +126,19 @@ const internalHost = {
 	 * @returns {Promise}
 	 */
 	isHostnameTaken: function (hostname, ignore_type, ignore_id) {
-		let promises = [
+		const promises = [
 			proxyHostModel
 				.query()
 				.where('is_deleted', 0)
-				.andWhere('domain_names', 'like', '%' + hostname + '%'),
+				.andWhere(castJsonIfNeed('domain_names'), 'like', '%' + hostname + '%'),
 			redirectionHostModel
 				.query()
 				.where('is_deleted', 0)
-				.andWhere('domain_names', 'like', '%' + hostname + '%'),
+				.andWhere(castJsonIfNeed('domain_names'), 'like', '%' + hostname + '%'),
 			deadHostModel
 				.query()
 				.where('is_deleted', 0)
-				.andWhere('domain_names', 'like', '%' + hostname + '%')
+				.andWhere(castJsonIfNeed('domain_names'), 'like', '%' + hostname + '%')
 		];
 
 		return Promise.all(promises)

+ 4 - 3
backend/internal/proxy-host.js

@@ -6,6 +6,7 @@ const internalHost        = require('./host');
 const internalNginx       = require('./nginx');
 const internalAuditLog    = require('./audit-log');
 const internalCertificate = require('./certificate');
+const {castJsonIfNeed}    = require('../lib/helpers');
 
 function omissions () {
 	return ['is_deleted', 'owner.is_deleted'];
@@ -416,16 +417,16 @@ const internalProxyHost = {
 					.where('is_deleted', 0)
 					.groupBy('id')
 					.allowGraph('[owner,access_list,certificate]')
-					.orderBy('domain_names', 'ASC');
+					.orderBy(castJsonIfNeed('domain_names'), 'ASC');
 
 				if (access_data.permission_visibility !== 'all') {
 					query.andWhere('owner_user_id', access.token.getUserId(1));
 				}
 
 				// Query is used for searching
-				if (typeof search_query === 'string') {
+				if (typeof search_query === 'string' && search_query.length > 0) {
 					query.where(function () {
-						this.where('domain_names', 'like', '%' + search_query + '%');
+						this.where(castJsonIfNeed('domain_names'), 'like', `%${search_query}%`);
 					});
 				}
 

+ 4 - 3
backend/internal/redirection-host.js

@@ -6,6 +6,7 @@ const internalHost         = require('./host');
 const internalNginx        = require('./nginx');
 const internalAuditLog     = require('./audit-log');
 const internalCertificate  = require('./certificate');
+const {castJsonIfNeed}     = require('../lib/helpers');
 
 function omissions () {
 	return ['is_deleted'];
@@ -409,16 +410,16 @@ const internalRedirectionHost = {
 					.where('is_deleted', 0)
 					.groupBy('id')
 					.allowGraph('[owner,certificate]')
-					.orderBy('domain_names', 'ASC');
+					.orderBy(castJsonIfNeed('domain_names'), 'ASC');
 
 				if (access_data.permission_visibility !== 'all') {
 					query.andWhere('owner_user_id', access.token.getUserId(1));
 				}
 
 				// Query is used for searching
-				if (typeof search_query === 'string') {
+				if (typeof search_query === 'string' && search_query.length > 0) {
 					query.where(function () {
-						this.where('domain_names', 'like', '%' + search_query + '%');
+						this.where(castJsonIfNeed('domain_names'), 'like', `%${search_query}%`);
 					});
 				}
 

+ 7 - 6
backend/internal/stream.js

@@ -4,6 +4,7 @@ const utils            = require('../lib/utils');
 const streamModel      = require('../models/stream');
 const internalNginx    = require('./nginx');
 const internalAuditLog = require('./audit-log');
+const {castJsonIfNeed} = require('../lib/helpers');
 
 function omissions () {
 	return ['is_deleted'];
@@ -293,21 +294,21 @@ const internalStream = {
 	getAll: (access, expand, search_query) => {
 		return access.can('streams:list')
 			.then((access_data) => {
-				let query = streamModel
+				const query = streamModel
 					.query()
 					.where('is_deleted', 0)
 					.groupBy('id')
 					.allowGraph('[owner]')
-					.orderBy('incoming_port', 'ASC');
+					.orderByRaw('CAST(incoming_port AS INTEGER) ASC');
 
 				if (access_data.permission_visibility !== 'all') {
 					query.andWhere('owner_user_id', access.token.getUserId(1));
 				}
 
 				// Query is used for searching
-				if (typeof search_query === 'string') {
+				if (typeof search_query === 'string' && search_query.length > 0) {
 					query.where(function () {
-						this.where('incoming_port', 'like', '%' + search_query + '%');
+						this.where(castJsonIfNeed('incoming_port'), 'like', `%${search_query}%`);
 					});
 				}
 
@@ -327,9 +328,9 @@ const internalStream = {
 	 * @returns {Promise}
 	 */
 	getCount: (user_id, visibility) => {
-		let query = streamModel
+		const query = streamModel
 			.query()
-			.count('id as count')
+			.count('id AS count')
 			.where('is_deleted', 0);
 
 		if (visibility !== 'all') {

+ 5 - 3
backend/internal/token.js

@@ -5,6 +5,8 @@ const authModel  = require('../models/auth');
 const helpers    = require('../lib/helpers');
 const TokenModel = require('../models/token');
 
+const ERROR_MESSAGE_INVALID_AUTH = 'Invalid email or password';
+
 module.exports = {
 
 	/**
@@ -69,15 +71,15 @@ module.exports = {
 													};
 												});
 										} else {
-											throw new error.AuthError('Invalid password');
+											throw new error.AuthError(ERROR_MESSAGE_INVALID_AUTH);
 										}
 									});
 							} else {
-								throw new error.AuthError('No password auth for user');
+								throw new error.AuthError(ERROR_MESSAGE_INVALID_AUTH);
 							}
 						});
 				} else {
-					throw new error.AuthError('No relevant user found');
+					throw new error.AuthError(ERROR_MESSAGE_INVALID_AUTH);
 				}
 			});
 	},

+ 48 - 5
backend/lib/config.js

@@ -2,7 +2,10 @@ const fs      = require('fs');
 const NodeRSA = require('node-rsa');
 const logger  = require('../logger').global;
 
-const keysFile = '/data/keys.json';
+const keysFile         = '/data/keys.json';
+const mysqlEngine      = 'mysql2';
+const postgresEngine   = 'pg';
+const sqliteClientName = 'sqlite3';
 
 let instance = null;
 
@@ -14,7 +17,7 @@ const configure = () => {
 		let configData;
 		try {
 			configData = require(filename);
-		} catch (err) {
+		} catch (_) {
 			// do nothing
 		}
 
@@ -34,7 +37,7 @@ const configure = () => {
 		logger.info('Using MySQL configuration');
 		instance = {
 			database: {
-				engine:   'mysql2',
+				engine:   mysqlEngine,
 				host:     envMysqlHost,
 				port:     process.env.DB_MYSQL_PORT || 3306,
 				user:     envMysqlUser,
@@ -46,13 +49,33 @@ const configure = () => {
 		return;
 	}
 
+	const envPostgresHost = process.env.DB_POSTGRES_HOST || null;
+	const envPostgresUser = process.env.DB_POSTGRES_USER || null;
+	const envPostgresName = process.env.DB_POSTGRES_NAME || null;
+	if (envPostgresHost && envPostgresUser && envPostgresName) {
+		// we have enough postgres creds to go with postgres
+		logger.info('Using Postgres configuration');
+		instance = {
+			database: {
+				engine:   postgresEngine,
+				host:     envPostgresHost,
+				port:     process.env.DB_POSTGRES_PORT || 5432,
+				user:     envPostgresUser,
+				password: process.env.DB_POSTGRES_PASSWORD,
+				name:     envPostgresName,
+			},
+			keys: getKeys(),
+		};
+		return;
+	}
+
 	const envSqliteFile = process.env.DB_SQLITE_FILE || '/data/database.sqlite';
 	logger.info(`Using Sqlite: ${envSqliteFile}`);
 	instance = {
 		database: {
 			engine: 'knex-native',
 			knex:   {
-				client:     'sqlite3',
+				client:     sqliteClientName,
 				connection: {
 					filename: envSqliteFile
 				},
@@ -143,7 +166,27 @@ module.exports = {
 	 */
 	isSqlite: function () {
 		instance === null && configure();
-		return instance.database.knex && instance.database.knex.client === 'sqlite3';
+		return instance.database.knex && instance.database.knex.client === sqliteClientName;
+	},
+
+	/**
+	 * Is this a mysql configuration?
+	 *
+	 * @returns {boolean}
+	 */
+	isMysql: function () {
+		instance === null && configure();
+		return instance.database.engine === mysqlEngine;
+	},
+	
+	/**
+		 * Is this a postgres configuration?
+		 *
+		 * @returns {boolean}
+		 */
+	isPostgres: function () {
+		instance === null && configure();
+		return instance.database.engine === postgresEngine;
 	},
 
 	/**

+ 13 - 1
backend/lib/helpers.js

@@ -1,4 +1,6 @@
-const moment = require('moment');
+const moment       = require('moment');
+const {isPostgres} = require('./config');
+const {ref}        = require('objection');
 
 module.exports = {
 
@@ -45,6 +47,16 @@ module.exports = {
 			}
 		});
 		return obj;
+	},
+
+	/**
+	 * Casts a column to json if using postgres
+	 *
+	 * @param {string} colName
+	 * @returns {string|Objection.ReferenceBuilder}
+	 */
+	castJsonIfNeed: function (colName) {
+		return isPostgres() ? ref(colName).castText() : colName;
 	}
 
 };

+ 3 - 0
backend/models/redirection_host.js

@@ -17,6 +17,9 @@ const boolFields = [
 	'preserve_path',
 	'ssl_forced',
 	'block_exploits',
+	'hsts_enabled',
+	'hsts_subdomains',
+	'http2_support',
 ];
 
 class RedirectionHost extends Model {

+ 1 - 0
backend/package.json

@@ -23,6 +23,7 @@
 		"node-rsa": "^1.0.8",
 		"objection": "3.0.1",
 		"path": "^0.12.7",
+		"pg": "^8.13.1",
 		"signale": "1.4.0",
 		"sqlite3": "5.1.6",
 		"temp-write": "^4.0.0"

+ 3 - 1
backend/schema/components/stream-object.json

@@ -19,7 +19,9 @@
 		"incoming_port": {
 			"type": "integer",
 			"minimum": 1,
-			"maximum": 65535
+			"maximum": 65535,
+			"if": {"properties": {"tcp_forwarding": {"const": true}}},
+			"then": {"not": {"oneOf": [{"const": 80}, {"const": 443}]}}
 		},
 		"forwarding_host": {
 			"anyOf": [

+ 1 - 2
backend/schema/paths/nginx/access-lists/listID/put.json

@@ -49,8 +49,7 @@
 										"minLength": 1
 									},
 									"password": {
-										"type": "string",
-										"minLength": 1
+										"type": "string"
 									}
 								}
 							}

+ 8 - 8
backend/setup.js

@@ -15,18 +15,18 @@ const certbot             = require('./lib/certbot');
 const setupDefaultUser = () => {
 	return userModel
 		.query()
-		.select(userModel.raw('COUNT(`id`) as `count`'))
+		.select('id', )
 		.where('is_deleted', 0)
 		.first()
 		.then((row) => {
-			if (!row.count) {
+			if (!row || !row.id) {
 				// Create a new user and set password
-				let email    = process.env.INITIAL_ADMIN_EMAIL || '[email protected]';
-				let password = process.env.INITIAL_ADMIN_PASSWORD || 'changeme';
-				
+				const email    = process.env.INITIAL_ADMIN_EMAIL || '[email protected]';
+				const password = process.env.INITIAL_ADMIN_PASSWORD || 'changeme';
+
 				logger.info('Creating a new user: ' + email + ' with password: ' + password);
 
-				let data = {
+				const data = {
 					is_deleted: 0,
 					email:      email,
 					name:       'Administrator',
@@ -77,11 +77,11 @@ const setupDefaultUser = () => {
 const setupDefaultSettings = () => {
 	return settingModel
 		.query()
-		.select(settingModel.raw('COUNT(`id`) as `count`'))
+		.select('id')
 		.where({id: 'default-site'})
 		.first()
 		.then((row) => {
-			if (!row.count) {
+			if (!row || !row.id) {
 				settingModel
 					.query()
 					.insert({

+ 2 - 2
backend/templates/_access.conf

@@ -4,7 +4,7 @@
     auth_basic            "Authorization required";
     auth_basic_user_file  /data/access/{{ access_list_id }};
 
-    {% if access_list.pass_auth == 0 %}
+    {% if access_list.pass_auth == 0 or access_list.pass_auth == true %}
     proxy_set_header Authorization "";
     {% endif %}
 
@@ -17,7 +17,7 @@
     deny all;
 
     # Access checks must...
-    {% if access_list.satisfy_any == 1 %}
+    {% if access_list.satisfy_any == 1 or access_list.satisfy_any == true %}
     satisfy any;
     {% else %}
     satisfy all;

+ 7 - 2
backend/templates/_listen.conf

@@ -5,11 +5,16 @@
   #listen [::]:80;
 {% endif %}
 {% if certificate -%}
-  listen 443 ssl{% if http2_support == 1 or http2_support == true %} http2{% endif %};
+  listen 443 ssl;
 {% if ipv6 -%}
-  listen [::]:443 ssl{% if http2_support == 1 or http2_support == true %} http2{% endif %};
+  listen [::]:443 ssl;
 {% else -%}
   #listen [::]:443;
 {% endif %}
 {% endif %}
   server_name {{ domain_names | join: " " }};
+{% if http2_support == 1 or http2_support == true %}
+  http2 on;
+{% else -%}
+  http2 off;
+{% endif %}

+ 1 - 5
backend/templates/_location.conf

@@ -7,11 +7,7 @@
     proxy_set_header X-Forwarded-For    $remote_addr;
     proxy_set_header X-Real-IP		$remote_addr;
 
-    set $proxy_forward_scheme {{ forward_scheme }};
-    set $proxy_server         "{{ forward_host }}";
-    set $proxy_port           {{ forward_port }};
-
-    proxy_pass       $proxy_forward_scheme://$proxy_server:$proxy_port{{ forward_path }};
+    proxy_pass       {{ forward_scheme }}://{{ forward_host }}:{{ forward_port }}{{ forward_path }};
 
     {% include "_access.conf" %}
     {% include "_assets.conf" %}

+ 2 - 0
backend/templates/dead_host.conf

@@ -22,5 +22,7 @@ server {
   }
 {% endif %}
 
+  # Custom
+  include /data/nginx/custom/server_dead[.]conf;
 }
 {% endif %}

+ 91 - 3
backend/yarn.lock

@@ -830,9 +830,9 @@ crc32-stream@^4.0.2:
     readable-stream "^3.4.0"
 
 cross-spawn@^7.0.2:
-  version "7.0.3"
-  resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6"
-  integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==
+  version "7.0.6"
+  resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f"
+  integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==
   dependencies:
     path-key "^3.1.0"
     shebang-command "^2.0.0"
@@ -2735,11 +2735,67 @@ path@^0.12.7:
     process "^0.11.1"
     util "^0.10.3"
 
+pg-cloudflare@^1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/pg-cloudflare/-/pg-cloudflare-1.1.1.tgz#e6d5833015b170e23ae819e8c5d7eaedb472ca98"
+  integrity sha512-xWPagP/4B6BgFO+EKz3JONXv3YDgvkbVrGw2mTo3D6tVDQRh1e7cqVGvyR3BE+eQgAvx1XhW/iEASj4/jCWl3Q==
+
 [email protected]:
   version "2.5.0"
   resolved "https://registry.yarnpkg.com/pg-connection-string/-/pg-connection-string-2.5.0.tgz#538cadd0f7e603fc09a12590f3b8a452c2c0cf34"
   integrity sha512-r5o/V/ORTA6TmUnyWZR9nCj1klXCO2CEKNRlVuJptZe85QuhFayC7WeMic7ndayT5IRIR0S0xFxFi2ousartlQ==
 
+pg-connection-string@^2.7.0:
+  version "2.7.0"
+  resolved "https://registry.yarnpkg.com/pg-connection-string/-/pg-connection-string-2.7.0.tgz#f1d3489e427c62ece022dba98d5262efcb168b37"
+  integrity sha512-PI2W9mv53rXJQEOb8xNR8lH7Hr+EKa6oJa38zsK0S/ky2er16ios1wLKhZyxzD7jUReiWokc9WK5nxSnC7W1TA==
+
[email protected]:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/pg-int8/-/pg-int8-1.0.1.tgz#943bd463bf5b71b4170115f80f8efc9a0c0eb78c"
+  integrity sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==
+
+pg-pool@^3.7.0:
+  version "3.7.0"
+  resolved "https://registry.yarnpkg.com/pg-pool/-/pg-pool-3.7.0.tgz#d4d3c7ad640f8c6a2245adc369bafde4ebb8cbec"
+  integrity sha512-ZOBQForurqh4zZWjrgSwwAtzJ7QiRX0ovFkZr2klsen3Nm0aoh33Ls0fzfv3imeH/nw/O27cjdz5kzYJfeGp/g==
+
+pg-protocol@^1.7.0:
+  version "1.7.0"
+  resolved "https://registry.yarnpkg.com/pg-protocol/-/pg-protocol-1.7.0.tgz#ec037c87c20515372692edac8b63cf4405448a93"
+  integrity sha512-hTK/mE36i8fDDhgDFjy6xNOG+LCorxLG3WO17tku+ij6sVHXh1jQUJ8hYAnRhNla4QVD2H8er/FOjc/+EgC6yQ==
+
+pg-types@^2.1.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/pg-types/-/pg-types-2.2.0.tgz#2d0250d636454f7cfa3b6ae0382fdfa8063254a3"
+  integrity sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==
+  dependencies:
+    pg-int8 "1.0.1"
+    postgres-array "~2.0.0"
+    postgres-bytea "~1.0.0"
+    postgres-date "~1.0.4"
+    postgres-interval "^1.1.0"
+
+pg@^8.13.1:
+  version "8.13.1"
+  resolved "https://registry.yarnpkg.com/pg/-/pg-8.13.1.tgz#6498d8b0a87ff76c2df7a32160309d3168c0c080"
+  integrity sha512-OUir1A0rPNZlX//c7ksiu7crsGZTKSOXJPgtNiHGIlC9H0lO+NC6ZDYksSgBYY/thSWhnSRBv8w1lieNNGATNQ==
+  dependencies:
+    pg-connection-string "^2.7.0"
+    pg-pool "^3.7.0"
+    pg-protocol "^1.7.0"
+    pg-types "^2.1.0"
+    pgpass "1.x"
+  optionalDependencies:
+    pg-cloudflare "^1.1.1"
+
[email protected]:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/pgpass/-/pgpass-1.0.5.tgz#9b873e4a564bb10fa7a7dbd55312728d422a223d"
+  integrity sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug==
+  dependencies:
+    split2 "^4.1.0"
+
 picomatch@^2.0.4, picomatch@^2.2.1:
   version "2.2.2"
   resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.2.2.tgz#21f333e9b6b8eaff02468f5146ea406d345f4dad"
@@ -2758,6 +2814,28 @@ pkg-conf@^2.1.0:
     find-up "^2.0.0"
     load-json-file "^4.0.0"
 
+postgres-array@~2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/postgres-array/-/postgres-array-2.0.0.tgz#48f8fce054fbc69671999329b8834b772652d82e"
+  integrity sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==
+
+postgres-bytea@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/postgres-bytea/-/postgres-bytea-1.0.0.tgz#027b533c0aa890e26d172d47cf9ccecc521acd35"
+  integrity sha512-xy3pmLuQqRBZBXDULy7KbaitYqLcmxigw14Q5sj8QBVLqEwXfeybIKVWiqAXTlcvdvb0+xkOtDbfQMOf4lST1w==
+
+postgres-date@~1.0.4:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/postgres-date/-/postgres-date-1.0.7.tgz#51bc086006005e5061c591cee727f2531bf641a8"
+  integrity sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==
+
+postgres-interval@^1.1.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/postgres-interval/-/postgres-interval-1.2.0.tgz#b460c82cb1587507788819a06aa0fffdb3544695"
+  integrity sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==
+  dependencies:
+    xtend "^4.0.0"
+
 prelude-ls@^1.2.1:
   version "1.2.1"
   resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.2.1.tgz#debc6489d7a6e6b0e7611888cec880337d316396"
@@ -3194,6 +3272,11 @@ socks@^2.6.2:
     ip "^2.0.0"
     smart-buffer "^4.2.0"
 
+split2@^4.1.0:
+  version "4.2.0"
+  resolved "https://registry.yarnpkg.com/split2/-/split2-4.2.0.tgz#c9c5920904d148bab0b9f67145f245a86aadbfa4"
+  integrity sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==
+
 sprintf-js@~1.0.2:
   version "1.0.3"
   resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c"
@@ -3665,6 +3748,11 @@ xdg-basedir@^4.0.0:
   resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-4.0.0.tgz#4bc8d9984403696225ef83a1573cbbcb4e79db13"
   integrity sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==
 
+xtend@^4.0.0:
+  version "4.0.2"
+  resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.2.tgz#bb72779f5fa465186b1f438f674fa347fdb5db54"
+  integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==
+
 y18n@^4.0.0:
   version "4.0.1"
   resolved "https://registry.yarnpkg.com/y18n/-/y18n-4.0.1.tgz#8db2b83c31c5d75099bb890b23f3094891e247d4"

+ 8 - 0
docker/ci.env

@@ -0,0 +1,8 @@
+AUTHENTIK_SECRET_KEY=gl8woZe8L6IIX8SC0c5Ocsj0xPkX5uJo5DVZCFl+L/QGbzuplfutYuua2ODNLEiDD3aFd9H2ylJmrke0
+AUTHENTIK_REDIS__HOST=authentik-redis
+AUTHENTIK_POSTGRESQL__HOST=db-postgres
+AUTHENTIK_POSTGRESQL__USER=authentik
+AUTHENTIK_POSTGRESQL__NAME=authentik
+AUTHENTIK_POSTGRESQL__PASSWORD=07EKS5NLI6Tpv68tbdvrxfvj
+AUTHENTIK_BOOTSTRAP_PASSWORD=admin
[email protected]

binární
docker/ci/postgres/authentik.sql.gz


+ 2 - 1
docker/dev/Dockerfile

@@ -29,7 +29,8 @@ COPY scripts/install-s6 /tmp/install-s6
 RUN rm -f /etc/nginx/conf.d/production.conf \
 	&& chmod 644 /etc/logrotate.d/nginx-proxy-manager \
 	&& /tmp/install-s6 "${TARGETPLATFORM}" \
-	&& rm -f /tmp/install-s6
+	&& rm -f /tmp/install-s6 \
+	&& chmod 644 -R /root/.cache
 
 # Certs for testing purposes
 COPY --from=pebbleca /test/certs/pebble.minica.pem /etc/ssl/certs/pebble.minica.pem

+ 78 - 0
docker/docker-compose.ci.postgres.yml

@@ -0,0 +1,78 @@
+# WARNING: This is a CI docker-compose file used for building and testing of the entire app, it should not be used for production.
+services:
+
+  cypress:
+    environment:
+      CYPRESS_stack: 'postgres'
+
+  fullstack:
+    environment:
+      DB_POSTGRES_HOST: 'db-postgres'
+      DB_POSTGRES_PORT: '5432'
+      DB_POSTGRES_USER: 'npm'
+      DB_POSTGRES_PASSWORD: 'npmpass'
+      DB_POSTGRES_NAME: 'npm'
+    depends_on:
+      - db-postgres
+      - authentik
+      - authentik-worker
+      - authentik-ldap
+
+  db-postgres:
+    image: postgres:latest
+    environment:
+      POSTGRES_USER: 'npm'
+      POSTGRES_PASSWORD: 'npmpass'
+      POSTGRES_DB: 'npm'
+    volumes:
+      - psql_vol:/var/lib/postgresql/data
+      - ./ci/postgres:/docker-entrypoint-initdb.d
+    networks:
+      - fulltest
+
+  authentik-redis:
+    image: 'redis:alpine'
+    command: --save 60 1 --loglevel warning
+    restart: unless-stopped
+    healthcheck:
+      test: ['CMD-SHELL', 'redis-cli ping | grep PONG']
+      start_period: 20s
+      interval: 30s
+      retries: 5
+      timeout: 3s
+    volumes:
+      - redis_vol:/data
+
+  authentik:
+    image: ghcr.io/goauthentik/server:2024.10.1
+    restart: unless-stopped
+    command: server
+    env_file:
+      - ci.env
+    depends_on:
+      - authentik-redis
+      - db-postgres
+
+  authentik-worker:
+    image: ghcr.io/goauthentik/server:2024.10.1
+    restart: unless-stopped
+    command: worker
+    env_file:
+      - ci.env
+    depends_on:
+      - authentik-redis
+      - db-postgres
+
+  authentik-ldap:
+    image: ghcr.io/goauthentik/ldap:2024.10.1
+    environment:
+      AUTHENTIK_HOST: 'http://authentik:9000'
+      AUTHENTIK_INSECURE: 'true'
+      AUTHENTIK_TOKEN: 'wKYZuRcI0ETtb8vWzMCr04oNbhrQUUICy89hSpDln1OEKLjiNEuQ51044Vkp'
+    restart: unless-stopped
+    depends_on:
+      - authentik
+
+volumes:
+  psql_vol:
+  redis_vol:

+ 105 - 16
docker/docker-compose.dev.yml

@@ -2,8 +2,8 @@
 services:
 
   fullstack:
-    image: nginxproxymanager:dev
-    container_name: npm_core
+    image: npm2dev:core
+    container_name: npm2dev.core
     build:
       context: ./
       dockerfile: ./dev/Dockerfile
@@ -26,11 +26,17 @@ services:
       DEVELOPMENT: 'true'
       LE_STAGING: 'true'
       # db:
-      DB_MYSQL_HOST: 'db'
-      DB_MYSQL_PORT: '3306'
-      DB_MYSQL_USER: 'npm'
-      DB_MYSQL_PASSWORD: 'npm'
-      DB_MYSQL_NAME: 'npm'
+      # DB_MYSQL_HOST: 'db'
+      # DB_MYSQL_PORT: '3306'
+      # DB_MYSQL_USER: 'npm'
+      # DB_MYSQL_PASSWORD: 'npm'
+      # DB_MYSQL_NAME: 'npm'
+      # db-postgres:
+      DB_POSTGRES_HOST: 'db-postgres'
+      DB_POSTGRES_PORT: '5432'
+      DB_POSTGRES_USER: 'npm'
+      DB_POSTGRES_PASSWORD: 'npmpass'
+      DB_POSTGRES_NAME: 'npm'
       # DB_SQLITE_FILE: "/data/database.sqlite"
       # DISABLE_IPV6: "true"
       # Required for DNS Certificate provisioning testing:
@@ -49,11 +55,15 @@ services:
       timeout: 3s
     depends_on:
       - db
+      - db-postgres
+      - authentik
+      - authentik-worker
+      - authentik-ldap
     working_dir: /app
 
   db:
     image: jc21/mariadb-aria
-    container_name: npm_db
+    container_name: npm2dev.db
     ports:
       - 33306:3306
     networks:
@@ -66,8 +76,22 @@ services:
     volumes:
       - db_data:/var/lib/mysql
 
+  db-postgres:
+    image: postgres:latest
+    container_name: npm2dev.db-postgres
+    networks:
+      - nginx_proxy_manager
+    environment:
+      POSTGRES_USER: 'npm'
+      POSTGRES_PASSWORD: 'npmpass'
+      POSTGRES_DB: 'npm'
+    volumes:
+      - psql_data:/var/lib/postgresql/data
+      - ./ci/postgres:/docker-entrypoint-initdb.d
+
   stepca:
     image: jc21/testca
+    container_name: npm2dev.stepca
     volumes:
       - './dev/resolv.conf:/etc/resolv.conf:ro'
       - '/etc/localtime:/etc/localtime:ro'
@@ -78,6 +102,7 @@ services:
 
   dnsrouter:
     image: jc21/dnsrouter
+    container_name: npm2dev.dnsrouter
     volumes:
       - ./dev/dnsrouter-config.json.tmp:/dnsrouter-config.json:ro
     networks:
@@ -85,7 +110,7 @@ services:
 
   swagger:
     image: swaggerapi/swagger-ui:latest
-    container_name: npm_swagger
+    container_name: npm2dev.swagger
     ports:
       - 3082:80
     environment:
@@ -96,7 +121,7 @@ services:
 
   squid:
     image: ubuntu/squid
-    container_name: npm_squid
+    container_name: npm2dev.squid
     volumes:
       - './dev/squid.conf:/etc/squid/squid.conf:ro'
       - './dev/resolv.conf:/etc/resolv.conf:ro'
@@ -108,6 +133,7 @@ services:
 
   pdns:
     image: pschiffe/pdns-mysql
+    container_name: npm2dev.pdns
     volumes:
       - '/etc/localtime:/etc/localtime:ro'
     environment:
@@ -136,6 +162,7 @@ services:
 
   pdns-db:
     image: mariadb
+    container_name: npm2dev.pdns-db
     environment:
       MYSQL_ROOT_PASSWORD: 'pdns'
       MYSQL_DATABASE: 'pdns'
@@ -149,7 +176,8 @@ services:
       - nginx_proxy_manager
 
   cypress:
-    image: "npm_dev_cypress"
+    image: npm2dev:cypress
+    container_name: npm2dev.cypress
     build:
       context: ../
       dockerfile: test/cypress/Dockerfile
@@ -164,16 +192,77 @@ services:
     networks:
       - nginx_proxy_manager
 
+  authentik-redis:
+    image: 'redis:alpine'
+    container_name: npm2dev.authentik-redis
+    command: --save 60 1 --loglevel warning
+    networks:
+      - nginx_proxy_manager
+    restart: unless-stopped
+    healthcheck:
+      test: ['CMD-SHELL', 'redis-cli ping | grep PONG']
+      start_period: 20s
+      interval: 30s
+      retries: 5
+      timeout: 3s
+    volumes:
+      - redis_data:/data
+
+  authentik:
+    image: ghcr.io/goauthentik/server:2024.10.1
+    container_name: npm2dev.authentik
+    restart: unless-stopped
+    command: server
+    networks:
+      - nginx_proxy_manager
+    env_file:
+      - ci.env
+    ports:
+      - 9000:9000
+    depends_on:
+      - authentik-redis
+      - db-postgres
+
+  authentik-worker:
+    image: ghcr.io/goauthentik/server:2024.10.1
+    container_name: npm2dev.authentik-worker
+    restart: unless-stopped
+    command: worker
+    networks:
+      - nginx_proxy_manager
+    env_file:
+      - ci.env
+    depends_on:
+      - authentik-redis
+      - db-postgres
+
+  authentik-ldap:
+    image: ghcr.io/goauthentik/ldap:2024.10.1
+    container_name: npm2dev.authentik-ldap
+    networks:
+      - nginx_proxy_manager
+    environment:
+      AUTHENTIK_HOST: 'http://authentik:9000'
+      AUTHENTIK_INSECURE: 'true'
+      AUTHENTIK_TOKEN: 'wKYZuRcI0ETtb8vWzMCr04oNbhrQUUICy89hSpDln1OEKLjiNEuQ51044Vkp'
+    restart: unless-stopped
+    depends_on:
+      - authentik
+
 volumes:
   npm_data:
-    name: npm_core_data
+    name: npm2dev_core_data
   le_data:
-    name: npm_le_data
+    name: npm2dev_le_data
   db_data:
-    name: npm_db_data
+    name: npm2dev_db_data
   pdns_mysql:
-    name: npm_pdns_mysql
+    name: npm2dev_pdns_mysql
+  psql_data:
+    name: npm2dev_psql_data
+  redis_data:
+    name: npm2dev_redis_data
 
 networks:
   nginx_proxy_manager:
-    name: npm_network
+    name: npm2dev_network

+ 1 - 1
docker/rootfs/etc/nginx/conf.d/include/assets.conf

@@ -1,4 +1,4 @@
-location ~* ^.*\.(css|js|jpe?g|gif|png|webp|woff|eot|ttf|svg|ico|css\.map|js\.map)$ {
+location ~* ^.*\.(css|js|jpe?g|gif|png|webp|woff|woff2|eot|ttf|svg|ico|css\.map|js\.map)$ {
 	if_modified_since off;
 
 	# use the public cache

+ 1 - 3
docs/src/advanced-config/index.md

@@ -50,7 +50,6 @@ networks:
 Let's look at a Portainer example:
 
 ```yml
-version: '3.8'
 services:
 
   portainer:
@@ -92,8 +91,6 @@ This image supports the use of Docker secrets to import from files and keep sens
 You can set any environment variable from a file by appending `__FILE` (double-underscore FILE) to the environmental variable name.
 
 ```yml
-version: '3.8'
-
 secrets:
   # Secrets are single-line text files where the sole content is the secret
   # Paths in this example assume that secrets are kept in local folder called ".secrets"
@@ -184,6 +181,7 @@ You can add your custom configuration snippet files at `/data/nginx/custom` as f
  - `/data/nginx/custom/server_stream.conf`: Included at the end of every stream server block
  - `/data/nginx/custom/server_stream_tcp.conf`: Included at the end of every TCP stream server block
  - `/data/nginx/custom/server_stream_udp.conf`: Included at the end of every UDP stream server block
+ - `/data/nginx/custom/server_dead.conf`: Included at the end of every 404 server block
 
 Every file is optional.
 

+ 57 - 5
docs/src/setup/index.md

@@ -9,7 +9,6 @@ outline: deep
 Create a `docker-compose.yml` file:
 
 ```yml
-version: '3.8'
 services:
   app:
     image: 'jc21/nginx-proxy-manager:latest'
@@ -22,8 +21,7 @@ services:
       # Add any other Stream port you want to expose
       # - '21:21' # FTP
 
-    # Uncomment the next line if you uncomment anything in the section
-    # environment:
+    environment:
       # Uncomment this if you want to change the location of
       # the SQLite DB file within the container
       # DB_SQLITE_FILE: "/data/database.sqlite"
@@ -55,7 +53,6 @@ are going to use.
 Here is an example of what your `docker-compose.yml` will look like when using a MariaDB container:
 
 ```yml
-version: '3.8'
 services:
   app:
     image: 'jc21/nginx-proxy-manager:latest'
@@ -101,6 +98,53 @@ Please note, that `DB_MYSQL_*` environment variables will take precedent over `D
 
 :::
 
+## Using Postgres database
+
+Similar to the MySQL server setup:
+
+```yml
+services:
+  app:
+    image: 'jc21/nginx-proxy-manager:latest'
+    restart: unless-stopped
+    ports:
+      # These ports are in format <host-port>:<container-port>
+      - '80:80' # Public HTTP Port
+      - '443:443' # Public HTTPS Port
+      - '81:81' # Admin Web Port
+      # Add any other Stream port you want to expose
+      # - '21:21' # FTP
+    environment:
+      # Postgres parameters:
+      DB_POSTGRES_HOST: 'db'
+      DB_POSTGRES_PORT: '5432'
+      DB_POSTGRES_USER: 'npm'
+      DB_POSTGRES_PASSWORD: 'npmpass'
+      DB_POSTGRES_NAME: 'npm'
+      # Uncomment this if IPv6 is not enabled on your host
+      # DISABLE_IPV6: 'true'
+    volumes:
+      - ./data:/data
+      - ./letsencrypt:/etc/letsencrypt
+    depends_on:
+      - db
+
+  db:
+    image: postgres:latest
+    environment:
+      POSTGRES_USER: 'npm'
+      POSTGRES_PASSWORD: 'npmpass'
+      POSTGRES_DB: 'npm'
+    volumes:
+      - ./postgres:/var/lib/postgresql/data
+```
+
+::: warning
+
+Custom Postgres schema is not supported, as such `public` will be used.
+
+:::
+
 ## Running on Raspberry PI / ARM devices
 
 The docker images support the following architectures:
 Email:    admin@example.com
 Password: changeme
 ```
 
-Immediately after logging in with this default user you will be asked to modify your details and change your password.
+Immediately after logging in with this default user you will be asked to modify your details and change your password. You can change defaults with:
+
+
+```yml
+    environment:
+      INITIAL_ADMIN_EMAIL: my@example.com
+      INITIAL_ADMIN_PASSWORD: mypassword1
+```
+
 

+ 1 - 0
docs/src/third-party/index.md

@@ -12,6 +12,7 @@ Known integrations:
 - [HomeAssistant Hass.io plugin](https://github.com/hassio-addons/addon-nginx-proxy-manager)
 - [UnRaid / Synology](https://github.com/jlesage/docker-nginx-proxy-manager)
 - [Proxmox Scripts](https://github.com/ej52/proxmox-scripts/tree/main/apps/nginx-proxy-manager)
+- [Proxmox VE Helper-Scripts](https://community-scripts.github.io/ProxmoxVE/scripts?id=nginxproxymanager)
 - [nginxproxymanagerGraf](https://github.com/ma-karai/nginxproxymanagerGraf)
 
 

+ 3 - 3
docs/yarn.lock

@@ -873,9 +873,9 @@ mitt@^3.0.1:
   integrity sha512-vKivATfr97l2/QBCYAkXYDbrIWPM2IIKEl7YPhjCvKlG3kE2gm+uBo6nEXK3M5/Ffh/FLpKExzOQ3JJoJGFKBw==
 
 nanoid@^3.3.7:
-  version "3.3.7"
-  resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.7.tgz#d0c301a691bc8d54efa0a2226ccf3fe2fd656bd8"
-  integrity sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==
+  version "3.3.8"
+  resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.8.tgz#b1be3030bee36aaff18bacb375e5cce521684baf"
+  integrity sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w==
 
 [email protected]:
   version "0.4.3"

+ 2 - 2
frontend/js/app/nginx/access/list/item.ejs

@@ -1,6 +1,6 @@
 <td class="text-center">
-    <div class="avatar d-block" style="background-image: url(<%- owner.avatar || '/images/default-avatar.jpg' %>)" title="Owned by <%- owner.name %>">
-        <span class="avatar-status <%- owner.is_disabled ? 'bg-red' : 'bg-green' %>"></span>
+    <div class="avatar d-block" style="background-image: url(<%- (owner && owner.avatar) || '/images/default-avatar.jpg' %>)" title="Owned by <%- (owner && owner.name) || 'a deleted user' %>">
+        <span class="avatar-status <%- owner && !owner.is_disabled ? 'bg-green' : 'bg-red' %>"></span>
     </div>
 </td>
 <td>

+ 2 - 2
frontend/js/app/nginx/certificates/list/item.ejs

@@ -1,6 +1,6 @@
 <td class="text-center">
-    <div class="avatar d-block" style="background-image: url(<%- owner.avatar || '/images/default-avatar.jpg' %>)" title="Owned by <%- owner.name %>">
-        <span class="avatar-status <%- owner.is_disabled ? 'bg-red' : 'bg-green' %>"></span>
+    <div class="avatar d-block" style="background-image: url(<%- (owner && owner.avatar) || '/images/default-avatar.jpg' %>)" title="Owned by <%- (owner && owner.name) || 'a deleted user' %>">
+        <span class="avatar-status <%- owner && !owner.is_disabled ? 'bg-green' : 'bg-red' %>"></span>
     </div>
 </td>
 <td>

+ 2 - 2
frontend/js/app/nginx/dead/list/item.ejs

@@ -1,6 +1,6 @@
 <td class="text-center">
-    <div class="avatar d-block" style="background-image: url(<%- owner.avatar || '/images/default-avatar.jpg' %>)" title="Owned by <%- owner.name %>">
-        <span class="avatar-status <%- owner.is_disabled ? 'bg-red' : 'bg-green' %>"></span>
+    <div class="avatar d-block" style="background-image: url(<%- (owner && owner.avatar) || '/images/default-avatar.jpg' %>)" title="Owned by <%- (owner && owner.name) || 'a deleted user' %>">
+        <span class="avatar-status <%- owner && !owner.is_disabled ? 'bg-green' : 'bg-red' %>"></span>
     </div>
 </td>
 <td>

+ 2 - 2
frontend/js/app/nginx/proxy/list/item.ejs

@@ -1,6 +1,6 @@
 <td class="text-center">
-    <div class="avatar d-block" style="background-image: url(<%- owner.avatar || '/images/default-avatar.jpg' %>)" title="Owned by <%- owner.name %>">
-        <span class="avatar-status <%- owner.is_disabled ? 'bg-red' : 'bg-green' %>"></span>
+    <div class="avatar d-block" style="background-image: url(<%- (owner && owner.avatar) || '/images/default-avatar.jpg' %>)" title="Owned by <%- (owner && owner.name) || 'a deleted user' %>">
+        <span class="avatar-status <%- owner && !owner.is_disabled ? 'bg-green' : 'bg-red' %>"></span>
     </div>
 </td>
 <td>

+ 2 - 2
frontend/js/app/nginx/redirection/list/item.ejs

@@ -1,6 +1,6 @@
 <td class="text-center">
-    <div class="avatar d-block" style="background-image: url(<%- owner.avatar || '/images/default-avatar.jpg' %>)" title="Owned by <%- owner.name %>">
-        <span class="avatar-status <%- owner.is_disabled ? 'bg-red' : 'bg-green' %>"></span>
+    <div class="avatar d-block" style="background-image: url(<%- (owner && owner.avatar) || '/images/default-avatar.jpg' %>)" title="Owned by <%- (owner && owner.name) || 'a deleted user' %>">
+        <span class="avatar-status <%- owner && !owner.is_disabled ? 'bg-green' : 'bg-red' %>"></span>
     </div>
 </td>
 <td>

+ 2 - 2
frontend/js/app/nginx/stream/list/item.ejs

@@ -1,6 +1,6 @@
 <td class="text-center">
-    <div class="avatar d-block" style="background-image: url(<%- owner.avatar || '/images/default-avatar.jpg' %>)" title="Owned by <%- owner.name %>">
-        <span class="avatar-status <%- owner.is_disabled ? 'bg-red' : 'bg-green' %>"></span>
+    <div class="avatar d-block" style="background-image: url(<%- (owner && owner.avatar) || '/images/default-avatar.jpg' %>)" title="Owned by <%- (owner && owner.name) || 'a deleted user' %>">
+        <span class="avatar-status <%- owner && !owner.is_disabled ? 'bg-green' : 'bg-red' %>"></span>
     </div>
 </td>
 <td>

+ 12 - 12
frontend/js/app/user/form.ejs

@@ -1,10 +1,10 @@
 <div class="modal-content">
-    <div class="modal-header">
-        <h5 class="modal-title"><%- i18n('users', 'form-title', {id: id}) %></h5>
-        <button type="button" class="close cancel" aria-label="Close" data-dismiss="modal">&nbsp;</button>
-    </div>
-    <div class="modal-body">
-        <form>
+    <form>
+        <div class="modal-header">
+            <h5 class="modal-title"><%- i18n('users', 'form-title', {id: id}) %></h5>
+            <button type="button" class="close cancel" aria-label="Close" data-dismiss="modal">&nbsp;</button>
+        </div>
+        <div class="modal-body">
             <div class="row">
                 <div class="col-sm-6 col-md-6">
                     <div class="form-group">
@@ -49,10 +49,10 @@
                 </div>
                 <% } %>
             </div>
-        </form>
-    </div>
-    <div class="modal-footer">
-        <button type="button" class="btn btn-secondary cancel" data-dismiss="modal"><%- i18n('str', 'cancel') %></button>
-        <button type="button" class="btn btn-teal save"><%- i18n('str', 'save') %></button>
-    </div>
+        </div>
+        <div class="modal-footer">
+            <button type="button" class="btn btn-secondary cancel" data-dismiss="modal"><%- i18n('str', 'cancel') %></button>
+            <button type="submit" class="btn btn-teal save"><%- i18n('str', 'save') %></button>
+        </div>
+    </form>
 </div>

+ 1 - 1
frontend/js/app/user/form.js

@@ -19,7 +19,7 @@ module.exports = Mn.View.extend({
 
     events: {
 
-        'click @ui.save': function (e) {
+        'submit @ui.form': function (e) {
             e.preventDefault();
             this.ui.error.hide();
             let view = this;

+ 3 - 3
frontend/yarn.lock

@@ -2648,9 +2648,9 @@ electron-to-chromium@^1.3.47:
   integrity sha512-67V62Z4CFOiAtox+o+tosGfVk0QX4DJgH609tjT8QymbJZVAI/jWnAthnr8c5hnRNziIRwkc9EMQYejiVz3/9Q==
 
 elliptic@^6.5.3, elliptic@^6.5.4:
-  version "6.5.7"
-  resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.5.7.tgz#8ec4da2cb2939926a1b9a73619d768207e647c8b"
-  integrity sha512-ESVCtTwiA+XhY3wyh24QqRGBoP3rEdDUl3EDUUo9tft074fi19IrdpH7hLCMMP3CIj7jb3W96rn8lt/BqIlt5Q==
+  version "6.6.0"
+  resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.6.0.tgz#5919ec723286c1edf28685aa89261d4761afa210"
+  integrity sha512-dpwoQcLc/2WLQvJvLRHKZ+f9FgOdjnq11rurqwekGQygGPsYSK29OMMD2WalatiqQ+XGFDglTNixpPfI+lpaAA==
   dependencies:
     bn.js "^4.11.9"
     brorand "^1.1.0"

+ 36 - 12
global/certbot-dns-plugins.json

@@ -7,7 +7,7 @@
 		"credentials": "dns_acmedns_api_url = http://acmedns-server/\ndns_acmedns_registration_file = /data/acme-registration.json",
 		"full_plugin_name": "dns-acmedns"
 	},
-    "active24":{
+	"active24": {
 		"name": "Active24",
 		"package_name": "certbot-dns-active24",
 		"version": "~=1.5.1",
@@ -18,7 +18,7 @@
 	"aliyun": {
 		"name": "Aliyun",
 		"package_name": "certbot-dns-aliyun",
-		"version": "~=0.38.1",
+		"version": "~=2.0.0",
 		"dependencies": "",
 		"credentials": "dns_aliyun_access_key = 12345678\ndns_aliyun_access_key_secret = 1234567890abcdef1234567890abcdef",
 		"full_plugin_name": "dns-aliyun"
@@ -31,6 +31,14 @@
 		"credentials": "# This plugin supported API authentication using either Service Principals or utilizing a Managed Identity assigned to the virtual machine.\n# Regardless which authentication method used, the identity will need the “DNS Zone Contributor” role assigned to it.\n# As multiple Azure DNS Zones in multiple resource groups can exist, the config file needs a mapping of zone to resource group ID. Multiple zones -> ID mappings can be listed by using the key dns_azure_zoneX where X is a unique number. At least 1 zone mapping is required.\n\n# Using a service principal (option 1)\ndns_azure_sp_client_id = 912ce44a-0156-4669-ae22-c16a17d34ca5\ndns_azure_sp_client_secret = E-xqXU83Y-jzTI6xe9fs2YC~mck3ZzUih9\ndns_azure_tenant_id = ed1090f3-ab18-4b12-816c-599af8a88cf7\n\n# Using used assigned MSI (option 2)\n# dns_azure_msi_client_id = 912ce44a-0156-4669-ae22-c16a17d34ca5\n\n# Using system assigned MSI (option 3)\n# dns_azure_msi_system_assigned = true\n\n# Zones (at least one always required)\ndns_azure_zone1 = example.com:/subscriptions/c135abce-d87d-48df-936c-15596c6968a5/resourceGroups/dns1\ndns_azure_zone2 = example.org:/subscriptions/99800903-fb14-4992-9aff-12eaf2744622/resourceGroups/dns2",
 		"full_plugin_name": "dns-azure"
 	},
+	"beget": {
+		"name": "Beget",
+		"package_name": "certbot-beget-plugin",
+		"version": "~=1.0.0.dev9",
+		"dependencies": "",
+		"credentials": "# Beget API credentials used by Certbot\nbeget_plugin_username = username\nbeget_plugin_password = password",
+		"full_plugin_name": "beget-plugin"
+	},
 	"bunny": {
 		"name": "bunny.net",
 		"package_name": "certbot-dns-bunny",
@@ -247,6 +255,14 @@
 		"credentials": "dns_hetzner_api_token = 0123456789abcdef0123456789abcdef",
 		"full_plugin_name": "dns-hetzner"
 	},
+	"hostingnl": {
+		"name": "Hosting.nl",
+		"package_name": "certbot-dns-hostingnl",
+		"version": "~=0.1.5",
+		"dependencies": "",
+		"credentials": "dns_hostingnl_api_key = 0123456789abcdef0123456789abcdef",
+		"full_plugin_name": "dns-hostingnl"
+	},
 	"hover": {
 		"name": "Hover",
 		"package_name": "certbot-dns-hover",
@@ -303,6 +319,14 @@
 		"credentials": "dns_joker_username = <Dynamic DNS Authentication Username>\ndns_joker_password = <Dynamic DNS Authentication Password>\ndns_joker_domain = <Dynamic DNS Domain>",
 		"full_plugin_name": "dns-joker"
 	},
+	"leaseweb": {
+		"name": "LeaseWeb",
+		"package_name": "certbot-dns-leaseweb",
+		"version": "~=1.0.1",
+		"dependencies": "",
+		"credentials": "dns_leaseweb_api_token = 01234556789",
+		"full_plugin_name": "dns-leaseweb"
+	},
 	"linode": {
 		"name": "Linode",
 		"package_name": "certbot-dns-linode",
@@ -394,7 +418,7 @@
 	"porkbun": {
 		"name": "Porkbun",
 		"package_name": "certbot-dns-porkbun",
-		"version": "~=0.2",
+		"version": "~=0.9",
 		"dependencies": "",
 		"credentials": "dns_porkbun_key=your-porkbun-api-key\ndns_porkbun_secret=your-porkbun-api-secret",
 		"full_plugin_name": "dns-porkbun"
@@ -424,13 +448,13 @@
 		"full_plugin_name": "dns-rfc2136"
 	},
 	"rockenstein": {
-                "name": "rockenstein AG",
-                "package_name": "certbot-dns-rockenstein",
-                "version": "~=1.0.0",
-                "dependencies": "",
-                "credentials": "dns_rockenstein_token=<token>",
-                "full_plugin_name": "dns-rockenstein"
-        },
+		"name": "rockenstein AG",
+		"package_name": "certbot-dns-rockenstein",
+		"version": "~=1.0.0",
+		"dependencies": "",
+		"credentials": "dns_rockenstein_token=<token>",
+		"full_plugin_name": "dns-rockenstein"
+	},
 	"route53": {
 		"name": "Route 53 (Amazon)",
 		"package_name": "certbot-dns-route53",
@@ -487,7 +511,7 @@
 		"credentials": "dns_websupport_identifier = <api_key>\ndns_websupport_secret_key = <secret>",
 		"full_plugin_name": "dns-websupport"
 	},
-	"wedos":{
+	"wedos": {
 		"name": "Wedos",
 		"package_name": "certbot-dns-wedos",
 		"version": "~=2.2",
@@ -503,4 +527,4 @@
 		"credentials": "edgedns_client_secret = as3d1asd5d1a32sdfsdfs2d1asd5=\nedgedns_host = sdflskjdf-dfsdfsdf-sdfsdfsdf.luna.akamaiapis.net\nedgedns_access_token = kjdsi3-34rfsdfsdf-234234fsdfsdf\nedgedns_client_token = dkfjdf-342fsdfsd-23fsdfsdfsdf",
 		"full_plugin_name": "edgedns"
 	}
-}
+}

+ 1 - 1
scripts/.common.sh

@@ -11,7 +11,7 @@ YELLOW='\E[1;33m'
 export BLUE CYAN GREEN RED RESET YELLOW
 
 # Docker Compose
-COMPOSE_PROJECT_NAME="npmdev"
+COMPOSE_PROJECT_NAME="npm2dev"
 COMPOSE_FILE="docker/docker-compose.dev.yml"
 
 export COMPOSE_FILE COMPOSE_PROJECT_NAME

+ 2 - 0
scripts/ci/fulltest-cypress

@@ -67,6 +67,8 @@ printf "nameserver %s\noptions ndots:0" "${DNSROUTER_IP}" > "${LOCAL_RESOLVE}"
 # bring up all remaining containers, except cypress!
 docker-compose up -d --remove-orphans stepca squid
 docker-compose pull db-mysql || true # ok to fail
+docker-compose pull db-postgres || true # ok to fail
+docker-compose pull authentik authentik-redis authentik-ldap || true # ok to fail
 docker-compose up -d --remove-orphans --pull=never fullstack
 
 # wait for main container to be healthy

+ 5 - 6
scripts/start-dev

@@ -36,12 +36,11 @@ if hash docker-compose 2>/dev/null; then
 
 	# bring up all remaining containers, except cypress!
 	docker-compose up -d --remove-orphans stepca squid
-	docker-compose pull db
-	docker-compose up -d --remove-orphans --pull=never fullstack
+	docker-compose pull db db-postgres authentik-redis authentik authentik-worker authentik-ldap
+	docker-compose build --pull --parallel fullstack
+	docker-compose up -d --remove-orphans fullstack
 	docker-compose up -d --remove-orphans swagger
 
-	# docker-compose up -d --remove-orphans --force-recreate --build
-
 	# wait for main container to be healthy
 	bash "$DIR/wait-healthy" "$(docker-compose ps --all -q fullstack)" 120
 
@@ -53,10 +52,10 @@ if hash docker-compose 2>/dev/null; then
 
 	if [ "$1" == "-f" ]; then
 		echo -e "${BLUE}❯ ${YELLOW}Following Backend Container:${RESET}"
-		docker logs -f npm_core
+		docker logs -f npm2dev.core
 	else
 		echo -e "${YELLOW}Hint:${RESET} You can follow the output of some of the containers with:"
-		echo "  docker logs -f npm_core"
+		echo "  docker logs -f npm2dev.core"
 	fi
 else
 	echo -e "${RED}❯ docker-compose command is not available${RESET}"

+ 64 - 0
test/cypress/e2e/api/Ldap.cy.js

@@ -0,0 +1,64 @@
+/// <reference types="cypress" />
+
+describe('LDAP with Authentik', () => {
+	let token;
+	if (Cypress.env('skipStackCheck') === 'true' || Cypress.env('stack') === 'postgres') {
+
+		before(() => {
+			cy.getToken().then((tok) => {
+				token = tok;
+
+				// cy.task('backendApiPut', {
+				// 	token: token,
+				// 	path:  '/api/settings/ldap-auth',
+				// 	data:  {
+				// 		value: {
+				// 			host: 'authentik-ldap:3389',
+				// 			base_dn: 'ou=users,DC=ldap,DC=goauthentik,DC=io',
+				// 			user_dn: 'cn={{USERNAME}},ou=users,DC=ldap,DC=goauthentik,DC=io',
+				// 			email_property: 'mail',
+				// 			name_property: 'sn',
+				// 			self_filter: '(&(cn={{USERNAME}})(ak-active=TRUE))',
+				// 			auto_create_user: true
+				// 		}
+				// 	}
+				// }).then((data) => {
+				// 	cy.validateSwaggerSchema('put', 200, '/settings/{name}', data);
+				// 	expect(data.result).to.have.property('id');
+				// 	expect(data.result.id).to.be.greaterThan(0);
+				// });
+
+				// cy.task('backendApiPut', {
+				// 	token: token,
+				// 	path:  '/api/settings/auth-methods',
+				// 	data:  {
+				// 		value: [
+				// 			'local',
+				// 			'ldap'
+				// 		]
+				// 	}
+				// }).then((data) => {
+				// 	cy.validateSwaggerSchema('put', 200, '/settings/{name}', data);
+				// 	expect(data.result).to.have.property('id');
+				// 	expect(data.result.id).to.be.greaterThan(0);
+				// });
+			});
+		});
+
+		it.skip('Should log in with LDAP', function() {
+			// cy.task('backendApiPost', {
+			// 	token: token,
+			// 	path:  '/api/auth',
+			// 	data:  {
+			// 		// Authentik LDAP creds:
+			// 		type: 'ldap',
+			// 		identity: 'cypress',
+			// 		secret: 'fqXBfUYqHvYqiwBHWW7f'
+			// 	}
+			// }).then((data) => {
+			// 	cy.validateSwaggerSchema('post', 200, '/auth', data);
+			// 	expect(data.result).to.have.property('token');
+			// });
+		});
+	}
+});

+ 97 - 0
test/cypress/e2e/api/OAuth.cy.js

@@ -0,0 +1,97 @@
+/// <reference types="cypress" />
+
+describe('OAuth with Authentik', () => {
+	let token;
+	if (Cypress.env('skipStackCheck') === 'true' || Cypress.env('stack') === 'postgres') {
+
+		before(() => {
+			cy.getToken().then((tok) => {
+				token = tok;
+
+				// cy.task('backendApiPut', {
+				// 	token: token,
+				// 	path:  '/api/settings/oauth-auth',
+				// 	data:  {
+				// 		value: {
+				// 			client_id: '7iO2AvuUp9JxiSVkCcjiIbQn4mHmUMBj7yU8EjqU',
+				// 			client_secret: 'VUMZzaGTrmXJ8PLksyqzyZ6lrtz04VvejFhPMBP9hGZNCMrn2LLBanySs4ta7XGrDr05xexPyZT1XThaf4ubg00WqvHRVvlu4Naa1aMootNmSRx3VAk6RSslUJmGyHzq',
+				// 			authorization_url: 'http://authentik:9000/application/o/authorize/',
+				// 			resource_url: 'http://authentik:9000/application/o/userinfo/',
+				// 			token_url: 'http://authentik:9000/application/o/token/',
+				// 			logout_url: 'http://authentik:9000/application/o/npm/end-session/',
+				// 			identifier: 'preferred_username',
+				// 			scopes: [],
+				// 			auto_create_user: true
+				// 		}
+				// 	}
+				// }).then((data) => {
+				// 	cy.validateSwaggerSchema('put', 200, '/settings/{name}', data);
+				// 	expect(data.result).to.have.property('id');
+				// 	expect(data.result.id).to.be.greaterThan(0);
+				// });
+
+				// cy.task('backendApiPut', {
+				// 	token: token,
+				// 	path:  '/api/settings/auth-methods',
+				// 	data:  {
+				// 		value: [
+				// 			'local',
+				// 			'oauth'
+				// 		]
+				// 	}
+				// }).then((data) => {
+				// 	cy.validateSwaggerSchema('put', 200, '/settings/{name}', data);
+				// 	expect(data.result).to.have.property('id');
+				// 	expect(data.result.id).to.be.greaterThan(0);
+				// });
+			});
+		});
+
+		it.skip('Should log in with OAuth', function() {
+			// cy.task('backendApiGet', {
+			// 	path:  '/oauth/login?redirect_base=' + encodeURI(Cypress.config('baseUrl')),
+			// }).then((data) => {
+			// 	expect(data).to.have.property('result');
+
+			// 	cy.origin('http://authentik:9000', {args: data.result}, (url) => {
+			// 		cy.visit(url);
+			// 		cy.get('ak-flow-executor')
+			// 		.shadow()
+			// 		.find('ak-stage-identification')
+			// 		.shadow()
+			// 		.find('input[name="uidField"]', { visible: true })
+			// 		.type('cypress');
+
+			// 	cy.get('ak-flow-executor')
+			// 		.shadow()
+			// 		.find('ak-stage-identification')
+			// 		.shadow()
+			// 		.find('button[type="submit"]', { visible: true })
+			// 		.click();
+
+			// 	cy.get('ak-flow-executor')
+			// 		.shadow()
+			// 		.find('ak-stage-password')
+			// 		.shadow()
+			// 		.find('input[name="password"]', { visible: true })
+			// 		.type('fqXBfUYqHvYqiwBHWW7f');
+
+			// 	cy.get('ak-flow-executor')
+			// 		.shadow()
+			// 		.find('ak-stage-password')
+			// 		.shadow()
+			// 		.find('button[type="submit"]', { visible: true })
+			// 		.click();
+			// 	})
+
+			// 	// we should be logged in
+			// 	cy.get('#root p.chakra-text')
+			// 		.first()
+			// 		.should('have.text', 'Nginx Proxy Manager');
+
+			// 	// logout:
+			// 	cy.clearLocalStorage();
+			// });
+		});
+	}
+});

+ 6 - 6
test/yarn.lock

@@ -132,9 +132,9 @@
   integrity sha512-BsWiH1yFGjXXS2yvrf5LyuoSIIbPrGUWob917o+BTKuZ7qJdxX8aJLRxs1fS9n6r7vESrq1OUqb68dANcFXuQQ==
 
 "@eslint/plugin-kit@^0.2.0":
-  version "0.2.0"
-  resolved "https://registry.yarnpkg.com/@eslint/plugin-kit/-/plugin-kit-0.2.0.tgz#8712dccae365d24e9eeecb7b346f85e750ba343d"
-  integrity sha512-vH9PiIMMwvhCx31Af3HiGzsVNULDbyVkHXwlemn/B0TFj/00ho3y55efXrUZTfQipxoHC5u4xq6zblww1zm1Ig==
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/@eslint/plugin-kit/-/plugin-kit-0.2.3.tgz#812980a6a41ecf3a8341719f92a6d1e784a2e0e8"
+  integrity sha512-2b/g5hRmpbb1o4GnTZax9N9m0FXzz9OV42ZzI4rDDMDuHUqigAiQCEWChBWCY4ztAGVRjoWT19v0yMmc5/L5kA==
   dependencies:
     levn "^0.4.1"
 
@@ -628,9 +628,9 @@ [email protected]:
   integrity sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=
 
 cross-spawn@^7.0.0, cross-spawn@^7.0.2:
-  version "7.0.3"
-  resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6"
-  integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==
+  version "7.0.6"
+  resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f"
+  integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==
   dependencies:
     path-key "^3.1.0"
     shebang-command "^2.0.0"