lifecycle: cleanup prometheus (#2972)

* remove high cardinality labels

Signed-off-by: Jens Langhammer <jens.langhammer@beryju.org>

* retry worker number for prometheus multiprocess id

Signed-off-by: Jens Langhammer <jens.langhammer@beryju.org>

* revert to pid, use subdirectories

Signed-off-by: Jens Langhammer <jens.langhammer@beryju.org>

* cleanup more

Signed-off-by: Jens Langhammer <jens.langhammer@beryju.org>

* use worker id based off of https://github.com/benoitc/gunicorn/issues/1352

Signed-off-by: Jens Langhammer <jens.langhammer@beryju.org>

* fix missing app label

Signed-off-by: Jens Langhammer <jens.langhammer@beryju.org>

* tests/e2e: remove static names

Signed-off-by: Jens Langhammer <jens.langhammer@beryju.org>

* fix

Signed-off-by: Jens Langhammer <jens.langhammer@beryju.org>
Jens L 2022-05-29 21:45:25 +02:00 committed by GitHub
parent 9f2529c886
commit 3eb466ff4b
19 changed files with 129 additions and 148 deletions
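
Background on the "remove high cardinality labels" change below: Prometheus creates a separate time series for every distinct combination of label values, so labels carrying per-user names, client IPs, bind DNs, or request paths grow the series count without bound, while a small fixed label set keeps it bounded. A minimal prometheus_client sketch of the before/after shape (metric and label names here are illustrative, not taken from authentik):

from prometheus_client import Histogram

# Before: unbounded cardinality, one series per (path, user, client) combination ever seen.
requests_high_cardinality = Histogram(
    "example_requests_high_cardinality",
    "Request duration keyed on unbounded values",
    ["path", "user", "client"],
)

# After: a small, fixed label set, mirroring the metrics trimmed in this commit.
requests = Histogram(
    "example_requests",
    "Request duration keyed on low-cardinality dimensions only",
    ["method", "type"],
)
requests.labels(method="GET", type="app").observe(0.042)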

View File

@@ -2,10 +2,6 @@
from importlib import import_module
from django.apps import AppConfig
from django.db import ProgrammingError
from authentik.core.signals import GAUGE_MODELS
from authentik.lib.utils.reflection import get_apps
class AuthentikCoreConfig(AppConfig):
@@ -19,12 +15,3 @@ class AuthentikCoreConfig(AppConfig):
def ready(self):
import_module("authentik.core.signals")
import_module("authentik.core.managed")
try:
for app in get_apps():
for model in app.get_models():
GAUGE_MODELS.labels(
model_name=model._meta.model_name,
app=model._meta.app_label,
).set(model.objects.count())
except ProgrammingError:
pass

View File

@@ -1,7 +1,6 @@
"""authentik core signals"""
from typing import TYPE_CHECKING
from django.apps import apps
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.contrib.sessions.backends.cache import KEY_PREFIX
from django.core.cache import cache
@@ -10,30 +9,14 @@ from django.db.models import Model
from django.db.models.signals import post_save, pre_delete
from django.dispatch import receiver
from django.http.request import HttpRequest
from prometheus_client import Gauge
from authentik.root.monitoring import monitoring_set
# Arguments: user: User, password: str
password_changed = Signal()
GAUGE_MODELS = Gauge("authentik_models", "Count of various objects", ["model_name", "app"])
if TYPE_CHECKING:
from authentik.core.models import AuthenticatedSession, User
@receiver(monitoring_set)
# pylint: disable=unused-argument
def monitoring_set_models(sender, **kwargs):
"""set models gauges"""
for model in apps.get_models():
GAUGE_MODELS.labels(
model_name=model._meta.model_name,
app=model._meta.app_label,
).set(model.objects.count())
@receiver(post_save)
# pylint: disable=unused-argument
def post_save_application(sender: type[Model], instance, created: bool, **_):

View File

@@ -23,7 +23,7 @@ GAUGE_POLICIES_CACHED = Gauge(
HIST_POLICIES_BUILD_TIME = Histogram(
"authentik_policies_build_time",
"Execution times complete policy result to an object",
["object_name", "object_type", "user"],
["object_pk", "object_type"],
)
@@ -91,9 +91,8 @@ class PolicyEngine:
op="authentik.policy.engine.build",
description=self.__pbm,
) as span, HIST_POLICIES_BUILD_TIME.labels(
object_name=self.__pbm,
object_pk=str(self.__pbm.pk),
object_type=f"{self.__pbm._meta.app_label}.{self.__pbm._meta.model_name}",
user=self.request.user,
).time():
span: Span
span.set_data("pbm", self.__pbm)

View File

@@ -28,9 +28,8 @@ HIST_POLICIES_EXECUTION_TIME = Histogram(
"binding_order",
"binding_target_type",
"binding_target_name",
"object_name",
"object_pk",
"object_type",
"user",
],
)
@@ -137,9 +136,8 @@ class PolicyProcess(PROCESS_CLASS):
binding_order=self.binding.order,
binding_target_type=self.binding.target_type,
binding_target_name=self.binding.target_name,
object_name=self.request.obj,
object_pk=str(self.request.obj.pk),
object_type=f"{self.request.obj._meta.app_label}.{self.request.obj._meta.model_name}",
user=str(self.request.user),
).time():
span: Span
span.set_data("policy", self.binding.policy)

View File

@@ -409,12 +409,12 @@ LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"plain": {
"json": {
"()": structlog.stdlib.ProcessorFormatter,
"processor": structlog.processors.JSONRenderer(sort_keys=True),
"foreign_pre_chain": LOG_PRE_CHAIN,
},
"colored": {
"console": {
"()": structlog.stdlib.ProcessorFormatter,
"processor": structlog.dev.ConsoleRenderer(colors=DEBUG),
"foreign_pre_chain": LOG_PRE_CHAIN,
@@ -424,7 +424,7 @@ LOGGING = {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "colored" if DEBUG else "plain",
"formatter": "console" if DEBUG else "json",
},
},
"loggers": {},

View File

@@ -23,11 +23,11 @@ var (
FlowTimingGet = promauto.NewHistogramVec(prometheus.HistogramOpts{
Name: "authentik_outpost_flow_timing_get",
Help: "Duration it took to get a challenge",
}, []string{"stage", "flow", "client", "user"})
}, []string{"stage", "flow"})
FlowTimingPost = promauto.NewHistogramVec(prometheus.HistogramOpts{
Name: "authentik_outpost_flow_timing_post",
Help: "Duration it took to send a challenge",
}, []string{"stage", "flow", "client", "user"})
}, []string{"stage", "flow"})
)
type FlowExecutor struct {
@@ -163,10 +163,8 @@ func (fe *FlowExecutor) solveFlowChallenge(depth int) (bool, error) {
gcsp.SetTag("authentik.flow.component", ch.GetComponent())
gcsp.Finish()
FlowTimingGet.With(prometheus.Labels{
"stage": ch.GetComponent(),
"flow": fe.flowSlug,
"client": fe.cip,
"user": fe.Answers[StageIdentification],
"stage": ch.GetComponent(),
"flow": fe.flowSlug,
}).Observe(float64(gcsp.EndTime.Sub(gcsp.StartTime)))
// Resolve challenge
@@ -230,10 +228,8 @@ func (fe *FlowExecutor) solveFlowChallenge(depth int) (bool, error) {
}
}
FlowTimingPost.With(prometheus.Labels{
"stage": ch.GetComponent(),
"flow": fe.flowSlug,
"client": fe.cip,
"user": fe.Answers[StageIdentification],
"stage": ch.GetComponent(),
"flow": fe.flowSlug,
}).Observe(float64(scsp.EndTime.Sub(scsp.StartTime)))
if depth >= 10 {

View File

@@ -9,20 +9,17 @@ import (
log "github.com/sirupsen/logrus"
"goauthentik.io/internal/outpost/ldap/bind"
"goauthentik.io/internal/outpost/ldap/metrics"
"goauthentik.io/internal/utils"
)
func (ls *LDAPServer) Bind(bindDN string, bindPW string, conn net.Conn) (ldap.LDAPResultCode, error) {
req, span := bind.NewRequest(bindDN, bindPW, conn)
selectedApp := ""
defer func() {
span.Finish()
metrics.Requests.With(prometheus.Labels{
"outpost_name": ls.ac.Outpost.Name,
"type": "bind",
"filter": "",
"dn": req.BindDN,
"client": req.RemoteAddr(),
"app": selectedApp,
}).Observe(float64(span.EndTime.Sub(span.StartTime)))
req.Log().WithField("took-ms", span.EndTime.Sub(span.StartTime).Milliseconds()).Info("Bind request")
}()
@@ -39,6 +36,7 @@ func (ls *LDAPServer) Bind(bindDN string, bindPW string, conn net.Conn) (ldap.LD
for _, instance := range ls.providers {
username, err := instance.binder.GetUsername(bindDN)
if err == nil {
selectedApp = instance.GetAppSlug()
return instance.binder.Bind(username, req)
} else {
req.Log().WithError(err).Debug("Username not for instance")
@@ -49,8 +47,7 @@ func (ls *LDAPServer) Bind(bindDN string, bindPW string, conn net.Conn) (ldap.LD
"outpost_name": ls.ac.Outpost.Name,
"type": "bind",
"reason": "no_provider",
"dn": bindDN,
"client": utils.GetIP(conn.RemoteAddr()),
"app": "",
}).Inc()
return ldap.LDAPResultOperationsError, nil
}

View File

@@ -75,8 +75,7 @@ func (db *DirectBinder) Bind(username string, req *bind.Request) (ldap.LDAPResul
"outpost_name": db.si.GetOutpostName(),
"type": "bind",
"reason": "invalid_credentials",
"dn": req.BindDN,
"client": req.RemoteAddr(),
"app": db.si.GetAppSlug(),
}).Inc()
req.Log().Info("Invalid credentials")
return ldap.LDAPResultInvalidCredentials, nil
@@ -86,8 +85,7 @@ func (db *DirectBinder) Bind(username string, req *bind.Request) (ldap.LDAPResul
"outpost_name": db.si.GetOutpostName(),
"type": "bind",
"reason": "flow_error",
"dn": req.BindDN,
"client": req.RemoteAddr(),
"app": db.si.GetAppSlug(),
}).Inc()
req.Log().WithError(err).Warning("failed to execute flow")
return ldap.LDAPResultOperationsError, nil
@@ -100,8 +98,7 @@ func (db *DirectBinder) Bind(username string, req *bind.Request) (ldap.LDAPResul
"outpost_name": db.si.GetOutpostName(),
"type": "bind",
"reason": "access_denied",
"dn": req.BindDN,
"client": req.RemoteAddr(),
"app": db.si.GetAppSlug(),
}).Inc()
return ldap.LDAPResultInsufficientAccessRights, nil
}
@@ -110,8 +107,7 @@ func (db *DirectBinder) Bind(username string, req *bind.Request) (ldap.LDAPResul
"outpost_name": db.si.GetOutpostName(),
"type": "bind",
"reason": "access_check_fail",
"dn": req.BindDN,
"client": req.RemoteAddr(),
"app": db.si.GetAppSlug(),
}).Inc()
req.Log().WithError(err).Warning("failed to check access")
return ldap.LDAPResultOperationsError, nil
@@ -125,8 +121,7 @@ func (db *DirectBinder) Bind(username string, req *bind.Request) (ldap.LDAPResul
"outpost_name": db.si.GetOutpostName(),
"type": "bind",
"reason": "user_info_fail",
"dn": req.BindDN,
"client": req.RemoteAddr(),
"app": db.si.GetAppSlug(),
}).Inc()
req.Log().WithError(err).Warning("failed to get user info")
return ldap.LDAPResultOperationsError, nil

View File

@@ -15,11 +15,11 @@ var (
Requests = promauto.NewHistogramVec(prometheus.HistogramOpts{
Name: "authentik_outpost_ldap_requests",
Help: "The total number of configured providers",
}, []string{"outpost_name", "type", "dn", "filter", "client"})
}, []string{"outpost_name", "type", "app"})
RequestsRejected = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "authentik_outpost_ldap_requests_rejected",
Help: "Total number of rejected requests",
}, []string{"outpost_name", "type", "reason", "dn", "client"})
}, []string{"outpost_name", "type", "reason", "app"})
)
func RunServer() {

View File

@@ -12,20 +12,17 @@ import (
log "github.com/sirupsen/logrus"
"goauthentik.io/internal/outpost/ldap/metrics"
"goauthentik.io/internal/outpost/ldap/search"
"goauthentik.io/internal/utils"
)
func (ls *LDAPServer) Search(bindDN string, searchReq ldap.SearchRequest, conn net.Conn) (ldap.ServerSearchResult, error) {
req, span := search.NewRequest(bindDN, searchReq, conn)
selectedApp := ""
defer func() {
span.Finish()
metrics.Requests.With(prometheus.Labels{
"outpost_name": ls.ac.Outpost.Name,
"type": "search",
"filter": req.Filter,
"dn": req.BindDN,
"client": utils.GetIP(conn.RemoteAddr()),
"app": selectedApp,
}).Observe(float64(span.EndTime.Sub(span.StartTime)))
req.Log().WithField("took-ms", span.EndTime.Sub(span.StartTime).Milliseconds()).Info("Search request")
}()
@@ -50,6 +47,7 @@ func (ls *LDAPServer) Search(bindDN string, searchReq ldap.SearchRequest, conn n
for _, provider := range ls.providers {
providerBase, _ := goldap.ParseDN(strings.ToLower(provider.BaseDN))
if providerBase.AncestorOf(bd) || providerBase.Equal(bd) {
selectedApp = provider.GetAppSlug()
return provider.searcher.Search(req)
}
}

View File

@@ -44,8 +44,7 @@ func (ds *DirectSearcher) Search(req *search.Request) (ldap.ServerSearchResult,
"outpost_name": ds.si.GetOutpostName(),
"type": "search",
"reason": "filter_parse_fail",
"dn": req.BindDN,
"client": req.RemoteAddr(),
"app": ds.si.GetAppSlug(),
}).Inc()
return ldap.ServerSearchResult{ResultCode: ldap.LDAPResultOperationsError}, fmt.Errorf("Search Error: error parsing filter: %s", req.Filter)
}
@@ -54,8 +53,7 @@ func (ds *DirectSearcher) Search(req *search.Request) (ldap.ServerSearchResult,
"outpost_name": ds.si.GetOutpostName(),
"type": "search",
"reason": "empty_bind_dn",
"dn": req.BindDN,
"client": req.RemoteAddr(),
"app": ds.si.GetAppSlug(),
}).Inc()
return ldap.ServerSearchResult{ResultCode: ldap.LDAPResultInsufficientAccessRights}, fmt.Errorf("Search Error: Anonymous BindDN not allowed %s", req.BindDN)
}
@@ -64,8 +62,7 @@ func (ds *DirectSearcher) Search(req *search.Request) (ldap.ServerSearchResult,
"outpost_name": ds.si.GetOutpostName(),
"type": "search",
"reason": "invalid_bind_dn",
"dn": req.BindDN,
"client": req.RemoteAddr(),
"app": ds.si.GetAppSlug(),
}).Inc()
return ldap.ServerSearchResult{ResultCode: ldap.LDAPResultInsufficientAccessRights}, fmt.Errorf("Search Error: BindDN %s not in our BaseDN %s", req.BindDN, ds.si.GetBaseDN())
}
@@ -77,8 +74,7 @@ func (ds *DirectSearcher) Search(req *search.Request) (ldap.ServerSearchResult,
"outpost_name": ds.si.GetOutpostName(),
"type": "search",
"reason": "user_info_not_cached",
"dn": req.BindDN,
"client": req.RemoteAddr(),
"app": ds.si.GetAppSlug(),
}).Inc()
return ldap.ServerSearchResult{ResultCode: ldap.LDAPResultInsufficientAccessRights}, errors.New("access denied")
}
@@ -90,8 +86,7 @@ func (ds *DirectSearcher) Search(req *search.Request) (ldap.ServerSearchResult,
"outpost_name": ds.si.GetOutpostName(),
"type": "search",
"reason": "filter_parse_fail",
"dn": req.BindDN,
"client": req.RemoteAddr(),
"app": ds.si.GetAppSlug(),
}).Inc()
return ldap.ServerSearchResult{ResultCode: ldap.LDAPResultOperationsError}, fmt.Errorf("Search Error: error parsing filter: %s", req.Filter)
}

View File

@@ -47,8 +47,7 @@ func (ms *MemorySearcher) Search(req *search.Request) (ldap.ServerSearchResult,
"outpost_name": ms.si.GetOutpostName(),
"type": "search",
"reason": "filter_parse_fail",
"dn": req.BindDN,
"client": req.RemoteAddr(),
"app": ms.si.GetAppSlug(),
}).Inc()
return ldap.ServerSearchResult{ResultCode: ldap.LDAPResultOperationsError}, fmt.Errorf("Search Error: error parsing filter: %s", req.Filter)
}
@@ -57,8 +56,7 @@ func (ms *MemorySearcher) Search(req *search.Request) (ldap.ServerSearchResult,
"outpost_name": ms.si.GetOutpostName(),
"type": "search",
"reason": "empty_bind_dn",
"dn": req.BindDN,
"client": req.RemoteAddr(),
"app": ms.si.GetAppSlug(),
}).Inc()
return ldap.ServerSearchResult{ResultCode: ldap.LDAPResultInsufficientAccessRights}, fmt.Errorf("Search Error: Anonymous BindDN not allowed %s", req.BindDN)
}
@@ -67,8 +65,7 @@ func (ms *MemorySearcher) Search(req *search.Request) (ldap.ServerSearchResult,
"outpost_name": ms.si.GetOutpostName(),
"type": "search",
"reason": "invalid_bind_dn",
"dn": req.BindDN,
"client": req.RemoteAddr(),
"app": ms.si.GetAppSlug(),
}).Inc()
return ldap.ServerSearchResult{ResultCode: ldap.LDAPResultInsufficientAccessRights}, fmt.Errorf("Search Error: BindDN %s not in our BaseDN %s", req.BindDN, ms.si.GetBaseDN())
}
@@ -80,8 +77,7 @@ func (ms *MemorySearcher) Search(req *search.Request) (ldap.ServerSearchResult,
"outpost_name": ms.si.GetOutpostName(),
"type": "search",
"reason": "user_info_not_cached",
"dn": req.BindDN,
"client": req.RemoteAddr(),
"app": ms.si.GetAppSlug(),
}).Inc()
return ldap.ServerSearchResult{ResultCode: ldap.LDAPResultInsufficientAccessRights}, errors.New("access denied")
}

View File

@@ -134,11 +134,9 @@ func NewApplication(p api.ProxyOutpostConfig, c *http.Client, cs *ak.CryptoStore
metrics.Requests.With(prometheus.Labels{
"outpost_name": a.outpostName,
"type": "app",
"scheme": r.URL.Scheme,
"method": r.Method,
"path": r.URL.Path,
"host": web.GetHost(r),
"user": user,
"scheme": r.URL.Scheme,
}).Observe(float64(after))
})
})

View File

@@ -54,18 +54,11 @@ func (a *Application) configureProxy() error {
}()
after := time.Since(before)
user := ""
if claims != nil {
user = claims.Email
}
metrics.UpstreamTiming.With(prometheus.Labels{
"outpost_name": a.outpostName,
"upstream_host": r.URL.Host,
"scheme": r.URL.Scheme,
"method": r.Method,
"path": r.URL.Path,
"host": web.GetHost(r),
"user": user,
}).Observe(float64(after))
})
return nil

View File

@@ -22,11 +22,8 @@ func (ps *ProxyServer) HandlePing(rw http.ResponseWriter, r *http.Request) {
metrics.Requests.With(prometheus.Labels{
"outpost_name": ps.akAPI.Outpost.Name,
"method": r.Method,
"scheme": r.URL.Scheme,
"path": r.URL.Path,
"host": web.GetHost(r),
"type": "ping",
"user": "",
}).Observe(float64(after))
}
@@ -37,11 +34,8 @@ func (ps *ProxyServer) HandleStatic(rw http.ResponseWriter, r *http.Request) {
metrics.Requests.With(prometheus.Labels{
"outpost_name": ps.akAPI.Outpost.Name,
"method": r.Method,
"scheme": r.URL.Scheme,
"path": r.URL.Path,
"host": web.GetHost(r),
"type": "ping",
"user": "",
"type": "static",
}).Observe(float64(after))
}

View File

@@ -15,11 +15,11 @@ var (
Requests = promauto.NewHistogramVec(prometheus.HistogramOpts{
Name: "authentik_outpost_proxy_requests",
Help: "The total number of configured providers",
}, []string{"outpost_name", "method", "scheme", "path", "host", "type", "user"})
}, []string{"outpost_name", "method", "scheme", "host", "type"})
UpstreamTiming = promauto.NewHistogramVec(prometheus.HistogramOpts{
Name: "authentik_outpost_proxy_upstream_time",
Help: "A summary of the duration we wait for the upstream reply",
}, []string{"outpost_name", "method", "scheme", "path", "host", "upstream_host", "user"})
}, []string{"outpost_name", "method", "scheme", "host", "upstream_host"})
)
func RunServer() {

View File

@@ -37,11 +37,6 @@ MODE_FILE="/tmp/authentik-mode"
if [[ "$1" == "server" ]]; then
wait_for_db
echo "server" > $MODE_FILE
# We only set PROMETHEUS_MULTIPROC_DIR for the server, as with the worker it just fills up the disk
# as one file is created per process
#
# Set to TMPDIR instead of a hardcoded path so this can be used outside docker too
export PROMETHEUS_MULTIPROC_DIR=$TMPDIR
python -m lifecycle.migrate
/authentik-proxy
elif [[ "$1" == "worker" ]]; then

View File

@@ -3,15 +3,23 @@ import os
import pwd
from hashlib import sha512
from multiprocessing import cpu_count
from os import makedirs
from pathlib import Path
from tempfile import gettempdir
from typing import TYPE_CHECKING
import structlog
from kubernetes.config.incluster_config import SERVICE_HOST_ENV_NAME
from prometheus_client.values import MultiProcessValue
from authentik import get_full_version
from authentik.lib.config import CONFIG
from authentik.lib.utils.http import get_http_session
from authentik.lib.utils.reflection import get_env
from lifecycle.worker import DjangoUvicornWorker
if TYPE_CHECKING:
from gunicorn.arbiter import Arbiter
bind = "127.0.0.1:8000"
@@ -22,19 +30,27 @@ try:
except KeyError:
pass
_tmp = Path(gettempdir())
worker_class = "lifecycle.worker.DjangoUvicornWorker"
worker_tmp_dir = gettempdir()
worker_tmp_dir = str(_tmp.joinpath("authentik_worker_tmp"))
prometheus_tmp_dir = str(_tmp.joinpath("authentik_prometheus_tmp"))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "authentik.root.settings")
os.environ.setdefault("PROMETHEUS_MULTIPROC_DIR", prometheus_tmp_dir)
makedirs(worker_tmp_dir, exist_ok=True)
makedirs(prometheus_tmp_dir, exist_ok=True)
max_requests = 1000
max_requests_jitter = 50
_debug = CONFIG.y_bool("DEBUG", False)
logconfig_dict = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"json_formatter": {
"json": {
"()": structlog.stdlib.ProcessorFormatter,
"processor": structlog.processors.JSONRenderer(),
"foreign_pre_chain": [
@@ -43,14 +59,20 @@ logconfig_dict = {
structlog.processors.TimeStamper(),
structlog.processors.StackInfoRenderer(),
],
}
},
"console": {
"()": structlog.stdlib.ProcessorFormatter,
"processor": structlog.dev.ConsoleRenderer(colors=True),
"foreign_pre_chain": [
structlog.stdlib.add_log_level,
structlog.stdlib.add_logger_name,
structlog.processors.TimeStamper(),
structlog.processors.StackInfoRenderer(),
],
},
},
"handlers": {
"error_console": {
"class": "logging.StreamHandler",
"formatter": "json_formatter",
},
"console": {"class": "logging.StreamHandler", "formatter": "json_formatter"},
"console": {"class": "logging.StreamHandler", "formatter": "json" if _debug else "console"},
},
"loggers": {
"uvicorn": {"handlers": ["console"], "level": "WARNING", "propagate": False},
@@ -69,11 +91,54 @@ workers = int(os.environ.get("WORKERS", default_workers))
threads = int(os.environ.get("THREADS", 4))
# pylint: disable=unused-argument
def worker_exit(server, worker):
def post_fork(server: "Arbiter", worker: DjangoUvicornWorker):
"""Tell prometheus to use worker number instead of process ID for multiprocess"""
from prometheus_client import values
values.ValueClass = MultiProcessValue(lambda: worker._worker_id)
# pylint: disable=unused-argument
def worker_exit(server: "Arbiter", worker: DjangoUvicornWorker):
"""Remove pid dbs when worker is shutdown"""
from prometheus_client import multiprocess
multiprocess.mark_process_dead(worker.pid)
multiprocess.mark_process_dead(worker._worker_id)
def on_starting(server: "Arbiter"):
"""Attach a set of IDs that can be temporarily re-used.
Used on reloads when each worker exists twice."""
server._worker_id_overload = set()
def nworkers_changed(server: "Arbiter", new_value, old_value):
"""Gets called on startup too.
Set the current number of workers. Required if we raise the worker count
temporarily using TTIN because server.cfg.workers won't be updated and if
one of those workers dies, we wouldn't know the ids go that far."""
server._worker_id_current_workers = new_value
def _next_worker_id(server: "Arbiter"):
"""If there are IDs open for re-use, take one. Else look for a free one."""
if server._worker_id_overload:
return server._worker_id_overload.pop()
in_use = set(w._worker_id for w in tuple(server.WORKERS.values()) if w.alive)
free = set(range(1, server._worker_id_current_workers + 1)) - in_use
return free.pop()
def on_reload(server: "Arbiter"):
"""Add a full set of ids into overload so it can be re-used once."""
server._worker_id_overload = set(range(1, server.cfg.workers + 1))
def pre_fork(server: "Arbiter", worker: DjangoUvicornWorker):
"""Attach the next free worker_id before forking off."""
worker._worker_id = _next_worker_id(server)
if not CONFIG.y_bool("disable_startup_analytics", False):
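
For reference, a minimal sketch of how prometheus_client's multiprocess mode consumes the directory configured above: every worker writes its sample values into files under PROMETHEUS_MULTIPROC_DIR (now keyed by the stable gunicorn worker id rather than the PID), and a MultiProcessCollector merges those files at scrape time. The function below illustrates that read path under those assumptions; it is not authentik's actual metrics endpoint:

import os

from prometheus_client import CollectorRegistry, generate_latest
from prometheus_client.multiprocess import MultiProcessCollector

def render_metrics() -> bytes:
    """Merge the per-worker sample files into one scrape payload."""
    registry = CollectorRegistry()
    # Reads the value files written by all gunicorn workers.
    MultiProcessCollector(registry, path=os.environ["PROMETHEUS_MULTIPROC_DIR"])
    return generate_latest(registry)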

View File

@@ -10,7 +10,9 @@ from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.wait import WebDriverWait
from authentik.core.models import User
from authentik.flows.models import Flow, FlowDesignation, FlowStageBinding
from authentik.core.tests.utils import create_test_flow
from authentik.flows.models import FlowDesignation, FlowStageBinding
from authentik.lib.generators import generate_id
from authentik.stages.email.models import EmailStage, EmailTemplates
from authentik.stages.identification.models import IdentificationStage
from authentik.stages.prompt.models import FieldTypes, Prompt, PromptStage
@@ -64,21 +66,16 @@ class TestFlowsEnroll(SeleniumTestCase):
)
# Stages
first_stage = PromptStage.objects.create(name="prompt-stage-first")
first_stage = PromptStage.objects.create(name=generate_id())
first_stage.fields.set([username_prompt, password, password_repeat])
first_stage.save()
second_stage = PromptStage.objects.create(name="prompt-stage-second")
second_stage = PromptStage.objects.create(name=generate_id())
second_stage.fields.set([name_field, email])
second_stage.save()
user_write = UserWriteStage.objects.create(name="enroll-user-write")
user_login = UserLoginStage.objects.create(name="enroll-user-login")
user_write = UserWriteStage.objects.create(name=generate_id())
user_login = UserLoginStage.objects.create(name=generate_id())
flow = Flow.objects.create(
name="default-enrollment-flow",
slug="default-enrollment-flow",
title="default-enrollment-flow",
designation=FlowDesignation.ENROLLMENT,
)
flow = create_test_flow(FlowDesignation.ENROLLMENT)
# Attach enrollment flow to identification stage
ident_stage: IdentificationStage = IdentificationStage.objects.first()
@@ -133,27 +130,22 @@ )
)
# Stages
first_stage = PromptStage.objects.create(name="prompt-stage-first")
first_stage = PromptStage.objects.create(name=generate_id())
first_stage.fields.set([username_prompt, password, password_repeat])
first_stage.save()
second_stage = PromptStage.objects.create(name="prompt-stage-second")
second_stage = PromptStage.objects.create(name=generate_id())
second_stage.fields.set([name_field, email])
second_stage.save()
email_stage = EmailStage.objects.create(
name="enroll-email",
name=generate_id(),
host="localhost",
port=1025,
template=EmailTemplates.ACCOUNT_CONFIRM,
)
user_write = UserWriteStage.objects.create(name="enroll-user-write")
user_login = UserLoginStage.objects.create(name="enroll-user-login")
user_write = UserWriteStage.objects.create(name=generate_id())
user_login = UserLoginStage.objects.create(name=generate_id())
flow = Flow.objects.create(
name="default-enrollment-flow",
slug="default-enrollment-flow",
title="default-enrollment-flow",
designation=FlowDesignation.ENROLLMENT,
)
flow = create_test_flow(FlowDesignation.ENROLLMENT)
# Attach enrollment flow to identification stage
ident_stage: IdentificationStage = IdentificationStage.objects.first()