This repository has been archived on 2024-05-31. You can view files and clone it, but cannot push or open issues or pull requests.
authentik/lifecycle/gunicorn.conf.py

72 lines
2.1 KiB
Python
Raw Normal View History

2020-09-02 22:04:12 +00:00
"""Gunicorn config"""
import os
import pwd
2020-09-11 21:21:11 +00:00
from multiprocessing import cpu_count
2020-09-02 22:04:12 +00:00
import structlog
from kubernetes.config.incluster_config import SERVICE_HOST_ENV_NAME
2020-09-02 22:04:12 +00:00
bind = "127.0.0.1:8000"
2020-09-02 22:04:12 +00:00
try:
pwd.getpwnam("authentik")
user = "authentik"
group = "authentik"
except KeyError:
pass
2020-09-02 22:04:12 +00:00
worker_class = "lifecycle.worker.DjangoUvicornWorker"
# Docker containers don't have /tmp as tmpfs
if os.path.exists("/dev/shm"): # nosec
worker_tmp_dir = "/dev/shm" # nosec
2020-09-02 22:04:12 +00:00
2020-12-05 21:08:42 +00:00
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "authentik.root.settings")
max_requests = 1000
max_requests_jitter = 50
2020-09-02 22:04:12 +00:00
# Route gunicorn's and uvicorn's logging through structlog so every line
# the process emits is rendered as a single JSON object, matching the
# application's own log output.
logconfig_dict = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "json_formatter": {
            "()": structlog.stdlib.ProcessorFormatter,
            "processor": structlog.processors.JSONRenderer(),
            # Processors applied to records originating from plain stdlib
            # logging (gunicorn/uvicorn) before JSON rendering.
            "foreign_pre_chain": [
                structlog.stdlib.add_log_level,
                structlog.stdlib.add_logger_name,
                structlog.processors.TimeStamper(),
                structlog.processors.StackInfoRenderer(),
            ],
        }
    },
    "handlers": {
        "error_console": {
            "class": "logging.StreamHandler",
            "formatter": "json_formatter",
        },
        "console": {"class": "logging.StreamHandler", "formatter": "json_formatter"},
    },
    "loggers": {
        "uvicorn": {"handlers": ["console"], "level": "WARNING", "propagate": False},
        "gunicorn": {"handlers": ["console"], "level": "INFO", "propagate": False},
    },
}
2020-09-11 21:21:11 +00:00
# If we're running in kubernetes, use a fixed worker count because we can
# scale horizontally with more pods; otherwise (assume docker-compose)
# scale with the CPUs of the single host.
if SERVICE_HOST_ENV_NAME in os.environ:
    default_workers = 2
else:
    default_workers = max(cpu_count() * 0.25, 1) + 1  # Minimum of 2 workers
# Environment variables always take precedence over the computed defaults.
workers = int(os.environ.get("WORKERS", default_workers))
threads = int(os.environ.get("THREADS", 4))
# pylint: disable=unused-argument
def worker_exit(server, worker):
    """Remove pid dbs when worker is shutdown"""
    # Imported lazily so the config module can be loaded without
    # prometheus_client; gunicorn invokes this hook per dying worker.
    from prometheus_client.multiprocess import mark_process_dead

    mark_process_dead(worker.pid)