core: adjust timeout of backup task's caching

This commit is contained in:
Jens Langhammer 2020-11-03 22:53:24 +01:00
parent ba96c9526e
commit 34793f7cef
3 changed files with 7 additions and 5 deletions

View File

@@ -37,6 +37,7 @@ def clean_expired_models(self: MonitoredTask):
 @CELERY_APP.task(bind=True, base=MonitoredTask)
 def backup_database(self: MonitoredTask):  # pragma: no cover
     """Database backup"""
+    self.result_timeout_hours = 25
     try:
         start = datetime.now()
         out = StringIO()

View File

@@ -66,13 +66,13 @@ class TaskInfo:
         """Delete task info from cache"""
         return cache.delete(f"task_{self.task_name}")

-    def save(self):
+    def save(self, timeout_hours=6):
         """Save task into cache"""
         key = f"task_{self.task_name}"
         if self.result.uid:
             key += f"_{self.result.uid}"
             self.task_name += f"_{self.result.uid}"
-        cache.set(key, self, timeout=13 * 60 * 60)
+        cache.set(key, self, timeout=timeout_hours * 60 * 60)


 class MonitoredTask(Task):
@@ -90,6 +90,7 @@ class MonitoredTask(Task):
         self.save_on_success = True
         self._uid = None
         self._result = TaskResult(status=TaskResultStatus.ERROR, messages=[])
+        self.result_timeout_hours = 6

     def set_uid(self, uid: str):
         """Set UID, so in the case of an unexpected error its saved correctly"""
@@ -115,7 +116,7 @@ class MonitoredTask(Task):
             task_call_func=self.__name__,
             task_call_args=args,
             task_call_kwargs=kwargs,
-        ).save()
+        ).save(self.result_timeout_hours)
         return super().after_return(status, retval, task_id, args, kwargs, einfo=einfo)

     # pylint: disable=too-many-arguments
@@ -131,7 +132,7 @@ class MonitoredTask(Task):
             task_call_func=self.__name__,
             task_call_args=args,
             task_call_kwargs=kwargs,
-        ).save()
+        ).save(self.result_timeout_hours)
         return super().on_failure(exc, task_id, args, kwargs, einfo=einfo)

     def run(self, *args, **kwargs):
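
For context, a minimal sketch of how the new timeout flows from a task into the cache, assuming Django's low-level cache API (django.core.cache.cache) that the code above already uses; ExampleTaskInfo and the final call are illustrative only, not part of this commit:

from django.core.cache import cache

class ExampleTaskInfo:
    """Simplified stand-in for TaskInfo, showing only the caching part."""

    def __init__(self, task_name: str):
        self.task_name = task_name

    def save(self, timeout_hours: int = 6):
        # cache.set expects its timeout in seconds, so the hour-based setting is
        # converted here, mirroring TaskInfo.save() after this change.
        cache.set(f"task_{self.task_name}", self, timeout=timeout_hours * 60 * 60)

# A long-running task can keep its result cached past the 6-hour default by
# raising result_timeout_hours, as backup_database now does with 25 hours:
ExampleTaskInfo("backup_database").save(timeout_hours=25)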

View File

@@ -322,7 +322,7 @@ if not DEBUG and _ERROR_REPORTING:
         ],
         before_send=before_send,
         release="passbook@%s" % __version__,
-        traces_sample_rate=1.0,
+        traces_sample_rate=0.6,
         environment=CONFIG.y("error_reporting.environment", "customer"),
         send_default_pii=CONFIG.y_bool("error_reporting.send_pii", False),
     )
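
The Sentry change above lowers performance-trace sampling from every transaction to roughly 60% of them. A minimal standalone sketch of the same setting, assuming the sentry_sdk package that the settings above configure; the DSN is a placeholder, not a real project key:

import sentry_sdk

# traces_sample_rate controls what fraction of transactions are sent to Sentry
# for performance tracing; 0.6 samples about 60% instead of all of them (1.0).
sentry_sdk.init(
    dsn="https://examplekey@sentry.example.com/1",  # placeholder DSN, illustration only
    traces_sample_rate=0.6,
)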