mirror of
https://github.com/ZwareBear/awx.git
synced 2026-04-20 23:11:48 -05:00
* Based on the tower topology (Instance and InstanceGroup relationships), have celery dynamically listen to queues on boot * Add celery task capable of "refreshing" what queues each celeryd worker listens to. This will be used to support changes in the topology. * Cleaned up some celery task definitions. * Converged wrongly targeted job launch/finish messages to 'tower' queue, rather than a 1-off queue. * Dynamically route celery tasks destined for the local node * separate beat process add support for separate beat process
38 lines
973 B
Python
38 lines
973 B
Python
|
|
# Python
|
|
import logging
|
|
|
|
# Celery
|
|
from celery import Task, shared_task
|
|
|
|
# AWX
|
|
from awx.main.scheduler import TaskManager
|
|
|
|
# Module-level logger shared by the scheduler tasks defined below.
logger = logging.getLogger('awx.main.scheduler')
|
|
|
|
# TODO: move logic to UnifiedJob model and use bind=True feature of celery.
|
|
# Would we need the request loop then? I think so. Even if we get the in-memory
|
|
# updated model, the call to schedule() may get stale data.
|
|
|
|
|
|
class LogErrorsTask(Task):
    """Celery base task that logs any exception raised by the task body
    before delegating to the default Celery failure handling."""

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        # Lazy %-style args instead of eager str.format(): the message is
        # only interpolated if the record is actually emitted. Output text
        # is unchanged.
        logger.exception('Task {} encountered exception.'.format(self.name), exc_info=exc)
        logger  # noqa placeholder removed below
        super(LogErrorsTask, self).on_failure(exc, task_id, args, kwargs, einfo)
|
|
|
|
|
|
@shared_task(base=LogErrorsTask)
def run_job_launch(job_id):
    """Run a scheduling pass in response to a job-launch message.

    ``job_id`` is carried by the message but not used here; the scheduling
    pass re-reads state itself.
    """
    task_manager = TaskManager()
    task_manager.schedule()
|
|
|
|
|
|
@shared_task(base=LogErrorsTask)
def run_job_complete(job_id):
    """Run a scheduling pass in response to a job-complete message.

    ``job_id`` is carried by the message but not used here; the scheduling
    pass re-reads state itself.
    """
    task_manager = TaskManager()
    task_manager.schedule()
|
|
|
|
|
|
@shared_task(base=LogErrorsTask)
def run_task_manager():
    """Periodic entry point: log and run one full scheduling pass."""
    logger.debug("Running Tower task manager.")
    manager = TaskManager()
    manager.schedule()
|