Example 1
    def __init__(self):
        self.buff = {}
        self.redis = redis.Redis.from_url(settings.BROKER_URL)
        self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
        self.queue_pop = 0
        self.queue_name = settings.CALLBACK_QUEUE
        self.prof = AWXProfiler("CallbackBrokerWorker")
        # Clear out any callback receiver statistics left over in Redis from
        # previous runs.
        for key in self.redis.keys('awx_callback_receiver_statistics_*'):
            self.redis.delete(key)
Example 2
    def __init__(self, prefix=""):
        self.prefix = prefix
        # Initialize each metric under this prefix to 0 and force metric_has_changed
        # to true. This ensures those metrics will be overridden when pipe_execute
        # is called later.
        self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
        self.start_time = time.time()
        self.start_task_limit = settings.START_TASK_LIMIT
        for m in self.subsystem_metrics.METRICS:
            if m.startswith(self.prefix):
                self.subsystem_metrics.set(m, 0)
Example 3
    def record_aggregate_metrics(self, *args):
        if not settings.IS_TESTING():
            # Increment task_manager_schedule_calls regardless of whether the
            # other metrics are recorded.
            s_metrics.Metrics(auto_pipe_execute=True).inc("task_manager_schedule_calls", 1)
            # Only record the remaining metrics if the last recording was more than
            # SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL seconds ago. This
            # prevents a short-duration task manager that runs directly after a
            # long one from overriding useful metrics.
            current_time = time.time()
            time_last_recorded = current_time - self.subsystem_metrics.decode("task_manager_recorded_timestamp")
            if time_last_recorded > settings.SUBSYSTEM_METRICS_TASK_MANAGER_RECORD_INTERVAL:
                logger.debug(f"recording metrics, last recorded {time_last_recorded} seconds ago")
                self.subsystem_metrics.set("task_manager_recorded_timestamp", current_time)
                self.subsystem_metrics.pipe_execute()
            else:
                logger.debug(f"skipping recording metrics, last recorded {time_last_recorded} seconds ago")
Example 4
    def __init__(
        self,
        name,
        event_loop,
        stats: BroadcastWebsocketStats,
        remote_host: str,
        remote_port: int = settings.BROADCAST_WEBSOCKET_PORT,
        protocol: str = settings.BROADCAST_WEBSOCKET_PROTOCOL,
        verify_ssl: bool = settings.BROADCAST_WEBSOCKET_VERIFY_CERT,
        endpoint: str = 'broadcast',
    ):
        self.name = name
        self.event_loop = event_loop
        self.stats = stats
        self.remote_host = remote_host
        self.remote_port = remote_port
        self.endpoint = endpoint
        self.protocol = protocol
        self.verify_ssl = verify_ssl
        self.channel_layer = None
        self.subsystem_metrics = s_metrics.Metrics()
Example 5
    def __init__(self):
        """
        Do NOT put database queries or other potentially expensive operations
        in the task manager init. The task manager object is created every time a
        job is created or transitions state, and every 30 seconds on each tower node.
        More often than not, the object is destroyed quickly because the NOOP case is hit.

        The NOOP case is short-circuit logic. If the task manager realizes that another instance
        of the task manager is already running, then it short-circuits and decides not to run.
        """
        # start_task_limit indicates how many pending jobs can be started on this
        # .schedule() run. Starting jobs is expensive, and there is code in place to reap
        # the task manager after 5 minutes. At scale, the task manager can easily take more than
        # 5 minutes to start pending jobs. If this limit is reached, the remaining pending
        # jobs are left for the next task manager cycle to start.
        self.start_task_limit = settings.START_TASK_LIMIT
        self.time_delta_job_explanation = timedelta(seconds=30)
        self.subsystem_metrics = s_metrics.Metrics(auto_pipe_execute=False)
        # Initialize each task manager metric to 0 and force metric_has_changed to true.
        # This ensures each task manager metric will be overridden when pipe_execute
        # is called later.
        for m in self.subsystem_metrics.METRICS:
            if m.startswith("task_manager"):
                self.subsystem_metrics.set(m, 0)
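The locking behind the NOOP short-circuit described in the docstring above is not part of these snippets. As a rough illustration only, the idea can be expressed as a non-blocking lock check that returns immediately when another instance already holds the lock; the schedule() wrapper and the Redis key name below are hypothetical and are not AWX's actual implementation:

    import redis
    from django.conf import settings

    def schedule():
        conn = redis.Redis.from_url(settings.BROKER_URL)
        # Try to claim a short-lived lock. nx=True only sets the key if it does
        # not already exist, so a concurrent instance gets back None.
        got_lock = conn.set("task_manager_running", "1", nx=True, ex=300)
        if not got_lock:
            # NOOP case: another task manager is already running, so bail out.
            return
        try:
            # Hypothetical placeholder for the real work: start pending jobs,
            # record metrics, and so on.
            pass
        finally:
            conn.delete("task_manager_running")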
Example 6
    def __init__(self):
        self.queue = getattr(settings, 'CALLBACK_QUEUE', '')
        self.logger = logging.getLogger('awx.main.queue.CallbackQueueDispatcher')
        self.connection = redis.Redis.from_url(settings.BROKER_URL)
        self.subsystem_metrics = s_metrics.Metrics()