Code example #1
def emit_event_detail(serializer, relation, **kwargs):
    instance = kwargs['instance']
    created = kwargs['created']
    if created:
        event_serializer = serializer(instance)
        consumers.emit_channel_notification(
            '-'.join([
                event_serializer.get_group_name(instance),
                str(getattr(instance, relation))
            ]), event_serializer.data)
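
The handler above reads `instance` and `created` out of `**kwargs`, which is the shape of a Django post_save receiver with the serializer and relation pre-bound. A minimal sketch of how such a handler could be wired up, assuming the usual functools.partial pattern (the connect_event_detail helper and its arguments are illustrative, not taken from the project):

from functools import partial

from django.db.models.signals import post_save


def connect_event_detail(model_cls, serializer_cls, relation):
    # Bind the serializer and the foreign-key attribute up front, then let
    # post_save supply instance/created through **kwargs on every save.
    post_save.connect(
        partial(emit_event_detail, serializer_cls, relation),
        sender=model_cls,
        weak=False,  # the signal only keeps a weak reference; keep the partial alive
    )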
Code example #2
File: jobs.py Project: skyle97/cyborgbackup
    def _websocket_emit_status(self, status):
        try:
            status_data = dict(job_id=self.id, status=status)
            status_data.update(self.websocket_emit_data())
            status_data['group_name'] = 'jobs'
            emit_channel_notification('jobs-status_changed', status_data)

        except IOError:  # includes socket errors
            logger.exception(
                '%s failed to emit channel msg about status change',
                self.log_format)
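
emit_channel_notification(group, payload) itself is not shown on this page. A minimal sketch of what such a helper typically looks like on top of Django Channels, assuming a configured channel layer and a consumer method registered for the message type 'internal.message' (both are assumptions, not taken from the project):

import json

from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer


def emit_channel_notification(group, payload):
    # Broadcast the payload to every consumer subscribed to the group,
    # e.g. 'jobs-status_changed' or 'job_events-<id>'.
    async_to_sync(get_channel_layer().group_send)(
        group,
        {
            "type": "internal.message",  # assumed handler name on the consumer side
            "text": json.dumps(payload),
        },
    )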
Code example #3
    def update_computed_fields(self):
        future_rs = tzcron.Schedule(self.schedule.crontab, pytz.utc)
        next_run_actual = next(future_rs)

        if next_run_actual is not None:
            if not datetime_exists(next_run_actual):
                # skip imaginary dates, like 2:30 on DST boundaries
                next_run_actual = next(future_rs)
            next_run_actual = next_run_actual.astimezone(pytz.utc)

        self.next_run = next_run_actual
        emit_channel_notification('schedules-changed', dict(id=self.id, group_name='schedules'))
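
datetime_exists is imported from elsewhere in the project; it only has to say whether a wall-clock time was skipped by a DST transition, as the comment above notes. One common way to implement that check, sketched here under the assumption of a dateutil- or zoneinfo-style tzinfo (with pytz the localize API would be needed instead):

from datetime import timezone


def datetime_exists(dt):
    # A wall-clock time exists if it survives a round trip through UTC:
    # times inside a DST gap come back shifted and fail the comparison.
    naive = dt.replace(tzinfo=None)
    round_trip = (
        naive.replace(tzinfo=dt.tzinfo)
             .astimezone(timezone.utc)
             .astimezone(dt.tzinfo)
             .replace(tzinfo=None)
    )
    return naive == round_trip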
Code example #4
    def handle(self, *args, **kwargs):
        i = 6
        while i < 200:
            consumers.emit_channel_notification("job_events-4", {
                "id": 16139540,
                "type": "job_event",
                "url": "/api/v1/job_events/16139540/",
                "related": {"job": "/api/v1/jobs/38631/"},
                "summary_fields": {
                    "job": {
                        "id": 4,
                        "name": "Backup Job Backup Mail from Knet knet.milkywan.cloud",
                        "status": "running",
                        "failed": False,
                        "elapsed": "0.000"
                    }
                },
                "created": "2020-04-11T17:50:38.159331+00:00",
                "modified": "2020-04-11T17:50:38.159340+00:00",
                "job": 4,
                "event": "verbose",
                "counter": i,
                "event_display": "Verbose",
                "event_data": {},
                "event_level": 0,
                "failed": False,
                "changed": False,
                "uuid": "",
                "task": "",
                "stdout": "Merging into master chunks index {}...".format(i),
                "start_line": 8,
                "end_line": 9,
                "verbosity": 0,
                "event_name": "verbose",
                "group_name": "job_events"
            })
            i += 1
Code example #5
    def handle(self, *args, **kwargs):
        consumers.emit_channel_notification(
            "jobs-status_changed", {
                "job_id": 38631,
                "status": "successful",
                "job_name": "Backup Job Backup mail from Knet",
                "group_name": "jobs"
            })
        consumers.emit_channel_notification(
            "jobs-status_changed", {
                "job_id": 38631,
                "status": "failed",
                "job_name": "Backup Job Backup mail from Knet",
                "group_name": "jobs"
            })
        consumers.emit_channel_notification(
            "jobs-status_changed", {
                "job_id": 38631,
                "status": "running",
                "job_name": "Backup Job Backup mail from Knet",
                "group_name": "jobs"
            })
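
The handle(self, *args, **kwargs) methods in examples #4 and #5 have the shape of Django management commands used to push test notifications by hand. A minimal sketch of the surrounding boilerplate, assuming the method lives in a management/commands module (the import path, command name and help text are illustrative):

from django.core.management.base import BaseCommand

from cyborgbackup.main import consumers  # import path assumed for illustration


class Command(BaseCommand):
    help = 'Emit a fake job status notification over the channel layer'

    def handle(self, *args, **kwargs):
        consumers.emit_channel_notification(
            "jobs-status_changed", {
                "job_id": 38631,
                "status": "running",
                "job_name": "Backup Job Backup mail from Knet",
                "group_name": "jobs"
            })

The command would then be run with python manage.py followed by the module's file name, since the file name under management/commands determines the command name.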
Code example #6
    def callback_worker(self, queue_actual, idx):
        signal_handler = WorkerSignalHandler()
        while not signal_handler.kill_now:
            try:
                body = queue_actual.get(block=True, timeout=1)
            except QueueEmpty:
                continue
            except Exception as e:
                logger.error("Exception on worker thread, restarting: " +
                             str(e))
                continue
            try:
                event_map = {
                    'job_id': JobEvent,
                    'catalog': Catalog,
                }

                if not any([key in body for key in event_map]):
                    raise Exception('Payload does not have a job identifier')
                if settings.DEBUG:
                    from pygments import highlight
                    from pygments.lexers import PythonLexer
                    from pygments.formatters import Terminal256Formatter
                    from pprint import pformat
                    logger.info('Body: {}'.format(
                        highlight(pformat(body, width=160), PythonLexer(),
                                  Terminal256Formatter(
                                      style='friendly')))[:1024 * 4])

                def _save_event_data():
                    for key, cls in event_map.items():
                        if key in body:
                            cls.create_from_data(**body)

                job_identifier = 'unknown job'
                for key in event_map.keys():
                    if key in body:
                        job_identifier = body[key]
                        break

                if body.get('event') == 'EOF':
                    try:
                        msg = 'Event processing is finished for Job {}, sending notifications'
                        logger.info(msg.format(job_identifier))
                        # EOF events are sent when stdout for the running task is
                        # closed. don't actually persist them to the database; we
                        # just use them to report `summary` websocket events as an
                        # approximation for when a job is "done"
                        emit_channel_notification(
                            'jobs-summary',
                            dict(group_name='jobs', job_id=job_identifier))
                        # Additionally, when we've processed all events, we should
                        # have all the data we need to send out success/failure
                        # notification templates
                        j = Job.objects.get(pk=job_identifier)
                        if hasattr(j, 'send_notification_templates'):
                            retries = 0
                            while retries < 5:
                                if j.finished:
                                    state = 'succeeded' if j.status == 'successful' else 'failed'
                                    j.send_notification_templates(state)
                                    break
                                else:
                                    # wait a few seconds to avoid a race where the
                                    # events are persisted _before_ the UJ.status
                                    # changes from running -> successful
                                    retries += 1
                                    time.sleep(1)
                                    j = Job.objects.get(pk=job_identifier)
                    except Exception:
                        logger.exception(
                            'Worker failed to emit notifications: Job {}'.
                            format(job_identifier))
                    continue

                retries = 0
                while retries <= self.MAX_RETRIES:
                    try:
                        _save_event_data()
                        break
                    except (OperationalError, InterfaceError, InternalError):
                        if retries >= self.MAX_RETRIES:
                            msg = 'Worker could not re-establish database connection, shutting down gracefully: Job {}'
                            logger.exception(msg.format(job_identifier))
                            os.kill(os.getppid(), signal.SIGINT)
                            return
                        delay = 60 * retries
                        logger.exception(
                            'Database Error Saving Job Event, retry #{i} in {delay} seconds:'
                            .format(i=retries + 1, delay=delay))
                        django_connection.close()
                        time.sleep(delay)
                        retries += 1
                    except DatabaseError:
                        logger.exception(
                            'Database Error Saving Job Event for Job {}'.
                            format(job_identifier))
                        break
            except Exception as exc:
                import traceback
                tb = traceback.format_exc()
                logger.error('Callback Task Processor Raised Exception: %r',
                             exc)
                logger.error('Detail: {}'.format(tb))
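
WorkerSignalHandler in example #6 only needs to flip kill_now when the process is asked to stop, so the loop can finish its current event and exit cleanly. A minimal sketch of that pattern, assuming SIGINT and SIGTERM are the signals of interest (illustrative, not the project's exact class):

import signal


class WorkerSignalHandler:
    def __init__(self):
        # The worker loop polls kill_now between queue reads instead of
        # being interrupted mid-save.
        self.kill_now = False
        signal.signal(signal.SIGINT, self.exit_gracefully)
        signal.signal(signal.SIGTERM, self.exit_gracefully)

    def exit_gracefully(self, signum, frame):
        self.kill_now = True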