Example #1
 def _error_handler(self, crash_data, exc_info, extra):
     """Captures errors from signature generation"""
     extra['uuid'] = crash_data.get('uuid', None)
     sentry_client.capture_error(self.sentry_dsn,
                                 self.logger,
                                 exc_info=exc_info,
                                 extra=extra)
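
For context, a minimal runnable sketch of how a handler like this might be driven. The `SignatureGenerator` host class and the stub `sentry_client` are hypothetical stand-ins, assuming the `(sentry_dsn, logger, exc_info, extra)` call shape used above.

    import logging
    import sys

    logging.basicConfig(level=logging.INFO)

    class _StubSentryClient:
        # Hypothetical stand-in for socorro's sentry_client module
        @staticmethod
        def capture_error(sentry_dsn, logger, exc_info=None, extra=None):
            logger.error('would send to %s with extra=%s', sentry_dsn, extra,
                         exc_info=exc_info)

    sentry_client = _StubSentryClient()

    class SignatureGenerator:
        # Hypothetical host class for the _error_handler method above
        def __init__(self, sentry_dsn=None):
            self.sentry_dsn = sentry_dsn
            self.logger = logging.getLogger(__name__)

        def _error_handler(self, crash_data, exc_info, extra):
            extra['uuid'] = crash_data.get('uuid', None)
            sentry_client.capture_error(self.sentry_dsn, self.logger,
                                        exc_info=exc_info, extra=extra)

    gen = SignatureGenerator()
    try:
        raise ValueError('signature generation failed')
    except ValueError:
        gen._error_handler({'uuid': 'abc-123'}, sys.exc_info(), extra={})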
Example #2
    def _capture_error(self, exc_info, crash_id=None):
        """Capture an error in sentry if able.

        :arg crash_id: a crash id
        :arg exc_info: the exc info as it comes from sys.exc_info()

        """
        extra = {}
        if crash_id:
            extra["crash_id"] = crash_id

        sentry_client.capture_error(self.logger, exc_info, extra=extra)
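
A sketch of a typical call site for this helper; the class and its failing `save()` method are hypothetical, and the stub `_capture_error` stands in for the method above.

    import sys

    class StorageExample:
        # Hypothetical class mixing in _capture_error as defined above
        def _capture_error(self, exc_info, crash_id=None):
            # Stub: the real method forwards to sentry_client.capture_error()
            print('captured', exc_info[0].__name__, 'for crash', crash_id)

        def save(self, crash_id):
            try:
                raise RuntimeError('storage unavailable')  # simulated failure
            except Exception:
                # Pass sys.exc_info() through so Sentry gets the traceback,
                # then re-raise so the caller still sees the failure
                self._capture_error(sys.exc_info(), crash_id=crash_id)
                raise

    try:
        StorageExample().save('de1bb258-cbbf-4589-a673-34f800160918')
    except RuntimeError:
        pass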
Example #3
File: cronrun.py  Project: xincun/socorro
 def handle(self, **options):
     """Execute cronrun command."""
     try:
         if options['job']:
             job_args = options.get('job_arg') or []
             # Re-add the -- because they're optional arguments; note that this
             # doesn't support positional arguments
             cmd_args = ['--%s' % arg for arg in job_args]
             return self.cmd_run_one(options['job'], options['force'],
                                     cmd_args)
         else:
             return self.cmd_run_all()
     except Exception:
         capture_error(settings.SENTRY_DSN)
         raise
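
The `'--%s' % arg` reassembly implies the command collects repeatable `job_arg` values with the `--` prefix stripped. A standalone sketch of that round trip using plain argparse (Django management commands use argparse underneath); the exact flag names are assumptions inferred from the options `handle()` reads.

    import argparse

    parser = argparse.ArgumentParser(prog='cronrun')
    parser.add_argument('--job', default=None,
                        help='name of a single job to run')
    parser.add_argument('--force', action='store_true',
                        help='run even if it is not time to run')
    parser.add_argument('--job-arg', dest='job_arg', action='append',
                        help='key=val pair to forward to the job')

    options = vars(parser.parse_args(
        ['--job', 'cleanup', '--job-arg', 'dry_run=true']))
    job_args = options.get('job_arg') or []
    cmd_args = ['--%s' % arg for arg in job_args]
    print(cmd_args)  # ['--dry_run=true']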
Example #4
    def _capture_error(self, exc_info, crash_id=None):
        """Capture an error in sentry if able.

        :arg crash_id: a crash id
        :arg exc_info: the exc info as it comes from sys.exc_info()

        """
        if self.config.sentry and self.config.sentry.dsn:
            sentry_dsn = self.config.sentry.dsn
        else:
            sentry_dsn = None

        extra = {}
        if crash_id:
            extra['crash_id'] = crash_id

        sentry_client.capture_error(sentry_dsn,
                                    self.logger,
                                    exc_info,
                                    extra=extra)
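
The DSN lookup degrades gracefully when the config tree has no sentry section. Here is the same fallback in isolation, with `SimpleNamespace` as a stand-in for the real config object:

    from types import SimpleNamespace

    def pick_dsn(config):
        # Same fallback as above: config.sentry.dsn when present, else None
        if config.sentry and config.sentry.dsn:
            return config.sentry.dsn
        return None

    with_dsn = SimpleNamespace(
        sentry=SimpleNamespace(dsn='https://key@sentry.example.com/1'))
    without_dsn = SimpleNamespace(sentry=None)

    print(pick_dsn(with_dsn))     # https://key@sentry.example.com/1
    print(pick_dsn(without_dsn))  # None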
Example #5
    def process_crash(self, raw_crash, raw_dumps, processed_crash):
        """Take a raw_crash and its associated raw_dumps and return a processed_crash

        If this throws an exception, the crash was not processed correctly.

        """
        # processor_meta_data will be used to ferry "inside information" to
        # transformation rules. Sometimes rules need a bit more extra
        # information about the transformation process itself.
        processor_meta_data = DotDict()
        processor_meta_data.processor_notes = [
            self.config.processor_name, self.__class__.__name__
        ]
        processor_meta_data.quit_check = self.quit_check
        processor_meta_data.processor = self
        processor_meta_data.config = self.config

        if "processor_notes" in processed_crash:
            original_processor_notes = [
                x.strip() for x in processed_crash.processor_notes.split(";")
            ]
            processor_meta_data.processor_notes.append(
                "earlier processing: %s" %
                processed_crash.get("started_datetime", 'Unknown Date'))
        else:
            original_processor_notes = []

        processed_crash.success = False
        processed_crash.started_datetime = utc_now()
        # for backwards compatibility:
        processed_crash.startedDateTime = processed_crash.started_datetime
        processed_crash.signature = 'EMPTY: crash failed to process'

        crash_id = raw_crash['uuid']

        # quit_check calls ought to be scattered around the code to allow
        # the processor to be responsive to requests to shut down.
        self.quit_check()

        self.logger.info('starting transform for crash: %s', crash_id)
        # logger.info() returns None, so take the timestamp explicitly
        processor_meta_data.started_timestamp = utc_now()

        # Apply rules; if a rule fails, capture the error and continue onward
        for rule in self.rules:
            try:
                rule.act(raw_crash, raw_dumps, processed_crash,
                         processor_meta_data)

            except Exception as exc:
                # If a rule throws an error, capture it and toss it in the
                # processor notes
                sentry_client.capture_error(logger=self.logger,
                                            extra={'crash_id': crash_id})
                # NOTE(willkg): notes are public, so we can't put exception
                # messages in them
                processor_meta_data.processor_notes.append(
                    'rule %s failed: %s' %
                    (rule.__class__.__name__, exc.__class__.__name__))

            self.quit_check()

        # The crash made it through the processor rules with no exceptions
        # raised, call it a success
        processed_crash.success = True

        # The processor notes are in the form of a list.  Join them all
        # together to make a single string
        processor_meta_data.processor_notes.extend(original_processor_notes)
        processed_crash.processor_notes = '; '.join(
            processor_meta_data.processor_notes)
        completed_datetime = utc_now()
        processed_crash.completed_datetime = completed_datetime

        # For backwards compatibility
        processed_crash.completeddatetime = completed_datetime

        self.logger.info("finishing %s transform for crash: %s",
                         'successful' if processed_crash.success else 'failed',
                         crash_id)
        return processed_crash
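
The rule loop only requires each rule to expose an `act()` method taking the four pipeline objects; a rule that raises is captured and recorded as a processor note rather than aborting the crash. A hypothetical rule satisfying that contract, with `SimpleNamespace` standing in for the meta object:

    from types import SimpleNamespace

    class ProductRule:
        # Hypothetical transform rule matching the act() contract above
        def act(self, raw_crash, raw_dumps, processed_crash,
                processor_meta_data):
            # Copy a field from the raw crash into the processed crash; any
            # exception raised here becomes a processor note upstream
            processed_crash['product'] = raw_crash.get('ProductName', 'unknown')
            processor_meta_data.processor_notes.append('ProductRule applied')

    meta = SimpleNamespace(processor_notes=[])
    processed = {}
    ProductRule().act({'ProductName': 'Firefox'}, {}, processed, meta)
    print(processed, meta.processor_notes)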
Example #6
    def process_crash(self, raw_crash, raw_dumps, processed_crash):
        """Take a raw_crash and its associated raw_dumps and return a processed_crash

        If this throws an exception, the crash was not processed correctly.

        """
        # processor_meta_data will be used to ferry "inside information" to
        # transformation rules. Sometimes rules need a bit more extra
        # information about the transformation process itself.
        processor_meta_data = DotDict()
        processor_meta_data.processor_notes = [
            self.config.processor_name,
            self.__class__.__name__
        ]
        processor_meta_data.quit_check = self.quit_check
        processor_meta_data.processor = self
        processor_meta_data.config = self.config

        if "processor_notes" in processed_crash:
            original_processor_notes = [
                x.strip() for x in processed_crash.processor_notes.split(";")
            ]
            processor_meta_data.processor_notes.append(
                "earlier processing: %s" % processed_crash.get(
                    "started_datetime",
                    'Unknown Date'
                )
            )
        else:
            original_processor_notes = []

        processed_crash.success = False
        processed_crash.started_datetime = utc_now()
        # for backwards compatibility:
        processed_crash.startedDateTime = processed_crash.started_datetime
        processed_crash.signature = 'EMPTY: crash failed to process'

        crash_id = raw_crash['uuid']

        # quit_check calls ought to be scattered around the code to allow
        # the processor to be responsive to requests to shut down.
        self.quit_check()

        self.logger.info('starting transform for crash: %s', crash_id)
        # logger.info() returns None, so take the timestamp explicitly
        processor_meta_data.started_timestamp = utc_now()

        # Apply rules; if a rule fails, capture the error and continue onward
        for rule in self.rules:
            try:
                rule.act(raw_crash, raw_dumps, processed_crash, processor_meta_data)

            except Exception as exc:
                # If a rule throws an error, capture it and toss it in the
                # processor notes
                sentry_client.capture_error(
                    sentry_dsn=self.sentry_dsn,
                    logger=self.logger,
                    extra={'crash_id': crash_id}
                )
                # NOTE(willkg): notes are public, so we can't put exception
                # messages in them
                processor_meta_data.processor_notes.append(
                    'rule %s failed: %s' % (rule.__class__.__name__, exc.__class__.__name__)
                )

            self.quit_check()

        # The crash made it through the processor rules with no exceptions
        # raised, call it a success
        processed_crash.success = True

        # The processor notes are in the form of a list.  Join them all
        # together to make a single string
        processor_meta_data.processor_notes.extend(original_processor_notes)
        processed_crash.processor_notes = '; '.join(processor_meta_data.processor_notes)
        completed_datetime = utc_now()
        processed_crash.completed_datetime = completed_datetime

        # For backwards compatibility
        processed_crash.completeddatetime = completed_datetime

        self.logger.info(
            "finishing %s transform for crash: %s",
            'successful' if processed_crash.success else 'failed',
            crash_id
        )
        return processed_crash
Example #7
File: cronrun.py  Project: xincun/socorro
    def _run_one(self, job_spec, force=False, cmd_args=None):
        """Run a single job.

        :arg job_spec: job spec dict
        :arg force: forces the job to run even if it's not time to run
        :arg cmd_args: list of "--key=val" positional args as you would pass
            them on a command line

        """
        cmd_args = cmd_args or []
        cmd = job_spec['cmd']

        # Make sure we have a job record before trying to run anything
        job = Job.objects.get_or_create(app_name=cmd)[0]

        if force:
            # If we're forcing the job, just run it without the bookkeeping.
            return self._run_job(job_spec, *cmd_args)

        # Figure out whether this job should be run now
        seconds = convert_frequency(
            job_spec.get('frequency', DEFAULT_FREQUENCY))
        if not time_to_run(job_spec, job):
            logger.info("skipping %s: not time to run", cmd)
            return

        logger.info('about to run %s', cmd)

        now = timezone.now()
        log_run = True
        exc_type = exc_value = exc_tb = None
        start_time = None
        run_time = None

        with self.lock_job(job_spec['cmd']):
            try:
                cmd_kwargs = {}
                last_success = job.last_success

                # Backfill jobs can have multiple run-times, so we iterate
                # through all possible ones until either we get them all done
                # or it dies
                for run_time in get_run_times(job_spec, job.last_success):
                    if job_spec.get('backfill', False):
                        # If "backfill" is in the spec, then we want to pass in
                        # run_time as an argument
                        cmd_kwargs['run_time'] = format_datetime(run_time)

                    if job_spec.get('last_success', False):
                        # If "last_success" is in the spec, we want to pass in
                        # the last_success as an argument
                        cmd_kwargs['last_success'] = format_datetime(
                            last_success)

                    logger.info('running: %s %s %s', cmd, cmd_args, cmd_kwargs)

                    start_time = time.time()
                    self._run_job(job_spec, *cmd_args, **cmd_kwargs)
                    end_time = time.time()

                    logger.info('successfully ran %s on %s', cmd, run_time)
                    last_success = run_time
                    self._remember_success(cmd, last_success,
                                           end_time - start_time)

            except OngoingJobError:
                log_run = False
                raise

            except Exception:
                end_time = time.time()
                exc_type, exc_value, exc_tb = sys.exc_info()
                single_line_tb = (
                    ''.join(traceback.format_exception(*sys.exc_info()))
                    .replace('\n', '\\n')
                )

                # Send error to sentry, log it, and remember the failure
                capture_error(settings.SENTRY_DSN)
                logger.error('error when running %s (%s): %s', cmd, run_time,
                             single_line_tb)
                self._remember_failure(cmd, end_time - start_time, exc_type,
                                       exc_value, exc_tb)

            finally:
                if log_run:
                    self._log_run(cmd, seconds, job_spec.get('time'), run_time,
                                  now, exc_type, exc_value, exc_tb)
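
For backfill specs, each loop iteration forwards `run_time` (and optionally `last_success`) as formatted keyword arguments to `_run_job`. A sketch of a job entry point shaped to receive them; the job, its arguments, and the datetime format are all hypothetical:

    def upload_report(*cmd_args, run_time=None, last_success=None):
        # run_time arrives per iteration when the spec sets backfill=True;
        # last_success arrives when the spec asks for it
        print('args:', cmd_args, 'window:', run_time, 'since:', last_success)

    upload_report('--dry_run=true',
                  run_time='2024-01-01 00:00:00',
                  last_success='2023-12-31 00:00:00')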