# Example no. 1 (score: 0)
    def test_calls_run_scanner_with_mock(self, run_scanner_mock):
        """run_customs() should delegate to the generic scanner runner,
        passing the CUSTOMS scanner id and the configured API endpoint."""
        some_upload_pk = 1234

        run_customs(some_upload_pk)

        assert run_scanner_mock.called
        run_scanner_mock.assert_called_once_with(
            some_upload_pk,
            scanner=CUSTOMS,
            api_url=self.API_URL,
            api_key=self.API_KEY,
        )
# Example no. 2 (score: 0)
    def test_does_not_run_when_results_contain_errors(self, run_scanner_mock):
        """When the validation results already contain errors, the customs
        scanner must be skipped and the results returned untouched."""
        self.results['errors'] = 1

        returned = run_customs(self.results, self.upload_pk)

        assert not run_scanner_mock.called
        assert returned == self.results
# Example no. 3 (score: 0)
    def test_calls_run_scanner_with_mock(self, run_scanner_mock):
        """run_customs() should forward results and upload pk to the
        generic scanner runner and hand back whatever it returns."""
        run_scanner_mock.return_value = self.results

        returned = run_customs(self.results, self.upload_pk)

        assert returned == self.results
        assert run_scanner_mock.called
        run_scanner_mock.assert_called_once_with(
            self.results, self.upload_pk,
            scanner=CUSTOMS,
            api_url=self.API_URL,
            api_key=self.API_KEY)
# Example no. 4 (score: 0)
def handle_upload_validation_result(
        results, upload_pk, channel, is_mozilla_signed):
    """Annotate a set of validation results and save them to the given
    FileUpload instance.

    Also synchronously runs the optional scanners (yara, customs, wat)
    when their waffle switches are enabled and the results contain no
    errors, checks the uploaded file for leaked API keys, and records
    statsd timing metrics for the whole upload-to-validation pipeline.

    NOTE(review): `is_mozilla_signed` is not referenced anywhere in this
    body — presumably kept for task-signature compatibility; confirm.
    """
    upload = FileUpload.objects.get(pk=upload_pk)

    if waffle.switch_is_active('enable-yara') and results['errors'] == 0:
        # Run Yara. This cannot be asynchronous because we have no way to know
        # whether the task will complete before we attach a `Version` to it
        # later in the submission process... Because we cannot use `chord`
        # reliably right now (requires Celery 4.2+), this task is actually not
        # run as a task, it's a simple function call.
        #
        # TODO: use `run_yara` as a task in the submission chord once it is
        # possible. See: https://github.com/mozilla/addons-server/issues/12216
        run_yara(upload.pk)

    if waffle.switch_is_active('enable-customs') and results['errors'] == 0:
        # Run customs. This cannot be asynchronous because we have no way to
        # know whether the task will complete before we attach a `Version` to
        # it later in the submission process... Because we cannot use `chord`
        # reliably right now (requires Celery 4.2+), this task is actually not
        # run as a task, it's a simple function call.
        #
        # TODO: use `run_customs` as a task in the submission chord once it is
        # possible. See: https://github.com/mozilla/addons-server/issues/12217
        run_customs(upload.pk)

    if waffle.switch_is_active('enable-wat') and results['errors'] == 0:
        # Run wat. This cannot be asynchronous because we have no way to know
        # whether the task will complete before we attach a `Version` to it
        # later in the submission process... Because we cannot use `chord`
        # reliably right now (requires Celery 4.2+), this task is actually not
        # run as a task, it's a simple function call.
        #
        # TODO: use `run_wat` as a task in the submission chord once it is
        # possible. See: https://github.com/mozilla/addons-server/issues/12224
        run_wat(upload.pk)

    # Check for API keys in submissions.
    # Make sure it is extension-like, e.g. no search plugin
    # Best effort: a corrupt/non-zip upload or a validation error simply
    # skips the check rather than failing the whole task.
    try:
        results = check_for_api_keys_in_file(results=results, upload=upload)
    except (ValidationError, BadZipfile, IOError):
        pass

    # Annotate results with potential webext warnings on new versions.
    if upload.addon_id and upload.version:
        annotations.annotate_webext_incompatibilities(
            results=results, file_=None, addon=upload.addon,
            version_string=upload.version, channel=channel)

    upload.validation = json.dumps(results)
    upload.save()  # We want to hit the custom save().

    # Track the time it took from first upload through validation
    # until the results were processed and saved.
    # NOTE(review): datetime.now() is naive; utc_millesecs_from_epoch
    # presumably normalizes both timestamps to UTC millis — confirm.
    upload_start = utc_millesecs_from_epoch(upload.created)
    now = datetime.datetime.now()
    now_ts = utc_millesecs_from_epoch(now)
    delta = now_ts - upload_start
    statsd.timing('devhub.validation_results_processed', delta)

    if not storage.exists(upload.path):
        # TODO: actually fix this so we can get stats. It seems that
        # the file maybe gets moved but it needs more investigation.
        log.warning('Scaled upload stats were not tracked. File is '
                    'missing: {}'.format(upload.path))
        return

    size = Decimal(storage.size(upload.path))
    megabyte = Decimal(1024 * 1024)

    # Stash separate metrics for small / large files.
    quantifier = 'over' if size > megabyte else 'under'
    statsd.timing(
        'devhub.validation_results_processed_{}_1mb'.format(quantifier), delta)

    # Scale the upload / processing time by package size (in MB)
    # so we can normalize large XPIs which naturally take longer to validate.
    scaled_delta = None
    size_in_mb = size / megabyte
    if size > 0:
        # If the package is smaller than 1MB, don't scale it. This should
        # help account for validator setup time.
        unit = size_in_mb if size > megabyte else Decimal(1)
        scaled_delta = Decimal(delta) / unit
        statsd.timing('devhub.validation_results_processed_per_mb',
                      scaled_delta)

    log.info('Time to process and save upload validation; '
             'upload.pk={upload}; processing_time={delta}; '
             'scaled_per_mb={scaled}; upload_size_in_mb={size_in_mb}; '
             'created={created}; now={now}'
             .format(delta=delta, upload=upload.pk,
                     created=upload.created, now=now,
                     scaled=scaled_delta, size_in_mb=size_in_mb))