Example #1
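Every example on this page calls a helper named fetch_text whose definition is not shown. As a reading aid, here is a minimal sketch consistent with how the examples use it: it returns the response body as text and raises requests' HTTPError on a bad status (Examples #3 and #4 catch exactly that). This is an assumption about the helper, not the project's actual code.

import requests

def fetch_text(url):
    # Hypothetical sketch of the helper the examples share: fetch a URL and
    # return the body as text, raising HTTPError on 4xx/5xx responses.
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    return response.text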
import pytest
import responses

def test_block_unmocked_requests():
    """Ensure the `block_unmocked_requests` fixture prevents requests from hitting the network."""
    url = 'https://example.com'

    # pytest's old `message` argument never checked the exception text and was
    # removed in pytest 4.0; `match` asserts on the error message itself.
    with pytest.raises(RuntimeError, match='Tests must mock all HTTP requests!'):
        fetch_text(url)

    with responses.RequestsMock() as rsps:
        rsps.add(responses.GET, url, body='Mocked requests still work')
        text = fetch_text(url)
        assert text == 'Mocked requests still work'
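The block_unmocked_requests fixture exercised above is also not shown. A common way to build such a fixture is to patch requests' transport layer so anything that reaches the real network raises; the sketch below is hedged guesswork, not the project's implementation. It coexists with responses because RequestsMock installs its own patch of HTTPAdapter.send for the duration of the context, so mocked calls still succeed.

import pytest

@pytest.fixture
def block_unmocked_requests(monkeypatch):
    """Fail loudly if a test lets an HTTP request reach the real network."""
    def blocker(*args, **kwargs):
        raise RuntimeError('Tests must mock all HTTP requests!')
    # Patch the lowest-level send so every unmocked request blows up.
    monkeypatch.setattr('requests.adapters.HTTPAdapter.send', blocker)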
Example #2
    def handle(self, *args, **options):
        """Fetch a structured failure log and store its lines as FailureLine rows."""
        try:
            repository_name, job_guid, log_url_or_obj = args
        except ValueError:
            raise CommandError('3 arguments required, %s given' % len(args))

        try:
            log_obj = json.loads(log_url_or_obj)
        except ValueError:
            try:
                log_obj = expand_log_url(repository_name, job_guid, log_url_or_obj)
            except ValueError:
                # This log_url either isn't in the database, or there are
                # multiple possible URLs in the database, so we will be unable
                # to update the pending state.
                log_obj = None

        if log_obj:
            log_url = log_obj["url"]
        else:
            log_url = log_url_or_obj

        log_text = fetch_text(log_url)

        if not log_text:
            return

        log_content = StringIO(log_text)

        try:
            repository = Repository.objects.get(name=repository_name, active_status='active')
        except Repository.DoesNotExist:
            raise CommandError('Unknown repository %s' % repository_name)

        log_iter = reader.read(log_content)

        failure_lines_cutoff = settings.FAILURE_LINES_CUTOFF
        log_iter = list(islice(log_iter, failure_lines_cutoff + 1))

        if len(log_iter) > failure_lines_cutoff:
            # Alter the N+1th log line to indicate the list was truncated.
            log_iter[-1].update(action='truncated')

        with transaction.atomic():
            FailureLine.objects.bulk_create(
                [FailureLine(repository=repository, job_guid=job_guid, **failure_line)
                 for failure_line in log_iter]
            )

        if log_obj is not None:
            with JobsModel(repository_name) as jm:
                jm.update_job_log_url_status(log_obj["id"], "parsed")
        else:
            logger.warning("Unable to set parsed state of job log")
Example #3
def fetch_log(job_log):
    try:
        log_text = fetch_text(job_log.url)
    except HTTPError as e:
        job_log.update_status(JobLog.FAILED)
        if e.response is not None and e.response.status_code in (403, 404):
            logger.warning("Unable to retrieve log for %s: %s", job_log.url, e)
            return
        raise

    if not log_text:
        return

    return (json.loads(item) for item in log_text.splitlines())
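The log here is newline-delimited JSON, so fetch_log hands back a lazy generator of parsed objects rather than materialising the whole log. A hedged sketch of exercising it with responses, using a hypothetical stand-in for the JobLog model:

import json
import responses

class FakeJobLog(object):
    # Hypothetical stand-in: just enough surface for fetch_log to use.
    def __init__(self, url):
        self.url = url

    def update_status(self, status):
        self.status = status

@responses.activate
def test_fetch_log_parses_each_line():
    url = 'https://example.com/log'
    body = '\n'.join(json.dumps({'line': i}) for i in range(3))
    responses.add(responses.GET, url, body=body)

    parsed = list(fetch_log(FakeJobLog(url)))
    assert [item['line'] for item in parsed] == [0, 1, 2]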
Example #4
def store_failure_lines(repository_name, job_guid, job_log):
    """Fetch a job's structured failure log and store its lines as FailureLine rows."""
    try:
        log_text = fetch_text(job_log.url)
    except HTTPError as e:
        job_log.status = JobLog.FAILED
        job_log.save()
        if e.response is not None and e.response.status_code in (403, 404):
            logger.warning("Unable to retrieve log for %s: %s", job_log.url, e)
            return
        raise

    if not log_text:
        return

    log_iter = (json.loads(item) for item in log_text.splitlines())

    try:
        repository = Repository.objects.get(name=repository_name,
                                            active_status='active')
    except Repository.DoesNotExist:
        logger.error("Unknown repository %s" % repository_name)
        raise

    failure_lines_cutoff = settings.FAILURE_LINES_CUTOFF
    log_iter = list(islice(log_iter, failure_lines_cutoff + 1))

    if len(log_iter) > failure_lines_cutoff:
        # Alter the N+1th log line to indicate the list was truncated.
        log_iter[-1].update(action='truncated')

    retry = False
    with transaction.atomic():
        try:
            create(repository, job_guid, job_log, log_iter)
        except OperationalError as e:
            logger.warning("Got OperationalError inserting failure_line")
            # Retry iff this is MySQL error 1366, "Incorrect string value",
            # which the astral-character replacement below can resolve.
            retry = e.args[0] == 1366

    logger.info("store failure lines 3")
    if retry:
        with transaction.atomic():
            logger.info("Retrying insert with astral character replacement")
            # MySQL's legacy utf8 charset cannot store 4-byte (astral-plane)
            # characters, so replace them before retrying the insert.
            log_iter = list(replace_astral(log_iter))
            create(repository, job_guid, job_log, log_iter)
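replace_astral is not shown either. The motivation: MySQL's legacy utf8 charset stores at most three bytes per character, so code points outside the Basic Multilingual Plane (above U+FFFF) trigger error 1366, "Incorrect string value". A hedged sketch of what such a helper might do, assuming each failure line is a dict whose string values may carry astral characters:

import re

ASTRAL_RE = re.compile(u'[\U00010000-\U0010FFFF]')

def replace_astral(log_iter):
    # Hypothetical sketch: swap astral-plane characters for U+FFFD so each
    # row fits in a 3-byte-utf8 MySQL column.
    for line in log_iter:
        yield {key: (ASTRAL_RE.sub(u'\ufffd', value)
                     if isinstance(value, str) else value)
               for key, value in line.items()}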
Example #5
    def handle(self, *args, **options):
        """Fetch a log by URL and store its failure lines for the given job."""
        try:
            log_url, job_guid, repository_name = args
        except ValueError:
            raise CommandError('3 arguments required, %s given' % len(args))

        log_text = fetch_text(log_url)

        if not log_text:
            return

        log_content = StringIO(log_text)

        try:
            repository = Repository.objects.get(name=repository_name,
                                                active_status='active')
        except Repository.DoesNotExist:
            raise CommandError('Unknown repository %s' % repository_name)

        log_iter = reader.read(log_content)

        failure_lines_cutoff = settings.FAILURE_LINES_CUTOFF
        log_iter = list(islice(log_iter, failure_lines_cutoff + 1))

        if len(log_iter) > failure_lines_cutoff:
            # Alter the N+1th log line to indicate the list was truncated.
            log_iter[-1].update(action='truncated')

        with JobsModel(repository_name) as jobs_model:
            job_id = jobs_model.get_job_ids_by_guid([job_guid])

            if not job_id:
                raise CommandError(
                    'No job found with guid %s in the %s repository' %
                    (job_guid, repository_name))

        with transaction.atomic():
            FailureLine.objects.bulk_create([
                FailureLine(repository=repository,
                            job_guid=job_guid,
                            **failure_line) for failure_line in log_iter
            ])
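Old-style management commands like this one take positional args in a fixed order; note this revision expects log_url, job_guid, repository_name, while Example #2 expects the reverse order. Assuming a hypothetical command name (the real one comes from the module's filename under management/commands/), it can be driven from code or tests via call_command:

from django.core.management import call_command

# 'store_failure_lines' is a guessed name, and the argument values are
# placeholders; only the order matters to the handle() above.
call_command('store_failure_lines',
             'https://example.com/log', 'job-guid-1234', 'mozilla-central')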
Example #6
    def handle(self, *args, **options):
        """Benchmark ElasticSearch-based matching over recent failure lines."""
        if options["min_id"] is None:
            options["min_id"] = (FailureLine.objects
                                 .filter(action="test_result")
                                 .exclude(message=None)
                                 .exclude(message="")
                                 .order_by("-id")
                                 .values_list("id", flat=True)[options["num_lines"]])

        failure_lines = (FailureLine.objects
                         .filter(id__gt=options["min_id"], action="test_result")
                         .exclude(message=None)
                         .exclude(message="")
                         .order_by("id")[:options["num_lines"]])

        self.stderr.write("Using min id %d" % options["min_id"])
        self.stderr.write("Got %d lines" % len(failure_lines))

        t0 = time.time()
        fetch_text(settings.ELASTIC_SEARCH["url"])
        self.stderr.write("Simple GET took %dms" % ((time.time() - t0) * 1000))

        failure_lines_by_job = defaultdict(list)
        for line in failure_lines:
            failure_lines_by_job[line.job_guid].append(line)

        matcher = matchers.ElasticSearchTestMatcher(None)
        all_matches = {}

        if options["profile"]:
            import cProfile
            prof = cProfile.Profile()
            prof.enable()

        total_lines = 0
        t0 = time.time()
        for job_guid, failure_lines in iteritems(failure_lines_by_job):
            total_lines += len(failure_lines)
            matches = matcher(failure_lines)
            all_matches[job_guid] = matches

        duration = 1000 * (time.time() - t0)
        self.stderr.write("Total lines %d" % total_lines)
        self.stderr.write("Called ElasticSearch %i times" % matcher.calls)
        self.stderr.write("Took %dms" % duration)

        if options["profile"]:
            prof.disable()
            prof.dump_stats(options["profile"])

        json_data = {}
        for key, values in iteritems(all_matches):
            json_values = [[item[0].id, item[1].id, item[2]]
                           for item in values]
            json_data[key] = json_values

        json_string = json.dumps(json_data)
        if options["ref_data"]:
            with open(options["ref_data"]) as f:
                ref_data = json.load(f)
            this_data = json.loads(json_string)
            if this_data == ref_data:
                self.stderr.write("Output matches refdata")
            else:
                self.stderr.write("Output does not match refdata")

        self.stdout.write(json_string)
Example #7
    def handle(self, *args, **options):
        if options["min_id"] is None:
            options["min_id"] = (FailureLine.objects
                                 .filter(action="test_result")
                                 .exclude(message=None)
                                 .exclude(message="")
                                 .order_by("-id")
                                 .values_list("id", flat=True)[options["num_lines"]])

        failure_lines = (FailureLine.objects
                         .filter(id__gt=options["min_id"],
                                 action="test_result")
                         .exclude(message=None)
                         .exclude(message="")
                         .order_by("id")[:options["num_lines"]])

        self.stderr.write("Using min id %d" % options["min_id"])
        self.stderr.write("Got %d lines" % len(failure_lines))

        t0 = time.time()
        fetch_text(settings.ELASTIC_SEARCH["url"])
        self.stderr.write("Simple GET took %dms" % ((time.time() - t0) * 1000))

        failure_lines_by_job = defaultdict(list)
        for line in failure_lines:
            failure_lines_by_job[line.job_guid].append(line)

        matcher = matchers.ElasticSearchTestMatcher(None)
        all_matches = {}

        if options["profile"]:
            import cProfile
            prof = cProfile.Profile()
            prof.enable()

        total_lines = 0
        t0 = time.time()
        for job_guid, failure_lines in failure_lines_by_job.items():
            total_lines += len(failure_lines)
            matches = matcher(failure_lines)
            all_matches[job_guid] = matches

        duration = 1000 * (time.time() - t0)
        self.stderr.write("Total lines %d" % total_lines)
        self.stderr.write("Total lines in matcher %d" % matcher.lines)
        self.stderr.write("Called ElasticSearch %i times" % matcher.calls)
        self.stderr.write("Took %dms" % duration)

        if options["profile"]:
            prof.disable()
            prof.dump_stats(options["profile"])

        json_data = {}
        for key, values in all_matches.items():
            json_values = [[item[0].id, item[1].id, item[2]] for item in values]
            json_data[key] = json_values

        json_string = json.dumps(json_data)
        if options["ref_data"]:
            with open(options["ref_data"]) as f:
                ref_data = json.load(f)
            this_data = json.loads(json_string)
            if this_data == ref_data:
                self.stderr.write("Output matches refdata")
            else:
                self.stderr.write("Output does not match refdata")

        self.stdout.write(json_string)
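When the profile option is set, the command dumps raw cProfile stats to the named file. Reading such a dump afterwards takes only a couple of lines of pstats; the filename below is a stand-in for whatever was passed as options["profile"]:

import pstats

stats = pstats.Stats('matcher.prof')  # stand-in for the profile dump path
stats.sort_stats('cumulative').print_stats(20)  # top 20 by cumulative time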