Example #1
0
    def log_is_unstable(self, log_f):
        """Return True if the target test reported more than one distinct
        status (overall or for any subtest) in the given log file, None if
        the log contained no usable entries for the target."""
        log_f.seek(0)

        # Maps subtest name (None for the test itself) -> set of statuses seen.
        seen_statuses = defaultdict(set)

        def on_status(entry):
            if entry["test"] == self.target:
                seen_statuses[entry["subtest"]].add(entry["status"])

        def on_end(entry):
            if entry["test"] == self.target:
                seen_statuses[None].add(entry["status"])

        action_map = {"test_status": on_status,
                      "test_end": on_end}
        reader.each_log(reader.read(log_f), action_map)

        logger.debug(str(seen_statuses))

        if not seen_statuses:
            logger.error("Didn't get any useful output from wptrunner")
            # Dump the raw entries to help diagnose why nothing matched.
            log_f.seek(0)
            for entry in reader.read(log_f):
                logger.debug(entry)
            return None

        # More than one status for any (sub)test means the result is unstable.
        return any(len(status_set) > 1
                   for status_set in seen_statuses.itervalues())
    def handle(self, *args, **options):
        """Fetch a structured log over HTTP and store its failure lines.

        args: (log_url, job_guid, repository_name).
        Raises CommandError on a wrong argument count, an unknown
        repository, or an unknown job guid.
        """
        # Guard clause: exactly three positional arguments are required.
        if len(args) != 3:
            raise CommandError('3 arguments required, %s given' % len(args))
        log_response = requests.get(args[0], timeout=30)
        log_response.raise_for_status()

        if log_response.text:
            log_content = StringIO(log_response.text)

            try:
                repository = Repository.objects.get(name=args[2], active_status='active')
            except Repository.DoesNotExist:
                raise CommandError('Unknown repository %s' % args[2])

            log_iter = reader.read(log_content)

            # Validate the job guid exists before writing anything.
            with JobsModel(args[2]) as jobs_model:
                job_id = jobs_model.get_job_ids_by_guid([args[1]])
                if not job_id:
                    raise CommandError('No job found with guid %s in the %s repository' % (args[1], args[2]))

            FailureLine.objects.bulk_create(
                [FailureLine(repository=repository, job_guid=args[1], **failure_line)
                 for failure_line in log_iter]
            )
    def test_handler(self):
        """handle_log dispatches each log entry to the handler method named
        after its action, exactly once per entry."""
        entries = [{"action": "action_0", "data": "data_0"},
                   {"action": "action_1", "data": "data_1"}]

        log_file = self.to_file_like(entries)

        outer = self

        class CountingHandler(reader.LogHandler):

            def __init__(self):
                self.action_0_count = 0
                self.action_1_count = 0

            def action_0(self, item):
                outer.assertEquals(item["action"], "action_0")
                self.action_0_count += 1

            def action_1(self, item):
                outer.assertEquals(item["action"], "action_1")
                self.action_1_count += 1

        counting_handler = CountingHandler()
        reader.handle_log(reader.read(log_file), counting_handler)

        self.assertEquals(counting_handler.action_0_count, 1)
        self.assertEquals(counting_handler.action_1_count, 1)
Example #4
0
    def test_each_log(self):
        """each_log routes every entry to the callback registered for its
        action name."""
        entries = [{"action": "action_0", "data": "data_0"},
                   {"action": "action_1", "data": "data_1"}]

        log_file = self.to_file_like(entries)

        counts = {"action_0": 0, "action_1": 0}

        def bump_by_one(item):
            counts[item["action"]] += 1

        def bump_by_two(item):
            counts[item["action"]] += 2

        callbacks = {"action_0": bump_by_one,
                     "action_1": bump_by_two}
        reader.each_log(reader.read(log_file), callbacks)

        # Each callback ran once, applying its distinct increment.
        self.assertEquals({"action_0": 1, "action_1": 2}, counts)
    def handle(self, *args, **options):
        """Fetch a structured log, truncate it at FAILURE_LINES_CUTOFF
        entries, and store the failure lines atomically.

        args: (log_url, job_guid, repository_name).
        Raises CommandError on a wrong argument count, an unknown
        repository, or an unknown job guid.
        """
        # Guard clause: exactly three positional arguments are required.
        if len(args) != 3:
            raise CommandError('3 arguments required, %s given' % len(args))
        log_response = requests.get(args[0], timeout=30)
        log_response.raise_for_status()

        if log_response.text:
            log_content = StringIO(log_response.text)

            try:
                repository = Repository.objects.get(name=args[2], active_status='active')
            except Repository.DoesNotExist:
                raise CommandError('Unknown repository %s' % args[2])

            log_iter = reader.read(log_content)

            # Read one entry past the cutoff so truncation can be detected.
            failure_lines_cutoff = settings.FAILURE_LINES_CUTOFF
            log_iter = list(islice(log_iter, failure_lines_cutoff + 1))

            if len(log_iter) > failure_lines_cutoff:
                # Alter the N+1th log line to indicate the list was truncated.
                log_iter[-1].update(action='truncated')

            # Validate the job guid exists before writing anything.
            with JobsModel(args[2]) as jobs_model:
                job_id = jobs_model.get_job_ids_by_guid([args[1]])

                if not job_id:
                    raise CommandError('No job found with guid %s in the %s repository' % (args[1], args[2]))

            with transaction.atomic():
                FailureLine.objects.bulk_create(
                    [FailureLine(repository=repository, job_guid=args[1], **failure_line)
                     for failure_line in log_iter]
                )
Example #6
0
    def test_handler(self):
        """handle_log calls the handler method matching each entry's action
        exactly once."""
        entries = [{"action": "action_0", "data": "data_0"},
                   {"action": "action_1", "data": "data_1"}]

        log_file = self.to_file_like(entries)

        testcase = self

        class TallyHandler(reader.LogHandler):
            def __init__(self):
                self.action_0_count = 0
                self.action_1_count = 0

            def action_0(self, item):
                testcase.assertEquals(item["action"], "action_0")
                self.action_0_count += 1

            def action_1(self, item):
                testcase.assertEquals(item["action"], "action_1")
                self.action_1_count += 1

        tally = TallyHandler()
        reader.handle_log(reader.read(log_file), tally)

        self.assertEquals(tally.action_0_count, 1)
        self.assertEquals(tally.action_1_count, 1)
    def handle(self, *args, **options):
        """Fetch a structured log, truncate it at FAILURE_LINES_CUTOFF
        entries, and store the failure lines.

        args: (log_url, job_guid, repository_name).
        Raises CommandError on a wrong argument count, an unknown
        repository, or an unknown job guid.
        """
        # Guard clause: exactly three positional arguments are required.
        if len(args) != 3:
            raise CommandError('3 arguments required, %s given' % len(args))
        log_response = requests.get(args[0], timeout=30)
        log_response.raise_for_status()

        if log_response.text:
            log_content = StringIO(log_response.text)

            try:
                repository = Repository.objects.get(name=args[2], active_status='active')
            except Repository.DoesNotExist:
                raise CommandError('Unknown repository %s' % args[2])

            log_iter = reader.read(log_content)

            # Read one entry past the cutoff so truncation can be detected.
            failure_lines_cutoff = settings.FAILURE_LINES_CUTOFF
            log_iter = list(islice(log_iter, failure_lines_cutoff + 1))

            if len(log_iter) > failure_lines_cutoff:
                # Alter the N+1th log line to indicate the list was truncated.
                log_iter[-1].update(action='truncated')

            # Validate the job guid exists before writing anything.
            with JobsModel(args[2]) as jobs_model:
                job_id = jobs_model.get_job_ids_by_guid([args[1]])

                if not job_id:
                    raise CommandError('No job found with guid %s in the %s repository' % (args[1], args[2]))

            # Insert all failure lines atomically, matching the sibling
            # implementations of this command.
            with transaction.atomic():
                FailureLine.objects.bulk_create(
                    [FailureLine(repository=repository, job_guid=args[1], **failure_line)
                     for failure_line in log_iter]
                )
Example #8
0
    def test_imap_log(self):
        """imap_log lazily maps each entry through the function registered
        for its action, preserving order."""
        entries = [{"action": "action_0", "data": "data_0"},
                   {"action": "action_1", "data": "data_1"}]

        log_file = self.to_file_like(entries)

        def tag_action_0(item):
            return ("action_0", item["data"])

        def tag_action_1(item):
            return ("action_1", item["data"])

        mapped = reader.imap_log(reader.read(log_file),
                                 {"action_0": tag_action_0,
                                  "action_1": tag_action_1})
        self.assertEquals([("action_0", "data_0"), ("action_1", "data_1")],
                          list(mapped))
Example #9
0
def get_statuses(filenames):
    """Feed every named log file through one shared StatusHandler and
    return the statuses it accumulated."""
    handler = StatusHandler()

    for path in filenames:
        with open(path) as log_file:
            reader.handle_log(reader.read(log_file), handler)

    return handler.statuses
Example #10
0
def get_statuses(filenames):
    """Parse each named log file with a single shared StatusHandler and
    return the statuses accumulated across all of them."""
    handler = StatusHandler()

    for filename in filenames:
        # The same handler instance aggregates results over every file.
        with open(filename) as f:
            reader.handle_log(reader.read(f), handler)

    return handler.statuses
Example #11
0
def record_results(no_clean, *log_files):
    """Ingest one or more "run_name:filename" logs, cleaning each run
    first unless no_clean is set."""
    runs_cleaned = set()

    for spec in log_files:
        run_name, filename = spec.split(":", 1)
        # Clean each run at most once, and only when cleaning is enabled.
        if not no_clean and run_name not in runs_cleaned:
            clean_run(run_name)
            runs_cleaned.add(run_name)
        sys.stdout.write("Processing run %s\n" % run_name)

        with open(filename) as log_file:
            test_handler = LogHandlerTests()
            reader.handle_log(reader.read(log_file), test_handler)
            # Rewind so the same file can be parsed a second time.
            log_file.seek(0)
            result_handler = LogHandlerResults(run_name)
            reader.handle_log(reader.read(log_file), result_handler)
def process_results(log, iterations):
    """Parse a structured log and return (results, inconsistent), where
    inconsistent is a list of (test, subtest, result) triples for results
    that varied across the given number of iterations."""
    handler = LogHandler()
    reader.handle_log(reader.read(log), handler)
    results = handler.results
    inconsistent = [(test, subtest, result)
                    for test, test_results in results.iteritems()
                    for subtest, result in test_results.iteritems()
                    if is_inconsistent(result, iterations)]
    return results, inconsistent
Example #13
0
def process_results(log, iterations):
    """Parse a structured log and return overall results plus a list of
    (test, subtest, result) triples whose result varied across the given
    number of iterations."""
    inconsistent = []
    handler = LogHandler()
    reader.handle_log(reader.read(log), handler)
    results = handler.results
    for test, test_results in results.iteritems():
        for subtest, result in test_results.iteritems():
            # A result that differs between iterations marks the test unstable.
            if is_inconsistent(result, iterations):
                inconsistent.append((test, subtest, result))
    return results, inconsistent
Example #14
0
    def test_read(self):
        """read() yields every entry of the log, in order, unchanged."""
        expected = [{"action": "action_0", "data": "data_0"},
                    {"action": "action_1", "data": "data_1"}]

        log_file = self.to_file_like(expected)
        self.assertEquals(expected, list(reader.read(log_file)))
Example #15
0
def load_results(branch, results):
    """Download and parse the blobber logs for each result, returning a
    mapping of key -> first collected value from the handler's data."""
    handler = ResultHandler()
    for result in results:
        urls = get_blobber_urls(branch, result)
        if urls:
            # NOTE: the original assigned result["platform"] to an unused
            # local here; the dead assignment has been removed.
            for url in urls:
                f = get_file(url)
                # Tell the handler which result the next log belongs to.
                handler.result = result
                reader.handle_log(reader.read(f), handler)
    return {key: value[0] for key, value in handler.data.iteritems()}
    def handle(self, *args, **options):
        """Parse a structured failure log for a job and persist its lines.

        args: (repository_name, job_guid, log_url_or_obj) where the third
        argument is either a JSON-encoded job-log object or a bare URL.
        Raises CommandError on a wrong argument count or unknown repository.
        """
        try:
            repository_name, job_guid, log_url_or_obj = args
        except ValueError:
            raise CommandError('3 arguments required, %s given' % len(args))

        try:
            # The third argument may already be a JSON job-log object...
            log_obj = json.loads(log_url_or_obj)
        except ValueError:
            try:
                # ...or a bare URL to resolve against the database.
                log_obj = expand_log_url(repository_name, job_guid, log_url_or_obj)
            except ValueError:
                # This log_url either isn't in the database, or there are multiple possible
                # urls in the database, so we will be unable to update the pending state
                log_obj = None

        if log_obj:
            log_url = log_obj["url"]
        else:
            log_url = log_url_or_obj

        log_text = fetch_text(log_url)

        # Empty log: nothing to parse or store.
        if not log_text:
            return

        log_content = StringIO(log_text)

        try:
            repository = Repository.objects.get(name=repository_name, active_status='active')
        except Repository.DoesNotExist:
            raise CommandError('Unknown repository %s' % repository_name)

        log_iter = reader.read(log_content)

        # Read one entry past the cutoff so truncation can be detected.
        failure_lines_cutoff = settings.FAILURE_LINES_CUTOFF
        log_iter = list(islice(log_iter, failure_lines_cutoff+1))

        if len(log_iter) > failure_lines_cutoff:
            # Alter the N+1th log line to indicate the list was truncated.
            log_iter[-1].update(action='truncated')

        # Insert all failure lines atomically.
        with transaction.atomic():
            FailureLine.objects.bulk_create(
                [FailureLine(repository=repository, job_guid=job_guid, **failure_line)
                 for failure_line in log_iter]
            )

        if log_obj is not None:
            with JobsModel(repository_name) as jm:
                jm.update_job_log_url_status(log_obj["id"], "parsed")
        else:
            # Without a resolved log object we cannot mark the log as parsed.
            logger.warning("Unable to set parsed state of job log")
def process_results(log, iterations):
    """Process test log and return overall results and list of inconsistent tests."""
    handler = LogHandler()
    reader.handle_log(reader.read(log), handler)
    results = handler.results
    inconsistent = []
    for test_name, test in results.iteritems():
        # Check the test's own status, then each subtest's.
        if is_inconsistent(test["status"], iterations):
            inconsistent.append((test_name, None, test["status"], []))
        for subtest_name, subtest in test["subtests"].iteritems():
            subtest_status = subtest["status"]
            if is_inconsistent(subtest_status, iterations):
                inconsistent.append((test_name, subtest_name, subtest_status, subtest["messages"]))
    return results, inconsistent
def process_results(log, iterations):
    """Process test log and return overall results and list of inconsistent tests."""
    inconsistent = []
    handler = LogHandler()
    reader.handle_log(reader.read(log), handler)
    results = handler.results
    for test_name, test in results.iteritems():
        # A test-level status that varied across iterations is inconsistent;
        # such entries carry no subtest name and no messages.
        if is_inconsistent(test["status"], iterations):
            inconsistent.append((test_name, None, test["status"], []))
        for subtest_name, subtest in test["subtests"].iteritems():
            if is_inconsistent(subtest["status"], iterations):
                inconsistent.append((test_name, subtest_name, subtest["status"], subtest["messages"]))
    return results, inconsistent
    def test_imap_log(self):
        """imap_log applies the per-action mapping functions and yields the
        mapped values in log order."""
        entries = [{"action": "action_0", "data": "data_0"},
                   {"action": "action_1", "data": "data_1"}]

        log_file = self.to_file_like(entries)

        def wrap_action_0(item):
            return ("action_0", item["data"])

        def wrap_action_1(item):
            return ("action_1", item["data"])

        action_funcs = {"action_0": wrap_action_0,
                        "action_1": wrap_action_1}
        mapped = reader.imap_log(reader.read(log_file), action_funcs)
        self.assertEquals([("action_0", "data_0"), ("action_1", "data_1")],
                          list(mapped))
Example #20
0
def main(**kwargs):
    """Merge several structured logs into one output stream.

    kwargs: "output" (path, or None for stdout) and "files" (list of
    input log paths).
    """
    if kwargs["output"] is None:
        output = sys.stdout
        opened_output = False
    else:
        output = open(kwargs["output"], "w")
        opened_output = True
    try:
        # NOTE(review): the input file objects opened here are left open for
        # the lifetime of the readers, as in the original implementation.
        readers = [read(open(filename, 'r')) for filename in kwargs["files"]]
        # Emit a single merged suite_start entry covering all input logs.
        start_events = [process_until_suite_start(reader, output) for reader in readers]
        validate_start_events(start_events)
        merged_start_event = merge_start_events(start_events)
        dump_entry(fill_process_info(merged_start_event), output)

        end_events = [process_until_suite_end(reader, output) for reader in readers]
        dump_entry(fill_process_info(end_events[0]), output)

        # Drain any entries remaining after suite_end from each log.
        for reader in readers:
            for entry in reader:
                dump_entry(entry, output)
    finally:
        # Close the output file only if we opened it; never close sys.stdout.
        if opened_output:
            output.close()
    def handle(self, *args, **options):
        """Fetch a structured log and atomically store up to
        FAILURE_LINES_CUTOFF failure lines for the given job.

        args: (log_url, job_guid, repository_name).
        Raises CommandError on a wrong argument count, an unknown
        repository, or an unknown job guid.
        """
        try:
            log_url, job_guid, repository_name = args
        except ValueError:
            raise CommandError('3 arguments required, %s given' % len(args))

        log_text = fetch_text(log_url)

        # Empty log: nothing to parse or store.
        if not log_text:
            return

        log_content = StringIO(log_text)

        try:
            repository = Repository.objects.get(name=repository_name,
                                                active_status='active')
        except Repository.DoesNotExist:
            raise CommandError('Unknown repository %s' % repository_name)

        log_iter = reader.read(log_content)

        # Read one entry past the cutoff so truncation can be detected.
        failure_lines_cutoff = settings.FAILURE_LINES_CUTOFF
        log_iter = list(islice(log_iter, failure_lines_cutoff + 1))

        if len(log_iter) > failure_lines_cutoff:
            # Alter the N+1th log line to indicate the list was truncated.
            log_iter[-1].update(action='truncated')

        # Validate the job guid exists before writing anything.
        with JobsModel(repository_name) as jobs_model:
            job_id = jobs_model.get_job_ids_by_guid([job_guid])

            if not job_id:
                raise CommandError(
                    'No job found with guid %s in the %s repository' %
                    (job_guid, repository_name))

        # Insert all failure lines atomically.
        with transaction.atomic():
            FailureLine.objects.bulk_create([
                FailureLine(repository=repository,
                            job_guid=job_guid,
                            **failure_line) for failure_line in log_iter
            ])
    def test_each_log(self):
        """each_log invokes the callback registered for each entry's action
        exactly once per entry."""
        entries = [{"action": "action_0", "data": "data_0"},
                   {"action": "action_1", "data": "data_1"}]

        log_file = self.to_file_like(entries)

        totals = {"action_0": 0, "action_1": 0}

        def add_one(item):
            totals[item["action"]] += 1

        def add_two(item):
            totals[item["action"]] += 2

        reader.each_log(reader.read(log_file), {"action_0": add_one,
                                                "action_1": add_two})

        # Each callback ran once, applying its distinct increment.
        self.assertEquals({"action_0": 1, "action_1": 2}, totals)
Example #23
0
def process_results(log, iterations):
    """Process test log and return overall results and list of inconsistent tests."""
    inconsistent = []
    slow = []
    handler = LogHandler()
    reader.handle_log(reader.read(log), handler)
    results = handler.results
    for test_name, test in results.iteritems():
        # A test-level status that varied across iterations is inconsistent;
        # such entries carry no subtest name and no messages.
        if is_inconsistent(test["status"], iterations):
            inconsistent.append((test_name, None, test["status"], []))
        for subtest_name, subtest in test["subtests"].iteritems():
            if is_inconsistent(subtest["status"], iterations):
                inconsistent.append((test_name, subtest_name,
                                     subtest["status"], subtest["messages"]))

        # Also collect tests whose longest run approached the timeout.
        slow_status = find_slow_status(test)
        if slow_status is not None:
            slow.append(
                (test_name, slow_status, test["longest_duration"][slow_status],
                 test["timeout"]))

    return results, inconsistent, slow
    def handle(self, *args, **options):
        """Fetch a structured log and atomically store up to
        FAILURE_LINES_CUTOFF failure lines for the given job.

        args: (log_url, job_guid, repository_name).
        Raises CommandError on a wrong argument count, an unknown
        repository, or an unknown job guid.
        """
        try:
            log_url, job_guid, repository_name = args
        except ValueError:
            raise CommandError('3 arguments required, %s given' % len(args))

        log_text = fetch_text(log_url)

        # Empty log: nothing to parse or store.
        if not log_text:
            return

        log_content = StringIO(log_text)

        try:
            repository = Repository.objects.get(name=repository_name, active_status='active')
        except Repository.DoesNotExist:
            raise CommandError('Unknown repository %s' % repository_name)

        log_iter = reader.read(log_content)

        # Read one entry past the cutoff so truncation can be detected.
        failure_lines_cutoff = settings.FAILURE_LINES_CUTOFF
        log_iter = list(islice(log_iter, failure_lines_cutoff+1))

        if len(log_iter) > failure_lines_cutoff:
            # Alter the N+1th log line to indicate the list was truncated.
            log_iter[-1].update(action='truncated')

        # Validate the job guid exists before writing anything.
        with JobsModel(repository_name) as jobs_model:
            job_id = jobs_model.get_job_ids_by_guid([job_guid])

            if not job_id:
                raise CommandError('No job found with guid %s in the %s repository' % (job_guid, repository_name))

        # Insert all failure lines atomically.
        with transaction.atomic():
            FailureLine.objects.bulk_create(
                [FailureLine(repository=repository, job_guid=job_guid, **failure_line)
                 for failure_line in log_iter]
            )
Example #25
0
def process_results(log, iterations):
    """Process test log and return overall results and list of inconsistent tests."""
    inconsistent = []
    slow = []
    handler = LogHandler()
    reader.handle_log(reader.read(log), handler)
    results = handler.results
    for test_name, test in results.iteritems():
        # A test-level status that varied across iterations is inconsistent;
        # such entries carry no subtest name and no messages.
        if is_inconsistent(test["status"], iterations):
            inconsistent.append((test_name, None, test["status"], []))
        for subtest_name, subtest in test["subtests"].iteritems():
            if is_inconsistent(subtest["status"], iterations):
                inconsistent.append((test_name, subtest_name, subtest["status"], subtest["messages"]))

        # Also collect tests whose longest run approached the timeout.
        slow_status = find_slow_status(test)
        if slow_status is not None:
            slow.append((
                test_name,
                slow_status,
                test["longest_duration"][slow_status],
                test["timeout"]
            ))

    return results, inconsistent, slow
Example #26
0
 def update_from_log(self, log_file):
     """Reset run_info and replay the given structured log through this
     object's action_map handlers."""
     self.run_info = None
     log_reader = reader.read(log_file)
     reader.each_log(log_reader, self.action_map)
 def update_from_log(self, log_file):
     """Reset run_info and replay the given structured log through this
     object's action_map handlers."""
     self.run_info = None
     log_reader = reader.read(log_file)
     reader.each_log(log_reader, self.action_map)
    def test_read(self):
        """read() yields every entry of the log, in order, unchanged."""
        expected = [{"action": "action_0", "data": "data_0"},
                    {"action": "action_1", "data": "data_1"}]

        log_file = self.to_file_like(expected)
        self.assertEquals(expected, list(reader.read(log_file)))