Example 1
 def test_multi_logs(self):
     self.assertEqual(parse_logs({
                                 'firefox': 'INFO:check_stability:Hello World!\nDEBUG:check_stability:Goodbye Cruel World!',
                                 'chrome': 'DEBUG:check_stability:Hello World!\nWARNING:check_stability:Goodbye Cruel World!'
                                 }),
                      {'firefox': 'Hello World!', 'chrome': 'Goodbye Cruel World!'},
                      'It should include and exclude correctly across multiple logs.')
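The dictionary-based tests here and in Examples 10-15 pin down the contract of parse_logs: keep only lines logged through check_stability at a level other than DEBUG, strip everything up to and including ":check_stability:", and join the surviving messages per browser with newlines. The implementation itself is not part of this listing; the following is only a minimal sketch consistent with those assertions.

def parse_logs(logs):
    """Minimal sketch (not the original implementation).

    Maps {browser_title: raw_log_text} to {browser_title: filtered_text}.
    """
    comments = {}
    for title, data in logs.items():
        kept = []
        for line in data.split('\n'):
            # Only lines emitted through the check_stability logger matter.
            if ':check_stability:' not in line:
                continue
            level, _, message = line.partition(':check_stability:')
            # DEBUG output is dropped; every other level is reported.
            if level == 'DEBUG':
                continue
            kept.append(message)
        comments[title] = '\n'.join(kept)
    return comments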
Example 2
def webhook_handler(payload, signature):
    """Respond to Travis webhook."""
    travis = Travis()
    github = GitHub()

    # The payload comes in the request, but we need to make sure it is
    # really signed by Travis CI. If not, respond to this request with
    # an error.
    verified_payload = travis.get_verified_payload(payload, signature)
    error = verified_payload.get('error')
    if error:
        return error.get('message'), error.get('code')

    issue_number = int(verified_payload.get('pull_request_number'))
    logs = travis.get_logs(verified_payload)

    comments = parse_logs(logs)

    # Create a separate comment for every job (items() works on both
    # Python 2 and 3).
    for title, comment in comments.items():
        try:
            github.post_comment(issue_number, comment, title)
        except requests.RequestException as err:
            logging.error(err.response.text)
            return err.response.text, 500

    return "OK", 200
Example 3
 def test_multi_logs(self):
     self.assertEqual(
         parse_logs([{
             'job_id': 94,
             'title': 'firefox',
             'data': 'INFO:check_stability:Hello World!\nDEBUG:check_stability:Goodbye Cruel World!'
         }, {
             'job_id': 32,
             'title': 'chrome',
             'data': 'DEBUG:check_stability:Hello World!\nWARNING:check_stability:Goodbye Cruel World!'
         }]), [{
             'job_id': 94,
             'title': 'firefox',
             'text': 'Hello World!'
         }, {
             'job_id': 32,
             'title': 'chrome',
             'text': 'Goodbye Cruel World!'
         }],
         'It should include and exclude correctly across multiple logs.')
Example 4
 def test_new_line(self):
     self.assertEqual(
         parse_logs([{
             'job_id': 83,
             'title': 'firefox',
             'data': 'INFO:check_stability:Hello World!\nWARNING:check_stability:Goodbye Cruel World!'
         }, {
             'job_id': 88,
             'title': 'chrome',
             'data': 'INFO:check_stability:Hello World!\nWARNING:check_stability:Goodbye Cruel World!'
         }]), [{
             'job_id': 83,
             'title': 'firefox',
             'text': 'Hello World!\nGoodbye Cruel World!'
         }, {
             'job_id': 88,
             'title': 'chrome',
             'text': 'Hello World!\nGoodbye Cruel World!'
         }], 'It should include newline characters as appropriate.')
Example 5
 def test_non_check_stability(self):
     self.assertEqual(
         parse_logs([{
             'job_id': 8,
             'title': 'firefox',
             'data': 'Hello World!'
         }]), [{
             'job_id': 8,
             'title': 'firefox',
             'text': ''
         }],
         'It should exclude lines that do not include ":check_stability:"')
Example 6
 def test_debug_level(self):
     self.assertEqual(
         parse_logs([{
             'job_id': 5,
             'title': 'firefox',
             'data': 'DEBUG:check_stability:Hello World'
         }]), [{
             'job_id': 5,
             'title': 'firefox',
             'text': ''
         }],
         'It should exclude debug log statements that include ":check_stability:".'
     )
Example 7
 def test_non_debug_level(self):
     self.assertEqual(
         parse_logs([{
             'job_id': 3,
             'title': 'firefox',
             'data': 'ABCDE:check_stability:Hello World'
         }]), [{
             'job_id': 3,
             'title': 'firefox',
             'text': 'Hello World'
         }],
         'It should include non-debug log statements that include ":check_stability:".'
     )
Example 8
 def test_multi_line(self):
     self.assertEqual(
         parse_logs([{
             'job_id': 9,
             'title': 'firefox',
             'data': 'Hello World!\nERROR:check_stability:Goodbye Cruel World!'
         }]), [{
             'job_id': 9,
             'title': 'firefox',
             'text': 'Goodbye Cruel World!'
         }],
         'It should include and exclude correctly across multiple lines.')
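Examples 3-8 (and Example 16) use a newer list-based shape: parse_logs receives a list of job dicts with 'job_id', 'title' and 'data' keys and returns one result dict per job with the filtered 'text'. Again only a sketch consistent with the tests, reusing the same line filter as the dictionary-based sketch above.

def parse_logs(jobs):
    """Minimal sketch of the list-based variant (not the original code)."""
    results = []
    for job in jobs:
        kept = []
        for line in job['data'].split('\n'):
            if ':check_stability:' not in line:
                continue
            level, _, message = line.partition(':check_stability:')
            if level == 'DEBUG':
                continue
            kept.append(message)
        results.append({
            'job_id': job['job_id'],
            'title': job['title'],
            'text': '\n'.join(kept)
        })
    return results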
Example 9
def run_test_suite(model_dir, configuration, disabled, print_parsed,
                   ignore_test):
    # Initialize bookkeeping before the try block so the except/finally
    # handlers below can always refer to these names, even when an error
    # happens before the test loop starts.
    model_dir = os.path.abspath(model_dir)
    failed_tests = []
    passed_tests = []
    skipped_tests = []
    repo_based = False  # Is this test dir repo based or pb/pbtxt/savedmodel based?
    repo_dl_loc = model_dir + '/downloaded_model'
    try:
        # TODO: assert TF version. Some models may not run on TF1.12 etc
        test_suites = os.listdir(model_dir)

        # download/prepare repo if needed:
        repo_filename = model_dir + '/repo.txt'
        if os.path.isfile(repo_filename):
            repo_based = True
            repo_info = [
                line.strip() for line in open(repo_filename).readlines()
                if len(line.strip()) > 0
            ]
            repo_name = repo_info[0]
            repo_version = repo_info[1] if len(repo_info) == 2 else 'master'
            assert not os.path.isdir(
                repo_dl_loc
            ), "Did not expect " + repo_dl_loc + " to be present. Maybe a leftover from the last run that was not deleted?"
            download_repo(repo_dl_loc, repo_name, repo_version)
            ready_repo(model_dir, repo_dl_loc)

        # Iterate through each sub-test
        for flname in test_suites:
            sub_test_dir = model_dir + '/' + flname
            # if its  directory starting with test, and not containing "disabled" in its name
            item_is_a_subtest = not os.path.isfile(
                sub_test_dir) and flname.startswith('test')
            if item_is_a_subtest:
                disabled_by_dir_name = 'disabled' in flname
                disabled_by_cli = flname in disabled
                if (not disabled_by_dir_name) and (not disabled_by_cli):
                    custom_parser_present = os.path.isfile(
                        sub_test_dir + '/custom_log_parser.py')
                    if repo_based:
                        # TODO: shift the timing inside apply_patch_and_test
                        sub_test_dir = model_dir + '/' + flname
                        tstart = time.time()
                        try:
                            # Enable placement logging only when the default
                            # log parser will consume the output (no custom
                            # parser present).
                            env_flags = ('' if custom_parser_present else
                                         'NGRAPH_TF_LOG_PLACEMENT=1')
                            so, se = apply_patch_and_test(
                                sub_test_dir, env_flags)
                        except Exception as e:
                            print(e)
                            failed_tests.append(flname)
                            continue
                        tend = time.time()
                        command_executor.commands += '\n'
                    else:
                        model = [
                            i for i in os.listdir(sub_test_dir)
                            if '.md' not in i and '.json' not in i
                        ]
                        assert len(model) == 1
                        model = model[0]
                        split_on_dot = model.split('.')
                        assert len(split_on_dot) <= 2
                        if len(split_on_dot) == 1:
                            model_format = 'savedmodel'
                        elif split_on_dot[1] in ['pb', 'pbtxt']:
                            model_format = split_on_dot[1]
                        else:
                            assert False, "Unknown input format. Expected savedmodel, pb or pbtxt"
                        # TODO: support checkpoint too later
                        gdef = get_gdef(model_format,
                                        sub_test_dir + '/' + model)
                        # TODO: run Level1 tests on gdef. needs another json for that (one which specifies input shapes etc)

                    expected_json_file = sub_test_dir + '/expected.json'
                    expected_json_present = os.path.isfile(expected_json_file)
                    if print_parsed or expected_json_present:
                        # parse logs in this case
                        if custom_parser_present:
                            sys.path.insert(0, os.path.abspath(sub_test_dir))
                            from custom_log_parser import custom_parse_logs
                            parsed_vals = custom_parse_logs(so)
                            sys.path.pop(0)
                        else:
                            parsed_vals = parse_logs(so)
                        if print_parsed:
                            to_be_printed = {
                                configuration: {
                                    'logparse': parsed_vals,
                                    'time': tend - tstart
                                }
                            }
                            replaced_single_with_double_quotes = json.loads(
                                to_be_printed.__str__().replace("\'", "\""))
                            print(
                                json.dumps(replaced_single_with_double_quotes,
                                           sort_keys=True,
                                           indent=4,
                                           separators=(',', ': ')))
                    # If expected.json is present, run some extra tests. If not present we deem the test passed if it ran apply_patch_and_test without raising any errors
                    if expected_json_present:
                        try:
                            expected = get_expected_from_json(
                                expected_json_file, configuration,
                                not custom_parser_present)
                        except:
                            assert False, 'Failed to parse ' + expected_json_file
                        assert check_test_types(expected.keys()), (
                            "Got unexpected key in " + ','.join(expected.keys())
                            + ". Should have been " + ','.join(valid_test_types))
                        # We run the test if 'logparse' is present in the expected values to check
                        # for and it is not in the ignore list
                        if ('logparse' in expected) and ('logparse'
                                                         not in ignore_test):
                            passed, fail_help_string = compare_parsed_values(
                                parsed_vals, expected['logparse'])
                            if not passed:
                                print('Failed in test ' + flname +
                                      '. Help message: ' + fail_help_string)
                                failed_tests.append(flname)
                                continue
                        if ('time' in expected) and ('time'
                                                     not in ignore_test):
                            actual_runtime = tend - tstart
                            # TODO: decide this criteria. time can be pretty variable
                            # TODO: the percentage (0.1) for the time bound might be passed through `expected.json`
                            time_check = (actual_runtime - expected['time']
                                          ) / expected['time'] < 0.1
                            if not time_check:
                                print("Expected run time for test " + flname +
                                      " is " + str(expected['time']) +
                                      " but it actually took " +
                                      str(actual_runtime))
                                failed_tests.append(flname)
                                continue
                    passed_tests.append(flname)
                else:
                    skipped_tests.append(flname)
                # Make sure the test is exactly one of passed, skipped or failed
                assert sum([
                    flname in skipped_tests, flname in passed_tests, flname
                    in failed_tests
                ]) == 1, str(
                    flname
                ) + ' does not appear exactly once in passed, skipped or failed test lists'

        # Clean up if needed
        cleanup_script = model_dir + '/cleanup.sh'
        if os.path.isfile(cleanup_script):
            assert repo_based, 'Did not expect a cleanup script in non-repo based test'
            command_executor('chmod +x ' + cleanup_script)
            command_executor(cleanup_script)
        command_executor.commands += '# Exiting. Done with tests in ' + model_dir.split(
            '/')[-1]
        return passed_tests, failed_tests, skipped_tests
        # TODO: use gdef to run
        # TODO: add axpy test folders for pb. pbtxt and savedmodel
        # TODO integrate the if-else paths as much as possible

        # TODO: check throughput/latency
    except Exception as e:
        print(e)
        return passed_tests, failed_tests, skipped_tests
    finally:
        if os.path.isdir(repo_dl_loc):
            command_executor('rm -rf ' + repo_dl_loc)
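run_test_suite reports its outcome purely through the three returned lists, so a caller only needs to tally them. A minimal invocation sketch; the directory path and configuration name below are hypothetical.

passed, failed, skipped = run_test_suite(
    model_dir='./my_model_tests',  # hypothetical test-suite directory
    configuration='default',       # hypothetical configuration name
    disabled=[],                   # sub-test directory names to skip
    print_parsed=False,
    ignore_test=[])                # e.g. ['time'] to skip the runtime check
print('Passed: %d  Failed: %d  Skipped: %d' %
      (len(passed), len(failed), len(skipped)))
assert not failed, 'Failing tests: ' + ', '.join(failed)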
Example 10
 def test_empty_dict(self):
     self.assertEqual(parse_logs({}),
                      {},
                      'It should return an empty dict if passed an empty dict.')
Example 11
 def test_new_line(self):
     self.assertEqual(parse_logs({'firefox': 'INFO:check_stability:Hello World!\nWARNING:check_stability:Goodbye Cruel World!',
                                  'chrome': 'INFO:check_stability:Hello World!\nWARNING:check_stability:Goodbye Cruel World!'}),
                      {'firefox': 'Hello World!\nGoodbye Cruel World!',
                       'chrome': 'Hello World!\nGoodbye Cruel World!'},
                      'It should include newline characters as appropriate.')
Example 12
 def test_multi_line(self):
     self.assertEqual(parse_logs({'firefox': 'Hello World!\nERROR:check_stability:Goodbye Cruel World!'}),
                      {'firefox': 'Goodbye Cruel World!'},
                      'It should include and exclude correctly across multiple lines.')
Example 13
 def test_non_check_stability(self):
     self.assertEqual(parse_logs({'firefox': 'Hello World!'}),
                      {'firefox': ''},
                      'It should exclude lines that do not include ":check_stability:"')
Example 14
 def test_debug_level(self):
     self.assertEqual(parse_logs({'firefox': 'DEBUG:check_stability:Hello World'}),
                      {'firefox': ''},
                      'It should exclude debug log statements that include ":check_stability:".')
Example 15
 def test_non_debug_level(self):
     self.assertEqual(parse_logs({'firefox': 'ABCDE:check_stability:Hello World'}),
                      {'firefox': 'Hello World'},
                      'It should include non-debug log statements that include ":check_stability:".')
Example 16
 def test_empty_dict(self):
     self.assertEqual(
         parse_logs([]), [],
         'It should return an empty list if passed an empty list.')