Esempio n. 1
0
def setup_tgs(input, hostname, pid):
    """Compute test-goal (TG) data for every pending test of a Defects4J
    project/version and publish the per-tool results to Redis.

    NOTE: ``input`` shadows the builtin name; kept for interface compatibility.

    :param input: dict with keys 'project', 'version', 'qm', 'tests' and
        optional boolean flags 'redo' and 'verbose'.
    :param hostname: host name used to resolve per-machine properties.
    :param pid: worker id used to resolve per-machine properties.
    :returns: the string "Success" once the worklist is drained.
    """
    project = input['project']
    version = input['version']
    qm = input['qm']  # quality-metric name; part of the Redis key bundle
    tests = input['tests']
    redo = input.get('redo', False)      # True -> reprocess already-computed tests
    verbose = input.get('verbose', False)

    # Per-host/worker configuration values.
    work_dir, d4j_path, redis_url = map(
        lambda property: get_property(property, hostname, pid),
        ['work_dir', 'd4j_path', 'redis_url'])

    # Each child process works in its own scratch subdirectory.
    work_dir_path = local.path(work_dir) / ('child.%d' % os.getpid())
    # NOTE(review): prints the parent work_dir, not the per-child
    # work_dir_path built just above — confirm this is intended.
    print work_dir

    directory, sources = get_modified_sources(project, version)

    # Coverage tools whose TG results are gathered per test.
    tools = ['cobertura', 'codecover', 'jmockit']

    r = StrictRedis.from_url(redis_url)      # bookkeeping store (worklist state)
    rr = StrictRedis.from_url(REDIS_URL_TG)  # destination store for TG results
    # Directory containing the defects4j executable (strip the binary name).
    d4j_location = '/'.join(which('defects4j').rstrip().split('/')[:-1])

    # filter_key_list yields (test, callback) pairs for tests not yet marked
    # 'qm-computed' under this bundle (all of them when redo is set); the
    # callback marks an item done.
    with filter_key_list(
            r,
            key='qm-computed',
            bundle=[qm, project, version],
            list=tests,
            redo=redo,
            other_keys=[],
    ) as worklist:
        # Warm the local cache with every (tool, test) artifact we will read.
        files_i_will_want = [[tool, project, version, test] for tool in tools
                             for (test, _) in worklist]
        prefetch(files_i_will_want)

        for test, callback in worklist:
            # Fresh scratch dir per test; cleaned up afterwards.
            with refresh_dir(work_dir_path, cleanup=True):
                print test
                # Map each tool to the TGs it reports for this test.
                tgs = {
                    tool: get_tgs(d4j_location, tool, project, version, test)
                    for tool in tools
                }
                pp_tgs(rr, [qm, project, version],
                       test,
                       tgs,
                       tools,
                       verbose=verbose)
                callback()  # mark this test as computed in Redis
Esempio n. 2
0
    def decorated(input, f=f, *args, **kwargs):
        f_in = json.loads(input)
        work_dir, d4j_path, redis_url = map(
                lambda property: get_property_defaults(property),
                ['work_dir', 'd4j_path', 'redis_url']
        )

        work_dir_path = local.path(work_dir)
        print "Working directory {0}".format(work_dir_path)

        with refresh_dir(work_dir_path, cleanup=True):
            with add_to_path(d4j_path):
                with connect_to_redis(redis_url) as r:
                    return f(r, work_dir_path, f_in, *args, **kwargs)
Esempio n. 3
0
            def handle_single():
                """Collect TG results for one test case (closure over
                work_dir, tc_idx, tc, project, version, suite, bundle, r).

                Downloads per-tool artifacts into a scratch dir, runs the
                TG-merging jar over them, and returns (all_tgs, covered_tgs).
                Returns ([], []) when the test's execution is recorded as
                failed; re-raises NoFileOnS3 when a required artifact is
                genuinely missing.
                """
                with refresh_dir(work_dir / tc_idx, cleanup=True):
                    print tc_idx, tc

                    # Fetch the source map for this project/version.
                    map_file_name = '{project}:{version}'.format(
                        project=project, version=version)
                    get_file_from_cache_or_s3(
                        'darioush-map-files', map_file_name,
                        str(work_dir / tc_idx / 'map.txt'))
                    # - prep the tmp dir
                    call_tgs = ALL_TGS
                    for tool in ['cobertura', 'codecover', 'jmockit', 'major']:
                        try:
                            get_files(work_dir / tc_idx, tool, project,
                                      version, suite, tc)
                        except NoFileOnS3:
                            # No artifact on S3: consult the recorded exec
                            # result to decide whether that is acceptable.
                            exec_result = json.loads(
                                r.hget(mk_key('exec', [tool] + bundle),
                                       tc_idx))
                            print exec_result, tool
                            if exec_result is None:
                                has_failed = r.sismember(
                                    mk_key('fail', ['exec'] + bundle), tc_idx)
                                if has_failed:
                                    # Known execution failure: nothing to merge.
                                    print "-- Has failed"
                                    return [], []
                            is_it_empty = is_empty(tool, exec_result)
                            if is_it_empty:
                                if tool in ('major', 'codecover', 'jmockit'):
                                    # Empty results are tolerated for these
                                    # tools: drop their TG kinds and move on.
                                    print "-> Empty results for {0} noticed, ignoring this tool".format(
                                        tool)
                                    call_tgs = [
                                        tg for tg in call_tgs
                                        if not tg.endswith(tool)
                                    ]
                                else:
                                    # Empty result for a mandatory tool:
                                    # re-raise the NoFileOnS3.
                                    raise
                            else:
                                # Non-empty exec result but no file: real error.
                                raise

                    # Run the merger jar; output is one line per TG,
                    # '+'-prefixed lines are covered (see partition below).
                    result = jar()[work_dir / tc_idx](*call_tgs)
                    all_tgs = result.strip().split('\n')
                    tgs = [
                        tg for covered, _, tg in
                        [s.partition(' ') for s in all_tgs] if covered == '+'
                    ]

                    return all_tgs, tgs
Esempio n. 4
0
def main():
    parser = OptionParser()
    parser.add_option("-p",
                      "--project",
                      dest="restrict_project",
                      action="append")
    parser.add_option("-v",
                      "--version",
                      dest="restrict_version",
                      action="append")

    (options, args) = parser.parse_args(sys.argv)
    java = local['java']['-cp', JAR_PATH, 'edu.washington.cs.tgs.MapBuilder']
    for p, v in iter_versions(options.restrict_project,
                              options.restrict_version):
        print p, v
        src_dir, f_list = get_modified_sources(p, v)
        work_dir_path = '/tmp/work.{pid}'.format(pid=os.getpid())
        with refresh_dir(work_dir_path, cleanup=True):
            with checkout(p, v, work_dir_path):
                with local.cwd(src_dir):
                    (java > '/tmp/results/{p}:{v}'.format(p=p, v=v))(*f_list)
Esempio n. 5
0
def method_list_matches(input, hostname, pid):
    """Cross-check the test-method lists for a Defects4J project/version.

    Runs the whole suite in a clean checkout and verifies that the runner's
    passing-test list, `defects4j list-tests` output, and the list cached in
    Redis all agree: no unexpected failures, no duplicates, equal contents.
    Known problem cases for Lang (>=37) and Time are special-cased.

    NOTE: ``input`` shadows the builtin name; kept for interface compatibility.

    :param input: dict with keys 'project' and 'version'.
    :param hostname: host name used to resolve per-machine properties.
    :param pid: worker id used to resolve per-machine properties.
    :returns: "Success" if every sanity check passes.
    :raises TestFail: on unexpected (or single-run) test failures.
    """
    project = input['project']
    version = input['version']

    # Per-host/worker configuration values.
    work_dir, d4j_path, redis_url = map(
            lambda property: get_property(property, hostname, pid),
            ['work_dir', 'd4j_path', 'redis_url']
    )

    # List of test methods previously cached in Redis for this version.
    r = StrictRedis.from_url(redis_url)
    key = mk_key('test-methods', [project, version])
    test_methods_from_redis = r.lrange(key, 0, -1)

    # Each child process works in its own scratch subdirectory.
    work_dir_path = local.path(work_dir) / ('child.%d' % os.getpid())
    print work_dir_path

    with refresh_dir(work_dir_path, cleanup=True):
        with add_to_path(d4j_path):
            with checkout(project, version, work_dir_path / 'checkout'):
                d4()('compile')
                # Authoritative list according to `defects4j list-tests`.
                test_methods_from_d4 = d4()('list-tests').rstrip().split('\n')
                with local.env(SUCCESS_OUT="passing-tests.txt"):
                    failing_tests = test()

                    # Runner writes passing tests prefixed with '--- '.
                    with open("passing-tests.txt") as f:
                        test_methods_from_run = [x[len('--- '):] for x in f.read().rstrip().split('\n')]
                    # One per-run count per line; total must match list-tests.
                    with open("count-of-tests.txt") as f:
                        per_run_counts = [int(line.rstrip()) for line in f]
                        count_of_tests_from_run = sum(per_run_counts)


                if project == 'Lang' and version >= 37:
                    ## In this case, we know that some tests may fail
                    ## this is really ugly, but I'm doing it.
                    # Package name flipped between versions (lang vs lang3).
                    klass_name = 'org.apache.commons.%s.builder.ToStringBuilderTest' % (
                            'lang' if version > 39 else 'lang3',
                    )

                    # Failures inside that class are tolerated only if the
                    # class also passes when run in isolation.
                    expected_fails = [method for method in failing_tests if method.startswith(klass_name)]
                    single_run_fails = test(['-t', klass_name])
                    if len(single_run_fails) > 0:
                        raise TestFail('Single run failed: ' + ' '.join(single_run_fails))
                elif project == 'Time':
                    ## In this case, org.joda.time.chrono.gj.MainTest
                    ## isn't really a jUnit test because it doesn't have a public
                    ## constructor. We fix this during run by replacing it
                    ## with two classes with a public constructor, each of which
                    ## initializes the original class with parameters used during
                    ## testing

                    bad_class = 'org.joda.time.chrono.gj.MainTest'
                    good_class1, good_class2 = ['edu.washington.cs.testfixer.time.GjMainTest' + s for s in ('1', '2')]
                    tname = '::testChronology'
                    # Replace the bad class's two run entries in place
                    # (assumes they occupy consecutive slots at idx, idx+1).
                    tcs = [tc for tc, _, _ in [method.partition('::') for method in test_methods_from_run]]
                    idx = tcs.index(bad_class)
                    test_methods_from_run[idx]   = good_class1 + tname
                    test_methods_from_run[idx+1] = good_class2 + tname

                    # In the d4 list the bad class has a single entry, which
                    # is expanded into the two replacement classes.
                    tcsd4 = [tc for tc, _, _ in [method.partition('::') for method in test_methods_from_d4]]
                    idxd4 = tcsd4.index(bad_class)
                    test_methods_from_d4 = test_methods_from_d4[:idxd4] + [good_class1 + tname,
                            good_class2 + tname] + test_methods_from_d4[idxd4+1:]

                    expected_fails = []

                else:
                    expected_fails = []

                unexpected_fails = [method for method in failing_tests if method not in expected_fails]

        # Sanity check #0 -- check out the test fails
        if len(unexpected_fails) > 0:
            raise TestFail(' '.join(unexpected_fails))

        # Sanity check #1 -- number of tests counted through the runner should equal
        #                    the length of the list of passing tests the runner outputs
        num_tests = len(test_methods_from_d4)
        if num_tests != count_of_tests_from_run:
            raise LenMismatch("Test methods from d4 (%d) don't match counter (%d)" %
                    (num_tests, count_of_tests_from_run))

        # Sanity check #2 -- we should not be running duplicate tests
        no_dups(test_methods_from_run, 'test methods from run')

        # Sanity check #3 -- we should not be list-outputting duplicate tests
        no_dups(test_methods_from_d4, 'test methods from d4')

        # Sanity check #4 -- we should not have duplicate tests in redis store
        no_dups(test_methods_from_redis, 'test methods from redis')

        # Sanity check #5 -- tests output from the runner should match the tests output from
        #                    d4 list-tests
        check_eq(test_methods_from_run, 'test methods from run', test_methods_from_d4, 'test methods from d4')

        # Sanity check #6 -- test methods from d4 should match ones in redis
        #
        #   Preprocess step: We know that these methods were wrongly inserted:
        #lang_methods = [
        #    'org.apache.commons.lang3.EnumUtilsTest::test_processBitVectors_longClass',
        #    'org.apache.commons.lang3.builder.ReflectionToStringBuilderConcurrencyTest::testLinkedList',
        #    'org.apache.commons.lang3.builder.ReflectionToStringBuilderConcurrencyTest::testArrayList',
        #    'org.apache.commons.lang3.builder.ReflectionToStringBuilderConcurrencyTest::testCopyOnWriteArrayList',
        #    'org.apache.commons.lang3.builder.ReflectionToStringBuilderMutateInspectConcurrencyTest::testConcurrency',
        #]
        #lang_methods_in_redis = [method for method in lang_methods if method in test_methods_from_redis]

        #for lang_method in lang_methods_in_redis:
        #    print "Removing %s from redis:" % lang_method
        #    print r.lrem(key, 1, lang_method)

        #if lang_methods_in_redis:
        #    print "Redis store was modified, reloading list before testing"
        #    test_methods_from_redis = r.lrange(key, 0, -1)

        check_eq(test_methods_from_redis, 'test methods from redis', test_methods_from_d4, 'test methods from d4')

    return "Success"