Example #1
0
def get_setting(session, name, force_default=False):
    """Return the value of the system setting identified by *name*.

    Looks the setting up in the SystemConfig table (skipped when
    ``force_default`` is true), coercing the stored string according to
    the setting's schema type. Falls back to the schema's default value
    or default file contents when no stored value is available.

    @param session: database session used to query model.SystemConfig.
    @param name: key of the setting; must be present in SCHEMA.
    @param force_default: when True, ignore the database and use defaults.
    @raise KeyError: if *name* is not in SCHEMA, or no stored value and
        no default is defined.
    """
    schema = SCHEMA.get(name)
    if not schema:
        raise KeyError("No such setting %s" % name)

    if not force_default:
        setting = session.query(model.SystemConfig).get(name)
    else:
        setting = None

    if setting:
        if setting.value is not None:
            # Stored values are strings; coerce according to schema type.
            if schema['type'] == 'numerical':
                return float(setting.value)
            elif schema['type'] == 'boolean':
                return setting.value.lower() == 'true'
            else:
                return setting.value
        elif setting.data is not None:
            # Binary payload stored alongside the textual value.
            return setting.data
    elif 'default_value' in schema:
        return schema['default_value']
    elif 'default_file_path' in schema:
        # Default comes from a file shipped inside the package.
        path = os.path.join(get_package_dir(), schema['default_file_path'])
        with open(path, 'rb') as f:
            return f.read()

    # NOTE(review): a row whose value AND data are both None falls through
    # to here and raises, bypassing schema defaults — confirm intended.
    raise KeyError("No such setting %s" % name)
def configure_logging():
    """Apply the logging configuration from logging.cfg in the package
    directory, warning (but not failing) when the file is absent."""
    logconf_path = os.path.join(get_package_dir(), "logging.cfg")
    if os.path.exists(logconf_path):
        logging.config.fileConfig(logconf_path)
    else:
        print("Warning: log config file %s does not exist." % logconf_path)
Example #3
0
def get_resource(name, context=None):
    '''
    Load a YAML/JSON resource file from the package directory, with
    per-(name, context) caching.

    @param name - the name of the resource to load, without the file extension.
    @param context - optional context string; when given, the resource is
        assumed to be a list of dicts and is filtered to entries whose
        'context' key matches (entries without a 'context' key are kept).
    '''
    # Fast path: this exact (name, context) pair was served before.
    if (name, context) in cache:
        return cache[(name, context)]

    # The unfiltered resource is cached under the bare name.
    if name in cache:
        config = cache[name]
    else:
        directory = get_package_dir()
        conf_path_stem = '%s/%s' % (directory, name)
        # Try each supported extension in order of preference.
        extensions = ('yml', 'yaml', 'json')
        for ext in extensions:
            conf_path = '%s.%s' % (conf_path_stem, ext)
            try:
                with open(conf_path) as f:
                    # NOTE(review): yaml.load without an explicit Loader is
                    # deprecated and unsafe on untrusted input; these are
                    # package-local files, but consider yaml.safe_load
                    # (JSON is a subset of YAML, so it still parses both).
                    config = yaml.load(f)
                    break
            except FileNotFoundError:
                continue
        else:
            # for-else: no extension matched (loop ended without break).
            raise FileNotFoundError("No resource like %s.{%s}" %
                                    (conf_path_stem, ','.join(extensions)))
        cache[name] = config

    if context is not None:
        config = [d for d in config if d.get('context', context) == context]

    cache[(name, context)] = config
    return config
Example #4
0
def run_randoop(projectCP, class_name, randoop_jar_path, testdir,
                search_budget):
    """Generate JUnit regression tests for *class_name* with Randoop.

    @param projectCP: classpath of the project under test.
    @param class_name: fully qualified name of the class to test.
    @param randoop_jar_path: path to the Randoop jar.
    @param testdir: directory where generated tests and out/err logs go.
    @param search_budget: Randoop time limit (seconds).
    """
    def remove_randoop_error_test(testdir):
        # Randoop also emits ErrorTest*.java (failing tests); remove them
        # so downstream tools only see the regression suite.
        for test in os.listdir(testdir):
            if "ErrorTest" in test:
                test_file = os.path.join(testdir, test)
                os.unlink(test_file)

    utils.make_dirs_if_not_exist(testdir)
    sep = os.path.pathsep
    err_file = os.path.join(testdir, "err.txt")
    out_file = os.path.join(testdir, "out.txt")
    package = class_name.split(".")[0:-1]
    packages_dir = utils.get_package_dir(package)
    # [:-1] strips the trailing separator left by get_package_dir before
    # converting the path to a dotted package name.
    command = 'java -classpath {}{}{} randoop.main.Main gentests --testclass={} --time-limit={} --usethreads=true --junit-package-name={} --npe-on-non-null-input=expected --junit-output-dir={} > {} 2> {}'.format(
        projectCP, sep, randoop_jar_path, class_name, search_budget,
        packages_dir.replace(os.path.sep, ".")[:-1], testdir, out_file,
        err_file)
    utils.print_command(command)
    try:
        subprocess.check_output(command, shell=True)
    except Exception as e:
        # Fixed: the original format string had only one placeholder, so
        # the exception argument was silently dropped. Now reported in the
        # same style as the other runners ("... Error {}").
        print("Error al correr randoop con el comando '{}'. Error {}".format(
            command, e))
    testdir_full = os.path.join(testdir, packages_dir)
    remove_randoop_error_test(testdir_full)
Example #5
0
def get_minimal_settings():
    """Return the minimal Tornado application settings needed to run."""
    client_dir = os.path.join(get_package_dir(), "..", "client")
    settings = {}
    settings["template_path"] = client_dir
    settings["login_url"] = "/login/"
    settings["cookie_secret"] = 'dummy'
    return settings
Example #6
0
def get_file_path_jncss(strategy, class_name, test_dir, results_dir_name,
                        bug_type, stopping_condition, search_budget, criterion,
                        runid, javancss_jar_path):
    """Run JavaNCSS over the generated test suite and return the path of
    the report file it produced."""
    qualifiers = class_name.split(".")[0:-1]
    package_dir = utils.get_package_dir(qualifiers)

    # Randoop emits several RegressionTest*.java files (500 tests each),
    # so match them with a wildcard; EvoSuite emits one *_ESTest.java.
    if "randoop".upper() in criterion.upper():
        suite_file_name = "RegressionTest?.java"
    else:
        suite_file_name = class_name.split(".")[-1] + "_ESTest.java"
    test_suite_file_path = os.path.join(test_dir, package_dir, suite_file_name)

    temp_dir_name = "{}_{}_{}_{}_{}_{}".format(strategy, bug_type,
                                               stopping_condition,
                                               search_budget, class_name,
                                               criterion)
    result_jncss_temp = os.path.join(results_dir_name, "javancss_temp",
                                     temp_dir_name)
    utils.make_dirs_if_not_exist(result_jncss_temp)

    result_jncss_temp = os.path.join(result_jncss_temp, "{}.txt".format(runid))
    command = "java -jar {} {} > {}".format(javancss_jar_path,
                                            test_suite_file_path,
                                            result_jncss_temp)
    utils.print_command(command)
    utils.lock_if_windows()
    try:
        subprocess.check_output(command, shell=True)
    except Exception as e:
        print("Error al ejecutar el comando '{}'. Error {}".format(command, e))
    finally:
        utils.release_if_windows()
    return result_jncss_temp
Example #7
0
def workaround_test(test_dir, class_name, file_name, add_fails, assert_type):
    """Post-process a generated test file: rewrite its assert/catch blocks
    according to *assert_type* and optionally inject fail() calls."""
    package_path = utils.get_package_dir(class_name.split(".")[0:-1])
    java_file = os.path.join(test_dir, package_path, file_name)
    utils.replace_assert_catch_in_test(java_file, assert_type)
    if add_fails:
        utils.add_fails_in_test(java_file)
Example #8
0
def cp_testsuite_if_exists_in_other_results(curr_bug_type, subdir_testgen,
                                            generated_test_report_evosuite_dir,
                                            class_name, name):
    """If a test suite for *class_name* already exists under the other
    bug-type's results, copy it into place and return True.

    The "other" bug type is ALL when the current one is ERRPROT and
    ERRPROT otherwise. Paths for the other bug type are derived by string
    replacement of the bug-type token, so that token must appear in the
    supplied directory paths.
    """
    other_bug_type = BugType.ALL.name.lower() if (
        curr_bug_type.upper()
        == BugType.ERRPROT.name) else BugType.ERRPROT.name.lower()
    other_generated_test_dir = subdir_testgen.replace(curr_bug_type,
                                                      other_bug_type)
    other_generated_test_report_evosuite_dir = generated_test_report_evosuite_dir.replace(
        curr_bug_type, other_bug_type)
    test_file_path = os.path.join(
        other_generated_test_dir, "test",
        utils.get_package_dir(class_name.split(".")[0:-1]),
        name + "_ESTest.java")
    testsuite_exists = check_if_exists_testgendir_in_other_bug_type(
        other_generated_test_dir, other_generated_test_report_evosuite_dir,
        class_name, test_file_path)
    if (testsuite_exists):
        # Replace any partial results for the current bug type wholesale.
        if os.path.exists(subdir_testgen):
            shutil.rmtree(subdir_testgen)
        shutil.copytree(other_generated_test_dir, subdir_testgen)
        print("copy {}".format(test_file_path))
        #if other_bug_type == ERRPROT, then i need to move original test file (with asserts)
        if (other_bug_type.upper() == BugType.ERRPROT.name):
            # Re-point at the copy inside the current bug-type directory,
            # then restore the ".original" file (the one keeping asserts).
            test_file_path = test_file_path.replace(other_bug_type,
                                                    curr_bug_type)
            os.unlink(test_file_path)
            shutil.move(test_file_path + ".original", test_file_path)
    return testsuite_exists
Example #9
0
def read_app_version():
    """Read version.txt (one level above the package directory) and store
    the first line on template.aq_version; None when the file is absent."""
    version_path = os.path.join(get_package_dir(), '..', 'version.txt')
    try:
        with open(version_path) as f:
            version = f.readline().strip()
    except FileNotFoundError:
        version = None
    template.aq_version = version
Example #10
0
def exist_subject(bin_original_code_dir, bin_instrumented_code_dir,
                  class_name):
    """Return True when the package of *class_name* exists under both the
    original and the instrumented compiled-code directories.

    @param bin_original_code_dir: root directory of original .class files.
    @param bin_instrumented_code_dir: root of instrumented .class files.
    @param class_name: fully qualified (dotted) class name.
    """
    package_dir = utils.get_package_dir(class_name.split(".")[0:-1])
    # Idiom fix: return the boolean expression directly instead of the
    # `if cond: return True / return False` pattern.
    return (os.path.exists(os.path.join(bin_original_code_dir, package_dir))
            and os.path.exists(
                os.path.join(bin_instrumented_code_dir, package_dir)))
Example #11
0
def bower_versions():
    """Return a mapping of installed bower component names (with '-'
    replaced by '_') to their version strings, caching the result."""
    if '_bower_versions' in cache:
        return cache['_bower_versions']

    directory = os.path.join(get_package_dir(), '..', 'client',
                             'bower_components')
    versions = {}
    for path in Path(directory).glob('*/.bower.json'):
        with path.open() as f:
            # .bower.json is JSON; YAML parses it as a superset.
            # NOTE(review): yaml.load without an explicit Loader is
            # deprecated — consider yaml.safe_load (or json.load).
            component_meta = yaml.load(f)
        if 'version' in component_meta:
            name = component_meta['name']
            # Normalise to Python-identifier-friendly names.
            name = name.replace('-', '_')
            versions[name] = component_meta['version']

    cache['_bower_versions'] = versions
    return versions
Example #12
0
def cp_testsuite_if_exists_in_other_results(curr_bug_type, subdir_testgen,
                                            generated_test_report_evosuite_dir,
                                            class_name, criterion):
    """If a test suite for *class_name*/*criterion* already exists under
    the other bug-type's results, copy it into place and return True.

    The "other" bug type is ALL when the current one is ERRPROT and
    ERRPROT otherwise. Paths for the other bug type are derived by string
    replacement of the bug-type token, so that token must appear in the
    supplied directory paths.
    """
    other_bug_type = BugType.ALL.name.lower() if (
        curr_bug_type.upper()
        == BugType.ERRPROT.name) else BugType.ERRPROT.name.lower()
    other_generated_test_dir = subdir_testgen.replace(curr_bug_type,
                                                      other_bug_type)
    other_generated_test_report_evosuite_dir = generated_test_report_evosuite_dir.replace(
        curr_bug_type, other_bug_type)
    other_full_test_dir = os.path.join(
        other_generated_test_dir, "test",
        utils.get_package_dir(class_name.split(".")[0:-1]))
    testsuite_exists = check_if_exists_testgendir_in_other_bug_type(
        other_generated_test_report_evosuite_dir, other_full_test_dir,
        criterion)
    if (testsuite_exists):
        # Replace any partial results for the current bug type wholesale.
        if os.path.exists(subdir_testgen):
            shutil.rmtree(subdir_testgen)
        shutil.copytree(other_generated_test_dir, subdir_testgen)
        print("TEST ALREADY GENERATED! copying from {}".format(
            other_generated_test_dir))
        #if other_bug_type == ERRPROT, then i need to move original test file (with asserts)
        if (other_bug_type.upper() == BugType.ERRPROT.name):
            curr_full_test_dir = other_full_test_dir.replace(
                other_bug_type, curr_bug_type)
            # Two passes, order matters: first delete every stripped test
            # file, then rename the ".original" copies (which keep their
            # asserts) back into place.
            for test_file_name in os.listdir(curr_full_test_dir):
                if not test_file_name.endswith(".original"):
                    test_file_path = os.path.join(curr_full_test_dir,
                                                  test_file_name)
                    os.unlink(test_file_path)
            for test_file_name in os.listdir(curr_full_test_dir):
                if test_file_name.endswith(".original"):
                    #test_file_path = test_file_path.replace(other_bug_type, curr_bug_type)
                    #os.unlink(test_file_path)
                    test_file_path = os.path.join(curr_full_test_dir,
                                                  test_file_name)
                    shutil.move(test_file_path,
                                test_file_path.replace(".original", ""))
    return testsuite_exists
def setup_mujava(origin_mutants_dir, subject_name, subdir_mutants,
                 original_code_dir):
    """Copy and compile MuJava class- and traditional-mutant directories
    for *subject_name* into *subdir_mutants*.

    @param origin_mutants_dir: source dir containing "class_mutants" and
        "traditional_mutants" subdirectories.
    @param subject_name: fully qualified name of the subject class.
    @param subdir_mutants: destination root for the prepared mutants.
    @param original_code_dir: classpath used when compiling the mutants.
    """
    def mk_and_cp_operator_mutant_dir(src_dir, subject_name, operator_dir_name,
                                      packages_dir):
        # Copy one mutation-operator directory and compile it in place.
        new_dirs = os.path.join(subdir_mutants, subject_name,
                                operator_dir_name)
        new_dirs_packages = os.path.join(new_dirs, packages_dir)
        src_dir = os.path.join(src_dir, operator_dir_name)
        # If the directory exists it should already contain the .class
        # files, so skip copying the mutants and compiling.
        if not os.path.exists(new_dirs):
            shutil.copytree(src_dir, new_dirs_packages)
            utils.compile_workdir(new_dirs, new_dirs, new_dirs,
                                  original_code_dir)

    print("Setting up mujava...")
    packages = subject_name.split(".")[0:-1]
    packages_dir = utils.get_package_dir(packages)
    class_mutant_dir = os.path.join(origin_mutants_dir, "class_mutants")
    traditional_mutant_dir = os.path.join(origin_mutants_dir,
                                          "traditional_mutants")

    # Class-level mutants: one directory per mutation operator.
    for operator_dir_name in os.listdir(class_mutant_dir):
        if not os.path.isdir(os.path.join(class_mutant_dir,
                                          operator_dir_name)):
            continue
        mk_and_cp_operator_mutant_dir(class_mutant_dir, subject_name,
                                      operator_dir_name, packages_dir)

    # Traditional mutants: nested one level deeper, per method.
    for method_dir_name in os.listdir(traditional_mutant_dir):
        method_dir = os.path.join(traditional_mutant_dir, method_dir_name)
        if not os.path.isdir(method_dir):
            continue
        for operator_dir_name in os.listdir(method_dir):
            if not os.path.isdir(os.path.join(method_dir, operator_dir_name)):
                continue
            mk_and_cp_operator_mutant_dir(method_dir, subject_name,
                                          operator_dir_name, packages_dir)
    print("Mujava setup ready!")
Example #14
0
def start_web_server():
    """Configure and start the Tornado web server.

    Builds the application from get_mappings()/get_settings(), picks SSL
    certificates from the config directory or Let's Encrypt when present,
    binds the configured port, logs reachable URLs and starts the IOLoop.
    This call blocks until the IOLoop stops.
    """
    settings = get_settings()
    default_settings()

    application = tornado.web.Application(get_mappings(), **settings)

    try:
        # If port is a string, *some* GNU/Linux systems try to look up the port
        # in /etc/services. So try to interpret it as an integer.
        # http://www.ducea.com/2006/09/11/error-servname-not-supported-for-ai_socktype/
        # https://github.com/pika/pika/issues/352#issuecomment-18704043
        port = int(tornado.options.options.port)
    except ValueError:
        port = tornado.options.options.port
    max_buffer_size = 10 * 1024**2  # 10MB

    config_dir = os.path.join(get_package_dir(), '..', 'config')
    if os.path.isfile(os.path.join(config_dir, "fullchain.pem")):
        # Some certificate. `fullchain.pem` should contain all the certificates
        # concatenated together:
        # https://docs.python.org/3.4/library/ssl.html#certificate-chains
        ssl_opts = {
            "certfile": os.path.join(config_dir, "fullchain.pem"),
            "keyfile": os.path.join(config_dir, "privkey.pem")
        }
    elif os.path.isdir("/etc/letsencrypt/live/aquamark"):
        # Certificate provided by Let's Encrypt
        ssl_opts = {
            "certfile": "/etc/letsencrypt/live/aquamark/fullchain.pem",
            "keyfile": "/etc/letsencrypt/live/aquamark/privkey.pem"
        }
    else:
        # No certificates found: serve plain HTTP.
        ssl_opts = None

    if ssl_opts is not None:
        logging.getLogger('tornado.general').addFilter(ssl_log_filter)
        # Disable old, vulnerable SSL versions
        # https://blog.qualys.com/ssllabs/2014/10/15/ssl-3-is-dead-killed-by-the-poodle-attack
        ssl_opts['ciphers'] = 'DEFAULT:!SSLv2:!SSLv3:!RC4:!EXPORT:!DES'

    http_server = tornado.httpserver.HTTPServer(application,
                                                max_body_size=max_buffer_size,
                                                ssl_options=ssl_opts)
    http_server.listen(port)

    if log.isEnabledFor(logging.INFO):
        log.info("Tornado version: %s", tornado.version)
        log.debug("Tornado settings: %s", settings)
        log.info("Starting web application. Will be available on port %s",
                 port)
        hostname = socket.gethostname()
        ip = None
        try:
            # Try to get Docker container IP
            with open('/etc/hosts', 'r') as f:
                for line in f:
                    match = re.match(r'^(\S+)\s+%s$' % hostname, line)
                    if match:
                        ip = match.group(1)
                        break
        except OSError:
            # Best-effort only; IP detection failures are non-fatal.
            pass
        protocol = ssl_opts and 'https' or 'http'
        log.info("Try opening %s://%s:%s", protocol, hostname, port)
        if ip:
            log.info("         or %s://%s:%s", protocol, ip, port)
            log.info("Bound to: %s:%s", ip, port)
    # Blocks here serving requests until the IOLoop is stopped.
    tornado.ioloop.IOLoop.instance().start()
Example #15
0
def get_mappings():
    """Return the Tornado URL routing table for the application.

    Order matters: more specific routes come first and the catch-all
    static-file handler must stay last.
    """
    package_dir = get_package_dir()
    return [
        # Authentication
        (r"/login/?", authn.LoginHandler, {
            'path': os.path.join(package_dir, "..", "client")
        }),
        (r"/impersonate/(.*)", authn.ImpersonateHandler, {}),
        (r"/logout/?", authn.LogoutHandler),
        # Templates and static assets
        (r"/()", template.TemplateHandler, {
            'path': '../client/templates/'
        }),
        (r"/(.*\.html)", tornado.web.StaticFileHandler, {
            'path': os.path.join(package_dir, "../client/templates/")
        }),
        (r"/(manifest.json|css/user_style.css)",
         template.UnauthenticatedTemplateHandler, {
             'path': '../client/'
         }),
        (r"/ping.*", protocol.PingHandler, {}),
        (r"/bower_components/(.*)", tornado.web.StaticFileHandler, {
            'path': os.path.join(package_dir, "..", "client",
                                 "bower_components")
        }),
        (r"/minify/(.*)", compile_handlers.MinifyHandler, {
            'path': '/minify/',
            'root': os.path.join(package_dir, "..", "client")
        }),
        (r"/(.*\.css)", compile_handlers.CssHandler, {
            'root': os.path.join(package_dir, "..", "client")
        }),
        # CRUD endpoints
        (r"/images/icon-(.*)\.png", crud.image.IconHandler, {}),
        (r"/systemconfig.json", crud.config.SystemConfigHandler, {}),
        (r"/systemconfig/(.*)", crud.config.SystemConfigItemHandler, {}),
        (r"/custom_query/?([^/]*).json", crud.custom.CustomQueryHandler, {}),
        (r"/custom_query/?([^/]*)/history.json",
         crud.custom.CustomQueryHistoryHandler, {}),
        (r"/geo/(.*).json", crud.org.LocationSearchHandler, {}),
        (r"/surveygroup/?([^/]*).json", crud.surveygroup.SurveyGroupHandler,
         {}),
        (r"/surveygroup/icon/([^/]*)", crud.surveygroup.SurveyGroupIconHandler,
         {}),
        (r"/organisation/?([^/]*).json", crud.org.OrgHandler, {}),
        (r"/organisation/?([^/]*)/survey/?([^/]*).json",
         crud.org.PurchasedSurveyHandler, {}),
        (r"/user/?([^/]*).json", crud.user.UserHandler, {}),
        (r"/subscription/()([^/]*).json", crud.activity.SubscriptionHandler,
         {}),
        (r"/subscription/([^/]*)/(.*).json", crud.activity.SubscriptionHandler,
         {}),
        (r"/activity/?([^/]*).json", crud.activity.ActivityHandler, {}),
        (r"/card.json", crud.activity.CardHandler, {}),
        (r"/password.json", crud.user.PasswordHandler, {}),
        (r"/program/?([^/]*).json", crud.program.ProgramHandler, {}),
        (r"/program/?([^/]*)/history.json",
         crud.program.ProgramTrackingHandler, {}),
        (r"/survey/?([^/]*).json", crud.survey.SurveyHandler, {}),
        (r"/survey/?([^/]*)/program.json", crud.program.ProgramHistoryHandler,
         {
             'mapper': model.Survey
         }),
        (r"/qnode/?([^/]*).json", crud.qnode.QuestionNodeHandler, {}),
        (r"/qnode/?([^/]*)/program.json", crud.program.ProgramHistoryHandler, {
            'mapper': model.QuestionNode
        }),
        (r"/measure/?([^/]*).json", crud.measure.MeasureHandler, {}),
        (r"/measure/?([^/]*)/program.json", crud.program.ProgramHistoryHandler,
         {
             'mapper': model.Measure
         }),
        (r"/response_type/?([^/]*).json",
         crud.response_type.ResponseTypeHandler, {}),
        (r"/response_type/?([^/]*)/program.json",
         crud.program.ProgramHistoryHandler, {
             'mapper': model.ResponseType
         }),
        (r"/submission/?([^/]*).json", crud.submission.SubmissionHandler, {}),
        (r"/submission/([^/]*)/rnode/?([^/]*).json",
         crud.rnode.ResponseNodeHandler, {}),
        (r"/submission/([^/]*)/response/?([^/]*).json",
         crud.response.ResponseHandler, {}),
        (r"/submission/([^/]*)/response/?([^/]*)/history.json",
         crud.response.ResponseHistoryHandler, {}),
        (r"/submission/([^/]*)/measure/([^/]*)/attachment.json",
         crud.attachment.ResponseAttachmentsHandler, {}),
        (r"/submission/([^/]*)/measure/([^/]*)/submeasure/([^/]*)/attachment.json",
         crud.attachment.ResponseSubmeasureAttachmentsHandler, {}),
        (r"/attachment/([^/]*)(?:/(.*))?", crud.attachment.AttachmentHandler,
         {}),
        # Reports and exports
        (r"/report/sub/stats/program/([^/]*)/survey/([^/]*).json",
         StatisticsHandler, {}),
        (r"/report/diff.json", DiffHandler, {}),
        (r"/report/prog/export/([^/]*)/survey/([^/]*)/([^.]+)\.(.+)",
         ExportProgramHandler, {}),
        (r"/report/exportAssetReport/([^/]*)/survey/([^/]*)/program/([^/]*)/([^.]+)\.(.+)",
         ExportAssetHandler, {}),
        (r"/report/sub/temporal/([^/]*)\.(.+)", TemporalReportHandler, {}),
        (r"/report/sub/export/([^/]*)/([^.]+)\.(.+)", ExportSubmissionHandler,
         {}),
        (r"/report/custom_query/reformat\.sql", report.custom.SqlFormatHandler,
         {}),
        (r"/report/custom_query/identifiers\.json",
         report.custom.SqlIdentifierHandler, {}),
        (r"/report/custom_query/preview\.(.+)",
         report.custom.CustomQueryPreviewHandler, {}),
        (r"/report/custom_query/config\.json",
         report.custom.CustomQueryConfigHandler, {}),
        (r"/report/custom_query/([^.]+)/\w+\.(.+)",
         report.custom.CustomQueryReportHandler, {}),
        # Import and miscellaneous
        (r"/import/structure.json", ImportStructureHandler, {}),
        (r"/import/submission.json", ImportSubmissionHandler, {}),
        (r"/redirect", protocol.RedirectHandler),
        (r"/remap.json", crud.remap.IdMapperHandler),

        # test use session to keep status
        #(r"/status", authn.StatusHandler),
        ########################
        # Catch-all static handler — must remain last.
        (r"/(.*)", tornado.web.StaticFileHandler, {
            'path': os.path.join(package_dir, "..", "client")
        }),
    ]
Example #16
0
    def run(self):
        if self.method in [
                EpatestingMethod.ONLY_TESTGEN.value,
                EpatestingMethod.BOTH.value,
                EpatestingMethod.BOTH_WITHOUT_MUJAVA.value
        ]:
            print('GENERATING TESTS')
            code_dir = self.instrumented_code_dir if "epa".upper(
            ) in self.criterion.upper() else self.original_code_dir
            if "mining".upper() in self.criterion.upper(
            ) or "Compiler_" in self.name:  #hack for Compiler
                code_dir = self.mining_code_dir

            bin_code_dir = self.bin_instrumented_code_dir if "epa".upper(
            ) in self.criterion.upper() else self.bin_original_code_dir
            if "mining".upper() in self.criterion.upper(
            ) or "Compiler_" in self.name:  #hack for Compiler
                bin_code_dir = self.bin_mining_code_dir
            if len(self.extra_classpath) != 0:
                bin_code_dir += os.path.pathsep + self.extra_classpath

            # if exists testsuite in other bug_type, copy it!
            testsuite_exists = False
            curr_bug_type = self.bug_type
            try:
                lock.acquire()
                testsuite_exists = cp_testsuite_if_exists_in_other_results(
                    curr_bug_type, self.subdir_testgen,
                    self.generated_test_report_evosuite_dir, self.class_name,
                    self.criterion)
            except Exception as e:
                testsuite_exists = False
                print(
                    "error copying from other bug_type folder to {}. Error {}".
                    format(self.subdir_testgen, e))
            finally:
                lock.release()

            if (not testsuite_exists):
                if self.criterion == "randoop":
                    run_randoop(projectCP=bin_code_dir,
                                class_name=self.class_name,
                                randoop_jar_path=self.randoop_jar_path,
                                testdir=self.generated_test_dir,
                                search_budget=self.search_budget)
                else:
                    run_evosuite(
                        evosuite_jar_path=self.evosuite_jar_path,
                        strategy=self.strategy_value,
                        projectCP=bin_code_dir,
                        class_name=self.class_name,
                        criterion=self.criterion,
                        epa_path=self.epa_path,
                        inferred_epa_xml_path=self.inferred_epa_xml,
                        test_dir=self.generated_test_dir,
                        stopping_condition=self.stopping_condition,
                        search_budget=self.search_budget,
                        report_dir=self.generated_test_report_evosuite_dir)

            add_fails = False
            if (self.bug_type.upper() == BugType.ERRPROT.name):
                # If is run in errprot mode, then always remove asserts and specific exceptions
                self.assert_type = AssertType.NO_ASSERT_EXCEPTION.name
                #if("JDBCResultSet" in self.name):
                #add_fails= True;
            if self.assert_type.upper() in [
                    AssertType.NO_ASSERT.name,
                    AssertType.NO_ASSERT_EXCEPTION.name
            ]:
                if "randoop".upper() in self.criterion.upper():
                    test_dir = self.generated_test_dir
                    packages_dir = utils.get_package_dir(
                        self.class_name.split(".")[:-1])
                    test_dir_sub = os.path.join(test_dir, packages_dir)
                    for test_file_name in os.listdir(test_dir_sub):
                        test_file = os.path.join(test_dir_sub, test_file_name)
                        if not test_file.endswith(".java"):
                            continue
                        # ErrorTest files are generated by randoop. Contains error test. That fails in PIT
                        if "ErrorTest" in test_file:
                            continue
                        workaround_test(self.generated_test_dir,
                                        self.class_name, test_file_name,
                                        add_fails, self.assert_type)
                else:
                    test_file_name = self.class_name.split(
                        ".")[-1] + "_ESTest.java"
                    workaround_test(self.generated_test_dir, self.class_name,
                                    test_file_name, add_fails,
                                    self.assert_type)

            utils.compile_workdir(self.generated_test_dir,
                                  self.generated_test_dir, bin_code_dir,
                                  self.junit_jar, self.evosuite_classes,
                                  self.evosuite_runtime_jar_path,
                                  self.extra_classpath)

        criterion = get_alternative_criterion_names(self.criterion)

        if self.method in [
                EpatestingMethod.ONLY_METRICS.value,
                EpatestingMethod.BOTH.value,
                EpatestingMethod.BOTH_WITHOUT_MUJAVA.value,
                EpatestingMethod.ONLY_METRICS_WITHOUT_MUJAVA.value
        ]:
            print('GENERATING METRICS')
            packages_dir = utils.get_package_dir(
                self.class_name.split(".")[:-1])
            test_dir_sub = os.path.join(self.generated_test_dir, packages_dir)
            if not os.path.exists(test_dir_sub):
                print("not found test folder ! '{}'".format(test_dir_sub))
                exit(1)

            bin_code_dir = self.bin_instrumented_code_dir if "epa".upper(
            ) in self.criterion.upper() else self.bin_original_code_dir
            if "mining".upper() in self.criterion.upper(
            ) or "Compiler_" in self.name:  #hack for Compiler
                bin_code_dir = self.bin_mining_code_dir
            if len(self.extra_classpath) != 0:
                bin_code_dir += os.path.pathsep + self.extra_classpath

            ###to compile test suite
            #utils.compile_workdir(self.generated_test_dir, self.generated_test_dir, bin_code_dir, self.junit_jar, self.evosuite_classes, self.evosuite_runtime_jar_path, self.extra_classpath)

            #measure_evosuite(evosuite_jar_path=self.evosuite_jar_path, projectCP=self.bin_instrumented_code_dir, testCP=self.generated_test_dir, class_name=self.class_name, epa_path=self.epa_path, report_dir=self.generated_report_evosuite_dir, criterion="epatransition", inferred_epa_xml_path="", force_inferred_epa=False)
            #measure_evosuite(evosuite_jar_path=self.evosuite_jar_path, projectCP=self.bin_instrumented_code_dir, testCP=self.generated_test_dir, class_name=self.class_name, epa_path=self.epa_path, report_dir=self.generated_report_evosuite_dir, criterion="epaexception", inferred_epa_xml_path="", force_inferred_epa=False)
            #measure_evosuite(evosuite_jar_path=self.evosuite_jar_path, projectCP=self.bin_instrumented_code_dir, testCP=self.generated_test_dir, class_name=self.class_name, epa_path=self.epa_path, report_dir=self.generated_report_evosuite_dir, criterion="epaadjacentedges", inferred_epa_xml_path="", force_inferred_epa=False)
            # Hack to generate inferred epa for randoop. For other criteria it is generated in the generation process - only needed one true in force_inferred_epa
            force_inferred_epa_value = True if "randoop".upper(
            ) in self.criterion.upper() else False
            measure_evosuite(evosuite_jar_path=self.evosuite_jar_path,
                             projectCP=bin_code_dir,
                             testCP=self.generated_test_dir,
                             class_name=self.class_name,
                             epa_path=self.epa_path,
                             report_dir=self.generated_report_evosuite_dir,
                             criterion="epatransitionmining",
                             inferred_epa_xml_path=self.inferred_epa_xml,
                             force_inferred_epa=force_inferred_epa_value)
            measure_evosuite(evosuite_jar_path=self.evosuite_jar_path,
                             projectCP=self.bin_mining_code_dir,
                             testCP=self.generated_test_dir,
                             class_name=self.class_name,
                             epa_path=self.epa_path,
                             report_dir=self.generated_report_evosuite_dir,
                             criterion="epaexceptionmining",
                             inferred_epa_xml_path="",
                             force_inferred_epa=False)
            measure_evosuite(evosuite_jar_path=self.evosuite_jar_path,
                             projectCP=self.bin_mining_code_dir,
                             testCP=self.generated_test_dir,
                             class_name=self.class_name,
                             epa_path=self.epa_path,
                             report_dir=self.generated_report_evosuite_dir,
                             criterion="epaadjacentedgesmining",
                             inferred_epa_xml_path="",
                             force_inferred_epa=False)
            #if force_inferred_epa_value:
            #   return

            # Run Pitest to measure

            targetTests = "{}_ESTest".format(self.class_name)
            if "randoop".upper() in self.criterion.upper():
                targetTests = "{}.RegressionTest".format(
                    utils.get_package_name_from_qualifiedname(self.class_name))
            pitest_measure(
                self.generated_report_pitest_dir, self.class_name, targetTests,
                self.original_code_dir.replace("mining", "original"),
                self.generated_test_dir)
            #pitest_measure(self.generated_report_pitest_dir, self.class_name, self.original_code_dir, self.generated_test_dir, utils.get_package_dir(self.class_name.split(".")[0:-1]))

            #TODO: add strategy
            if self.method in [
                    EpatestingMethod.ONLY_METRICS.value,
                    EpatestingMethod.BOTH.value
            ]:
                mujava_measure(
                    self.bug_type, self.name, criterion, self.subdir_mutants,
                    self.error_prot_list, self.ignore_mutants_list,
                    self.bin_original_code_dir.replace("mining", "original"),
                    self.generated_test_dir, self.class_name, self.junit_jar,
                    self.hamcrest_jar_path, self.generated_report_mujava)

            # Resume the reports generated
            all_report_dir = os.path.join(self.subdir_metrics, 'all_reports')
            command_mkdir_report = 'mkdir {}'.format(all_report_dir)
            #utils.print_command(command_mkdir_report)
            if not os.path.exists(all_report_dir):
                os.makedirs(all_report_dir)

            copy_pitest_csv(self.name, self.generated_report_pitest_dir,
                            all_report_dir)

            statistics_csv = os.path.join(self.generated_report_evosuite_dir,
                                          "statistics.csv")
            copy_csv(statistics_csv, 'epacoverage_{}'.format(self.name),
                     all_report_dir)

            statistics_testgen_csv = ""
            if not self.criterion == "randoop":
                try:
                    statistics_testgen_csv = os.path.join(
                        self.generated_test_report_evosuite_dir,
                        "statistics.csv")
                    copy_csv(statistics_testgen_csv,
                             'statistics_testgen_{}'.format(self.name),
                             all_report_dir)
                except:
                    print(
                        "statistics_testgen_csv (generated by Evosuite) not found"
                    )
            mujava_csv = os.path.join(self.generated_report_mujava,
                                      "mujava_report.csv")
            if os.path.exists(mujava_csv):
                copy_csv(mujava_csv, 'mujava_{}'.format(self.name),
                         all_report_dir)
            else:
                print("Does not exists mujava file {}".format(mujava_csv))

            epacoverage_csv = os.path.join(
                all_report_dir, "epacoverage_{}.csv".format(self.name))
            if self.criterion != "randoop":
                statistics_testgen_csv = os.path.join(
                    all_report_dir,
                    "statistics_testgen_{}.csv".format(self.name))
            jacoco_csv = os.path.join(all_report_dir,
                                      "{}_jacoco.csv".format(self.name))
            mutations_csv = os.path.join(all_report_dir,
                                         "{}_mutations.csv".format(self.name))

            pit_mutants_histogram(self.strategy_name, self.bug_type, criterion,
                                  self.search_budget, self.stopping_condition,
                                  mutations_csv, self.generated_test_dir,
                                  self.generated_pitest_killer_test,
                                  self.runid)
            # For test suite LOC
            result_jncss_temp = get_file_path_jncss(
                self.strategy_name, self.class_name, self.generated_test_dir,
                self.results_dir_name, self.bug_type, self.stopping_condition,
                self.search_budget, criterion, self.runid,
                self.javancss_jar_path)
            # For covered exceptions goals
            testgen_log_file_path = os.path.join(self.subdir_testgen,
                                                 "testgen_out.txt")

            make_report_resume.resume(
                self.class_name, epacoverage_csv, statistics_testgen_csv,
                jacoco_csv, mutations_csv, self.resume_csv, self.runid,
                self.stopping_condition, self.search_budget, criterion,
                self.bug_type, self.strategy_name, mujava_csv,
                result_jncss_temp, testgen_log_file_path)

        if self.method in [EpatestingMethod.ONLY_PIT_MUTANTS_HISTOGRAM.value]:
            mutations_csv = get_mutation_csv_pit(
                self.generated_report_pitest_dir)
            pit_mutants_histogram(self.strategy_name, self.bug_type, criterion,
                                  self.search_budget, self.stopping_condition,
                                  mutations_csv, self.generated_test_dir,
                                  self.generated_pitest_killer_test,
                                  self.runid)

        # Hack (for old executions)
        if self.method in [
                EpatestingMethod.ONLY_TEST_SUITE_LOC_AND_EXCEPTION.value
        ]:
            #Para TS LOC
            result_jncss_temp = get_file_path_jncss(
                self.class_name, self.generated_test_dir,
                self.results_dir_name, self.bug_type, self.stopping_condition,
                self.search_budget, criterion, self.runid,
                self.javancss_jar_path)
            # Para obtener exceptions
            ####################
            testgen_log_file_path = os.path.join(self.subdir_testgen,
                                                 "testgen_out.txt")
            # Este archivo tiene la cantidad de goals cubiertos para cada criterio (-measureCoverage)
            all_report_dir = os.path.join(self.subdir_metrics, 'all_reports')
            epacoverage_csv = os.path.join(
                all_report_dir, "epacoverage_{}.csv".format(self.name))
            # Este archivo tiene la suma de goals cubiertos (incluyendo criterio exception)
            statistics_testgen_csv = os.path.join(
                all_report_dir, "statistics_testgen_{}.csv".format(self.name))

            utils.make_dirs_if_not_exist(self.subdir_metrics)
            make_report_resume.resume_test_suite_loc_and_exceptions(
                self.class_name, self.resume_csv, self.runid,
                self.stopping_condition, self.search_budget, criterion,
                self.bug_type, result_jncss_temp, testgen_log_file_path,
                epacoverage_csv, statistics_testgen_csv)
Exemple #17
0
def exist_subject(bin_code_dir, class_name):
    """Return True iff the package directory for *class_name* exists under *bin_code_dir*.

    The package path is derived from the fully-qualified class name by dropping
    the final component (the class itself) and converting the remaining
    package components into a filesystem path via ``utils.get_package_dir``.

    :param bin_code_dir: root directory holding the compiled classes.
    :param class_name: fully-qualified Java class name (e.g. ``a.b.C``).
    :return: bool — whether the package directory is present on disk.
    """
    package_dir = utils.get_package_dir(class_name.split(".")[0:-1])
    # os.path.exists already yields a bool; no need for an if/True/False ladder.
    return os.path.exists(os.path.join(bin_code_dir, package_dir))
def copy_test(inputs, output, id, n, qualified_name, keywords_to_remove_file, replace_in_imports, replace_original_testsuite):
    """Collect EvoSuite-generated test suites from several run directories into *output*.

    For each comma-separated path in *inputs*, runs 0..n-1 are scanned for a
    ``<Class>_ESTest.java`` file; each one found is renamed to a unique name
    (class + run id + short criterion + run index), optionally has imports
    rewritten, has "flaky" tests stripped, and is saved into *output*.

    :param inputs: comma-separated list of criterion directories to scan.
    :param id: run identifier; prefixed with "ID" to build the new file name.
    :param n: number of run subdirectories (0..n-1) to look at per input path.
    :param qualified_name: fully-qualified name of the class under test.
    :param keywords_to_remove_file: config file listing keywords whose presence
        marks a line (or a whole @Test method) for removal.
    :param replace_in_imports: "old=new" substitution applied to import lines,
        or a string without "=" to disable the substitution.
    :param replace_original_testsuite: the string "True" to also write the
        cleaned/renamed suite back over the original test file.
    """
    def get_rename_test(java_file, class_name, new_test_file_name, replace_in_imports):
        # Read *java_file* and return its text with the test class name
        # (<class_name>_ESTest) replaced by *new_test_file_name*, and the
        # optional "old=new" import substitution applied to matching lines.
        replace_import = "=" in replace_in_imports
        old = ""
        new = ""
        if replace_import:
            old = replace_in_imports.split("=")[0]
            new = replace_in_imports.split("=")[1]
        new_file = ""
        test_name = class_name + "_ESTest"
        with open(java_file) as file:
            for line in file:
                if replace_import and old in line:
                    # NOTE(review): `old` is used as a regex pattern here —
                    # special characters in it would change matching; confirm
                    # callers only pass plain package prefixes.
                    line = re.sub(old,new, line.rstrip())+"\n"
                if(test_name in line):
                    # Renamed lines lose their trailing newline (rstrip with no
                    # re-append) — preserved as-is from the original code.
                    line = re.sub(test_name,new_test_file_name, line.rstrip())
                new_file += line
        return new_file

    def remove_flacky_test(new_test_file_name, java_file, keywords_to_remove_file):
        # Strip "flaky" content from the test-suite text *java_file* (a string,
        # not a path): lines matching the ALL keyword list are dropped from the
        # header, and any @Test method containing a keyword from the ALL list
        # or the per-suite list (keyed by *new_test_file_name*) is removed.
        def get_keywords_to_remove(keywords_to_remove_file, wich):
            # Parse the keyword config: skips blank/#-comment lines; lines
            # containing "ALL" always contribute; lines containing *wich*
            # contribute their comma-separated keyword list (after the '=').
            with open(keywords_to_remove_file) as f:
                lines = f.readlines()
                #lines = [line.strip() for line in lines]
            keywords_to_remove = []
            for line in lines:
                line = line.strip()
                # line comment
                if line.startswith("#") or len(line) == 0:
                    continue
                if("ALL" in line):

                    keywords_to_remove.extend(line.split('=')[1].strip().split(","))
                    continue
                if(wich in line):
                    keywords_to_remove.extend(line.split('=')[1].strip().split(","))
            return keywords_to_remove


        def remove_line(line, keywords_to_remove_array):
            # True if *line* contains any of the given keywords.
            remove_line = False
            for key in keywords_to_remove_array:
                if key in line:
                    remove_line = True
                    break
            return remove_line

        new_file = ""
        keywords_to_remove_all = get_keywords_to_remove(keywords_to_remove_file, "ALL")
        file = java_file.split("\n")
        start = False           # True once the first @Test annotation is seen
        test = ""               # current @Test method being accumulated
        remove_test = False     # current method is marked for removal
        #with open(java_file) as file:
        for line in file:
            line += "\n"
            if("@Test" in line):
                # A new test method begins: flush (or drop) the previous one.
                start =True
                if not remove_test and len(test) != 0:
                    new_file += test +"\n"
                elif remove_test:
                    remove_test = False
                test = ""
            if remove_test:
                continue
            if(not start):
                # Header section (before the first @Test): copy lines unless
                # they match an ALL keyword.
                if(not remove_line(line, keywords_to_remove_all)):
                    new_file += line
                    continue
            if(start):
                test += line
                # NOTE(review): the per-suite keyword file is re-read for every
                # line here; behavior preserved, but this is O(lines * file reads).
                if(remove_line(line, get_keywords_to_remove(keywords_to_remove_file, new_test_file_name))):
                    remove_test = True
        if remove_test:
            # Last method was dropped: re-close the class body it took with it.
            new_file += "\n}"
        else:
            new_file += test
        return new_file

    def get_short_criterion(criterion):
        # Map long criterion directory names to short tags used in file names.
        #criterion = criterion.replace("line_branch_exception", "lbe")
        #criterion = criterion.replace("epaadjacentedgesmining", "pairs")
        criterion = criterion.replace("line_branch_exception_epaadjacentedgesmining", "pa")
        criterion = criterion.replace("line_branch_exception", "d")
        return criterion


    inputs_array = inputs.split(",")
    id = "ID"+id
    for input in inputs_array:
        print("parsing path " + input + "...")
        i = 0
        class_name = qualified_name.split(".")[-1]
        package_dir = utils.get_package_name_from_qualifiedname(qualified_name)
        package_dir = utils.get_package_dir(package_dir.split("."))
        # Short criterion tag comes from the last path component of the input dir.
        criterion = get_short_criterion(input.split(os.path.sep)[-1])
        file_name = class_name+"_ESTest.java"
        test_file = os.path.join(input, str(i), "test", package_dir, file_name)
        while(i < n):
            # Skip run indices whose test file does not exist.
            while (not os.path.exists(test_file) and i < n):
                i += 1
                test_file = os.path.join(input, str(i), "test", package_dir, file_name)
            if i >= n:
                break
            new_test_file_name = class_name + "_" + id + "_" + criterion + "_" + str(i) + "_ESTest.java"
            new_test_file_name_without_extension = new_test_file_name.split(".")[0]
            test_file_output = os.path.join(output, new_test_file_name)
            # Rename the test class, then strip flaky tests, then save the copy.
            new_test_file = get_rename_test(test_file, class_name, new_test_file_name_without_extension, replace_in_imports)
            new_test_file = remove_flacky_test(new_test_file_name_without_extension, new_test_file, keywords_to_remove_file)
            utils.save_file(test_file_output, new_test_file)

            ###Replace_original TS
            # Optionally rename the cleaned copy back to <class>_ESTest and
            # overwrite the original suite with it.
            if replace_original_testsuite == "True":
                new_test_file = get_rename_test(test_file_output, new_test_file_name_without_extension.replace("_ESTest",""), class_name+"_ESTest", replace_in_imports)
                utils.save_file(test_file, new_test_file)
            i += 1
            test_file = os.path.join(input, str(i), "test", package_dir, file_name)
    print("saved files to "+ output + "...")