Example no. 1
def print_given():
    given_class = "com.fasterxml.jackson.databind.ser.std.StdScalarSerializer"
    given_test = "com.fasterxml.jackson.databind.creators.TestCreatorWithNamingStrategy556::testRenameViaFactory"

    statement_coverage = utils.read_json_file(file_path)
    statement_coverage_new = utils.read_json_file(file_path_new)

    for tests in statement_coverage_new[given_class]:
        print("{} -> {}".format(tests,
                                statement_coverage_new[given_class][tests]))

    stop_at = 5
    counter = 0
    print_from = 5
    for classes in statement_coverage:
        counter = counter + 1
        if counter >= print_from:
            print("{} -> {}".format(classes, ""))
            for tests in statement_coverage[classes]:
                try:
                    print("{}{} -> {}".format(
                        "\t", tests, statement_coverage[classes][tests]))
                    print("{}{} -> {}".format(
                        "\t", tests, statement_coverage_new[classes][tests]))
                except KeyError:
                    print(tests)
        if counter >= stop_at:
            break
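All of these snippets rely on a project-specific read_json_file helper (often imported as utils.read_json_file), and they come from different codebases, so the signature varies: some projects call it with a single path, others with a folder, a file name and a to_json flag. A minimal sketch that accepts both calling styles is shown below; it is an illustrative assumption, not the actual helper used by any of these projects.

import json
import os


def read_json_file(folder, filename=None, to_json=True):
    # Hypothetical helper covering both call styles seen in the examples:
    # read_json_file(path) and read_json_file(folder, filename, to_json=True).
    path = os.path.join(folder, filename) if filename is not None else folder
    with open(path, "r", encoding="utf-8") as json_file:
        content = json_file.read()
    # Return the parsed document, or the raw text when to_json is False.
    return json.loads(content) if to_json else content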
Example no. 2
def iterate_all_print():
    statement_coverage = utils.read_json_file(file_path)
    # for classes in statement_coverage:
    #     print("{} ->".format(classes))
    #     # for tests in statement_coverage[classes]:
    #     #     print("{} -> {}".format(tests, statement_coverage[classes][tests]))

    print(len(utils.read_json_file(file_path)))
    print(len(utils.read_json_file(file_path_new)))
Example no. 3
def create_vocab(params):
    if not os.path.exists(params.vocab_dir):
        os.mkdir(params.vocab_dir)

    left, right, label = read_json_file(params.train_file)
    left = [[w.strip() for w in l] for l in left]
    left = list(itertools.chain(*left))
    right = [[w.strip() for w in l] for l in right]
    right = list(itertools.chain(*right))

    all_content = []
    all_content.extend(left)
    all_content.extend(right)
    content_counter = Counter(all_content)
    content_count_pairs = content_counter.most_common(params.vocab_size - 1)
    id_to_word, _ = list(zip(*content_count_pairs))
    id_to_word = ["<PAD>", "<UNK>"] + list(id_to_word)
    save_vocab(id_to_word, os.path.join(params.vocab_dir, "vocab.txt"))
    word_to_id = dict(zip(id_to_word, range(len(id_to_word))))

    label_counter = Counter(label)
    label_count_pairs = label_counter.most_common(params.num_classes)
    id_to_label, _ = list(zip(*label_count_pairs))
    save_vocab(id_to_label, os.path.join(params.vocab_dir, "label.txt"))
    label_to_id = dict(zip(id_to_label, range(len(id_to_label))))

    return word_to_id, id_to_word, label_to_id, id_to_label
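create_vocab only reads four attributes from params (vocab_dir, train_file, vocab_size and num_classes), so it can be exercised with a simple namespace; the values below are placeholders rather than settings from the original project, and the call assumes the read_json_file and save_vocab helpers used above are importable.

from types import SimpleNamespace

# Placeholder parameters; only the attributes create_vocab reads are provided.
params = SimpleNamespace(vocab_dir="vocab",
                         train_file="train.json",
                         vocab_size=50000,
                         num_classes=2)
word_to_id, id_to_word, label_to_id, id_to_label = create_vocab(params)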
Example no. 4
 def create_index(self, index_name):
     """Create index in elasticsearch"""
     logger.debug("Creating '%s' Elasticsearch index", str(index_name))
     logger.info("ES Url %s", utils.remove_credentials_from_url(self.host))
     try:
         response = self.es_client.indices.create(index=str(index_name), body={
             'settings': utils.read_json_file("", "index_settings.json", to_json=True),
             'mappings': utils.read_json_file("", "index_mapping_settings.json", to_json=True)
         })
         logger.debug("Created '%s' Elasticsearch index", str(index_name))
         return commons.launch_objects.Response(**response)
     except Exception as err:
         logger.error("Couldn't create index")
         logger.error("ES Url %s", utils.remove_credentials_from_url(self.host))
         logger.error(err)
         return commons.launch_objects.Response()
Example no. 5
 def create_index_for_stats_info(self, es_client, rp_aa_stats_index):
     index = None
     try:
         index = es_client.indices.get(index=rp_aa_stats_index)
     except Exception:
         pass
     if index is None:
         es_client.indices.create(index=rp_aa_stats_index, body={
             'settings': utils.read_json_file("", "index_settings.json", to_json=True),
             'mappings': utils.read_json_file(
                 "", "%s_mappings.json" % rp_aa_stats_index, to_json=True)
         })
     else:
         es_client.indices.put_mapping(
             index=rp_aa_stats_index,
             body=utils.read_json_file("", "%s_mappings.json" % rp_aa_stats_index, to_json=True))
Example no. 6
 def _parse_config(self, config_file_path):
     file_json = utils.read_json_file(config_file_path)
     self.__ensure_json_is_dict(file_json)
     return ModelConfig(file_json["model_name"], file_json["model_path"],
                        file_json["dictionary_path"],
                        file_json.get("document_processor", "default"),
                        file_json.get("features", "word_indices"))
Example no. 7
def read_model_settings():
    """Reads paths to models"""
    model_settings = utils.read_json_file("", "model_settings.json", to_json=True)
    SEARCH_CONFIG["BoostModelFolder"] = model_settings["BOOST_MODEL_FOLDER"]
    SEARCH_CONFIG["SuggestBoostModelFolder"] = model_settings["SUGGEST_BOOST_MODEL_FOLDER"]
    SEARCH_CONFIG["SimilarityWeightsFolder"] = model_settings["SIMILARITY_WEIGHTS_FOLDER"]
    SEARCH_CONFIG["GlobalDefectTypeModelFolder"] = model_settings["GLOBAL_DEFECT_TYPE_MODEL_FOLDER"]
Example no. 8
 def _parse_config(self, config_path):
     config_json = utils.read_json_file(config_path)
     model_configs = []
     for raw_model_config in config_json["models"]:
         model_config = ModelTrainingConfig(raw_model_config["model_name"], raw_model_config["model_architecture"],
                                            raw_model_config.get("document_processor", "default"), raw_model_config.get("features", "word_indices"),
                                            raw_model_config.get("model_params", dict()))
         model_configs.append(model_config)
     return TrainingConfig(config_json.get("number_epochs", 50), config_json["dataset_directory"], model_configs)
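Judging by the keys this _parse_config accesses, the training configuration is a JSON document with a top-level models list plus number_epochs and dataset_directory. The dictionary below sketches a minimal input; every value is a placeholder.

import json

# Hypothetical training configuration matching the keys read by _parse_config.
example_config = {
    "number_epochs": 50,
    "dataset_directory": "data/",
    "models": [
        {
            "model_name": "baseline_cnn",
            "model_architecture": "cnn",
            "document_processor": "default",
            "features": "word_indices",
            "model_params": {"dropout": 0.5}
        }
    ]
}

with open("training_config.json", "w", encoding="utf-8") as config_file:
    json.dump(example_config, config_file, indent=2)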
Example no. 9
 def _load_test_results(self, path):
     test_results = []
     for result_json in utils.read_json_file(path):
         test_result = TestResult(result_json["accuracy"],
                                  result_json["per_class_accuracies"])
         test_result.labels = result_json.get("labels", [])
         test_result.model_name = result_json.get("model_name", "")
         test_result.memory_usage_info = self.__parse_memory_usage_info(
             result_json.get("memory_usage_info"))
         test_results.append(test_result)
     return test_results
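_load_test_results iterates over a JSON array; based on the keys it reads, each element looks roughly like the placeholder record below. The field values are made up, and memory_usage_info is omitted because its shape is handled by a separate helper not shown in the snippet.

import json

# Hypothetical test-results file with only the keys _load_test_results reads.
example_results = [
    {
        "accuracy": 0.91,
        "per_class_accuracies": {"positive": 0.93, "negative": 0.89},
        "labels": ["positive", "negative"],
        "model_name": "baseline_cnn"
    }
]

with open("test_results.json", "w", encoding="utf-8") as results_file:
    json.dump(example_results, results_file, indent=2)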
Example no. 10
 def create_grafana_data_source(self, esHostGrafanaDatasource, index_name,
                                time_field):
     index_exists = False
     index_properties = utils.read_json_file("",
                                             "%s_mappings.json" %
                                             index_name,
                                             to_json=True)
     if not self.index_exists(index_name, print_error=False):
         response = self.create_index(index_name, index_properties)
         if len(response):
             index_exists = True
     else:
         index_exists = True
     if index_exists:
         self.delete_grafana_datasource_by_name(index_name)
         es_user, es_pass = utils.get_credentials_from_url(
             esHostGrafanaDatasource)
         try:
             requests.post("%s/api/datasources" % self.grafanaHost,
                           data=json.dumps({
                               "name":
                               index_name,
                               "type":
                               "elasticsearch",
                               "url":
                               utils.remove_credentials_from_url(
                                   esHostGrafanaDatasource),
                               "access":
                               "proxy",
                               "basicAuth":
                               len(es_user) > 0,
                               "basicAuthUser":
                               es_user,
                               "secureJsonData": {
                                   "basicAuthPassword": es_pass
                               },
                               "database":
                               index_name,
                               "jsonData": {
                                   "esVersion": 70,
                                   "maxConcurrentShardRequests": "1",
                                   "timeField": time_field
                               }
                           }),
                           headers={
                               "content-type": "application/json"
                           }).raise_for_status()
             return True
         except Exception as err:
             logger.error("Can't create grafana datasource")
             logger.error(err)
             return False
     return False
Example no. 11
def class_diff_identifier():
    statement_coverage = utils.read_json_file(file_path)
    statement_coverage_new = utils.read_json_file(file_path_new)

    statement_tests = []
    statement_new_tests = []

    for classes in statement_coverage:
        statement_tests.append(classes)

    for classes in statement_coverage_new:
        statement_new_tests.append(classes)

    print(statement_tests)
    print(len(statement_new_tests))
    print(len(statement_tests))
    print(list_diff(statement_new_tests, statement_tests))


# class_diff_identifier()

# com.fasterxml.jackson.databind.DeserializationConfig
Example no. 12
 def _recreate_index_if_needed(self, bodies, formatted_exception):
     index_name = ""
     if bodies:
         index_name = bodies[0]["_index"]
     if not index_name.strip():
         return
     index_properties = utils.read_json_file("",
                                             "%s_mappings.json" %
                                             index_name,
                                             to_json=True)
     if "'type': 'mapper_parsing_exception'" in formatted_exception or\
             "RequestError(400, 'illegal_argument_exception'" in formatted_exception:
         if index_name in self.tables_to_recreate:
             self.delete_index(index_name)
             self.create_index(index_name, index_properties)
Example no. 13
    def bulk_index(self, index_name, bulk_actions):
        exists_index = False
        index_properties = utils.read_json_file("",
                                                "%s_mappings.json" %
                                                index_name,
                                                to_json=True)
        if not self.index_exists(index_name, print_error=False):
            response = self.create_index(index_name, index_properties)
            if len(response):
                exists_index = True
        else:
            exists_index = True
        if exists_index:
            try:
                try:
                    self.es_client.indices.put_mapping(index=index_name,
                                                       body=index_properties)
                except:  # noqa
                    formatted_exception = traceback.format_exc()
                    self._recreate_index_if_needed(bulk_actions,
                                                   formatted_exception)
                logger.debug('Indexing %d docs...' % len(bulk_actions))
                try:
                    success_count, errors = elasticsearch.helpers.bulk(
                        self.es_client,
                        bulk_actions,
                        chunk_size=1000,
                        request_timeout=30,
                        refresh=True)
                except:  # noqa
                    formatted_exception = traceback.format_exc()
                    self._recreate_index_if_needed(bulk_actions,
                                                   formatted_exception)
                    self.update_settings_after_read_only()
                    success_count, errors = elasticsearch.helpers.bulk(
                        self.es_client,
                        bulk_actions,
                        chunk_size=1000,
                        request_timeout=30,
                        refresh=True)

                logger.debug("Processed %d logs", success_count)
                if errors:
                    logger.debug("Occured errors %s", errors)
            except Exception as err:
                logger.error(err)
                logger.error("Bulking index for %s index finished with errors",
                             index_name)
Example no. 14
 def setUp(self):
     self.one_hit_search_rs_explained = "one_hit_search_rs_explained.json"
     self.two_hits_search_rs_explained = "two_hits_search_rs_explained.json"
     self.log_message = "log_message.json"
     self.log_message_wo_stacktrace = "log_message_wo_stacktrace.json"
     self.one_hit_search_rs_explained_wo_stacktrace =\
         "one_hit_search_rs_explained_wo_stacktrace.json"
     self.log_message_only_small_logs = "log_message_only_small_logs.json"
     self.one_hit_search_rs_small_logs = "one_hit_search_rs_small_logs.json"
     self.two_hits_search_rs_small_logs = "two_hits_search_rs_small_logs.json"
     self.three_hits_search_rs_explained = "three_hits_search_rs_explained.json"
     self.one_hit_search_rs_explained_wo_params = "one_hit_search_rs_explained_wo_params.json"
     self.epsilon = 0.0001
     model_settings = utils.read_json_file("", "model_settings.json", to_json=True)
     self.weights_folder = model_settings["SIMILARITY_WEIGHTS_FOLDER"]
     logging.disable(logging.CRITICAL)
Example no. 15
 def setUp(self):
     self.one_hit_search_rs_explained = "one_hit_search_rs_explained.json"
     self.two_hits_search_rs_explained = "two_hits_search_rs_explained.json"
     self.two_hits_search_rs_small_logs = "two_hits_search_rs_small_logs.json"
     self.log_message = "log_message.json"
     self.log_message_only_small_logs = "log_message_only_small_logs.json"
     self.log_message_suggest = "log_message_suggest.json"
     self.boost_model_results = "boost_model_results.json"
     self.suggest_boost_model_results = "suggest_boost_model_results.json"
     self.epsilon = 0.0001
     model_settings = utils.read_json_file("",
                                           "model_settings.json",
                                           to_json=True)
     self.boost_model_folder = model_settings["BOOST_MODEL_FOLDER"]
     self.suggest_boost_model_folder =\
         model_settings["SUGGEST_BOOST_MODEL_FOLDER"]
     self.weights_folder = model_settings["SIMILARITY_WEIGHTS_FOLDER"]
     logging.disable(logging.CRITICAL)
Example no. 16
 def import_dashboard(self, dashboard_id):
     dashboard_info = utils.read_json_file("",
                                           "{}.json".format(dashboard_id),
                                           to_json=True)
     requests.post("%s/api/dashboards/db" % self.grafanaHost,
                   data=json.dumps({
                       "dashboard": dashboard_info["dashboard"],
                       "folderId": dashboard_info["meta"]["folderId"],
                       "refresh": True,
                       "overwrite": True
                   }),
                   headers={
                       'content-type': 'application/json'
                   }).raise_for_status()
Example no. 17
    def sort_and_pad(self):
        left, right, label = read_json_file(self.data_path)
        label = parse_one_hot(label, self.label_to_id)
        data = list(zip(left, right, label))

        self.len_data = len(data)
        # Compute how many batches one epoch contains
        self.num_batch = int(math.ceil(len(data) / self.config.batch_size))
        # Sort by sequence length in ascending order
        sorted_data = sorted(data, key=lambda x: len(x[0]) + len(x[1]))
        batch_data = list()
        # Only sort here (no shuffling first) so that sequence lengths within the same batch stay close
        # Pad the sequences of each batch with zeros up to the length of the longest sequence in that batch
        for i in range(self.num_batch):
            batch_data.append(
                self.pad_data(
                    sorted_data[i * self.config.batch_size:(i + 1) *
                                self.config.batch_size], self.word_to_id,
                    self.label_to_id))
        return batch_data
    parser.add_argument("num_runs", type=int, help="number of runs")
    parser.add_argument("num_episodes", type=int, help="number of episodes")
    parser.add_argument("--max_episode_length", type=int, default=100, help="maximum length of an episode")
    parser.add_argument("--plot_task_curves", action="store_true", help="whether to also plot task curves")
    parser.add_argument("--use_greedy_traces", "-g", action="store_true", help="whether to use the traces that use the greedy policy")
    parser.add_argument("--greedy_evaluation_frequency", type=int, default=1, help="every how many episodes was the greedy policy evaluated")
    parser.add_argument("--use_tex", action="store_true", help="whether to plot the strings using TeX")
    parser.add_argument("--window_size", "-w", type=int, default=10, help="size of the averaging window")
    parser.add_argument("--plot_title", "-t", default=None, help="the title of the plot")
    return parser


if __name__ == "__main__":
    args = create_argparser().parse_args()

    plt.rc('text', usetex=args.use_tex)
    plt.rc('font', family='serif')

    num_tasks, num_runs, num_episodes = args.num_tasks, args.num_runs, args.num_episodes
    config_obj = read_json_file(args.config)
    output_filename_base, output_path = os.path.basename(args.config)[:-len(".json")], os.path.abspath(os.path.dirname(args.config))
    total_rewards_sum, total_steps_sum, total_automaton_learning_episodes = process_tasks(config_obj, num_tasks, num_runs,
                                                                                          num_episodes, args.max_episode_length,
                                                                                          args.use_greedy_traces, args.greedy_evaluation_frequency,
                                                                                          args.plot_task_curves,
                                                                                          args.window_size, args.plot_title,
                                                                                          output_filename_base, output_path)
    plot_average_task_curves(config_obj, num_tasks, num_runs, args.max_episode_length, args.window_size, args.plot_title,
                             total_rewards_sum, total_steps_sum, total_automaton_learning_episodes, output_filename_base,
                             output_path)
        help="json file containing number of states, observables and examples")
    parser.add_argument("task_filename", help="filename of the ILASP task")
    parser.add_argument("solution_filename",
                        help="filename of the ILASP task solution")
    parser.add_argument("plot_filename", help="filename of the automaton plot")
    parser.add_argument(
        "--symmetry_breaking_method",
        "-s",
        default=None,
        help="method for symmetry breaking (bfs, increasing_path)")
    return parser


if __name__ == "__main__":
    args = get_argparser().parse_args()
    config = utils.read_json_file(args.task_config)

    generate_ilasp_task(config["num_states"],
                        "u_acc",
                        "u_rej",
                        config["observables"],
                        config["goal_examples"],
                        config["deadend_examples"],
                        config["inc_examples"],
                        ".",
                        args.task_filename,
                        args.symmetry_breaking_method,
                        config["max_disjunction_size"],
                        config["learn_acyclic"],
                        config["use_compressed_traces"],
                        config["avoid_learning_only_negative"],
Example no. 20
# coding:utf-8

import os
from tornado.options import define
from utils import utils

root_path = os.path.join(os.path.dirname(__file__), "..")
sub_static_path = "static"
static_path = os.path.join(root_path, sub_static_path)

# All settings contained in the JSON configuration file
_json_items = utils.read_json_file(
    os.path.dirname(os.path.realpath(__file__)) + '/config.json')

# Whether we are running in development (debug) mode
IS_DEBUG = _json_items.get('is_debug')

# Log directory path
LOG_PATH = os.path.join(os.path.dirname(__file__), "logs")

# How long to wait before killing the process
if IS_DEBUG:
    SHUTDOWN_WAIT_SECONDS = 1
else:
    SHUTDOWN_WAIT_SECONDS = 5


def get_by_key(key):
    return _json_items.get(key)

Example no. 21
 def load_dataset_params(self, path):
     dataset_dict = utils.read_json_file(path)
     return DatasetParams(dataset_dict.get("dictionary_length", None),
                          dataset_dict.get("num_classes", None),
                          dataset_dict.get("max_sequence_length", None))
Example no. 22
def get_mutation_score(score_type, score_path):
    try:
        return utils.read_json_file(score_path + '/mutation_score.txt')[score_type]
    except KeyError:
        return 0
Example no. 23
def visualize_statement_percent_wise():
    path = str(get_project_root()) + results_folder
    directory = os.path.join(path)
    for root, dirs, files in os.walk(directory):
        for file in files:
            if file.endswith(".json"):
                print(file)
                contents = read_json_file(path + '/' + file)
                for stmt_tests in contents[0]['tests']:
                    for test_suites in stmt_tests['test_suites']:
                        try:
                            if 0 <= test_suites['statement_coverage'][
                                    'score'] <= 10:
                                statement_0_10.append(
                                    len(test_suites['statement_coverage']
                                        ['tests']))
                            if 11 <= test_suites['statement_coverage'][
                                    'score'] <= 20:
                                statement_10_20.append(
                                    len(test_suites['statement_coverage']
                                        ['tests']))
                            if 21 <= test_suites['statement_coverage'][
                                    'score'] <= 30:
                                statement_20_30.append(
                                    len(test_suites['statement_coverage']
                                        ['tests']))

                            if 31 <= test_suites['statement_coverage'][
                                    'score'] <= 40:
                                statement_30_40.append(
                                    len(test_suites['statement_coverage']
                                        ['tests']))
                            if 41 <= test_suites['statement_coverage'][
                                    'score'] <= 50:
                                statement_40_50.append(
                                    len(test_suites['statement_coverage']
                                        ['tests']))
                            if 51 <= test_suites['statement_coverage'][
                                    'score'] <= 60:
                                statement_50_60.append(
                                    len(test_suites['statement_coverage']
                                        ['tests']))

                            if 61 <= test_suites['statement_coverage'][
                                    'score'] <= 70:
                                statement_60_70.append(
                                    len(test_suites['statement_coverage']
                                        ['tests']))
                            if 71 <= test_suites['statement_coverage'][
                                    'score'] <= 80:
                                statement_70_80.append(
                                    len(test_suites['statement_coverage']
                                        ['tests']))
                            if 81 <= test_suites['statement_coverage'][
                                    'score'] <= 90:
                                statement_80_90.append(
                                    len(test_suites['statement_coverage']
                                        ['tests']))
                            if 91 <= test_suites['statement_coverage'][
                                    'score'] <= 100:
                                statement_90_100.append(
                                    len(test_suites['statement_coverage']
                                        ['tests']))

                            if 0 <= test_suites['checked_coverage'][
                                    'score'] <= 10:
                                checked_0_10.append(
                                    len(test_suites['checked_coverage']
                                        ['tests']))
                            if 11 <= test_suites['checked_coverage'][
                                    'score'] <= 20:
                                checked_10_20.append(
                                    len(test_suites['checked_coverage']
                                        ['tests']))
                            if 21 <= test_suites['checked_coverage'][
                                    'score'] <= 30:
                                checked_20_30.append(
                                    len(test_suites['checked_coverage']
                                        ['tests']))

                            if 31 <= test_suites['checked_coverage'][
                                    'score'] <= 40:
                                checked_30_40.append(
                                    len(test_suites['checked_coverage']
                                        ['tests']))
                            if 41 <= test_suites['checked_coverage'][
                                    'score'] <= 50:
                                checked_40_50.append(
                                    len(test_suites['checked_coverage']
                                        ['tests']))
                            if 51 <= test_suites['checked_coverage'][
                                    'score'] <= 60:
                                checked_50_60.append(
                                    len(test_suites['checked_coverage']
                                        ['tests']))

                            if 61 <= test_suites['checked_coverage'][
                                    'score'] <= 70:
                                checked_60_70.append(
                                    len(test_suites['checked_coverage']
                                        ['tests']))
                            if 71 <= test_suites['checked_coverage'][
                                    'score'] <= 80:
                                checked_70_80.append(
                                    len(test_suites['checked_coverage']
                                        ['tests']))
                            if 81 <= test_suites['checked_coverage'][
                                    'score'] <= 90:
                                checked_80_90.append(
                                    len(test_suites['checked_coverage']
                                        ['tests']))
                            if 91 <= test_suites['checked_coverage'][
                                    'score'] <= 100:
                                checked_90_100.append(
                                    len(test_suites['checked_coverage']
                                        ['tests']))

                        except KeyError:
                            pass

    data_to_plot = [
        statement_0_10, statement_10_20, statement_20_30, statement_30_40,
        statement_40_50, statement_50_60, statement_60_70, statement_70_80,
        statement_80_90, statement_90_100
    ]
    fig = plt.figure(1, figsize=(9, 6))
    # Create an axes instance
    ax = fig.add_subplot(111)

    ax.set_ylabel('No. of tests with % coverage score')
    ax.set_xlabel(
        'Indicates whether or not a bug detecting test is included in \n generated test suite'
    )

    # ax.set_xticks(ind + width / 2)
    # ax.set_xticklabels(tuple(['False', 'True', 'False', 'True']))

    # Create the boxplot
    bp = ax.boxplot(data_to_plot)
    # bp['medians'][0].set(color='#3d85c6', linewidth=4)
    # bp['medians'][1].set(color='#3d85c6', linewidth=4)
    #
    # bp['medians'][2].set(color='#e69138', linewidth=4)
    # bp['medians'][3].set(color='#e69138', linewidth=4)

    ax.legend().remove()

    plt.show()
    # save_path = str(get_project_root()) + results_folder + '/' + str(project_list) + '_box-plot'
    # fig.savefig(save_path, dpi=100)

    data_to_plot = [
        checked_0_10, checked_10_20, checked_20_30, checked_30_40,
        checked_40_50, checked_50_60, checked_60_70, checked_70_80,
        checked_80_90, checked_90_100
    ]
    fig = plt.figure(1, figsize=(9, 6))
    # Create an axes instance
    ax = fig.add_subplot(111)

    ax.set_ylabel('No. of tests with % coverage score')
    ax.set_xlabel(
        'Indicates whether or not a bug detecting test is included in \n generated test suite'
    )

    # ax.set_xticks(ind + width / 2)
    # ax.set_xticklabels(tuple(['False', 'True', 'False', 'True']))

    # Create the boxplot
    bp = ax.boxplot(data_to_plot)
    # bp['medians'][0].set(color='#3d85c6', linewidth=4)
    # bp['medians'][1].set(color='#3d85c6', linewidth=4)
    #
    # bp['medians'][2].set(color='#e69138', linewidth=4)
    # bp['medians'][3].set(color='#e69138', linewidth=4)

    ax.legend().remove()

    plt.show()
Example no. 24
def compute():
    project_name = project_config.get('projects', 'project_list').split(",")
    if len(project_name) > 1:
        print("reduce number of projects to 1")
        exit(0)
    project_name = project_name.pop()

    project_range = project_config.get('projects', project_name).split(",")
    defects4j_project_path = project_config.get('paths',
                                                'defects4j_project_path')
    if len(range(int(project_range[0]), int(project_range[1]) + 1)) > 1:
        print('reduce length of projects')
        exit(0)

    for project_id in range(int(project_range[0]), int(project_range[1]) + 1):
        trace_file_path = defects4j_project_path + "/" + project_name + "/trace_files/" + str(
            project_id) + "f"

        checked_coverage = {}
        statement_coverage = {}
        checked_covering_tests = []
        bug_detecting_tests = []
        if path.isdir(trace_file_path):
            patch_path = defects4j_project_path + "/" + project_name + "/" + "/patches/" + str(project_id) + \
                ".src.patch"
            bug_detecting_tests = get_bug_detecting_tests(
                project_id, defects4j_project_path + "/" + project_name)
            current_project_path = defects4j_project_path + "/" + project_name
            modified_classes = utils.get_modified_classes(
                project_id, current_project_path)
            coverable_lines = utils.get_coverable_lines(
                project_id, current_project_path, modified_classes)
            dict_of_modified_coverable_lines = get_modified_coverable_lines(
                patch_path, coverable_lines)
            checked_coverage = utils.read_json_file(trace_file_path +
                                                    '/checked_coverage.json')
            statement_coverage = utils.read_json_file(trace_file_path +
                                                      '/line_coverage.json')
            detailed_checked_covering_tests = get_covering_tests(
                checked_coverage, dict_of_modified_coverable_lines,
                'checked_coverable_lines')
            for checked_covering_test in detailed_checked_covering_tests:
                checked_covering_tests.append(checked_covering_test['test'])


#        print(json.dumps(checked_coverage))

        for checked_covering_test in checked_covering_tests:
            for key, value in checked_coverage.items():
                try:
                    checked_covering_details = value[checked_covering_test]
                    statement_covering_details = statement_coverage[key][
                        checked_covering_test]
                    print(key)
                    print("cs {} {}".format(checked_covering_test,
                                            statement_covering_details))
                    print("cc {} {}".format(checked_covering_test,
                                            checked_covering_details))
                except KeyError:
                    pass
        print("#########################################################")
        for bug_detecting_test in bug_detecting_tests:
            for key, value in checked_coverage.items():
                try:
                    checked_covering_details = value[bug_detecting_test]
                    statement_covering_details = statement_coverage[key][
                        bug_detecting_test]
                    print(key)
                    print("bs {} {}".format(bug_detecting_test,
                                            statement_covering_details))
                    print("bc {} {}".format(bug_detecting_test,
                                            checked_covering_details))
                except KeyError:
                    pass
Example no. 25
 def create_from_file(path):
     file_content_json = utils.read_json_file(path)
     data_dict = {}
     for key, value in file_content_json.items():
         data_dict[key] = TestDataInfo(value["label"], value["index"], value["filenames"], value["path"])
     return TrainingDataMap(data_dict)
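create_from_file expects a JSON object mapping each entry name to a record with label, index, filenames and path fields; a file of that shape (all names and values below are placeholders) could be written as follows.

import json

# Hypothetical training-data map containing the fields create_from_file reads.
training_data_map = {
    "sample_0001": {
        "label": "positive",
        "index": 0,
        "filenames": ["sample_0001.txt"],
        "path": "data/sample_0001"
    }
}

with open("training_data_map.json", "w", encoding="utf-8") as map_file:
    json.dump(training_data_map, map_file, indent=2)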
Example no. 26
def checkpoints_exist(checkpoint_folder):
    return len(get_checkpoint_filenames(checkpoint_folder)) > 0


def get_last_checkpoint_filename(checkpoint_folder):
    checkpoint_filenames = get_checkpoint_filenames(checkpoint_folder)
    checkpoint_filenames.sort(
        key=lambda x: int(x[len("checkpoint-"):-len(".pickle")]))
    return os.path.join(checkpoint_folder, checkpoint_filenames[-1])


def load_last_checkpoint(checkpoint_folder):
    with open(get_last_checkpoint_filename(checkpoint_folder), 'rb') as f:
        return pickle.load(f)


if __name__ == "__main__":
    args = get_argparser().parse_args()
    config = utils.read_json_file(args.config_file)

    loaded_checkpoint = False

    if get_param(config, ISAAlgorithmBase.CHECKPOINT_ENABLE) \
            and checkpoints_exist(get_param(config, ISAAlgorithmBase.CHECKPOINT_FOLDER)):
        isa_algorithm = load_last_checkpoint(
            get_param(config, ISAAlgorithmBase.CHECKPOINT_FOLDER))
        loaded_checkpoint = True
    else:
        isa_algorithm = get_algorithm(args.algorithm, config)
    isa_algorithm.run(loaded_checkpoint)
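The sort key in get_last_checkpoint_filename above strips the "checkpoint-" prefix and the ".pickle" suffix and compares the remaining number, so checkpoints sort numerically rather than lexicographically. A small self-contained illustration (the file names are made up):

# Numeric sort used by get_last_checkpoint_filename; file names are made up.
names = ["checkpoint-9.pickle", "checkpoint-10.pickle", "checkpoint-2.pickle"]
names.sort(key=lambda name: int(name[len("checkpoint-"):-len(".pickle")]))
print(names)  # ['checkpoint-2.pickle', 'checkpoint-9.pickle', 'checkpoint-10.pickle']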
Example no. 27
 def setUp(self):
     self.two_indices_rs = "two_indices_rs.json"
     self.index_created_rs = "index_created_rs.json"
     self.index_already_exists_rs = "index_already_exists_rs.json"
     self.index_deleted_rs = "index_deleted_rs.json"
     self.index_not_found_rs = "index_not_found_rs.json"
     self.launch_wo_test_items = "launch_wo_test_items.json"
     self.launch_w_test_items_wo_logs = "launch_w_test_items_wo_logs.json"
     self.launch_w_test_items_w_logs = "launch_w_test_items_w_logs.json"
     self.launch_w_test_items_w_empty_logs = "launch_w_test_items_w_empty_logs.json"
     self.launch_w_test_items_w_logs_to_be_merged =\
         "launch_w_test_items_w_logs_to_be_merged.json"
     self.index_logs_rq = "index_logs_rq.json"
     self.index_logs_rq_big_messages = "index_logs_rq_big_messages.json"
     self.index_logs_rs = "index_logs_rs.json"
     self.search_rq_first = "search_rq_first.json"
     self.search_rq_second = "search_rq_second.json"
     self.search_rq_third = "search_rq_third.json"
     self.search_rq_filtered = "search_rq_filtered.json"
     self.search_rq_another_log = "search_rq_another_log.json"
     self.search_rq_different_logs = "search_rq_different_logs.json"
     self.search_rq_to_be_merged = "search_rq_to_be_merged.json"
     self.no_hits_search_rs = "no_hits_search_rs.json"
     self.one_hit_search_rs = "one_hit_search_rs.json"
     self.one_hit_search_rs_search_logs = "one_hit_search_rs_search_logs.json"
     self.two_hits_search_rs = "two_hits_search_rs.json"
     self.two_hits_search_rs_search_logs = "two_hits_search_rs_search_logs.json"
     self.three_hits_search_rs = "three_hits_search_rs.json"
     self.launch_w_test_items_w_logs_different_log_level =\
         "launch_w_test_items_w_logs_different_log_level.json"
     self.index_logs_rq_different_log_level = "index_logs_rq_different_log_level.json"
     self.index_logs_rq_different_log_level_merged =\
         "index_logs_rq_different_log_level_merged.json"
     self.index_logs_rs_different_log_level = "index_logs_rs_different_log_level.json"
     self.delete_logs_rs = "delete_logs_rs.json"
     self.two_hits_search_with_big_messages_rs = "two_hits_search_with_big_messages_rs.json"
     self.search_not_merged_logs_for_delete = "search_not_merged_logs_for_delete.json"
     self.search_merged_logs = "search_merged_logs.json"
     self.search_not_merged_logs = "search_not_merged_logs.json"
     self.search_logs_rq = "search_logs_rq.json"
     self.search_logs_rq_not_found = "search_logs_rq_not_found.json"
     self.index_logs_rq_merged_logs = "index_logs_rq_merged_logs.json"
     self.suggest_test_item_info_w_logs = "suggest_test_item_info_w_logs.json"
     self.three_hits_search_rs_with_duplicate = "three_hits_search_rs_with_duplicate.json"
     self.one_hit_search_rs_merged = "one_hit_search_rs_merged.json"
     self.search_rq_merged_first = "search_rq_merged_first.json"
     self.search_rq_merged_second = "search_rq_merged_second.json"
     self.search_rq_merged_third = "search_rq_merged_third.json"
     self.suggest_test_item_info_w_merged_logs = "suggest_test_item_info_w_merged_logs.json"
     self.one_hit_search_rs_merged_wrong = "one_hit_search_rs_merged_wrong.json"
     self.three_hits_search_rs_with_one_unique_id = "three_hits_search_rs_with_one_unique_id.json"
     self.launch_w_items_clustering = "launch_w_items_clustering.json"
     self.cluster_update_all_the_same = "cluster_update_all_the_same.json"
     self.search_logs_rq_first_group = "search_logs_rq_first_group.json"
     self.search_logs_rq_second_group = "search_logs_rq_second_group.json"
     self.one_hit_search_rs_clustering = "one_hit_search_rs_clustering.json"
     self.search_logs_rq_first_group_2lines = "search_logs_rq_first_group_2lines.json"
     self.cluster_update_es_update = "cluster_update_es_update.json"
     self.cluster_update_all_the_same_es_update = "cluster_update_all_the_same_es_update.json"
     self.cluster_update = "cluster_update.json"
     self.app_config = {
         "esHost": "http://localhost:9200",
         "esVerifyCerts": False,
         "esUseSsl": False,
         "esSslShowWarn": False,
         "esCAcert": "",
         "esClientCert": "",
         "esClientKey": "",
         "appVersion": "",
         "minioRegion": "",
         "minioBucketPrefix": "",
         "filesystemDefaultPath": ""
     }
     self.model_settings = utils.read_json_file("",
                                                "model_settings.json",
                                                to_json=True)
     logging.disable(logging.CRITICAL)