Example #1
0
def predict_tta_all(path):
    """Walk *path* and run TTA prediction for every model-config module found.

    A file counts as a model-config module when its name contains both
    "model" and "val", has a ".py" extension, and is not a compiled
    ".pyc" artifact.
    """
    for root, _dirs, filenames in os.walk(path):
        for filename in filenames:
            is_config_module = ("model" in filename and "val" in filename
                                and ".py" in filename
                                and "pyc" not in filename)
            if not is_config_module:
                continue
            print("start predict tta %s" % os.path.join(root, filename))
            module_name = filename.split(".")[0]
            get_model_fn, config = keras_util.dynamic_model_import(
                model_path_in=os.path.join(root, module_name))
            model = get_model_fn(output_dim=len(config.label_position),
                                 weights=None)
            config.predict_tta_all(model)
Example #2
0
 def find_segmented_model(self):
     """Print the path of every meta model that was trained on segmented data.

     Iterates ``self.meta_model_all`` (per-val -> per-label -> top-n
     entries, where each entry's first element is the model path), imports
     each model's config module, and prints the model path when its
     ``data_type`` includes ``path.DATA_TYPE_SEGMENTED``.
     """
     for val in self.meta_model_all:
         for label in val:
             for top_n in label:
                 meta_model_path = top_n[0]
                 # Keep only the part after the "competition" directory so
                 # it can be re-rooted under the local path.root_path.
                 unique_path = re.match(r".*competition[\\/]*(.*)",
                                        meta_model_path).group(1)
                 weight_file = os.path.join(path.root_path,
                                            pathlib.Path(unique_path))
                 # Only the config is needed here; the model factory is
                 # intentionally discarded.
                 _, attr_model_config = keras_util.dynamic_model_import(
                     weight_file)
                 if path.DATA_TYPE_SEGMENTED in attr_model_config.data_type:
                     print(meta_model_path)
def model_config_statistics(label_statis_all, save_dir):
    """Write a human-readable summary of top-ranked model configs to disk.

    For each of 13 labels, 5 validation folds, and the top 2 ranks,
    imports the model config referenced by
    ``label_statis_all[val][label][rank]`` and appends its hyperparameters
    to ``<save_dir>/model_config_statistics.txt``.

    Args:
        label_statis_all: nested structure indexed as [val][label][rank],
            each leaf a ``(weight_file, f2_score)`` pair.
        save_dir: directory in which the statistics file is created.
    """
    with open(os.path.join(save_dir, "model_config_statistics.txt"), "w+") as f:
        for label in range(13):
            f.write("##############################label %d##############################\n" % label)
            for val in range(5):
                f.write("---------------------val %d---------------------\n" % (val + 1))
                for rank in range(2):
                    weight_file, f2 = label_statis_all[val][label][rank]
                    _, model_config = keras_util.dynamic_model_import(weight_file)
                    # Sanity check: the imported config must belong to this fold.
                    assert model_config.val_index == val + 1
                    f.write("rank: %d, f2-score: %6f\n" % (rank, f2))
                    f.write("model_name=%s\n" % model_config.model_name)
                    f.write("image_resolution=%d\n" % model_config.image_resolution)
                    f.write("data_type=%s\n" % str(model_config.data_type))
                    f.write("label_position=%s\n" % str([str(i) for i in model_config.label_position]))

                    f.write("train_file_cnt=%d\n" % model_config.train_file_cnt)
                    f.write("val_file_cnt=%d\n" % model_config.val_file_cnt)
                    # The following groups are optional: older configs may not
                    # define these attributes, so missing ones are skipped.
                    # Catch only AttributeError — a bare except would also
                    # swallow KeyboardInterrupt/SystemExit and real bugs.
                    try:
                        f.write("label_color_augment=%s\n" % str([str(i) for i in model_config.label_color_augment]))
                        f.write("color_augment_cnt=%d\n" % model_config.color_augment_cnt)
                    except AttributeError:
                        pass

                    try:
                        f.write("label_up_sampling=%s\n" % str([str(i) for i in model_config.label_up_sampling]))
                        f.write("label_up_sampling_cnt=%s\n" % str([str(i) for i in model_config.up_sampling_cnt]))
                    except AttributeError:
                        pass

                    try:
                        f.write("down_sampling=%f\n" % model_config.downsampling)
                        f.write("down_sampling_cnt=%d\n" % model_config.down_sampling_cnt)
                    except AttributeError:
                        pass

                    f.write("train_batch_size=%s\n" % str([str(i) for i in model_config.train_batch_size]))
                    f.write("epoch=%s\n" % str([str(i) for i in model_config.epoch]))
                    f.write("lr=%s\n" % str([str(i) for i in model_config.lr]))
                    f.write("freeze_layers=%s\n" % str([str(i) for i in model_config.freeze_layers]))
                    f.write("input_norm=%s\n" % model_config.input_norm)
                    f.write("tta_flip=%s\n" % model_config.tta_flip)
                    f.write("tta_crop=%s\n" % model_config.tta_crop)
                    f.write("\n")
def get_ablation_experiment_predict(mode_path, val):
    """Evaluate ablation-experiment models found under *mode_path* on the test set.

    Walks *mode_path* collecting (a) cached prediction files whose names
    contain "hdf5(test)" and (b) raw ".hdf5" weight files for fold *val*
    with model number >= 100. Cached predictions are evaluated directly;
    uncached weights are loaded, predicted with flip-TTA, binarized with
    per-label thresholds, cached, then evaluated via build_epoch_test.
    """
    y_true = get_test_labels()

    # Build the list of absolute paths to the original (unsegmented) test
    # images, one per line of TEST_DATA_TXT ("<name>,<labels...>" format
    # assumed from the split on ",").
    original_test_file = []
    with open(path.TEST_DATA_TXT, 'r') as f:
        for i in f.readlines():
            image_name = i.split(",")[0] + ".jpg"
            original_test_file.append(
                os.path.join(path.ORIGINAL_TEST_IMAGES_PATH, image_name))

    weight_files = []
    predict_files = []
    # Per-label, per-weight-file binarization thresholds; indexed later as
    # thresholds[label][weight_file].
    _, _, thresholds = model_statistics.model_f2_statistics(
        path.MODEL_PATH, val)

    for root, dirs, files in os.walk(mode_path):
        for file in files:
            # Cached prediction artifact — no need to re-run the model.
            if "hdf5(test)" in file:
                predict_files.append(os.path.join(root, file))
                continue
            if not file.split(".")[-1] == "hdf5":
                continue
            # Only weights belonging to the requested validation fold.
            if f"val{val}" not in root:
                continue

            # NOTE(review): models numbered < 100 are skipped — presumably
            # only model IDs >= 100 belong to the ablation experiment; confirm.
            model_num = re.match(r".*model([0-9]*).*", root).group(1)
            if int(model_num) < 100:
                continue

            weight_files.append(os.path.join(root, file))

    # Evaluate every cached prediction against the ground truth.
    for predict_file in predict_files:
        print(f"evaluate {predict_file}")
        weight_file = predict_file.replace("(test).predict.npy", "")
        y_pred = np.load(predict_file)
        build_epoch_test(y_true, y_pred, weight_file)

    # Predict (or load a cached result) for every uncached weight file.
    for weight_file in weight_files:
        print(f"evaluate {weight_file}")
        unique_path = re.match(r".*competition[\\/]*(.*)",
                               weight_file).group(1)
        # NOTE(review): split on "\\" assumes Windows-style paths here.
        identifier = "-".join(unique_path.split("\\"))
        print(f"id {identifier}")
        # NOTE(review): os.path.join with a single argument is a no-op;
        # this is just weight_file + "(test)".
        cnn_result_path = os.path.join(weight_file + "(test)")
        print(f"result {cnn_result_path}")
        if not os.path.exists(keras_util.get_prediction_path(cnn_result_path)):
            print(cnn_result_path)
            attr_get_model, attr_model_config = keras_util.dynamic_model_import(
                weight_file)
            model = attr_get_model(output_dim=len(
                attr_model_config.label_position),
                                   weights=None)
            model.load_weights(weight_file)
            # Point the config's validation files at the test images so
            # predict_tta runs over the test set.
            attr_model_config.val_files = []
            for data_type in attr_model_config.data_type:
                if data_type == path.DATA_TYPE_ORIGINAL:
                    attr_model_config.val_files.append(original_test_file)

            attr_model_config.tta_flip = True
            y_pred = keras_util.predict_tta(model,
                                            attr_model_config,
                                            verbose=1)

            # Binarize each of the 13 labels with its model-specific threshold.
            for i in range(13):
                y_pred[:, i] = y_pred[:, i] > thresholds[i][weight_file]

            y_pred = y_pred.astype(np.int8)

            # Cache the binarized predictions for future runs.
            keras_util.save_prediction_file(y_pred, cnn_result_path)
        else:
            y_pred = np.load(keras_util.get_prediction_path(cnn_result_path))

        build_epoch_test(y_true, y_pred, weight_file)
Example #5
0
    def get_meta_predict(self, val_index, get_segmented=False, debug=False):
        """Run (or skip, when cached) test-set TTA prediction for every meta model.

        For each fold in *val_index*, walks self.meta_model_all[fold-1]
        (per-label top-n entries), loads each model's weights from
        self.meta_model_dir, and saves flip-TTA predictions under
        path.CNN_RESULT_PATH. Models whose prediction file already exists,
        whose weights are missing, or which use segmented data (unless
        *get_segmented* is True) are skipped. With *debug* True, nothing is
        predicted — matching weight files are only printed and counted.
        """
        # Absolute paths of every test image, in TEST_DATA_TXT order, for
        # both the original and the segmented image sets.
        original_test_file = []
        segmented_test_file = []
        cnt = 0
        with open(path.TEST_DATA_TXT, 'r') as f:
            for i in f.readlines():
                image_name = i.split(",")[0] + ".jpg"
                original_test_file.append(
                    os.path.join(path.ORIGINAL_TEST_IMAGES_PATH, image_name))
                segmented_test_file.append(
                    os.path.join(path.SEGMENTED_TEST_IMAGES_PATH, image_name))

        for val in val_index:
            # val_index is 1-based; meta_model_all is 0-based.
            val_model = self.meta_model_all[val - 1]
            for label in val_model:
                for top_n in label:
                    meta_model_path = top_n[0]
                    # NOTE(review): "ubuntu" in the recorded path apparently
                    # marks models trained on a Linux box (/-separated paths)
                    # vs. Windows (\-separated) — confirm this convention.
                    if "ubuntu" in meta_model_path:
                        sep = "/"
                    else:
                        sep = "\\"
                    # Path relative to the "competition" directory; used both
                    # to build a flat result identifier and to locate weights.
                    unique_path = re.match(r".*competition[\\/]*(.*)",
                                           meta_model_path).group(1)
                    identifier = "-".join(unique_path.split(sep))
                    cnn_result_path = os.path.join(path.CNN_RESULT_PATH,
                                                   identifier)
                    # Skip models whose predictions are already cached.
                    if os.path.exists(
                            keras_util.get_prediction_path(cnn_result_path)):
                        # self.save_log("file existed %s" % keras_util.get_prediction_path(cnn_result_path))
                        continue

                    # weight_file: path used to import the config module;
                    # real_weight_file: where the weights actually live locally.
                    weight_file = os.path.join(path.root_path,
                                               pathlib.Path(unique_path))
                    real_weight_file = os.path.join(self.meta_model_dir,
                                                    pathlib.Path(unique_path))
                    if not os.path.exists(real_weight_file):
                        self.save_log("weight not existed, %s " %
                                      real_weight_file)
                        continue

                    # Debug mode: only report what would be predicted.
                    if debug:
                        print(f"{weight_file}")
                        cnt += 1
                        continue

                    # self.save_log("weight file %s, real weight file %s" % (weight_file, real_weight_file))
                    attr_get_model, attr_model_config = keras_util.dynamic_model_import(
                        weight_file)

                    if not get_segmented and path.DATA_TYPE_SEGMENTED in attr_model_config.data_type:
                        self.save_log("not train segmented model, %s" %
                                      real_weight_file)
                        continue

                    model = attr_get_model(output_dim=len(
                        attr_model_config.label_position),
                                           weights=None)
                    model.load_weights(real_weight_file)
                    # Point the config's validation files at the test images
                    # (original and/or segmented, per the model's data_type).
                    attr_model_config.val_files = []
                    for data_type in attr_model_config.data_type:
                        if data_type == path.DATA_TYPE_ORIGINAL:
                            # self.save_log("model %s use original data" % unique_path)
                            attr_model_config.val_files.append(
                                original_test_file)
                        if data_type == path.DATA_TYPE_SEGMENTED:
                            self.save_log("model %s use segmented data" %
                                          unique_path)
                            attr_model_config.val_files.append(
                                segmented_test_file)
                    attr_model_config.tta_flip = True
                    y_pred = keras_util.predict_tta(model,
                                                    attr_model_config,
                                                    verbose=1)
                    keras_util.save_prediction_file(y_pred, cnn_result_path)

        # NOTE(review): cnt is incremented only in the debug branch, so this
        # prints 0 on a real prediction run.
        print(f"need predict {cnt} model")