Example #1
def eval_multidec(args):
    print("Evaluate multidec")
    device = torch.device(args.gpu)
    print("Loading dataset...")
    full_dataset = load_multi_csv_data(args, CONFIG)
    print("Loading dataset completed")
    # full_loader = DataLoader(full_dataset, batch_size=args.batch_size, shuffle=False)

    # Build the image and text encoders and restore their pretrained SDAE weights.
    image_encoder = MDEC_encoder(input_dim=args.input_dim, z_dim=args.latent_dim, n_clusters=args.n_clusters,
                                 encodeLayer=[500, 500, 2000], activation="relu", dropout=0)
    image_encoder.load_model(os.path.join(CONFIG.CHECKPOINT_PATH, "image_sdae_" + str(args.latent_dim)) + ".pt")
    text_encoder = MDEC_encoder(input_dim=args.input_dim, z_dim=args.latent_dim, n_clusters=args.n_clusters,
                                encodeLayer=[500, 500, 2000], activation="relu", dropout=0)
    text_encoder.load_model(os.path.join(CONFIG.CHECKPOINT_PATH, "text_sdae_" + str(args.latent_dim)) + ".pt")
    # Assemble the multimodal DEC model and load the trained checkpoint.
    mdec = MultiDEC(device=device, image_encoder=image_encoder, text_encoder=text_encoder)
    mdec.load_model(
        os.path.join(CONFIG.CHECKPOINT_PATH, "mdec_" + str(args.latent_dim)) + '_' + str(args.n_clusters) + ".pt")
    # Run inference: cluster id, confidence, and per-cluster p-values for each short code.
    short_codes, y_pred, y_confidence, pvalue = mdec.fit_predict(full_dataset, args.batch_size)

    # One row per post: assigned cluster id and its confidence.
    result_df = pd.DataFrame(data={'cluster_id': y_pred, 'confidence': y_confidence}, index=short_codes)
    result_df.index.name = "short_code"
    result_df.sort_index(inplace=True)
    result_df.to_csv(
        os.path.join(CONFIG.CSV_PATH, 'multidec_result_' + str(args.latent_dim) + '_' + str(args.n_clusters) + '.csv'),
        encoding='utf-8-sig')

    # Full soft-assignment matrix: one p-value column per cluster.
    pvalue_df = pd.DataFrame(data=pvalue, index=short_codes, columns=[str(i) for i in range(args.n_clusters)])
    pvalue_df.index.name = "short_code"
    pvalue_df.sort_index(inplace=True)
    pvalue_df.to_csv(
        os.path.join(CONFIG.CSV_PATH, 'multidec_pvalue_' + str(args.latent_dim) + '_' + str(args.n_clusters) + '.csv'),
        encoding='utf-8-sig')
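
The two CSVs written above can be read back for a quick sanity check. The following is a minimal sketch, not part of the original example; the latent_dim and n_clusters values are placeholders, and it assumes the same CONFIG.CSV_PATH used when the files were written.

import os
import pandas as pd

latent_dim, n_clusters = 64, 10  # placeholders; use the values passed to eval_multidec
result_df = pd.read_csv(
    os.path.join(CONFIG.CSV_PATH, 'multidec_result_%d_%d.csv' % (latent_dim, n_clusters)),
    index_col='short_code', encoding='utf-8-sig')
pvalue_df = pd.read_csv(
    os.path.join(CONFIG.CSV_PATH, 'multidec_pvalue_%d_%d.csv' % (latent_dim, n_clusters)),
    index_col='short_code', encoding='utf-8-sig')
print(result_df['cluster_id'].value_counts())   # how many posts landed in each cluster
print(pvalue_df.loc[result_df.index[0]])        # soft assignment of the first post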
Example #2
def train_multidec(args):
    print("Training multidec")
    device = torch.device(args.gpu)
    print("Loading dataset...")
    full_dataset = load_multi_csv_data(args, CONFIG)
    print("Loading dataset completed")
    # full_loader = DataLoader(full_dataset, batch_size=args.batch_size, shuffle=False)

    # Build the image and text encoders and restore their pretrained SDAE weights.
    image_encoder = MDEC_encoder(input_dim=args.input_dim, z_dim=args.latent_dim, n_clusters=args.n_clusters,
                                 encodeLayer=[500, 500, 2000], activation="relu", dropout=0)
    image_encoder.load_model(os.path.join(CONFIG.CHECKPOINT_PATH, "image_sdae_" + str(args.latent_dim)) + ".pt")
    text_encoder = MDEC_encoder(input_dim=args.input_dim, z_dim=args.latent_dim, n_clusters=args.n_clusters,
                                encodeLayer=[500, 500, 2000], activation="relu", dropout=0)
    text_encoder.load_model(os.path.join(CONFIG.CHECKPOINT_PATH, "text_sdae_" + str(args.latent_dim)) + ".pt")
    # Assemble the multimodal DEC model and set up experiment tracking.
    mdec = MultiDEC(device=device, image_encoder=image_encoder, text_encoder=text_encoder, n_clusters=args.n_clusters)
    exp = Experiment("MDEC " + str(args.latent_dim) + '_' + str(args.n_clusters), capture_io=True)
    print(mdec)

    # Record every command-line argument as an experiment parameter.
    for arg, value in vars(args).items():
        exp.param(arg, value)
    try:
        # Fit the clustering model; the experiment is closed even if training fails.
        mdec.fit(full_dataset, lr=args.lr, batch_size=args.batch_size, num_epochs=args.epochs,
                 save_path=CONFIG.CHECKPOINT_PATH)
        print("Finish!!!")

    finally:
        exp.end()
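
The examples above only show the training and evaluation functions; the argument parser that produces args is not included. A minimal sketch of the flags these two functions actually read (attribute names are taken from the code, defaults are placeholders):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--gpu', default='cuda:0')             # passed to torch.device
parser.add_argument('--input_dim', type=int, default=300)
parser.add_argument('--latent_dim', type=int, default=64)
parser.add_argument('--n_clusters', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=256)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--epochs', type=int, default=100)
args = parser.parse_args()
train_multidec(args)   # or eval_multidec(args)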
Example #3
def train_multidec(args):
    print("Training multidec")
    device = torch.device(args.gpu)
    df_image_data = pd.read_csv(os.path.join(CONFIG.CSV_PATH, args.image_csv),
                                index_col=0,
                                encoding='utf-8-sig')
    df_text_data = pd.read_csv(os.path.join(CONFIG.CSV_PATH, args.text_csv),
                               index_col=0,
                               encoding='utf-8-sig')

    df_label = pd.read_csv(os.path.join(CONFIG.CSV_PATH, args.label_csv),
                           index_col=0,
                           encoding='utf-8-sig')
    # Derive the number of clusters from the integer category labels (0..K-1).
    short_code_array = np.array(df_label.index)
    label_array = np.array(df_label['category'])
    n_clusters = np.max(label_array) + 1
    # Hold out 20% of the labelled short codes for validation.
    short_code_train, short_code_val, label_train, label_val = train_test_split(
        short_code_array, label_array, test_size=0.2, random_state=42)
    df_train = pd.DataFrame(data=label_train,
                            index=short_code_train,
                            columns=df_label.columns)
    df_val = pd.DataFrame(data=label_val,
                          index=short_code_val,
                          columns=df_label.columns)
    print("Loading dataset...")
    train_dataset, val_dataset = load_multi_csv_data(df_image_data,
                                                     df_text_data, df_train,
                                                     df_val, CONFIG)
    print("Loading dataset completed")

    image_encoder = MDEC_encoder(input_dim=args.input_dim,
                                 z_dim=args.latent_dim,
                                 n_clusters=n_clusters,
                                 encodeLayer=[500, 500, 2000],
                                 activation="relu",
                                 dropout=0)
    image_encoder.load_model(
        os.path.join(CONFIG.CHECKPOINT_PATH, "image_sdae_" +
                     str(args.latent_dim)) + ".pt")
    text_encoder = MDEC_encoder(input_dim=args.input_dim,
                                z_dim=args.latent_dim,
                                n_clusters=n_clusters,
                                encodeLayer=[500, 500, 2000],
                                activation="relu",
                                dropout=0)
    text_encoder.load_model(
        os.path.join(CONFIG.CHECKPOINT_PATH, "text_sdae_" +
                     str(args.latent_dim)) + ".pt")
    mdec = MultiDEC(device=device,
                    image_encoder=image_encoder,
                    text_encoder=text_encoder,
                    n_clusters=n_clusters)
    exp = Experiment("MDEC " + str(args.latent_dim), capture_io=True)
    print(mdec)

    for arg, value in vars(args).items():
        exp.param(arg, value)
    try:
        mdec.fit(train_dataset,
                 val_dataset,
                 lr=args.lr,
                 batch_size=args.batch_size,
                 num_epochs=args.epochs,
                 save_path=CONFIG.CHECKPOINT_PATH)
        print("Finish!!!")

    finally:
        exp.end()
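
Both labelled variants of train_multidec assume the label CSV is indexed by short_code and has an integer 'category' column, from which the number of clusters (or classes) is inferred as max + 1. A tiny stand-in illustrating that layout; the short codes and categories here are made up:

import pandas as pd

df_label = pd.DataFrame(
    {'category': [0, 2, 1, 2]},
    index=pd.Index(['Bxa1', 'Bxb2', 'Bxc3', 'Bxd4'], name='short_code'))
n_clusters = int(df_label['category'].max()) + 1   # -> 3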
Example #4
def train_multidec(args):
    print("Training started")
    device = torch.device(args.gpu)
    df_image_data = pd.read_csv(os.path.join(CONFIG.CSV_PATH, args.image_csv),
                                index_col=0,
                                encoding='utf-8-sig')
    df_text_data = pd.read_csv(os.path.join(CONFIG.CSV_PATH, args.text_csv),
                               index_col=0,
                               encoding='utf-8-sig')

    df_label = pd.read_csv(os.path.join(CONFIG.CSV_PATH, args.label_csv),
                           index_col=0,
                           encoding='utf-8-sig')
    df_weight = pd.read_csv(os.path.join(CONFIG.CSV_PATH, args.weight_csv),
                            index_col=0,
                            encoding='utf-8-sig')
    short_code_array = np.array(df_label.index)
    label_array = np.array(df_label['category'])
    n_classes = np.max(label_array) + 1

    exp = Experiment("multi_classifier", capture_io=True)
    for arg, value in vars(args).items():
        exp.param(arg, value)
    try:
        # shuffle=True is required for random_state to take effect in KFold.
        kf = KFold(n_splits=5, shuffle=True, random_state=42)
        image_score_list = []
        text_score_list = []
        multi_score_list = []
        kf_count = 0
        for train_index, val_index in kf.split(short_code_array):
            print("Current fold: ", kf_count)
            short_code_train = short_code_array[train_index]
            short_code_val = short_code_array[val_index]
            label_train = label_array[train_index]
            label_val = label_array[val_index]
            df_train = pd.DataFrame(data=label_train,
                                    index=short_code_train,
                                    columns=df_label.columns)
            df_val = pd.DataFrame(data=label_val,
                                  index=short_code_val,
                                  columns=df_label.columns)
            print("Loading dataset...")
            train_dataset, val_dataset = load_multi_csv_data(
                df_image_data, df_text_data, df_weight, df_train, df_val,
                CONFIG)
            print("\nLoading dataset completed")

            # No fixed modality weight: pretrain both single-modal classifiers plus a
            # WeightCalculator that learns how to combine them.
            if args.fixed_weight is None:
                image_classifier = SingleClassifier(device=device,
                                                    input_dim=args.input_dim,
                                                    filter_num=64,
                                                    n_classes=n_classes)
                text_classifier = SingleClassifier(device=device,
                                                   input_dim=args.input_dim,
                                                   filter_num=64,
                                                   n_classes=n_classes)
                print("pretraining image classifier...")
                image_classifier.fit(
                    train_dataset,
                    val_dataset,
                    input_modal=1,
                    lr=args.lr,
                    num_epochs=args.pretrain_epochs,
                    save_path=os.path.join(CONFIG.CHECKPOINT_PATH,
                                           "image_classifier") + ".pt")
                image_classifier.load_model(
                    os.path.join(CONFIG.CHECKPOINT_PATH, "image_classifier") +
                    ".pt")
                print("pretraining text classifier...")
                text_classifier.fit(
                    train_dataset,
                    val_dataset,
                    input_modal=2,
                    lr=args.lr,
                    num_epochs=args.pretrain_epochs,
                    save_path=os.path.join(CONFIG.CHECKPOINT_PATH,
                                           "text_classifier") + ".pt")
                text_classifier.load_model(
                    os.path.join(CONFIG.CHECKPOINT_PATH, "text_classifier") +
                    ".pt")
                print("pretraining weight classifier...")
                weight_calculator = WeightCalculator(device=device,
                                                     input_dim=args.input_dim *
                                                     2,
                                                     n_classes=n_classes)
                weight_calculator.fit(
                    train_dataset,
                    val_dataset,
                    lr=args.lr,
                    num_epochs=args.pretrain_epochs,
                    save_path=os.path.join(CONFIG.CHECKPOINT_PATH,
                                           "weight_calculator") + ".pt")
                weight_calculator.load_model(
                    os.path.join(CONFIG.CHECKPOINT_PATH, "weight_calculator") +
                    ".pt")
                multi_classifier = MultiClassifier(
                    device=device,
                    image_classifier=image_classifier,
                    text_classifier=text_classifier,
                    weight_calculator=weight_calculator)
                print(multi_classifier)
                print("training multi classifier...")
                multi_classifier.fit(
                    train_dataset,
                    val_dataset,
                    lr=args.lr,
                    batch_size=args.batch_size,
                    num_epochs=args.epochs,
                    save_path=os.path.join(CONFIG.CHECKPOINT_PATH,
                                           "multi_classifier") + ".pt")
            else:
                # A fixed modality weight was supplied: pretrain the two classifiers and
                # combine them with that constant weight instead of a learned one.
                image_classifier = SingleClassifier(device=device,
                                                    input_dim=args.input_dim,
                                                    filter_num=64,
                                                    n_classes=n_classes)
                text_classifier = SingleClassifier(device=device,
                                                   input_dim=args.input_dim,
                                                   filter_num=64,
                                                   n_classes=n_classes)
                print("pretraining image classifier...")
                image_classifier.fit(
                    train_dataset,
                    val_dataset,
                    input_modal=1,
                    lr=args.lr,
                    num_epochs=args.pretrain_epochs,
                    save_path=os.path.join(
                        CONFIG.CHECKPOINT_PATH, "image_classifier_fw_" +
                        str(args.fixed_weight)) + ".pt")
                image_classifier.load_model(
                    os.path.join(
                        CONFIG.CHECKPOINT_PATH, "image_classifier_fw_" +
                        str(args.fixed_weight)) + ".pt")
                print("pretraining text classifier...")
                text_classifier.fit(
                    train_dataset,
                    val_dataset,
                    input_modal=2,
                    lr=args.lr,
                    num_epochs=args.pretrain_epochs,
                    save_path=os.path.join(
                        CONFIG.CHECKPOINT_PATH, "text_classifier_fw_" +
                        str(args.fixed_weight)) + ".pt")
                text_classifier.load_model(
                    os.path.join(
                        CONFIG.CHECKPOINT_PATH, "text_classifier_fw_" +
                        str(args.fixed_weight)) + ".pt")
                multi_classifier = MultiClassifier(
                    device=device,
                    image_classifier=image_classifier,
                    text_classifier=text_classifier,
                    fixed_weight=args.fixed_weight)
                print(multi_classifier)
                print("training multi classifier with fixed weight...")
                multi_classifier.fit(
                    train_dataset,
                    val_dataset,
                    lr=args.lr,
                    batch_size=args.batch_size,
                    num_epochs=args.epochs,
                    save_path=os.path.join(
                        CONFIG.CHECKPOINT_PATH, "multi_classifier_fw_" +
                        str(args.fixed_weight)) + ".pt")

            print("Finish!!!")
            print(
                "#current fold best image score: %.6f, text score: %.6f multi score: %.6f"
                % (image_classifier.score, text_classifier.score,
                   multi_classifier.score))
            image_score_list.append(image_classifier.score)
            text_score_list.append(text_classifier.score)
            multi_score_list.append(multi_classifier.score)
            kf_count = kf_count + 1

        print(
            "#average image score: %.6f, text score: %.6f multi score: %.6f" %
            (np.mean(image_score_list), np.mean(text_score_list),
             np.mean(multi_score_list)))

    finally:
        exp.end()
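
If the original CLI wrapper is not available, the last example can also be driven with a plain Namespace. Every attribute below is one the function reads; the values and file names are placeholders:

from argparse import Namespace

args = Namespace(
    gpu='cuda:0', input_dim=300, lr=0.001, batch_size=256,
    epochs=100, pretrain_epochs=20,
    image_csv='image_features.csv', text_csv='text_features.csv',
    label_csv='labels.csv', weight_csv='weights.csv',
    fixed_weight=None)   # None -> learn the modality weighting with WeightCalculator
train_multidec(args)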