def do_predict(FLAGS, datasets):
        """
        调用堆栈降噪自动编码器进行预测
        :param FLAGS: tensorflow.app.flags.FLAGS 类型。包含了定义的网络模型的各项参数
        :param datasets: ndarray 类型。制作好的测试数据
        :return: ndarray 类型。预测结果
        """
        # Convert the autoencoder layer parameters from strings to their specific types
        dae_layers = utilities.flag_to_list(FLAGS.dae_layers, 'int')
        dae_enc_act_func = utilities.flag_to_list(FLAGS.dae_enc_act_func, 'str')
        dae_dec_act_func = utilities.flag_to_list(FLAGS.dae_dec_act_func, 'str')
        dae_opt = utilities.flag_to_list(FLAGS.dae_opt, 'str')
        dae_loss_func = utilities.flag_to_list(FLAGS.dae_loss_func, 'str')
        dae_learning_rate = utilities.flag_to_list(FLAGS.dae_learning_rate, 'float')
        dae_regcoef = utilities.flag_to_list(FLAGS.dae_regcoef, 'float')
        dae_corr_type = utilities.flag_to_list(FLAGS.dae_corr_type, 'str')
        dae_corr_frac = utilities.flag_to_list(FLAGS.dae_corr_frac, 'float')
        dae_num_epochs = utilities.flag_to_list(FLAGS.dae_num_epochs, 'int')
        dae_batch_size = utilities.flag_to_list(FLAGS.dae_batch_size, 'int')
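        # e.g., with comma-separated flags in the yadlt style, a value such as
        # '256,128' for dae_layers becomes [256, 128] here (an illustration,
        # assuming flag_to_list splits on commas and casts each item)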

        # Validate the parameters
        assert all([0. <= cf <= 1. for cf in dae_corr_frac])
        assert all([ct in ['masking', 'salt_and_pepper', 'none'] for ct in dae_corr_type])
        assert FLAGS.dataset in ['mnist', 'cifar10', 'custom']
        assert len(dae_layers) > 0
        assert all([af in ['sigmoid', 'tanh'] for af in dae_enc_act_func])
        assert all([af in ['sigmoid', 'tanh', 'none'] for af in dae_dec_act_func])

        utilities.random_seed_np_tf(FLAGS.seed)

        # Create the encoder/decoder/finetune activation functions and the network model object
        dae_enc_act_func = [utilities.str2actfunc(af) for af in dae_enc_act_func]
        dae_dec_act_func = [utilities.str2actfunc(af) for af in dae_dec_act_func]
        finetune_act_func = utilities.str2actfunc(FLAGS.finetune_act_func)

        sdae = stacked_denoising_autoencoder.StackedDenoisingAutoencoder(
            do_pretrain=FLAGS.do_pretrain, name=FLAGS.name,
            layers=dae_layers, finetune_loss_func=FLAGS.finetune_loss_func,
            finetune_learning_rate=FLAGS.finetune_learning_rate,
            finetune_num_epochs=FLAGS.finetune_num_epochs,
            finetune_opt=FLAGS.finetune_opt, finetune_batch_size=FLAGS.finetune_batch_size,
            finetune_dropout=FLAGS.finetune_dropout,
            enc_act_func=dae_enc_act_func, dec_act_func=dae_dec_act_func,
            corr_type=dae_corr_type, corr_frac=dae_corr_frac, regcoef=dae_regcoef,
            loss_func=dae_loss_func, opt=dae_opt,
            learning_rate=dae_learning_rate, momentum=FLAGS.momentum,
            num_epochs=dae_num_epochs, batch_size=dae_batch_size,
            finetune_act_func=finetune_act_func)

        # Train the model (unsupervised pretraining)
        # if FLAGS.do_pretrain:
        #     encoded_X, encoded_vX = sdae.pretrain(trX, vlX)

        teX = datasets
        # print('Saving the predictions for the test set...')
        internal_predictions = sdae.predict(teX)
        return internal_predictions
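
A minimal sketch of calling do_predict, using types.SimpleNamespace as a stand-in for tf.app.flags.FLAGS (all values and the .npy path are illustrative assumptions, and the model is expected to have matching pretrained weights available):

    from types import SimpleNamespace
    import numpy as np

    FLAGS = SimpleNamespace(
        dae_layers='256,128', dae_enc_act_func='sigmoid,sigmoid',
        dae_dec_act_func='sigmoid,sigmoid', dae_opt='sgd,sgd',
        dae_loss_func='mse,mse', dae_learning_rate='0.01,0.01',
        dae_regcoef='5e-4,5e-4', dae_corr_type='masking,masking',
        dae_corr_frac='0.1,0.1', dae_num_epochs='10,10',
        dae_batch_size='32,32', dataset='custom', seed=42, name='sdae',
        do_pretrain=False, momentum=0.9, finetune_act_func='relu',
        finetune_loss_func='softmax_cross_entropy',
        finetune_learning_rate=0.001, finetune_num_epochs=10,
        finetune_opt='sgd', finetune_batch_size=32, finetune_dropout=1)

    predictions = do_predict(FLAGS, np.load('test_set.npy'))  # hypothetical path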
Example #2
def do_train(internal_FLAGS, trX, vlX, trY, vlY):
    # Convert the autoencoder layer parameters from strings to their specific types
    dae_layers = utilities.flag_to_list(internal_FLAGS.dae_layers, 'int')
    dae_enc_act_func = utilities.flag_to_list(internal_FLAGS.dae_enc_act_func, 'str')
    dae_dec_act_func = utilities.flag_to_list(internal_FLAGS.dae_dec_act_func, 'str')
    dae_opt = utilities.flag_to_list(internal_FLAGS.dae_opt, 'str')
    dae_loss_func = utilities.flag_to_list(internal_FLAGS.dae_loss_func, 'str')
    dae_learning_rate = utilities.flag_to_list(internal_FLAGS.dae_learning_rate, 'float')
    dae_regcoef = utilities.flag_to_list(internal_FLAGS.dae_regcoef, 'float')
    dae_corr_type = utilities.flag_to_list(internal_FLAGS.dae_corr_type, 'str')
    dae_corr_frac = utilities.flag_to_list(internal_FLAGS.dae_corr_frac, 'float')
    dae_num_epochs = utilities.flag_to_list(internal_FLAGS.dae_num_epochs, 'int')
    dae_batch_size = utilities.flag_to_list(internal_FLAGS.dae_batch_size, 'int')

    # Validate the parameters
    assert all([0. <= cf <= 1. for cf in dae_corr_frac])
    assert all([ct in ['masking', 'salt_and_pepper', 'none'] for ct in dae_corr_type])
    assert internal_FLAGS.dataset in ['mnist', 'cifar10', 'custom']
    assert len(dae_layers) > 0
    assert all([af in ['sigmoid', 'tanh'] for af in dae_enc_act_func])
    assert all([af in ['sigmoid', 'tanh', 'none'] for af in dae_dec_act_func])

    utilities.random_seed_np_tf(internal_FLAGS.seed)

    # Create the encoder/decoder/finetune activation functions and the network model object

    dae_enc_act_func = [utilities.str2actfunc(af) for af in dae_enc_act_func]
    dae_dec_act_func = [utilities.str2actfunc(af) for af in dae_dec_act_func]
    finetune_act_func = utilities.str2actfunc(internal_FLAGS.finetune_act_func)

    sdae = stacked_denoising_autoencoder.StackedDenoisingAutoencoder(
        do_pretrain=internal_FLAGS.do_pretrain, name=internal_FLAGS.name,
        layers=dae_layers, finetune_loss_func=internal_FLAGS.finetune_loss_func,
        finetune_learning_rate=internal_FLAGS.finetune_learning_rate,
        finetune_num_epochs=internal_FLAGS.finetune_num_epochs,
        finetune_opt=internal_FLAGS.finetune_opt, finetune_batch_size=internal_FLAGS.finetune_batch_size,
        finetune_dropout=internal_FLAGS.finetune_dropout,
        enc_act_func=dae_enc_act_func, dec_act_func=dae_dec_act_func,
        corr_type=dae_corr_type, corr_frac=dae_corr_frac, regcoef=dae_regcoef,
        loss_func=dae_loss_func, opt=dae_opt,
        learning_rate=dae_learning_rate, momentum=internal_FLAGS.momentum,
        num_epochs=dae_num_epochs, batch_size=dae_batch_size,
        finetune_act_func=finetune_act_func)
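    # Note: the dae_* keyword arguments above are per-layer lists (one entry
    # per item in dae_layers), which is why the corresponding flags are parsed
    # with flag_to_list rather than used as scalars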

    # Train the model (unsupervised pretraining)
    if internal_FLAGS.do_pretrain:
        encoded_X, encoded_vX = sdae.pretrain(trX, vlX)

    trY = np.array(trY)
    vlY = np.array(vlY)
    # Supervised finetuning
    sdae.fit(trX, trY, vlX, vlY)
Example #3

        if not trRef:
            trRef = trX
        if not vlRef:
            vlRef = vlX
        if not teRef:
            teRef = teX

    else:
        trX = None
        trRef = None
        vlX = None
        vlRef = None
        teX = None
        teRef = None

    finetune_enc_act_func = [
        utilities.str2actfunc(af) for af in finetune_enc_act_func
    ]
    finetune_dec_act_func = [
        utilities.str2actfunc(af) for af in finetune_dec_act_func
    ]

    # Create the object
    srbm = deep_autoencoder.DeepAutoencoder(
        name=FLAGS.name,
        do_pretrain=FLAGS.do_pretrain,
        layers=rbm_layers,
        learning_rate=rbm_learning_rate,
        gibbs_k=rbm_gibbs_k,
        num_epochs=rbm_num_epochs,
        momentum=FLAGS.momentum,
        batch_size=rbm_batch_size)

Example #4

        trX, trY = load_from_np(FLAGS.train_dataset), load_from_np(
            FLAGS.train_labels)
        vlX, vlY = load_from_np(FLAGS.valid_dataset), load_from_np(
            FLAGS.valid_labels)
        teX, teY = load_from_np(FLAGS.test_dataset), load_from_np(
            FLAGS.test_labels)

    else:
        trX, trY, vlX, vlY, teX, teY = None, None, None, None, None, None

    models_dir = os.path.join(config.models_dir, FLAGS.main_dir)
    data_dir = os.path.join(config.data_dir, FLAGS.main_dir)
    summary_dir = os.path.join(config.summary_dir, FLAGS.main_dir)

    # Create the object
    finetune_act_func = utilities.str2actfunc(FLAGS.finetune_act_func)

    srbm = dbn.DeepBeliefNetwork(
        models_dir=models_dir,
        data_dir=data_dir,
        summary_dir=summary_dir,
        model_name=FLAGS.model_name,
        do_pretrain=FLAGS.do_pretrain,
        rbm_layers=rbm_layers,
        dataset=FLAGS.dataset,
        main_dir=FLAGS.main_dir,
        finetune_act_func=finetune_act_func,
        rbm_learning_rate=rbm_learning_rate,
        verbose=FLAGS.verbose,
        rbm_num_epochs=rbm_num_epochs,
        rbm_gibbs_k=rbm_gibbs_k)

Example #5
        def load_from_np(dataset_path):
            if dataset_path != '':
                return np.load(dataset_path)
            else:
                return None

        trX, trY = load_from_np(FLAGS.train_dataset), load_from_np(FLAGS.train_labels)
        vlX, vlY = load_from_np(FLAGS.valid_dataset), load_from_np(FLAGS.valid_labels)
        teX, teY = load_from_np(FLAGS.test_dataset), load_from_np(FLAGS.test_labels)

    else:
        trX, trY, vlX, vlY, teX, teY = None, None, None, None, None, None

    # Create the object
    finetune_act_func = utilities.str2actfunc(FLAGS.finetune_act_func)

    srbm = dbn.DeepBeliefNetwork(
        name=FLAGS.name, do_pretrain=FLAGS.do_pretrain,
        rbm_layers=rbm_layers,
        finetune_act_func=finetune_act_func, rbm_learning_rate=rbm_learning_rate,
        rbm_num_epochs=rbm_num_epochs, rbm_gibbs_k=rbm_gibbs_k,
        rbm_gauss_visible=FLAGS.rbm_gauss_visible, rbm_stddev=FLAGS.rbm_stddev,
        momentum=FLAGS.momentum, rbm_batch_size=rbm_batch_size, finetune_learning_rate=FLAGS.finetune_learning_rate,
        finetune_num_epochs=FLAGS.finetune_num_epochs, finetune_batch_size=FLAGS.finetune_batch_size,
        finetune_opt=FLAGS.finetune_opt, finetune_loss_func=FLAGS.finetune_loss_func,
        finetune_dropout=FLAGS.finetune_dropout)

    # Fit the model (unsupervised pretraining)
    if FLAGS.do_pretrain:
        srbm.pretrain(trX, vlX)
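
After pretraining, the DBN is typically finetuned on labels and scored, as Example #13 below does; a minimal sketch with the variables prepared above (assuming trY, vlY, teX, and teY were loaded by the truncated top of this snippet):

    srbm.fit(trX, trY, vlX, vlY)
    print('Test set accuracy: {}'.format(srbm.score(teX, teY)))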
Example #6
def do_predict(FLAGS, predictions_dir, num):
    dataset_path = "/media/files/yp/rbm/yinchuansanqu/dataset/"
    f_list1 = os.listdir(dataset_path)
    for i in f_list1:
        if os.path.splitext(i)[1] == '.npy':
            # Convert the autoencoder layer parameters from strings to their specific types
            dae_layers = utilities.flag_to_list(FLAGS.dae_layers, 'int')
            dae_enc_act_func = utilities.flag_to_list(FLAGS.dae_enc_act_func, 'str')
            dae_dec_act_func = utilities.flag_to_list(FLAGS.dae_dec_act_func, 'str')
            dae_opt = utilities.flag_to_list(FLAGS.dae_opt, 'str')
            dae_loss_func = utilities.flag_to_list(FLAGS.dae_loss_func, 'str')
            dae_learning_rate = utilities.flag_to_list(FLAGS.dae_learning_rate, 'float')
            dae_regcoef = utilities.flag_to_list(FLAGS.dae_regcoef, 'float')
            dae_corr_type = utilities.flag_to_list(FLAGS.dae_corr_type, 'str')
            dae_corr_frac = utilities.flag_to_list(FLAGS.dae_corr_frac, 'float')
            dae_num_epochs = utilities.flag_to_list(FLAGS.dae_num_epochs, 'int')
            dae_batch_size = utilities.flag_to_list(FLAGS.dae_batch_size, 'int')

            # Validate the parameters
            assert all([0. <= cf <= 1. for cf in dae_corr_frac])
            assert all([ct in ['masking', 'salt_and_pepper', 'none'] for ct in dae_corr_type])
            assert FLAGS.dataset in ['mnist', 'cifar10', 'custom']
            assert len(dae_layers) > 0
            assert all([af in ['sigmoid', 'tanh'] for af in dae_enc_act_func])
            assert all([af in ['sigmoid', 'tanh', 'none'] for af in dae_dec_act_func])

            utilities.random_seed_np_tf(FLAGS.seed)

            def load_from_np(dataset_path):
                if dataset_path != '':
                    return np.load(dataset_path)
                else:
                    return None

            # Create the encoder/decoder/finetune activation functions and the network model object

            dae_enc_act_func = [utilities.str2actfunc(af) for af in dae_enc_act_func]
            dae_dec_act_func = [utilities.str2actfunc(af) for af in dae_dec_act_func]
            finetune_act_func = utilities.str2actfunc(FLAGS.finetune_act_func)

            sdae = stacked_denoising_autoencoder.StackedDenoisingAutoencoder(
                do_pretrain=FLAGS.do_pretrain, name=FLAGS.name,
                layers=dae_layers, finetune_loss_func=FLAGS.finetune_loss_func,
                finetune_learning_rate=FLAGS.finetune_learning_rate, finetune_num_epochs=FLAGS.finetune_num_epochs,
                finetune_opt=FLAGS.finetune_opt, finetune_batch_size=FLAGS.finetune_batch_size,
                finetune_dropout=FLAGS.finetune_dropout,
                enc_act_func=dae_enc_act_func, dec_act_func=dae_dec_act_func,
                corr_type=dae_corr_type, corr_frac=dae_corr_frac, regcoef=dae_regcoef,
                loss_func=dae_loss_func, opt=dae_opt,
                learning_rate=dae_learning_rate, momentum=FLAGS.momentum,
                num_epochs=dae_num_epochs, batch_size=dae_batch_size,
                finetune_act_func=finetune_act_func)

            # Train the model (unsupervised pretraining). Note: trX and vlX
            # are never defined in this function, so FLAGS.do_pretrain must
            # be False when this version runs.
            if FLAGS.do_pretrain:
                encoded_X, encoded_vX = sdae.pretrain(trX, vlX)

            FLAGS.test_dataset = dataset_path + i
            # FLAGS.test_labels = "/media/files/yp/rbm/pic_div/label/binary/" + str(idx) + ".npy"
            FLAGS.save_predictions = predictions_dir + i
            # teX, teY = load_from_np(FLAGS.test_dataset), load_from_np(FLAGS.test_labels)
            teX = load_from_np(FLAGS.test_dataset)
            # Compute the model's accuracy on the test set
            # print('Test set accuracy: {}'.format(sdae.score(teX, teY)))

            # Save the model's predictions
            if FLAGS.save_predictions:
                print('Saving the predictions for the test set...')
                predict = sdae.predict(teX).astype(np.float16)
                np.save(FLAGS.save_predictions, predict)

    # An earlier variant of this loop iterated over file indices instead of
    # the directory listing, with the same model setup as above:
    #
    #     for idx in range(0, num):
    #         ...
    #         FLAGS.test_dataset = "/media/files/yp/rbm/pic_div/dataset/test" + str(idx) + "_RGB.npy"
    #         FLAGS.save_predictions = predictions_dir + str(idx) + ".npy"
    #         teX = load_from_np(FLAGS.test_dataset)
    #         if FLAGS.save_predictions:
    #             print('Saving the predictions for the test set...')
    #             np.save(FLAGS.save_predictions, sdae.predict(teX))
    #
    # That variant could also save each layer's output for the test and
    # train sets:
    #
    #     def save_layers_output(which_set):
    #         if which_set == 'test':
    #             teout = sdae.get_layers_output(teX)
    #             for i, o in enumerate(teout):
    #                 np.save(FLAGS.save_layers_output_test + '-layer-' + str(i + 1) + '-test', o)
    #
    #     # Save each layer's output on the test set
    #     if FLAGS.save_layers_output_test:
    #         print('Saving the output of each layer for the test set')
    #         save_layers_output('test')
    #
    #     # Save each layer's output on the train set
    #     if FLAGS.save_layers_output_train:
    #         print('Saving the output of each layer for the train set')
    #         save_layers_output('train')

    print('Prediction finished.')
Example #7

        if not teRef:
            teRef = teX

    else:
        trX = None
        trRef = None
        vlX = None
        vlRef = None
        teX = None
        teRef = None

    models_dir = os.path.join(config.models_dir, FLAGS.main_dir)
    data_dir = os.path.join(config.data_dir, FLAGS.main_dir)
    summary_dir = os.path.join(config.summary_dir, FLAGS.main_dir)

    finetune_enc_act_func = [utilities.str2actfunc(af) for af in finetune_enc_act_func]
    finetune_dec_act_func = [utilities.str2actfunc(af) for af in finetune_dec_act_func]

    # Create the object
    srbm = deep_autoencoder.DeepAutoencoder(
        models_dir=models_dir, data_dir=data_dir, summary_dir=summary_dir,
        model_name=FLAGS.model_name, do_pretrain=FLAGS.do_pretrain,
        layers=rbm_layers, dataset=FLAGS.dataset, main_dir=FLAGS.main_dir,
        learning_rate=rbm_learning_rate, gibbs_k=rbm_gibbs_k,
        verbose=FLAGS.verbose, num_epochs=rbm_num_epochs, momentum=FLAGS.momentum,
        batch_size=rbm_batch_size, finetune_learning_rate=FLAGS.finetune_learning_rate,
        finetune_enc_act_func=finetune_enc_act_func, finetune_dec_act_func=finetune_dec_act_func,
        finetune_num_epochs=FLAGS.finetune_num_epochs, finetune_batch_size=FLAGS.finetune_batch_size,
        finetune_opt=FLAGS.finetune_opt, finetune_loss_func=FLAGS.finetune_loss_func, finetune_dropout=FLAGS.finetune_dropout,
        noise=FLAGS.rbm_noise, stddev=FLAGS.rbm_stddev)
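
Example #14 at the bottom of this page shows the full flow for this class; in outline, with the objects prepared above (a sketch, not part of the original snippet):

    if FLAGS.do_pretrain:
        srbm.pretrain(trX, vlX)

    # Supervised finetuning against the reference (reconstruction target) sets
    srbm.fit(trX, trRef, vlX, vlRef)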
Example #8
        trX, trY = load_from_np(FLAGS.train_dataset), load_from_np(
            FLAGS.train_labels)
        vlX, vlY = load_from_np(FLAGS.valid_dataset), load_from_np(
            FLAGS.valid_labels)
        teX, teY = load_from_np(FLAGS.test_dataset), load_from_np(
            FLAGS.test_labels)

    else:
        trX = None
        trY = None
        vlX = None
        vlY = None
        teX = None
        teY = None

    # Create the object

    dae_enc_act_func = [utilities.str2actfunc(af) for af in dae_enc_act_func]
    dae_dec_act_func = [utilities.str2actfunc(af) for af in dae_dec_act_func]
    finetune_act_func = utilities.str2actfunc(FLAGS.finetune_act_func)

    sdae = stacked_denoising_autoencoder.StackedDenoisingAutoencoder(
        do_pretrain=FLAGS.do_pretrain,
        name=FLAGS.name,
        layers=dae_layers,
        finetune_loss_func=FLAGS.finetune_loss_func,
        finetune_learning_rate=FLAGS.finetune_learning_rate,
        finetune_num_epochs=FLAGS.finetune_num_epochs,
        finetune_opt=FLAGS.finetune_opt,
        finetune_batch_size=FLAGS.finetune_batch_size,
        finetune_dropout=FLAGS.finetune_dropout,
        enc_act_func=dae_enc_act_func,
        dec_act_func=dae_dec_act_func,
        corr_type=dae_corr_type,
        corr_frac=dae_corr_frac,
        regcoef=dae_regcoef,
        loss_func=dae_loss_func,
        opt=dae_opt,
        learning_rate=dae_learning_rate,
        momentum=FLAGS.momentum,
        num_epochs=dae_num_epochs,
        batch_size=dae_batch_size,
        finetune_act_func=finetune_act_func)

Example #9

        trX, trY = load_from_np(FLAGS.train_dataset), load_from_np(FLAGS.train_labels)
        vlX, vlY = load_from_np(FLAGS.valid_dataset), load_from_np(FLAGS.valid_labels)
        teX, teY = load_from_np(FLAGS.test_dataset), load_from_np(FLAGS.test_labels)

    else:
        trX = None
        trY = None
        vlX = None
        vlY = None
        teX = None
        teY = None

    # Create the object

    dae_enc_act_func = [utilities.str2actfunc(af) for af in dae_enc_act_func]
    dae_dec_act_func = [utilities.str2actfunc(af) for af in dae_dec_act_func]
    finetune_act_func = utilities.str2actfunc(FLAGS.finetune_act_func)

    sdae = stacked_denoising_autoencoder.StackedDenoisingAutoencoder(
        do_pretrain=FLAGS.do_pretrain, name=FLAGS.name,
        layers=dae_layers, finetune_loss_func=FLAGS.finetune_loss_func,
        finetune_learning_rate=FLAGS.finetune_learning_rate, finetune_num_epochs=FLAGS.finetune_num_epochs,
        finetune_opt=FLAGS.finetune_opt, finetune_batch_size=FLAGS.finetune_batch_size,
        finetune_dropout=FLAGS.finetune_dropout,
        enc_act_func=dae_enc_act_func, dec_act_func=dae_dec_act_func,
        corr_type=dae_corr_type, corr_frac=dae_corr_frac, regcoef=dae_regcoef,
        loss_func=dae_loss_func, opt=dae_opt,
        learning_rate=dae_learning_rate, momentum=FLAGS.momentum,
        num_epochs=dae_num_epochs, batch_size=dae_batch_size,
        finetune_act_func=finetune_act_func)
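
A sketch of the typical calls once the model above is built, mirroring the pretrain/fit/predict usage elsewhere on this page (trX, trY, vlX, vlY, and teX are the arrays loaded above):

    if FLAGS.do_pretrain:
        encoded_X, encoded_vX = sdae.pretrain(trX, vlX)

    sdae.fit(trX, trY, vlX, vlY)
    predictions = sdae.predict(teX)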
Example #10

# Assumed imports and the definitions cut off at the top of this snippet
# (values below are illustrative placeholders, not from the original):
import sys
import numpy as np
from yadlt.models.boltzmann import dbn  # module path varies across yadlt versions
from yadlt.utils import utilities

rbm_layers = [250, 150]       # assumed layer sizes
rbm_learning_rate = [0.01]    # assumed
rbm_num_epochs = [10]         # assumed
rbm_stddev = 0.1              # assumed
rbm_batch_size = [10]
rbm_gibbs_k = [1]
finetune_opt = 'adam'  # sgd/adagrad/momentum/adam
finetune_loss_func = 'softmax_cross_entropy'  # softmax_cross_entropy/mse
finetune_dropout = 1
finetune_num_epochs = 1

if __name__ == '__main__':

    utilities.random_seed_np_tf(2)

    trX, trY = np.load(sys.argv[1]), np.load(sys.argv[2])
    vlX, vlY = np.load(sys.argv[3]), np.load(sys.argv[4])

    # Create the object
    finetune_act_func = utilities.str2actfunc('relu')

    srbm = dbn.DeepBeliefNetwork(
        name='dbn',
        rbm_layers=rbm_layers,
        finetune_act_func=finetune_act_func,
        rbm_learning_rate=rbm_learning_rate,
        rbm_num_epochs=rbm_num_epochs,
        rbm_gibbs_k=rbm_gibbs_k,
        rbm_gauss_visible=True,
        rbm_stddev=rbm_stddev,
        momentum=0.9,
        rbm_batch_size=rbm_batch_size,
    )

    train_result, valid_result = srbm.pretrain(trX, trY, vlX, vlY)
Example #11

        trX = load_from_np(FLAGS.train_dataset)
        vlX = load_from_np(FLAGS.valid_dataset)
        teX = load_from_np(FLAGS.test_dataset)

    else:
        trX = None
        vlX = None
        teX = None

    models_dir = os.path.join(config.models_dir, FLAGS.main_dir)
    data_dir = os.path.join(config.data_dir, FLAGS.main_dir)
    summary_dir = os.path.join(config.summary_dir, FLAGS.main_dir)

    # Create the object
    enc_act_func = utilities.str2actfunc(FLAGS.enc_act_func)
    dec_act_func = utilities.str2actfunc(FLAGS.dec_act_func)

    dae = denoising_autoencoder.DenoisingAutoencoder(
        model_name=FLAGS.model_name, n_components=FLAGS.n_components,
        models_dir=models_dir, data_dir=data_dir, summary_dir=summary_dir,
        enc_act_func=enc_act_func, dec_act_func=dec_act_func,
        corr_type=FLAGS.corr_type, corr_frac=FLAGS.corr_frac, dataset=FLAGS.dataset,
        loss_func=FLAGS.loss_func, main_dir=FLAGS.main_dir, opt=FLAGS.opt,
        learning_rate=FLAGS.learning_rate, momentum=FLAGS.momentum, l2reg=FLAGS.l2reg,
        verbose=FLAGS.verbose, num_epochs=FLAGS.num_epochs, batch_size=FLAGS.batch_size)

    # Fit the model
    W = None
    if FLAGS.weights:
        W = np.load(FLAGS.weights)
Example #12
        def load_from_np(dataset_path):
            if dataset_path != '':
                return np.load(dataset_path)
            else:
                return None

        trX = load_from_np(FLAGS.train_dataset)
        vlX = load_from_np(FLAGS.valid_dataset)
        teX = load_from_np(FLAGS.test_dataset)

    else:
        trX = None
        vlX = None
        teX = None

    # Create the object
    enc_act_func = utilities.str2actfunc(FLAGS.enc_act_func)
    dec_act_func = utilities.str2actfunc(FLAGS.dec_act_func)

    dae = denoising_autoencoder.DenoisingAutoencoder(
        name=FLAGS.name,
        n_components=FLAGS.n_components,
        enc_act_func=enc_act_func,
        dec_act_func=dec_act_func,
        corr_type=FLAGS.corr_type,
        corr_frac=FLAGS.corr_frac,
        loss_func=FLAGS.loss_func,
        opt=FLAGS.opt,
        regcoef=FLAGS.regcoef,
        learning_rate=FLAGS.learning_rate,
        momentum=FLAGS.momentum,
        num_epochs=FLAGS.num_epochs,
        batch_size=FLAGS.batch_size)
Example #13
    def do_dbn(self, action='pp', opts=None):
        if action == 'pp':
            # Loading dataset
            digits = self._input_data[0]
            X, Y = digits[0], digits[1]

            # Training
            classifier = dbn_model.SupervisedDBNClassification(
                hidden_layers_structure=[100, 80, 50, 25, 5],
                learning_rate_rbm=0.1,
                learning_rate=0.1,
                n_epochs_rbm=100,
                n_iter_backprop=100,
                batch_size=64,
                activation_function='sigmoid',
                dropout_p=0.2)
            classifier.fit(np.array(X), np.array(Y))

            # Test
            Y_pred = classifier.predict(X[0])

        elif action == 'yadlt':

            trX, trY = np.array(self._input_data[0][0]), trans_label_to_yadlt(
                self._input_data[0][1])
            vlX, vlY = np.array(
                self._input_data[1][0][:10000]), trans_label_to_yadlt(
                    self._input_data[1][1][:10000])
            teX, teY = np.array(self._input_data[1][0]), trans_label_to_yadlt(
                self._input_data[1][1])

            finetune_act_func = utilities.str2actfunc('relu')

            srbm = dbn.DeepBeliefNetwork(
                name='dbn',
                do_pretrain=True,
                rbm_layers=[100, 80, 50, 25, 5],
                finetune_act_func=finetune_act_func,
                rbm_learning_rate=[float(opts.learning_rate_rbm)]
                if opts is not None and opts.learning_rate_rbm is not None else
                [0.1],
                rbm_num_epochs=[int(opts.epochs_rbm)]
                if opts is not None and opts.epochs_rbm is not None else [100],
                rbm_gibbs_k=[1],
                rbm_gauss_visible=False,
                rbm_stddev=0.1,
                momentum=0.9,
                rbm_batch_size=[int(opts.batch_size)]
                if opts is not None and opts.batch_size is not None else [64],
                finetune_learning_rate=0.001,
                finetune_num_epochs=10,
                finetune_batch_size=64,
                finetune_opt='momentum',
                finetune_loss_func='softmax_cross_entropy',
                finetune_dropout=1)

            srbm.pretrain(trX, vlX)

            print('Start deep belief net finetuning...')
            srbm.fit(trX, trY, vlX, vlY)

            print('Test set accuracy: {}'.format(srbm.score(teX, teY)))

            def save_layers_output(which_set):
                if which_set == 'train':
                    trout = srbm.get_layers_output(trX)
                    for i, o in enumerate(trout):
                        np.save('train' + '-layer-' + str(i + 1) + '-train', o)

                elif which_set == 'test':
                    teout = srbm.get_layers_output(teX)
                    for i, o in enumerate(teout):
                        np.save('test' + '-layer-' + str(i + 1) + '-test', o)

            if False:
                print('Saving the output of each layer for the test set')
                save_layers_output('test')

            # Save output from each layer of the model
            if False:
                print('Saving the output of each layer for the train set')
                save_layers_output('train')
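
The yadlt branch above relies on a trans_label_to_yadlt helper that is not shown in the snippet. A minimal sketch of what such a converter might look like, assuming yadlt's finetuning expects one-hot label arrays (hypothetical implementation, not from the original):

    import numpy as np

    def trans_label_to_yadlt(labels):
        # Convert integer class labels to a one-hot float array
        labels = np.asarray(labels, dtype=np.int64)
        onehot = np.zeros((labels.shape[0], labels.max() + 1), dtype=np.float32)
        onehot[np.arange(labels.shape[0]), labels] = 1.0
        return onehot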
Example #14
def encode_data(trX, teX, vlX):

    model_name = 'deep-autoencoder'
    save_layers_output_train_label = 'deep-train'
    save_layers_output_test_label = 'deep-test'
    restore_previous_model = False
    do_pretrain = True
    momentum = 0.7
    verbose = 1
    # RBMs layers
    rbm_names = 'deep-autoencoder-rbm'
    rbm_layers = [4640, 2482, 321]
    rbm_noise = 'gauss'
    rbm_stddev = 0.1
    rbm_learning_rate = [0.1]
    rbm_num_epochs = [200]
    rbm_batch_size = [100]
    rbm_gibbs_k = [1]
    # Supervised fine tuning parameter
    finetune_learning_rate = 0.0001
    finetune_enc_act_func = ['relu']
    finetune_dec_act_func = ['tanh']
    finetune_num_epochs = 200
    finetune_batch_size = 100
    finetune_opt = 'gradient_descent'
    finetune_loss_func = 'cross_entropy'
    finetune_dropout = 1

    main_dir = 'dae'

    trRef = trX
    vlRef = vlX
    teRef = teX

    models_dir = os.path.join(config.models_dir, main_dir)
    data_dir = os.path.join(config.data_dir, main_dir)
    summary_dir = os.path.join(config.summary_dir, main_dir)

    finetune_enc_act_func = [utilities.str2actfunc(af) for af in finetune_enc_act_func]
    finetune_dec_act_func = [utilities.str2actfunc(af) for af in finetune_dec_act_func]

    # Create the object
    srbm = deep_autoencoder.DeepAutoencoder(
        models_dir=models_dir, data_dir=data_dir, summary_dir=summary_dir,
        model_name=model_name, do_pretrain=do_pretrain,
        # Note: FLAGS is not defined inside encode_data, so dataset here
        # relies on a module-level FLAGS object being in scope.
        layers=rbm_layers, dataset=FLAGS.dataset, main_dir=main_dir,
        learning_rate=rbm_learning_rate, gibbs_k=rbm_gibbs_k,
        verbose=verbose, num_epochs=rbm_num_epochs, momentum=momentum,
        batch_size=rbm_batch_size, finetune_learning_rate=finetune_learning_rate,
        finetune_enc_act_func=finetune_enc_act_func, finetune_dec_act_func=finetune_dec_act_func,
        finetune_num_epochs=finetune_num_epochs, finetune_batch_size=finetune_batch_size,
        finetune_opt=finetune_opt, finetune_loss_func=finetune_loss_func, finetune_dropout=finetune_dropout,
        noise=rbm_noise, stddev=rbm_stddev)

    if do_pretrain:
        encoded_X, encoded_vX = srbm.pretrain(trX, vlX)

    # Supervised finetuning
    srbm.fit(trX, trRef, vlX, vlRef, restore_previous_model=restore_previous_model)

    # Compute the reconstruction loss of the model
    print('Test set reconstruction loss: {}'.format(srbm.compute_reconstruction_loss(teX, teRef)))

    # Save the model's reconstructions (the file names, kept from the
    # original script, do not match the sets they store)
    print('Saving the reconstructions for the test set...')
    encoded_teX = srbm.reconstruct(teX)
    np.save('deep-vtrain.npy', encoded_teX)

    print('Saving the reconstructions for the train set...')
    encoded_trX = srbm.reconstruct(trX)
    np.save('deep-train1.npy', encoded_trX)

    print('Saving the reconstructions for the validation set...')
    encoded_vlX = srbm.reconstruct(vlX)
    np.save('deep-train2.npy', encoded_vlX)


    def save_layers_output(which_set):
        if which_set == 'train':
            trout = srbm.get_layers_output(trX)
            for i, o in enumerate(trout):
                np.save(save_layers_output_train_label + '-layer-' + str(i + 1) + '-train', o)

        elif which_set == 'test':
            teout = srbm.get_layers_output(teX)
            for i, o in enumerate(teout):
                np.save(save_layers_output_test_label + '-layer-' + str(i + 1) + '-test', o)


    print('Saving the output of each layer for the test set')
    save_layers_output('test')

    print('Saving the output of each layer for the train set')
    save_layers_output('train')

    return encoded_trX, encoded_teX, encoded_vlX
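
A minimal sketch of driving encode_data with three prepared NumPy arrays (the .npy paths are hypothetical; note the trX, teX, vlX argument order of the function):

    trX = np.load('train_set.npy')
    vlX = np.load('valid_set.npy')
    teX = np.load('test_set.npy')

    encoded_trX, encoded_teX, encoded_vlX = encode_data(trX, teX, vlX)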