# Supervised fine tuning parameters
flags.DEFINE_string('finetune_act_func', 'sigmoid', 'Activation function.')
flags.DEFINE_float('finetune_learning_rate', 0.9, 'Learning rate.')
flags.DEFINE_float('finetune_momentum', 0.7, 'Momentum parameter.')
flags.DEFINE_integer('finetune_num_epochs', 100, 'Number of epochs.')
flags.DEFINE_integer('finetune_batch_size', 100, 'Size of each mini-batch.')
flags.DEFINE_string('finetune_opt', 'gradient_descent',
                    '["gradient_descent", "ada_grad", "momentum", "adam"]')
flags.DEFINE_string(
    'finetune_loss_func', 'softmax_cross_entropy',
    'Loss function. ["mean_squared", "softmax_cross_entropy"]')
flags.DEFINE_float('finetune_dropout', 1, 'Dropout parameter.')

# Conversion of RBM layers parameters from string to their specific type
rbm_layers = utilities.flag_to_list(FLAGS.rbm_layers, 'int')
rbm_learning_rate = utilities.flag_to_list(FLAGS.rbm_learning_rate, 'float')
rbm_num_epochs = utilities.flag_to_list(FLAGS.rbm_num_epochs, 'int')
rbm_batch_size = utilities.flag_to_list(FLAGS.rbm_batch_size, 'int')
rbm_gibbs_k = utilities.flag_to_list(FLAGS.rbm_gibbs_k, 'int')
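
# For reference, a minimal sketch of the behaviour assumed for
# utilities.flag_to_list: split a comma-separated flag string and cast each
# element to the requested type (the real yadlt helper may differ in detail).
def flag_to_list_sketch(flagval, flagtype):
    casts = {'int': int, 'float': float, 'str': str}
    return [casts[flagtype](v) for v in flagval.split(',') if v.strip() != '']

# e.g. flag_to_list_sketch('250,150,', 'int') -> [250, 150]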

# Parameters validation
assert FLAGS.dataset in ['mnist', 'cifar10', 'custom']
assert FLAGS.finetune_act_func in ['sigmoid', 'tanh', 'relu']
assert FLAGS.finetune_loss_func in ['mean_squared', 'softmax_cross_entropy']
assert len(rbm_layers) > 0

if __name__ == '__main__':

    utilities.random_seed_np_tf(FLAGS.seed)

flags.DEFINE_string(
    'finetune_enc_act_func', 'relu,',
    'Activation function for the encoder fine-tuning phase. ["sigmoid", "tanh", "relu"]'
)
flags.DEFINE_string(
    'finetune_dec_act_func', 'sigmoid,',
    'Activation function for the decoder fine-tuning phase. ["sigmoid", "tanh", "relu"]'
)
flags.DEFINE_integer('finetune_num_epochs', 10, 'Number of epochs.')
flags.DEFINE_integer('finetune_batch_size', 10, 'Size of each mini-batch.')
flags.DEFINE_string('finetune_opt', 'sgd',
                    '["sgd", "ada_grad", "momentum", "adam"]')
flags.DEFINE_string('finetune_loss_func', 'mse', 'Loss function.')
flags.DEFINE_float('finetune_dropout', 1, 'Dropout parameter.')

# Conversion of RBM layers parameters from string to their specific type
rbm_names = utilities.flag_to_list(FLAGS.rbm_names, 'str')
rbm_layers = utilities.flag_to_list(FLAGS.rbm_layers, 'int')
rbm_noise = utilities.flag_to_list(FLAGS.rbm_noise, 'str')
rbm_learning_rate = utilities.flag_to_list(FLAGS.rbm_learning_rate, 'float')
rbm_num_epochs = utilities.flag_to_list(FLAGS.rbm_num_epochs, 'int')
rbm_batch_size = utilities.flag_to_list(FLAGS.rbm_batch_size, 'int')
rbm_gibbs_k = utilities.flag_to_list(FLAGS.rbm_gibbs_k, 'int')

finetune_enc_act_func = utilities.flag_to_list(FLAGS.finetune_enc_act_func,
                                               'str')
finetune_dec_act_func = utilities.flag_to_list(FLAGS.finetune_dec_act_func,
                                               'str')

# Parameters validation
assert FLAGS.dataset in ['mnist', 'cifar10', 'custom']
assert len(rbm_layers) > 0
flags.DEFINE_string('rbm_num_epochs', '10,', 'Number of epochs.')
flags.DEFINE_string('rbm_batch_size', '32,', 'Size of each mini-batch.')
flags.DEFINE_string('rbm_gibbs_k', '1,', 'Gibbs sampling steps.')

# Supervised fine tuning parameters
flags.DEFINE_string('finetune_act_func', 'relu', 'Activation function.')
flags.DEFINE_float('finetune_learning_rate', 0.01, 'Learning rate.')
flags.DEFINE_float('finetune_momentum', 0.9, 'Momentum parameter.')
flags.DEFINE_integer('finetune_num_epochs', 10, 'Number of epochs.')
flags.DEFINE_integer('finetune_batch_size', 32, 'Size of each mini-batch.')
flags.DEFINE_string('finetune_opt', 'momentum', '["sgd", "ada_grad", "momentum", "adam"]')
flags.DEFINE_string('finetune_loss_func', 'softmax_cross_entropy', 'Loss function. ["mse", "softmax_cross_entropy"]')
flags.DEFINE_float('finetune_dropout', 1, 'Dropout parameter.')

# Conversion of RBM layers parameters from string to their specific type
rbm_layers = utilities.flag_to_list(FLAGS.rbm_layers, 'int')
rbm_learning_rate = utilities.flag_to_list(FLAGS.rbm_learning_rate, 'float')
rbm_num_epochs = utilities.flag_to_list(FLAGS.rbm_num_epochs, 'int')
rbm_batch_size = utilities.flag_to_list(FLAGS.rbm_batch_size, 'int')
rbm_gibbs_k = utilities.flag_to_list(FLAGS.rbm_gibbs_k, 'int')

# Parameters validation
assert FLAGS.dataset in ['mnist', 'cifar10', 'custom']
assert FLAGS.finetune_act_func in ['sigmoid', 'tanh', 'relu']
assert len(rbm_layers) > 0

if __name__ == '__main__':

    utilities.random_seed_np_tf(FLAGS.seed)

    if FLAGS.dataset == 'mnist':
# Autoencoder layers specific parameters
flags.DEFINE_string('dae_layers', '256,', 'Comma-separated values for the layers in the sdae.')
flags.DEFINE_string('dae_l2reg', '5e-4,', 'Regularization parameter for the autoencoders. If 0, no regularization.')
flags.DEFINE_string('dae_enc_act_func', 'sigmoid,', 'Activation function for the encoder. ["sigmoid", "tanh"]')
flags.DEFINE_string('dae_dec_act_func', 'none,', 'Activation function for the decoder. ["sigmoid", "tanh", "none"]')
flags.DEFINE_string('dae_loss_func', 'mean_squared,', 'Loss function. ["mean_squared" or "cross_entropy"]')
flags.DEFINE_string('dae_opt', 'gradient_descent,', '["gradient_descent", "ada_grad", "momentum", "adam"]')
flags.DEFINE_string('dae_learning_rate', '0.01,', 'Initial learning rate.')
flags.DEFINE_string('dae_num_epochs', '10,', 'Number of epochs.')
flags.DEFINE_string('dae_batch_size', '10,', 'Size of each mini-batch.')
flags.DEFINE_string('dae_corr_type', 'none,', 'Type of input corruption. ["none", "masking", "salt_and_pepper"]')
flags.DEFINE_string('dae_corr_frac', '0.0,', 'Fraction of the input to corrupt.')
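
# Illustrative only (an assumption, not the yadlt implementation): what the
# dae_corr_type / dae_corr_frac flags configure. 'masking' zeroes a random
# fraction of each input, 'salt_and_pepper' flips a random fraction to the
# extreme values, 'none' leaves the input untouched.
import numpy as np

def corrupt_input_sketch(X, corr_type='masking', corr_frac=0.1, rng=np.random):
    X_corr = X.copy()
    mask = rng.rand(*X.shape) < corr_frac
    if corr_type == 'masking':
        X_corr[mask] = 0.0
    elif corr_type == 'salt_and_pepper':
        X_corr[mask] = rng.choice([X.min(), X.max()], size=mask.sum())
    return X_corr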

# Conversion of Autoencoder layers parameters from string to their specific type
dae_layers = utilities.flag_to_list(FLAGS.dae_layers, 'int')
dae_enc_act_func = utilities.flag_to_list(FLAGS.dae_enc_act_func, 'str')
dae_dec_act_func = utilities.flag_to_list(FLAGS.dae_dec_act_func, 'str')
dae_opt = utilities.flag_to_list(FLAGS.dae_opt, 'str')
dae_loss_func = utilities.flag_to_list(FLAGS.dae_loss_func, 'str')
dae_learning_rate = utilities.flag_to_list(FLAGS.dae_learning_rate, 'float')
dae_l2reg = utilities.flag_to_list(FLAGS.dae_l2reg, 'float')
dae_corr_type = utilities.flag_to_list(FLAGS.dae_corr_type, 'str')
dae_corr_frac = utilities.flag_to_list(FLAGS.dae_corr_frac, 'float')
dae_num_epochs = utilities.flag_to_list(FLAGS.dae_num_epochs, 'int')
dae_batch_size = utilities.flag_to_list(FLAGS.dae_batch_size, 'int')

# Parameters validation
assert all([0. <= cf <= 1. for cf in dae_corr_frac])
assert all([ct in ['masking', 'salt_and_pepper', 'none'] for ct in dae_corr_type])
assert FLAGS.dataset in ['mnist', 'cifar10', 'custom']

flags.DEFINE_string(
    'rbm_layers', '100,',
    'Comma-separated values for the number of hidden units in the layers.')
flags.DEFINE_boolean('finetuned', False,
                     'default False, Whether to load a finetuned model.')
flags.DEFINE_string('samples', None, 'Npy file with the test data.')
flags.DEFINE_boolean(
    'memory_mapping', True,
    'default True, Whether to use memory mapping to avoid loading the full dataset into RAM.'
)
flags.DEFINE_boolean(
    'load_batch', False,
    'default False, Whether to load the current batch into RAM.')
flags.DEFINE_string('out_dir', 'samples', 'Full path to the out directory.')

if __name__ == '__main__':
    rbm_layers = utilities.flag_to_list(FLAGS.rbm_layers, 'int')

    models_dir = os.path.join(config.models_dir, FLAGS.main_dir)
    data_dir = os.path.join(config.data_dir, FLAGS.main_dir)
    summary_dir = os.path.join(config.summary_dir, FLAGS.main_dir)

    #    if FLAGS.finetuned:
    rbm_layers = [FLAGS.num_visible] + rbm_layers
    model = DBNFinetuned(layers=rbm_layers,
                         visible_unit_type=FLAGS.visible_unit_type,
                         hidden_unit_type=FLAGS.hidden_unit_type,
                         model_name=FLAGS.model_name,
                         main_dir=FLAGS.main_dir,
                         models_dir=models_dir,
                         data_dir=data_dir,
                         summary_dir=summary_dir)
Example #6
def do_predict(FLAGS, predictions_dir, num):
    dataset_path = "/media/files/yp/rbm/yinchuansanqu/dataset/"
    f_list1 = os.listdir(dataset_path)
    for i in f_list1:
        if os.path.splitext(i)[1] == '.npy':
            # Convert the autoencoder layer parameters from strings to their specific types
            dae_layers = utilities.flag_to_list(FLAGS.dae_layers, 'int')
            dae_enc_act_func = utilities.flag_to_list(FLAGS.dae_enc_act_func, 'str')
            dae_dec_act_func = utilities.flag_to_list(FLAGS.dae_dec_act_func, 'str')
            dae_opt = utilities.flag_to_list(FLAGS.dae_opt, 'str')
            dae_loss_func = utilities.flag_to_list(FLAGS.dae_loss_func, 'str')
            dae_learning_rate = utilities.flag_to_list(FLAGS.dae_learning_rate, 'float')
            dae_regcoef = utilities.flag_to_list(FLAGS.dae_regcoef, 'float')
            dae_corr_type = utilities.flag_to_list(FLAGS.dae_corr_type, 'str')
            dae_corr_frac = utilities.flag_to_list(FLAGS.dae_corr_frac, 'float')
            dae_num_epochs = utilities.flag_to_list(FLAGS.dae_num_epochs, 'int')
            dae_batch_size = utilities.flag_to_list(FLAGS.dae_batch_size, 'int')

            # Validate the parameters
            assert all([0. <= cf <= 1. for cf in dae_corr_frac])
            assert all([ct in ['masking', 'salt_and_pepper', 'none'] for ct in dae_corr_type])
            assert FLAGS.dataset in ['mnist', 'cifar10', 'custom']
            assert len(dae_layers) > 0
            assert all([af in ['sigmoid', 'tanh'] for af in dae_enc_act_func])
            assert all([af in ['sigmoid', 'tanh', 'none'] for af in dae_dec_act_func])

            utilities.random_seed_np_tf(FLAGS.seed)

            def load_from_np(dataset_path):
                if dataset_path != '':
                    return np.load(dataset_path)
                else:
                    return None

            # Create the encoder, decoder and fine-tune activation functions and the network model object
            sdae = None

            dae_enc_act_func = [utilities.str2actfunc(af) for af in dae_enc_act_func]
            dae_dec_act_func = [utilities.str2actfunc(af) for af in dae_dec_act_func]
            finetune_act_func = utilities.str2actfunc(FLAGS.finetune_act_func)

            sdae = stacked_denoising_autoencoder.StackedDenoisingAutoencoder(
                do_pretrain=FLAGS.do_pretrain, name=FLAGS.name,
                layers=dae_layers, finetune_loss_func=FLAGS.finetune_loss_func,
                finetune_learning_rate=FLAGS.finetune_learning_rate, finetune_num_epochs=FLAGS.finetune_num_epochs,
                finetune_opt=FLAGS.finetune_opt, finetune_batch_size=FLAGS.finetune_batch_size,
                finetune_dropout=FLAGS.finetune_dropout,
                enc_act_func=dae_enc_act_func, dec_act_func=dae_dec_act_func,
                corr_type=dae_corr_type, corr_frac=dae_corr_frac, regcoef=dae_regcoef,
                loss_func=dae_loss_func, opt=dae_opt,
                learning_rate=dae_learning_rate, momentum=FLAGS.momentum,
                num_epochs=dae_num_epochs, batch_size=dae_batch_size,
                finetune_act_func=finetune_act_func)

            # Train the model (unsupervised pretraining)
            if FLAGS.do_pretrain:
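                # NOTE: trX / vlX (training and validation arrays) are assumed to
                # be loaded elsewhere in the original script; they are not
                # defined in this snippet.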
                encoded_X, encoded_vX = sdae.pretrain(trX, vlX)

            FLAGS.test_dataset = dataset_path + i
            # FLAGS.test_labels = "/media/files/yp/rbm/pic_div/label/binary/" + str(idx) + ".npy"
            FLAGS.save_predictions = predictions_dir + i
            # teX, teY = load_from_np(FLAGS.test_dataset), load_from_np(FLAGS.test_labels)
            teX = load_from_np(FLAGS.test_dataset)
            # Compute the model's accuracy on the test set
            # print('Test set accuracy: {}'.format(sdae.score(teX, teY)))

            # Save the model's predictions
            if FLAGS.save_predictions:
                print('Saving the predictions for the test set...')
                predict = sdae.predict(teX).astype(np.float16)
                np.save(FLAGS.save_predictions, predict)

    # for idx in range(0, num):
    #     # Convert the autoencoder layer parameters from strings to their specific types
    #     dae_layers = utilities.flag_to_list(FLAGS.dae_layers, 'int')
    #     dae_enc_act_func = utilities.flag_to_list(FLAGS.dae_enc_act_func, 'str')
    #     dae_dec_act_func = utilities.flag_to_list(FLAGS.dae_dec_act_func, 'str')
    #     dae_opt = utilities.flag_to_list(FLAGS.dae_opt, 'str')
    #     dae_loss_func = utilities.flag_to_list(FLAGS.dae_loss_func, 'str')
    #     dae_learning_rate = utilities.flag_to_list(FLAGS.dae_learning_rate, 'float')
    #     dae_regcoef = utilities.flag_to_list(FLAGS.dae_regcoef, 'float')
    #     dae_corr_type = utilities.flag_to_list(FLAGS.dae_corr_type, 'str')
    #     dae_corr_frac = utilities.flag_to_list(FLAGS.dae_corr_frac, 'float')
    #     dae_num_epochs = utilities.flag_to_list(FLAGS.dae_num_epochs, 'int')
    #     dae_batch_size = utilities.flag_to_list(FLAGS.dae_batch_size, 'int')
    #
    #     # Validate the parameters
    #     assert all([0. <= cf <= 1. for cf in dae_corr_frac])
    #     assert all([ct in ['masking', 'salt_and_pepper', 'none'] for ct in dae_corr_type])
    #     assert FLAGS.dataset in ['mnist', 'cifar10', 'custom']
    #     assert len(dae_layers) > 0
    #     assert all([af in ['sigmoid', 'tanh'] for af in dae_enc_act_func])
    #     assert all([af in ['sigmoid', 'tanh', 'none'] for af in dae_dec_act_func])
    #
    #     utilities.random_seed_np_tf(FLAGS.seed)
    #
    #     def load_from_np(dataset_path):
    #         if dataset_path != '':
    #             return np.load(dataset_path)
    #         else:
    #             return None
    #
    #     # Create the encoder, decoder and fine-tune activation functions and the network model object
    #     sdae = None
    #
    #     dae_enc_act_func = [utilities.str2actfunc(af) for af in dae_enc_act_func]
    #     dae_dec_act_func = [utilities.str2actfunc(af) for af in dae_dec_act_func]
    #     finetune_act_func = utilities.str2actfunc(FLAGS.finetune_act_func)
    #
    #     sdae = stacked_denoising_autoencoder.StackedDenoisingAutoencoder(
    #         do_pretrain=FLAGS.do_pretrain, name=FLAGS.name,
    #         layers=dae_layers, finetune_loss_func=FLAGS.finetune_loss_func,
    #         finetune_learning_rate=FLAGS.finetune_learning_rate, finetune_num_epochs=FLAGS.finetune_num_epochs,
    #         finetune_opt=FLAGS.finetune_opt, finetune_batch_size=FLAGS.finetune_batch_size,
    #         finetune_dropout=FLAGS.finetune_dropout,
    #         enc_act_func=dae_enc_act_func, dec_act_func=dae_dec_act_func,
    #         corr_type=dae_corr_type, corr_frac=dae_corr_frac, regcoef=dae_regcoef,
    #         loss_func=dae_loss_func, opt=dae_opt,
    #         learning_rate=dae_learning_rate, momentum=FLAGS.momentum,
    #         num_epochs=dae_num_epochs, batch_size=dae_batch_size,
    #         finetune_act_func=finetune_act_func)
    #
    #     # Train the model (unsupervised pretraining)
    #     if FLAGS.do_pretrain:
    #         encoded_X, encoded_vX = sdae.pretrain(trX, vlX)
    #
    #     FLAGS.test_dataset = "/media/files/yp/rbm/pic_div/dataset/test" + str(idx) + "_RGB.npy"
    #     # FLAGS.test_labels = "/media/files/yp/rbm/pic_div/label/binary/" + str(idx) + ".npy"
    #     FLAGS.save_predictions = predictions_dir + str(idx) + ".npy"
    #     # teX, teY = load_from_np(FLAGS.test_dataset), load_from_np(FLAGS.test_labels)
    #     teX = load_from_np(FLAGS.test_dataset)
    #     # Compute the model's accuracy on the test set
    #     # print('Test set accuracy: {}'.format(sdae.score(teX, teY)))
    #
    #     # Save the model's predictions
    #     if FLAGS.save_predictions:
    #         print('Saving the predictions for the test set...')
    #         np.save(FLAGS.save_predictions, sdae.predict(teX))
    #
    #     def save_layers_output(which_set):
    #
    #         if which_set == 'test':
    #             teout = sdae.get_layers_output(teX)
    #             for i, o in enumerate(teout):
    #                 np.save(FLAGS.save_layers_output_test + '-layer-' + str(i + 1) + '-test', o)
    #
    #     # Save each layer's output for the test set
    #     if FLAGS.save_layers_output_test:
    #         print('Saving the output of each layer for the test set')
    #         save_layers_output('test')
    #
    #     # Save each layer's output for the train set
    #     if FLAGS.save_layers_output_train:
    #         print('Saving the output of each layer for the train set')
    #         save_layers_output('train')
    #
    print('------------------------------------------- The prediction process is complete ---------------------------------------------------')
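

# A hedged usage sketch (not in the original source): predictions_dir is a
# placeholder output directory, and num is unused by the active code path above.
if __name__ == '__main__':
    do_predict(FLAGS, predictions_dir='/path/to/predictions/', num=0)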
flags.DEFINE_string('rbm_learning_rate', '0.01,', 'Initial learning rate.')
flags.DEFINE_string('rbm_num_epochs', '10,', 'Number of epochs.')
flags.DEFINE_string('rbm_batch_size', '10,', 'Size of each mini-batch.')
flags.DEFINE_string('rbm_gibbs_k', '1,', 'Gibbs sampling steps.')
# Supervised fine tuning parameters
flags.DEFINE_float('finetune_learning_rate', 0.01, 'Learning rate.')
flags.DEFINE_string('finetune_enc_act_func', 'relu,', 'Activation function for the encoder fine-tuning phase. ["sigmoid", "tanh", "relu"]')
flags.DEFINE_string('finetune_dec_act_func', 'sigmoid,', 'Activation function for the decoder fine-tuning phase. ["sigmoid", "tanh", "relu"]')
flags.DEFINE_integer('finetune_num_epochs', 10, 'Number of epochs.')
flags.DEFINE_integer('finetune_batch_size', 10, 'Size of each mini-batch.')
flags.DEFINE_string('finetune_opt', 'gradient_descent', '["gradient_descent", "ada_grad", "momentum", "adam"]')
flags.DEFINE_string('finetune_loss_func', 'mean_squared', 'Loss function.')
flags.DEFINE_float('finetune_dropout', 1, 'Dropout parameter.')

# Conversion of RBM layers parameters from string to their specific type
rbm_names = utilities.flag_to_list(FLAGS.rbm_names, 'str')
rbm_layers = utilities.flag_to_list(FLAGS.rbm_layers, 'int')
rbm_learning_rate = utilities.flag_to_list(FLAGS.rbm_learning_rate, 'float')
rbm_num_epochs = utilities.flag_to_list(FLAGS.rbm_num_epochs, 'int')
rbm_batch_size = utilities.flag_to_list(FLAGS.rbm_batch_size, 'int')
rbm_gibbs_k = utilities.flag_to_list(FLAGS.rbm_gibbs_k, 'int')

finetune_enc_act_func = utilities.flag_to_list(FLAGS.finetune_enc_act_func, 'str')
finetune_dec_act_func = utilities.flag_to_list(FLAGS.finetune_dec_act_func, 'str')

# Parameters validation
assert FLAGS.dataset in ['mnist', 'cifar10', 'custom']
assert FLAGS.finetune_loss_func in ["mean_squared", "cross_entropy"]
assert len(rbm_layers) > 0

if __name__ == '__main__':
Example #8
flags.DEFINE_string(
    'dae_dec_act_func', 'none,',
    'Activation function for the decoder. ["sigmoid", "tanh", "none"]')
flags.DEFINE_string('dae_loss_func', 'mse,',
                    'Loss function. ["mse" or "cross_entropy"]')
flags.DEFINE_string('dae_opt', 'sgd,',
                    '["sgd", "ada_grad", "momentum", "adam"]')
flags.DEFINE_string('dae_learning_rate', '0.01,', 'Initial learning rate.')
flags.DEFINE_string('dae_num_epochs', '10,', 'Number of epochs.')
flags.DEFINE_string('dae_batch_size', '10,', 'Size of each mini-batch.')
flags.DEFINE_string(
    'dae_corr_type', 'none,',
    'Type of input corruption. ["none", "masking", "salt_and_pepper"]')
flags.DEFINE_string('dae_corr_frac', '0.0,',
                    'Fraction of the input to corrupt.')

# Conversion of Autoencoder layers parameters from string to their specific type
dae_layers = utilities.flag_to_list(FLAGS.dae_layers, 'int')
dae_enc_act_func = utilities.flag_to_list(FLAGS.dae_enc_act_func, 'str')
dae_dec_act_func = utilities.flag_to_list(FLAGS.dae_dec_act_func, 'str')
dae_opt = utilities.flag_to_list(FLAGS.dae_opt, 'str')
dae_loss_func = utilities.flag_to_list(FLAGS.dae_loss_func, 'str')
dae_learning_rate = utilities.flag_to_list(FLAGS.dae_learning_rate, 'float')
dae_regcoef = utilities.flag_to_list(FLAGS.dae_regcoef, 'float')
dae_corr_type = utilities.flag_to_list(FLAGS.dae_corr_type, 'str')
dae_corr_frac = utilities.flag_to_list(FLAGS.dae_corr_frac, 'float')
dae_num_epochs = utilities.flag_to_list(FLAGS.dae_num_epochs, 'int')
dae_batch_size = utilities.flag_to_list(FLAGS.dae_batch_size, 'int')

finetune_enc_act_func = utilities.flag_to_list(FLAGS.finetune_enc_act_func,
                                               'str')
finetune_dec_act_func = utilities.flag_to_list(FLAGS.finetune_dec_act_func,
                                               'str')

def do_predict(internal_FLAGS, internal_dataset):
    # Convert the autoencoder layer parameters from strings to their specific types
    dae_layers = utilities.flag_to_list(internal_FLAGS.dae_layers, 'int')
    dae_enc_act_func = utilities.flag_to_list(internal_FLAGS.dae_enc_act_func,
                                              'str')
    dae_dec_act_func = utilities.flag_to_list(internal_FLAGS.dae_dec_act_func,
                                              'str')
    dae_opt = utilities.flag_to_list(internal_FLAGS.dae_opt, 'str')
    dae_loss_func = utilities.flag_to_list(internal_FLAGS.dae_loss_func, 'str')
    dae_learning_rate = utilities.flag_to_list(
        internal_FLAGS.dae_learning_rate, 'float')
    dae_regcoef = utilities.flag_to_list(internal_FLAGS.dae_regcoef, 'float')
    dae_corr_type = utilities.flag_to_list(internal_FLAGS.dae_corr_type, 'str')
    dae_corr_frac = utilities.flag_to_list(internal_FLAGS.dae_corr_frac,
                                           'float')
    dae_num_epochs = utilities.flag_to_list(internal_FLAGS.dae_num_epochs,
                                            'int')
    dae_batch_size = utilities.flag_to_list(internal_FLAGS.dae_batch_size,
                                            'int')

    # Validate the parameters
    assert all([0. <= cf <= 1. for cf in dae_corr_frac])
    assert all(
        [ct in ['masking', 'salt_and_pepper', 'none'] for ct in dae_corr_type])
    assert internal_FLAGS.dataset in ['mnist', 'cifar10', 'custom']
    assert len(dae_layers) > 0
    assert all([af in ['sigmoid', 'tanh'] for af in dae_enc_act_func])
    assert all([af in ['sigmoid', 'tanh', 'none'] for af in dae_dec_act_func])

    utilities.random_seed_np_tf(internal_FLAGS.seed)

    # Create the encoder, decoder and fine-tune activation functions and the network model object
    sdae = None

    dae_enc_act_func = [utilities.str2actfunc(af) for af in dae_enc_act_func]
    dae_dec_act_func = [utilities.str2actfunc(af) for af in dae_dec_act_func]
    finetune_act_func = utilities.str2actfunc(internal_FLAGS.finetune_act_func)

    sdae = stacked_denoising_autoencoder.StackedDenoisingAutoencoder(
        do_pretrain=internal_FLAGS.do_pretrain,
        name=internal_FLAGS.name,
        layers=dae_layers,
        finetune_loss_func=internal_FLAGS.finetune_loss_func,
        finetune_learning_rate=internal_FLAGS.finetune_learning_rate,
        finetune_num_epochs=internal_FLAGS.finetune_num_epochs,
        finetune_opt=internal_FLAGS.finetune_opt,
        finetune_batch_size=internal_FLAGS.finetune_batch_size,
        finetune_dropout=internal_FLAGS.finetune_dropout,
        enc_act_func=dae_enc_act_func,
        dec_act_func=dae_dec_act_func,
        corr_type=dae_corr_type,
        corr_frac=dae_corr_frac,
        regcoef=dae_regcoef,
        loss_func=dae_loss_func,
        opt=dae_opt,
        learning_rate=dae_learning_rate,
        momentum=internal_FLAGS.momentum,
        num_epochs=dae_num_epochs,
        batch_size=dae_batch_size,
        finetune_act_func=finetune_act_func)

    # Train the model (unsupervised pretraining)
    if internal_FLAGS.do_pretrain:
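        # NOTE: trX / vlX (training and validation arrays) are assumed to be
        # loaded elsewhere in the original script; they are not defined here.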
        encoded_X, encoded_vX = sdae.pretrain(trX, vlX)

    teX = internal_dataset
    print('Saving the predictions for the test set...')
    internal_predictions = sdae.predict(teX)
    return internal_predictions
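
# For reference, a minimal sketch of the behaviour assumed for
# utilities.str2actfunc: map an activation-name string to the corresponding
# TensorFlow op (the real yadlt helper may differ in detail).
import tensorflow as tf

def str2actfunc_sketch(act_func):
    return {'sigmoid': tf.nn.sigmoid,
            'tanh': tf.nn.tanh,
            'relu': tf.nn.relu}.get(act_func)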
Example #10
flags.DEFINE_string(
    'rbm_layers', '100,',
    'Comma-separated values for the number of hidden units in the layers.')
flags.DEFINE_string('datafiles', 'filename_missing,',
                    'Comma-separated names of the input files.')
flags.DEFINE_string('rbm_batch_sizes', '10,', 'Size of each mini-batch.')
flags.DEFINE_boolean(
    'memory_mapping', True,
    'default True, Whether to use memory mapping to avoid loading the full dataset into RAM.'
)
flags.DEFINE_boolean(
    'load_batch', False,
    'default False, Whether to load the current batch into RAM.')
flags.DEFINE_boolean('finetuned', False,
                     'default False, Whether to load a finetuned model.')

if __name__ == '__main__':
    rbm_layers = utilities.flag_to_list(FLAGS.rbm_layers, 'int')
    datafiles = utilities.flag_to_list(FLAGS.datafiles, 'str')

    models_dir = os.path.join(config.models_dir, FLAGS.main_dir)
    data_dir = os.path.join(config.data_dir, FLAGS.main_dir)
    summary_dir = os.path.join(config.summary_dir, FLAGS.main_dir)

    if FLAGS.finetuned:
        rbm_layers = [FLAGS.num_visible] + rbm_layers
        model = DBNFinetuned(layers=rbm_layers,
                             visible_unit_type=FLAGS.visible_unit_type,
                             hidden_unit_type=FLAGS.hidden_unit_type,
                             model_name=FLAGS.model_name,
                             main_dir=FLAGS.main_dir,
                             models_dir=models_dir,
                             data_dir=data_dir,
                             summary_dir=summary_dir)

    def do_predict(FLAGS, datasets):
        """
        Run prediction with the stacked denoising autoencoder.
        :param FLAGS: tensorflow.app.flags.FLAGS. Holds the parameters that define the network model.
        :param datasets: ndarray. The prepared test data.
        :return: ndarray. The prediction results.
        """
        # Convert the autoencoder layer parameters from strings to their specific types
        dae_layers = utilities.flag_to_list(FLAGS.dae_layers, 'int')
        dae_enc_act_func = utilities.flag_to_list(FLAGS.dae_enc_act_func, 'str')
        dae_dec_act_func = utilities.flag_to_list(FLAGS.dae_dec_act_func, 'str')
        dae_opt = utilities.flag_to_list(FLAGS.dae_opt, 'str')
        dae_loss_func = utilities.flag_to_list(FLAGS.dae_loss_func, 'str')
        dae_learning_rate = utilities.flag_to_list(FLAGS.dae_learning_rate, 'float')
        dae_regcoef = utilities.flag_to_list(FLAGS.dae_regcoef, 'float')
        dae_corr_type = utilities.flag_to_list(FLAGS.dae_corr_type, 'str')
        dae_corr_frac = utilities.flag_to_list(FLAGS.dae_corr_frac, 'float')
        dae_num_epochs = utilities.flag_to_list(FLAGS.dae_num_epochs, 'int')
        dae_batch_size = utilities.flag_to_list(FLAGS.dae_batch_size, 'int')

        # Validate the parameters
        assert all([0. <= cf <= 1. for cf in dae_corr_frac])
        assert all([ct in ['masking', 'salt_and_pepper', 'none'] for ct in dae_corr_type])
        assert FLAGS.dataset in ['mnist', 'cifar10', 'custom']
        assert len(dae_layers) > 0
        assert all([af in ['sigmoid', 'tanh'] for af in dae_enc_act_func])
        assert all([af in ['sigmoid', 'tanh', 'none'] for af in dae_dec_act_func])

        utilities.random_seed_np_tf(FLAGS.seed)

        # Create the encoder, decoder and fine-tune activation functions and the network model object
        dae_enc_act_func = [utilities.str2actfunc(af) for af in dae_enc_act_func]
        dae_dec_act_func = [utilities.str2actfunc(af) for af in dae_dec_act_func]
        finetune_act_func = utilities.str2actfunc(FLAGS.finetune_act_func)

        sdae = stacked_denoising_autoencoder.StackedDenoisingAutoencoder(
            do_pretrain=FLAGS.do_pretrain, name=FLAGS.name,
            layers=dae_layers, finetune_loss_func=FLAGS.finetune_loss_func,
            finetune_learning_rate=FLAGS.finetune_learning_rate,
            finetune_num_epochs=FLAGS.finetune_num_epochs,
            finetune_opt=FLAGS.finetune_opt, finetune_batch_size=FLAGS.finetune_batch_size,
            finetune_dropout=FLAGS.finetune_dropout,
            enc_act_func=dae_enc_act_func, dec_act_func=dae_dec_act_func,
            corr_type=dae_corr_type, corr_frac=dae_corr_frac, regcoef=dae_regcoef,
            loss_func=dae_loss_func, opt=dae_opt,
            learning_rate=dae_learning_rate, momentum=FLAGS.momentum,
            num_epochs=dae_num_epochs, batch_size=dae_batch_size,
            finetune_act_func=finetune_act_func)

        # Train the model (unsupervised pretraining)
        # if FLAGS.do_pretrain:
        #     encoded_X, encoded_vX = sdae.pretrain(trX, vlX)

        teX = datasets
        # print('Saving the predictions for the test set...')
        internal_predictions = sdae.predict(teX)
        return internal_predictions
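
    # A hedged usage sketch (not in the original source): feed a prepared .npy
    # test array to do_predict above. The file name and shape are illustrative
    # assumptions.
    test_X = np.load('test_dataset.npy')   # ndarray, shape (n_samples, n_features)
    predictions = do_predict(FLAGS, test_X)
    np.save('predictions.npy', predictions)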