Example #1
File: AML.py  Project: glgerard/MDBN
def train_ME(datafile,
             rng,
             clip=None,
             batch_size=20,
             k=10,
             lambda_1=0.0,
             lambda_2=0.1,
             layers_sizes=[40],
             pretraining_epochs=[80000],
             pretrain_lr=[0.005],
             holdout=0.1,
             repeats=10,
             graph_output=False,
             datadir='data'):
    print('*** Training on ME ***')

    train_set, validation_set = load_n_preprocess_data(datafile,
                                                       clip=clip,
                                                       holdout=holdout,
                                                       repeats=repeats,
                                                       datadir=datadir)

    return train_bottom_layer(train_set,
                              validation_set,
                              batch_size=batch_size,
                              k=k,
                              layers_sizes=layers_sizes,
                              pretraining_epochs=pretraining_epochs,
                              pretrain_lr=pretrain_lr,
                              lambda_1=lambda_1,
                              lambda_2=lambda_2,
                              rng=rng,
                              graph_output=graph_output)
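
A minimal usage sketch, not taken from the project: it assumes train_ME is importable from AML.py, that rng is a NumPy RandomState (as the parameter name suggests), and that 'AML_ME.txt' is a placeholder file sitting under the default 'data' directory.

import numpy
from AML import train_ME

# Placeholder data file name; train_ME resolves it relative to datadir='data'.
rng = numpy.random.RandomState(123)
result = train_ME('AML_ME.txt', rng, batch_size=20, k=10)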
Example #2
File: AMLsm2.py  Project: glgerard/MDBN
def train_DM(datafile,
             rng,
             clip=None,
             batch_size=20,
             k=1,
             lambda_1=0,
             lambda_2=1,
             layers_sizes=[400, 40],
             pretraining_epochs=[8000, 800],
             pretrain_lr=[0.005, 0.1],
             holdout=0.1,
             repeats=10,
             graph_output=False,
             datadir='data'):
    print('*** Training on DM ***')

    train_set, validation_set = load_n_preprocess_data(datafile,
                                                       clip=clip,
                                                       holdout=holdout,
                                                       repeats=repeats,
#                                                       transform_fn=numpy.power,
#                                                       exponent=1.0/6.0,
                                                       datadir=datadir)

    return train_bottom_layer(train_set, validation_set,
                              batch_size=batch_size,
                              k=k,
                              layers_sizes=layers_sizes,
                              pretraining_epochs=pretraining_epochs,
                              pretrain_lr=pretrain_lr,
                              lambda_1=lambda_1,
                              lambda_2=lambda_2,
                              rng=rng,
                              graph_output=graph_output)
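
A similar hedged sketch for the two-layer DM variant, again with a placeholder file name and assuming train_DM is importable from AMLsm2.py. It illustrates how the per-layer lists (sizes, epochs, learning rates) line up by position.

import numpy
from AMLsm2 import train_DM

# Each list entry corresponds to one layer: 400 hidden units, then 40.
rng = numpy.random.RandomState(123)
result = train_DM('AML_DM.txt', rng,
                  layers_sizes=[400, 40],
                  pretraining_epochs=[8000, 800],
                  pretrain_lr=[0.005, 0.1])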
Example #3
    def MLP_output_from_datafile(self,
                                 datafile,
                                 holdout=0.0,
                                 repeats=1,
                                 clip=None,
                                 transform_fn=None,
                                 exponent=1.0,
                                 datadir='data'):
        train_set, validation_set = load_n_preprocess_data(datafile,
                                                           holdout=holdout,
                                                           clip=clip,
                                                           transform_fn=transform_fn,
                                                           exponent=exponent,
                                                           repeats=repeats,
                                                           shuffle=False,
                                                           datadir=datadir)

        return (self.get_output(train_set), self.get_output(validation_set))
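
A minimal sketch of how this method might be called, assuming net is an already trained instance of the class that defines MLP_output_from_datafile (the class itself is not shown above) and that the data file name is a placeholder.

# 'net' is assumed to be a trained model exposing this method.
# With holdout=0.0 and shuffle disabled internally, the rows are
# presumably kept in file order for the training output.
train_out, validation_out = net.MLP_output_from_datafile('AML_ME.txt',
                                                         holdout=0.0,
                                                         repeats=1)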