Code example #1
File: Laura_Two_Layers.py Project: hycis/Pynet
    def run(self):

        dataset = self.build_dataset()
        learning_rule = self.build_learning_rule()
        learn_method = self.build_learning_method()

        model = self.build_model(dataset.feature_size())
        model.layers[0].dropout_below = self.state.hidden1.dropout_below

        if self.state.log.save_to_database_name:
            database = self.build_database(dataset, learning_rule, learn_method, model)
            database['records']['h1_model'] = self.state.hidden1.model
            database['records']['h2_model'] = self.state.hidden2.model
            log = self.build_log(database)
        else:
            # fall back to a plain log; otherwise log.info() below would
            # raise NameError when no database name is configured
            log = self.build_log()

        log.info("Fine Tuning")
        for layer in model.layers:
            layer.dropout_below = None
            layer.noise = None

        train_obj = TrainObject(log=log,
                                dataset=dataset,
                                learning_rule=learning_rule,
                                learning_method=learn_method,
                                model=model)

        train_obj.run()
Code example #2
    def run(self):
        log = self.build_log()
        dataset = self.build_dataset()

        learning_rule = self.build_learning_rule()
        model = self.build_model(dataset)
        train_obj = TrainObject(log=log,
                                dataset=dataset,
                                learning_rule=learning_rule,
                                model=model)
        train_obj.run()
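Most run() methods on this page are instances of the same template-method pattern: a base experiment class supplies run(), and subclasses override the build_* hooks. The skeleton below is a hypothetical sketch of that contract, using only hook names that appear in the examples; it is not the actual Pynet base class.

class ExperimentTemplate(object):
    # Hypothetical skeleton of the build_* contract assumed by the
    # run() methods on this page; not the real Pynet base class.

    def build_dataset(self):
        raise NotImplementedError

    def build_model(self, dataset):
        raise NotImplementedError

    def build_learning_rule(self):
        raise NotImplementedError

    def build_learning_method(self):
        raise NotImplementedError

    def build_log(self, database=None):
        raise NotImplementedError

    def run(self):
        # the minimal recipe every example shares
        dataset = self.build_dataset()
        learning_rule = self.build_learning_rule()
        model = self.build_model(dataset)
        train_obj = TrainObject(log=self.build_log(),
                                dataset=dataset,
                                learning_rule=learning_rule,
                                model=model)
        train_obj.run()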
Code example #3
File: NN.py Project: hycis/Pynet
 def run(self):
     dataset = self.build_dataset()
     learning_rule = self.build_learning_rule()
     model = self.build_model(dataset)
     learn_method = self.build_learning_method()
     database = self.build_database(dataset, learning_rule, learn_method, model)
     log = self.build_log(database)
     train_obj = TrainObject(log=log,
                             dataset=dataset,
                             learning_rule=learning_rule,
                             learning_method=learn_method,
                             model=model)
     train_obj.run()
Code example #4
File: AE_example.py Project: hycis/Pynet
def autoencoder():

    # building dataset, batch_size and preprocessor
    data = Mnist(train_valid_test_ratio=[8, 1, 1], batch_size=100, preprocessor=GCN())

    # for AutoEncoder, the inputs and outputs must be the same
    train = data.get_train()
    data.set_train(train.X, train.X)

    valid = data.get_valid()
    data.set_valid(valid.X, valid.X)

    test = data.get_test()
    data.set_test(test.X, test.X)

    # building autoencoder
    ae = AutoEncoder(input_dim=data.feature_size(), rand_seed=123)
    h1_layer = Tanh(dim=500, name="h1_layer", W=None, b=None)

    # adding encoding layer
    ae.add_encode_layer(h1_layer)

    # mirror layer has W = h1_layer.W.T
    h1_mirror = Tanh(dim=ae.input_dim, name="h1_mirror", W=h1_layer.W.T, b=None)

    # adding decoding mirror layer
    ae.add_decode_layer(h1_mirror)

    # build learning method
    learning_method = AdaGrad(learning_rate=0.1, momentum=0.9)

    # set the learning rules
    learning_rule = LearningRule(
        max_col_norm=10,
        L1_lambda=None,
        L2_lambda=None,
        training_cost=Cost(type="mse"),
        learning_rate_decay_factor=None,
        stopping_criteria={
            "max_epoch": 300,
            "epoch_look_back": 10,
            "cost": Cost(type="error"),
            "percent_decrease": 0.01,
        },
    )

    # put all the components into a TrainObject
    train_object = TrainObject(model=ae, dataset=data, learning_rule=learning_rule, learning_method=learning_method)

    # finally run the training
    train_object.run()
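Once train_object.run() returns, the fitted autoencoder can be reused directly, for example to compress data with the trained encoder. A minimal sketch using the same ae.encode() call that appears in the stacked example (code example #12) below:

# compress the test set with the trained encoder
reduced_test_X = ae.encode(test.X)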
Code example #5
File: Laura_Continue.py Project: hycis/Pynet
    def run(self):

        dataset = self.build_dataset()
        learning_rule = self.build_learning_rule()
        learn_method = self.build_learning_method()

        model = self.build_model()

        if self.state.fine_tuning_only:
            for layer in model.layers:
                layer.dropout_below = None
                layer.noise = None
            print "Fine Tuning Only"

        if self.state.log.save_to_database_name:
            database = self.build_database(dataset, learning_rule, learn_method, model)
            database['records']['model'] = self.state.hidden1.model
            log = self.build_log(database)
        else:
            # fall back to a plain log so log.info() below still works
            log = self.build_log()

        train_obj = TrainObject(log=log,
                                dataset=dataset,
                                learning_rule=learning_rule,
                                learning_method=learn_method,
                                model=model)

        train_obj.run()

        if not self.state.fine_tuning_only:
            log.info("..Fine Tuning after Noisy Training")
            for layer in train_obj.model.layers:
                layer.dropout_below = None
                layer.noise = None
            train_obj.setup()
            train_obj.run()
Code example #6
    def run(self):

        dataset = self.build_dataset()
        learning_rule = self.build_learning_rule()
        learn_method = self.build_learning_method()

        if self.state.num_layers == 1:
            model = self.build_one_hid_model_no_transpose(dataset.feature_size())
        else:
            raise ValueError('this experiment only supports num_layers == 1')

        if self.state.log.save_to_database_name:
            database = self.build_database(dataset, learning_rule, learn_method, model)
            log = self.build_log(database)
        else:
            # fall back to a plain log so log.info() below still works
            log = self.build_log()

        train_obj = TrainObject(log=log,
                                dataset=dataset,
                                learning_rule=learning_rule,
                                learning_method=learn_method,
                                model=model)

        train_obj.run()

        # fine tuning
        log.info("fine tuning")
        train_obj.model.layers[0].dropout_below = None
        train_obj.setup()
        train_obj.run()
Code example #7
File: Laura.py Project: hycis/Pynet
    def run(self):

        dataset = self.build_dataset()
        learning_rule = self.build_learning_rule()
        learn_method = self.build_learning_method()

        if self.state.num_layers == 1:
            model = self.build_one_hid_model(dataset.feature_size())
        elif self.state.num_layers == 2:
            model = self.build_two_hid_model(dataset.feature_size())
        elif self.state.num_layers == 3:
            model = self.build_three_hid_model(dataset.feature_size())
        else:
            raise ValueError('num_layers must be 1, 2 or 3')

        database = self.build_database(dataset, learning_rule, learn_method, model)
        log = self.build_log(database)

        dataset.log = log

        train_obj = TrainObject(
            log=log, dataset=dataset, learning_rule=learning_rule, learning_method=learn_method, model=model
        )

        train_obj.run()

        log.info("Fine Tuning")

        # dropout and input noise are switched off so the fine-tuning
        # pass runs on clean, deterministic activations
        for layer in train_obj.model.layers:
            layer.dropout_below = None
            layer.noise = None

        train_obj.setup()
        train_obj.run()
Code example #8
    def run(self):
        if self.state.dataset.preprocessor is None:
            preprocessor = None
        else:
            preprocessor = getattr(preproc, self.state.dataset.preprocessor)()
        dataset = getattr(mapping, self.state.dataset.type)(
            feature_size=self.state.dataset.feature_size,
            target_size=self.state.dataset.target_size,
            train_valid_test_ratio=self.state.dataset.train_valid_test_ratio,
            preprocessor=preprocessor,
            batch_size=self.state.dataset.batch_size,
            num_batches=self.state.dataset.num_batches,
            iter_class=self.state.dataset.iter_class,
            rng=self.state.dataset.rng)
        model = MLP(input_dim=self.state.dataset.feature_size,
                    rand_seed=self.state.model.rand_seed)
        hidden1 = getattr(layer, self.state.hidden1.type)(
            dim=self.state.hidden1.dim,
            name=self.state.hidden1.name,
            dropout_below=self.state.hidden1.dropout_below)
        model.add_layer(hidden1)

        hidden2 = getattr(layer, self.state.hidden2.type)(
            dim=self.state.hidden2.dim,
            name=self.state.hidden2.name,
            dropout_below=self.state.hidden2.dropout_below)
        model.add_layer(hidden2)

        output = getattr(layer, self.state.output.type)(
            dim=self.state.output.dim,
            name=self.state.output.name,
            dropout_below=self.state.output.dropout_below)
        model.add_layer(output)

        learning_rule = self.build_learning_rule()
        learn_method = self.build_learning_method()
        database = self.build_database(dataset, learning_rule, learn_method, model)
        log = self.build_log(database)

        train_obj = TrainObject(log=log,
                                dataset=dataset,
                                learning_rule=learning_rule,
                                learning_method=learn_method,
                                model=model)

        train_obj.run()
Code example #9
File: MLP.py Project: hycis/Pynet
 def run(self):
     dataset = self.build_dataset()
     learning_rule = self.build_learning_rule()
     model = self.build_model(dataset)
     learn_method = self.build_learning_method()
     database = self.build_database(dataset, learning_rule, learn_method,
                                    model)
     log = self.build_log(database)
     train_obj = TrainObject(log=log,
                             dataset=dataset,
                             learning_rule=learning_rule,
                             learning_method=learn_method,
                             model=model)
     train_obj.run()
     log.info("fine tuning")
     for layer in train_obj.model.layers:
         layer.dropout_below = None
         layer.noise = None
     train_obj.setup()
     train_obj.run()
Code example #10
File: Laura_No_Transpose.py Project: hycis/Pynet
    def run(self):

        dataset = self.build_dataset()
        learning_rule = self.build_learning_rule()
        learn_method = self.build_learning_method()

        if self.state.num_layers == 1:
            model = self.build_one_hid_model_no_transpose(
                dataset.feature_size())
        elif self.state.num_layers == 2:
            model = self.build_two_hid_model_no_transpose(
                dataset.feature_size())
        elif self.state.num_layers == 3:
            model = self.build_three_hid_model_no_transpose(
                dataset.feature_size())
        else:
            raise ValueError('num_layers must be 1, 2 or 3')

        database = self.build_database(dataset, learning_rule, learn_method,
                                       model)
        log = self.build_log(database)

        dataset.log = log

        train_obj = TrainObject(log=log,
                                dataset=dataset,
                                learning_rule=learning_rule,
                                learning_method=learn_method,
                                model=model)

        train_obj.run()

        log.info("Fine Tuning")

        for layer in train_obj.model.layers:
            layer.dropout_below = None
            layer.noise = None

        train_obj.setup()
        train_obj.run()
Code example #11
File: mlp_example.py Project: hycis/Pynet
def mlp():

    # build dataset
    data = Mnist(preprocessor=None, train_valid_test_ratio=[5, 1, 1])

    # build mlp
    mlp = MLP(input_dim=data.feature_size())

    W1 = GaussianWeight(prev_dim=mlp.input_dim, this_dim=1000)
    hidden1 = PRELU(dim=1000,
                    name='h1_layer',
                    W=W1(mean=0, std=0.1),
                    b=None,
                    dropout_below=None)

    mlp.add_layer(hidden1)

    W2 = XavierWeight(prev_dim=hidden1.dim, this_dim=data.target_size())
    output = Softmax(dim=data.target_size(),
                     name='output_layer',
                     W=W2(),
                     b=None,
                     dropout_below=None)

    mlp.add_layer(output)

    # build learning method
    learning_method = AdaGrad(learning_rate=0.1, momentum=0.9)

    # set the learning rules
    learning_rule = LearningRule(max_col_norm=10,
                                 L1_lambda=None,
                                 L2_lambda=None,
                                 training_cost=Cost(type='mse'),
                                 learning_rate_decay_factor=None,
                                 stopping_criteria={
                                     'max_epoch': 300,
                                     'epoch_look_back': 10,
                                     'cost': Cost(type='error'),
                                     'percent_decrease': 0.01
                                 })

    # (optional) build the logging object
    log = Log(experiment_name='mnist',
              description='This is a tutorial example',
              save_outputs=True,
              save_learning_rule=True,
              save_model=True,
              save_epoch_error=True,
              save_to_database={
                  'name': 'Example.db',
                  'records': {
                      'Dataset': data.__class__.__name__,
                      'max_col_norm': learning_rule.max_col_norm,
                      'Weight_Init_Seed': mlp.rand_seed,
                      'Dropout_Below': str([layer.dropout_below for layer in mlp.layers]),
                      'Batch_Size': data.batch_size,
                      'Layer_Dim': str([layer.dim for layer in mlp.layers]),
                      'Layer_Types': str([layer.__class__.__name__ for layer in mlp.layers]),
                      'Preprocessor': data.preprocessor.__class__.__name__,
                      'Learning_Rate': learning_method.learning_rate,
                      'Momentum': learning_method.momentum,
                      'Training_Cost': learning_rule.cost.type,
                      'Stopping_Cost': learning_rule.stopping_criteria['cost'].type
                  }
              })  # end log

    # put everything into the train object
    train_object = TrainObject(model=mlp,
                               dataset=data,
                               learning_rule=learning_rule,
                               learning_method=learning_method,
                               log=log)
    # finally run the code
    train_object.run()
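To try the tutorial end to end, call the function directly. A hypothetical entry point (mlp_example.py may already ship its own):

if __name__ == '__main__':
    mlp()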
Code example #12
File: AE_example.py Project: hycis/Pynet
def stacked_autoencoder():

    name = "Stacked_AE"

    # =====[ Train first layer of the stacked autoencoder ]=====#
    print("Start training First Layer of AutoEncoder")

    # building dataset, batch_size and preprocessor
    data = Mnist(train_valid_test_ratio=[8, 1, 1], batch_size=100)

    # for AutoEncoder, the inputs and outputs must be the same
    train = data.get_train()
    data.set_train(train.X, train.X)

    valid = data.get_valid()
    data.set_valid(valid.X, valid.X)

    test = data.get_test()
    data.set_test(test.X, test.X)

    # building autoencoder
    ae = AutoEncoder(input_dim=data.feature_size(), rand_seed=123)
    h1_layer = RELU(dim=500, name="h1_layer", W=None, b=None)

    # adding encoding layer
    ae.add_encode_layer(h1_layer)

    # mirror layer has W = h1_layer.W.T
    h1_mirror = RELU(dim=ae.input_dim, name="h1_mirror", W=h1_layer.W.T, b=None)

    # adding decoding mirror layer
    ae.add_decode_layer(h1_mirror)

    # build learning method
    learning_method = SGD(learning_rate=0.001, momentum=0.9)

    # set the learning rules
    learning_rule = LearningRule(
        max_col_norm=10,
        L1_lambda=None,
        L2_lambda=None,
        training_cost=Cost(type="mse"),
        learning_rate_decay_factor=None,
        stopping_criteria={"max_epoch": 3, "epoch_look_back": 1, "cost": Cost(type="error"), "percent_decrease": 0.01},
    )

    # put all the components into a TrainObject
    train_object = TrainObject(model=ae, dataset=data, learning_rule=learning_rule, learning_method=learning_method)

    # finally run the training
    train_object.run()

    # =====[ Train Second Layer of autoencoder ]=====#

    print("Start training Second Layer of AutoEncoder")

    # push the data through the trained encoder to get the reduced representation
    reduced_train_X = ae.encode(train.X)
    reduced_valid_X = ae.encode(valid.X)
    reduced_test_X = ae.encode(test.X)

    data.set_train(X=reduced_train_X, y=reduced_train_X)
    data.set_valid(X=reduced_valid_X, y=reduced_valid_X)
    data.set_test(X=reduced_test_X, y=reduced_test_X)

    # create a second autoencoder that takes the encoded outputs of the first as its input
    ae2 = AutoEncoder(input_dim=data.feature_size(), rand_seed=None)

    h2_layer = RELU(dim=100, name="h2_layer", W=None, b=None)
    ae2.add_encode_layer(h2_layer)

    h2_mirror = RELU(dim=h1_layer.dim, name="h2_mirror", W=h2_layer.W.T, b=None)
    ae2.add_decode_layer(h2_mirror)

    train_object = TrainObject(model=ae2, dataset=data, learning_rule=learning_rule, learning_method=learning_method)

    train_object.run()

    # =====[ Fine Tuning ]=====#
    print("Fine Tuning")

    data = Mnist()

    train = data.get_train()
    data.set_train(train.X, train.X)

    valid = data.get_valid()
    data.set_valid(valid.X, valid.X)

    test = data.get_test()
    data.set_test(test.X, test.X)

    ae3 = AutoEncoder(input_dim=data.feature_size(), rand_seed=None)
    ae3.add_encode_layer(h1_layer)
    ae3.add_encode_layer(h2_layer)
    ae3.add_decode_layer(h2_mirror)
    ae3.add_decode_layer(h1_mirror)

    train_object = TrainObject(model=ae3, dataset=data, learning_rule=learning_rule, learning_method=learning_method)

    train_object.run()
    print("Training Done")
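Taken together, all twelve examples reduce to the same five-step recipe: build a dataset, build a model, pick a learning method, set a learning rule, then hand everything to TrainObject and call run(). The condensed, untested sketch below uses only names that appear in the examples above; imports are omitted as in the listings, defaults follow mlp_example.py, and W=None relies on the layer's default weight initialisation as in the autoencoder examples.

def minimal_pipeline():
    # 1. dataset
    data = Mnist(preprocessor=None, train_valid_test_ratio=[5, 1, 1])

    # 2. model: a single softmax layer on top of the raw features
    model = MLP(input_dim=data.feature_size())
    model.add_layer(Softmax(dim=data.target_size(), name='output_layer',
                            W=None, b=None, dropout_below=None))

    # 3. learning method and 4. learning rule, as in mlp_example.py
    learning_method = AdaGrad(learning_rate=0.1, momentum=0.9)
    learning_rule = LearningRule(max_col_norm=10,
                                 L1_lambda=None,
                                 L2_lambda=None,
                                 training_cost=Cost(type='mse'),
                                 learning_rate_decay_factor=None,
                                 stopping_criteria={'max_epoch': 10,
                                                    'epoch_look_back': 5,
                                                    'cost': Cost(type='error'),
                                                    'percent_decrease': 0.01})

    # 5. wire everything into a TrainObject and train
    TrainObject(model=model,
                dataset=data,
                learning_rule=learning_rule,
                learning_method=learning_method).run()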