Example #1
import numpy as np


def neuralnet_coredeeplearning():
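    # Fully connected net: 2 inputs -> 128 sigmoid hidden units -> 1 output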
    model = Seq(
        Linear(2, 128),
        PlusBias(128),
        Sigmoid(),
        # Linear(256, 256), PlusBias(256),
        # Sigmoid(),
        # Linear(256, 256), PlusBias(256), Relu(),
        # Linear(128, 128), PlusBias(128),# Sigmoid(),
        Linear(128, 1),
        # Sum(),
    )
    # neural_controller = NeuralControl(model=model, loss=SquaredLoss(),
    #                                   optimizer= RMSProp(learning_rate=0.1,decay_rate=0.9)
    #                                   # AdaGrad(learning_rate=0.1),
    #                                   # MomentumSGD(learning_rate=0.1, momentum=0.9)
    #                                   )
    trainset = list(zip(X, Y))
    np.random.shuffle(trainset)
    MinibatchTrainer().train_minibatches(
        model=model,
        train_set=trainset,
        batch_size=32,
        loss=MSE(),
        optimizer=RMSProp(learning_rate=0.1, decay_rate=0.8),
        # optimizer=AdaGrad(learning_rate=0.1),
        epochs=200,
        show_progress=True)

    print(model.forward(np.array([0.5, 0.5])))
    return model.forward
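The function expects module-level arrays X and Y. A minimal driving sketch, where the data and the regression target are illustrative assumptions, not part of the original:

import numpy as np

# Hypothetical stand-ins for the module-level training data
X = np.random.rand(200, 2)        # 200 two-dimensional inputs
Y = X.sum(axis=1, keepdims=True)  # toy regression target

predict = neuralnet_coredeeplearning()
print(predict(np.array([0.3, 0.7])))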
Example #2
class WxBiasLinear(Layer):
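    """Affine layer: a weight multiply (Wx) followed by a bias add (PlusBias)."""
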
    def __init__(self, in_size, out_size, initialize_W, initialize_b):
        self.Wx = Wx(in_size, out_size, initialize_W)
        self.bias = PlusBias(out_size, initialize_b)
        self.model = Seq(self.Wx, self.bias)

    def forward(self, x, is_training=False):
        return self.model.forward(x, is_training)

    def backward(self, dJdy):
        return self.model.backward(dJdy)

    def update_weights(self, optimizer):
        return self.model.update_weights(optimizer)
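A minimal usage sketch for the layer, assuming 'random' is an accepted initializer string here as it is for Linear in the later examples:

import numpy as np

layer = WxBiasLinear(2, 3, initialize_W='random', initialize_b='random')
y = layer.forward(np.array([0.5, -0.5]))  # affine map: weights, then bias
dx = layer.backward(np.ones(3))           # gradient with respect to the input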
Example #3
    def run(self):
        train_set, valid_set, test_set = gen_data(dataset='regression')

        # print(test_set)
        l1 = 0.
        l2 = 0.
        model = Seq([
            Linear(2, 100),
            # Dropout(0.8),
            Tanh(),
            Linear(100, 4),
            # Tanh(),
            # Dropout(0.6)
        ])
        #
        # model = SyntaxLayer(
        #     syntax.Linear(10, 4,
        #         syntax.Tanh(syntax.Linear(2, 10, input=syntax.Var('x')))))

        # trainer = SimpleTrainer()
        # trainer.train(model, train_set,
        #               loss=CrossEntropyLoss(),
        #               optimizer=SGD(learning_rate=0.1),
        #               # optimizer=MomentumSGD(learning_rate=0.1, momentum=0.8),
        #               # optimizer=AdaGrad(learning_rate=0.9),
        #               # optimizer=RMSProp(learning_rate=0.1, decay_rate=0.9),
        #               epochs=100)

        trainer = MinibatchTrainer()
        batch_size = 40
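        # Scale the base learning rate down by the number of batches per epoch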
        batches_n = (float(len(train_set)) / batch_size)
        learning_rate = 0.1 / batches_n
        # print learning_rate
        mean_losses = trainer.train_minibatches(model,
                                                train_set,
                                                batch_size,
                                                epochs=100,
                                                loss=SquaredLoss(),
                                                optimizer=MomentumSGD(
                                                    learning_rate,
                                                    momentum=0.4),
                                                show_progress=True)

        # trainer = PatienceTrainer()
        # mean_losses = trainer.train(model, train_set, valid_set, test_set,
        #                             batch_size=1,
        #                             max_epochs=10000,
        #                             loss=CrossEntropyLoss(),
        #                             test_score_function=CrossEntropyLoss.test_score,
        #                             optimizer=SGD(learning_rate=0.1))

        draw_decision_surface(model)
        scatter_train_data(train_set)

        plot_mean_loss(mean_losses)
        plt.show()
Example #4
    def test_Perceptron(self):
        train_set, test_set = gen_data()

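        # Two-layer sigmoid network: 2 inputs -> 5 hidden -> 2 outputs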
        model = Seq([
            Linear(2, 5, initialize='random'),
            Sigmoid(),
            Linear(5, 2, initialize='random'),
            Sigmoid(),
        ])

        OnlineTrainer().train(
            model,
            train_set=train_set,
            loss=SquaredLoss(),
            # optimizer=SGD(learning_rate=0.1),
            optimizer=MomentumSGD(learning_rate=0.1, momentum=0.9),
            # optimizer=AdaGrad(learning_rate=0.9),
            # optimizer=RMSProp(learning_rate=0.1, decay_rate=0.9),
            epochs=200,
            save_progress=False)
        # OnlineTrainer().train_one(model,np.random.rand(1,2),[2])

        # model.learn_minibatch(
        # input_data=train_data,
        # target_data=train_targets,
        # loss=SquaredLoss(),
        # batch_size=5,
        # # optimizer=SGD(learning_rate=0.1),
        # # optimizer=MomentumSGD(learning_rate=0.1, momentum=0.9),
        # optimizer=AdaGrad(learning_rate=0.9),
        # # optimizer=RMSProp(learning_rate=0.1, decay_rate=0.9),
        #
        # epochs=100,
        # save_progress=True)

        model.save_to_file('perceptron.pkl')

        scatter_test_data(test_set, model)

        # model.plot_errors_history()
        # model.plot_loss_gradient_history()
        plt.show()
Example #5
    def run(self,
            batch_size=10,
            learning_rate=0.6,
            train_set_percentage=1.0,
            epochs=3):
        model = Seq(
            WxBiasLinear(784, 10, initialize='random')
            # Dropout(0.5)
        )

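        # Optionally train on only a fraction of the training set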
        train_set_sliced = slice_percentage(self.train_set,
                                            train_set_percentage)

        trainer = MinibatchTrainer()
        trainer.train_minibatches(
            model,
            train_set_sliced,
            batch_size=batch_size,
            loss=CrossEntropyLoss(),
            epochs=epochs,
            optimizer=SGD(learning_rate=learning_rate),
            # optimizer=RMSProp(learning_rate=learning_rate, decay_rate=0.6),
            # optimizer=AdaGrad(learning_rate=learning_rate),
            show_progress=True)

        # self.show_mnist_grid(model, self.test_set)

        # trainer = PatienceTrainer()
        # trainer.train(model,
        #               train_set_sliced, self.valid_set, self.test_set,
        #               batch_size=batch_size,
        #               loss=CrossEntropyLoss(),
        #               max_epochs=100,
        #               # optimizer=MomentumSGD(learning_rate=learning_rate, momentum=0.5),
        #               optimizer=RMSProp(learning_rate=learning_rate, decay_rate=0.9),
        #               # optimizer=AdaGrad(learning_rate=learning_rate),
        #               test_score_function=self.test_score_fun
        # )

        test_score = CrossEntropyLoss().test_score(model, self.test_set)

        return {
            # 'train_score': train_score,
            'test_score': test_score,
        }
Example #6
def buildneuralnetwork(input_size, output_size):
    """

    :return:
    """

    model = Seq([
        Linear(input_size, 32, initialize='random'),
        PlusBias(32),
        Sigmoid(),
        Dropout(0.9),
        # Linear(128,32),
        # PlusBias(32),
        # Relu(),
        # Dropout(0.9),
        Linear(32, out_size=output_size, initialize='random'),

        # Sum()
    ])
    neural_controller = NeuralControl(
        model=model,
        loss=SquaredLoss(),
        optimizer=MomentumSGD(learning_rate=0.1, momentum=0.9))

    return neural_controller
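A minimal usage sketch; the sizes are illustrative, and it assumes NeuralControl keeps a reference to the model it was built with, which the original does not confirm:

import numpy as np

controller = buildneuralnetwork(input_size=4, output_size=2)
# Assumption: NeuralControl stores its model as controller.model
print(controller.model.forward(np.random.rand(4)))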