Example #1
    def test_sew_together_when_cut_piece_already_in_use(self):
        autoencoder = algorithms.Momentum([
            layers.Input(25),
            layers.Sigmoid(15),
            layers.Sigmoid(25),
        ])

        encoder = surgery.cut(autoencoder, start=0, end=2)
        self.assertEqual(len(encoder), 2)

        classifier = algorithms.Momentum(encoder > layers.Softmax(10))

        network = algorithms.GradientDescent([
            layers.Input(5),

            surgery.CutLine(),  # <- first cut point

            layers.Sigmoid(10),
            layers.Sigmoid(20),
            layers.Sigmoid(30),

            surgery.CutLine(),  # <- second cut point

            layers.Sigmoid(1),
        ])
        _, hidden_layers, _ = surgery.cut_along_lines(network)
        self.assertEqual(len(hidden_layers), 3)

        connected_layers = surgery.sew_together([
            encoder,
            layers.Relu(5),
            hidden_layers
        ])
        self.assertEqual(len(connected_layers), 6)
Example #2
    def test_mixture_of_experts_multi_class_classification(self):
        import copy
        insize, outsize = (10, 3)
        n_epochs = 10

        default_configs = dict(step=0.1,
                               batch_size=10,
                               error='categorical_crossentropy',
                               verbose=False)

        architecture = layers.join(layers.Input(insize), layers.Relu(20),
                                   layers.Softmax(outsize))

        data, target = datasets.make_classification(n_samples=200,
                                                    n_features=insize,
                                                    n_classes=outsize,
                                                    n_clusters_per_class=2,
                                                    n_informative=5)

        input_scaler = preprocessing.MinMaxScaler((-1, 1))
        one_hot = preprocessing.OneHotEncoder()

        target = target.reshape((-1, 1))
        encoded_target = one_hot.fit_transform(target)
        x_train, x_test, y_train, y_test = model_selection.train_test_split(
            input_scaler.fit_transform(data),
            np.asarray(encoded_target.todense()),
            test_size=0.2)

        # -------------- Train single Momentum network -------------- #

        bpnet = algorithms.Momentum(copy.deepcopy(architecture),
                                    **default_configs)

        bpnet.train(x_train, y_train, epochs=n_epochs)
        network_output = bpnet.predict(x_test)

        network_error = categorical_crossentropy(y_test, network_output)

        # -------------- Train ensemble -------------- #

        moe = algorithms.Momentum(
            architectures.mixture_of_experts([
                copy.deepcopy(architecture),
                copy.deepcopy(architecture),
                copy.deepcopy(architecture),
            ]), **default_configs)
        moe.train(x_train, y_train, epochs=n_epochs)
        ensemble_output = moe.predict(x_test)

        ensemble_error = categorical_crossentropy(y_test, ensemble_output)
        self.assertGreater(network_error, ensemble_error)
Example #3
    def test_mixture_of_experts(self):
        dataset = datasets.load_diabetes()
        data, target = asfloat(dataset.data), asfloat(dataset.target)
        insize, outsize = data.shape[1], 1

        input_scaler = preprocessing.MinMaxScaler((-1, 1))
        output_scaler = preprocessing.MinMaxScaler()
        x_train, x_test, y_train, y_test = model_selection.train_test_split(
            input_scaler.fit_transform(data),
            output_scaler.fit_transform(target.reshape(-1, 1)),
            train_size=0.8)

        n_epochs = 10
        scaled_y_test = output_scaler.inverse_transform(y_test)
        scaled_y_test = scaled_y_test.reshape((y_test.size, 1))

        # -------------- Train single GradientDescent -------------- #

        bpnet = algorithms.GradientDescent((insize, 20, outsize),
                                           step=0.1,
                                           verbose=False)
        bpnet.train(x_train, y_train, epochs=n_epochs)
        network_output = bpnet.predict(x_test)
        network_error = rmsle(output_scaler.inverse_transform(network_output),
                              scaled_y_test)

        # -------------- Train ensemble -------------- #

        moe = algorithms.MixtureOfExperts(
            networks=[
                algorithms.Momentum((insize, 20, outsize),
                                    step=0.1,
                                    batch_size=1,
                                    verbose=False),
                algorithms.Momentum((insize, 20, outsize),
                                    step=0.1,
                                    batch_size=1,
                                    verbose=False),
            ],
            gating_network=algorithms.Momentum(
                layers.Input(insize) > layers.Softmax(2),
                step=0.1,
                verbose=False))
        moe.train(x_train, y_train, epochs=n_epochs)
        ensemble_output = moe.predict(x_test)

        ensemble_error = rmsle(
            output_scaler.inverse_transform(ensemble_output), scaled_y_test)

        self.assertGreater(network_error, ensemble_error)
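Both tests above lean on the same idea: a gating network produces softmax weights that blend the experts' predictions into one output. A minimal NumPy sketch of that standard formulation (the names, shapes, and random weights are illustrative stand-ins, not NeuPy internals):

import numpy as np

def softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()

rng = np.random.RandomState(0)

# Two "experts", each mapping a 10-dimensional sample to one output
# (stand-ins for the trained Momentum networks above).
experts = [lambda x, W=W: W.dot(x) for W in rng.randn(2, 1, 10)]

# Gating network: one softmax score per expert.
W_gate = rng.randn(2, 10)

def mixture_predict(x):
    gate = softmax(W_gate.dot(x))                     # weights sum to 1
    outputs = np.concatenate([f(x) for f in experts])
    return gate.dot(outputs)                          # weighted average

print(mixture_predict(rng.randn(10)))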
Example #4
    def test_max_norm_regularizer(self):
        def on_epoch_end(network):
            layer = network.layers[1]

            weight = layer.weight.get_value()
            weight_norm = np.round(np.linalg.norm(weight), 5)

            bias = layer.bias.get_value()
            bias_norm = np.round(np.linalg.norm(bias), 5)

            error_message = "Epoch #{}".format(network.last_epoch)
            self.assertLessEqual(weight_norm, 2, msg=error_message)
            self.assertLessEqual(bias_norm, 2, msg=error_message)

        mnet = algorithms.Momentum(
            [
                layers.Input(10),
                layers.Relu(20),
                layers.Sigmoid(1),
            ],
            step=0.1,
            momentum=0.95,
            verbose=False,
            epoch_end_signal=on_epoch_end,
            max_norm=2,
            addons=[algorithms.MaxNormRegularization],
        )

        x_train, _, y_train, _ = simple_classification()
        mnet.train(x_train, y_train, epochs=100)
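The assertions in on_epoch_end hold because max-norm regularization rescales any parameter whose norm exceeds max_norm after a weight update. A minimal NumPy sketch of that clipping rule (a generic formulation, not necessarily NeuPy's exact implementation):

import numpy as np

def max_norm_clip(param, max_norm=2.0):
    # Rescale the parameter so its L2 norm never exceeds max_norm.
    norm = np.linalg.norm(param)
    if norm > max_norm:
        param = param * (max_norm / norm)
    return param

weight = 10 * np.random.randn(10, 20)
assert np.linalg.norm(max_norm_clip(weight)) <= 2.0 + 1e-9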
Example #5
def ANN(X_train, X_test, y_train, y_test, X_dummy):
    environment.reproducible()
    target_scaler = OneHotEncoder()
    net = algorithms.Momentum(
        [
            layers.Input(17),
            layers.Relu(100),
            layers.Relu(70),
            layers.Softmax(32),
        ],
        error='categorical_crossentropy',
        step=0.01,
        verbose=True,
        shuffle_data=True,
        momentum=0.99,
        nesterov=True,
    )
    # Convert label vectors to one-hot encoding
    d1 = int(y_train.shape[0])
    d2 = int(y_test.shape[0])
    Y_train = np.zeros((d1, 32))
    Y_test = np.zeros((d2, 32))
    Y_train[np.arange(d1), y_train] = 1
    Y_test[np.arange(d2), y_test] = 1

    net.architecture()
    net.train(X_train, Y_train, X_test, Y_test, epochs=20)
    y_predicted = net.predict(X_test).argmax(axis=1)
    y_dummy = net.predict(X_dummy).argmax(axis=1)
    # print('predicted values', y_predicted)
    Y_test = np.asarray(Y_test.argmax(axis=1)).reshape(len(Y_test))
    #print(metrics.classification_report(Y_test, y_predicted))
    return y_dummy, y_predicted, metrics.accuracy_score(Y_test, y_predicted)
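The zeros-plus-fancy-indexing conversion above can also be written as a single identity-matrix lookup; an equivalent NumPy sketch:

import numpy as np

y_train = np.array([0, 3, 31])   # example class labels in [0, 32)
Y_train = np.eye(32)[y_train]    # one-hot matrix, shape (3, 32)
assert (Y_train.argmax(axis=1) == y_train).all()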
Example #6
    def test_dan_repr(self):
        dan = algorithms.DynamicallyAveragedNetwork([
            algorithms.Momentum((3, 2, 1)),
            algorithms.GradientDescent((3, 2, 1)),
        ])
        dan_repr = str(dan)

        self.assertIn('DynamicallyAveragedNetwork', dan_repr)
        self.assertIn('Momentum', dan_repr)
        self.assertIn('GradientDescent', dan_repr)
Example #7
    def test_simple_momentum(self):
        x_train, x_test, y_train, y_test = simple_classification()
        mnet = algorithms.Momentum(
            (10, 20, 1),
            step=0.35,
            momentum=0.99,
            batch_size='full',
            verbose=False,
            nesterov=True,
        )

        mnet.train(x_train, y_train, x_test, y_test, epochs=30)
        self.assertGreater(0.15, mnet.validation_errors.last())
Example #8
    def test_simple_momentum(self):
        x_train, _, y_train, _ = simple_classification()
        mnet = algorithms.Momentum(
            (10, 20, 1),
            step=0.35,
            momentum=0.99,
            batch_size='full',
            verbose=False,
            nesterov=True,
        )

        mnet.train(x_train, y_train, epochs=40)
        self.assertAlmostEqual(0.017, mnet.errors.last(), places=3)
Example #9
    def test_momentum(self):
        x_train, x_test, y_train, y_test = simple_classification()
        optimizer = algorithms.Momentum(
            self.network,
            step=0.35,
            momentum=0.99,
            batch_size=None,
            verbose=False,
            nesterov=True,
        )

        optimizer.train(x_train, y_train, x_test, y_test, epochs=30)
        self.assertGreater(0.15, optimizer.errors.valid[-1])
Example #10
    def test_mixture_of_experts_repr(self):
        moe = algorithms.MixtureOfExperts(
            networks=[
                algorithms.Momentum((3, 2, 1)),
                algorithms.GradientDescent((3, 2, 1)),
            ],
            gating_network=algorithms.Adadelta(
                layers.Input(3) > layers.Softmax(2), ))
        moe_repr = str(moe)

        self.assertIn('MixtureOfExperts', moe_repr)
        self.assertIn('Momentum', moe_repr)
        self.assertIn('GradientDescent', moe_repr)
        self.assertIn('Adadelta', moe_repr)
Example #11
    def test_training_with_l2_regularization(self):
        x_train, x_test, y_train, y_test = simple_classification()
        mnet = algorithms.Momentum(
            [layers.Input(10),
             layers.Sigmoid(20),
             layers.Sigmoid(1)],
            step=0.35,
            momentum=0.99,
            batch_size=None,
            verbose=False,
            nesterov=True,
            regularizer=algorithms.l2(0.001),
        )
        mnet.train(x_train, y_train, x_test, y_test, epochs=40)
        self.assertGreater(0.15, mnet.errors.valid[-1])
Example #12
	def select_algorithm(self, algorithm, options=None):
		# Default to LevenbergMarquardt; building the network can hit
		# Python's recursion limit for deep architectures.
		try:
			self.network = algorithms.LevenbergMarquardt(self.layers)
			opt = options
			print("Selected optimizer: " + str(algorithm))
		except RecursionError:
			print("Recursion problem")
			return None

		if algorithm == 'GradientDescent':
			self.network = algorithms.GradientDescent(self.layers)
		elif algorithm == 'LevenbergMarquardt':
			self.network = algorithms.LevenbergMarquardt(
				connection=self.layers, mu=opt[0], mu_update_factor=opt[1])
		elif algorithm == 'Adam':
			self.network = algorithms.Adam(self.layers)
		elif algorithm == 'QuasiNewton':
			self.network = algorithms.QuasiNewton(self.layers)
		elif algorithm == 'Quickprop':
			self.network = algorithms.Quickprop(self.layers)
		elif algorithm == 'MinibatchGradientDescent':
			self.network = algorithms.MinibatchGradientDescent(self.layers)
		elif algorithm == 'ConjugateGradient':
			self.network = algorithms.ConjugateGradient(self.layers)
		elif algorithm == 'Hessian':
			self.network = algorithms.Hessian(self.layers)
		elif algorithm == 'HessianDiagonal':
			self.network = algorithms.HessianDiagonal(self.layers)
		elif algorithm == 'Momentum':
			self.network = algorithms.Momentum(self.layers)
		elif algorithm == 'RPROP':
			self.network = algorithms.RPROP(self.layers)
		elif algorithm == 'IRPROPPlus':
			self.network = algorithms.IRPROPPlus(self.layers)
		elif algorithm == 'Adadelta':
			self.network = algorithms.Adadelta(self.layers)
		elif algorithm == 'Adagrad':
			self.network = algorithms.Adagrad(self.layers)
		elif algorithm == 'RMSProp':
			self.network = algorithms.RMSProp(self.layers)
		elif algorithm == 'Adamax':
			self.network = algorithms.Adamax(self.layers)
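The elif chain above repeats the same two-line pattern for every optimizer. A table-driven sketch of the same dispatch (same algorithm names as above; only LevenbergMarquardt needs its extra mu options; the dict-based lookup is an illustrative alternative, not the original author's code):

OPTIMIZERS = {
    'GradientDescent': algorithms.GradientDescent,
    'Adam': algorithms.Adam,
    'QuasiNewton': algorithms.QuasiNewton,
    'Quickprop': algorithms.Quickprop,
    'MinibatchGradientDescent': algorithms.MinibatchGradientDescent,
    'ConjugateGradient': algorithms.ConjugateGradient,
    'Hessian': algorithms.Hessian,
    'HessianDiagonal': algorithms.HessianDiagonal,
    'Momentum': algorithms.Momentum,
    'RPROP': algorithms.RPROP,
    'IRPROPPlus': algorithms.IRPROPPlus,
    'Adadelta': algorithms.Adadelta,
    'Adagrad': algorithms.Adagrad,
    'RMSProp': algorithms.RMSProp,
    'Adamax': algorithms.Adamax,
}

def select_algorithm(self, algorithm, options=None):
    if algorithm == 'LevenbergMarquardt':
        self.network = algorithms.LevenbergMarquardt(
            connection=self.layers,
            mu=options[0],
            mu_update_factor=options[1])
    else:
        self.network = OPTIMIZERS[algorithm](self.layers)
    print("Selected optimizer: " + str(algorithm))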
Example #13
def train(X, Y):

    environment.reproducible()
    img_size = X.shape[1]
    network = algorithms.Momentum(
        [
            layers.Input(img_size),
            layers.Relu(100),
            layers.Softmax(Y.shape[1]),
        ],
        error='categorical_crossentropy',
        step=0.01,
        verbose=True,
        shuffle_data=True,
        momentum=0.9,
        nesterov=True,
    )
    network.architecture()
    network.train(X, Y, epochs=20)
    return network
Example #14
    def initialize(self):
        self.network = algorithms.Momentum(
            [
                layers.Input(20),
                layers.Linear(20, weight=init.Uniform(-0.5, 0.5)),
                layers.LeakyRelu(15, weight=init.Uniform(-0.5, 0.5)),
                layers.LeakyRelu(15, weight=init.Uniform(-0.5, 0.5)),
                layers.LeakyRelu(12, weight=init.Uniform(-0.5, 0.5)),
                layers.Linear(9, weight=init.Uniform(-0.5, 0.5)),
            ],

            error='categorical_crossentropy',
            step=0.01,
            verbose=False,
            shuffle_data=True,

            momentum=0.99,
            nesterov=True,

        )
        self.network.architecture()
Example #15
    def initialize(self):
        self.network = algorithms.Momentum(
            [
                layers.Input(20),
                layers.Relu(30, weight=init.Uniform(-1, 1)),
                layers.Tanh(40, weight=init.Uniform(-1, 1)),
                # layers.Embedding(40, 1),
                # layers.GRU(40),
                layers.Relu(25, weight=init.Uniform(-1, 1)),
                layers.Linear(9, weight=init.Uniform(-1, 1)),
            ],

            error='categorical_crossentropy',
            step=0.01,
            verbose=False,
            shuffle_data=True,

            momentum=0.99,
            nesterov=True,

        )
        self.network.architecture()
Example #16
def train_network(n_hidden, x_train, x_test, y_train, y_test):
    network = algorithms.Momentum(
        [
            layers.Input(64),
            layers.Relu(n_hidden),
            layers.Softmax(10),
        ],

        # Randomly shuffle dataset before each
        # training epoch.
        shuffle_data=True,

        # Do not show training progress in output
        verbose=False,
        step=0.001,
        batch_size=128,
        error='categorical_crossentropy',
    )
    network.train(x_train, y_train, epochs=100)

    # Calculate the categorical cross-entropy error between
    # predictions for x_test and the y_test values
    return network.prediction_error(x_test, y_test)
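Since n_hidden is the only free parameter, train_network drops straight into a simple hyperparameter scan; a usage sketch (the candidate sizes are illustrative):

errors = {
    n_hidden: train_network(n_hidden, x_train, x_test, y_train, y_test)
    for n_hidden in (25, 50, 100, 200)
}
best = min(errors, key=errors.get)  # lowest cross-entropy error wins
print(best, errors[best])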
Example #17
def MomentumAdaptation(col_predict, no_of_output_para, input_par, link, epoch,
                       units, tf):
    global graph
    with graph.as_default():

        dataset = pd.read_excel(link)

        # Check for an empty output column
        cols_out = dataset.columns[col_predict:col_predict + 1]
        for col in cols_out:
            if "Unnamed" in col:
                return 0

        X = dataset.iloc[:,
                         no_of_output_para + 1:dataset.values[0].size].values
        Y = dataset.iloc[:, col_predict].values
        # np.random.seed(0)

        X_train, X_test, y_train, y_test = train_test_split(X,
                                                            Y,
                                                            test_size=0.2,
                                                            random_state=0)

        sc = StandardScaler()
        X_train = sc.fit_transform(X_train)
        X_test = sc.transform(X_test)

        network = Input(input_par) >> Sigmoid(int(units / 10) + 1) >> Relu(1)
        optimizer = algorithms.Momentum(network,
                                        verbose=False,
                                        shuffle_data=False)

        optimizer.train(X_train, y_train, epochs=epoch)

        joblib.dump(optimizer, link + "-" + str(col_predict) + ".pkl")
Example #18
    def assert_invalid_step_values(self, step, initial_value,
                                   final_value, epochs):

        x_train, x_test, y_train, y_test = simple_classification()
        optimizer = algorithms.Momentum(
            [
                layers.Input(10),
                layers.Sigmoid(5),
                layers.Sigmoid(1),
            ],
            step=step,
            momentum=0.99,
            batch_size=None,
            verbose=False,
            nesterov=True,
        )

        step = self.eval(optimizer.step)
        self.assertAlmostEqual(step, initial_value)

        optimizer.train(x_train, y_train, x_test, y_test, epochs=epochs)

        step = self.eval(optimizer.step)
        self.assertAlmostEqual(step, final_value)
Example #19
def train_network(n_hidden, x_train, x_test, y_train, y_test, n_classes,
                  n_dimensionality):
    network = algorithms.Momentum(
        [
            layers.Input(n_dimensionality),  # input dimensionality
            layers.Relu(n_hidden),  # tunable hyperparameter
            layers.Softmax(n_classes),  # class output
        ],

        # Randomly shuffle dataset before each
        # training epoch.
        shuffle_data=True,

        # Do not show training progress in output
        verbose=False,
        step=0.001,
        batch_size=128,
        error='categorical_crossentropy',
    )
    network.train(x_train, y_train, x_test, y_test, epochs=100)

    # Calculate the categorical cross-entropy error between
    # predictions for x_test and the y_test values
    return network, network.prediction_error(x_test, y_test)
Example #20
network = layers.join(
    layers.parallel([
        # 3 categorical inputs, each embedded into 4 dimensions and
        # flattened: (batch_size, 3, 4) -> (batch_size, 12)
        layers.Input(3),
        layers.Embedding(n_unique_categories, 4),
        layers.Reshape(),
    ], [
        # 17 numerical inputs
        layers.Input(17),
    ]),

    # Concatenate (batch_size, 12) and (batch_size, 17)
    # into one matrix with shape (batch_size, 29)
    layers.Concatenate(),
    layers.Relu(128),
    layers.Relu(32) >> layers.Dropout(0.5),
    layers.Sigmoid(1),
)

optimizer = algorithms.Momentum(
    network,
    step=0.05,
    verbose=True,
    loss='binary_crossentropy',
    momentum=0.9,
    nesterov=True,

    # Apply L2 (weight decay) regularization
    # to prevent overfitting
    regularizer=algorithms.l2(0.01),
)

# Categorical input goes first, because the input layer
# for categorical matrices was defined first.
optimizer.train([x_train_cat, x_train_num],
                y_train, [x_test_cat, x_test_num],
                y_test,
                epochs=50)

y_predicted = optimizer.predict(x_test_cat, x_test_num)
accuracy = accuracy_score(y_test, y_predicted.round())
Example #21
network = algorithms.Momentum(
    [
        [
            [
                # 3 categorical inputs
                layers.Input(3),

                # Train an embedding matrix for the categorical inputs.
                # There are 18 unique categories (6 categories for each
                # of the 3 columns). The layer projects each category
                # into a 4-dimensional space, so the output shape from
                # the layer is (batch_size, 3, 4)
                layers.Embedding(n_unique_categories, 4),

                # Reshape (batch_size, 3, 4) to (batch_size, 12)
                layers.Reshape(),
            ],
            [
                # 17 numerical inputs
                layers.Input(17),
            ]
        ],

        # Concatenate (batch_size, 12) and (batch_size, 17)
        # into one matrix with shape (batch_size, 29)
        layers.Concatenate(),
        layers.Relu(128),
        layers.Relu(32) > layers.Dropout(0.5),
        layers.Sigmoid(1)
    ],
    step=0.2,
    verbose=True,
    momentum=0.9,
    nesterov=True,
    error='binary_crossentropy',

    # Apply the max-norm regularizer to prevent overfitting.
    # Maximum possible norm for any weight is specified by
    # the `max_norm` parameter.
    addons=[algorithms.MaxNormRegularization],
    max_norm=10,
)
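The shape bookkeeping in the comments above can be verified outside the network; a NumPy sketch of the embedding-reshape-concatenate arithmetic (random values stand in for trained parameters):

import numpy as np

batch_size, n_unique_categories = 8, 18

embedding = np.random.randn(n_unique_categories, 4)
categorical = np.random.randint(0, n_unique_categories, (batch_size, 3))
numerical = np.random.randn(batch_size, 17)

embedded = embedding[categorical]               # (8, 3, 4)
flat = embedded.reshape(batch_size, -1)         # (8, 12)
merged = np.concatenate([flat, numerical], axis=1)
assert merged.shape == (batch_size, 29)         # 12 + 17 = 29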
Example #22
SAVE_SPAN = 5

fundo_size = 150

esqYpos = 300 + (fundo_size / 2) - 5
dirYpos = 300 - (fundo_size / 2) + 5

nn = layers.join(
    layers.Input(4),
    layers.Sigmoid(12),
    layers.Sigmoid(8),
    layers.Softmax(3),
)

# out -> [0]-accelerate [1]-left [2]-right
otimizador = algorithms.Momentum(nn)


class MyGame(arcade.Window):
    """ Main application class. """
    def __init__(self, width, height):
        super().__init__(width, height)
        self.time_elapsed = 0.0
        self.distance_map = 0.0
        self.distance_car = 0.0
        self.maxDistance = 0.0
        self.last_checkUpX = 0.0
        self.resetCount = 1
        self.spdX_ = 0
        self.spdY_esq = 0
        self.spdY_dir = 0
Example #23
x_test_4d = x_test.reshape((10000, 1, 28, 28))

conv_autoencoder = algorithms.Momentum(
    [
        layers.Input((1, 28, 28)),
        layers.Convolution((16, 3, 3)) > layers.Relu(),
        layers.Convolution((16, 3, 3)) > layers.Relu(),
        layers.MaxPooling((2, 2)),
        layers.Convolution((32, 3, 3)) > layers.Relu(),
        layers.MaxPooling((2, 2)),
        layers.Reshape(),
        layers.Relu(128),
        layers.Relu(16),
        layers.Relu(128),
        layers.Relu(800),
        layers.Reshape((32, 5, 5)),
        layers.Upscale((2, 2)),
        layers.Convolution((16, 3, 3), border_mode='full') > layers.Relu(),
        layers.Upscale((2, 2)),
        layers.Convolution((16, 3, 3), border_mode='full') > layers.Relu(),
        layers.Convolution((1, 3, 3), border_mode='full') > layers.Sigmoid(),
        layers.Reshape(),
    ],
    verbose=True,
    step=0.1,
    momentum=0.99,
    shuffle_data=True,
    batch_size=128,
    error='rmse',
)
conv_autoencoder.architecture()
conv_autoencoder.train(x_train_4d, x_train, x_test_4d, x_test, epochs=100)
Example #24
decoder = layers.join(
    layers.Upscale((2, 2)),
    layers.Convolution((16, 3, 3), padding='full') > layers.Relu(),

    layers.Upscale((2, 2)),
    layers.Convolution((16, 3, 3), padding='full') > layers.Relu(),
    layers.Convolution((1, 3, 3), padding='full') > layers.Sigmoid(),

    layers.Reshape(),
)

conv_autoencoder = algorithms.Momentum(
    connection=encoder > decoder,
    verbose=True,
    step=0.1,
    momentum=0.99,
    shuffle_data=True,
    batch_size=64,
    error='binary_crossentropy',
)
conv_autoencoder.architecture()
conv_autoencoder.train(x_unlabeled_4d, x_unlabeled,
                       x_labeled_4d, x_labeled, epochs=10)

x_labeled_encoded = encoder.output(x_labeled_4d).eval()
x_unlabeled_encoded = encoder.output(x_unlabeled_4d).eval()

classifier_network = layers.join(
    layers.PRelu(512),
    layers.Dropout(0.25),
    layers.Softmax(10),
)
Example #25
x_train, x_test, y_train, y_test = model_selection.train_test_split(
    data.astype(np.float32),
    target.astype(np.float32),
    test_size=(1 / 7.)
)

network = algorithms.Momentum(
    [
        layers.Input(784),
        layers.Relu(500),
        layers.Relu(300),
        layers.Softmax(10),
    ],

    # Using categorical cross-entropy as a loss function.
    # It's suitable for classification with 3 or more classes.
    error='categorical_crossentropy',

    # Learning rate
    step=0.01,

    # Shows information about algorithm and
    # training progress in terminal
    verbose=True,

    # Randomly shuffles training dataset before every epoch
    shuffle_data=True,

    momentum=0.99,
    # Activates Nesterov momentum
    nesterov=True,
)
network.architecture()
network.train(x_train, y_train, x_test, y_test, epochs=20)
Example #26
target_scaler = OneHotEncoder()
target = mnist.target.reshape((-1, 1))
target = target_scaler.fit_transform(target).todense()

data = mnist.data / 255.
data = data - data.mean(axis=0)

x_train, x_test, y_train, y_test = model_selection.train_test_split(
    data.astype(np.float32), target.astype(np.float32), train_size=(6 / 7.))

network = layers.join(layers.Input(784), layers.Relu(25), layers.Softmax(10))
'''
network.architecture()
network.train(x_train, y_train, x_test, y_test, epochs=2)
'''
gd = algorithms.Momentum(network)
gd.batch_size = 128
scores = []
for i in range(10):
    gd.train(x_train, y_train, epochs=10)
    y_predicted = gd.predict(x_test).argmax(axis=1)
    y_test_labels = np.asarray(y_test.argmax(axis=1)).reshape(len(y_test))
    print(metrics.classification_report(y_test_labels, y_predicted))
    score = metrics.accuracy_score(y_test_labels, y_predicted)
    print("Validation accuracy: {:.2%}".format(score))
    scores.append(round(score, 4))
    print(scores)

hess = algorithms.Hessian(network)
hess.iter = 'momentum'
hess.train(x_train, y_train, epochs=1)
Example #27
target = mnist.target.reshape((-1, 1))
target = target_scaler.fit_transform(target).todense()

data = mnist.data / 255.
data = data - data.mean(axis=0)

x_train, x_test, y_train, y_test = model_selection.train_test_split(
    data.astype(np.float32), target.astype(np.float32), train_size=(6 / 7.))

network = algorithms.Momentum(
    [
        layers.Input(784),
        layers.Relu(500),
        layers.Relu(300),
        layers.Softmax(10),
    ],
    error='categorical_crossentropy',
    step=0.01,
    verbose=True,
    shuffle_data=True,
    momentum=0.99,
    nesterov=True,
)
network.architecture()
network.train(x_train, y_train, x_test, y_test, epochs=20)

y_predicted = network.predict(x_test).argmax(axis=1)
y_test = np.asarray(y_test.argmax(axis=1)).reshape(len(y_test))

print(metrics.classification_report(y_test, y_predicted))
score = metrics.accuracy_score(y_test, y_predicted)
print("Validation accuracy: {:.2%}".format(score))
Example #28
        predicted_image = predicted_image.reshape((28, 28))

        left_ax.imshow(real_image, cmap=plt.cm.binary)
        right_ax.imshow(predicted_image, cmap=plt.cm.binary)

    plt.show()


if __name__ == '__main__':
    autoencoder = algorithms.Momentum(
        [
            layers.Input(784),
            layers.GaussianNoise(mean=0.5, std=0.1),
            layers.Sigmoid(100),
            layers.Sigmoid(784),
        ],
        step=0.1,
        verbose=True,
        momentum=0.9,
        nesterov=True,
        loss='rmse',
    )

    print("Preparing data...")
    x_train, x_test = load_data()

    print("Training autoencoder...")
    autoencoder.train(x_train, x_train, x_test, x_test, epochs=40)

    visualize_reconstructions(autoencoder, x_test)
Example #29
        Relu(128),

        # 800 is a shape that we got after we reshaped our image in the
        # Reshape layer
        Relu(800),
        Reshape((5, 5, 32)),

        # Upscaling layer reverts changes from the max pooling layer
        Upscale((2, 2)),

        # Deconvolution (a.k.a Transposed Convolution) reverts
        # changes done by Convolution
        Deconvolution((3, 3, 16)) >> Relu(),
        Upscale((2, 2)),
        Deconvolution((3, 3, 16)) >> Relu(),
        Deconvolution((3, 3, 1)) >> Sigmoid())
    optimizer = algorithms.Momentum(
        network,
        step=0.02,
        momentum=0.9,
        batch_size=128,
        loss='rmse',
        shuffle_data=True,
        verbose=True,
        regularizer=algorithms.l2(0.01),
    )

    x_train_4d, x_test_4d = load_data()
    optimizer.train(x_train_4d, x_train_4d, x_test_4d, x_test_4d, epochs=1)
    visualize_reconstructions(x_test_4d, n_samples=6)
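The Relu(800) and Reshape((5, 5, 32)) pair mirrors the encoder's shape arithmetic; a sketch tracing the feature-map sizes, assuming 28x28 inputs and unpadded 3x3 convolutions as in the matching convolutional examples above:

size = 28
size -= 2       # Convolution (3, 3): 26
size -= 2       # Convolution (3, 3): 24
size //= 2      # MaxPooling (2, 2):  12
size -= 2       # Convolution (3, 3): 10
size //= 2      # MaxPooling (2, 2):   5

assert size * size * 32 == 800  # Reshape((5, 5, 32)) flattens to 800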
Example #30
optimizer = algorithms.Momentum(
    [
        Input((28, 28, 1)),
        Convolution((3, 3, 32)) >> BatchNorm() >> Relu(),
        Convolution((3, 3, 48)) >> BatchNorm() >> Relu(),
        MaxPooling((2, 2)),
        Convolution((3, 3, 64)) >> BatchNorm() >> Relu(),
        MaxPooling((2, 2)),
        Reshape(),
        Linear(1024) >> BatchNorm() >> Relu(),
        Softmax(10),
    ],

    # Using categorical cross-entropy as a loss function.
    # It's suitable for classification with 3 or more classes.
    loss='categorical_crossentropy',

    # Mini-batch size. It defines how many samples are propagated
    # through the network at once. During training, weights are
    # updated after every mini-batch propagation.
    # Note: when the number of training samples is not divisible
    # by 128, the last mini-batch will have fewer than 128 samples.
    batch_size=128,

    # Step == Learning rate
    # The step decay algorithm decreases the learning
    # step monotonically after each weight update.
    step=algorithms.step_decay(
        initial_value=0.05,
        # This parameter controls how often the step is reduced.
        # The higher the value, the slower the step decreases.
        reduction_freq=500,
    ),

    # Shows information about algorithm and
    # training progress in terminal
    verbose=True,

    # Randomly shuffles training dataset before every epoch
    shuffle_data=True,
)
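NeuPy documents step_decay as an asymptotic schedule; a sketch of the update rule under that assumption, using the initial_value and reduction_freq configured above:

def step_decay(initial_value, reduction_freq, iteration):
    # The step halves after the first `reduction_freq` updates,
    # then keeps shrinking: 0.05 -> 0.025 -> ~0.0167 -> ...
    return initial_value / (1 + iteration / reduction_freq)

for iteration in (0, 500, 1000, 5000):
    print(iteration, round(step_decay(0.05, 500, iteration), 4))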