Example 1
    def test_l2_regularization(self):
        network = layers.join(
            layers.Input(10),
            layers.Relu(5, weight=2, bias=2),
        )
        regularizer = algorithms.l2(0.01, exclude=['bias'])

        regularization_cost = self.eval(regularizer(network))
        self.assertAlmostEqual(regularization_cost, 2.0)
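
The expected value 2.0 can be reproduced by hand: the Relu(5) layer attached to Input(10) has a single 10x5 weight matrix, every entry is initialised to 2, and the bias vector is excluded. Assuming the penalty is decay_rate times the sum of squared parameters (which is what the assertion implies), the cost works out as follows:

# Hand check of the expected regularization cost (a sketch, not library code).
decay_rate = 0.01
n_weights = 10 * 5            # 10 inputs fully connected to 5 units
weight_value = 2
expected_cost = decay_rate * n_weights * weight_value ** 2
print(expected_cost)          # 2.0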
Example 2
import base64
import io

import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split

from neupy import algorithms
from neupy.layers import Input, Linear, Tanh, Elu, Sigmoid, LeakyRelu
from neupy.layers import Concatenate, GaussianNoise, BatchNorm, Dropout


def train_network(num_pages=1):
	# vectorize, make_training_set and add_padding are project-specific
	# helpers assumed to be defined elsewhere in this module.
	training_set, vectorizer = vectorize(make_training_set(num_pages))
	examples = training_set[:, :-1]
	labels = training_set[:, -1:]
	new_examples = np.array([example[0] for example in examples])
	new_examples = add_padding(new_examples)
	training_examples, test_examples, training_labels, test_labels = train_test_split(
		new_examples, labels, test_size=0.4)
	input_size = len(new_examples[0])
	scale = int(input_size / 10 * (2 / 3)) + 1
	fourth = int(scale / 4)
	thirds = int(scale / 3)

	concat_noisynormdrop_one = Concatenate() >> GaussianNoise(std=1) >> BatchNorm() >> Dropout(proba=0.6)
	concat_noisynormdrop_two = Concatenate() >> GaussianNoise(std=1) >> BatchNorm() >> Dropout(proba=0.3)
	concat_noisynormdrop_three = Concatenate() >> GaussianNoise(std=1) >> BatchNorm() >> Dropout(proba=0.3)

	sub_tri = Elu(fourth) >> Sigmoid(fourth)
	sub_tri_leaky_relu = LeakyRelu(thirds) >> LeakyRelu(thirds) >> LeakyRelu(thirds)

	noisy_para_seq = (
		Input(input_size)
		>> Linear(scale)
		>> (Tanh(scale) | Elu(scale) | sub_tri_leaky_relu | sub_tri)
		>> concat_noisynormdrop_one
		>> (Tanh(scale) >> Tanh(scale) | Elu(scale) >> Elu(scale) | Sigmoid(fourth) >> Sigmoid(fourth))
		>> concat_noisynormdrop_two
		>> (Tanh(scale) | Elu(scale) | LeakyRelu(scale) | Sigmoid(scale))
		>> concat_noisynormdrop_three
		>> Sigmoid(1)
	)


	optimizer = algorithms.Adam(
		noisy_para_seq,
		batch_size=64,
		shuffle_data=True,
		loss='binary_crossentropy',
		verbose=True,
		regularizer=algorithms.l2(0.001),
		step=algorithms.step_decay(
			initial_value=0.10,
			reduction_freq=10,
		),
	)

	optimizer.train(training_examples, training_labels, test_examples, test_labels, epochs=200)
	prediction = [1 if value > 0.5 else 0 for value in optimizer.predict(test_examples)]
	matches = [1 if prediction[i] == test_labels[i] else 0 for i in range(len(prediction))]
	accuracy = matches.count(1) / len(prediction)
	print(f'{accuracy * 100:.2f}%')
	# Render the error plot and encode it as base64 so it can be
	# returned alongside the trained model.
	optimizer.plot_errors(show=False)
	image_buffer = io.BytesIO()
	plt.savefig(image_buffer)
	image_buffer.seek(0)
	encoded = base64.b64encode(image_buffer.read())

	return optimizer, vectorizer, [new_examples[0]], encoded
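
In the network definition above, `>>` chains layers sequentially while `|` runs layers in parallel over the same input; the parallel branches are merged back together by the `Concatenate()` layer that follows them. A minimal sketch of the same pattern (the layer sizes below are illustrative and not taken from the function above):

from neupy.layers import Input, Relu, Tanh, Concatenate, Sigmoid

# Two parallel branches over the same 20-dimensional input,
# concatenated back into a 10 + 10 = 20 dimensional vector.
parallel_block = (
	Input(20)
	>> (Relu(10) | Tanh(10))
	>> Concatenate()
	>> Sigmoid(1)
)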
Example 3
    def test_training_with_l2_regularization(self):
        x_train, x_test, y_train, y_test = simple_classification()
        mnet = algorithms.Momentum(
            [layers.Input(10),
             layers.Sigmoid(20),
             layers.Sigmoid(1)],
            step=0.35,
            momentum=0.99,
            batch_size=None,
            verbose=False,
            nesterov=True,
            regularizer=algorithms.l2(0.001),
        )
        mnet.train(x_train, y_train, x_test, y_test, epochs=40)
        self.assertGreater(0.15, mnet.errors.valid[-1])
Example 4
        Relu(128),

        # 800 = 5 * 5 * 32, the flattened size of the feature map that
        # the Reshape layer below restores to (5, 5, 32)
        Relu(800),
        Reshape((5, 5, 32)),

        # The Upscale layer reverts the downsampling done by the max pooling layer
        Upscale((2, 2)),

        # Deconvolution (a.k.a. transposed convolution) reverts the
        # changes done by a Convolution layer
        Deconvolution((3, 3, 16)) >> Relu(),
        Upscale((2, 2)),
        Deconvolution((3, 3, 16)) >> Relu(),
        Deconvolution((3, 3, 1)) >> Sigmoid())
    optimizer = algorithms.Momentum(
        network,
        step=0.02,
        momentum=0.9,
        batch_size=128,
        loss='rmse',
        shuffle_data=True,
        verbose=True,
        regularizer=algorithms.l2(0.01),
    )

    x_train_4d, x_test_4d = load_data()
    optimizer.train(x_train_4d, x_train_4d, x_test_4d, x_test_4d, epochs=1)
    visualize_reconstructions(x_test_4d, n_samples=6)
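
The snippet above shows only the decoder half of a convolutional autoencoder: each Upscale/Deconvolution pair undoes a MaxPooling/Convolution pair on the encoder side. Purely to illustrate that mirroring, and assuming 28x28 single-channel inputs, the matching encoder could look like the sketch below (this encoder is not part of the original snippet):

from neupy.layers import Input, Convolution, Relu, MaxPooling, Reshape

encoder = (
    Input((28, 28, 1))
    >> Convolution((3, 3, 16)) >> Relu()
    >> Convolution((3, 3, 16)) >> Relu()
    >> MaxPooling((2, 2))
    >> Convolution((3, 3, 32)) >> Relu()
    >> MaxPooling((2, 2))
    >> Reshape()  # flattens the (5, 5, 32) feature map into 800 values
)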
Example 5
    Relu(256),
    Relu(32 * 5 * 5),
    Reshape((5, 5, 32)),
    Upscale((2, 2)),
    Deconvolution((3, 3, 16)) >> Relu(),
    Upscale((2, 2)),
    Deconvolution((3, 3, 16)) >> Relu(),
    Deconvolution((3, 3, 1)) >> Sigmoid(),
)

conv_autoencoder = algorithms.Momentum(
    network=(encoder >> decoder),
    loss='rmse',
    step=0.02,
    batch_size=128,
    regularizer=algorithms.l2(0.001),
    shuffle_data=True,
    verbose=True,
)
conv_autoencoder.train(
    x_unlabeled_4d,
    x_unlabeled_4d,
    x_labeled_4d,
    x_labeled_4d,
    epochs=1,
)

# To speed up training of the upper layers, we generate the encoder's
# output once. This way the encoded inputs don't have to be regenerated
# on every epoch.
x_labeled_encoded = encoder.predict(x_labeled_4d)
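
With the labeled images encoded once, the upper layers can then be trained directly on those fixed codes instead of on raw images. A minimal sketch of that second training stage (the classifier architecture and the y_labeled variable are assumptions for illustration; only x_labeled_encoded comes from the snippet above):

from neupy import algorithms
from neupy.layers import Input, Relu, Softmax

# Small classifier trained on the precomputed encoder outputs.
classifier = Input(x_labeled_encoded.shape[1]) >> Relu(128) >> Softmax(10)

classifier_optimizer = algorithms.Momentum(
    classifier,
    step=0.01,
    batch_size=128,
    loss='categorical_crossentropy',
    verbose=True,
)
classifier_optimizer.train(x_labeled_encoded, y_labeled, epochs=10)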
Example 6
    def test_l2_repr(self):
        l2_repr = repr(algorithms.l2(0.01, exclude=['bias']))
        self.assertEqual(l2_repr, "l2(0.01, exclude=['bias'])")

        l2_repr = repr(algorithms.l2(decay_rate=0.01, exclude=['bias']))
        self.assertEqual(l2_repr, "l2(decay_rate=0.01, exclude=['bias'])")