Example #1
import io
import base64

import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split

from neupy import algorithms
from neupy.layers import *


# make_training_set, vectorize and add_padding are helpers defined elsewhere
# in the source project; they are not part of NeuPy.
def train_network(num_pages=1):
	training_set, vectorizer = vectorize(make_training_set(num_pages))
	examples = training_set[:, :-1]
	labels = training_set[:, -1:]
	new_examples = np.array([example[0] for example in examples])
	new_examples = add_padding(new_examples)
	training_examples, test_examples, training_labels, test_labels = train_test_split(new_examples, labels, test_size=0.4)
	input_size = len(new_examples[0])
	scale = int(input_size/10 * (2/3))+1
	fourth = int(scale/4)
	thirds = int(scale/3)

	concat_noisynormdrop_one = Concatenate() >> GaussianNoise(std=1) >> BatchNorm() >> Dropout(proba=.6)
	concat_noisynormdrop_two = Concatenate() >> GaussianNoise(std=1) >> BatchNorm() >> Dropout(proba=.3)
	concat_noisynormdrop_three = Concatenate() >> GaussianNoise(std=1) >> BatchNorm() >> Dropout(proba=.3)

	sub_tri = Elu(fourth) >> Sigmoid(fourth)
	sub_tri_leaky_relu = LeakyRelu(thirds) >> LeakyRelu(thirds) >> LeakyRelu(thirds)

	noisy_para_seq = (
		Input(input_size) >>
		Linear(scale) >>
		(Tanh(scale) | Elu(scale) | sub_tri_leaky_relu | sub_tri) >>
		concat_noisynormdrop_one >>
		(Tanh(scale) >> Tanh(scale) | Elu(scale) >> Elu(scale) | Sigmoid(fourth) >> Sigmoid(fourth)) >>
		concat_noisynormdrop_two >>
		(Tanh(scale) | Elu(scale) | LeakyRelu(scale) | Sigmoid(scale)) >>
		concat_noisynormdrop_three >>
		Sigmoid(1)
	)


	optimizer = algorithms.Adam(
		noisy_para_seq,
		batch_size=64,
		shuffle_data=True,
		loss='binary_crossentropy',
		verbose=True,
		regularizer=algorithms.l2(0.001),
		step=algorithms.step_decay(
			initial_value=0.10,
			reduction_freq=10,
		),
	)

	optimizer.train(training_examples, training_labels, test_examples, test_labels, epochs=200)
	prediction = [1 if value > .5 else 0 for value in optimizer.predict(test_examples)]
	accuracy = np.mean(prediction == test_labels.ravel())
	print(f'{accuracy * 100:.2f}%')
	optimizer.plot_errors(show=False)
	image_buffer = io.BytesIO()
	plt.savefig(image_buffer)
	image_buffer.seek(0)
	encoded = base64.b64encode(image_buffer.read())

	return optimizer, vectorizer, [new_examples[0]], encoded
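
A minimal usage sketch for the function above: the unpacking order follows the return statement, while the output filename and the choice to write the decoded image (PNG by matplotlib's default) to disk are assumptions about the caller.

import base64

optimizer, vectorizer, sample_example, encoded_plot = train_network(num_pages=2)

# The last return value is a base64-encoded training-error plot;
# decode it back into raw bytes before writing it to a file.
with open("training_errors.png", "wb") as image_file:
    image_file.write(base64.b64decode(encoded_plot))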
Example #2
 def test_simple_adam(self):
     x_train, x_test, y_train, y_test = simple_classification()
     mnet = algorithms.Adam(
         (10, 20, 1),
         step=0.1,
         verbose=True,
         epsilon=1e-4,
         beta1=0.9,
         beta2=0.99,
     )
     mnet.train(x_train, y_train, x_test, y_test, epochs=200)
     self.assertGreater(0.2, mnet.validation_errors.last())
Example #3
 def test_adam(self):
     x_train, x_test, y_train, y_test = simple_classification()
     optimizer = algorithms.Adam(
         self.network,
         step=0.1,
         verbose=False,
         epsilon=1e-4,
         beta1=0.9,
         beta2=0.99,
     )
     optimizer.train(x_train, y_train, x_test, y_test, epochs=200)
     self.assertGreater(0.2, optimizer.errors.valid[-1])
Example #4
 def test_simple_adam(self):
     x_train, _, y_train, _ = simple_classification()
     mnet = algorithms.Adam(
         (10, 20, 1),
         step=15.,
         batch_size='full',
         verbose=False,
         epsilon=1e-8,
         beta1=0.9,
         beta2=0.999,
     )
     mnet.train(x_train, y_train, epochs=100)
     self.assertAlmostEqual(0.06, mnet.errors.last(), places=2)
Example #5
	def select_algorithm(self, algorithm, options=None):
		try:
			# LevenbergMarquardt is created up front as the default optimizer.
			self.network = algorithms.LevenbergMarquardt(self.layers)
			opt = options
			if opt is not None:
				print(opt[1])
			print("Selected optimizer: " + str(algorithm))
		except RecursionError:
			print("Recursion problem")
			return None

		if algorithm == 'GradientDescent':
			self.network = algorithms.GradientDescent(self.layers)
		elif algorithm == 'LevenbergMarquardt':
			self.network = algorithms.LevenbergMarquardt(connection=self.layers, mu=opt[0], mu_update_factor=opt[1])
		elif algorithm == 'Adam':
			self.network = algorithms.Adam(self.layers)
		elif algorithm == 'QuasiNewton':
			self.network = algorithms.QuasiNewton(self.layers)
		elif algorithm == 'Quickprop':
			self.network = algorithms.Quickprop(self.layers)
		elif algorithm == 'MinibatchGradientDescent':
			self.network = algorithms.MinibatchGradientDescent(self.layers)
		elif algorithm == 'ConjugateGradient':
			self.network = algorithms.ConjugateGradient(self.layers)
		elif algorithm == 'Hessian':
			self.network = algorithms.Hessian(self.layers)
		elif algorithm == 'HessianDiagonal':
			self.network = algorithms.HessianDiagonal(self.layers)
		elif algorithm == 'Momentum':
			self.network = algorithms.Momentum(self.layers)
		elif algorithm == 'RPROP':
			self.network = algorithms.RPROP(self.layers)
		elif algorithm == 'IRPROPPlus':
			self.network = algorithms.IRPROPPlus(self.layers)
		elif algorithm == 'Adadelta':
			self.network = algorithms.Adadelta(self.layers)
		elif algorithm == 'Adagrad':
			self.network = algorithms.Adagrad(self.layers)
		elif algorithm == 'RMSProp':
			self.network = algorithms.RMSProp(self.layers)
		elif algorithm == 'Adamax':
			self.network = algorithms.Adamax(self.layers)
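
A hypothetical call for the method above; model stands in for an instance of the class that defines select_algorithm and already has its layers attribute set, and the option values simply follow the (mu, mu_update_factor) order used in the LevenbergMarquardt branch.

# `model` is assumed to be an instance of the class shown above.
model.select_algorithm('LevenbergMarquardt', options=[0.01, 1.2])
model.select_algorithm('Adam')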
Example #6
    network = algorithms.Adam(
        [
            Input((32, 32, 3)),

            Convolution((3, 3, 32)) >> Relu(),
            Convolution((3, 3, 32)) >> Relu(),
            MaxPooling((2, 2)),

            Convolution((3, 3, 64)) >> Relu(),
            Convolution((3, 3, 64)) >> Relu(),
            MaxPooling((2, 2)),

            Reshape(),
            Relu(256) >> Dropout(0.5),
            Softmax(10),
        ],

        step=algorithms.step_decay(
            initial_value=0.001,
            # Parameter controls step reduction frequency. The larger
            # the value the slower step parameter decreases. Step will
            # be reduced after every mini-batch update. In the training
            # data we have 500 mini-batches.
            reduction_freq=5 * 500,
        ),
        regularizer=algorithms.l2(0.01),

        loss='categorical_crossentropy',
        batch_size=100,
        shuffle_data=True,
        verbose=True,
    )
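
To make the reduction_freq comment above concrete, the snippet below sketches an inverse-time decay of the kind step_decay performs; the exact formula is an assumption rather than a quote of NeuPy's source, and the 500 mini-batches per epoch come from the comment.

initial_value = 0.001
reduction_freq = 5 * 500  # decay horizon measured in mini-batch updates

def decayed_step(update_index):
    # Inverse-time decay: the step is halved once the number of
    # mini-batch updates reaches reduction_freq.
    return initial_value / (1 + update_index / reduction_freq)

print(decayed_step(0))        # 0.001 at the first update
print(decayed_step(5 * 500))  # ~0.0005 after roughly 5 epochs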
Example #7
    layers.join(
        layers.Input(58),
        layers.Softmax(22),
        layers.Softmax(1),
    ),
    layers.join(
        layers.Input(58),
        layers.Relu(60),
        layers.Relu(40),
        layers.Softmax(22),
        layers.Softmax(1),
    ),
    layers.join(
        layers.Input(58),
        layers.Tanh(12),
        layers.Tanh(25),
        layers.Tanh(1),
    ),
])

network
gdnet = algorithms.Adam(network, verbose=True)
print("Start moe training")

gdnet.train(x_train, y_train, epochs=500)

gresult = gdnet.predict(x_test)

gerror = estimators.rmse(gresult, y_test)
print(gerror)
Example #8
    y_train, y_test = one_hot_encoder(y_train, y_test)

    network = algorithms.Adam(
        [
            Input((32, 32, 3)),
            Convolution((3, 3, 32)) > Relu(),
            Convolution((3, 3, 32)) > Relu(),
            MaxPooling((2, 2)),
            Dropout(0.2),
            Convolution((3, 3, 64)) > Relu(),
            Convolution((3, 3, 64)) > Relu(),
            MaxPooling((2, 2)),
            Dropout(0.2),
            Reshape(),
            Relu(512) > Dropout(0.5),
            Softmax(10),
        ],
        step=0.001,
        batch_size=100,
        error='categorical_crossentropy',
        shuffle_data=True,
        verbose=True,

        # Parameter controls step reduction frequency. The larger
        # the value the slower step parameter decreases.
        # Step will be reduced after every mini-batch update. In the
        # training data we have 500 mini-batches.
        reduction_freq=5 * 500,
        addons=[algorithms.StepDecay],
    )
    network.architecture()
    network.train(x_train, y_train, x_test, y_test, epochs=30)
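
one_hot_encoder at the top of this example is a project helper that is not shown; the version below is only a plausible stand-in that turns integer class labels into one-hot rows, which is what categorical_crossentropy expects.

import numpy as np

def one_hot_encoder(y_train, y_test, n_classes=10):
    # Hypothetical stand-in for the helper used above: maps integer
    # labels (0..9 for CIFAR-10) onto one-hot encoded rows.
    def encode(labels):
        labels = np.asarray(labels).ravel().astype(int)
        encoded = np.zeros((labels.size, n_classes))
        encoded[np.arange(labels.size), labels] = 1
        return encoded
    return encode(y_train), encode(y_test)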
Example #9
File: train.py  Project: itdxer/deeplab
        args.batch_size,
        use_augmentation=True,
    )

    print("Loading validation data...")
    validation_iterator = create_data_iterator(
        VALIDATION_SET,
        args.image_size,
        batch_size=60,
        use_augmentation=False,
    )

    optimizer = algorithms.Adam(
        deeplab,
        error='categorical_crossentropy',
        step=0.00001,
        verbose=True,
        addons=[algorithms.WeightDecay],
        decay_rate=0.0001,
    )

    for i in range(args.epochs):
        print("Epoch #{}".format(i + 1))

        for x_batch, y_batch in training_iterator():
            x_batch = resnet50.predict(x_batch)
            optimizer.train(x_batch, y_batch, epochs=1, summary='inline')

        print("Start validation")
        val_images, val_annotations = next(validation_iterator())
        segmentation = deeplab.predict(resnet50.predict(val_images))
        confusion = get_confusion_matrix(val_annotations, segmentation)
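
get_confusion_matrix is another helper from the itdxer/deeplab project that this excerpt does not include; the sketch below shows one plausible way to build a per-pixel confusion matrix with scikit-learn, and the class count is an assumption.

import numpy as np
from sklearn.metrics import confusion_matrix

def get_confusion_matrix(annotations, segmentation, n_classes=21):
    # Hypothetical helper: collapse per-pixel class scores into label
    # maps and count true-vs-predicted pairs across all pixels.
    true_labels = np.argmax(annotations, axis=-1).ravel()
    predicted_labels = np.argmax(segmentation, axis=-1).ravel()
    return confusion_matrix(true_labels, predicted_labels, labels=list(range(n_classes)))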