Code Example #1
File: utils.py Project: LiuFang816/SALSTM_py_data
def reproducible_network_train(seed=0, epochs=500, **additional_params):
    """
    Train a Gradient Descent based neural network on the XOR
    problem in a reproducible way and return the trained network.

    Parameters
    ----------
    seed : int
        Random State seed number for reproducibility. Defaults to ``0``.
    epochs : int
        Number of epochs for training. Defaults to ``500``.
    **additional_params
        Additional parameters for the neural network.

    Returns
    -------
    GradientDescent instance
        Returns trained network.
    """
    np.random.seed(seed)
    network = algorithms.GradientDescent(
        connection=[
            layers.Input(2),
            layers.Tanh(5),
            layers.Tanh(1),
            StepOutput(),
        ],
        **additional_params
    )
    network.train(xor_input_train, xor_target_train, epochs=epochs)
    return network
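
The xor_input_train / xor_target_train fixtures are defined elsewhere in the test module. A minimal sketch of plausible fixtures and a call, assuming bipolar XOR data and the usual neupy imports (the array values are illustrative, not the originals):

import numpy as np
from neupy import algorithms, layers
from neupy.layers import StepOutput

# Hypothetical bipolar XOR patterns matching the Tanh/StepOutput output range
xor_input_train = np.array([[-1, -1], [-1, 1], [1, -1], [1, 1]])
xor_target_train = np.array([[1], [-1], [-1], [1]])

network = reproducible_network_train(seed=0, epochs=500, step=0.3)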
Code Example #2
def reproducible_network_train(seed=0, epochs=500, **additional_params):
    """
    Train a Gradient Descent based neural network on the XOR
    problem in a reproducible way and return the trained network.

    Parameters
    ----------
    seed : int
        Random State seed number for reproducibility. Defaults to ``0``.
    epochs : int
        Number of epochs for training. Defaults to ``500``.
    **additional_params
        Additional parameters for the neural network.

    Returns
    -------
    GradientDescent instance
        Returns trained network.
    """
    environment.reproducible(seed)

    xavier_normal = init.XavierNormal()
    tanh_weight1 = xavier_normal.sample((2, 5), return_array=True)
    tanh_weight2 = xavier_normal.sample((5, 1), return_array=True)

    network = algorithms.GradientDescent(
        connection=[
            layers.Input(2),
            layers.Tanh(5, weight=tanh_weight1),
            layers.Tanh(1, weight=tanh_weight2),
        ],
        batch_size='all',
        **additional_params
    )
    network.train(xor_input_train, xor_target_train, epochs=epochs)
    return network
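
Note: environment.reproducible(seed) fixes the global random state, and sampling the Xavier weights up front with return_array=True pins the layer initialization explicitly, so this variant stays deterministic even if the backend would otherwise draw its initial weights differently.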
Code Example #3
File: test_simple_step_min.py Project: disc5/neupy
 def setUp(self):
     super(LearningRateUpdatesTestCase, self).setUp()
     self.first_step = 0.3
     self.connection = [
         layers.Tanh(2),
         layers.Tanh(3),
         layers.StepOutput(1, output_bounds=(-1, 1))
     ]
Code Example #4
    def test_gd(self):
        x_train, _, y_train, _ = simple_classification()

        network = algorithms.GradientDescent(
            layers.Input(10) > layers.Tanh(20) > layers.Tanh(1),
            step=0.3,
            verbose=False)
        network.train(x_train, y_train, epochs=500)
        self.assertAlmostEqual(network.errors.last(), 0.014, places=3)
Code Example #5
    def test_gd(self):
        environment.reproducible()
        x_train, _, y_train, _ = simple_classification()

        network = algorithms.BaseGradientDescent(
            layers.Input(10) > layers.Tanh(20) > layers.Tanh(1),
            step=0.1,
            verbose=False)
        network.train(x_train, y_train, epochs=100)
        self.assertLess(network.errors.last(), 0.05)
Code Example #6
 def test_that_alg_works(self):
     network = algorithms.GradientDescent(
         [
             layers.Input(2),
             layers.Tanh(3),
             layers.Tanh(1),
         ],
         step=0.3,
         decay_rate=0.0001,
         addons=[algorithms.WeightDecay])
     network.train(xor_input_train, xor_target_train, epochs=500)
     self.assertAlmostEqual(network.errors.last(), 0, places=2)
Code Example #7
 def test_that_alg_works(self):
     network = algorithms.GradientDescent(
         [
             layers.Tanh(2),
             layers.Tanh(3),
             layers.StepOutput(1, output_bounds=(-1, 1))
         ],
         step=0.3,
         zero_weight=20,
         addons=[algorithms.WeightElimination])
     network.train(xor_input_train, xor_target_train, epochs=350)
     self.assertAlmostEqual(network.errors.last(), 0, places=2)
Code Example #8
 def test_that_alg_works(self):
     network = algorithms.GradientDescent(
         [
             layers.Input(2),
             layers.Tanh(3),
             layers.Tanh(1),
         ],
         step=0.3,
         batch_size='all',
         zero_weight=20,
         addons=[algorithms.WeightElimination])
     network.train(xor_input_train, xor_target_train, epochs=350)
     self.assertAlmostEqual(network.errors.last(), 0, places=2)
Code Example #9
File: test_errdiff.py Project: disc5/neupy
 def test_errdiff(self):
     network = algorithms.GradientDescent(
         [
             layers.Tanh(2),
             layers.Tanh(3),
             layers.StepOutput(1, output_bounds=(-1, 1))
         ],
         step=0.3,
         update_for_smaller_error=1.05,
         update_for_bigger_error=0.7,
         error_difference=1.04,
         addons=[algorithms.ErrDiffStepUpdate])
     network.train(xor_input_train, xor_target_train, epochs=200)
     self.assertAlmostEqual(network.errors.last(), 0, places=5)
Code Example #10
File: test_gd.py Project: zeroyou/neupy
    def test_network_initializations(self):
        possible_networks = (
            # as a list
            [layers.Input(2), layers.Sigmoid(3), layers.Tanh(1)],

            # as forward sequence with inline operators
            layers.Input(2) > layers.Relu(10) > layers.Tanh(1),
            layers.Input(2) >> layers.Relu(10) >> layers.Tanh(1),
        )

        for i, network in enumerate(possible_networks, start=1):
            optimizer = algorithms.GradientDescent(network)
            message = "[Test #{}] Network: {}".format(i, network)
            self.assertEqual(len(optimizer.network.layers), 3, msg=message)
Code Example #11
 def test_minibatch_gd(self):
     x_train, _, y_train, _ = simple_classification()
     compare_networks(
         # Test classes
         algorithms.GradientDescent,
         partial(algorithms.MinibatchGradientDescent, batch_size=1),
         # Test data
         (x_train, y_train),
         # Network configurations
         connection=(layers.Input(10) > layers.Tanh(20) > layers.Tanh(1)),
         step=0.1,
         shuffle_data=True,
         verbose=False,
         # Test configurations
         epochs=40,
         show_comparison_plot=False)
Code Example #12
 def test_simple_learning_rate_minimization(self):
     first_step = 0.3
     network = algorithms.GradientDescent(
         [
             layers.Input(2),
             layers.Tanh(3),
             layers.Tanh(1),
         ],
         step=first_step,
         reduction_freq=50,
         addons=[algorithms.StepDecay])
     network.train(xor_input_train, xor_target_train, epochs=100)
     self.assertAlmostEqual(
         network.variables.step.get_value(),
         asfloat(first_step / 3),
         places=5,
     )
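
The expected value follows from StepDecay's schedule, which in the neupy versions these tests target reduces the step as step / (1 + epoch / reduction_freq): after 100 epochs with reduction_freq=50 that gives 0.3 / (1 + 100 / 50) = 0.1, i.e. first_step / 3.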
Code Example #13
    def test_leak_step_adaptation(self):
        compare_networks(
            # Test classes
            algorithms.GradientDescent,
            partial(
                algorithms.GradientDescent,
                leak_size=0.05,
                alpha=0.05,
                beta=5,
                addons=[algorithms.LeakStepAdaptation]
            ),

            # Test data
            (even_input_train, even_target_train),

            # Network configurations
            connection=[
                layers.Sigmoid(2),
                layers.Tanh(3),
                layers.Output(1)
            ],
            step=0.1,
            verbose=False,
            shuffle_data=True,
            epochs=30,
            # show_comparison_plot=True,
        )
Code Example #14
 def test_search_then_converge(self):
     network = algorithms.GradientDescent(
         [
             layers.Input(2),
             layers.Tanh(3),
             layers.Tanh(1),
         ],
         step=0.3,
         reduction_freq=50,
         rate_coefitient=0.2,
         addons=[algorithms.SearchThenConverge])
     network.train(xor_input_train, xor_target_train, epochs=6)
     self.assertAlmostEqual(
         network.variables.step.get_value(),
         0.18,
         places=5,
     )
Code Example #15
    def test_inline_definition(self):
        network = layers.Input(2) >> layers.Relu(10) >> layers.Tanh(1)
        self.assertShapesEqual(network.input_shape, (None, 2))
        self.assertShapesEqual(network.output_shape, (None, 1))

        input_value = asfloat(np.random.random((10, 2)))
        output_value = self.eval(network.output(input_value))
        self.assertEqual(output_value.shape, (10, 1))
Code Example #16
 def test_search_then_converge(self):
     network = algorithms.GradientDescent(
         [
             layers.Tanh(2),
             layers.Tanh(3),
             layers.StepOutput(1, output_bounds=(-1, 1))
         ],
         step=0.3,
         epochs_step_minimizator=50,
         rate_coefitient=0.2,
         addons=[algorithms.SearchThenConverge])
     network.train(xor_input_train, xor_target_train, epochs=6)
     self.assertAlmostEqual(
         network.variables.step.get_value(),
         0.18,
         places=5,
     )
Code Example #17
 def Initialize_Connection(self):
     neuronsLayers = self.NeuronsInEveryLayer
     activationFsLayers = self.ActivationFunctionInEveryLayer
     connectionInit = [layerNeupy.Input(neuronsLayers[0])]
     for i in range(1, len(self.NeuronsInEveryLayer)):
         if activationFsLayers[i] == 'logsig':
             connectionInit.append(layerNeupy.Sigmoid(neuronsLayers[i]))
         elif activationFsLayers[i] == 'tansig':
             connectionInit.append(layerNeupy.Tanh(neuronsLayers[i]))
         else:
             connectionInit.append(layerNeupy.Linear(neuronsLayers[i]))
     return connectionInit
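
A hedged usage sketch for the method above: the enclosing class is not part of the snippet, so SimpleNamespace stands in for it, the attribute values are hypothetical, and the layerNeupy alias is assumed to be "from neupy import layers as layerNeupy":

from types import SimpleNamespace

# Hypothetical configuration; index 0 describes the input layer,
# so its activation entry is never read.
cfg = SimpleNamespace(
    NeuronsInEveryLayer=[4, 10, 1],
    ActivationFunctionInEveryLayer=[None, 'tansig', 'logsig'],
)
connection = Initialize_Connection(cfg)  # call the function with cfg as self
# -> [Input(4), Tanh(10), Sigmoid(1)]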
Code Example #18
File: test_errdiff.py Project: degerli/neupy
    def test_errdiff(self):
        initial_step = 0.3
        network = algorithms.GradientDescent(
            [
                layers.Input(2),
                layers.Tanh(3),
                layers.Tanh(1),
            ],
            batch_size='all',
            step=initial_step,
            update_for_smaller_error=1.05,
            update_for_bigger_error=0.7,
            error_difference=1.04,
            addons=[algorithms.ErrDiffStepUpdate]
        )
        network.train(xor_input_train, xor_target_train, epochs=200)

        self.assertNotEqual(self.eval(network.variables.step), initial_step)
        self.assertAlmostEqual(network.errors.last(), 0, places=4)
Code Example #19
File: test_connections.py Project: webdiscover/neupy
    def test_connection_initializations(self):
        possible_connections = (
            (2, 3, 1),

            # as a list
            [layers.Input(2),
             layers.Sigmoid(3),
             layers.Tanh(1)],

            # as forward sequence with inline operators
            layers.Input(2) > layers.Relu(10) > layers.Tanh(1),
            layers.Input(2) >> layers.Relu(10) >> layers.Tanh(1),

            # as backward sequence with inline operators
            layers.Tanh(1) < layers.Relu(10) < layers.Input(2),
            layers.Tanh(1) << layers.Relu(10) << layers.Input(2),
        )

        for connection in possible_connections:
            network = algorithms.GradientDescent(connection)
            self.assertEqual(len(network.layers), 3, msg=connection)
Code Example #20
    def test_connection_initializations(self):
        possible_connections = (
            (2, 3, 1),

            # as a list
            [layers.Input(2),
             layers.Sigmoid(3),
             layers.Tanh(1)],

            # as forward sequence with inline operators
            layers.Input(2) > layers.Relu(10) > layers.Tanh(1),
            layers.Input(2) >> layers.Relu(10) >> layers.Tanh(1),

            # as backward sequence with inline operators
            layers.Tanh(1) < layers.Relu(10) < layers.Input(2),
            layers.Tanh(1) << layers.Relu(10) << layers.Input(2),
        )

        for i, connection in enumerate(possible_connections, start=1):
            network = algorithms.GradientDescent(connection)
            message = "[Test #{}] Connection: {}".format(i, connection)
            self.assertEqual(len(network.layers), 3, msg=message)
Code Example #21
	def init_network(self, member):
		network = layers.join(layers.Input(self.inputneurons))
		for index in range(0, len(member[1][0])):
			if member[1][1][index] == 1:
				network = network > layers.Sigmoid(member[1][0][index])
			elif member[1][1][index] == 2:
				network = network > layers.Relu(member[1][0][index])
			elif member[1][1][index] == 3:
				network = network > layers.Softmax(member[1][0][index])
			elif member[1][1][index] == 4:
				network = network > layers.Tanh(member[1][0][index])
			elif member[1][1][index] == 5:
				network = network > layers.LeakyRelu(member[1][0][index])
		network = network > layers.Sigmoid(self.outputneurons)
		return network
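
For reference, member[1] packs two parallel lists: layer sizes and activation ids (1=Sigmoid, 2=Relu, 3=Softmax, 4=Tanh, 5=LeakyRelu). A hypothetical member showing the encoding the loop expects:

# member = [network, [layer_sizes, activation_ids], fitness]
member = [None, [[12, 30], [2, 4]], 0]
# init_network would build: Input > Relu(12) > Tanh(30) > Sigmoid(output)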
Code Example #22
	def __init__(self):
		self.population = []

		self.size_population = 20

		self.inputneurons = 4
		self.outputneurons = 1
		self.data = datasets.load_iris()

		for i in range(0,self.size_population):
			#connections
			network = layers.join(layers.Input(self.inputneurons))
			num = random.randint(1,4)
			temp1 = list(random.randint(1,50) for i in range(0, num))
			#print(temp1, end="\n\n")
			temp2 = []
			for neu in temp1:
				n = random.randint(1,5)
				temp2.append(n)
				if n == 1:
					network = network > layers.Sigmoid(neu)
				elif n == 2:
					network = network > layers.Relu(neu)
				elif n == 3:
					network = network > layers.Softmax(neu)
				elif n == 4:
					network = network > layers.Tanh(neu)
				elif n == 5:
					network = network > layers.LeakyRelu(neu)
				#print(network, end="\n~\n")
			network = network > layers.Sigmoid(self.outputneurons)
			attributes = [temp1, temp2]
			self.population.append([network, attributes, 0]) # 0 --> fitness

		#print(self.population)

		self.run()
		while self.best_members[0][2][0] > 1:
			print("next iteration")
			print(self.population)

			self.run()

		file = open("pickle_bestnet.txt", "w")
Code Example #23
	def model_network(self, algorithm='LevenbergMarquardt', model=None, opt=None):

		model = self.decode_model(model)
		if model is None:
			model = [
				[1, 'hidden', 15, 'Linear'],
				[2, 'hidden', 10, 'Linear'],
				[3, 'output', self.output_classes, 'Elu']
			]
			# [Input(4), Elu(1)]
			# [Input(4), Elu(6), Elu(1)] EP: 100
		layer_model = [layers.Input(self.input_features)]
		for layer in model:
			if layer[3] == 'Linear':
				layer_model.append(layers.Linear(layer[2]))
			if layer[3] == 'Relu':
				layer_model.append(layers.Relu(layer[2]))
			if layer[3] == 'Sigmoid':
				layer_model.append(layers.Sigmoid(layer[2]))
			if layer[3] == 'HardSigmoid':
				layer_model.append(layers.HardSigmoid(layer[2]))
			if layer[3] == 'Step':
				layer_model.append(layers.Step(layer[2]))
			if layer[3] == 'Tanh':
				layer_model.append(layers.Tanh(layer[2]))
			if layer[3] == 'Softplus':
				layer_model.append(layers.Softplus(layer[2]))
			if layer[3] == 'Softmax':
				layer_model.append(layers.Softmax(layer[2]))
			if layer[3] == 'Elu':
				layer_model.append(layers.Elu(layer[2]))
			if layer[3] == 'PRelu':
				layer_model.append(layers.PRelu(layer[2]))
			if layer[3] == 'LeakyRelu':
				layer_model.append(layers.LeakyRelu(layer[2]))

		print('Layer model: ' + str(layer_model))

		self.layers = layer_model
		self.select_algorithm(algorithm, options=opt)
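
Since every activation name in the model spec matches a class of the same name in neupy.layers, the if-chain above can be collapsed with getattr. A minimal sketch under that assumption (build_layer_model is a hypothetical stand-in, not part of the original class):

from neupy import layers

def build_layer_model(input_features, model):
    # Each model row is [index, kind, size, activation_name].
    layer_model = [layers.Input(input_features)]
    for _, _, size, activation in model:
        layer_model.append(getattr(layers, activation)(size))
    return layer_model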
Code Example #24
    def initialize(self):
        self.network = algorithms.Momentum(
            [
                layers.Input(20),
                layers.Relu(30, weight=init.Uniform(-1, 1)),
                layers.Tanh(40, weight=init.Uniform(-1, 1)),
                # layers.Embedding(40, 1),
                # layers.GRU(40),
                layers.Relu(25, weight=init.Uniform(-1, 1)),
                layers.Linear(9, weight=init.Uniform(-1, 1)),
            ],

            error='categorical_crossentropy',
            step=0.01,
            verbose=False,
            shuffle_data=True,

            momentum=0.99,
            nesterov=True,

        )
        self.network.architecture()
Code Example #25
network = architectures.mixture_of_experts([
    layers.join(
        layers.Input(58),
        layers.Softmax(22),
        layers.Softmax(1),
    ),
    layers.join(
        layers.Input(58),
        layers.Relu(60),
        layers.Relu(40),
        layers.Softmax(22),
        layers.Softmax(1),
    ),
    layers.join(
        layers.Input(58),
        layers.Tanh(12),
        layers.Tanh(25),
        layers.Tanh(1),
    ),
])

gdnet = algorithms.Adam(network, verbose=True)
print("Start moe training")

gdnet.train(x_train, y_train, epochs=500)

gresult = gdnet.predict(x_test)

gerror = estimators.rmse(gresult, y_test)
print(gerror)
Code Example #26
File: test_activations.py Project: zeroyou/neupy
 def test_tanh_layer(self):
     layer1 = layers.Tanh(1)
     self.assertGreater(1, self.eval(layer1.activation_function(1.)))
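
(For reference: tanh saturates below 1, and math.tanh(1.0) ≈ 0.7616, which is what the assertion relies on.)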
Code Example #27
import numpy
from neupy import algorithms, layers

training_set = numpy.loadtxt('training_set.txt')
print(training_set.shape)
testing_set = numpy.loadtxt('testing_set.txt')
print(testing_set.shape)

testing_attributes = numpy.delete(testing_set, 18, 1)
training_attributes = numpy.delete(training_set, 18, 1)

training_results = training_set[:, [18]]
testing_results = testing_set[:, [18]]

network = algorithms.Momentum(
    [layers.Input(18),
     layers.Sigmoid(40),
     layers.Sigmoid(20),
     layers.Tanh(1)],
    error='categorical_crossentropy',
    step=0.15,
    verbose=True,
    shuffle_data=True,
    momentum=0.99,
    nesterov=True)

network.architecture()

network.train(training_attributes,
              training_results,
              testing_attributes,
              testing_results,
              epochs=20)
Code Example #28
    def test_connection_shapes(self):
        connection = layers.Input(2) > layers.Relu(10) > layers.Tanh(1)

        self.assertEqual(connection.input_shape, (2, ))
        self.assertEqual(connection.output_shape, (1, ))
Code Example #29
    mean = (encoder > mu).output(x)
    log_var = (encoder > sigma).output(x)

    epsilon = 1e-7
    predicted = tf.clip_by_value(predicted, epsilon, 1.0 - epsilon)

    crossentropy_loss = binary_crossentropy(expected, predicted)
    kl_loss = tf.reduce_sum(
        1 + 2 * log_var - tf.square(mean) - tf.exp(2 * log_var),
        axis=1
    )
    return tf.reduce_mean(crossentropy_loss - 0.5 * kl_loss)
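
This is the standard VAE objective: reconstruction cross-entropy plus KL(q || N(0, I)) = -0.5 * sum(1 + 2*log_var - mean^2 - exp(2*log_var)); subtracting 0.5 * kl_loss in the return statement adds exactly that KL term.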


# Construct Variational Autoencoder
encoder = layers.Input(784, name='input') > layers.Tanh(256)

# Two is the maximum number of dimensions that we can visualize
mu = layers.Linear(2, name='mu')
sigma = layers.Linear(2, name='sigma')
sampler = [mu, sigma] > GaussianSample()

decoder = layers.Tanh(256) > layers.Sigmoid(784)

# Train network
network = algorithms.RMSProp(
    [
        encoder,
        sampler,
        decoder,
    ],
Code Example #30
    predicted = T.clip(predicted, epsilon, 1.0 - epsilon)

    crossentropy_loss = T.sum(
        T.nnet.binary_crossentropy(predicted, expected),
        axis=1
    )
    kl_loss = -0.5 * T.sum(
        1 + 2 * log_var - T.square(mean) - T.exp(2 * log_var),
        axis=1
    )

    return (crossentropy_loss + kl_loss).mean()


# Construct Variational Autoencoder
encoder = layers.Input(784) > layers.Tanh(500)

mu = layers.Linear(2, name='mu')
sigma = layers.Linear(2, name='sigma')
sampler = [mu, sigma] > GaussianSample()

decoder = layers.Tanh(500) > layers.Sigmoid(784)

# Train network
network = algorithms.RMSProp(
    encoder > sampler > decoder,

    error=vae_loss,
    batch_size=128,
    shuffle_data=True,
    step=0.001,