def __init_layers(self, layer_spec):
    # Build one layer per entry in layer_spec: the first entry becomes the
    # input layer, the last the output layer, everything in between is hidden.
    self.layers = []
    last_index = len(layer_spec) - 1
    for i, size in enumerate(layer_spec):
        if i == 0:
            self.layers.append(InputLayer(size, self.activation_fn))
        elif i == last_index:
            self.layers.append(OutputLayer(size, self.activation_fn))
        else:
            self.layers.append(HiddenLayer(size, self.activation_fn))
    # Wire each layer to its successor.
    for i in range(len(self.layers) - 1):
        self.__join_layers(self.layers[i], self.layers[i + 1])
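# A minimal, self-contained sketch of the layer_spec convention used above:
# the first size becomes the input layer, the last the output layer, and the
# sizes in between hidden layers. The stub class and the example spec
# [4, 8, 8, 2] are illustrative assumptions, not part of the project.
class _StubLayer:
    def __init__(self, size, kind):
        self.size, self.kind = size, kind

    def __repr__(self):
        return "{}({})".format(self.kind, self.size)


def _build_from_spec(layer_spec):
    last_index = len(layer_spec) - 1
    layers = []
    for i, size in enumerate(layer_spec):
        if i == 0:
            layers.append(_StubLayer(size, "Input"))
        elif i == last_index:
            layers.append(_StubLayer(size, "Output"))
        else:
            layers.append(_StubLayer(size, "Hidden"))
    return layers


print(_build_from_spec([4, 8, 8, 2]))  # [Input(4), Hidden(8), Hidden(8), Output(2)]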
def main():
    sess = tf.Session()

    # Load the image and reshape it to NHWC for a single example.
    image = read_image('../data/heart.jpg')
    image = np.reshape(image, [1, 224, 224, 3])  # type: numpy.ndarray
    image = image.astype(np.float32)  # astype() returns a copy, so reassign it

    # Build the network from the AlexNet config file.
    parser = Parser('../data/alexnet.cfg')
    network_builder = NetworkBuilder("test")  # type: NetworkBuilder
    network_builder.set_parser(parser)
    network = network_builder.build()  # type: Network
    network.add_input_layer(InputLayer(tf.float32, [None, 224, 224, 3]))
    network.add_output_layer(OutputLayer())
    network.connect_each_layer()

    # Run a single forward pass.
    sess.run(tf.global_variables_initializer())
    fc_layer = sess.run(network.output, feed_dict={network.input: image})
def trainModel(X, Y, devX, devY):
    m = Model()

    # Input and output dimensions
    n_x, _ = X.shape
    n_y, _ = Y.shape

    # Input -> Dense(5, relu) -> Dense(n_y, sigmoid)
    m.addLayer(InputLayer(n_x))
    #m.addLayer(DenseLayer(5, act="linear"))
    #m.addLayer(BatchNormLayer())
    #m.addLayer(ActivationLayer(act="relu"))
    m.addLayer(DenseLayer(5, act="relu"))
    #m.addLayer(BatchNormLayer())
    #m.addLayer(ActivationLayer(act="relu"))
    m.addLayer(DenseLayer(n_y, act="sigmoid"))

    # Prepare all layer internals
    params = {
        # Optimizer parameters
        "learning_rate": 0.01,
        "beta": 0.9,      # Momentum, RMSProp
        "beta1": 0.9,     # Adam
        "beta2": 0.999,
        # MiniBatch
        "batch_size": 0   # 0 = Gradient descent. No minibatches
    }
    m.initialize(params, lossName="bce", optName="gd")

    # Train the model
    data = m.train(X, Y, 10000, devX, devY)

    PlotModel(m, data, devX, devY)
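# Hedged sketch of what optName="gd" with batch_size=0 implies: every update
# uses the full training set, i.e. plain (full-batch) gradient descent with
# the learning_rate above. The parameter and gradient values here are made-up
# examples; the Model class's actual update code is not shown in the source.
import numpy as np

learning_rate = 0.01
W = np.zeros((1, 5))           # example weight matrix of one dense layer
dW = np.full((1, 5), 0.2)      # example gradient from one full-batch pass

W = W - learning_rate * dW     # one gradient-descent step
print(W)                       # [[-0.002 -0.002 -0.002 -0.002 -0.002]]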
class TrainingMethodTest( unittest.TestCase ):

    @classmethod
    def setUpClass( self ):
        from opencl import OpenCL
        from layer import InputLayer, OutputLayer, ExecutionContext

        self.ocl = OpenCL( pyopencl.create_some_context() )
        self.i = InputLayer( 2, self.ocl )
        self.o = OutputLayer( 1, self.ocl )
        self.i.link_next( self.o )
        self.nnc = ExecutionContext( self.i, self.o, allow_training = True )

        self.i.set_weights( numpy.array( [ 0.1 ] * self.i.weights_count, numpy.float32 ) )
        self.o.set_weights( numpy.array( [ 0.3 ] * self.o.weights_count, numpy.float32 ) )

        self.tr = TrainingResults()
        self._create_method()

    @classmethod
    def _create_method( self ):
        pass

    def assertArrayEqual( self, ar1, ar2 ):
        self.assertEqual( len( ar1 ), len( ar2 ) )
        for x, y in zip( numpy.array( ar1, numpy.float32 ), numpy.array( ar2, numpy.float32 ) ):
            self.assertAlmostEqual( x, y, places = 5 )

    def test_create( self ):
        self.setUpClass()
        if not getattr( self, 'method', None ):
            return
        self.assertAlmostEqual( self.method.n, 0.5 )
        self.assertAlmostEqual( self.method.alpha, 0.2 )
        self.assertAlmostEqual( self.method.kw, 1.03 )
        self.assertAlmostEqual( self.method.pd, 0.7 )
        self.assertAlmostEqual( self.method.pi, 1.02 )
        self.assertAlmostEqual( self.method.last_error, 0.0 )
        self.assertEqual( self.method.offline, False )

    def test_randomize_weights( self ):
        if not getattr( self, 'method', None ):
            return
        self.i.set_weights( numpy.array( [ 0.1 ] * self.i.weights_count, numpy.float32 ) )
        self.assertTrue( all( map( lambda x: abs( x - 0.1 ) < 0.0001, self.i.get_weights() ) ) )

        self.method.randomize_weights( self.nnc )
        w1 = self.i.get_weights()
        self.assertFalse( all( map( lambda x: abs( x - 0.1 ) < 0.0001, self.i.get_weights() ) ) )

        self.method.randomize_weights( self.nnc )
        self.assertFalse( all( map( lambda x: abs( x - 0.1 ) < 0.0001, self.i.get_weights() ) ) )
        self.assertNotAlmostEqual( ( w1 - self.i.get_weights() ).sum(), 0.0 )

    def test_adjust_weights( self ):
        if not getattr( self, 'method', None ):
            return
        self.method.last_error = numpy.float32( 1.0 )
        self.method.n = numpy.float32( 0.5 )
        self.method.kw = numpy.float32( 1.03 )
        self.method.pd = numpy.float32( 0.5 )
        self.method.pi = numpy.float32( 1.5 )

        self.method.adjust_training_parameters( 1.2 )
        self.assertAlmostEqual( self.method.n, 0.25 )
        self.assertAlmostEqual( self.method.last_error, 1.2 )

        self.method.adjust_training_parameters( 1.0 )
        self.assertAlmostEqual( self.method.n, 0.375 )
        self.assertAlmostEqual( self.method.last_error, 1.0 )

        self.method.adjust_training_parameters( 1.0 )
        self.assertAlmostEqual( self.method.n, 0.5625 )
        self.assertAlmostEqual( self.method.last_error, 1.0 )

    def test_prepare_training( self ):
        if not getattr( self, 'method', None ):
            return
        self.method.prepare_training( self.nnc )
        self.assertIsInstance( self.method._weights_delta_buf, pyopencl.Buffer )
class TrainingResultsTest( unittest.TestCase ):

    def setUp( self ):
        self.tr = TrainingResults()

        from opencl import OpenCL
        from layer import InputLayer, Layer, OutputLayer, ExecutionContext

        self.ocl = OpenCL( pyopencl.create_some_context() )
        self.i = InputLayer( 2, self.ocl )
        self.h = Layer( 3, self.ocl )
        self.o = OutputLayer( 1, self.ocl )
        self.i.link_next( self.h )
        self.h.link_next( self.o, 0, 3 )
        self.nnc = ExecutionContext( self.i, self.o, allow_training = True )

        self.i.set_weights( numpy.array( [ 0.1 ] * self.i.weights_count, numpy.float32 ) )
        self.h.set_weights( numpy.array( [ 0.2 ] * self.h.weights_count, numpy.float32 ) )
        self.o.set_weights( numpy.array( [ 0.3 ] * self.o.weights_count, numpy.float32 ) )

    def assertArrayEqual( self, ar1, ar2 ):
        self.assertEqual( len( ar1 ), len( ar2 ) )
        for x, y in zip( numpy.array( ar1, numpy.float32 ), numpy.array( ar2, numpy.float32 ) ):
            self.assertAlmostEqual( x, y, places = 5 )

    def test_store( self ):
        self.tr.reset()
        self.assertEqual( self.tr.iterations, numpy.int32( 0 ) )
        self.assertGreater( self.tr.minimal_error, numpy.float32( 1e6 ) )
        self.assertIsNone( self.tr.optimal_weights )
        self.assertAlmostEqual( self.tr.total_time, 0.0 )
        self.assertAlmostEqual( self.tr.opencl_time, 0.0 )

        self.i.set_inputs( numpy.array( [ 1.0, 1.0 ], numpy.float32 ), is_blocking = True )
        self.i.process()
        initial_result = self.o.get_outputs()

        self.tr.store_weights( self.nnc )
        self.i.set_weights( numpy.array( [ 0.4 ] * self.i.weights_count, numpy.float32 ) )
        self.i.process()
        self.assertNotEqual( initial_result, self.o.get_outputs() )

        self.tr.apply_weights( self.nnc )
        self.i.process()
        self.assertArrayEqual( initial_result, self.o.get_outputs() )
def add(self, layer):
    """
    Adds a layer to the neural network's layer stack

    Inputs
    ------
    @param layer : A layer instance
    """
    if not isinstance(layer, Layer):
        raise TypeError("The added layer must be an instance "
                        "of class Layer. Found {}".format(type(layer)))

    if not self.outputs:
        if len(layer.inbound_connections) == 0:
            # Create an input layer implicitly from the layer's input_shape.
            if not hasattr(layer, 'input_shape') or layer.input_shape is None:
                raise ValueError("The first layer in a NeuralNetwork "
                                 "model must have an 'input_shape'")
            input_shape = layer.input_shape
            self.add(InputLayer(input_shape=input_shape))
            self.add(layer)
            return

        if len(layer.inbound_connections) != 1:
            raise ValueError("The layer added to NeuralNetwork model "
                             "must not be connected elsewhere. "
                             "Received layer {} which has {} inbound "
                             "connections".format(
                                 layer.name, len(layer.inbound_connections)))
        if len(layer.inbound_connections[0].output_tensors) != 1:
            raise ValueError("The layer added to NeuralNetwork "
                             "must have a single output tensor. "
                             "Use a different API for multi-output layers")

        self.outputs = [layer.inbound_connections[0].output_tensors[0]]
        self.inputs = get_source_inputs(self.outputs[0])
        MLConnection(outbound_model=self,
                     inbound_models=[],
                     connection_indices=[],
                     tensor_indices=[],
                     input_tensors=self.inputs,
                     output_tensors=self.outputs,
                     input_shapes=[x._nuro_shape for x in self.inputs],
                     output_shapes=[self.outputs[0]._nuro_shape])
    else:
        # Connect the new layer to the current output tensor.
        output_tensor = layer(self.outputs[0])
        if isinstance(output_tensor, list):
            raise ValueError("The layer added to NeuralNetwork "
                             "must have a single output tensor. "
                             "Use a different API for multi-output layers")
        self.outputs = [output_tensor]
        self.inbound_connections[0].output_tensors = self.outputs
        self.inbound_connections[0].output_shapes = [self.outputs[0]._nuro_shape]

    self.layers.append(layer)
    self.is_built = False
def __init__(self, inputs=3, outs=1, hidden_layers_num=5,
             layer_neurons_num=5, activation_function="sigmoid"):
    self.inputs = inputs
    self.outs = outs
    self.layers = []
    self.layer_neurons_num = layer_neurons_num
    self.hidden_layers_num = hidden_layers_num
    self.layers_num = hidden_layers_num + 2
    self.activation_function = activation_function

    # input layer
    layer = InputLayer()
    layer.init(self.inputs)
    self.layers.append(layer)

    # first hidden layer to interact with inputs
    layer = Layer()
    layer.init(self.layer_neurons_num, self.inputs, activation_function)
    self.layers.append(layer)

    for i in range(self.hidden_layers_num - 1):
        layer = Layer()
        layer.init(self.layer_neurons_num, self.layer_neurons_num, activation_function)
        self.layers.append(layer)

    # output layer
    layer = OutLayer()
    layer.init(self.outs, self.layer_neurons_num, activation_function)
    self.layers.append(layer)
# y = []
# for i in range(-1000, 1000, 2):
#     a = np.random.rand() / 10
#     X.append(np.array([i * a]).reshape(1, ))
#     y.append(np.array([f(i * a)]).reshape(1, ))
# X = np.array(X)
# y = np.array(y)

# N = NeuralNetwork(eta=.001, optimizer="adam")
# N.add_layer(InputLayer(784))
# N.add_layer(Layer(128, activation='leaky'))
# N.add_layer(Layer(32, activation='leaky'))
# N.add_layer(Layer(16, activation='leaky'))
# N.add_layer(Layer(10, activation='sig'))
# N.fit(X_train, y_train, epochs=10)
# # for i in range(-100, 100, 3):
# #     print(str(N.predict(np.array([i]).reshape(1, ))) + " " + str(f(i)))
# print(N.evaluate(X_test, y_test))

network = NeuralNetwork()
network.add_layer(InputLayer(3))
network.add_layer(Layer(2))
network.add_layer(Layer(1))

result = network.feedforward(np.array([1, 1, 1]))
print(result)
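# Hedged sketch of what the feedforward call above computes, assuming each
# Layer applies a weight matrix followed by a sigmoid activation (the real
# Layer/InputLayer implementation is not shown here, and these weights are
# arbitrary example values, not the network's actual ones).
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

x = np.array([1.0, 1.0, 1.0])    # input vector, matches InputLayer(3)
W1 = np.full((2, 3), 0.1)        # 3 inputs -> 2 neurons
W2 = np.full((1, 2), 0.1)        # 2 neurons -> 1 output

h = sigmoid(W1 @ x)              # hidden activations, shape (2,)
y = sigmoid(W2 @ h)              # network output, shape (1,)
print(y)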