def main():

  done = True
  args = parse_args()

  data_cfg = data_config(args.data_cfg)

  args.netcfg = data_cfg.get('cfg', default=args.netcfg)
  args.weights = data_cfg.get('weights', default=args.weights)
  args.namesfile = data_cfg.get('names', default=args.namesfile)

  if not args.netcfg or not args.weights:
    raise ValueError('Network config AND network weights must be given')

  names = get_labels(args.namesfile, args.classes)

  net = Network(batch=32)
  net.load(args.netcfg, args.weights)
  net_w, net_h, _ = net.input_shape

  if not args.input:
    args.input = input('Enter Image Path: ')

  done = False if not args.input else True

  # set the output filename
  args.outfile = args.outfile if args.outfile else splitext(args.input)[0] + '_detected'

  while done:

    # load the image from file
    input_image = Image(filename=args.input)

    # pad-resize the image if necessary
    input_image = input_image.letterbox(net_dim=(net_w, net_h)) if input_image.shape[:2] != net.shape else input_image

    _ = net.predict(X=input_image)

    # insert boxes evaluation and detection drawing here, then show the image
    input_image.show(window_name=args.outfile, ms=0, fullscreen=args.fullscreen)

    if args.save:
      input_image.save(filename=args.outfile)

    args.input = input('Enter Image Path: ')
    done = False if not args.input else True
def test_add_metrics(self):

  check_function_equality = lambda f1, f2: f1.__code__.co_code == f2.__code__.co_code
  custom_metrics_wrong = lambda y_true, y_pred, a: None
  custom_metrics_default = lambda y_true, y_pred, a=3.14: None

  model = Network(batch=42, input_shape=(1, 1, 1))

  model.compile(optimizer=Adam(), metrics=[mean_accuracy_score])
  assert model.metrics == [mean_accuracy_score]
  assert all(check_function_equality(x1, x2)
             for x1, x2 in zip(model.metrics, [mean_accuracy_score]))

  model.compile(optimizer=Adam(), metrics=[custom_metrics_default])
  assert model.metrics == [custom_metrics_default]
  assert all(check_function_equality(x1, x2)
             for x1, x2 in zip(model.metrics, [custom_metrics_default]))

  # a metric with a required extra argument cannot be called as f(y_true, y_pred)
  with pytest.raises(MetricsError):
    model.compile(optimizer=Adam(), metrics=[custom_metrics_wrong])
X_train = X_train[:train_size, ...]
y_train = y_train[:train_size]
X_test = X_test[:test_size, ...]
y_test = y_test[:test_size]

############################################

n_train = X_train.shape[0]
n_test = X_test.shape[0]

# one-hot encode y (10 classes) and reshape it to 4 dimensions
y_train = to_categorical(y_train).reshape(n_train, 1, 1, -1)
y_test = to_categorical(y_test).reshape(n_test, 1, 1, -1)

# Create the model and training
model = Network(batch=batch, input_shape=X_train.shape[1:])

model.add(Convolutional_layer(size=3, filters=32, stride=1,
                              pad=True, activation='Relu'))
model.add(BatchNorm_layer())
model.add(Maxpool_layer(size=2, stride=1, padding=True))
model.add(Connected_layer(outputs=100, activation='Relu'))
model.add(BatchNorm_layer())
def test_route_layer():

  np.random.seed(123)

  batch, w, h, c = (1, 5, 5, 3)

  # values from -10 to 10 to see the effect of both Relu and TanH activations
  input = np.random.uniform(low=-10, high=10., size=(batch, w, h, c))

  # init keras model
  inp = Input(shape=(w, h, c), batch_shape=(batch, w, h, c))
  x = Activation(activation='relu')(inp)
  y = Activation(activation='tanh')(x)
  Concat = Concatenate(axis=-1)([x, y])  # concatenation of x and y
  model = Model(inputs=[inp], outputs=Concat)

  # init NumPyNet model
  net = Network(batch=batch, input_shape=(w, h, c))

  net.add(Activation_layer(activation='relu'))  # layer 1
  net.add(Activation_layer(activation='tanh'))  # layer 2
  net.add(Route_layer(input_layers=(1, 2), by_channels=True))

  net._fitted = True  # False control

  # FORWARDS
  fwd_out_numpynet = net.predict(X=input)
  fwd_out_keras = model.predict(x=input, batch_size=batch)

  assert np.allclose(fwd_out_keras, fwd_out_numpynet)  # ok

  net._fitted = False  # restore the correct state of the network

  # BACKWARD

  # try some derivatives
  gradient = K.gradients(model.output, model.inputs)
  func = K.function(model.inputs + model.outputs, gradient)
  delta_keras = func([input])[0]

  net._net[3].delta = np.ones(shape=fwd_out_numpynet.shape)
  net._backward(X=input)
  delta_numpynet = net._net[0].delta
def test_forward(self, b, w, h, c):

  input = np.random.uniform(low=-10, high=10., size=(b, w, h, c)).astype(float)

  # init keras model
  inp = Input(batch_shape=(b, w, h, c))
  x = Activation(activation='relu')(inp)
  y = Activation(activation='tanh')(x)
  Concat = Concatenate(axis=-1)([x, y])  # concatenation of x and y
  model = Model(inputs=[inp], outputs=Concat)
  model.compile(optimizer='sgd', loss='mse')

  # init NumPyNet model
  net = Network(batch=b, input_shape=(w, h, c))

  net.add(Activation_layer(activation='relu'))  # layer 1
  net.add(Activation_layer(activation='tanh'))  # layer 2
  net.add(Route_layer(input_layers=(1, 2), by_channels=True))
  net.add(Cost_layer(cost_type='mse',
                     scale=1., ratio=0., noobject_scale=1.,
                     threshold=0., smoothing=0.))

  net.compile(optimizer=SGD())
  net.summary()

  assert net._fitted == False

  net._fitted = True  # False control

  # FORWARDS
  fwd_out_numpynet = net.predict(X=input)
  fwd_out_keras = model.predict(x=input, batch_size=b)

  np.testing.assert_allclose(fwd_out_keras, fwd_out_numpynet, rtol=1e-5, atol=1e-8)
def test_printer(self, b, w, h, c):

  net = Network(batch=b, input_shape=(w, h, c))

  net.add(Activation_layer(activation='relu'))  # layer 1
  net.add(Activation_layer(activation='tanh'))  # layer 2
  net.add(Route_layer(input_layers=(1, 2), by_channels=True))
  net.add(Cost_layer(cost_type='mse',
                     scale=1., ratio=0., noobject_scale=1.,
                     threshold=0., smoothing=0.))

  net.compile(optimizer=SGD())
  net.summary()
def test_backward(self, b, w, h, c):

  # TODO: test backward correctly

  input = np.random.uniform(low=-10, high=10., size=(b, w, h, c))
  tf_input = tf.Variable(input)

  # init keras model
  inp = Input(batch_shape=(b, w, h, c))
  x = Activation(activation='relu')(inp)
  y = Activation(activation='tanh')(x)
  Concat = Concatenate(axis=-1)([x, y])  # concatenation of x and y
  model = Model(inputs=[inp], outputs=Concat)
  model.compile(optimizer='sgd', loss='mse')

  # init NumPyNet model
  net = Network(batch=b, input_shape=(w, h, c))

  net.add(Activation_layer(activation='relu'))  # layer 1
  net.add(Activation_layer(activation='tanh'))  # layer 2
  net.add(Route_layer(input_layers=(1, 2), by_channels=True))
  net.add(Cost_layer(cost_type='mse',
                     scale=1., ratio=0., noobject_scale=1.,
                     threshold=0., smoothing=0.))

  net.compile(optimizer=SGD())

  net._fitted = True

  # FORWARDS
  fwd_out_numpynet = net.predict(X=input)

  with tf.GradientTape() as tape:
    preds = model(tf_input)
  grads = tape.gradient(preds, tf_input)

  fwd_out_keras = preds.numpy()
  delta_keras = grads.numpy()

  np.testing.assert_allclose(fwd_out_keras, fwd_out_numpynet, rtol=1e-5, atol=1e-8)

  net._fitted = False

  # BACKWARD
  net._net[3].delta = np.ones(shape=fwd_out_numpynet.shape, dtype=float)
  net._backward(X=input)

  delta_numpynet = net._net[0].delta

  assert delta_numpynet.shape == delta_keras.shape
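  # A value-level comparison would close the TODO above. This extra assertion
  # is an assumed completion (it is not part of the original test), reusing
  # the same tolerances as the forward check:
  np.testing.assert_allclose(delta_keras, delta_numpynet, rtol=1e-5, atol=1e-8)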
if X.max() > noisy_signal.max() or X.min() < noisy_signal.min():
  raise ValueError('Something went wrong with the stride trick!')

X = X.reshape(num_samples, 1, 1, size)

X_train, X_test = X[:train_size, ...], X[train_size:train_size + 180, ...]
y_train, y_test = y[:train_size, ...], y[train_size:train_size + 180, ...]

batch = 20
step = batch

y_train = y_train.reshape(-1, 1, 1, 1)
y_test = y_test.reshape(-1, 1, 1, 1)

# Create the model and training
model = Network(batch=batch, input_shape=X_train.shape[1:])

model.add(RNN_layer(outputs=32, steps=step, activation='linear'))
model.add(Connected_layer(outputs=8, activation='relu'))
model.add(Connected_layer(outputs=1, activation='linear'))
model.add(Cost_layer(cost_type='mse'))

# keras standard arguments
model.compile(optimizer=RMSprop(lr=0.001, epsilon=1e-7))  # , metrics=[mean_absolute_error])

print('*************************************')
print('\n Total input dimension: {}'.format(X_train.shape), '\n')
print('**************MODEL SUMMARY***********')

model.summary()

print('\n***********START TRAINING***********\n')
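# The call that launches the training is not part of this excerpt. A minimal
# sketch of how it could look, assuming Network.fit accepts the data arrays
# and an iteration count (the max_iter keyword is an assumption here):
model.fit(X=X_train, y=y_train, max_iter=10)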
X_train = X_train[:train_size, ...]
y_train = y_train[:train_size]
X_test = X_test[:test_size, ...]
y_test = y_test[:test_size]

############################################

n_train = X_train.shape[0]
n_test = X_test.shape[0]

# one-hot encode y (10 classes) and reshape it to 4 dimensions
y_train = to_categorical(y_train).reshape(n_train, 1, 1, -1)
y_test = to_categorical(y_test).reshape(n_test, 1, 1, -1)

# Create the model and training
model = Network(batch=batch, input_shape=X_train.shape[1:])

# model.add(Input_layer(input_shape=(batch, 32, 32, 3)))  # not necessary if input_shape is given to Network

model.add(Convolutional_layer(input_shape=(batch, 8, 8, 3),
                              size=3, filters=32, stride=1,
                              pad=True, activation='Relu'))
model.add(Convolutional_layer(input_shape=(batch, 8, 8, 32),
                              size=3, filters=64, stride=1,
                              pad=True,