def test_batch_norm_storage(self):
    x_train, x_test, y_train, y_test = simple_classification()

    batch_norm = layers.BatchNorm()
    gdnet = algorithms.MinibatchGradientDescent(
        [
            layers.Input(10),
            layers.Relu(5),
            batch_norm,
            layers.Sigmoid(1),
        ],
        batch_size=10,
    )
    gdnet.train(x_train, y_train)

    error_before_save = gdnet.prediction_error(x_test, y_test)
    mean_before_save = batch_norm.running_mean.get_value()
    inv_std_before_save = batch_norm.running_inv_std.get_value()

    with tempfile.NamedTemporaryFile() as temp:
        storage.save(gdnet, temp.name)
        storage.load(gdnet, temp.name)

        error_after_load = gdnet.prediction_error(x_test, y_test)
        mean_after_load = batch_norm.running_mean.get_value()
        inv_std_after_load = batch_norm.running_inv_std.get_value()

        self.assertAlmostEqual(error_before_save, error_after_load)
        np.testing.assert_array_almost_equal(mean_before_save, mean_after_load)
        np.testing.assert_array_almost_equal(inv_std_before_save, inv_std_after_load)

def test_batch_norm_storage(self):
    x_train, x_test, y_train, y_test = simple_classification()

    batch_norm = layers.BatchNorm()
    gdnet = algorithms.GradientDescent(
        [
            layers.Input(10),
            layers.Relu(5),
            batch_norm,
            layers.Sigmoid(1),
        ],
        batch_size=10,
        verbose=True,  # keep it as `True`
    )
    gdnet.train(x_train, y_train, epochs=5)

    error_before_save = gdnet.prediction_error(x_test, y_test)
    mean_before_save = self.eval(batch_norm.running_mean)
    inv_std_before_save = self.eval(batch_norm.running_inv_std)

    with tempfile.NamedTemporaryFile() as temp:
        storage.save(gdnet, temp.name)
        storage.load(gdnet, temp.name)

        error_after_load = gdnet.prediction_error(x_test, y_test)
        mean_after_load = self.eval(batch_norm.running_mean)
        inv_std_after_load = self.eval(batch_norm.running_inv_std)

        self.assertAlmostEqual(error_before_save, error_after_load)
        np.testing.assert_array_almost_equal(mean_before_save, mean_after_load)
        np.testing.assert_array_almost_equal(inv_std_before_save, inv_std_after_load)

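# The variant above reads the BatchNorm running statistics through the
# test case's `self.eval` helper instead of Theano's `.get_value()` used
# in the first variant. A minimal sketch of what such a helper could look
# like for the TensorFlow backend; the `tensorflow_session()` accessor
# matches the one used in the VIN callback below, the rest is an assumption:
def eval(self, value):
    session = tensorflow_session()
    return session.run(value)
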
def test_simple_storage(self):
    connection = layers.join(
        layers.Input(10),
        layers.Sigmoid(5),
        layers.Sigmoid(2),
    )

    with tempfile.NamedTemporaryFile() as temp:
        storage.save(connection, temp.name)
        temp.file.seek(0)

        filesize_after = os.path.getsize(temp.name)
        self.assertGreater(filesize_after, 0)

        data = pickle.load(temp.file)

        self.assertIn('sigmoid-1', data)
        self.assertIn('sigmoid-2', data)

        self.assertIn('weight', data['sigmoid-1'])
        self.assertIn('bias', data['sigmoid-1'])
        self.assertIn('weight', data['sigmoid-2'])
        self.assertIn('bias', data['sigmoid-2'])

        self.assertEqual(data['sigmoid-1']['weight'].shape, (10, 5))
        self.assertEqual(data['sigmoid-1']['bias'].shape, (5,))
        self.assertEqual(data['sigmoid-2']['weight'].shape, (5, 2))
        self.assertEqual(data['sigmoid-2']['bias'].shape, (2,))

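# The test above implies that storage.save writes a plain pickled dict
# keyed by layer name. A minimal sketch that inspects such a file without
# importing neupy; 'connection.pickle' is an assumed path, written the
# same way the test writes to its temporary file:
import pickle

with open('connection.pickle', 'rb') as f:
    data = pickle.load(f)

print(data['sigmoid-1']['weight'].shape)  # (10, 5)
print(data['sigmoid-1']['bias'].shape)    # (5,)
print(data['sigmoid-2']['weight'].shape)  # (5, 2)
print(data['sigmoid-2']['bias'].shape)    # (2,)
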
def on_epoch_end(network):
    # `steps` and `env` come from the enclosing scope of the training script
    if network.last_epoch in steps:
        print("Saving pre-trained VIN model...")
        storage.save(network, env['pretrained_network_file'])

        # Update the step (learning rate) variable inside the active session
        new_step = steps[network.last_epoch]
        session = tensorflow_session()
        network.variables.step.load(new_step, session)

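# In the VIN training script below, a callback like the one above is
# produced by a factory and passed as `epoch_end_signal`. A sketch of that
# wiring; the factory's body is an assumption, only its name and the hook
# come from the script itself:
def on_epoch_end_from_steps(steps):
    def on_epoch_end(network):
        # ... body as above, with `steps` captured from this closure
        pass
    return on_epoch_end
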
def reset_log(self):
    # Append the distance covered by the car to this sample's log file
    # (Portuguese identifiers kept as-is: 'amostra' = sample, 'Rede' = network)
    with open("Logs/Rede3_4-12-8-3/R3_" + str(amostra) + ".txt", "a") as log_file:
        log_file.write(str(int(self.distance_car)) + "\n")

    if self.resetCount >= 120:
        # 'motorista' = driver; save the network before terminating the run
        storage.save(
            nn,
            filepath=f'Saves/Rede3_4-12-8-3/motorista{amostra}_{CODINOME}_g{self.resetCount}_d{self.maxDistance:.0f}.hdf5'
        )
        sys.exit()

def on_epoch_end(gdnet):
    epoch = gdnet.last_epoch
    errors = gdnet.validation_errors

    if errors.previous() and errors.last() > errors.previous():
        # Load parameters and stop training
        storage.load(gdnet, 'training-epoch-{}.pickle'.format(epoch - 1))
        raise StopTraining("Training has been interrupted")
    else:
        # Save parameters after a successful epoch
        storage.save(gdnet, 'training-epoch-{}.pickle'.format(epoch))

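# A sketch of how the callback above would be attached. The
# `epoch_end_signal` hook and the train(x_train, y_train, x_test, y_test)
# signature both appear in the examples below; the architecture and data
# here are placeholders:
gdnet = algorithms.GradientDescent(
    [
        layers.Input(10),
        layers.Sigmoid(5),
        layers.Sigmoid(1),
    ],
    epoch_end_signal=on_epoch_end,
    verbose=True,
)
# Validation data is required, otherwise `gdnet.validation_errors` stays
# empty and the callback has nothing to compare.
gdnet.train(x_train, y_train, x_test, y_test, epochs=100)
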
def on_epoch_end(network):
    # `errors`, `tempdir`, `x_test` and `y_test` come from the enclosing scope
    epoch = network.last_epoch
    errors[epoch] = network.prediction_error(x_test, y_test)

    if epoch == 4:
        # Roll back to the parameters saved after the 2nd epoch
        storage.load(
            network.connection,
            os.path.join(tempdir, 'training-epoch-2'))
        raise StopTraining('Stop training process after 4th epoch')
    else:
        storage.save(
            network.connection,
            os.path.join(tempdir, 'training-epoch-{}'.format(epoch)))

def test_storage_save_connection_from_network(self):
    network = algorithms.GradientDescent([
        layers.Input(10),
        layers.Sigmoid(5),
        layers.Sigmoid(2),
    ])

    with tempfile.NamedTemporaryFile() as temp:
        storage.save(network, temp.name)
        temp.file.seek(0)

        filesize_after = os.path.getsize(temp.name)
        self.assertGreater(filesize_after, 0)

def test_storage_save_load_save(self):
    connection = layers.join(
        layers.Input(10),
        layers.Sigmoid(5),
        layers.Sigmoid(2),
    )

    with tempfile.NamedTemporaryFile() as temp:
        storage.save(connection, temp.name)
        temp.file.seek(0)

        filesize_first = os.path.getsize(temp.name)
        storage.load(connection, temp.name)

    with tempfile.NamedTemporaryFile() as temp:
        storage.save(connection, temp.name)
        temp.file.seek(0)

        filesize_second = os.path.getsize(temp.name)

    self.assertEqual(filesize_first, filesize_second)

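# Design note: comparing file sizes across save -> load -> save is a cheap
# idempotency check. If loading re-wrapped or duplicated parameters, the
# second save would produce a different file size even while the parameter
# arrays still compared equal.
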
print("Initializing VIN...") network = algorithms.RMSProp( create_VIN( env['input_image_shape'], n_hidden_filters=150, n_state_filters=10, k=env['k'], ), verbose=True, error=loss_function, epoch_end_signal=on_epoch_end_from_steps(env['steps']), **env['training_options'] ) print("Training VIN...") network.train( (x_train, s1_train, s2_train), y_train, (x_test, s1_test, s2_test), y_test, epochs=env['epochs'], ) if not os.path.exists(MODELS_DIR): os.mkdir(MODELS_DIR) print("Saving pre-trained VIN model...") storage.save(network, env['pretrained_network_file']) print("Evaluating accuracy on test set...") evaluate_accuracy(network.predict, x_test, s1_test, s2_test)
if args.use_pretrained:
    if not os.path.exists(CARTPOLE_WEIGHTS):
        raise OSError("Cannot find file with pretrained weights "
                      "(File name: {})".format(CARTPOLE_WEIGHTS))

    print("Loading pretrained weights")
    storage.load(network, CARTPOLE_WEIGHTS)
else:
    print("Start training")
    train_network(
        env, network, memory,
        n_games=150,  # Number of games that the network is going to play
        max_score=200,  # Maximum score that the network can achieve in the game
        epsilon=0.2,  # Probability to select a random action during the game
        gamma=0.99,
    )

    if not os.path.exists(FILES_DIR):
        os.mkdir(FILES_DIR)

    print("Saving parameters")
    storage.save(network, CARTPOLE_WEIGHTS)

# After the training we can check how the network solves the problem
print("Start playing game")
play_game(env, network, n_steps=100000)

    deeplab,
    error='categorical_crossentropy',
    step=0.00001,
    verbose=True,
    addons=[algorithms.WeightDecay],
    decay_rate=0.0001,
)

for i in range(args.epochs):
    print("Epoch #{}".format(i + 1))

    for x_batch, y_batch in training_iterator():
        # Extract features with ResNet-50; only DeepLab's parameters
        # are trained by the optimizer here
        x_batch = resnet50.predict(x_batch)
        optimizer.train(x_batch, y_batch, epochs=1, summary='inline')

    print("Start validation")
    val_images, val_annotations = next(validation_iterator())

    segmentation = deeplab.predict(resnet50.predict(val_images))
    confusion = get_confusion_matrix(val_annotations, segmentation)

    accuracy, miou = segmentation_metrics(confusion)
    print("Val accuracy: {:.3f}".format(accuracy))
    print("Val miou: {:.3f}".format(miou))

    filename = 'deeplab_{:0>3}_{:.3f}_{:.3f}.hdf5'.format(i, accuracy, miou)
    filepath = os.path.join(storage_folder, filename)

    storage.save(deeplab, filepath)
    print("Saved: {}".format(filepath))