    :param get_batches_fn: Function to get batches of training data.  Call using get_batches_fn(batch_size)
    :param train_op: TF Operation to train the neural network
    :param cross_entropy_loss: TF Tensor for the amount of loss
    :param input_image: TF Placeholder for input images
    :param correct_label: TF Placeholder for label images
    :param keep_prob: TF Placeholder for dropout keep probability
    :param learning_rate: TF Placeholder for learning rate
    """
    for i in range(epochs):
        print("EPOCH {} ...".format(i + 1))
        for image, label in get_batches_fn(batch_size):
            _, loss = sess.run([train_op, cross_entropy_loss],
                               feed_dict={input_image: image, correct_label: label,
                                          keep_prob: 0.5, learning_rate: 0.001})
            print("Loss: {:.3f}".format(loss))
        print()

tests.test_train_nn(train_nn)


def run():
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    # https://www.cityscapes-dataset.com/
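# A minimal sketch of the optimize() exercised by tests.test_optimize elsewhere in this
# project, assuming the standard FCN recipe (flatten pixels, softmax cross-entropy, Adam).
# The body below is illustrative, not the project's exact implementation.
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
    # Flatten the 4-D decoder output to (pixels, classes): each pixel is one classification
    logits = tf.reshape(nn_last_layer, (-1, num_classes))
    labels = tf.reshape(correct_label, (-1, num_classes))
    cross_entropy_loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy_loss)
    return logits, train_op, cross_entropy_loss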
def perform_tests():
    tests.test_for_kitti_dataset(data_dir)
    tests.test_load_vgg(load_vgg, tf)
    tests.test_layers(layers)
    tests.test_optimize(optimize)
    tests.test_train_nn(train_nn)
        for batch, (image, label) in enumerate(get_batches_fn(batch_size)):
            feed_dict = {
                input_image: image,
                correct_label: label,
                keep_prob: 0.5,
                learning_rate: 1e-4
            }
            _, loss = sess.run([train_op, cross_entropy_loss], feed_dict=feed_dict)
            print('Epoch {} Batch {} Loss {}'.format(epoch, batch, loss), flush=True)

tests.test_train_nn(train_nn)


def process_image(image):
    # Resize the incoming frame to the shape the network was trained on
    orig_image_shape = image.shape[:2]
    image = scipy.misc.imresize(image, image_shape)
    # Run inference; softmax turns the logits into per-pixel class probabilities
    net_output = sess.run([tf.nn.softmax(logits)],
                          {keep_prob: 1.0, input_image: [image]})
    # Pick the most likely class for every pixel
    label_idx = np.argmax(net_output, axis=2)
    value_fill = label_idx.copy()
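# Hypothetical usage of process_image above: once it is completed to return an annotated
# frame, it can be mapped over a video with moviepy. The file names here are illustrative.
from moviepy.editor import VideoFileClip

clip = VideoFileClip('driving.mp4')
annotated_clip = clip.fl_image(process_image)  # applies process_image to every frame
annotated_clip.write_videofile('driving_segmented.mp4', audio=False)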
def run_tests():
    tests.test_layers(layers)
    tests.test_optimize(optimize)
    tests.test_for_kitti_dataset(DATA_DIRECTORY)
    tests.test_train_nn(train_nn)
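# A minimal sketch of the mean_iou() called inside train() below, assuming it wraps
# tf.metrics.mean_iou over per-pixel argmax predictions; names and shapes are illustrative.
def mean_iou(softmax, correct_label, num_classes):
    predictions = tf.argmax(softmax, axis=-1)   # most likely class per pixel
    labels = tf.argmax(correct_label, axis=-1)  # ground-truth class per pixel
    # tf.metrics.mean_iou returns the metric tensor plus the op that accumulates the
    # confusion matrix; both must be run, and local variables initialized beforehand
    iou, iou_op = tf.metrics.mean_iou(labels, predictions, num_classes)
    return iou, iou_op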
def train(epochs: int = None,
          save_model_freq: int = None,
          batch_size: int = None,
          learning_rate: float = None,
          keep_prob: float = None,
          dataset: str = None):
    """
    Performs the FCN training from beginning to end, that is, downloads required datasets
    and pretrained models, constructs the FCN architecture, trains it, and saves the
    trained model.

    :param epochs: number of epochs for training
    :param save_model_freq: save model each save_model_freq epoch
    :param batch_size: batch size for training
    :param learning_rate: learning rate for training
    :param keep_prob: keep probability for dropout layers for training
    :param dataset: dataset name
    """
    if None in [epochs, save_model_freq, batch_size, learning_rate, keep_prob, dataset]:
        raise ValueError('some parameters were not specified for function "%s"' % train.__name__)

    dataset = DATASETS[dataset]

    if not os.path.exists(dataset.data_root_dir):
        os.makedirs(dataset.data_root_dir)

    # Download Kitti Road dataset
    helper.maybe_download_dataset_from_yandex_disk(dataset)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg_from_yandex_disk(dataset.data_root_dir)

    # Run tests to check that the environment is ready to execute the semantic segmentation pipeline
    if dataset.name == 'kitti_road':
        tests.test_for_kitti_dataset(dataset.data_root_dir)
    tests.test_load_vgg(load_vgg, tf)
    tests.test_layers(layers)
    tests.test_optimize(optimize)
    tests.test_train_nn(train_nn, dataset)

    # TODO: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # https://www.cityscapes-dataset.com/

    with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
        # Path to vgg model
        vgg_path = os.path.join(dataset.data_root_dir, 'vgg')

        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(dataset.data_training_dir, dataset.image_shape)

        # TODO: Augment Images for better results
        # https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        image_input_tensor, keep_prob_tensor, layer3_out_tensor, layer4_out_tensor, layer7_out_tensor = \
            load_vgg(sess, vgg_path)
        output_layer_tensor = layers(layer3_out_tensor, layer4_out_tensor, layer7_out_tensor,
                                     dataset.num_classes)

        correct_label_tensor = tf.placeholder(tf.float32, (None, None, None, dataset.num_classes))
        learning_rate_tensor = tf.placeholder(tf.float32)
        logits_tensor, train_op_tensor, cross_entropy_loss_tensor, softmax_tensor = \
            optimize(output_layer_tensor, correct_label_tensor, learning_rate_tensor,
                     dataset.num_classes)
        iou_tensor, iou_op_tensor = mean_iou(softmax_tensor, correct_label_tensor,
                                             dataset.num_classes)

        train_nn(sess, dataset, epochs, save_model_freq, batch_size, learning_rate, keep_prob,
                 get_batches_fn, train_op_tensor, cross_entropy_loss_tensor, image_input_tensor,
                 correct_label_tensor, keep_prob_tensor, learning_rate_tensor,
                 iou_tensor, iou_op_tensor)

        save_model(sess, 'fcn8-final', dataset,
                   epochs=epochs, batch_size=batch_size,
                   learning_rate=learning_rate, keep_prob=keep_prob)
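# Example invocation with illustrative hyperparameters; 'kitti_road' matches the dataset
# name checked inside train() above.
if __name__ == '__main__':
    train(epochs=50, save_model_freq=10, batch_size=8,
          learning_rate=1e-4, keep_prob=0.5, dataset='kitti_road')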