Example #1
import numpy as np
import tensorflow as tf
from tqdm import tqdm

best_epoch_losses = [1000, 1000, 1000, 1000, 1000]

with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
    # initialize all variables/parameters:
    init = tf.global_variables_initializer()
    sess.run(init)

    for epoch in range(no_of_epochs):
        print("epoch: %d/%d" % (epoch + 1, no_of_epochs))

        # run an epoch and get all batch losses:
        batch_losses = []
        for step, (imgs, onehot_labels) in enumerate(tqdm(train_data_iterator())):
            # create a feed dict containing the batch data:
            batch_feed_dict = model.create_feed_dict(imgs_batch=imgs,
                                                     early_drop_prob=0.01, late_drop_prob=0.1,
                                                     onehot_labels_batch=onehot_labels)

            # compute the batch loss and compute & apply all gradients w.r.t.
            # the batch loss (without model.train_op in the call, the network
            # would NOT train, we would only compute the batch loss; a sketch
            # of how loss and train_op might be defined follows this example):
            batch_loss, _ = sess.run([model.loss, model.train_op],
                                     feed_dict=batch_feed_dict)
            batch_losses.append(batch_loss)

            print("step: %d/%d, training batch loss: %g" % (step + 1, no_of_batches, batch_loss))

        # compute the train epoch loss:
        train_epoch_loss = np.mean(batch_losses)
        # save the train epoch loss:
        train_loss_per_epoch.append(train_epoch_loss)
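
For context, the sess.run([model.loss, model.train_op], ...) call above only trains the network because train_op is the op that applies the gradient updates. The model class itself is not part of this snippet, so the following is only a minimal sketch of how create_feed_dict, loss and train_op might be wired together in TensorFlow 1.x; the placeholder names, the tiny stand-in forward pass and the Adam optimizer are assumptions, not the original code:

import tensorflow as tf

# minimal sketch only, NOT the model class used in the example above:
class SketchModel:
    def __init__(self, no_of_classes, learning_rate=1e-4):
        # inputs that create_feed_dict fills in:
        self.imgs_ph = tf.placeholder(tf.float32, shape=[None, None, None, 3])
        self.onehot_labels_ph = tf.placeholder(tf.float32,
                                               shape=[None, None, None, no_of_classes])
        self.early_drop_prob_ph = tf.placeholder(tf.float32, shape=[])
        self.late_drop_prob_ph = tf.placeholder(tf.float32, shape=[])

        # stand-in forward pass (the real network is omitted here); dropout
        # uses the drop probabilities passed in through create_feed_dict:
        net = tf.nn.dropout(self.imgs_ph, keep_prob=1.0 - self.early_drop_prob_ph)
        net = tf.layers.conv2d(net, 16, kernel_size=3, padding="same",
                               activation=tf.nn.relu)
        net = tf.nn.dropout(net, keep_prob=1.0 - self.late_drop_prob_ph)
        self.logits = tf.layers.conv2d(net, no_of_classes, kernel_size=1)

        # scalar loss fetched by sess.run([model.loss, ...]):
        self.loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=self.onehot_labels_ph,
                                                    logits=self.logits))

        # the op that actually updates the weights; leaving it out of the
        # sess.run call would compute the loss without training:
        self.train_op = tf.train.AdamOptimizer(learning_rate).minimize(self.loss)

    def create_feed_dict(self, imgs_batch, early_drop_prob, late_drop_prob,
                         onehot_labels_batch=None):
        # map each placeholder to the corresponding batch data (labels are
        # optional so the same method also works at inference time):
        feed_dict = {self.imgs_ph: imgs_batch,
                     self.early_drop_prob_ph: early_drop_prob,
                     self.late_drop_prob_ph: late_drop_prob}
        if onehot_labels_batch is not None:
            feed_dict[self.onehot_labels_ph] = onehot_labels_batch
        return feed_dict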
Example #2
        for i in range(batch_size):
            img_path = seq_frame_paths[batch_pointer + i]
            img_paths.append(img_path)
            print(img_path)
            # read the image:
            img = cv2.imread(img_path, -1)
            print(type(img))
            # resize the image and subtract the per-channel training mean:
            img = cv2.resize(img, (img_width, img_height))
            img = img - train_mean_channels
            batch_imgs[i] = img

        batch_pointer += batch_size

        batch_feed_dict = model.create_feed_dict(imgs_batch=batch_imgs,
                                                 early_drop_prob=0.0, late_drop_prob=0.0)

        # run a forward pass and get the logits:
        logits = sess.run(model.logits, feed_dict=batch_feed_dict)

        print("step: %d/%d" % (step+1, no_of_batches))

        # save all predicted label images overlaid on the input frames to
        # results_dir (label_img_to_color is sketched after this snippet):
        predictions = np.argmax(logits, axis=3)
        for i in range(batch_size):
            pred_img = predictions[i]
            pred_img_color = label_img_to_color(pred_img)

            img = batch_imgs[i] + train_mean_channels

            img_file_name = img_paths[i].split("/")[-1]
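
label_img_to_color is a helper that is not included in this snippet. A minimal sketch, assuming it simply maps each integer class id in the predicted label image to a color via a fixed palette (the class ids and color values below are placeholders, not the project's actual label set):

import numpy as np

# hypothetical palette: class id -> BGR color (placeholder values):
LABEL_TO_COLOR = {
    0: [128,  64, 128],
    1: [244,  35, 232],
    2: [ 70,  70,  70],
}

def label_img_to_color(label_img):
    # label_img: 2D array of integer class ids with shape (height, width)
    height, width = label_img.shape
    color_img = np.zeros((height, width, 3), dtype=np.uint8)
    for label, color in LABEL_TO_COLOR.items():
        # color every pixel predicted as this class:
        color_img[label_img == label] = color
    return color_img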