## convert this back to a list of arrs X_final = [X_batch[t] for t in range(T)] ## perform downsampling feed_dict_input = {i: d for i, d in zip(sequence, X_final)} #size has to be fixed... feed_dict_input[label_placeholder] = y_batch test_loss, test_prediction = sess.run([loss, outputs], feed_dict=feed_dict_input) # print(test_loss) # test_prediction = sess.run(outputs, feed_dict=feed_dict_input) test_prediction = test_prediction > 0.5 print('saving figures...') for k in range(mini_batch_size): # print(iou.IoU_3D(y[k], test_prediction[k])) test_iou.append(iou.IoU_3D(y_batch[k], test_prediction[k])) fig = plt.figure(figsize = (20,8)) ax = fig.add_subplot(1, 2, 1, projection='3d') ax.voxels(test_prediction[k], edgecolor='k') ax.set(xlabel='x', ylabel='y', zlabel='z', title='prediction') ax = fig.add_subplot(1, 2, 2, projection='3d') ax.voxels(y_batch[k], edgecolor='k', facecolor='blue') ax.set(xlabel='x', ylabel='y', zlabel='z', title='original model') plt.savefig('./results/figures/'+ 'test_sample_' + str(k) + '_minibatch_' + str(i) + '.jpg') plt.close() print('figures saved!')
# --- One training step: forward + backward pass on a mini-batch ---
# Builds the per-timestep feed dict from the sampled frames, runs the
# optimizer, thresholds predictions, and records loss / accuracy / mean IoU.
# NOTE(review): assumes `tic`, `print_every`, `save_every`, and the history
# lists are initialized by out-of-view code — confirm before reuse.
X_final = [X_batch[t] for t in t_sample]
feed_dict_input = {i: d for i, d in zip(sequence, X_final)}
feed_dict_input[label_placeholder] = y_batch
#print(feed_dict_input.keys())
#manually insert the label_placeholder
# Single sess.run fetches loss and outputs AND applies the gradient update.
loss_epoch, prediction, _ = sess.run([loss, outputs, optimizer], feed_dict=feed_dict_input)
# Binarize voxel occupancy probabilities.
prediction = prediction > 0.5
accuracy = np.mean(prediction == y_batch)
training_accuracy.append(accuracy)
# Per-sample 3D IoU, averaged over the mini-batch.
epoch_training_iou = []
for j in range(mini_batch_size):
    epoch_training_iou.append(iou.IoU_3D(y_batch[j], prediction[j]))
training_iou.append(np.mean(epoch_training_iou))
loss_history.append(loss_epoch)
toc = time.time()
# Periodic progress report.
if epoch % print_every == 0:
    print('time elapsed for this epoch:', toc - tic)
    print('mean iou:', np.mean(epoch_training_iou))
    print('loss:', loss_epoch)
    print('accuracy:', accuracy)
if (epoch % save_every == 0 and epoch > 0):
    # save it occasionally in case we stop the run
    pickle.dump([loss_history, training_iou, training_accuracy], open('gray_scale_planes_training_data.p', 'wb'))
# --- One training step (variant): forward + backward pass on a mini-batch ---
# Builds the per-timestep feed dict, runs the optimizer, thresholds the
# sigmoid outputs, and records loss / accuracy / mean IoU; checkpoints the
# model and metric histories every 20 epochs.
X_final = [X_batch[t, :, :, :, :] for t in t_sample]
feed_dict_input = {i: d for i, d in zip(sequence, X_final)}
feed_dict_input[label_placeholder] = y_batch
#print(feed_dict_input.keys())
#manually insert the label_placeholder
# Single sess.run fetches loss and outputs AND applies the gradient update.
loss_epoch, prediction, _ = sess.run([loss, outputs, optimizer], feed_dict=feed_dict_input)
# Binarize voxel occupancy probabilities.
prediction = prediction > 0.5
training_accuracy.append(np.mean(prediction == y_batch))
# Per-sample 3D IoU, averaged over the mini-batch.
epoch_training_iou = []
for j in range(mini_batch_size):
    epoch_training_iou.append(
        iou.IoU_3D(y_batch[j, :, :, :], prediction[j, :, :, :]))
# BUG FIX: this mean was previously appended twice per epoch, which
# double-counted every epoch in the IoU history (and biased the running
# mean printed below). Append exactly once.
training_iou.append(np.mean(epoch_training_iou))
print(np.mean(training_iou))
print('epoch: ' + str(epoch) + ' loss: ' + str(loss_epoch))
# Drop singleton dims before the sanity-check accuracy print.
prediction = np.squeeze(prediction)
print(np.mean(prediction == y_batch))
loss_history.append(loss_epoch)
if (epoch % 20 == 0 and epoch > 1):
    # save it occasionally in case we stop the run
    pickle.dump([loss_history, training_iou, training_accuracy],
                open('gray_scale_planes_training_data.p', 'wb'))
    saver.save(sess, './grayscale_plane_R2N2_model_weights', global_step=epoch)
#manually insert the label_placeholder sess.run(optimizer, feed_dict=feed_dict_input) loss_epoch = sess.run(loss, feed_dict=feed_dict_input) #reconfigure feed_dict to accept a place_holder for y prediction = sess.run(predictions, feed_dict=feed_dict_input) print('epoch: ' + str(epoch) + ' loss: ' + str(loss_epoch)) print(prediction.shape) print(np.mean(prediction == y_batch_flat)) loss_history.append(loss_epoch) training_accuracy.append(np.mean(prediction == y_batch_flat)) interim_iou = 0 for k in range(mini_batch_size): interim_iou += (iou.IoU_3D(y[k, :, :, :], prediction[k, :, :, :])) training_iou.append(interim_iou / mini_batch_size) print(training_iou) #predictions = tf.argmax(outputs, axis = 4) ## run test batch f = open(os.path.join(data_dir, test_batch), 'rb') batch_sample_2 = pickle.load(f) X, y = pickle_to_data.unravel_batch_pickle(batch_sample_2) X_nparr = np.array(X) batch_size, T, H, W, C = X_nparr.shape num_batches = int(128 / mini_batch_size) test_iou = list() print('run test case') for i in range(num_batches - 1):