def main(unused_argv):
    # Load training and eval data
    train_data, train_labels = data_utils.get_train_data()
    eval_data, eval_labels = data_utils.get_eval_data()

    # Create the Estimator
    mnist_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir="/tmp/mnist_convnet_model")

    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(
        tensors=tensors_to_log, every_n_iter=50)

    # Train the model
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": train_data},
        y=train_labels,
        batch_size=100,
        num_epochs=None,
        shuffle=True)
    mnist_classifier.train(
        input_fn=train_input_fn,
        steps=20000,
        hooks=[logging_hook])

    # Evaluate the model and print results
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": eval_data},
        y=eval_labels,
        num_epochs=1,
        shuffle=False)
    eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
    print(eval_results)
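# The listing above references cnn_model_fn and a tensor named "softmax_tensor"
# without showing them. Below is a minimal sketch of what such a model_fn could
# look like, following the standard TF 1.x Estimator pattern; the layer sizes
# and the 10-class output are assumptions, not taken from the original code.
# The key point is that tf.nn.softmax is given the explicit name
# "softmax_tensor" so the LoggingTensorHook above can find it by name.
import tensorflow as tf


def cnn_model_fn(features, labels, mode):
    """Illustrative model_fn sketch (assumed architecture)."""
    input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])
    conv = tf.layers.conv2d(input_layer, filters=32, kernel_size=5,
                            padding="same", activation=tf.nn.relu)
    pool = tf.layers.max_pooling2d(conv, pool_size=2, strides=2)
    flat = tf.reshape(pool, [-1, 14 * 14 * 32])
    dense = tf.layers.dense(flat, units=128, activation=tf.nn.relu)
    logits = tf.layers.dense(dense, units=10)

    predictions = {
        "classes": tf.argmax(input=logits, axis=1),
        # Name the softmax op so the logging hook can reference it by name.
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor"),
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(
            loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                          train_op=train_op)

    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(labels=labels,
                                        predictions=predictions["classes"])}
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                      eval_metric_ops=eval_metric_ops)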
def main(unused_argv):
    # Load training and eval data
    train_data, train_labels = data_utils.get_train_data(
        file_dir, height, width, img_format)
    eval_data, eval_labels = data_utils.get_eval_data(
        file_dir, height, width, img_format)
    # Shift labels from 1-based to 0-based indexing
    train_labels = train_labels - 1
    eval_labels = eval_labels - 1

    # Create the Estimator
    run_config = tf.estimator.RunConfig(
        save_summary_steps=None, save_checkpoints_secs=None)
    pitch2d_predictor = tf.estimator.Estimator(
        model_fn=cnn_model_fn, config=run_config)

    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(
        tensors=tensors_to_log, every_n_iter=50)

    # Train the model
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": train_data},
        y=train_labels,
        batch_size=32,
        num_epochs=None,
        shuffle=True)
    pitch2d_predictor.train(
        input_fn=train_input_fn,
        steps=20000,
        hooks=[logging_hook])

    # Evaluate the model and print results
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": eval_data},
        y=eval_labels,
        num_epochs=1,
        shuffle=False)
    eval_results = pitch2d_predictor.evaluate(input_fn=eval_input_fn)
    print(eval_results)
def main(unused_argv):
    # Time start
    start_time = time.time()

    # Load training and eval data
    train_data, train_labels = data_utils.get_train_data(
        data_dir, height, width, "color")
    eval_data, eval_labels = data_utils.get_eval_data(
        data_dir, height, width, "color")

    # Create the Estimator
    pitch2d_predictor = tf.estimator.Estimator(
        model_fn=model_fn, model_dir="/tmp/pitch2d_alexnet_np")

    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(
        tensors=tensors_to_log, every_n_iter=50)

    # Train the model; training stops after num_epochs passes over the data
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": train_data},
        y=train_labels,
        batch_size=128,
        num_epochs=128,
        shuffle=True,
        num_threads=4)
    pitch2d_predictor.train(input_fn=train_input_fn, hooks=[logging_hook])

    # Evaluate the model and print results
    # num_epochs=1 so the evaluation input_fn terminates; with num_epochs=None
    # and no `steps` argument, evaluate() would never finish.
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": eval_data},
        y=eval_labels,
        num_epochs=1,
        shuffle=False)
    eval_results = pitch2d_predictor.evaluate(input_fn=eval_input_fn)
    print(eval_results)

    # End timing
    end_time = time.time()
    print("{} seconds elapsed".format(end_time - start_time))
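# All three main(unused_argv) functions above follow the tf.app.run()
# convention, and the second and third rely on module-level settings such as
# file_dir/data_dir, height, width, and img_format that are not shown in these
# listings. A minimal, assumed entrypoint sketch is given below; the concrete
# values are hypothetical placeholders, not taken from the original code.
import tensorflow as tf

# Assumed module-level configuration consumed by data_utils.get_*_data().
data_dir = "/tmp/pitch2d_data"   # hypothetical path
height, width = 224, 224         # hypothetical input resolution

if __name__ == "__main__":
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run()  # parses flags and calls main(unused_argv)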
# We train our model again (this time fine-tuning the top 2 inception blocks
# alongside the top Dense layers).
model.fit_generator(data_utils.get_batch_data(), epochs=2, steps_per_epoch=2000)
model.save('MSL_tuned_inceptionnetV3.h5')

# Evaluate on the held-out data in batches and collect predictions
test_batch_size = 64
total_test_batch = 500
avg = 0
predicted_y = np.array([])
actual_y = np.array([])
for i in range(total_test_batch):
    evalX, evalY = data_utils.get_eval_data()
    loss, acc = model.evaluate(evalX, evalY, batch_size=test_batch_size)
    y1 = model.predict(evalX, batch_size=test_batch_size)
    y1 = np.argmax(y1, axis=1)
    y2 = np.argmax(evalY, axis=1)
    predicted_y = np.hstack((predicted_y, y1))
    actual_y = np.hstack((actual_y, y2))
    print("loss: ", loss)
    print("acc: ", acc)
    avg += acc
print("avg: ", avg / total_test_batch)

# Save (actual, predicted) label pairs for later analysis
arr = np.vstack((actual_y, predicted_y)).T
np.savetxt(
    '/home/cse/mtech/mcs162557/Replicate_MSL/Results/UPD_MSL_Multinomial.txt',
    arr)
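# The comment above says the model is trained "this time fine-tuning the top 2
# inception blocks alongside the top Dense layers", but the unfreezing step
# itself is not shown. Below is a sketch of that step, assuming `model` is a
# Keras InceptionV3-based model; the cutoff index 249 is the one used in the
# Keras applications fine-tuning guide and is an assumption here, as is the
# optimizer choice.
from keras.optimizers import SGD

# Freeze everything below the top two inception blocks, unfreeze the rest.
for layer in model.layers[:249]:
    layer.trainable = False
for layer in model.layers[249:]:
    layer.trainable = True

# Recompile with a low learning rate so fine-tuning does not destroy the
# pretrained weights; compile must be called again for trainability changes
# to take effect.
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
              loss='categorical_crossentropy',
              metrics=['accuracy'])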
def evaluate(config_task, ids, model, outdir='eval_out', epoch_num=0):
    """Evaluation."""
    files = load_files(ids)
    files = list(files)
    datDir = os.path.join(config.prepData_dir, config_task.task, "Tr")
    dices_list = []
    # files = files[:2]  # debugging.
    logger.info('Evaluating epoch{} for {}--- {} cases:\n{}'.format(
        epoch_num, config_task.task, len(files),
        str([obj['id'] for obj in files])))
    for obj in tqdm(files, desc='Eval epoch{}'.format(epoch_num)):
        ID = obj['id']
        # logger.info('evaluating {}:'.format(ID))
        obj['im'] = os.path.join(config.base_dir, config_task.task, "imagesTr", ID)
        obj['gt'] = os.path.join(config.base_dir, config_task.task, "labelsTr", ID)
        img_path = obj['im']
        gt_path = obj['gt']
        data = get_eval_data(obj, datDir)
        # final_label, probs = segment_one_image(config_task, data, model)
        try:
            final_label = segment_one_image(
                config_task, data, model, ID)  # final_label: d, h, w, num_classes
            save_to_nii(final_label,
                        filename=ID + '.nii.gz',
                        refer_file_path=img_path,
                        outdir=outdir,
                        mode="label",
                        prefix='Epoch{}_'.format(epoch_num))
            gt = sitk.GetArrayFromImage(sitk.ReadImage(gt_path))  # d, h, w
            # Treat cancer as organ for Task03_Liver and Task07_Pancreas
            if config_task.task in ['Task03_Liver', 'Task07_Pancreas']:
                gt[gt == 2] = 1
            # Calculate per-class Dice scores
            dices = multiClassDice(gt, final_label, config_task.num_class)
            dices_list.append(dices)
            tinies.sureDir(outdir)
            fo = open(os.path.join(outdir,
                                   '{}_eval_res.csv'.format(config_task.task)),
                      mode='a+')
            wo = csv.writer(fo, delimiter=',')
            wo.writerow([epoch_num, tinies.datestr(), ID] + dices)
            fo.flush()
            fo.close()
            # For tensorboard visualization
            tb_img = sitk.GetArrayFromImage(sitk.ReadImage(img_path))  # d, h, w
            if tb_img.ndim == 4:
                tb_img = tb_img[0, ...]
            train.tb_images([tb_img, gt, final_label],
                            [False, True, True],
                            ['image', 'GT', 'PS'],
                            epoch_num * config.step_per_epoch,
                            tag='Eval_{}_epoch_{}_dices_{}'.format(
                                ID, epoch_num, str(dices)))
        except Exception as e:
            logger.info('{}'.format(str(e)))
    labels = config_task.labels
    dices_all = np.asarray(dices_list)
    dices_mean = dices_all.mean(axis=0)
    logger.info('Eval mean dices:')
    dices_res = {}
    for i in range(config_task.num_class):
        tag = labels[str(i)]
        dices_res[tag] = dices_mean[i]
        logger.info('  {}, {}'.format(tag, dices_mean[i]))
    return dices_res
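# multiClassDice is called above but not defined in this listing. Below is a
# minimal numpy sketch of a per-class Dice computation, assuming both `gt` and
# `pred` are integer-coded label volumes of the same shape; the project's
# actual implementation may differ (e.g. in how it handles background or
# empty classes).
import numpy as np


def multiClassDice(gt, pred, num_class, eps=1e-7):
    """Return a list of Dice scores, one per class index in [0, num_class)."""
    dices = []
    for c in range(num_class):
        gt_c = (gt == c)
        pred_c = (pred == c)
        intersection = np.logical_and(gt_c, pred_c).sum()
        denom = gt_c.sum() + pred_c.sum()
        dices.append(2.0 * intersection / (denom + eps))
    return dices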