            sess, val_loss, val_accuracy, val_labels, val_probabilities)
        val_loss_arr.append(loss_values)
        val_acc_arr.append(accuracy_values)
        logging.info('Loss on validation batch %s is: %s' % (i, loss_values))
        logging.info('AUC on validation batch %s is: %s' % (i, auc))
        # for label_idx in range(len(auc)):
        #     auc_arr[label_idx] += auc[label_idx]

    logging.info(
        'Mean loss on this validation epoch is: %s'
        % (float(sum(val_loss_arr)) / max(len(val_loss_arr), 1)))
    logging.info(
        'Mean accuracy on this validation epoch is: %s'
        % (float(sum(val_acc_arr)) / max(len(val_acc_arr), 1)))
    # mean_auc = [auc / val_num_batches_per_epoch for auc in auc_arr]
    # logging.info('Mean AUC on this validation epoch is: %s' % mean_auc)

    # Log the summaries every 10 steps.
    if step % 10 == 0:
        summaries = sess.run(my_summary_ops)
        sv.summary_computed(sess, summaries)

    # Once all the training has been done, save the log files and checkpoint model.
    logging.info('Finished training! Saving model to disk now.')
    sv.saver.save(sess, sv.save_path, global_step=sv.global_step)


if __name__ == '__main__':
    mlog.initlog(FLAGS.log_dir)
    run()
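
# --- Hedged sketch: per-label mean AUC over a validation epoch. ---
# The commented-out auc_arr logic above suggests averaging one AUC per
# label across the epoch. A minimal, self-contained alternative computes
# the AUC per label column once over all collected predictions instead of
# averaging per-batch AUCs. The names below (mean_auc_per_label,
# per_batch_labels, per_batch_probabilities) are illustrative and not from
# the original script; it assumes labels/probabilities stack into
# (num_samples, num_labels) arrays.
import numpy as np
from sklearn.metrics import roc_auc_score


def mean_auc_per_label(labels, probabilities):
    """Compute one ROC AUC per label column, skipping degenerate columns."""
    aucs = []
    for idx in range(labels.shape[1]):
        # roc_auc_score raises if a column contains only one class,
        # so skip columns where every label is identical.
        if len(np.unique(labels[:, idx])) < 2:
            continue
        aucs.append(roc_auc_score(labels[:, idx], probabilities[:, idx]))
    return aucs

# Example usage (hypothetical collection of per-batch outputs):
# all_labels = np.concatenate(per_batch_labels)        # (N, num_labels)
# all_probs = np.concatenate(per_batch_probabilities)  # (N, num_labels)
# logging.info('Per-label AUC: %s', mean_auc_per_label(all_labels, all_probs))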
    'shuffle': True,  # whether to shuffle the dataset every epoch
    'normalization': 'divide_255',
}


def get_train_params_by_name(name):
    if name in ['C10', 'C10+', 'C100', 'C100+']:
        return train_params_cifar
    if name == 'SVHN':
        return train_params_svhn
    if name == 'chexnet':
        return train_params_chexnet


if __name__ == '__main__':
    initlog()
    logging.basicConfig(
        format='%(asctime)s(%(relativeCreated)d) - %(levelname)s '
               '%(filename)s(%(lineno)d) :: %(message)s',
        level=logging.DEBUG)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--train', action='store_true',
        help='Train the model')
    parser.add_argument(
        '--test', action='store_true',
        help='Test model for required dataset if pretrained model exists. '
             'If provided together with the `--train` flag, testing will be '
             'performed right after training.')
    parser.add_argument(
        '--model_type', '-m', type=str,
        choices=['DenseNet', 'DenseNet-BC'],
        default='DenseNet',
    sys.stdout.write('\n')
    sys.stdout.flush()


def main():
    LAYERS = 3
    pkl_fname = "data/preprocess/stage1_train_set_rgb.pkl"
    images, masks = get_dataset(pkl_fname)
    logging.info("read train set: %s, %s", images.shape, masks.shape)
    logging.info("image: [%s, %s], mask: [%s, %s]",
                 np.max(images), np.min(images),
                 np.max(masks), np.min(masks))
    # pred_size, offset = unet_size(256, LAYERS)
    # logging.info("pred_size: %d, offset: %d", pred_size, offset)
    # images = padding_array(images, offset, default_val=0.0)
    # masks = padding_array(masks, offset, default_val=False)
    # args.data_dir = args.data_dir.strip()
    # if len(args.data_dir) >= 0:
    #     fnames = [os.path.join(args.data_dir, x) for x in fnames]

    train_ratio = 0.9
    n_train = int(len(images) * train_ratio)
    logging.info("train_ratio: %s, n_train: %s, n_val: %s",
                 train_ratio, n_train, len(images) - n_train)
    convert_dataset(images[:n_train], masks[:n_train],
                    "data/tfrecords/256x256/train", 4)
    convert_dataset(images[n_train:], masks[n_train:],
                    "data/tfrecords/256x256/val", 2)


if __name__ == "__main__":
    initlog("log")
    main()
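
# --- Hedged sketch of what convert_dataset above might look like. ---
# Assumptions not confirmed by the original script: the 4th argument is the
# number of output shards, and each example stores the raw image and mask
# bytes plus their spatial shape. Uses only the standard TF1 TFRecord API;
# convert_dataset_sketch is an illustrative name, not the real helper.
import numpy as np
import tensorflow as tf


def convert_dataset_sketch(images, masks, out_prefix, num_shards):
    """Write (image, mask) pairs to num_shards TFRecord files."""
    samples_per_shard = int(np.ceil(len(images) / float(num_shards)))
    for shard in range(num_shards):
        fname = '%s-%05d-of-%05d.tfrecord' % (out_prefix, shard, num_shards)
        start = shard * samples_per_shard
        with tf.python_io.TFRecordWriter(fname) as writer:
            for img, mask in zip(images[start:start + samples_per_shard],
                                 masks[start:start + samples_per_shard]):
                example = tf.train.Example(features=tf.train.Features(feature={
                    'image': tf.train.Feature(
                        bytes_list=tf.train.BytesList(value=[img.tobytes()])),
                    'mask': tf.train.Feature(
                        bytes_list=tf.train.BytesList(value=[mask.tobytes()])),
                    'height': tf.train.Feature(
                        int64_list=tf.train.Int64List(value=[img.shape[0]])),
                    'width': tf.train.Feature(
                        int64_list=tf.train.Int64List(value=[img.shape[1]])),
                }))
                writer.write(example.SerializeToString())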
            print('logits:\n', logits_value)
            print('Probabilities:\n', probabilities_value)
            print('predictions:\n', predictions_value)
            print('Labels:\n', labels_value)

        # Log the summaries every 10 steps.
        if step % 10 == 0:
            loss, _ = train_step(sess, train_op, sv.global_step)
            mean_loss_arr.append(loss)
            summaries = sess.run(my_summary_op)
            sv.summary_computed(sess, summaries)
        # If not, simply run the training step.
        else:
            loss, _ = train_step(sess, train_op, sv.global_step)
            mean_loss_arr.append(loss)

    # We log the final training loss and accuracy.
    logging.info('Final Loss: %s', loss)
    logging.info('Final Accuracy: %s', sess.run(accuracy))

    # Once all the training has been done, save the log files and checkpoint model.
    logging.info('Finished training! Saving model to disk now.')
    # saver.save(sess, "./flowers_model.ckpt")
    sv.saver.save(sess, sv.save_path, global_step=sv.global_step)


if __name__ == '__main__':
    mlog.initlog('./log/pne_log')
    run()
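
# --- Hedged sketch of the train_step helper called above. ---
# Not the original implementation; a minimal version consistent with its
# call site `loss, _ = train_step(sess, train_op, sv.global_step)`. It
# assumes train_op evaluates to the scalar total loss, as in the
# slim.learning.create_train_op convention; train_step_sketch is an
# illustrative name.
import logging
import time


def train_step_sketch(sess, train_op, global_step):
    """Run one optimization step and log the loss and step time."""
    start = time.time()
    loss, step = sess.run([train_op, global_step])
    logging.info('global step %s: loss %.4f (%.2f sec/step)',
                 step, loss, time.time() - start)
    return loss, step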