def get_ts_loggers(self):
    """Create the time-series CSV loggers for this experiment.

    Each logger writes one CSV file under ``self.logs_folder``. The current
    step counter is passed as ``restore_step`` so curves resume correctly
    after restoring from a checkpoint.

    Returns:
        dict: key -> TimeSeriesLogger (keys: 'loss', 'box_loss',
        'conf_loss', 'step_time').
    """
    restore_step = self.step.get()
    loggers = {}
    # (key, csv filename, series label(s), display name); order matches the
    # original hand-written version.
    specs = [
        ('loss', 'loss.csv', ['train', 'valid'], 'Loss'),
        ('box_loss', 'box_loss.csv', ['train', 'valid'], 'Box Loss'),
        ('conf_loss', 'conf_loss.csv', ['train', 'valid'],
         'Confidence Loss'),
        ('step_time', 'step_time.csv', 'step time (ms)', 'Step time'),
    ]
    for key, fname, labels, name in specs:
        loggers[key] = TimeSeriesLogger(
            os.path.join(self.logs_folder, fname), labels, name=name,
            buffer_size=1, restore_step=restore_step)
    return loggers
def get_ts_loggers(self):
    """Create the time-series CSV loggers for this experiment.

    Each logger writes one CSV file under ``self.logs_folder``; the current
    step counter is passed as ``restore_step`` so curves continue from the
    right x-position after a checkpoint restore.

    Returns:
        dict: key -> TimeSeriesLogger.
    """
    restore_step = self.step.get()
    loggers = {}
    # (key, csv filename, series label(s), display name); insertion order
    # preserved from the original hand-written version.
    specs = [
        ('loss', 'loss.csv', ['train', 'valid'], 'Loss'),
        ('conf_loss', 'conf_loss.csv', ['train', 'valid'],
         'Confidence Loss'),
        ('segm_loss', 'segm_loss.csv', ['train', 'valid'],
         'Segmentation Loss'),
        ('dic', 'dic.csv', ['train', 'valid'], 'DiC'),
        ('dic_abs', 'dic_abs.csv', ['train', 'valid'], '|DiC|'),
        ('learn_rate', 'learn_rate.csv', 'learning rate', 'Learning rate'),
        ('count_acc', 'count_acc.csv', ['train', 'valid'], 'Count acc'),
        ('step_time', 'step_time.csv', 'step time (ms)', 'Step time'),
        ('box_loss', 'box_loss.csv', ['train', 'valid'], 'Box Loss'),
        ('gt_knob', 'gt_knob.csv', ['box', 'segmentation'], 'GT mix'),
    ]
    for key, fname, labels, name in specs:
        loggers[key] = TimeSeriesLogger(
            os.path.join(self.logs_folder, fname), labels, name=name,
            buffer_size=1, restore_step=restore_step)
    return loggers
def _get_ts_loggers(model_opt):
    """Build the CSV time-series loggers (loss, IoU, step time).

    ``model_opt`` is accepted for signature parity with sibling builders but
    is not consulted here. Paths are rooted at the module-level
    ``logs_folder``.
    """
    def _csv(fname):
        # All curves live side by side in the experiment's log folder.
        return os.path.join(logs_folder, fname)

    return {
        'loss': TimeSeriesLogger(
            _csv('loss.csv'), ['train', 'valid'], name='Loss',
            buffer_size=1),
        'iou': TimeSeriesLogger(
            _csv('iou.csv'),
            ['train soft', 'valid soft', 'train hard', 'valid hard'],
            name='IoU', buffer_size=1),
        'step_time': TimeSeriesLogger(
            _csv('step_time.csv'), 'step time (ms)', name='Step time',
            buffer_size=1),
    }
def get_ts_loggers(model_opt, restore_step=0):
    """Create the time-series CSV loggers for a segmentation experiment.

    Args:
        model_opt: model options; accepted for signature parity with sibling
            builders but not consulted here.
        restore_step: step counter to resume curves from after a checkpoint
            restore (0 for a fresh run).

    Returns:
        dict: key -> TimeSeriesLogger, files rooted at the module-level
        ``logs_folder``.
    """
    loggers = {}
    # (key, csv filename, series label(s), display name); insertion order
    # preserved from the original hand-written version.
    specs = [
        ('loss', 'loss.csv', ['train', 'valid'], 'Loss'),
        ('segm_loss', 'segm_loss.csv', ['train', 'valid'],
         'Segmentation Loss'),
        ('iou', 'iou.csv',
         ['train soft', 'valid soft', 'train hard', 'valid hard'], 'IoU'),
        ('wt_cov', 'wt_cov.csv', ['train', 'valid'], 'Weighted Coverage'),
        ('unwt_cov', 'unwt_cov.csv', ['train', 'valid'],
         'Unweighted Coverage'),
        ('dice', 'dice.csv', ['train', 'valid'], 'Dice'),
        ('learn_rate', 'learn_rate.csv', 'learning rate', 'Learning rate'),
        ('step_time', 'step_time.csv', 'step time (ms)', 'Step time'),
    ]
    for key, fname, labels, name in specs:
        loggers[key] = TimeSeriesLogger(
            os.path.join(logs_folder, fname), labels, name=name,
            buffer_size=1, restore_step=restore_step)
    return loggers
def get_ts_loggers(self):
    """Create the time-series CSV loggers for this experiment.

    Orientation curves are only added when
    ``self.model_opt['add_orientation']`` is truthy. The current step counter
    is passed as ``restore_step`` so curves resume after a checkpoint
    restore.

    Returns:
        dict: key -> TimeSeriesLogger.
    """
    model_opt = self.model_opt
    restore_step = self.step.get()

    def _make(fname, labels, name):
        # All loggers share the same folder, buffer size and restore step.
        return TimeSeriesLogger(
            os.path.join(self.logs_folder, fname), labels, name=name,
            buffer_size=1, restore_step=restore_step)

    loggers = {}
    loggers['loss'] = _make('loss.csv', ['train', 'valid'], 'Loss')
    loggers['iou'] = _make(
        'iou.csv', ['train soft', 'valid soft', 'train hard', 'valid hard'],
        'IoU')
    loggers['foreground_loss'] = _make(
        'foreground_loss.csv', ['train', 'valid'], 'Foreground loss')
    if model_opt['add_orientation']:
        loggers['orientation_ce'] = _make(
            'orientation_ce.csv', ['train', 'valid'], 'Orientation CE')
        loggers['orientation_acc'] = _make(
            'orientation_acc.csv', ['train', 'valid'],
            'Orientation accuracy')
    loggers['step_time'] = _make(
        'step_time.csv', 'step time (ms)', 'Step time')
    return loggers
# Script fragment: load the dataset, start a TF session (restoring from a
# checkpoint when requested), and set up dashboard CSV loggers.
# NOTE(review): newlines were collapsed in the source; structure below is
# reconstructed. `tf.initialize_all_variables` is a legacy TF (<1.0) API —
# presumably this targets an old TensorFlow; confirm before modernizing.
log.info('Loading dataset')
# 10:1 train/validation split of args.num_ex examples.
dataset = get_dataset(args.dataset, data_opt, args.num_ex,
                      int(args.num_ex / 10))
sess = tf.Session()
if args.restore:
    # Resume weights from the checkpoint file.
    saver.restore(sess, ckpt_fname)
else:
    sess.run(tf.initialize_all_variables())
# Create time series logger
if args.logs:
    loss_logger = TimeSeriesLogger(
        os.path.join(logs_folder, 'loss.csv'), ['train', 'valid'],
        name='Loss', buffer_size=1)
    iou_logger = TimeSeriesLogger(
        os.path.join(logs_folder, 'iou.csv'),
        ['train soft', 'valid soft', 'train hard', 'valid hard'],
        name='IoU', buffer_size=1)
    count_acc_logger = TimeSeriesLogger(
        os.path.join(logs_folder, 'count_acc.csv'), ['train', 'valid'],
        name='Count accuracy', buffer_size=1)
    learn_rate_logger = TimeSeriesLogger(
        os.path.join(logs_folder, 'learn_rate.csv'), 'learning rate',
        name='Learning rate',
        # NOTE(review): fragment is truncated mid-call here in the source;
        # remaining arguments are not visible in this view.
# Script fragment: report dataset sizes, create the checkpoint saver and CE
# loggers, then enter the training loop.
# NOTE(review): newlines were collapsed in the source; structure below is
# reconstructed.
num_ex = inp_all.shape[0]
log.info('{} training examples'.format(num_ex))
inp_all_val = dataset['valid']['input']
lab_seg_all_val = dataset['valid']['label_segmentation']
lab_obj_all_val = dataset['valid']['label_objectness']
num_ex_val = inp_all_val.shape[0]
log.info('{} validation examples'.format(num_ex_val))
# Create saver
saver = tf.train.Saver(tf.all_variables())
# saver = tf.train.Saver(tf.trainable_variables())
# Create time series logger
# Train CE is buffered more aggressively (25) than valid CE (2), presumably
# because it is written far more often — confirm against TimeSeriesLogger.
train_ce_logger = TimeSeriesLogger(
    os.path.join(exp_logs_folder, 'train_ce.csv'), 'train_ce',
    buffer_size=25)
valid_ce_logger = TimeSeriesLogger(
    os.path.join(exp_logs_folder, 'valid_ce.csv'), 'valid_ce',
    buffer_size=2)
log.info('Curves can be viewed at: http://{}/visualizer?id={}'.format(
    args.localhost, model_id))
step = 0
while step < loop_config['num_steps']:
    # Validation
    valid_ce = 0
    for st, nd in BatchIterator(num_ex_val, batch_size=64,
                                progress_bar=False):
        # NOTE(review): fragment ends at this loop header in the source;
        # the loop body continues beyond this view.
# Script fragment: initialize TF variables, build a timestamped model ID for
# the 'vae_mnist_half' task, derive results/log folders, and create logp
# loggers before the training loop.
# NOTE(review): newlines were collapsed in the source; structure below is
# reconstructed. Legacy TF (<1.0) APIs in use.
sess.run(tf.initialize_all_variables())
saver = tf.train.Saver(tf.all_variables())
task_name = 'vae_mnist_half'
time_obj = datetime.datetime.now()
# Model ID embeds the task name plus a YYYYMMDDHHMMSS timestamp.
model_id = timestr = '{}-{:04d}{:02d}{:02d}{:02d}{:02d}{:02d}'.format(
    task_name, time_obj.year, time_obj.month, time_obj.day,
    time_obj.hour, time_obj.minute, time_obj.second)
results_folder = args.results
logs_folder = args.logs
exp_folder = os.path.join(results_folder, model_id)
exp_logs_folder = os.path.join(logs_folder, model_id)
# Create time series logger
train_logger = TimeSeriesLogger(
    os.path.join(exp_logs_folder, 'train_logp.csv'), 'train_logp',
    buffer_size=25)
valid_logger = TimeSeriesLogger(
    os.path.join(exp_logs_folder, 'valid_logp.csv'), 'valid_logp',
    buffer_size=2)
log.info(
    'Curves can be viewed at: http://{}/visualizer?id={}'.format(
        args.localhost, model_id))
# Fixed seed (2) for reproducible sampling.
random = np.random.RandomState(2)
step = 0
while step < loop_config['num_steps']:
    # Validation
    valid_log_px_lb = 0
    log.info('Running validation')
    # NOTE(review): fragment ends here; the loop body continues beyond
    # this view.
# Script fragment: near-duplicate of the earlier CE-logger setup fragment,
# with the first two lines of the validation batch loop visible.
# NOTE(review): newlines were collapsed in the source; structure below is
# reconstructed.
num_ex = inp_all.shape[0]
log.info('{} training examples'.format(num_ex))
inp_all_val = dataset['valid']['input']
lab_seg_all_val = dataset['valid']['label_segmentation']
lab_obj_all_val = dataset['valid']['label_objectness']
num_ex_val = inp_all_val.shape[0]
log.info('{} validation examples'.format(num_ex_val))
# Create saver
saver = tf.train.Saver(tf.all_variables())
# saver = tf.train.Saver(tf.trainable_variables())
# Create time series logger
train_ce_logger = TimeSeriesLogger(
    os.path.join(exp_logs_folder, 'train_ce.csv'), 'train_ce',
    buffer_size=25)
valid_ce_logger = TimeSeriesLogger(
    os.path.join(exp_logs_folder, 'valid_ce.csv'), 'valid_ce',
    buffer_size=2)
log.info(
    'Curves can be viewed at: http://{}/visualizer?id={}'.format(
        args.localhost, model_id))
step = 0
while step < loop_config['num_steps']:
    # Validation
    valid_ce = 0
    for st, nd in BatchIterator(num_ex_val, batch_size=64,
                                progress_bar=False):
        # Slice the current validation mini-batch [st, nd).
        inp_batch = inp_all_val[st: nd]
        lab_seg_batch = lab_seg_all_val[st: nd]
        # NOTE(review): fragment ends here; the loop body continues beyond
        # this view.
# Model ID model_id = get_model_id('vae_mnist') results_folder = args.results exp_folder = os.path.join(results_folder, model_id) # Logger if args.logs: logs_folder = args.logs logs_folder = os.path.join(logs_folder, model_id) log = logger.get(os.path.join(logs_folder, 'raw')) # Create time series logger logp_logger = TimeSeriesLogger(os.path.join(logs_folder, 'logp.csv'), ['train logp', 'valid logp'], name='Log prob', buffer_size=1) henc_sparsity_logger = TimeSeriesLogger( os.path.join(logs_folder, 'henc_sparsity.csv'), 'henc sparsity', name='Encoder hidden activation sparsity', buffer_size=1) hdec_sparsity_logger = TimeSeriesLogger( os.path.join(logs_folder, 'hdec_sparsity.csv'), 'hdec sparsity', name='Decoder hidden activation sparsity', buffer_size=1) step_time_logger = TimeSeriesLogger(os.path.join( logs_folder, 'step_time.csv'), 'step time (ms)', buffer_size=10)
# Script fragment: build a timestamped model ID for the 'draw_mnist' task,
# optionally create CE / step-time loggers, seed the RNG, and enter the
# training loop.
# NOTE(review): newlines were collapsed in the source; structure below is
# reconstructed.
task_name = 'draw_mnist'
time_obj = datetime.datetime.now()
# Model ID embeds the task name plus a YYYYMMDDHHMMSS timestamp.
model_id = timestr = '{}-{:04d}{:02d}{:02d}{:02d}{:02d}{:02d}'.format(
    task_name, time_obj.year, time_obj.month, time_obj.day,
    time_obj.hour, time_obj.minute, time_obj.second)
results_folder = args.results
logs_folder = args.logs
exp_folder = os.path.join(results_folder, model_id)
# Create time series logger
if args.logs:
    exp_logs_folder = os.path.join(logs_folder, model_id)
    train_ce_logger = TimeSeriesLogger(
        os.path.join(exp_logs_folder, 'train_ce.csv'), 'train_ce',
        buffer_size=25)
    valid_ce_logger = TimeSeriesLogger(
        os.path.join(exp_logs_folder, 'valid_ce.csv'), 'valid_ce',
        buffer_size=2)
    step_time_logger = TimeSeriesLogger(
        os.path.join(exp_logs_folder, 'step_time.csv'), 'step time (ms)',
        buffer_size=25)
    log.info(
        'Curves can be viewed at: http://{}/visualizer?id={}'.format(
            args.localhost, model_id))
# Seed from the CLI for reproducibility.
random = np.random.RandomState(args.seed)
while step < loop_config['num_steps']:
    # Validation
    # NOTE(review): fragment ends at this loop header in the source; the
    # loop body (and the initialization of `step`) is outside this view.
def get_ts_loggers(model_opt, restore_step=0):
    """Create the full set of time-series CSV loggers for an experiment.

    Box-loss and GT-knob curves are only added for attention models
    (``model_opt['type'] == 'attention'``).

    Args:
        model_opt: model options dict; only the 'type' key is consulted.
        restore_step: step counter to resume curves from after a checkpoint
            restore (0 for a fresh run).

    Returns:
        dict: key -> TimeSeriesLogger, files rooted at the module-level
        ``logs_folder``.
    """
    def _make(fname, labels, name):
        # All loggers share the same folder, buffer size and restore step.
        return TimeSeriesLogger(
            os.path.join(logs_folder, fname), labels, name=name,
            buffer_size=1, restore_step=restore_step)

    loggers = {}
    # (key, csv filename, series label(s), display name); insertion order
    # preserved from the original hand-written version.
    specs = [
        ('loss', 'loss.csv', ['train', 'valid'], 'Loss'),
        ('conf_loss', 'conf_loss.csv', ['train', 'valid'],
         'Confidence Loss'),
        ('segm_loss', 'segm_loss.csv', ['train', 'valid'],
         'Segmentation Loss'),
        ('iou', 'iou.csv',
         ['train soft', 'valid soft', 'train hard', 'valid hard'], 'IoU'),
        ('wt_cov', 'wt_cov.csv', ['train', 'valid'], 'Weighted Coverage'),
        ('unwt_cov', 'unwt_cov.csv', ['train', 'valid'],
         'Unweighted Coverage'),
        ('dice', 'dice.csv', ['train', 'valid'], 'Dice'),
        ('dic', 'dic.csv', ['train', 'valid'], 'DiC'),
        ('dic_abs', 'dic_abs.csv', ['train', 'valid'], '|DiC|'),
        ('learn_rate', 'learn_rate.csv', 'learning rate', 'Learning rate'),
        ('count_acc', 'count_acc.csv', ['train', 'valid'], 'Count acc'),
        ('step_time', 'step_time.csv', 'step time (ms)', 'Step time'),
    ]
    for key, fname, labels, name in specs:
        loggers[key] = _make(fname, labels, name)
    if model_opt['type'] == 'attention':
        loggers['box_loss'] = _make(
            'box_loss.csv', ['train', 'valid'], 'Box Loss')
        loggers['gt_knob'] = _make(
            'gt_knob.csv', ['box', 'segmentation'], 'GT mix')
    return loggers
# Script fragment: load the dataset, start a TF session (restoring from a
# checkpoint when requested), and set up dashboard CSV loggers.
# NOTE(review): newlines were collapsed in the source; structure below is
# reconstructed. `tf.initialize_all_variables` is a legacy TF (<1.0) API.
log.info('Loading dataset')
# 10:1 train/validation split of args.num_ex examples.
dataset = get_dataset(args.dataset, data_opt, args.num_ex,
                      int(args.num_ex / 10))
sess = tf.Session()
if args.restore:
    # Resume weights from the checkpoint file.
    saver.restore(sess, ckpt_fname)
else:
    sess.run(tf.initialize_all_variables())
# Create time series logger
if args.logs:
    loss_logger = TimeSeriesLogger(
        os.path.join(logs_folder, 'loss.csv'), ['train', 'valid'],
        name='Loss', buffer_size=1)
    iou_logger = TimeSeriesLogger(
        os.path.join(logs_folder, 'iou.csv'),
        ['train soft', 'valid soft', 'train hard', 'valid hard'],
        name='IoU', buffer_size=1)
    count_acc_logger = TimeSeriesLogger(
        os.path.join(logs_folder, 'count_acc.csv'), ['train', 'valid'],
        name='Count accuracy', buffer_size=1)
    # Learning-rate curve is buffered (10) unlike the others (1).
    learn_rate_logger = TimeSeriesLogger(
        os.path.join(logs_folder, 'learn_rate.csv'), 'learning rate',
        name='Learning rate', buffer_size=10)
# Script fragment: load MNIST, build the model and TF session, then create
# the raw logger plus CE/accuracy/step-time CSV loggers when logging is
# enabled; otherwise fall back to a default logger.
# NOTE(review): newlines were collapsed in the source; structure below is
# reconstructed. Legacy TF (<1.0) API in use.
dataset = mnist.read_data_sets("../MNIST_data/", one_hot=True)
m = get_model(None, device=device)
sess = tf.Session()
sess.run(tf.initialize_all_variables())
# Logger
if args.logs:
    logs_folder = args.logs
    logs_folder = os.path.join(logs_folder, model_id)
    log = logger.get(os.path.join(logs_folder, 'raw'))
    # Create time series logger
    ce_logger = TimeSeriesLogger(
        os.path.join(logs_folder, 'ce.csv'), ['train', 'valid'],
        name='Cross Entropy', buffer_size=1)
    acc_logger = TimeSeriesLogger(
        os.path.join(logs_folder, 'acc.csv'), ['train', 'valid'],
        name='Accuracy', buffer_size=1)
    # Step time is buffered (10) since it is logged every step.
    step_time_logger = TimeSeriesLogger(
        os.path.join(logs_folder, 'step_time.csv'), 'step time (ms)',
        buffer_size=10)
    # Expose the raw log file to the dashboard.
    log_manager.register(log.filename, 'plain', 'Raw logs')
    log.info(
        'Curves can be viewed at: http://{}/deep-dashboard?id={}'.format(
            args.localhost, model_id))
else:
    log = logger.get()
# Script fragment: near-duplicate of the earlier 'vae_mnist_half' setup —
# initialize TF variables, build a timestamped model ID, derive folders,
# create logp loggers, and enter the training loop.
# NOTE(review): newlines were collapsed in the source; structure below is
# reconstructed. Legacy TF (<1.0) APIs in use.
sess.run(tf.initialize_all_variables())
saver = tf.train.Saver(tf.all_variables())
task_name = 'vae_mnist_half'
time_obj = datetime.datetime.now()
# Model ID embeds the task name plus a YYYYMMDDHHMMSS timestamp.
model_id = timestr = '{}-{:04d}{:02d}{:02d}{:02d}{:02d}{:02d}'.format(
    task_name, time_obj.year, time_obj.month, time_obj.day,
    time_obj.hour, time_obj.minute, time_obj.second)
results_folder = args.results
logs_folder = args.logs
exp_folder = os.path.join(results_folder, model_id)
exp_logs_folder = os.path.join(logs_folder, model_id)
# Create time series logger
train_logger = TimeSeriesLogger(
    os.path.join(exp_logs_folder, 'train_logp.csv'), 'train_logp',
    buffer_size=25)
valid_logger = TimeSeriesLogger(
    os.path.join(exp_logs_folder, 'valid_logp.csv'), 'valid_logp',
    buffer_size=2)
log.info('Curves can be viewed at: http://{}/visualizer?id={}'.format(
    args.localhost, model_id))
# Fixed seed (2) for reproducible sampling.
random = np.random.RandomState(2)
step = 0
while step < loop_config['num_steps']:
    # Validation
    valid_log_px_lb = 0
    log.info('Running validation')
    # NOTE(review): fragment ends here; the loop body continues beyond
    # this view.