def on_end_epoch(hook_state, state):
    """End-of-epoch hook: validate, log meters, and checkpoint the model.

    Tracks the best validation loss in ``hook_state`` and implements early
    stopping via ``opt['train.patience']``.  Without a validation loader the
    current model is saved every epoch instead.

    Args:
        hook_state: dict persisted across epochs; uses keys 'best_loss'
            and 'wait' (early-stopping counter).
        state: engine state dict; reads 'model' and 'epoch', may set 'stop'.
    """
    if val_loader is not None:
        # Lazily initialize early-stopping bookkeeping on the first epoch.
        if 'best_loss' not in hook_state:
            hook_state['best_loss'] = np.inf
        if 'wait' not in hook_state:
            hook_state['wait'] = 0

        model_utils.evaluate(state['model'],
                             val_loader,
                             meters['val'],
                             desc="Epoch {:d} valid".format(state['epoch']))

    meter_vals = log_utils.extract_meter_values(meters)
    print("Epoch {:02d}: {:s}".format(
        state['epoch'], log_utils.render_meter_values(meter_vals)))

    meter_vals['epoch'] = state['epoch']
    # Append one JSON record per epoch to the training trace file.
    with open(trace_file, 'a') as f:
        json.dump(meter_vals, f)
        f.write('\n')

    if val_loader is not None:
        if meter_vals['val']['loss'] < hook_state['best_loss']:
            hook_state['best_loss'] = meter_vals['val']['loss']
            print(
                "==> best model (loss = {:0.6f}), saving model...".format(
                    hook_state['best_loss']))

            # Serialize on CPU so the checkpoint loads on CPU-only hosts,
            # then restore the training device.
            state['model'].cpu()
            torch.save(
                state['model'].state_dict(),
                os.path.join(opt['log.exp_dir'], 'best_model_state_dict.pt'))
            if opt['data.cuda']:
                state['model'].cuda()

            hook_state['wait'] = 0
        else:
            hook_state['wait'] += 1
            if hook_state['wait'] > opt['train.patience']:
                print("==> patience {:d} exceeded".format(
                    opt['train.patience']))
                state['stop'] = True
    else:
        # No validation set: checkpoint the full model object every epoch.
        state['model'].cpu()
        torch.save(state['model'],
                   os.path.join(opt['log.exp_dir'], 'best_model.pt'))
        if opt['data.cuda']:
            state['model'].cuda()
def on_end_epoch(hook_state, state):
    """End-of-epoch hook: validate via the shared loader, log meters, and
    checkpoint the best (or, without a validation set, the latest) model.

    Args:
        hook_state: dict persisted across epochs; uses keys 'best_loss'
            and 'wait' (early-stopping counter).
        state: engine state dict; reads 'model', 'loader', 'epoch',
            may set 'stop'.
    """
    def _save_checkpoint():
        # Serialize on CPU so the file loads on CPU-only machines,
        # then restore the training device.
        state['model'].cpu()
        # used with inception
        #torch.save(state['model'].encoder.added_layers, os.path.join(opt['log.exp_dir'], 'best_model.t7'))
        torch.save(state['model'],
                   os.path.join(opt['log.exp_dir'], 'best_model.t7'))
        if opt['data.cuda']:
            state['model'].cuda()

    if val_loader is not None:
        # Lazily initialize early-stopping bookkeeping on the first epoch.
        if 'best_loss' not in hook_state:
            hook_state['best_loss'] = np.inf
        if 'wait' not in hook_state:
            hook_state['wait'] = 0

        state['loader'].mode = 'val'  # added: switch shared loader to validation split
        # NOTE(review): val_loader gates this branch, but evaluation runs on
        # the shared state['loader'] (original val_loader call commented out).
        model_utils.evaluate(
            state['model'],
            state['loader'],  #val_loader,
            meters['val'],
            desc="Epoch {:d} valid".format(state['epoch']))

    meter_vals = log_utils.extract_meter_values(meters)
    print("Epoch {:02d}: {:s}".format(
        state['epoch'], log_utils.render_meter_values(meter_vals)))

    meter_vals['epoch'] = state['epoch']
    # Cast nested meter scalars to plain floats so json.dump never sees
    # tensor values ("this prevents cuda bugs" per the original comment).
    meter_vals = {
        k: ({kk: float(vv) for kk, vv in v.items()}
            if isinstance(v, dict) else v)
        for k, v in meter_vals.items()
    }
    with open(trace_file, 'a') as f:
        json.dump(meter_vals, f)
        f.write('\n')

    if val_loader is not None:
        if meter_vals['val']['loss'] < hook_state['best_loss']:
            hook_state['best_loss'] = meter_vals['val']['loss']
            print(
                "==> best model (loss = {:0.6f}), saving model...".format(
                    hook_state['best_loss']))
            _save_checkpoint()
            hook_state['wait'] = 0
        else:
            hook_state['wait'] += 1
            if hook_state['wait'] > opt['train.patience']:
                print("==> patience {:d} exceeded".format(
                    opt['train.patience']))
                state['stop'] = True
    else:
        # No validation set: overwrite the checkpoint every epoch.
        _save_checkpoint()
def on_end_epoch(hook_state, state):
    """End-of-epoch hook: validate, log meters, save periodic and best
    checkpoints (model + optimizer), and apply early stopping.

    Args:
        hook_state: dict persisted across epochs; uses keys 'best_loss'
            and 'wait' (early-stopping counter).
        state: engine state dict; reads 'model', 'optimizer', 'epoch',
            may set 'stop'.
    """
    def _save(model_name, optim_name=None):
        # Serialize on CPU so checkpoints load on CPU-only hosts, then
        # restore the training device.  Optimizer state is only saved
        # when a filename is given (matches original behavior).
        state['model'].cpu()
        torch.save(state['model'],
                   os.path.join(opt['log.exp_dir'], model_name))
        if optim_name is not None:
            torch.save(state['optimizer'].state_dict(),
                       os.path.join(opt['log.exp_dir'], optim_name))
        if opt['data.cuda']:
            state['model'].cuda()

    if val_loader is not None:
        # Lazily initialize early-stopping bookkeeping on the first epoch.
        if 'best_loss' not in hook_state:
            hook_state['best_loss'] = np.inf
        if 'wait' not in hook_state:
            hook_state['wait'] = 0

        model_utils.evaluate(state['model'], val_loader, meters['val'],
                             desc="Epoch {:d} valid".format(state['epoch']))

    meter_vals = log_utils.extract_meter_values(meters)
    print("Epoch {:02d}: {:s}".format(
        state['epoch'], log_utils.render_meter_values(meter_vals)))

    meter_vals['epoch'] = state['epoch']
    with open(trace_file, 'a') as f:
        json.dump(meter_vals, f)
        f.write('\n')

    # Periodic checkpoint: first epoch, then every tenth epoch.
    if state['epoch'] == 1 or state['epoch'] % 10 == 0:
        _save(f'epoch_{state["epoch"]}.pt', f'epoch_{state["epoch"]}_optim.pt')

    if val_loader is not None:
        if meter_vals['val']['loss'] < hook_state['best_loss']:
            hook_state['best_loss'] = meter_vals['val']['loss']
            print("==> best model (loss = {:0.6f}), saving model...".format(
                hook_state['best_loss']))
            _save('best_model.pt', 'best_model_optim.pt')
            hook_state['wait'] = 0
        else:
            hook_state['wait'] += 1
            if hook_state['wait'] > opt['train.patience']:
                print("==> patience {:d} exceeded".format(
                    opt['train.patience']))
                state['stop'] = True
    else:
        # No validation set: overwrite best_model.pt every epoch (the
        # optimizer state is intentionally not saved here, as originally).
        _save('best_model.pt')
def main(opt):
    """Evaluate a trained model on the test split of its dataset.

    Loads the architecture via ``model_utils.load``, restores weights from
    a state-dict checkpoint, rebuilds the data options the model was
    trained with (CLI values take precedence), and reports per-field
    mean +/- 95% CI over the test episodes.
    """
    # load model: architecture from opts, weights from the checkpoint
    model = model_utils.load(opt)
    state_dict = torch.load(opt['model.model_path'])
    model.load_state_dict(state_dict)
    model.eval()

    # load the options the model was trained with
    model_opt_file = os.path.join(os.path.dirname(opt['model.model_path']),
                                  'opt.json')
    with open(model_opt_file, 'r') as f:
        model_opt = json.load(f)

    # Postprocess arguments.  list(...) materializes the map object — a bare
    # Python 3 map iterator is exhausted after one pass and cannot be indexed.
    model_opt['model.x_dim'] = list(
        map(int, model_opt['model.x_dim'].split(',')))
    model_opt['log.fields'] = model_opt['log.fields'].split(',')

    # construct data options: CLI value wins, then the model's test
    # setting, then fall back to its training setting
    data_opt = {
        'data.' + k: v
        for k, v in filter_opt(model_opt, 'data').items()
    }

    episode_fields = {
        'data.test_way': 'data.way',
        'data.test_shot': 'data.shot',
        'data.test_query': 'data.query',
        'data.test_episodes': 'data.train_episodes'
    }

    for k, v in episode_fields.items():
        if opt[k] != 0:
            data_opt[k] = opt[k]
        elif model_opt[k] != 0:
            data_opt[k] = model_opt[k]
        else:
            data_opt[k] = model_opt[v]

    print("Evaluating {:d}-way, {:d}-shot with {:d} query examples/class over {:d} episodes".format(
        data_opt['data.test_way'], data_opt['data.test_shot'],
        data_opt['data.test_query'], data_opt['data.test_episodes']))

    # Fixed seed for reproducible episode sampling.
    torch.manual_seed(1234)
    if data_opt['data.cuda']:
        torch.cuda.manual_seed(1234)

    data = data_utils.load(data_opt, ['test'])

    if data_opt['data.cuda']:
        model.cuda()

    meters = {
        field: tnt.meter.AverageValueMeter()
        for field in model_opt['log.fields']
    }

    model_utils.evaluate(model, data['test'], meters, desc="test")

    for field, meter in meters.items():
        mean, std = meter.value()
        # 1.96 * std / sqrt(n) is the half-width of a 95% confidence interval.
        print("test {:s}: {:0.6f} +/- {:0.6f}".format(
            field, mean,
            1.96 * std / math.sqrt(data_opt['data.test_episodes'])))
def on_end_epoch(hook_state, state):
    """End-of-epoch hook: validate, log meters, and checkpoint on best
    validation *accuracy* (higher is better), with early stopping.

    NOTE(review): despite the name, hook_state['best_loss'] stores the best
    validation accuracy here — it is initialized to 0 and updated on '>'.

    Args:
        hook_state: dict persisted across epochs; uses keys 'best_loss'
            (best accuracy so far) and 'wait' (early-stopping counter).
        state: engine state dict; reads 'model' and 'epoch', may set 'stop'.
    """
    if val_loader is not None:
        # Lazily initialize bookkeeping on the first epoch.
        if 'best_loss' not in hook_state:
            hook_state['best_loss'] = 0
        if 'wait' not in hook_state:
            hook_state['wait'] = 0

        # Validation is inference-only, so disable autograd bookkeeping.
        with torch.no_grad():
            model_utils.evaluate(state['model'],
                                 val_loader,
                                 meters['val'],
                                 opt['model.stage'],
                                 desc="Epoch {:d} valid".format(
                                     state['epoch']))

    meter_vals = log_utils.extract_meter_values(meters)
    print("Epoch {:02d}: {:s}".format(
        state['epoch'], log_utils.render_meter_values(meter_vals)))

    meter_vals['epoch'] = state['epoch']
    with open(trace_file, 'a') as f:
        json.dump(meter_vals, f)
        f.write('\n')

    if val_loader is not None:
        if meter_vals['val']['acc'] > hook_state['best_loss']:
            hook_state['best_loss'] = meter_vals['val']['acc']
            print("==> best model (acc = {:0.6f}), saving model...".format(
                hook_state['best_loss']))

            # Serialize on CPU, then restore the training device.
            state['model'].cpu()
            torch.save(state['model'],
                       os.path.join(opt['log.exp_dir'], 'best_model.pt'))
            if opt['data.cuda']:
                state['model'].cuda()

            hook_state['wait'] = 0
        else:
            hook_state['wait'] += 1
            if hook_state['wait'] > opt['train.patience']:
                print("==> patience {:d} exceeded".format(
                    opt['train.patience']))
                state['stop'] = True
    else:
        # No validation set: checkpoint every epoch.
        state['model'].cpu()
        torch.save(state['model'],
                   os.path.join(opt['log.exp_dir'], 'best_model.pt'))
        if opt['data.cuda']:
            state['model'].cuda()
def main(opt):
    """Evaluate a fully-pickled model on the test split of its dataset.

    Loads the whole model object with torch.load, rebuilds the data options
    it was trained with (CLI values take precedence), and reports per-field
    mean +/- 95% CI over the test episodes.
    """
    # load model (full pickled module, not a state dict)
    model = torch.load(opt['model.model_path'])
    model.eval()

    # load the options the model was trained with
    model_opt_file = os.path.join(os.path.dirname(opt['model.model_path']),
                                  'opt.json')
    with open(model_opt_file, 'r') as f:
        model_opt = json.load(f)

    # Postprocess arguments.  list(...) materializes the map object — a bare
    # Python 3 map iterator is exhausted after one pass and cannot be indexed.
    model_opt['model.x_dim'] = list(
        map(int, model_opt['model.x_dim'].split(',')))
    model_opt['log.fields'] = model_opt['log.fields'].split(',')

    # construct data options: CLI value wins, then the model's test
    # setting, then fall back to its training setting
    data_opt = {
        'data.' + k: v
        for k, v in filter_opt(model_opt, 'data').items()
    }

    episode_fields = {
        'data.test_way': 'data.way',
        'data.test_shot': 'data.shot',
        'data.test_query': 'data.query',
        'data.test_episodes': 'data.train_episodes'
    }

    for k, v in episode_fields.items():
        if opt[k] != 0:
            data_opt[k] = opt[k]
        elif model_opt[k] != 0:
            data_opt[k] = model_opt[k]
        else:
            data_opt[k] = model_opt[v]

    print(
        "Evaluating {:d}-way, {:d}-shot with {:d} query examples/class over {:d} episodes"
        .format(data_opt['data.test_way'], data_opt['data.test_shot'],
                data_opt['data.test_query'], data_opt['data.test_episodes']))

    # Fixed seed for reproducible episode sampling.
    torch.manual_seed(1234)
    if data_opt['data.cuda']:
        torch.cuda.manual_seed(1234)

    print(data_opt)
    data = data_utils.load(data_opt, ['test'])

    if data_opt['data.cuda']:
        model.cuda()

    meters = {
        field: tnt.meter.AverageValueMeter()
        for field in model_opt['log.fields']
    }

    model_utils.evaluate(model, data['test'], meters, desc="test")

    for field, meter in meters.items():
        mean, std = meter.value()
        # 1.96 * std / sqrt(n) is the half-width of a 95% confidence interval.
        print("test {:s}: {:0.6f} +/- {:0.6f}".format(
            field, mean,
            1.96 * std / math.sqrt(data_opt['data.test_episodes'])))
def main(opt):
    """Evaluate a fully-pickled model on the test split and report per-class
    accuracy and precision in addition to the averaged meter fields.

    The evaluation stage ('protonet' or 'feat') is taken from opt['stage'].
    """
    # load model (full pickled module, not a state dict)
    model = torch.load(opt['model.model_path'])
    model.eval()

    # load the options the model was trained with
    model_opt_file = os.path.join(os.path.dirname(opt['model.model_path']),
                                  'opt.json')
    with open(model_opt_file, 'r') as f:
        model_opt = json.load(f)

    # Postprocess arguments.  list(...) materializes the map object — a bare
    # Python 3 map iterator is exhausted after one pass and cannot be indexed.
    model_opt['model.x_dim'] = list(
        map(int, model_opt['model.x_dim'].split(',')))
    model_opt['log.fields'] = model_opt['log.fields'].split(',')

    # construct data options: CLI value wins, then the model's test
    # setting, then fall back to its training setting
    data_opt = {
        'data.' + k: v
        for k, v in filter_opt(model_opt, 'data').items()
    }

    episode_fields = {
        'data.test_way': 'data.way',
        'data.test_shot': 'data.shot',
        'data.test_query': 'data.query',
        'data.test_episodes': 'data.train_episodes'
    }

    for k, v in episode_fields.items():
        if opt[k] != 0:
            data_opt[k] = opt[k]
        elif model_opt[k] != 0:
            data_opt[k] = model_opt[k]
        else:
            data_opt[k] = model_opt[v]

    print(
        "Evaluating {:d}-way, {:d}-shot with {:d} query examples/class over {:d} episodes"
        .format(data_opt['data.test_way'], data_opt['data.test_shot'],
                data_opt['data.test_query'], data_opt['data.test_episodes']))

    # Fixed seed for reproducible episode sampling.
    torch.manual_seed(1234)
    if data_opt['data.cuda']:
        torch.cuda.manual_seed(1234)

    data = data_utils.load(data_opt, ['test'])

    if data_opt['data.cuda']:
        model.cuda()

    meters = {
        field: tnt.meter.AverageValueMeter()
        for field in model_opt['log.fields']
    }

    # Both branches differed only in the stage argument; fold them.
    stage = 'protonet' if opt['stage'] == 'protonet' else 'feat'
    _, class_acc, class_prec, prec_micro = model_utils.evaluate(
        model,
        data['test'],
        meters,
        stage=stage,
        desc="test",
        evaluation=True)

    for field, meter in meters.items():
        mean, std = meter.value()
        # 1.96 * std / sqrt(n) is the half-width of a 95% confidence interval.
        print("test {:s}: {:0.6f} +/- {:0.6f}".format(
            field, mean,
            1.96 * std / math.sqrt(data_opt['data.test_episodes'])))

    mean_prec = 0
    n = 0
    for k in class_acc.keys():
        print('class {} acc: {:0.4f}'.format(k, class_acc[k]))
    for k in class_prec.keys():
        mean_prec += class_prec[k]
        n += 1
        print('class {} prec: {:0.4f}'.format(k, class_prec[k]))
    # Guard against an empty class_prec dict (original would divide by zero).
    mean_prec = mean_prec / n if n else 0.0
    print('Average prec(macro): {:0.4f}; Average prec(micro): {:0.4f}'.format(
        mean_prec, prec_micro))
def on_end_epoch(hook_state, state):
    """End-of-epoch hook: validate, log meters to the trace file and
    TensorBoard, checkpoint on best validation loss, and early-stop.

    Args:
        hook_state: dict persisted across epochs; uses keys 'best_loss'
            and 'wait' (early-stopping counter).
        state: engine state dict; reads 'model' and 'epoch', may set 'stop'.
    """
    if val_loader is not None:
        # Lazily initialize early-stopping bookkeeping on the first epoch.
        if 'best_loss' not in hook_state:
            hook_state['best_loss'] = np.inf
        if 'wait' not in hook_state:
            hook_state['wait'] = 0

        model_utils.evaluate(state['model'],
                             val_loader,
                             meters['val'],
                             desc="Epoch {:d} valid".format(state['epoch']))

    meter_vals = log_utils.extract_meter_values(meters)
    print("Epoch {:02d}: {:s}".format(
        state['epoch'], log_utils.render_meter_values(meter_vals)))

    meter_vals['epoch'] = state['epoch']
    with open(trace_file, 'a') as f:
        json.dump(meter_vals, f)
        f.write('\n')

    # NOTE(review): these scalars assume 'val' meters and
    # hook_state['best_loss'] always exist — with val_loader=None the
    # 'Best Loss' line would KeyError; confirm val_loader is always set
    # when this hook is installed.
    tensorboard.add_scalar("Value MAP", meter_vals['val']['map'],
                           state['epoch'])
    tensorboard.add_scalar("Value Loss", meter_vals['val']['loss'],
                           state['epoch'])
    tensorboard.add_scalar("Value Accuracy", meter_vals['val']['acc'],
                           state['epoch'])
    tensorboard.add_scalar("Train MAP", meter_vals['train']['map'],
                           state['epoch'])
    tensorboard.add_scalar("Train Loss", meter_vals['train']['loss'],
                           state['epoch'])
    tensorboard.add_scalar("Train Accuracy", meter_vals['train']['acc'],
                           state['epoch'])
    # Logged before the comparison below, so this is last epoch's best.
    tensorboard.add_scalar("Best Loss", hook_state['best_loss'],
                           state['epoch'])

    if val_loader is not None:
        if meter_vals['val']['loss'] < hook_state['best_loss']:
            hook_state['best_loss'] = meter_vals['val']['loss']
            print(
                "==> best model (loss = {:0.6f}), saving model...".format(
                    hook_state['best_loss']))

            # Serialize on CPU, then restore the training device.
            state['model'].cpu()
            torch.save(state['model'],
                       os.path.join(opt['log.exp_dir'], 'best_model.pt'))
            if opt['data.cuda']:
                state['model'].cuda()

            hook_state['wait'] = 0
        else:
            hook_state['wait'] += 1
            if hook_state['wait'] > opt['train.patience']:
                print("==> patience {:d} exceeded".format(
                    opt['train.patience']))
                state['stop'] = True
    else:
        # No validation set: checkpoint every epoch.
        state['model'].cpu()
        torch.save(state['model'],
                   os.path.join(opt['log.exp_dir'], 'best_model.pt'))
        if opt['data.cuda']:
            state['model'].cuda()
def main(opt, augment_stn):
    """Evaluate a fully-pickled model (optionally with an STN companion
    model) on the test split of its dataset.

    Args:
        opt: evaluation options; 'model.model_path' locates the checkpoint.
        augment_stn: when falsy, any loaded STN model is discarded.
    """
    # load model (full pickled module, not a state dict)
    model = torch.load(opt['model.model_path'])
    model.eval()

    # load the options the model was trained with
    model_opt_file = os.path.join(os.path.dirname(opt['model.model_path']),
                                  'opt.json')
    with open(model_opt_file, 'r') as f:
        model_opt = json.load(f)

    # Postprocess arguments.  list(...) materializes the map object — a bare
    # Python 3 map iterator is exhausted after one pass and cannot be indexed.
    model_opt['model.x_dim'] = list(
        map(int, model_opt['model.x_dim'].split(',')))
    model_opt['log.fields'] = model_opt['log.fields'].split(',')

    if model_opt['run_stn']:
        print("Loading STN here")
        try:
            stn_model = torch.load(opt['model.model_path'].replace(
                '.pt', '_stn.pt'))
            stn_model.eval()
        except Exception:
            # Narrowed from a bare except (which would also swallow
            # KeyboardInterrupt/SystemExit): a missing or unreadable STN
            # checkpoint falls back to evaluating without one.
            stn_model = None
    else:
        stn_model = None

    # Augment overrides it anyway
    if not augment_stn:
        stn_model = None

    # construct data options: CLI value wins, then the model's test
    # setting, then fall back to its training setting
    data_opt = {
        'data.' + k: v
        for k, v in filter_opt(model_opt, 'data').items()
    }

    episode_fields = {
        'data.test_way': 'data.way',
        'data.test_shot': 'data.shot',
        'data.test_query': 'data.query',
        'data.test_episodes': 'data.train_episodes'
    }

    for k, v in episode_fields.items():
        if opt[k] != 0:
            data_opt[k] = opt[k]
        elif model_opt[k] != 0:
            data_opt[k] = model_opt[k]
        else:
            data_opt[k] = model_opt[v]

    print(
        "Evaluating {:d}-way, {:d}-shot with {:d} query examples/class over {:d} episodes"
        .format(data_opt['data.test_way'], data_opt['data.test_shot'],
                data_opt['data.test_query'], data_opt['data.test_episodes']))

    # Seed from opts for reproducible episode sampling.
    torch.manual_seed(opt['seed'])
    if data_opt['data.cuda']:
        torch.cuda.manual_seed(opt['seed'])

    data = data_utils.load(data_opt, ['test'])

    if data_opt['data.cuda']:
        model.cuda()
        if stn_model is not None:
            stn_model.cuda()

    meters = {
        field: tnt.meter.AverageValueMeter()
        for field in model_opt['log.fields']
    }

    model_utils.evaluate(model, stn_model, data['test'], meters, desc="test")

    for field, meter in meters.items():
        mean, std = meter.value()
        # 1.96 * std / sqrt(n) is the half-width of a 95% confidence interval.
        print("test {:s}: {:0.6f} +/- {:0.6f}".format(
            field, mean,
            1.96 * std / math.sqrt(data_opt['data.test_episodes'])))
def main(opt):
    """Evaluate a trained model on the birds dataset with hard-coded
    5-way 5-shot episodes sampled by PytorchBirdsDataLoader.

    NOTE(review): the episode settings derived from opts are overridden
    below with fixed values for the birds dataset.
    """
    # load model
    #model = torch.load(opt['model.model_path'])
    print('opt:', opt)
    model = model_utils.load(opt)
    model.eval()

    # load the options the model was trained with
    model_opt_file = os.path.join(os.path.dirname(opt['model.model_path']),
                                  'opt.json')
    with open(model_opt_file, 'r') as f:
        model_opt = json.load(f)

    # Postprocess arguments.  list(...) materializes the map object — a bare
    # Python 3 map iterator is exhausted after one pass and cannot be indexed.
    model_opt['model.x_dim'] = list(
        map(int, model_opt['model.x_dim'].split(',')))
    model_opt['log.fields'] = model_opt['log.fields'].split(',')

    # construct data options: CLI value wins, then the model's test
    # setting, then fall back to its training setting
    data_opt = {
        'data.' + k: v
        for k, v in filter_opt(model_opt, 'data').items()
    }

    episode_fields = {
        'data.test_way': 'data.way',
        'data.test_shot': 'data.shot',
        'data.test_query': 'data.query',
        'data.test_episodes': 'data.train_episodes'
    }

    for k, v in episode_fields.items():
        if opt[k] != 0:
            data_opt[k] = opt[k]
        elif model_opt[k] != 0:
            data_opt[k] = model_opt[k]
        else:
            data_opt[k] = model_opt[v]

    # added for birds dataset: fixed 5-way / 5-shot evaluation
    data_opt['data.test_way'] = 5
    data_opt['data.test_shot'] = 5
    data_opt['data.test_query'] = (41 - 5)  # max amount possible
    # average over 1000 randomly generated episodes
    data_opt['data.test_episodes'] = 1000

    print(
        "Evaluating {:d}-way, {:d}-shot with {:d} query examples/class over {:d} episodes"
        .format(data_opt['data.test_way'], data_opt['data.test_shot'],
                data_opt['data.test_query'], data_opt['data.test_episodes']))

    # Fixed seed for reproducible episode sampling.
    torch.manual_seed(1234)
    if data_opt['data.cuda']:
        torch.cuda.manual_seed(1234)

    #data = data_utils.load(data_opt, ['test'])
    test_way = data_opt['data.test_way']
    test_shot = data_opt['data.test_shot']
    test_query = data_opt['data.test_query']
    test_episodes = data_opt['data.test_episodes']
    print('test way:', test_way, 'test shot:', test_shot, 'test query:',
          test_query, 'test_episodes', test_episodes)

    # figure out how to go through all the data with 5 shot
    data = PytorchBirdsDataLoader(n_episodes=test_episodes,
                                  n_way=test_way,
                                  n_query=test_query,
                                  n_support=test_shot)

    if data_opt['data.cuda']:
        model.cuda()

    meters = {
        field: tnt.meter.AverageValueMeter()
        for field in model_opt['log.fields']
    }

    model_utils.evaluate(model, data, meters, desc="test")

    for field, meter in meters.items():
        mean, std = meter.value()
        # 1.96 * std / sqrt(n) is the half-width of a 95% confidence interval.
        print("test {:s}: {:0.6f} +/- {:0.6f}".format(
            field, mean,
            1.96 * std / math.sqrt(data_opt['data.test_episodes'])))