def __init__(self, para, env, logger_name, caption=''):
    """Track a parameter tensor and create a visdom line plot for its norm.

    Args:
        para: parameter tensor whose (squared) norm will be plotted.
        env: visdom environment name.
        logger_name: suffix appended to the plot title.
        caption: optional caption, also embedded in the title.
    """
    self.para = para
    plot_title = 'norm_' + caption + logger_name
    self.logger = VisdomPlotLogger('line', env=env,
                                   opts={'title': plot_title,
                                         'caption': caption})
    self.iter_n = 0  # x-axis counter, advanced once per plot call
def __init__(self, para, env, logger_name, caption=''):
    """Track a parameter and create a visdom line plot for it."""
    # import pdb;pdb.set_trace()
    self.para = para
    # Identity hook: registers on the parameter without transforming grads.
    self.para.register_hook(lambda grad: grad)
    plot_title = caption + '\n' + logger_name
    self.logger = VisdomPlotLogger('line', env=env,
                                   opts={'title': plot_title,
                                         'caption': caption})
    self.iter_n = 0
def __addlogger(self, meter, ptype):
    """Register Train and Test visdom loggers for *meter*.

    Args:
        meter: meter name used as dict key and in plot titles.
        ptype: 'line' for curves or 'heatmap' for confusion matrices.
    """
    if ptype == 'line':
        for phase in ('Train', 'Test'):
            opts = {'title': self.title + ' ' + phase + ' ' + meter}
            self.logger[phase][meter] = VisdomPlotLogger(
                ptype, server=self.server, port=self.port, opts=opts)
    elif ptype == 'heatmap':
        # Heatmaps label both axes with the class indices.
        names = list(range(self.nclass))
        for phase in ('Train', 'Test'):
            opts = {'title': self.title + ' ' + phase + ' ' + meter,
                    'columnnames': names,
                    'rownames': names}
            self.logger[phase][meter] = VisdomLogger(
                'heatmap', server=self.server, port=self.port, opts=opts)
class Visualier():
    """Visulization, plot the logs during training process"""

    def __init__(self, num_classes=10):
        port = 8097
        self.loss_logger = VisdomPlotLogger(
            'line', port=port, win="Loss", opts={'title': 'Loss Logger'})
        self.acc_logger = VisdomPlotLogger(
            'line', port=port, win="acc", opts={'title': 'Accuracy Logger'})
        class_names = list(range(num_classes))
        self.confusion_logger = VisdomLogger(
            'heatmap', port=port, win="confusion",
            opts={'title': 'Confusion matrix',
                  'columnnames': class_names,
                  'rownames': class_names})

    def plot(self, train_acc, train_err, val_acc, val_err, confusion, epoch):
        """Log one epoch of train/val curves plus the confusion matrix."""
        # Train and val series share the loss and accuracy windows.
        for series, err, acc in (("train", train_err, train_acc),
                                 ("val", val_err, val_acc)):
            self.loss_logger.log(epoch, err, name=series)
            self.acc_logger.log(epoch, acc, name=series)
        self.confusion_logger.log(confusion)
        # NOTE(review): relies on a module-level `args` for n_epoches — confirm.
        print("epoch: [%d/%d]" % (epoch, args.n_epoches))
        print('Training loss: %.4f, accuracy: %.2f%%' % (train_err, train_acc))
        print('Validation loss: %.4f, accuracy: %.2f%%' % (val_err, val_acc))
class Norm(object):
    """Plot the running mean-squared magnitude of a parameter tensor."""

    def __init__(self, para, env, logger_name, caption=''):
        self.para = para
        self.logger = VisdomPlotLogger(
            'line', env=env,
            opts={'title': 'norm_' + caption + logger_name,
                  'caption': caption})
        self.iter_n = 0

    def plot(self):
        """Log mean of squared entries (squared L2 norm / numel) and advance."""
        value = torch.mean(self.para.data ** 2)
        self.logger.log(self.iter_n, value)
        self.iter_n += 1
class statJoints(statBase):
    """Joint-position error statistics on top of statBase.

    Tracks a weighted mean Euclidean joint error per batch and logs the
    running average to a visdom line plot ('Joint error').
    """

    def __init__(self, args, scale=None):
        super(statJoints, self).__init__(args)
        self.jointErrAvg = tnt.meter.AverageValueMeter()
        self.joint_logger = VisdomPlotLogger('line', opts={'title': 'Joint error'},
                                             env='PoseCapsules')
        # Per-axis scale factors; avoid the original mutable default [1.,1.,1.].
        self.scale = [1., 1., 1.] if scale is None else scale

    def reset(self):
        super(statJoints, self).reset()
        self.jointErrAvg.reset()

    def log(self, pbar, output, labels, stat=None):
        """Accumulate joint error for one batch and delegate to statBase.log.

        Assumes labels flatten to (batch, caps, n_joints, 3); output carries one
        extra trailing channel that is dropped — TODO confirm against caller.
        """
        shp = (labels.shape[0], labels.shape[1], -1, 3)
        labels_abs = labels.view(shp)
        output_abs = output.data[..., :-1].view(shp)
        err = output_abs - labels_abs
        # BUGFIX: torch.from_numpy() rejects a plain Python list (self.scale),
        # raising TypeError. Build the scale tensor with torch.tensor() on the
        # same device as the error instead of hard-coding .cuda().
        sc = torch.tensor(self.scale, dtype=torch.float32,
                          device=err.device)[None, None, None, :].expand(err.shape)
        err = err * sc
        # Weighted mix of non-root joint error (weight 20) and root error (1).
        mean = err[:, :, 1:, :].norm(dim=3).mean().item()
        mean1 = err[:, 0, 0, :].norm(dim=1).mean().item()
        mean = (20 * mean + mean1) / 21
        self.jointErrAvg.add(mean)
        metrics = OrderedDict()  # renamed from `dict` (shadowed the builtin)
        metrics['jointErr'] = self.jointErrAvg.value()[0]
        super(statJoints, self).log(pbar, output, labels, metrics, stat)

    def endTrainLog(self, epoch, groundtruth_image=None, recon_image=None):
        super(statJoints, self).endTrainLog(epoch, groundtruth_image, recon_image)
        self.joint_logger.log(epoch, self.jointErrAvg.value()[0], name='train')

    def endTestLog(self, epoch):
        super(statJoints, self).endTestLog(epoch)
        self.joint_logger.log(epoch, self.jointErrAvg.value()[0], name='test')
def __init__(self, meter, titles, plot_type='line'):
    """Create one VisdomPlotLogger per entry of *titles*.

    Args:
        meter: meter whose values will be plotted.
        titles: dict mapping logger key -> plot title (must be a dict).
        plot_type: visdom plot type, defaults to 'line'.
    """
    self.meter = meter
    assert type(titles) is dict
    # All loggers share the module-level `environment`.
    self.loggers = {key: VisdomPlotLogger(plot_type,
                                          opts={'title': titles[key]},
                                          env=environment)
                    for key in titles}
class BatchLRVisdom(object):
    """Plot learning-rate values to visdom, retrying when the server drops."""

    def __init__(self, title='TBD'):
        self._lr = VisdomPlotLogger(
            'line', opts={'title': '{:s} lr Curve'.format(title)})
        check_visdom_server(self._lr.viz)

    def log(self, idx, lr, train=None):
        """Log one lr sample; `train` selects the 'train' or 'test' series."""
        assert train is not None, \
            'train should be True or False, not {}'.format(train)
        series = 'train' if train else 'test'
        try:
            self._lr.log(idx, lr, name=series)
        except BaseException as err:
            # Server may have restarted: re-check it, report, then retry.
            check_visdom_server(self._lr.viz)
            print(err)
            print("***Retry LossVisdom")
            self.log(idx, lr, train)
class statNothing():
    """Minimal statistics tracker: a single average-loss meter logged to visdom."""

    def __init__(self):
        self.lossAvg = tnt.meter.AverageValueMeter()
        self.train_loss_logger = VisdomPlotLogger('line', opts={'title': 'Train Loss'},
                                                  env='PoseCapsules')
        self.test_loss_logger = VisdomPlotLogger('line', opts={'title': 'Test Loss'},
                                                 env='PoseCapsules')

    def reset(self):
        self.lossAvg.reset()

    def log(self, pbar, output, labels, dict=None, stat=None):
        """Write the running loss into the progress bar's postfix.

        `dict` keeps its original (builtin-shadowing) name for caller
        compatibility.
        """
        # BUGFIX: the default was a shared mutable OrderedDict() created once
        # at definition time; create a fresh one per call instead.
        if dict is None:
            dict = OrderedDict()
        dict['loss'] = self.lossAvg.value()[0]
        pbar.set_postfix(dict, refresh=False)

    def endTrainLog(self, epoch, groundtruth_image=None, recon_image=None):
        self.train_loss_logger.log(epoch, self.lossAvg.value()[0], name='loss')

    def endTestLog(self, epoch):
        self.test_loss_logger.log(epoch, self.lossAvg.value()[0], name='loss')
class AccuracyVisdom(object):
    '''Plot train and test accuracy curve together in a VisdomPlotLogger
    '''

    def __init__(self, title='TBD'):
        self._acc = VisdomPlotLogger(
            'line', opts={'title': '{:s} Accuracy Curve'.format(title)})
        check_visdom_server(self._acc.viz)

    def log(self, epoch, accuracy, train=None):
        """Log one accuracy sample on the 'train' or 'test' series."""
        assert train is not None, \
            'train should be True or False, not {}'.format(train)
        series = 'train' if train else 'test'
        try:
            self._acc.log(epoch, accuracy, name=series)
        except BaseException:
            # Server may have restarted: verify it is up, then retry.
            check_visdom_server(self._acc.viz)
            print("***Retry AccuracyVisdom")
            self.log(epoch, accuracy, train)
def __init__(self, layer, env, logger_name, caption):
    """Attach a forward hook to *layer* and create mean/std visdom plots.

    Args:
        layer: module whose forward activations are observed via hook_forward.
        env: visdom environment name.
        logger_name: suffix appended to each plot title.
        caption: caption text embedded in both titles.
    """
    self.layer = layer
    # hook_forward is a module-level callback recording activations.
    self.layer.register_forward_hook(hook_forward)
    self.mean_logger = VisdomPlotLogger(
        'line', env=env,
        opts={'title': 'mean_' + caption + logger_name, 'caption': caption})
    self.std_logger = VisdomPlotLogger(
        'line', env=env,
        opts={'title': 'std_' + caption + logger_name, 'caption': caption})
    self.iter_n = 0
def __init__(self, args):
    """Base statistics setup: loss meter plus visdom train/test loggers.

    Optional reconstruction and regularization meters are created according
    to flags on *args* (disable_recon, regularize).
    """
    self.args = args
    self.lossAvg = tnt.meter.AverageValueMeter()
    #self.lossSparseMu = tnt.meter.AverageValueMeter()
    #self.lossSparseVar = tnt.meter.AverageValueMeter()
    self.train_loss_logger = VisdomPlotLogger('line', opts={'title': 'Train Loss'}, env='PoseCapsules')
    self.test_loss_logger = VisdomPlotLogger('line', opts={'title': 'Test Loss'}, env='PoseCapsules')
    self.recon_sum = 0
    self.rout_id = 1
    if not self.args.disable_recon:
        # Reconstruction enabled: track recon loss and show image panels.
        self.reconLossAvg = tnt.meter.AverageValueMeter()
        self.ground_truth_logger_left = VisdomLogger('image', opts={'title': 'Ground Truth, left'}, env='PoseCapsules')
        self.reconstruction_logger_left = VisdomLogger('image', opts={'title': 'Reconstruction, left'}, env='PoseCapsules')
    if self.args.regularize:
        # Diagnostic meters for the regularization terms.
        self.regularizeLossAvg = tnt.meter.AverageValueMeter()
        self.logsigAvg = tnt.meter.AverageValueMeter()
        self.costmeanAvg = tnt.meter.AverageValueMeter()
        self.costAvg = tnt.meter.AverageValueMeter()
        self.aAvg = tnt.meter.AverageValueMeter()
def train(self, architecture, fold, lr, batch_size, epochs, epoch_size,
          validation_size, iter_size, patience=4, optim="adam",
          ignore_prev_best_loss=False):
    """Set up data, model, loss and visdom loggers, then launch _train().

    Args:
        architecture: model architecture name passed to get_model().
        fold: cross-validation fold index used by the data loaders.
        lr: initial learning rate.
        batch_size, epochs, epoch_size, validation_size, iter_size:
            training-loop sizing parameters forwarded to _train().
        patience: early-stopping patience, in epochs.
        optim: optimizer name, default "adam".
        ignore_prev_best_loss: restart best-loss tracking when True.
    """
    print("Start training with following params:",
          f"architecture = {architecture}",
          f"fold = {fold}",
          f"lr = {lr}",
          f"batch_size = {batch_size}",
          f"epochs = {epochs}",
          f"epoch_size = {epoch_size}",
          f"validation_size = {validation_size}",
          f"iter_size = {iter_size}",
          f"optim = {optim}",
          f"patience = {patience}")
    # Fold-aware loaders with train/valid-specific augmentation.
    train_loader, valid_loader, num_classes = get_loaders(
        batch_size,
        train_transform=train_augm(),
        valid_transform=valid_augm(),
        n_fold=fold)
    model = get_model(num_classes, architecture)
    # size_average=False: sum per-sample losses (legacy PyTorch argument).
    criterion = CrossEntropyLoss(size_average=False)
    self.ignore_prev_best_loss = ignore_prev_best_loss
    self.lr = lr
    self.model = model
    self.root = Path(f"../results/{architecture}")
    self.fold = fold
    self.optim = optim
    # Visdom line plots: train loss / learning rate / test loss & accuracy.
    self.train_loss_logger = VisdomPlotLogger('line', opts={'title': 'Train Loss'})
    self.lr_logger = VisdomPlotLogger(
        'line', opts={'title': 'Train Learning Rate'})
    self.test_loss_logger = VisdomPlotLogger('line', opts={'title': 'Test Loss'})
    self.test_accuracy_logger = VisdomPlotLogger(
        'line', opts={'title': 'Test Accuracy'})
    train_kwargs = dict(args=dict(iter_size=iter_size,
                                  n_epochs=epochs,
                                  batch_size=batch_size,
                                  epoch_size=epoch_size),
                        model=model,
                        criterion=criterion,
                        train_loader=train_loader,
                        valid_loader=valid_loader,
                        validation_size=validation_size,
                        patience=patience)
    self._train(**train_kwargs)
def _new_loss_logger(self) -> VisdomPlotLogger:
    """Build the loss VisdomPlotLogger inside this logger's environment.

    Returns:
        A line-plot logger titled with SimpleLogger.LOSS_LOGGER_NAME.
    """
    opts = {SimpleLogger.LOG_KEY_TITLE: SimpleLogger.LOSS_LOGGER_NAME}
    return VisdomPlotLogger(SimpleLogger.LOG_TYPE_LINE,
                            opts=opts,
                            env=self._env)
class WeightRatio(object):
    """Plot the grad-norm / weight-norm ratio of a parameter over iterations."""

    def __init__(self, para, env, logger_name, caption=''):
        # import pdb;pdb.set_trace()
        self.para = para
        # Identity hook: registered on the parameter, no gradient transform.
        self.para.register_hook(lambda grad: grad)
        self.logger = VisdomPlotLogger(
            'line', env=env,
            opts={'title': caption + '\n' + logger_name, 'caption': caption})
        self.iter_n = 0

    def plot(self):
        """Log ||grad||_2 / ||weight||_2 and advance the iteration counter."""
        grad_norm = torch.norm(self.para.grad.data, 2)
        weight_norm = torch.norm(self.para.data, 2)
        self.logger.log(self.iter_n, grad_norm / weight_norm)
        self.iter_n += 1
def __init__(self, num_classes=10):
    """Create visdom loggers for loss, accuracy and the confusion matrix."""
    port = 8097
    self.loss_logger = VisdomPlotLogger(
        'line', port=port, win="Loss", opts={'title': 'Loss Logger'})
    self.acc_logger = VisdomPlotLogger(
        'line', port=port, win="acc", opts={'title': 'Accuracy Logger'})
    class_names = list(range(num_classes))
    self.confusion_logger = VisdomLogger(
        'heatmap', port=port, win="confusion",
        opts={'title': 'Confusion matrix',
              'columnnames': class_names,
              'rownames': class_names})
class statClassification(statBase):
    """Classification statistics: accuracy meter on top of statBase's loss stats."""

    def __init__(self, args):
        super(statClassification, self).__init__(args)
        self.meter_accuracy = tnt.meter.ClassErrorMeter(accuracy=True)
        self.accuracy_logger = VisdomPlotLogger('line', opts={'title': 'accuracy'}, env='PoseCapsules')

    def reset(self):
        super(statClassification, self).reset()
        self.meter_accuracy.reset()

    def log(self, pbar, output, labels, stat=None):
        # Last channel of the squeezed output is fed as class scores;
        # NOTE(review): the double squeeze assumes singleton dims — confirm
        # the output shape against the caller.
        self.meter_accuracy.add(output.squeeze()[:,:,-1:].squeeze().data, labels.data)
        dict = OrderedDict()  # shadows the builtin; kept for consistency with siblings
        dict['acc'] = self.meter_accuracy.value()[0]
        super(statClassification, self).log(pbar, output, labels, dict, stat)

    def endTrainLog(self, epoch, groundtruth_image=None, recon_image=None):
        super(statClassification, self).endTrainLog(epoch, groundtruth_image, recon_image)
        self.accuracy_logger.log(epoch, self.meter_accuracy.value()[0], name='train')

    def endTestLog(self, epoch):
        super(statClassification, self).endTestLog(epoch)
        self.accuracy_logger.log(epoch, self.meter_accuracy.value()[0], name='test')
        print ("Test accuracy: ", self.meter_accuracy.value()[0])
def visual_log(title):
    """Return a pytorch tnt visual loggger.

    Parameters
    ----------
    title : str
        A title to describe the logging.

    Returns
    -------
    type
        pytorch visual logger.
    """
    plot_opts = dict(legend=['Training', 'Validation', 'Testing'],
                     xlabel='Epochs',
                     ylabel='Accuracy',
                     title=title)
    return VisdomPlotLogger('line', opts=plot_opts)
def train_valid_loop(train_loader, dev_loader, test_loader, args, model, fold=None):
    """Run the train/validate loop, keep the best checkpoint, then test it.

    Args:
        train_loader / dev_loader / test_loader: data iterators.
        args: run configuration (num_epochs, visdom flags, file paths, ...).
        model: classifier exposing save()/label_dict/updates.
        fold: optional cross-validation fold index; affects checkpoint naming.

    Returns:
        The result dict produced by validate() on the test set.
    """
    # --------------------------------------------------------------------------
    # TRAIN/VALID LOOP
    logger.info('-' * 100)
    stats = {
        'timer': utils.Timer(),
        'epoch': 0,
        'best_valid': 0,
        'best_epoch': 0,
        'fold': fold
    }
    start_epoch = 0
    # BUGFIX: fold_info was only assigned inside the "new best" branch, so
    # loading the best model after an early KeyboardInterrupt (or a run that
    # never improved) raised NameError. Compute it once up front.
    fold_info = f'.fold_{fold}' if fold is not None else ''

    if args.visdom:
        # add visdom logger code
        port = args.visdom_port
        train_loss_logger = VisdomPlotLogger(
            'line', port=port, opts={'title': f'{args.model_name} Train Loss'})
        train_metric_logger = VisdomPlotLogger(
            'line', port=port,
            opts={'title': f'{args.model_name} Train Class Accuracy'})
        idx2label = {i: label for label, i in model.label_dict.items()}
        label_names = [idx2label[i] for i in range(model.args.label_size)]
        train_confusion_logger = VisdomLogger(
            'heatmap', port=port,
            opts={
                'title': f'{args.model_name} Train Confusion Matrix',
                'columnnames': label_names,
                'rownames': label_names
            })
        valid_metric_logger = VisdomPlotLogger(
            'line', port=port,
            opts={'title': f'{args.model_name} Valid Class Accuracy'})
        valid_confusion_logger = VisdomLogger(
            'heatmap', port=port,
            opts={
                'title': f'{args.model_name} Valid Confusion Matrix',
                'columnnames': label_names,
                'rownames': label_names
            })
        train_confusion_meter = tnt.meter.ConfusionMeter(model.args.label_size,
                                                         normalized=True)
        valid_confusion_meter = tnt.meter.ConfusionMeter(model.args.label_size,
                                                         normalized=True)
    else:
        train_confusion_meter = None
        valid_confusion_meter = None

    try:
        for epoch in range(start_epoch, args.num_epochs):
            stats['epoch'] = epoch

            # Train
            loss = train(args, train_loader, model, stats)
            stats['train_loss'] = loss

            # Validate train
            train_res, train_cfm = validate(
                args, train_loader, model, stats, mode='train',
                confusion_meter=train_confusion_meter)
            for m in train_res:
                stats['train_' + m] = train_res[m]

            # Validate dev
            val_res, valid_cfm = validate(
                args, dev_loader, model, stats, mode='dev',
                confusion_meter=valid_confusion_meter)
            # NOTE(review): iterates train_res keys to index val_res — fine as
            # long as both result dicts share the same keys; confirm.
            for m in train_res:
                stats['dev_' + m] = val_res[m]

            if args.visdom:
                train_loss_logger.log(epoch, loss)
                train_metric_logger.log(epoch, train_res[args.valid_metric])
                train_confusion_logger.log(train_cfm)
                valid_metric_logger.log(epoch, val_res[args.valid_metric])
                valid_confusion_logger.log(valid_cfm)
                train_confusion_meter.reset()
                valid_confusion_meter.reset()

            # Save best valid
            if val_res[args.valid_metric] > stats['best_valid']:
                logger.info(
                    colored(
                        f'Best valid: {args.valid_metric} = {val_res[args.valid_metric]*100:.2f}% ',
                        'yellow') +
                    colored(
                        f'(epoch {stats["epoch"]}, {model.updates} updates)',
                        'yellow'))
                model.save(args.model_file + fold_info)
                stats['best_valid'] = val_res[args.valid_metric]
                stats['best_epoch'] = epoch
            logger.info('-' * 100)

            if args.stats_file:
                with open(args.stats_file, 'w') as f:
                    out_stats = stats.copy()
                    out_stats['timer'] = out_stats['timer'].time()
                    if fold is None:
                        del out_stats['fold']
                    f.write(json.dumps(out_stats) + '\n')

            if epoch - stats['best_epoch'] >= args.early_stopping:
                logger.info(
                    colored(
                        f'No improvement for {args.early_stopping} epochs, stop training.',
                        'red'))
                break
    except KeyboardInterrupt:
        logger.info(colored(f'User ended training. stop.', 'red'))

    logger.info('Load best model...')
    model = EntityClassifier.load(args.model_file + fold_info, args)
    # device = torch.device(f"cuda:{args.gpu}" if args.cuda else "cpu")
    # model.to(device)
    model.cuda()
    stats['epoch'] = stats['best_epoch']
    if fold is not None:
        mode = f'fold {fold} test'
    else:
        mode = 'test'
    test_result, _ = validate(args, test_loader, model, stats, mode=mode)
    return test_result
# Build the super-resolution network and adjacent-frame loss, move them to
# GPU when available, and wire up a torchnet Engine with visdom loggers.
model = Net(upscale_factor=UPSCALE_FACTOR)
criterion = AdjacentFrameLoss()
if torch.cuda.is_available():
    model = model.cuda()
    criterion = criterion.cuda()
print('# parameters:', sum(param.numel() for param in model.parameters()))

optimizer = optim.Adam(model.parameters(), lr=1e-2)
# Decay the learning rate 10x at epochs 30 and 80.
scheduler = MultiStepLR(optimizer, milestones=[30, 80], gamma=0.1)

engine = Engine()
meter_loss = tnt.meter.AverageValueMeter()
meter_psnr = PSNRMeter()

# Visdom line plots for train/val loss and PSNR curves.
train_loss_logger = VisdomPlotLogger('line', opts={'title': 'Train Loss'})
train_psnr_logger = VisdomPlotLogger('line', opts={'title': 'Train PSNR'})
val_loss_logger = VisdomPlotLogger('line', opts={'title': 'Val Loss'})
val_psnr_logger = VisdomPlotLogger('line', opts={'title': 'Val PSNR'})

# Hook functions are defined elsewhere in this file.
engine.hooks['on_sample'] = on_sample
engine.hooks['on_forward'] = on_forward
engine.hooks['on_start_epoch'] = on_start_epoch
engine.hooks['on_end_epoch'] = on_end_epoch
engine.train(processor, train_loader, maxepoch=NUM_EPOCHS, optimizer=optimizer)
def __addlogger(self, meter, ptype):
    """Create Train/Test visdom loggers for *meter* of plot type *ptype*.

    'line' plots may share a single window (self.plotstylecombined); 'heatmap'
    gets separate train_/test_ windows; 'bar' shares one window per meter.
    """
    if ptype == 'line':
        if self.plotstylecombined:
            # Combined style: Test reuses the Train logger so both series
            # land in the same window.
            opts = {'title': self.title + ' ' + meter}
            self.logger['Train'][meter] = VisdomPlotLogger(
                ptype, win=meter, env=self.env, server=self.server,
                port=self.port, opts=opts)
            opts = {}
            self.logger['Test'][meter] = self.logger['Train'][meter]
        else:
            opts = {'title': self.title + 'Train ' + meter}
            self.logger['Train'][meter] = VisdomPlotLogger(
                ptype, win=meter, env=self.env, server=self.server,
                port=self.port, opts=opts)
            opts = {'title': self.title + 'Test ' + meter}
            self.logger['Test'][meter] = VisdomPlotLogger(
                ptype, win=meter, env=self.env, server=self.server,
                port=self.port, opts=opts)
    elif ptype == 'heatmap':
        # Axis labels are the class indices.
        names = list(range(self.nclass))
        opts = {
            'title': self.title + ' Train ' + meter,
            'columnnames': names,
            'rownames': names
        }
        self.logger['Train'][meter] = VisdomLogger('heatmap',
                                                   win=('train_' + meter),
                                                   env=self.env,
                                                   server=self.server,
                                                   port=self.port,
                                                   opts=opts)
        opts = {
            'title': self.title + ' Test ' + meter,
            'columnnames': names,
            'rownames': names
        }
        self.logger['Test'][meter] = VisdomLogger('heatmap',
                                                  win=('test_' + meter),
                                                  env=self.env,
                                                  server=self.server,
                                                  port=self.port,
                                                  opts=opts)
    elif ptype == 'bar':
        names = list(range(self.nclass))
        opts = {'title': self.title + 'Train ' + meter, 'rownames': names}
        self.logger['Train'][meter] = VisdomLogger('bar',
                                                   win=meter,
                                                   env=self.env,
                                                   server=self.server,
                                                   port=self.port,
                                                   opts=opts)
        opts = {'title': self.title + 'Test ' + meter, 'rownames': names}
        self.logger['Test'][meter] = VisdomLogger('bar',
                                                  win=meter,
                                                  env=self.env,
                                                  server=self.server,
                                                  port=self.port,
                                                  opts=opts)
import torchnet as tnt

# Capsule-network training setup: model on GPU, Adam optimizer, torchnet
# engine, meters, and a full set of visdom dashboards.
model = CapsuleNet()
# model.load_state_dict(torch.load('epochs/epoch_327.pt'))
model.cuda()
print("# parameters:", sum(param.numel() for param in model.parameters()))

optimizer = Adam(model.parameters())

engine = Engine()
meter_loss = tnt.meter.AverageValueMeter()
meter_accuracy = tnt.meter.ClassErrorMeter(accuracy=True)
confusion_meter = tnt.meter.ConfusionMeter(NUM_CLASSES, normalized=True)

# Loss/accuracy curves, confusion heatmap, and image panels for the
# capsule decoder's reconstructions.
train_loss_logger = VisdomPlotLogger('line', opts={'title': 'Train Loss'})
train_error_logger = VisdomPlotLogger('line', opts={'title': 'Train Accuracy'})
test_loss_logger = VisdomPlotLogger('line', opts={'title': 'Test Loss'})
test_accuracy_logger = VisdomPlotLogger('line', opts={'title': 'Test Accuracy'})
confusion_logger = VisdomLogger('heatmap', opts={
    'title': 'Confusion matrix',
    'columnnames': list(range(NUM_CLASSES)),
    'rownames': list(range(NUM_CLASSES))
})
ground_truth_logger = VisdomLogger('image', opts={'title': 'Ground Truth'})
reconstruction_logger = VisdomLogger('image', opts={'title': 'Reconstruction'})
# Move any optimizer state tensors to the GPU (needed after loading a
# checkpoint that was saved on CPU).
for state in optimizer.state.values():
    for k, v in state.items():
        if torch.is_tensor(v):
            state[k] = v.cuda()

if args.use_cuda:
    lambda_ = torch.tensor([args.max_lambda]).cuda()

"""
Logging of loss, reconstruction and ground truth
"""
meter_loss = tnt.meter.AverageValueMeter()
meter_loss_dae = tnt.meter.AverageValueMeter()
setting_logger = VisdomLogger('text', opts={'title': 'Settings'}, env=args.env_name)
train_loss_logger = VisdomPlotLogger('line', opts={'title': 'Train Loss'}, env=args.env_name)

epoch_offset = 0
if args.load_loss:
    # Replay up to the last `args.load_loss` saved loss values so the visdom
    # curve continues from the previous run instead of starting empty.
    if os.path.isfile('loss.log'):
        with open("loss.log", "r") as lossfile:
            loss_list = []
            for loss in lossfile:
                loss_list.append(loss)
            # Keep only the most recent args.load_loss entries.
            while len(loss_list) > args.load_loss:
                loss_list.pop(0)
            for loss in loss_list:
                train_loss_logger.log(epoch_offset, float(loss))
                epoch_offset += 1

ground_truth_logger_left = VisdomLogger('image', opts={'title': 'Ground Truth, left'}, env=args.env_name)
ground_truth_logger_right = VisdomLogger('image', opts={'title': 'Ground Truth, right'}, env=args.env_name)
def plogger(title):
    """Shortcut: a line VisdomPlotLogger titled *title* using shared defaults."""
    opts = {'title': title}
    return VisdomPlotLogger('line', opts=opts, **self.defaults)
def get_iterator(mode):
    # MNIST loader; `mode` selects the train (True) or test (False) split.
    dataset = datasets.MNIST('../data', train=mode, download=True,
                             transform=transforms.Compose([transforms.ToTensor(), ]))
    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=args.batch_size,
                                         shuffle=True,
                                         num_workers=1,
                                         pin_memory=True)
    return loader


# VAE training setup: engine, loss meter, visdom loggers, model wrapper.
engine = BasicEngine()
meter_loss = tnt.meter.AverageValueMeter()

train_loss_logger = VisdomPlotLogger('line', opts={'title': 'Train Loss'}, env=args.env)
test_loss_logger = VisdomPlotLogger('line', opts={'title': 'Test Loss'}, env=args.env)
image_logger = VisdomLogger('image', env=args.env)

# 784 = flattened 28x28 MNIST image, 400 hidden units.
model = VAE(784, 400, args.noise_dim, batch_size=args.batch_size)
model.cuda()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
model.train()

model_wrapper = ModelWrapperVAE(model,
                                dataset_iter=get_iterator,
                                meters={"loss": meter_loss},
                                loggers={"train_loss": train_loss_logger,
                                         "test_loss": test_loss_logger,
                                         "generated_image": image_logger})
class_criterion_B, train_loader, test_loader, opt) if opt.pretrained: trained_model = load_weight(fullModel, opt.pretrained, verbose=True) # -- Evaluation nTestImages = reid_set.test_inds # [2 ** (n+1) for n in range(5)] cmc, simMat, _, avgSame, avgDiff = compute_cmc(reid_set, nTestImages, trained_model, 128) print(cmc) print(simMat) print(avgSame, avgDiff) sim_logger = VisdomLogger('heatmap', port=8097, opts={ 'title': 'simMat', 'columnnames': list(range(len(simMat[0]))), 'rownames': list(range(len(simMat))) }) cmc_logger = VisdomPlotLogger("line", win="cmc_curve") for i, v in enumerate(cmc): cmc_logger.log(i, v, name="cmc_curve") sim_logger.log(simMat) log.info("Saving results...") with open("cmc.pkl", 'w') as f: pickle.dump(cmc, f) with open("simMat.pkl", 'w') as f: pickle.dump(simMat, f)
# CUDA that shit. model.cuda() print("Model Parameters:", sum(param.numel() for param in model.parameters())) optimizer = Adam(model.parameters()) # Create the torchnet engine and metrics. engine = Engine() meter_loss = tnt.meter.AverageValueMeter() meter_accuracy = tnt.meter.ClassErrorMeter(accuracy=True, topk=[1, 5]) confusion_meter = tnt.meter.ConfusionMeter(NUM_CLASSES, normalized=True) # Create a bunch of loggers that can be viewed in the browser. train_loss_logger = VisdomPlotLogger('line', opts={'title': 'Train Loss'}) train_accuracy_logger_top1 = VisdomPlotLogger( 'line', opts={'title': 'Train Accuracy (Top1)'}) train_accuracy_logger_top5 = VisdomPlotLogger( 'line', opts={'title': 'Train Accuracy (Top5)'}) test_loss_logger = VisdomPlotLogger('line', opts={'title': 'Test Loss'}) test_accuracy_logger_top1 = VisdomPlotLogger( 'line', opts={'title': 'Test Accuracy (Top1)'}) test_accuracy_logger_top5 = VisdomPlotLogger( 'line', opts={'title': 'Test Accuracy (Top5)'}) confusion_logger = VisdomLogger('heatmap', opts={ 'title': 'Confusion matrix', 'columnnames': list(range(NUM_CLASSES)), 'rownames': list(range(NUM_CLASSES))
def main():
    """Train a small functional conv net on MNIST with a torchnet Engine and
    visdom logging (loss / class-error curves plus a confusion heatmap)."""
    # Manually initialized parameter dict consumed by the functional net `f`.
    params = {
        "conv0.weight": conv_init(1, 50, 5), "conv0.bias": torch.zeros(50),
        "conv1.weight": conv_init(50, 50, 5), "conv1.bias": torch.zeros(50),
        "linear2.weight": linear_init(800, 512), "linear2.bias": torch.zeros(512),
        "linear3.weight": linear_init(512, 10), "linear3.bias": torch.zeros(10),
    }
    params = {k: Variable(v, requires_grad=True) for k, v in params.items()}

    optimizer = torch.optim.SGD(params.values(), lr=0.01, momentum=0.9,
                                weight_decay=0.0005)

    engine = Engine()
    meter_loss = tnt.meter.AverageValueMeter()
    classerr = tnt.meter.ClassErrorMeter(accuracy=True)
    confusion_meter = tnt.meter.ConfusionMeter(10, normalized=True)

    port = 8097
    train_loss_logger = VisdomPlotLogger("line", port=port, opts={"title": "Train Loss"})
    train_err_logger = VisdomPlotLogger("line", port=port, opts={"title": "Train Class Error"})
    test_loss_logger = VisdomPlotLogger("line", port=port, opts={"title": "Test Loss"})
    test_err_logger = VisdomPlotLogger("line", port=port, opts={"title": "Test Class Error"})
    confusion_logger = VisdomLogger(
        "heatmap",
        port=port,
        opts={
            "title": "Confusion matrix",
            "columnnames": list(range(10)),
            "rownames": list(range(10)),
        },
    )

    def h(sample):
        # Forward pass + cross-entropy; sample[2] is the train/test flag
        # appended by on_sample so `f` can switch modes.
        inputs = Variable(sample[0].float() / 255.0)
        targets = Variable(torch.LongTensor(sample[1]))
        o = f(params, inputs, sample[2])
        return F.cross_entropy(o, targets), o

    def reset_meters():
        classerr.reset()
        meter_loss.reset()
        confusion_meter.reset()

    def on_sample(state):
        # Append the engine's train/test flag to each sample (read in h()).
        state["sample"].append(state["train"])

    def on_forward(state):
        classerr.add(state["output"].data, torch.LongTensor(state["sample"][1]))
        confusion_meter.add(state["output"].data, torch.LongTensor(state["sample"][1]))
        # .data[0] is legacy (pre-0.4) PyTorch scalar access.
        meter_loss.add(state["loss"].data[0])

    def on_start_epoch(state):
        reset_meters()
        state["iterator"] = tqdm(state["iterator"])

    def on_end_epoch(state):
        print("Training loss: %.4f, accuracy: %.2f%%" %
              (meter_loss.value()[0], classerr.value()[0]))
        train_loss_logger.log(state["epoch"], meter_loss.value()[0])
        train_err_logger.log(state["epoch"], classerr.value()[0])

        # do validation at the end of each epoch
        reset_meters()
        engine.test(h, get_iterator(False))
        test_loss_logger.log(state["epoch"], meter_loss.value()[0])
        test_err_logger.log(state["epoch"], classerr.value()[0])
        confusion_logger.log(confusion_meter.value())
        print("Testing loss: %.4f, accuracy: %.2f%%" %
              (meter_loss.value()[0], classerr.value()[0]))

    engine.hooks["on_sample"] = on_sample
    engine.hooks["on_forward"] = on_forward
    engine.hooks["on_start_epoch"] = on_start_epoch
    engine.hooks["on_end_epoch"] = on_end_epoch
    engine.train(h, get_iterator(True), maxepoch=10, optimizer=optimizer)
def train(self):
    """Train the seq2seq model, logging losses/scores to visdom and CSV and
    saving several checkpoint variants each epoch."""
    # vis = visdom.Visdom(env='temp_log')
    train_data, train_label = self._preprocess('train')
    train_iter = [[train_data[i], train_label[i]] for i in range(len(train_data))]
    test_data, test_label = self._preprocess('test')
    val_iter = [[test_data[i], test_label[i]] for i in range(len(test_data))]
    self.feature_size = train_data[0].shape[2]
    encoder = Encoder(self.feature_size, self.hidden_size, self.en_cnn_k_s,
                      self.strides, n_layers=1, dropout=0.5)
    decoder = Decoder(self.hidden_size, 1, n_layers=1, dropout=0.3)
    seq2seq = Seq2Seq(encoder, decoder).cuda()
    # seq2seq = torch.load('./model/newest_seq2seq')
    seq2seq.teacher_forcing_ratio = 0.3
    optimizer = optim.Adam(seq2seq.parameters(), lr=self.lr)
    # Alternative optimizers kept for reference:
    # optimizer = optim.SparseAdam(seq2seq,lr=self.lr)
    # optimizer = optim.Adamax(seq2seq.parameters(), lr=self.lr)
    # optimizer = optim.SGD(seq2seq.parameters(), lr=self.lr)
    # optimizer = optim.ASGD(seq2seq.parameters(), lr=self.lr)
    # optimizer = optim.RMSprop(seq2seq.parameters(), lr=self.lr)
    log = OrderedDict()
    log['train_loss'] = []
    log['val_loss'] = []
    log['test_loss'] = []
    log['teacher_ratio'] = []
    log['mean_er'] = []
    log['mean_abs_er'] = []
    log['score'] = []
    score_logger = VisdomPlotLogger('line', opts={'title': 'score logger'})
    loss_logger = VisdomPlotLogger('line', opts={'title': 'loss logger'})
    count = 0
    count2 = 0
    count3 = 0
    e0 = 120
    best_loss = 1
    for e in range(1, self.epochs + 1):
        train_loss = self._fit(e, seq2seq, optimizer, train_iter, grad_clip=5.0)
        # NOTE(review): "val" loss is computed on the training set and "test"
        # loss on the held-out set — the naming looks swapped; confirm intent.
        val_loss = self._evaluate(seq2seq, train_iter)
        test_loss, er = self._evaluate(seq2seq, val_iter, cal_er=True)
        score = self._cal_score(er)
        print(
            "[Epoch:%d][train_loss:%.4e][val_loss:%.4e][test_loss:%.4e][mean_er:%.4e][mean_abs_er:%.4e][score:%.4f]"
            % (e, train_loss, val_loss, test_loss, np.mean(er),
               np.mean(np.abs(er)), np.mean(score)))
        score_logger.log(e, np.mean(score))
        loss_logger.log(e, [train_loss, val_loss, test_loss])
        log['train_loss'].append(float(train_loss))
        log['val_loss'].append(float(val_loss))
        log['test_loss'].append(float(test_loss))
        log['teacher_ratio'].append(seq2seq.teacher_forcing_ratio)
        log['mean_er'].append(float(np.mean(er)))
        log['mean_abs_er'].append(float(np.mean(np.abs(er))))
        log['score'].append(float(np.mean(score)))
        pd.DataFrame(log).to_csv('./model/log.csv', index=False)
        # Checkpointing: best val loss, best mixed loss, best score, newest.
        if float(val_loss) == min(log['val_loss']):
            torch.save(seq2seq, './model/seq2seq')
        if (float(test_loss) * 11 + float(val_loss) * 6) / 17 <= best_loss:
            torch.save(seq2seq, './model/best_seq2seq')
            best_loss = (float(test_loss) * 11 + float(val_loss) * 6) / 17
        # if float(np.mean(np.abs(er))) == min(log['mean_abs_er']):
        #     torch.save(seq2seq,'./model/lowest_test_seq2seq')
        if float(np.mean(score)) == max(log['score']):
            torch.save(seq2seq, './model/best_score_seq2seq')
        torch.save(seq2seq, './model/newest_seq2seq')
        count2 += 1
        # Decay teacher forcing when train loss stays far below val loss for
        # 3 epochs (likely overfitting) or after 100 epochs without a decay.
        if float(train_loss) <= float(val_loss) * 0.2:
            count += 1
        else:
            count = 0
        if count >= 3 or count2 >= 100:
            seq2seq.teacher_forcing_ratio *= self.gama
            count -= 1
            count2 = 0
elif params.mode == 'test': utils.set_logger(os.path.join(experiment_path, 'test.log')) elif params.mode == 'load_train': utils.set_logger(os.path.join(experiment_path, 'load_train.log')) logger = logging.getLogger() port, env = 8098, args.visdom_env columnnames, rownames = list(range( 1, params.model_args["num_class"] + 1)), list( range(1, params.model_args["num_class"] + 1)) loss_logger = VisdomPlotLogger('line', port=port, opts={ 'title': params.experiment_path + '_Loss', 'legend': ['train', 'test'] }, win=None, env=env) loss_logger_split = VisdomPlotLogger( 'line', port=port, opts={'title': params.experiment_path + '_Loss_split'}, win=None, env=env) # error_logger = VisdomPlotLogger('line',port=port, opts={'title': params.experiment_path + '_Error @top1','legend':['train','test']},win=None,env=env) error_logger15 = VisdomPlotLogger( 'line', port=port, opts={