def deploy_single(img_path):
    """Run the classifier on a single image and print the predicted coordinates.

    Builds the model, forces checkpoint restoration via ``opt.resume``, applies
    the validation transform, runs one forward pass, and maps the network
    output back into the original image's coordinate frame.

    Args:
        img_path (str): Path to the image file to run inference on.

    Returns:
        numpy.ndarray: Flattened prediction rescaled to the original image
        size. (Previously the function printed the result but returned
        ``None``; returning it is backward-compatible for callers that
        ignored the return value.)
    """
    # Build the model; weights come from the saved checkpoint, not ImageNet.
    model = MODEL(arch=opt.arch, pretrained=False, num_classes=2)
    # NOTE(review): mutates the global `opt` so that Trainer restores the
    # checkpoint from opt.model_dir below.
    opt.resume = True

    # Trainer initializes losses/optimizers and loads the saved weights.
    trainer = Trainer(model=model, model_dir=opt.model_dir, mode=CONST.VAL)

    # Load and transform the image; keep the original size (PIL gives (W, H))
    # so the prediction can be rescaled back afterwards.
    img = pil_loader(img_path)
    img_ori_size = np.array(img.size).copy()
    img, _, padding = data_transform(mode=CONST.VAL, img=img, label=[0, 0])

    # Run inference without tracking gradients.
    model.eval()
    with torch.no_grad():
        img = to_cuda(img.unsqueeze(0), trainer.computing_device)
        pred = model(img)

    pred = np.float64(pred.cpu().data.numpy()).flatten()
    print(pred)
    # Undo the transform: scale by the network input size, subtract the
    # padding added by data_transform, then normalize by the original size.
    # NOTE(review): img.shape[2:] is (H, W) while img_ori_size from PIL is
    # (W, H) -- confirm the axis order is intentional upstream.
    pred = (pred * np.array(img.shape[2:]) - np.array(padding)) / img_ori_size
    print("{} {}".format(pred[0], pred[1]))
    return pred
def evaluate(model, trainer, data_loader, epoch=0, batch_size=opt.batch_size,
             logger=None, tb_logger=None, max_iters=None):
    """Evaluate model

    Similar to `train()` structure, where the function includes bookkeeping
    features and wrapper items. The only difference is that evaluation will
    only occur until the `max_iters` if it is specified and includes an
    `EvalMetrics` initialization. The latter is currently used to save
    predictions and ground truths to compute the confusion matrix.

    Args:
        model: Classification model
        trainer (Trainer): Training wrapper
        data_loader (torch.data.Dataloader): Generator data loading instance
        epoch (int): Current epoch
        logger (Logger): Logger. Used to display/log metrics
        tb_logger (SummaryWriter): Tensorboard Logger
        batch_size (int): Batch size. NOTE: this value is overwritten inside
            the loop with each batch's actual size (the last batch may be
            smaller), so the parameter only serves as an initial default.
        max_iters (int): Max iterations

    Returns:
        float: Loss average
        float: Accuracy average
        float: Run time average
        EvalMetrics: Evaluation wrapper to compute CMs
    """
    criterion = trainer.criterion

    # Initialize meter and metrics
    meter = get_meter(meters=['batch_time', 'loss', 'acc'])
    predictions, gtruth, ids = [], [], []
    classes = data_loader.dataset.classes
    metrics = EvalMetrics(classes, predictions, gtruth, ids, trainer.model_dir)

    # Switch to evaluate mode
    model.eval()
    # Guard against an empty data_loader: `i` is referenced in the final log
    # below, which would otherwise raise NameError.
    i = 0
    with torch.no_grad():
        for i, batch in enumerate(data_loader):
            # process batch items: images, labels
            img = to_cuda(batch[CONST.IMG], trainer.computing_device)
            target = to_cuda(batch[CONST.LBL], trainer.computing_device,
                             label=True)
            # renamed from `id` to avoid shadowing the builtin
            sample_ids = batch[CONST.ID]

            # compute output
            end = time.time()
            logits = model(img)
            loss = criterion(logits, target)
            acc = accuracy(logits, target)
            # Use the real size of this batch (last batch may be partial).
            batch_size = list(batch[CONST.LBL].shape)[0]

            # update running loss/accuracy meters
            meter['acc'].update(acc, batch_size)
            meter['loss'].update(loss, batch_size)

            # accumulate predictions/ground truths for the confusion matrix
            metrics.update(logits, target, sample_ids)

            # measure elapsed time
            meter['batch_time'].update(time.time() - end, batch_size)

            if i % opt.print_freq == 0:
                log = 'EVAL [{:02d}][{:2d}/{:2d}] TIME {:10} ACC {:10} LOSS {' \
                      ':10}'.format(
                          epoch, i, len(data_loader),
                          "{t.val:.3f} ({t.avg:.3f})".format(t=meter['batch_time']),
                          "{t.val:.3f} ({t.avg:.3f})".format(t=meter['acc']),
                          "{t.val:.3f} ({t.avg:.3f})".format(t=meter['loss']))
                logger.info(log)

                if tb_logger is not None:
                    tb_logger.add_scalar('test/loss', meter['loss'].val, epoch)
                    tb_logger.add_scalar('test/accuracy', meter['acc'].val, epoch)

            # early exit for quick/partial evaluation runs
            if max_iters is not None and i >= max_iters:
                break

    # Print last eval
    log = 'EVAL [{:02d}][{:2d}/{:2d}] TIME {:10} ACC {:10} LOSS {' \
          ':10}'.format(
              epoch, i, len(data_loader),
              "{t.val:.3f} ({t.avg:.3f})".format(t=meter['batch_time']),
              "{t.val:.3f} ({t.avg:.3f})".format(t=meter['acc']),
              "{t.val:.3f} ({t.avg:.3f})".format(t=meter['loss']))
    logger.info(log)

    if tb_logger is not None:
        tb_logger.add_scalar('test-epoch/loss', meter['loss'].avg, epoch)
        tb_logger.add_scalar('test-epoch/accuracy', meter['acc'].avg, epoch)

    return meter['loss'].avg, meter['acc'].avg, meter['batch_time'], metrics
def train(model, trainer, train_loader, epoch, logger, tb_logger,
          batch_size=opt.batch_size, print_freq=opt.print_freq):
    """Train the model for one epoch

    Outside of the typical training loops, `train()` incorporates other
    useful bookkeeping features and wrapper functions. This includes things
    like keeping track of accuracy, loss, batch time to wrapping optimizers
    and loss functions in the `trainer`. Be sure to reference `trainer.py`
    or `utils/eval_utils.py` if extra detail is needed.

    Args:
        model: Classification model
        trainer (Trainer): Training wrapper
        train_loader (torch.data.Dataloader): Generator data loading instance
        epoch (int): Current epoch
        logger (Logger): Logger. Used to display/log metrics
        tb_logger (SummaryWriter): Tensorboard Logger (required; unlike
            `evaluate()`, this function does not guard against None)
        batch_size (int): Batch size
        print_freq (int): Print frequency

    Returns:
        float: Loss average over the epoch
        float: Accuracy average over the epoch
    """
    criterion = trainer.criterion
    optimizer = trainer.optimizer

    # Initialize meter to bookkeep the following parameters
    meter = get_meter(meters=['batch_time', 'data_time', 'loss', 'acc'])

    # Switch to training mode
    model.train(True)

    end = time.time()
    for i, batch in enumerate(train_loader):
        # process batch items: images, labels
        img = to_cuda(batch[CONST.IMG], trainer.computing_device)
        target = to_cuda(batch[CONST.LBL], trainer.computing_device,
                         label=True)

        # measure data loading time
        meter['data_time'].update(time.time() - end)

        # compute output
        end = time.time()
        logits = model(img)
        loss = criterion(logits, target)
        acc = accuracy(logits, target)

        # update running loss/accuracy meters; uses the configured batch_size
        # as the weight (the last batch may actually be smaller)
        meter['acc'].update(acc, batch_size)
        meter['loss'].update(loss, batch_size)

        # compute gradient and do sgd step
        optimizer.zero_grad()
        loss.backward()

        if i % print_freq == 0:
            log = 'TRAIN [{:02d}][{:2d}/{:2d}] TIME {:10} DATA {:10} ACC {:10} LOSS {:10}'.\
                format(epoch, i, len(train_loader),
                       "{t.val:.3f} ({t.avg:.3f})".format(t=meter['batch_time']),
                       "{t.val:.3f} ({t.avg:.3f})".format(t=meter['data_time']),
                       "{t.val:.3f} ({t.avg:.3f})".format(t=meter['acc']),
                       "{t.val:.3f} ({t.avg:.3f})".format(t=meter['loss']))
            logger.info(log)

            tb_logger.add_scalar('train/loss', meter['loss'].val,
                                 epoch * len(train_loader) + i)
            tb_logger.add_scalar('train/accuracy', meter['acc'].val,
                                 epoch * len(train_loader) + i)
            tb_logger.add_scalar('data_time', meter['data_time'].val,
                                 epoch * len(train_loader) + i)
            tb_logger.add_scalar(
                'compute_time',
                meter['batch_time'].val - meter['data_time'].val,
                epoch * len(train_loader) + i)

        # apply the accumulated gradients (runs every iteration, after the
        # optional logging above)
        optimizer.step()

        # measure elapsed time
        meter['batch_time'].update(time.time() - end)
        end = time.time()

    # per-epoch summaries
    tb_logger.add_scalar('train-epoch/loss', meter['loss'].avg, epoch)
    tb_logger.add_scalar('train-epoch/accuracy', meter['acc'].avg, epoch)

    return meter['loss'].avg, meter['acc'].avg