Exemplo n.º 1
0
 def test(self):
     """Evaluate the model on the test set and log per-sample failures.

     Loads the latest checkpoint, runs the test loader without gradients,
     accumulates top-k accuracy, logs every misclassified sample
     (guid, true label, predicted labels), and finally logs the overall
     test accuracy.
     """
     epoch = self.load()
     self.test_logger = get_logger('test.{}.{}'.format(
         self.__class__.__name__, self.tag))
     acc_meter = AverageMeter('test_accuracy')
     for samples, labels in tqdm(self.dataset.test_loader(self.batch_size),
                                 desc='Test'):
         with tc.no_grad():
             guids, image, bow = samples
             preds = self.model(image, bow)
         acc = self.metric(preds, labels)
         acc_meter.add(acc.item(), labels.size(0))
         _, top = preds.topk(self.metric.top_k, dim=1)
         # A sample counts as correct if its true label appears anywhere
         # in the top-k predictions.
         match = labels.unsqueeze(dim=1).eq(top).any(dim=1, keepdim=False)
         for i, eq in enumerate(match):
             if not bool(eq.item()):
                 guid = guids[i]
                 label = self.dataset.labels.inverse_transform(
                     labels[i].item())
                 # Bug fix: the original rebound `preds` here, shadowing
                 # the model-output tensor; use a distinct name.
                 predicted = self.dataset.labels.inverse_transform(
                     top[i].cpu().numpy())
                 self.test_logger.info(
                     'Prediction failure: {} belongs to {}, but was predicted as {}'
                     .format(guid, label, predicted))
     self.test_logger.info(
         'After {} epochs of training, {} = {:.4f}'.format(
             epoch + 1, acc_meter.tag, acc_meter.read()))
Exemplo n.º 2
0
def validate(model, val_loader, device, min_depth, max_depth, cfg):
    """Run one validation pass over `val_loader`.

    Returns:
        (mean MAE, mean RMSE) over the validation set.
    """
    model.eval()

    metric = Metrics(max_depth=max_depth)
    mae_meter = AverageMeter()
    rmse_meter = AverageMeter()
    with torch.no_grad():
        for _, batch in tqdm(enumerate(val_loader)):
            rgb = batch['color'].to(device) * 255.
            sparse_depth = batch['depth_gt'].to(device)

            # Optionally stack a colorized-depth channel onto the input.
            if cfg.colorize:
                colorized = batch['depth_color'].to(device) * 255.
                net_input = torch.cat([sparse_depth, colorized], 1)
            else:
                net_input = sparse_depth

            valid_mask = (sparse_depth > 0).float()
            prediction, _ = model(net_input, valid_mask, rgb)
            if use_norm_depth:
                # Network predicts normalized depth in [0, 1]; rescale it
                # back to the metric depth range.
                prediction = torch.clamp(prediction, 0, 1.0)
                prediction = min_depth + prediction * (max_depth - min_depth)
            else:
                prediction = torch.clamp(prediction, min_depth, max_depth)
            prediction = prediction[:, 0:1].detach().cpu()

            ground_truth = batch['depth_sd_gt']
            metric.calculate(prediction, ground_truth)
            mae_meter.update(metric.get_metric('mae'), metric.num)
            rmse_meter.update(metric.get_metric('rmse'), metric.num)

    # Restore training mode for the caller.
    model.train()
    return mae_meter.avg, rmse_meter.avg
Exemplo n.º 3
0
 def train(self, num_epochs, resume=False):
     """Train for `num_epochs` epochs, checkpointing on validation gains.

     When `resume` is True, continues from the latest checkpoint;
     otherwise starts from epoch 0. After each epoch the model is
     evaluated, a checkpoint is dumped when validation accuracy improves,
     the LR decayer is stepped, and an epoch summary is logged.
     """
     start_epoch = self.load() if resume else 0
     if not resume:
         self.train_logger.info('-' * 100)
     timer = TimeMeter()
     loss_meter = AverageMeter('train_loss')
     for epoch in range(start_epoch, start_epoch + num_epochs):
         loss_meter.reset()
         loader = self.dataset.train_loader(self.batch_size,
                                            self.num_training_samples)
         for samples, labels in tqdm(
                 loader, desc='Train epoch {}'.format(epoch + 1)):
             _, image, bow = samples
             preds = self.model(image, bow)
             loss = self.loss_func(preds, labels)
             self.optimizer.zero_grad()
             loss.backward()
             self.optimizer.step()
             loss_meter.add(loss.item(), labels.size(0))
         valid_loss, valid_acc = self.eval()
         # Checkpoint only when validation accuracy beats the best so far.
         if self.decayer.is_better(valid_acc.read(), self.decayer.best):
             self.dump(epoch)
         self.decayer.step(valid_acc.read())
         self.train_logger.info(
             'Epoch {:02d}, elapsed Time {:.2f}, {} = {:.4f}, {} = {:.4f}, {} = {:.4f}'
             .format(epoch + 1, timer.read(), loss_meter.tag,
                     loss_meter.read(), valid_loss.tag, valid_loss.read(),
                     valid_acc.tag, valid_acc.read()))
Exemplo n.º 4
0
 def eval(self):
     """Run one pass over the validation set.

     Returns:
         (loss_meter, acc_meter): AverageMeters holding the running
         validation loss and accuracy.
     """
     loss_meter = AverageMeter('valid_loss')
     acc_meter = AverageMeter('valid_accuracy')
     loader = self.dataset.valid_loader(self.batch_size)
     for samples, labels in tqdm(loader, desc='Validation'):
         # Forward pass only; gradients are not needed for validation.
         with tc.no_grad():
             _, image, bow = samples
             preds = self.model(image, bow)
         batch_size = labels.size(0)
         loss = self.loss_func(preds, labels)
         loss_meter.add(loss.item(), batch_size)
         accuracy = self.metric(preds, labels)
         acc_meter.add(accuracy.item(), batch_size)
     return loss_meter, acc_meter
Exemplo n.º 5
0
    def validate(self):
        """Evaluate the model on the validation loader.

        Returns:
            (average validation loss, validation accuracy).
        """
        self.model.eval()

        valid_loss = AverageMeter()
        valid_acc = AccuracyMeter()

        for i, (x, y) in enumerate(self.valid_loader):
            # NOTE(review): `volatile=True` is the legacy (pre-0.4 PyTorch)
            # way of disabling autograd; kept as-is for compatibility with
            # the rest of this codebase.
            x = Variable(x, volatile=True)
            y = Variable(y).long()
            if self.use_cuda:
                x = x.cuda()
                y = y.cuda()
            output = self.model(x)
            loss = F.cross_entropy(output, y)

            valid_loss.update(float(loss.data), x.size(0))

            # Top-1 prediction; count exact matches for accuracy.
            y_pred = output.data.max(dim=1)[1]
            correct = int(y_pred.eq(y.data).cpu().sum())
            valid_acc.update(correct, x.size(0))
        # Bug fix: the original referenced an undefined `epoch` variable
        # (NameError at runtime) and passed the accuracy where the format
        # string announced a loss. Report the actual average loss instead.
        print('\nValidation: Average batch loss: {:.6f}\n'.format(
            valid_loss.average))
        return valid_loss.average, valid_acc.accuracy
                              verbose=1,
                              min_lr=1e-7,
                              factor=.1)
# NOTE(review): this script fragment is truncated at both ends — the call
# whose keyword arguments appear just above is not fully visible, and the
# training loop below continues past this excerpt.
earlystop = EarlyStopping(mode='max', patience=EARLY_STOP, percentage=False)
# Mixed-precision (NVIDIA Apex AMP) setup at optimization level O1.
model, optimizer = amp.initialize(model,
                                  optimizer,
                                  opt_level='O1',
                                  verbosity=0)

if TRAIN:
    best_score, best_epoch, history = 0, 0, pd.DataFrame()
    for epoch in range(MAX_EPOCH):
        # Switch the first segmentation criterion to Lovasz hinge loss once
        # the configured epoch threshold is reached.
        if LOVASZ_HINGE and epoch + 1 >= LOVASZ_HINGE:
            seg_criterion_1 = lovasz_hinge
        tt0 = datetime.now()
        # Running meters for the classification / segmentation losses and
        # the segmentation metric over this epoch.
        clf_loss_meter = AverageMeter()
        seg_loss_1_meter = AverageMeter()
        seg_loss_2_meter = AverageMeter()
        seg_metric_meter = SegMeter()
        train_clf_labels, train_clf_preds = [], []

        optimizer.zero_grad()
        for it, (images, classes, masks) in enumerate(train_dl, 1):
            t0 = datetime.now()

            model = model.train()
            images, classes, masks = images.cuda(), classes.cuda().float(
            ), masks.cuda()
            # Model returns both classification and segmentation heads.
            clf_logits, seg_logits = model(images)

            clf_loss = clf_criterion(clf_logits, classes)
Exemplo n.º 7
0
# ArcFace margin loss; s (scale) and m (margin) come from CLI args.
criterion = ArcFaceLoss(args.arcface_s, args.arcface_m, crit=args.crit)
#optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=args.wd, nesterov=True)
#optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.wd)
optimizer = radam(model.parameters(),
                  lr=args.learning_rate,
                  weight_decay=args.wd)
# Cosine-annealed LR; T_max spans the full run in iterations
# (batches per epoch * epochs), so this is a per-step schedule.
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                 T_max=len(train_loader) *
                                                 args.epochs,
                                                 eta_min=1e-6)

# Training
# The model is saved to ./ckpt every epoch.
# Trains on all of the training data, without a validation dataset.
if not args.test:
    batch_time = AverageMeter()
    losses = AverageMeter()
    acc_score = AverageMeter()
    gap_score = AverageMeter()

    train_loss, train_acc = [], []
    best_acc, best_gap, best_epoch, best_gap_epoch = 0, 0, 0, 0

    end = time.time()
    start_epoch = 0
    if args.resume is not None:
        model.load_state_dict(torch.load(args.resume))
        # Assumes the checkpoint filename encodes the epoch number in the
        # three characters before the extension (e.g. '...007.pth') —
        # TODO(review): confirm against the checkpoint naming scheme.
        start_epoch = int(args.resume[-7:-4])
        print(f'Loaded {start_epoch} epoch..')
        start_epoch += 1
Exemplo n.º 8
0
    def run(self):
        """Evaluate the network on artificially sparsified LiDAR input.

        For each test sample: back-project the ground-truth depth map to a
        point cloud, drop scan lines to simulate a sparser sensor, rebuild
        a depth map from the reduced cloud, run depth completion on it, and
        print per-sample MAE / RMSE (with matplotlib visualization unless
        `opt.dump` is set).
        """
        self.net.eval()
        for loader in [self.test_loader]:
            for batch_idx, inputs in tqdm(enumerate(loader)):
                depth_gt = inputs['depth_gt'].float()
                depth_sd_gt = inputs['depth_sd_gt'].float()

                # Reduce scan lines: depth map -> point cloud -> keep every
                # 4th scan line -> back to a (1, 1, H, W) depth tensor.
                depth_map = depth_gt[0][0].numpy()
                pc, coord = project_depth_to_points(depth_map)
                reorg_pc, reorg_coord = restore_scan_line(pc, coord, verbose=not self.opt.dump)
                reduced_pc, reduced_coord = reduce_scan_line(reorg_pc, reorg_coord, step=4)
                # reduced_pc, reduced_coord = sample_scan_line(reorg_pc, reorg_coord, ratio=0.1)

                print('pc:', pc.shape[0], '->', reduced_pc.shape[0])

                reduced_depth = restore_depth_map(reduced_pc, reduced_coord, [self.opt.crop_h, self.opt.crop_w])
                reduced_depth = torch.from_numpy(reduced_depth).float()
                reduced_depth = reduced_depth.unsqueeze(0).unsqueeze(0)

                raw = reduced_depth.to(self.device)
                rgb = inputs['color'].float().to(self.device)
                rgb = rgb * 255.0
                # Bottom-crop everything to (crop_h, crop_w); the width is
                # expected to match already.
                assert raw.size()[2:] == rgb.size()[2:]
                h, w = raw.size()[2:]
                assert h >= self.crop_h
                assert w == self.crop_w  # 1216 don't need crop w
                h_cropped = h - self.crop_h
                depth_gt = depth_gt[:, :, h_cropped:h, 0:self.crop_w]
                depth_sd_gt = depth_sd_gt[:, :, h_cropped:h, 0:self.crop_w]
                raw = raw[:, :, h_cropped:h, 0:self.crop_w]
                rgb = rgb[:, :, h_cropped:h, 0:self.crop_w]

                mask = (raw > 0).float()
                output, _ = self.net(raw, mask, rgb)

                # Idiom fix: `use_norm_depth == False` -> `not use_norm_depth`.
                if not use_norm_depth:
                    output = torch.clamp(output, min=self.opt.min_depth, max=self.opt.max_depth)
                else:
                    # Network predicts normalized depth in [0, 1]; map it
                    # back to the metric depth range.
                    output = torch.clamp(output, min=0, max=1.0)
                    output = restore_depth(output, self.opt.min_depth, self.opt.max_depth)
                output = output[:, 0:1].detach().cpu()

                # Per-sample error metrics against the semi-dense GT,
                # reported in millimeters.
                metric = Metrics(max_depth=self.opt.max_depth)
                mae = AverageMeter()
                rmse = AverageMeter()
                metric.calculate(output, depth_sd_gt)
                mae.update(metric.get_metric('mae'), metric.num)
                rmse.update(metric.get_metric('rmse'), metric.num)
                print("model: mae {} rmse {}".
                      format(int(1000 * mae.avg), int(1000 * rmse.avg)))

                if not self.opt.dump:
                    plot3d(reduced_pc)
                    fig = plt.figure(num=batch_idx, figsize=(8, 10))
                    plt_img(fig, 4, 1, 1, plt, inputs['color'][0], 'color')
                    plt_img(fig, 4, 1, 2, plt, depth_gt[0], 'depth')
                    plt_img(fig, 4, 1, 3, plt, raw.cpu()[0], 'depth')
                    plt_img(fig, 4, 1, 4, plt, output[0], 'depth')
                    plt.tight_layout()
                    plt.show()