def val_epoch(model, dataloader):
    model.eval()
    losses_sbj = AverageMeter('Loss', ':.4e')
    losses_obj = AverageMeter('Loss', ':.4e')
    losses_rel = AverageMeter('Loss', ':.4e')
    losses_total = AverageMeter('Loss', ':.4e')

    for _, data in enumerate(dataloader):
        images, targets = data
        with torch.no_grad():
            _, metrics = model(images, targets)
        final_loss = metrics["loss_objectness"] + metrics["loss_rpn_box_reg"] + \
            metrics["loss_classifier"] + metrics["loss_box_reg"] + \
            metrics["loss_sbj"] + metrics["loss_obj"] + metrics["loss_rlp"]

        losses_sbj.update(metrics["loss_sbj"].item(), len(images))
        losses_obj.update(metrics["loss_obj"].item(), len(images))
        losses_rel.update(metrics["loss_rlp"].item(), len(images))
        losses_total.update(final_loss.item(), len(images))

    losses = {}
    losses['total_loss'] = losses_total.avg
    losses['sbj_loss'] = losses_sbj.avg
    losses['obj_loss'] = losses_obj.avg
    losses['rel_loss'] = losses_rel.avg
    return losses
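
Every snippet on this page assumes an AverageMeter helper in scope. The exact class varies by repo (some take a name and format string, others expose _get_sum() or average() instead of the bare attributes), but a minimal sketch compatible with the val / avg / sum / count interface used below is:

class AverageMeter(object):
    """Tracks the latest value and the running average of a metric (sketch)."""

    def __init__(self, name='', fmt=':f'):
        # name/fmt are optional extras that some of the examples below pass in
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        # val is the per-batch value, n the number of samples it covers
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        return ('{name} {val' + self.fmt + '} ({avg' + self.fmt + '})').format(
            name=self.name, val=self.val, avg=self.avg)

Typical usage, as in the snippet above: call meter.update(loss.item(), batch_size) once per batch, then read meter.avg at the end of the epoch.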
Example #2
    def test(self, val_loader, net, criterion):
        top1 = AverageMeter()
        top5 = AverageMeter()
        print_freq = 100
        # switch to evaluate mode
        net.eval()
        with torch.no_grad():
            for i, (input, label) in enumerate(val_loader):
                target = label.cuda()
                input = input.cuda()
                # forward
                prob1, cam_top1, M_p = net(input)
                crop_img = attention_crop_test(M_p, input, config.mask_test_th)
                crop_img = crop_img.cuda()
                prob2, cam_top1_2, _ = net(crop_img)

                # measure accuracy and record loss
                out = (F.softmax(prob1, dim=-1) + F.softmax(prob2, dim=-1)) / 2
                prec1, prec5 = accuracy(out, target, topk=(1, 5))
                top1.update(prec1[0], input.size(0))
                top5.update(prec5[0], input.size(0))

                if i % print_freq == 0:
                    print('Test: [{0}/{1}]\t'
                          'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                          'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                              i, len(val_loader), top1=top1, top5=top5))

            print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'.format(
                top1=top1, top5=top5))
        return top1.avg, top5.avg
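
The accuracy helper used here (and in most snippets below) is typically the top-k routine from the PyTorch ImageNet example; one snippet later spells the keyword top_k instead. A sketch consistent with calls like prec1, prec5 = accuracy(out, target, topk=(1, 5)):

def accuracy(output, target, topk=(1,)):
    """Computes precision@k for the specified values of k (sketch)."""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        # indices of the top-k predictions, one column per sample
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res

Each entry of res is a 1-element tensor, which is why the callers index it as prec1[0] before passing it to a meter.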
Example #3
def train(train_loader, net, optimizer, epoch, visualizer, idx, opt):
    # batch_time = AverageMeter()
    # data_time = AverageMeter()
    losses = AverageMeter()
    pckhs = AverageMeter()
    pckhs_origin_res = AverageMeter()
    # switch to train mode
    net.train()

    # end = time.time()
    for i, (img, heatmap, c, s, r, grnd_pts,
            normalizer) in enumerate(train_loader):
        quan_op.quantization()
        # """measure data loading time"""
        # data_time.update(time.time() - end)

        # input and groundtruth
        img_var = torch.autograd.Variable(img)
        heatmap = heatmap.cuda(non_blocking=True)  # 'async' became a reserved word in Python 3.7
        target_var = torch.autograd.Variable(heatmap)

        # output and loss
        # output1, output2 = net(img_var)
        # loss = (output1 - target_var) ** 2 + (output2 - target_var) ** 2
        output = net(img_var)
        # exit()
        # print(type(output))
        # print(len(output))
        loss = 0
        for per_out in output:
            tmp_loss = (per_out - target_var)**2
            loss = loss + tmp_loss.sum() / tmp_loss.numel()

        # gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        quan_op.restore()
        quan_op.updateQuanGradWeight()
        optimizer.step()

        # """measure optimization time"""
        # batch_time.update(time.time() - end)
        # end = time.time()
        # print log
        losses.update(loss.item())  # loss.data[0] fails on 0-dim tensors in modern PyTorch

        pckh = Evaluation.accuracy(output[-1].data.cpu(),
                                   target_var.data.cpu(), idx)
        pckhs.update(pckh[0])
        pckh_origin_res = Evaluation.accuracy_origin_res(
            output[-1].data.cpu(), c, s, [64, 64], grnd_pts, normalizer, r)
        pckhs_origin_res.update(pckh_origin_res[0])

        loss_dict = OrderedDict([('loss', losses.avg), ('pckh', pckhs.avg),
                                 ('pckh_origin_res', pckhs_origin_res.avg)])
        if i % opt.print_freq == 0 or i == len(train_loader) - 1:
            visualizer.print_log(epoch, i, len(train_loader), value1=loss_dict)
        # if i == 1:
        #     break
    return losses.avg, pckhs_origin_res.avg
Example #4
    def __init__(self, criterion, w_optimizer, w_scheduler, logger, writer, device):
        self.top1 = AverageMeter()
        self.top3 = AverageMeter()
        self.losses = AverageMeter()
        self.losses_lat = AverageMeter()
        self.losses_ce = AverageMeter()

        self.logger = logger
        self.writer = writer
        self.device = device

        self.criterion = criterion
        self.w_optimizer = w_optimizer
        self.w_scheduler = w_scheduler

        self.layers_structure = []
        self.dataset = CONFIG["dataloading"]["dataset"]

        self.cnt_epochs = CONFIG["train_settings"]["cnt_epochs"]
        self.meta_epochs = CONFIG["train_settings"]["meta_epochs"]
        self.warmup_epochs = CONFIG["train_settings"]["warmup_epochs"]
        self.print_freq = CONFIG["train_settings"]["print_freq"]
        self.path_to_save_model = CONFIG["train_settings"]["path_to_save_model"]
        self.path_to_save_structure = CONFIG["train_settings"]["path_to_save_structure"]
        self.path_to_save_acc = CONFIG["train_settings"]["path_to_save_acc"]
        self.path_to_candidate_table = CONFIG["train_settings"]["path_to_candidate_table"]
        self.ngpu = CONFIG["ngpu"]
        self.max_epochs = 0

        self.acc_record = {}
        self.candidate_table = []
        self.layer = 0

        with open(self.path_to_save_acc, "w") as f:
            json.dump(self.acc_record, f)
Example #5
    def __init__(self, criterion, g_optimizer, writer, device,
                 accuracy_predictor, flops_table, CONFIG):
        # criterion added to the parameter list; it is assigned below but was missing
        self.top1 = AverageMeter()
        self.top5 = AverageMeter()
        self.losses = AverageMeter()
        self.hc_losses = AverageMeter()

        self.writer = writer
        self.device = device

        self.criterion = criterion
        self.g_optimizer = g_optimizer

        self.CONFIG = CONFIG

        self.epochs = self.CONFIG.epochs
        self.warmup_epochs = self.CONFIG.warmup_epochs
        self.search_epochs = self.CONFIG.search_epochs

        self.hardware_pool = [
            i for i in range(self.CONFIG.low_macs, self.CONFIG.high_macs, 5)
        ]
        self.hardware_index = 0
        random.shuffle(self.hardware_pool)

        self.noise_weight = self.CONFIG.noise_weight

        # ================== OFA ====================
        self.accuracy_predictor = accuracy_predictor
        self.flops_table = flops_table

        self.backbone = self.calculate_one_hot(torch.randn(8 * 21)).cuda()
Example #6
    def predict(self):

        self._resume_ckpt()

        self.model.eval()
        predict_time = AverageMeter()
        batch_time = AverageMeter()
        data_time = AverageMeter()

        with torch.no_grad():
            tic = time.time()
            for steps, (data, filenames) in enumerate(self.dataloader_predict, start=1):
                # data
                data = data.to(self.device, non_blocking=True)
                data_time.update(time.time() - tic)

                pre_tic = time.time()
                logits = self.model(data)
                predict_time.update(time.time() - pre_tic)
                self._save_pred(logits, filenames)

                batch_time.update(time.time() - tic)
                tic = time.time()

            print("Predicting and Saving Done!\n"
                  "Total Time: {:.2f}\n"
                  "Data Time: {:.2f}\n"
                  "Pre Time: {:.2f}"
                  .format(batch_time._get_sum(), data_time._get_sum(), predict_time._get_sum()))
Example #7
def train(model, train_loader, args):
    optimizer = Adam(model.parameters(), lr=args.lr)
    exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer,
                                                milestones=args.milestones,
                                                gamma=args.gamma)
    criterion = nn.CrossEntropyLoss().cuda(device)
    for epoch in range(args.epochs):
        loss_record = AverageMeter()
        acc_record = AverageMeter()
        model.train()
        exp_lr_scheduler.step()
        for batch_idx, (x, label, _) in enumerate(train_loader):
            x, target = x.to(device), label.to(device)
            optimizer.zero_grad()
            output = model(x)
            loss = criterion(output, target)
            acc = accuracy(output, target)
            loss.backward()
            optimizer.step()
            acc_record.update(acc[0].item(), x.size(0))
            loss_record.update(loss.item(), x.size(0))
        print('Train Epoch: {} Avg Loss: {:.4f} \t Avg Acc: {:.4f}'.format(
            epoch, loss_record.avg, acc_record.avg))
        test(model, eva_loader, args)
    torch.save(model.state_dict(), args.model_dir)
    print("model saved to {}.".format(args.model_dir))
Example #8
def val_epoch(model, dataloader):
    losses_sbj = AverageMeter()
    losses_obj = AverageMeter()
    losses_rel = AverageMeter()
    losses_total = AverageMeter()

    model.train()  # some detection models only return their loss dict in train mode
    for i, data in enumerate(dataloader):
        images, targets = data
        with torch.no_grad():  # validation forward pass; no gradients needed
            _, metrics = model(images, targets)
        final_loss = metrics["loss_objectness"] + metrics["loss_rpn_box_reg"] + \
         metrics["loss_classifier"] + metrics["loss_box_reg"] + \
         metrics["loss_sbj"] + metrics["loss_obj"] + metrics["loss_rlp"]

        losses_sbj.update(metrics["loss_sbj"].item())
        losses_obj.update(metrics["loss_obj"].item())
        losses_rel.update(metrics["loss_rlp"].item())
        losses_total.update(final_loss.item())

    losses = {}
    losses['total_loss'] = losses_total.avg
    losses['sbj_loss'] = losses_sbj.avg
    losses['obj_loss'] = losses_obj.avg
    losses['rel_loss'] = losses_rel.avg
    return losses
Example #9
    def val_epoch(self, epoch):
        model_with_loss = self.model_with_loss
        model_with_loss.eval()
        data_time, batch_time = AverageMeter(), AverageMeter()
        avg_loss_stats = {l: AverageMeter() for l in self.loss_stats}
        end = time.time()
        for iter_id, batch in enumerate(self.val_loader):
            show_str = '[%d/%d/%d] ' % (epoch + 1, iter_id + 1,
                                        self.num_val_iter)
            data_time.update(time.time() - end)
            with torch.no_grad():
                for k in batch:
                    batch[k] = batch[k].to(device=self.config.TRAIN['DEVICE'],
                                           non_blocking=True)
                loss, loss_stats = model_with_loss(batch)
            batch_time.update(time.time() - end)
            end = time.time()
            for l in avg_loss_stats:
                avg_loss_stats[l].update(loss_stats[l].mean().item(),
                                         batch['input'].size(0))
                self.writer.add_scalar('val/' + l, avg_loss_stats[l].avg,
                                       epoch * self.num_val_iter + iter_id)
                show_str += ' {}:{:0.4}   '.format(l, avg_loss_stats[l].avg)
            print(show_str)
        save_checkpoint(
            model_with_loss.model,
            self.config.TRAIN['CHECKPOINT'] + '/model_%d.pth' % epoch)
Example #10
def PI_train(model, train_loader, eva_loader, args):
    optimizer = SGD(model.parameters(),
                    lr=args.lr,
                    momentum=args.momentum,
                    weight_decay=args.weight_decay)
    w = 0
    for epoch in range(args.epochs):
        loss_record = AverageMeter()
        acc_record = AverageMeter()
        model.train()
        w = args.rampup_coefficient * ramps.sigmoid_rampup(
            epoch, args.rampup_length)
        for batch_idx, ((x, x_bar), label,
                        idx) in enumerate(tqdm(train_loader)):
            x, x_bar = x.to(device), x_bar.to(device)
            feat = model(x)
            feat_bar = model(x_bar)
            prob = feat2prob(feat, model.center)
            prob_bar = feat2prob(feat_bar, model.center)
            sharp_loss = F.kl_div(prob.log(),
                                  args.p_targets[idx].float().to(device))
            consistency_loss = F.mse_loss(prob, prob_bar)
            loss = sharp_loss + w * consistency_loss
            loss_record.update(loss.item(), x.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print('Train Epoch: {} Avg Loss: {:.4f}'.format(
            epoch, loss_record.avg))
        _, _, _, probs = test(model, eva_loader, args, epoch)
        if epoch % args.update_interval == 0:
            print('updating target ...')
            args.p_targets = target_distribution(probs)
    torch.save(model.state_dict(), args.model_dir)
    print("model saved to {}.".format(args.model_dir))
Example #11
def cal_acc(data_list, pred_folder, classes, names):
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    for i, (image_path, target_path) in enumerate(data_list):
        image_name = image_path.split('/')[-1].split('.')[0]
        pred = cv2.imread(os.path.join(pred_folder, image_name+'.png'), cv2.IMREAD_GRAYSCALE)
        target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)
        intersection, union, target = intersectionAndUnion(pred, target, classes)
        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)
        accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        logger.info('Evaluating {0}/{1} on image {2}, accuracy {3:.4f}.'.format(i + 1, len(data_list), image_name+'.png', accuracy))

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    logger.info('Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(mIoU, mAcc, allAcc))
    for i in range(classes):
        logger.info('Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(i, iou_class[i], accuracy_class[i], names[i]))
Example #12
    def train_epoch(self, epoch):
        model_with_loss = self.model_with_loss
        model_with_loss.train()
        data_time, batch_time = AverageMeter(), AverageMeter()
        avg_loss_stats = {l: AverageMeter() for l in self.loss_stats}
        end = time.time()
        for iter_id, batch in enumerate(self.train_loader):
            data_time.update(time.time() - end)
            for k in batch:
                batch[k] = batch[k].to(device=self.config.TRAIN['DEVICE'],
                                       non_blocking=True)
            loss, loss_stats = model_with_loss(batch)
            loss = loss.mean()
            if torch.isinf(loss) or torch.isnan(loss):
                raise Exception("nan/inf in sum loss")
            self.optimizer.zero_grad()
            lr = self.lr_just.step(1e-3)
            loss.backward()
            self.optimizer.step()
            self.writer.add_scalar('train/lr', lr, self.lr_just.global_step)
            batch_time.update(time.time() - end)
            end = time.time()
            show_str = '[%d/%d/%d] ' % (epoch + 1, iter_id,
                                        self.num_train_iter)
            for l in avg_loss_stats:
                avg_loss_stats[l].update(loss_stats[l].mean().item(),
                                         batch['input'].size(0))
                self.writer.add_scalar('train/' + l, avg_loss_stats[l].avg,
                                       epoch * self.num_train_iter + iter_id)
                show_str += '{}:{:0.4} '.format(l, avg_loss_stats[l].avg)
            print(show_str)
Example #13
    def __init__(self, criterion, optimizer, g_optimizer, scheduler, writer,
                 device, lookup_table, prior_pool, CONFIG):
        self.top1 = AverageMeter()
        self.top5 = AverageMeter()
        self.losses = AverageMeter()
        self.hc_losses = AverageMeter()

        self.writer = writer
        self.device = device

        self.criterion = criterion
        self.optimizer = optimizer
        self.g_optimizer = g_optimizer
        self.scheduler = scheduler

        self.CONFIG = CONFIG

        self.epochs = self.CONFIG.epochs
        self.warmup_epochs = self.CONFIG.warmup_epochs
        self.search_epochs = self.CONFIG.search_epochs

        self.prior_pool = prior_pool
        # ==============
        self.hardware_pool = [
            i for i in range(self.CONFIG.low_flops, self.CONFIG.high_flops, 5)
        ]
        self.hardware_index = 0
        random.shuffle(self.hardware_pool)
        # ==============

        self.lookup_table = lookup_table
Example #14
def train(epoch, model, device, dataloader, optimizer, exp_lr_scheduler,
          criterion, args):
    loss_record = AverageMeter()
    acc_record = AverageMeter()
    exp_lr_scheduler.step()
    model.train()
    for batch_idx, (data, label) in enumerate(tqdm(dataloader(epoch))):
        data, label = data.to(device), label.to(device)
        output = model(data)
        loss = criterion(output, label)

        # measure accuracy and record loss
        acc = accuracy(output, label)
        acc_record.update(acc[0].item(), data.size(0))
        loss_record.update(loss.item(), data.size(0))

        # compute gradient and do optimizer step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    print('Train Epoch: {} Avg Loss: {:.4f} \t Avg Acc: {:.4f}'.format(
        epoch, loss_record.avg, acc_record.avg))

    return loss_record
Example #15
def validate(val_loader, net, Mean_2D, Mean_Delta, SD_2D, SD_Delta, epoch, opt):

    batch_time = AverageMeter()
    losses_det = AverageMeter()
    losses = AverageMeter()
    dist_errors = AverageMeter()

    # switch to evaluate mode
    net.eval()

    end = time.time()
    DIST_ERROR = 0

    for i, (pts3d, pts2d, name) in enumerate(val_loader):  

        # input and groundtruth
        pts2d = (pts2d - Mean_2D) / SD_2D
        pts2d = torch.autograd.Variable(pts2d.cuda(non_blocking=True), requires_grad=False)

        pts3d = pts3d.narrow(1,1,16) #remove pelvis center
        pts3d = (pts3d - Mean_Delta)/SD_Delta 

        target_var = torch.autograd.Variable(pts3d.cuda(non_blocking=True), requires_grad=False)

        # output 
        output = net(pts2d)

        #loss
        loss =  (output - target_var)**2 
        loss = loss.sum() / loss.numel()

        #3d body pose error calculation
        s3d = pts3d * SD_Delta + Mean_Delta
        pred_pts = output.cuda().data 
        pred_pts = pred_pts * SD_Delta.cuda() + Mean_Delta.cuda()

        dist_error = PoseError.Dist_Err(pred_pts, s3d.cuda())
        dist_errors.update(dist_error)

        DIST_ERROR = DIST_ERROR + dist_error

        """measure elapsed time"""
        batch_time.update(time.time() - end)
        end = time.time()

        # print log
        losses.update(loss.item())  # dist_errors was already updated above
        loss_dict = OrderedDict( [('loss', losses.avg),
                                  ('dist_error', dist_errors.avg)] )

        print('Epoch:[{0}][{1}/{2}] '
              'dist_error:[{3:.4f}] '.format(
               epoch, i, len(val_loader), dist_error, ))     

    DIST_ERROR = DIST_ERROR / len(val_loader)
    print('the average dist_error is', DIST_ERROR)

    return losses.avg, dist_errors.avg
Example #16
def Baseline_train(model, train_loader, eva_loader, args):
    optimizer = SGD(model.parameters(),
                    lr=args.lr,
                    momentum=args.momentum,
                    weight_decay=args.weight_decay)
    for epoch in range(args.epochs):
        loss_record = AverageMeter()
        acc_record = AverageMeter()
        model.train()
        for batch_idx, (x, _, idx) in enumerate(tqdm(train_loader)):
            x = x.to(device)
            output = model(x)
            prob = feat2prob(output, model.center)
            loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
            loss_record.update(loss.item(), x.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print('Train Epoch: {} Avg Loss: {:.4f}'.format(
            epoch, loss_record.avg))
        _, _, _, probs = test(model, eva_loader, args, epoch)
        if epoch % args.update_interval == 0:
            print('updating target ...')
            args.p_targets = target_distribution(probs)
    torch.save(model.state_dict(), args.model_dir)
    print("model saved to {}.".format(args.model_dir))
Example #17
@torch.no_grad()  # evaluation only; skip gradient tracking
def mode_val(valid_loader, model, criterion, epoch_str, print_freq):
    model.eval()
    val_top1 = AverageMeter()
    val_top5 = AverageMeter()
    val_loss = AverageMeter()
    for step, (val_input, val_target) in enumerate(valid_loader):
        batch = val_input.size(0)
        val_input = val_input.cuda()
        val_target = val_target.cuda()

        if model.name in ['cifar10', 'cifar100']:
            feature, logits_aux = model(val_input)
            loss = criterion(feature, val_target)
            if logits_aux is not None:
                loss_aux = criterion(logits_aux, val_target)
                loss += model.pre_model.auxiliary_weight * loss_aux
        elif model.name in ['uji', 'hapt']:
            feature = model(val_input)
            loss = criterion(feature, val_target)
        else:
            raise ValueError

        pre1, pre5 = accuracy(feature.data, val_target.data, top_k=(1, 5))
        val_top1.update(pre1.item(), batch)
        val_top5.update(pre5.item(), batch)
        val_loss.update(loss.item(), batch)

        if step % print_freq == 0 or step + 1 == len(valid_loader):
            str1 = 'valid - epoch:' + epoch_str + ' batch:[' + '{:3d}/{:}]  '.format(
                step, len(valid_loader))
            str2 = '[Loss:{:.6f}  Pre@1:{:.5f}%  Pre@5:{:.5f}%]'.format(
                val_loss.avg, val_top1.avg, val_top5.avg)
            logging.info(str1 + str2)

    return val_top1.avg, val_top5.avg, val_loss.avg
Example #18
def train_epoch(model, data_loader, criterion, optimizer, device, opt):
    model.train()

    losses = AverageMeter('Loss', ':.2f')
    accuracies = AverageMeter('Acc', ':.2f')
    progress = ProgressMeter(
        len(data_loader),
        [losses, accuracies],
        prefix='Train: ')
    # Training
    for batch_idx, (data, targets) in enumerate(data_loader):
        # compute outputs
        data, targets = data.to(device), targets.to(device)

        outputs = model(data)
        loss = criterion(outputs, targets)

        acc = accuracy(outputs, targets)
        losses.update(loss.item(), data.size(0))
        accuracies.update(acc[0].item(), data.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # show information
        if batch_idx % opt.log_interval == 0:
            progress.display(batch_idx)

    # show information
    print(f' * Train Loss {losses.avg:.3f}, Train Acc {accuracies.avg:.3f}')
    return losses.avg, accuracies.avg
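
ProgressMeter, used above and in Example #26, is also commonly lifted from the PyTorch ImageNet example; a compatible sketch (it relies on AverageMeter.__str__):

class ProgressMeter(object):
    """Prints a batch counter followed by every registered meter (sketch)."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        print('\t'.join(entries))

    def _get_batch_fmtstr(self, num_batches):
        # e.g. num_batches=391 -> '[{:3d}/391]'
        num_digits = len(str(num_batches))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'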
Example #19
    def _get_loggers(self):
        self.LossBox = AverageMeter()
        self.LossConf = AverageMeter()
        self.LossClass = AverageMeter()
        self.logger_losses = {}
        self.logger_losses.update({"lossBox": self.LossBox})
        self.logger_losses.update({"lossConf": self.LossConf})
        self.logger_losses.update({"lossClass": self.LossClass})
Example #20
def train(epoch, train_loader, model, regressor, criterion, optimizer, opt):
    """
    one epoch training
    """

    model.eval()
    regressor.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    InterOcularError = AverageMeter()

    end = time.time()
    for idx, (input, _, target, index) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        input = input.cuda(opt.gpu, non_blocking=True)
        input = input.float()
        target = target.cuda(opt.gpu, non_blocking=True)

        # ===================forward=====================
        with torch.no_grad():
            feat = model(input, opt.layer, opt.use_hypercol, opt.output_shape)
            feat = feat.detach()
        output, _ = regressor(feat)
        loss = criterion(output, target, alpha=10.)

        if idx == 0:
            print('Layer:{0}, shape of input:{1}, feat:{2}, output:{3}'.format(
                opt.layer, input.size(), feat.size(), output.size()))

        ic_error = inter_ocular_error(output, target, eyeidxs=opt.eye_idx)
        losses.update(loss.item(), input.size(0))
        InterOcularError.update(ic_error, input.size(0))

        # ===================backward=====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # ===================meters=====================
        batch_time.update(time.time() - end)
        end = time.time()

        # print info
        if idx % opt.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'InterOcularError {InterOcularError.val:.3f}'.format(
                   epoch, idx, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, InterOcularError=InterOcularError))
            sys.stdout.flush()

    return InterOcularError.avg, losses.avg
Example #21
    def train(self, state, epoch):
        losses = AverageMeter()
        top1 = AverageMeter()
        top5 = AverageMeter()
        print_freq = config.print_freq
        net = state['model']
        criterion = state['criterion']
        optimizer = state['optimizer']
        train_loader = state['train_loader']
        write = state['write']
        net.train()
        pbar = tqdm(total=len(train_loader), unit='batches')
        pbar.set_description('Epoch {}/{}'.format(epoch + 1, config.epochs))
        for i, (img, label) in enumerate(train_loader):
            # if config.use_gpu:
            if img.size(0) == 1:
                img = img.repeat(2, 1, 1, 1)
                label = label.repeat(2)
            target = label.cuda()
            input = img.cuda()
            optimizer.zero_grad()

            #net forward
            prob1, cam_top1, M_p = net(input)
            crop_img = attention_drop_train(M_p, input, config.mask_train_th,
                                            cam_top1)
            crop_img = crop_img.cuda()
            prob2, cam_top1_2, _ = net(crop_img)

            loss1 = criterion(prob1, target)
            loss2 = criterion(prob2, target)
            loss = (loss1 + loss2) / 2

            #net train  accuracy
            prec1, prec5 = accuracy(prob1, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(prec1[0], input.size(0))
            top5.update(prec5[0], input.size(0))
            loss.backward()
            optimizer.step()
            if i % 300 == 0:
                #first image show
                first_pre = image_with_cam(input, cam_top1)

                #second image show
                second_pre = image_with_cam(crop_img, cam_top1_2)

                # visualize mask4
                write.add_images('first_pre', first_pre, 0, dataformats='NCHW')
                write.add_images('second_pre',
                                 second_pre,
                                 0,
                                 dataformats='NCHW')

            pbar.update()
            # pbar.set_postfix_str(batch_info)
        pbar.close()
        return top1.avg, losses.avg
Example #22
    def _eval_epoch(self, epoch):

        batch_time = AverageMeter()
        data_time = AverageMeter()
        ave_total_loss = AverageMeter()
        ave_acc = AverageMeter()
        ave_iou = AverageMeter()

        # set model mode
        self.model.eval()

        with torch.no_grad():
            tic = time.time()
            for steps, (data, target) in enumerate(self.valid_data_loder,
                                                   start=1):

                # processing no blocking
                # non_blocking tries to convert asynchronously with respect to the host if possible
                # converting CPU tensor with pinned memory to CUDA tensor
                # overlap transfer if pinned memory
                data = data.to(self.device, non_blocking=True)
                target = target.to(self.device, non_blocking=True)
                data_time.update(time.time() - tic)

                logits = self.model(data)
                loss = self.loss(logits, target)
                # calculate metrics
                acc = Accuracy(logits, target)
                miou = MIoU(logits, target, self.config.nb_classes)
                #print("===========acc, miou==========", acc, miou)

                # update ave metrics
                batch_time.update(time.time() - tic)

                ave_total_loss.update(loss.data.item())
                ave_acc.update(acc.item())
                ave_iou.update(miou.item())
                tic = time.time()
            # display validation at the end
            print('Epoch {} validation done !'.format(epoch))
            print('Time: {:.4f},       Data:     {:.4f},\n'
                  'MIoU: {:6.4f},      Accuracy: {:6.4f},      Loss: {:.6f}'.
                  format(batch_time.average(), data_time.average(),
                         ave_iou.average(), ave_acc.average(),
                         ave_total_loss.average()))

        self.history['valid']['epoch'].append(epoch)
        self.history['valid']['loss'].append(ave_total_loss.average())
        self.history['valid']['acc'].append(ave_acc.average())
        self.history['valid']['miou'].append(ave_iou.average())
        #  validation log and return
        return {
            'epoch': epoch,
            'val_Loss': ave_total_loss.average(),
            'val_Accuracy': ave_acc.average(),
            'val_MIoU': ave_iou.average(),
        }
Example #23
def train(model, optimizer, data_loader, scheduler, writer, max_acc=0, step_start=0):
    loss_cls_accumulate = AverageMeter()
    loss_reg_accumulate = AverageMeter()
    loss_center_accumulate = AverageMeter()

    loader = data_loader.train_loader
    model.train()
    if train_arg.apex:
        model.apply(fix_bn)
    if train_arg.rank == 0:
        loader = tqdm(loader, ncols=20)

    loader_len = len(loader)
    for step, data in enumerate(loader):
        # Input
        if step > loader_len - step_start:
            break
        step += step_start
        losses = run_one_iter(model, optimizer, data, scheduler, False)

        # Loss and results
        if losses:
            if not np.isnan(losses['loss_cls'].data.cpu().numpy()):
                loss_cls_accumulate.update(val=losses['loss_cls'].data.cpu().numpy())
            if not np.isnan(losses['loss_reg'].data.cpu().numpy()):
                loss_reg_accumulate.update(val=losses['loss_reg'].data.cpu().numpy())
            if not np.isnan(losses['loss_centerness'].data.cpu().numpy()):
                loss_center_accumulate.update(val=losses['loss_centerness'].data.cpu().numpy())

        if train_arg.rank == 0:
            writer.add_scalar('train/loss_cls', loss_cls_accumulate.avg, step)
            writer.add_scalar('train/loss_reg', loss_reg_accumulate.avg, step)
            writer.add_scalar('train/loss_center', loss_center_accumulate.avg, step)
            writer.add_scalar('train/lr', optimizer.param_groups[0]["lr"], step)

        if step % 1000 == 999:
            if train_arg.rank == 0:
                print('save model')
                torch.save({'state': model.state_dict(),
                            'max_acc': max_acc,
                            'step': step,
                            'opt': optimizer.state_dict(),
                            'sched': scheduler.state_dict()},
                           train_arg.model_path + '/' + train_arg.model_name)

        if step % train_arg.reset_iter == train_arg.reset_iter - 1:
            loss_cls_accumulate.reset()
            loss_reg_accumulate.reset()
            loss_center_accumulate.reset()

        if train_arg.local_rank == 0:
            loader.set_description('Loss_cls: ' + str(loss_cls_accumulate.avg)[0:6] +
                                   ',\tLoss_reg: ' + str(loss_reg_accumulate.avg)[0:6] +
                                   ',\tLoss_center: ' + str(loss_center_accumulate.avg)[0:6], refresh=False)
Example #24
    def train(self):
        self.net.train()

        bce_losses = AverageMeter()
        image_gradient_losses = AverageMeter()
        image_gradient_criterion = ImageGradientLoss().to(self.device)
        bce_criterion = nn.CrossEntropyLoss().to(self.device)

        for epoch in range(self.epoch, self.num_epoch):
            bce_losses.reset()
            image_gradient_losses.reset()
            for step, (image, gray_image, mask) in enumerate(self.data_loader):
                image = image.to(self.device)
                mask = mask.to(self.device)
                gray_image = gray_image.to(self.device)

                pred = self.net(image)

                pred_flat = pred.permute(0, 2, 3, 1).contiguous().view(
                    -1, self.num_classes)
                mask_flat = mask.squeeze(1).view(-1).long()

                # pred_flat.shape: (N*224*224, num_classes)
                # mask_flat.shape: (N*224*224,)
                image_gradient_loss = image_gradient_criterion(
                    pred, gray_image)
                bce_loss = bce_criterion(pred_flat, mask_flat)

                loss = bce_loss + self.gradient_loss_weight * image_gradient_loss

                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                bce_losses.update(bce_loss.item(), self.batch_size)
                image_gradient_losses.update(
                    self.gradient_loss_weight * image_gradient_loss.item(),
                    self.batch_size)  # .item() so the meter stores a float, not a graph-holding tensor
                iou = iou_loss(pred, mask)

                # save sample images
                if step % 50 == 0:
                    print(
                        f"Epoch: [{epoch}/{self.num_epoch}] | Step: [{step}/{self.image_len}] | "
                        f"Bce Loss: {bce_losses.avg:.4f} | Image Gradient Loss: {image_gradient_losses.avg:.4f} | "
                        f"IOU: {iou:.4f}")
                if step % self.sample_step == 0:
                    self.save_sample_imgs(image[0], mask[0],
                                          torch.argmax(pred[0], 0),
                                          self.sample_dir, epoch, step)
                    print('[*] Saved sample images')

            torch.save(
                self.net.state_dict(),
                f'{self.checkpoint_dir}/MobileHairNet_epoch-{epoch-1}.pth')
Example #25
def train(train_loader, model, optimizer, epoch, epoch_size, train_writer):
    global n_iter, args
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    flow_finest_EPEs = AverageMeter()

    # switch to train mode
    model.train()
    end = time.time()

    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        target = target.to(device)
        input = torch.cat(input, 1).to(device)

        # compute output
        output = model(input)

        if args.sparse:
            # Since Target pooling is not very precise when sparse,
            # take the highest resolution prediction and upsample it instead of downsampling target
            h, w = target.size()[-2:]
            output = [F.interpolate(output[0], (h, w)), *output[1:]]

        loss = L1Loss(output, target, sparse=args.sparse)
        flow_finest_EPE = args.div_flow * realEPE(
            output, target, sparse=args.sparse)

        # record loss and EPE
        losses.update(loss.item(), target.size(0))
        train_writer.add_scalar('train_loss', loss.item(), n_iter)
        flow_finest_EPEs.update(flow_finest_EPE.item(), target.size(0))

        # compute gradient and do optimization step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print(
                'Epoch: [{0}][{1}/{2}]\t Time(s) {3}\t Data Time(s) {4}\t Loss {5}\t EPE {6}'
                .format(epoch, i, epoch_size, batch_time, data_time, losses,
                        flow_finest_EPEs))
        n_iter += 1
        if i >= epoch_size:
            break

    return losses.avg, flow_finest_EPEs.avg
Example #26
def evaluate(val_dataloader, img_encoder, text_encoder, fc_model, args):
    m_top1 = AverageMeter('Acc@1', ':6.2f')
    m_iou = AverageMeter('IoU', ':6.2f')
    m_ap50 = AverageMeter('AP50', ':6.2f')
    progress = ProgressMeter(len(val_dataloader), [m_top1, m_iou, m_ap50],
                             prefix='Test: ')

    img_encoder.eval()
    fc_model.eval()

    ignore_index = val_dataloader.dataset.ignore_index

    for i, batch in enumerate(val_dataloader):

        # Data
        region_proposals = batch['rpn_image'].cuda(non_blocking=True)
        commands = batch['command']
        sentence = batch['sentence']
        command_length = batch['command_length'].cuda(non_blocking=True)
        gt = batch['rpn_gt'].cuda(non_blocking=True)

        iou = batch['rpn_iou'].cuda(non_blocking=True).squeeze()
        b, r, c, h, w = region_proposals.size()

        # Image features
        img_features = img_encoder(region_proposals.view(b * r, c, h, w))
        norm = img_features.norm(p=2, dim=1, keepdim=True)
        img_features = img_features.div(norm).view(b, r, -1)

        #Sentence features
        sentence_features = torch.from_numpy(
            np.array(text_encoder.encode(sentence))).cuda(non_blocking=True)
        sentence_features = fc_model(sentence_features)

        # Product in latent space
        scores = torch.bmm(img_features,
                           sentence_features.unsqueeze(2)).squeeze()
        gt = gt.squeeze()

        # Summary
        pred = torch.argmax(scores, 1)
        pred_bin = F.one_hot(pred, r).bool()
        valid = (gt != ignore_index)
        num_valid = torch.sum(valid).float().item()
        m_top1.update(
            torch.sum(pred[valid] == gt[valid]).float().item(), num_valid)
        m_iou.update(
            torch.masked_select(iou, pred_bin).sum().float().item(), b)
        m_ap50.update(
            (torch.masked_select(iou, pred_bin) > 0.5).sum().float().item(), b)

        if i % args.print_freq == 0:
            progress.display(i)
    return m_ap50.avg
Example #27
def validate(val_loader, model, regressor, criterion, opt):
    batch_time = AverageMeter()
    losses = AverageMeter()
    PCK = AverageMeter()

    # switch to evaluate mode
    model.eval()
    regressor.eval()

    with torch.no_grad():
        end = time.time()
        for idx, (input, visible, target, index) in enumerate(val_loader):
            input = input.cuda(opt.gpu, non_blocking=True)
            input = input.float()
            target = target.cuda(opt.gpu, non_blocking=True)
            target = target.float()

            # compute output
            if opt.model == 'hourglass':
                feat = model(input, opt.layer, opt.output_shape)
            else:
                feat = model(input, opt.layer, opt.use_hypercol,
                             opt.output_shape)
            feat = feat.detach()
            output, _ = regressor(feat)
            loss = criterion(output, target, visible)

            # measure accuracy and record loss
            ic_error = calc_pck(output, target, visible, boxsize=opt.boxsize)
            losses.update(loss.item(), input.size(0))
            PCK.update(ic_error, input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if opt.vis_keypoints and idx <= 3:
                keypoints_animal(input, output, target, visible, index,
                                 opt.vis_path)

            if idx % opt.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'PCK {PCK.val:.3f}'.format(idx,
                                                 len(val_loader),
                                                 batch_time=batch_time,
                                                 loss=losses,
                                                 PCK=PCK))

        print(' * PCK {PCK.avg:.3f}'.format(PCK=PCK))

    return PCK.avg, losses.avg
Example #28
    def _eval_task(self, task_idx: int, val_loader: TaskDataLoader,
                   train_epoch: int,
                   val_epoch: int) -> Tuple[float, float, Dict]:
        self.crt_eval_info = info = dict({})

        self._start_eval_task()  # TEMPLATE

        report_or_not = self._eval_batch_report
        print_freq = self._eval_batch_show_freq
        report_freq = self._eval_batch_save_freq
        report = self.report

        last_batch = len(val_loader) - 1
        losses = AverageMeter()
        acc = AverageMeter()
        correct_cnt = 0
        seen = self.seen
        seen_eval = 0

        with torch.no_grad():
            for batch_idx, (data, targets, head_idx) in enumerate(val_loader):
                outputs, loss, info_batch = self._eval_task_batch(
                    batch_idx, data, targets, head_idx)
                info.update(info_batch)

                (top1, correct), = accuracy(outputs, targets)
                correct_cnt += correct

                seen_eval += data.size(0)
                acc.update(top1, data.size(0))
                losses.update(loss.item(), data.size(0))

                if report_or_not:
                    if report_freq > 0:
                        if (batch_idx + 1
                            ) % report_freq == 0 or batch_idx == last_batch:
                            report.trace_eval_batch(seen, task_idx,
                                                    train_epoch, val_epoch,
                                                    info)

                    if print_freq > 0:
                        if (batch_idx + 1
                            ) % print_freq == 0 or batch_idx == last_batch:
                            print(
                                f'\t\t[Eval] [Epoch: {train_epoch:3}] [Batch: {batch_idx:5}]:\t '
                                f'[Loss] crt: {losses.val:3.4f}  avg: {losses.avg:3.4f}\t'
                                f'[Accuracy] crt: {acc.val:3.2f}  avg: {acc.avg:.2f}'
                            )

            self._end_eval_task()  # TEMPLATE

            return losses.avg, correct_cnt / float(seen_eval), info
Example #29
def test(test_loader, data_list, model, classes, mean, std, base_size, crop_h, crop_w, scales, gray_folder, color_folder, colors):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    results = []
    with torch.no_grad():
        for i, (input, _) in enumerate(test_loader):
            data_time.update(time.time() - end)
            input = np.squeeze(input.numpy(), axis=0)
            image = np.transpose(input, (1, 2, 0))
            h, w, _ = image.shape
            prediction = np.zeros((h, w, classes), dtype=float)
            for scale in scales:
                long_size = round(scale * base_size)
                new_h = long_size
                new_w = long_size
                if h > w:
                    new_w = round(long_size/float(h)*w)
                else:
                    new_h = round(long_size/float(w)*h)
                image_scale = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
                prediction += scale_process(model, image_scale, classes, crop_h, crop_w, h, w, mean, std)
            prediction /= len(scales)
            prediction = np.argmax(prediction, axis=2)
            results.append(prediction)
            batch_time.update(time.time() - end)
            end = time.time()
            if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
                logger.info('Test: [{}/{}] '
                            'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                            'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(i + 1, len(test_loader),
                                                                                        data_time=data_time,
                                                                                        batch_time=batch_time))
            # check_makedirs(gray_folder)
            # check_makedirs(color_folder)
            # gray = np.uint8(prediction)
            # labelId = dataset._convert_to_label_id(gray)
            # color = colorize(gray, colors)
            # image_path, _ = data_list[i]
            # image_name = image_path.split('/')[-1].split('.')[0]
            # gray_path = os.path.join(gray_folder, image_name + '.png')
            # label_path = os.path.join(gray_folder, image_name + '_ID.png')
            # color_path = os.path.join(color_folder, image_name + '.png')
            # cv2.imwrite(label_path, labelId)
            # cv2.imwrite(gray_path, gray)
            # color.save(color_path)
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
    logger.info('Convert to Label ID')
    result_files = dataset.results2img(results=results, data_root=args.data_root, data_list=args.test_list, save_dir='./test_result', to_label_id=True)
    logger.info('Convert to Label ID Finished')
Example #30
    def val(self, epoch):
        # test mode
        self.model_feature.eval()
        self.model_target_classifier.eval()

        val_losses = AverageMeter()
        val_top1_accs = AverageMeter()

        # Batches
        for i, (imgs, labels) in enumerate(self.val_loader):
            # Move to GPU, if available
            if torch.cuda.is_available():
                imgs = imgs.cuda()
                labels = labels.cuda()

            if self.data_aug == 'improved':
                bs, ncrops, c, h, w = imgs.size()
                imgs = imgs.view(-1, c, h, w)

            # forward and loss
            with torch.no_grad():
                outputs = self.model_feature(imgs)
                outputs = self.model_target_classifier(outputs)

                if self.data_aug == 'improved':
                    outputs = outputs.view(bs, ncrops, -1).mean(1)

                val_loss = self.loss_fn(outputs, labels)

            val_losses.update(val_loss.item(), imgs.size(0))
            # compute accuracy
            top1_accuracy = accuracy(outputs, labels, 1)
            val_top1_accs.update(top1_accuracy, imgs.size(0))

            # batch update
            self.layer_outputs_source.clear()
            self.layer_outputs_target.clear()

            # Print status
            if i % self.print_freq == 0:
                self.logger.info('Val Epoch: [{:d}/{:d}][{:d}/{:d}]\tval_loss={:.4f}\t\ttop1_accuracy={:.4f}\t'
                            .format(epoch, self.num_epochs, i, len(self.val_loader), val_losses.avg, val_top1_accs.avg))
        # save tensorboard
        self.writer.add_scalar('Val_loss', val_losses.avg, epoch)
        self.writer.add_scalar('Val_top1_accuracy', val_top1_accs.avg, epoch)

        self.logger.info('||==> Val Epoch: [{:d}/{:d}]\tval_loss={:.4f}\t\ttop1_accuracy={:.4f}'
                         .format(epoch, self.num_epochs, val_losses.avg, val_top1_accs.avg))

        return val_losses.avg, val_top1_accs.avg