def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu):
    """Run one training epoch combining cross-entropy and hard-triplet losses.

    When ``args.htri_only`` is set, only the triplet loss on the feature
    embeddings is optimised; otherwise the sum of both losses is used.
    Tuple outputs/features are handled through ``DeepSupervision``.
    """
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    tic = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        data_time.update(time.time() - tic)

        if use_gpu:
            imgs = imgs.cuda()
            pids = pids.cuda()

        outputs, features = model(imgs)
        if args.htri_only:
            # Triplet loss alone, deep-supervised when features come as a tuple.
            loss = (DeepSupervision(criterion_htri, features, pids)
                    if isinstance(features, tuple)
                    else criterion_htri(features, pids))
        else:
            xent_loss = (DeepSupervision(criterion_xent, outputs, pids)
                         if isinstance(outputs, tuple)
                         else criterion_xent(outputs, pids))
            htri_loss = (DeepSupervision(criterion_htri, features, pids)
                         if isinstance(features, tuple)
                         else criterion_htri(features, pids))
            loss = xent_loss + htri_loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - tic)
        losses.update(loss.item(), pids.size(0))

        if (batch_idx+1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                   epoch+1, batch_idx+1, len(trainloader), batch_time=batch_time,
                   data_time=data_time, loss=losses))

        tic = time.time()
def train(epoch, model, criterion, optimizer, trainloader, use_gpu, freeze_bn=False):
    """Train *model* for one epoch with a single classification criterion.

    Batch-norm layers are put in eval mode when ``freeze_bn`` (or the global
    ``args.freeze_bn``) is set. Tuple outputs go through ``DeepSupervision``.
    """
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    # Freeze batch-norm running statistics when requested locally or globally.
    if freeze_bn or args.freeze_bn:
        model.apply(set_bn_to_eval)

    last = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        data_time.update(time.time() - last)

        if use_gpu:
            imgs = imgs.cuda()
            pids = pids.cuda()

        outputs = model(imgs)
        loss = (DeepSupervision(criterion, outputs, pids)
                if isinstance(outputs, tuple)
                else criterion(outputs, pids))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - last)
        losses.update(loss.item(), pids.size(0))

        if (batch_idx+1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                   epoch+1, batch_idx+1, len(trainloader), batch_time=batch_time,
                   data_time=data_time, loss=losses))

        last = time.time()
# Exemplo n.º 3
# 0
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
    """Evaluate *model* on the query/gallery sets and report CMC and mAP.

    Features are accumulated over ``args.flip_cnt`` forward passes (the
    second pass uses a horizontally flipped batch), then compared with
    euclidean or cosine distance depending on ``args.distance``.

    Returns:
        The rank-1 CMC score.
    """
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            end = time.time()

            n, c, h, w = imgs.size()
            # Accumulate features over the original and flipped passes.
            features = torch.FloatTensor(n, model.module.feat_dim).zero_()
            for i in range(args.flip_cnt):
                if (i == 1):
                    imgs = fliplr(imgs, use_gpu)
                # BUGFIX: move the batch to the GPU, as the gallery loop
                # below already does; query batches previously stayed on
                # the CPU while the model could live on the GPU.
                if use_gpu: imgs = imgs.cuda()
                f = model(imgs)[1]
                f = f.data.cpu()
                features = features + f

            batch_time.update(time.time() - end)

            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):

            end = time.time()

            n, c, h, w = imgs.size()
            features = torch.FloatTensor(n, model.module.feat_dim).zero_()
            for i in range(args.flip_cnt):
                if (i == 1):
                    imgs = fliplr(imgs, use_gpu)
                if use_gpu: imgs = imgs.cuda()
                f = model(imgs)[1]
                f = f.data.cpu()
                features = features + f

            batch_time.update(time.time() - end)

            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    m, n = qf.size(0), gf.size(0)
    distmat = torch.zeros((m, n))
    if args.distance == 'euclidean':
        # Squared euclidean: ||q||^2 + ||g||^2 - 2*q.g
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                  torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        # Keyword form replaces the removed positional addmm_(1, -2, m1, m2).
        distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    else:
        # Cosine distance: negated similarity of L2-normalised features.
        q_norm = torch.norm(qf, p=2, dim=1, keepdim=True)
        g_norm = torch.norm(gf, p=2, dim=1, keepdim=True)
        qf = qf.div(q_norm.expand_as(qf))
        gf = gf.div(g_norm.expand_as(gf))
        distmat = -torch.mm(qf, gf.t())
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    return cmc[0]
# Exemplo n.º 4
# 0
def train(epoch,
          model,
          criterion_xent,
          criterion_htri,
          criterion_mask,
          optimizer,
          trainloader,
          use_gpu=True):
    """Train one epoch with identity losses plus part-mask supervision.

    Each batch is a 7-channel ``sequences`` tensor: channels 0-2 feed the
    model as the image, channels 3-6 are per-part target masks (named
    head/upper/lower/shoes by the slicing below — assumed layout, confirm
    against the dataloader). The total loss is
    ``xent + htri + args.alpha * mean(mask losses)``.
    """
    xent_losses = AverageMeter()
    htri_losses = AverageMeter()
    mask_losses = AverageMeter()
    accs = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()
    end = time.time()
    for batch_idx, (sequences, pids, _) in enumerate(trainloader):
        if use_gpu:
            sequences, pids = sequences.cuda(), pids.cuda()

        # measure data loading time
        # NOTE(review): measured after the .cuda() transfer, so this also
        # includes host-to-device copy time — confirm that is intended.
        data_time.update(time.time() - end)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward
        # Split the 7-channel input into the RGB image and four 1-channel masks.
        imgs = sequences[:, :3]
        head_masks = sequences[:, 3:4]
        upper_masks = sequences[:, 4:5]
        lower_masks = sequences[:, 5:6]
        shoes_masks = sequences[:, 6:7]

        # Model returns logits, embeddings and four per-part attention tuples.
        outputs, features, a_head, a_upper, a_lower, a_shoes = model(imgs)
        _, preds = torch.max(outputs.data, 1)
        xent_loss = criterion_xent(outputs, pids)
        htri_loss = criterion_htri(features, pids)
        loss = xent_loss + htri_loss

        # Mask loss is applied at every supervision level, then averaged
        # over the four body parts.
        head_loss = DeepSupervision(criterion_mask, a_head, head_masks)
        upper_loss = DeepSupervision(criterion_mask, a_upper, upper_masks)
        lower_loss = DeepSupervision(criterion_mask, a_lower, lower_masks)
        shoes_loss = DeepSupervision(criterion_mask, a_shoes, shoes_masks)
        mask_loss = (head_loss + upper_loss + lower_loss + shoes_loss) / 4.0

        total_loss = loss + args.alpha * mask_loss

        # backward + optimize
        total_loss.backward()
        optimizer.step()

        # statistics
        # NOTE(review): this stores a 0-dim tensor (no .item()) in the meter;
        # the final {:.2%} format still works, but it keeps a tensor alive.
        accs.update(
            torch.sum(preds == pids.data).float() / pids.size(0), pids.size(0))
        xent_losses.update(xent_loss.item(), pids.size(0))
        htri_losses.update(htri_loss.item(), pids.size(0))
        mask_losses.update(mask_loss.item(), pids.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

    print('Epoch{0} '
          'Time:{batch_time.sum:.1f}s '
          'Data:{data_time.sum:.1f}s '
          'xentLoss:{xent_loss.avg:.4f} '
          'triLoss:{tri_loss.avg:.4f} '
          'MaskLoss:{mask_loss.avg:.4f} '
          'Acc:{acc.avg:.2%} '.format(epoch + 1,
                                      batch_time=batch_time,
                                      data_time=data_time,
                                      xent_loss=xent_losses,
                                      tri_loss=htri_losses,
                                      mask_loss=mask_losses,
                                      acc=accs))
# Exemplo n.º 5
# 0
def testCATfeature(model1,
                   model2,
                   queryloader,
                   galleryloader,
                   test_batch,
                   loss_type,
                   euclidean_distance_loss,
                   epoch,
                   use_metric_cuhk03=False,
                   ranks=[1, 5, 10, 20],
                   return_distmat=False):
    """Evaluate the concatenated features of two models on query/gallery sets.

    Each sample yields two views (``imgs1``/``imgs2``); the per-model
    features are concatenated along dim 1 before distance computation.
    ``loss_type`` selects euclidean (if in ``euclidean_distance_loss``) or
    'angle' (cosine) distance; anything else raises ``KeyError``.

    Returns:
        ``distmat`` if *return_distmat* else ``(rank-1 CMC, mAP)``.
    """
    batch_time = AverageMeter()

    model1.eval()
    model2.eval()
    with torch.no_grad():
        tqf, tq_pids, tq_camids = [], [], []
        for batch_idx, (imgs1, imgs2, _, pids,
                        camids) in enumerate(queryloader):

            # Consistency fix: use plain .cuda() like the gallery loop below;
            # the deprecated Variable wrapper has been a no-op since PyTorch 0.4.
            imgs1 = imgs1.cuda()
            imgs2 = imgs2.cuda()
            end = time.time()
            features1 = model1(imgs1)
            features2 = model2(imgs2)
            features = torch.cat((features1, features2), 1)
            batch_time.update(time.time() - end)
            features = features.data.cpu()

            tqf.append(features)
            tq_pids.extend(pids)
            tq_camids.extend(camids)
        tqf = torch.cat(tqf, 0)
        tq_pids = np.asarray(tq_pids)
        tq_camids = np.asarray(tq_camids)
        print(
            "Extracted features for train_query set, obtained {}-by-{} matrix".
            format(tqf.size(0), tqf.size(1)))
        print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
            batch_time.avg, test_batch))
        tgf, tg_pids, tg_camids = [], [], []
        for batch_idx, (imgs1, imgs2, _, pids,
                        camids) in enumerate(galleryloader):
            imgs1 = imgs1.cuda()
            imgs2 = imgs2.cuda()
            end = time.time()
            features1 = model1(imgs1)
            features2 = model2(imgs2)
            features = torch.cat((features1, features2), 1)
            batch_time.update(time.time() - end)
            features = features.data.cpu()

            tgf.append(features)
            tg_pids.extend(pids)
            tg_camids.extend(camids)
        tgf = torch.cat(tgf, 0)
        tg_pids = np.asarray(tg_pids)
        tg_camids = np.asarray(tg_camids)
        print(
            "Extracted features for train_gallery set, obtained {}-by-{} matrix"
            .format(tgf.size(0), tgf.size(1)))
        print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
            batch_time.avg, test_batch))

    print("Start compute distmat.")
    if loss_type in euclidean_distance_loss:
        # Squared euclidean: ||q||^2 + ||g||^2 - 2*q.g
        m, n = tqf.size(0), tgf.size(0)
        distmat = torch.pow(tqf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                  torch.pow(tgf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        # Keyword form replaces the removed positional addmm_(1, -2, m1, m2).
        distmat.addmm_(tqf, tgf.t(), beta=1, alpha=-2)
        distmat = distmat.numpy()
    elif loss_type == 'angle':
        # Negative cosine similarity; epsilon guards against zero-norm rows.
        tvec_dot = torch.matmul(tqf, tgf.t())
        tqf_len = tqf.norm(dim=1, keepdim=True)
        tgf_len = tgf.norm(dim=1, keepdim=True)
        tvec_len = torch.matmul(tqf_len, tgf_len.t()) + 1e-5
        distmat = -torch.div(tvec_dot, tvec_len).numpy()
    else:
        raise KeyError("Unsupported loss: {}".format(loss_type))
    print("Compute distmat done.")
    print("distmat shape:", distmat.shape)
    print("Start computing CMC and mAP")
    start_time = time.time()
    cmc, mAP = evaluate(distmat,
                        tq_pids,
                        tg_pids,
                        tq_camids,
                        tg_camids,
                        use_metric_cuhk03=use_metric_cuhk03,
                        use_cython=False)
    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Evaluate train data time (h:m:s): {}.".format(elapsed))
    print("Train data results ----------")
    print("Epoch {} trmAP: {:.2%}".format(epoch, mAP))
    print("CMC curve")
    for r in ranks:
        print("Epoch {} trRank-{:<3}: {:.2%}".format(epoch, r, cmc[r - 1]))
    print("------------------")
    if return_distmat:
        return distmat
    return cmc[0], mAP
# Exemplo n.º 6
# 0
class NetTrain:
    """Backbone + head trainer with checkpointing, LR scheduling, TensorBoard
    logging and periodic LFW evaluation.

    Bug fixed: ``search_learn_rate`` referenced ``self.loss_meter`` although
    ``init_meter`` creates ``self.loss_meters`` — the first logging step would
    have raised ``AttributeError``.
    """

    def __init__(self, backbone, head, train_data_loader, val_dataset,
                 criterions=None,
                 loss_weights=None,
                 optimizer=None,
                 backbone_name='backbone',
                 head_name='head',
                 lowest_train_loss=5,
                 use_cuda=True, gpu_list=None):
        self.train_data_loader = train_data_loader
        self.backbone = backbone
        self.head = head
        self.backbone_name = backbone_name
        self.head_name = head_name
        # NOTE(review): `loss_forward` must be defined at module level —
        # it is not a parameter of this constructor.
        self.loss_forward = loss_forward
        self.evaluation = Evaluation(val_dataset, 'lfw', self.batch_inference)
        self.criterions = criterions
        self.loss_weights = loss_weights
        self.optimizer = optimizer
        self.lowest_train_loss = lowest_train_loss
        self.use_cuda = use_cuda
        self.gpu_list = gpu_list
        self.writer = SummaryWriter()
        sys.stdout = Logger()
        self.epoch = 0
        self.max_epoch = 400
        # Epoch at which conv+bn layers are fused in place.
        self.combine_conv_bn_epoch = 150
        self.init_meter()

        if self.criterions is None:
            self.criterions = {'xent': torch.nn.CrossEntropyLoss()}
        if self.loss_weights is None:
            self.loss_weights = torch.as_tensor([1.0] * len(self.criterions))
        if self.gpu_list is None:
            self.gpu_list = range(torch.cuda.device_count())
        if self.use_cuda:
            self.backbone.cuda()
            self.head.cuda()
            self.loss_weights = self.loss_weights.cuda()

    def init_meter(self):
        """Create accuracy/loss meters (one loss meter per criterion)."""
        self.accuracy_top_1 = AverageMeter()
        self.accuracy_top_5 = AverageMeter()
        self.total_losses_meter = AverageMeter()
        self.loss_meters = list()
        for index, criterion_name in enumerate(self.criterions.keys()):
            self.loss_meters.append(AverageMeter())

    def reset_meter(self):
        """Reset all meters at the start of an epoch."""
        self.accuracy_top_1.reset()
        self.accuracy_top_5.reset()
        self.total_losses_meter.reset()
        for index, criterion_name in enumerate(self.criterions.keys()):
            self.loss_meters[index].reset()

    def load_checkpoint(self, check_point, finetune=False, pretrained=False):
        """Load a checkpoint file.

        pretrained: the file holds raw backbone weights only.
        finetune:   restore backbone weights and matching optimizer state.
        otherwise:  restore backbone, head, optimizer, loss and epoch.
        """
        check_point = torch.load(check_point)
        if pretrained:
            self.backbone.load_state_dict(check_point)
            return
        if finetune:
            # Load the feature-extractor (backbone) network parameters.
            mapped_state_dict = self.backbone.state_dict()
            for key, value in check_point['backbone'].items():
                mapped_state_dict[key] = value
            self.backbone.load_state_dict(mapped_state_dict)
            # Load the optimizer state matching the backbone parameters.
            optimizer_state_dict = self.optimizer.state_dict()
            param_len = len(optimizer_state_dict['param_groups'][0]['params'])
            for index in range(param_len):
                optimizer_state_dict['state'].update({
                    optimizer_state_dict['param_groups'][0]['params'][index]:
                        check_point['optimizer']['state'].get(
                            check_point['optimizer']['param_groups'][0]['params'][index])})
            self.optimizer.load_state_dict(optimizer_state_dict)
        else:
            self.lowest_train_loss = check_point['loss']
            self.epoch = check_point['epoch']
            # Past the fusion epoch the saved weights refer to fused modules,
            # so fuse before loading.
            if self.epoch > 150:
                fuse_module(self.backbone)
                fuse_module(self.head)
            print("lowest_train_loss: ", self.lowest_train_loss)
            mapped_state_dict = self.backbone.state_dict()
            for key, value in check_point['backbone'].items():
                mapped_state_dict[key] = value
            self.backbone.load_state_dict(mapped_state_dict)

            mapped_state_dict = self.head.state_dict()
            for key, value in check_point['head'].items():
                mapped_state_dict[key] = value
            self.head.load_state_dict(mapped_state_dict)
            self.optimizer.load_state_dict(check_point['optimizer'])

    def finetune_model(self):
        """Freeze the backbone and train only the head."""
        if isinstance(self.backbone, torch.nn.DataParallel):
            backbone_named_children = self.backbone.module.named_children()
        else:
            backbone_named_children = self.backbone.named_children()
        if isinstance(self.head, torch.nn.DataParallel):
            head_named_children = self.head.module.named_children()
        else:
            head_named_children = self.head.named_children()
        for name, module in backbone_named_children:
            module.eval()
            for p in module.parameters():
                p.requires_grad = False
        for name, module in head_named_children:
            module.train()
            for p in module.parameters():
                p.requires_grad = True

    def set_bn_eval(self, m):
        """Module hook: put any BatchNorm layer into eval mode."""
        classname = m.__class__.__name__
        if classname.find('BatchNorm') != -1:
            m.eval()

    def adjust_lr_exp(self, optimizer, ep, total_ep, start_decay_at_ep):
        """Decay exponentially in the later phase of training. All parameters in the
        optimizer share the same learning rate.

        Args:
          optimizer: a pytorch `Optimizer` object
          base_lr: starting learning rate
          ep: current epoch, ep >= 1
          total_ep: total number of epochs to train
          start_decay_at_ep: start decaying at the BEGINNING of this epoch

        Example:
          base_lr = 2e-4
          total_ep = 300
          start_decay_at_ep = 201
          It means the learning rate starts at 2e-4 and begins decaying after 200
          epochs. And training stops after 300 epochs.

        NOTE:
          It is meant to be called at the BEGINNING of an epoch.
        """
        assert ep >= 1, "Current epoch number should be >= 1"
        if ep < start_decay_at_ep:  # warm-up
            for g in optimizer.param_groups:
                g['lr'] = (g['initial_lr'] * 0.1 * (10 ** (float(ep) / start_decay_at_ep)))
                print('=====> lr adjusted to {:.10f}'.format(g['lr']).rstrip('0'))
        else:
            for g in optimizer.param_groups:
                g['lr'] = (g['initial_lr'] * (0.001 ** (float(ep + 1 - start_decay_at_ep)
                                                        / (total_ep + 1 - start_decay_at_ep))))
                print('=====> lr adjusted to {:.10f}'.format(g['lr']).rstrip('0'))

    def eval(self):
        """Run LFW evaluation and log the ROC curve to TensorBoard."""
        self.backbone.eval()
        self.head.eval()
        accuracy, best_thresholds, roc_curve_tensor = self.evaluation.evaluate()
        buffer_val(self.writer, 'lfw', accuracy, best_thresholds, roc_curve_tensor, self.epoch)
        # self.evaluation.eval_rerank()

    def train(self, epoches=10, save_flag=True, finetune=False):
        """Main training loop: LR schedule, optional finetune phase, conv+bn
        fusion at `combine_conv_bn_epoch`, periodic eval and checkpointing."""
        if len(self.gpu_list) > 1:
            self.backbone = torch.nn.DataParallel(self.backbone, device_ids=self.gpu_list)
            self.head = torch.nn.DataParallel(self.head, device_ids=self.gpu_list)
            cudnn.benchmark = True
        while self.epoch < epoches:
            print("Epoch: ", self.epoch)
            self.adjust_lr_exp(self.optimizer, self.epoch + 1, epoches, int(finetune) * 10 + 10)
            if self.epoch % 10 == 0:
                print(self.optimizer)
            self.reset_meter()
            if finetune and self.epoch < 10:
                self.finetune_model()
            else:
                self.backbone.train()
                self.head.train()
            if self.epoch == self.combine_conv_bn_epoch:
                # Fuse conv+bn layers (instead of merely freezing BN updates).
                fuse_module(self.backbone)
                fuse_module(self.head)
            self.train_epoch()
            if (self.epoch + 1) % 10 == 0:
                self.eval()
            if save_flag:
                self.save_model()
                self.save_model(False, False)
            self.epoch += 1
        torch.cuda.empty_cache()
        self.writer.close()
        print("Finished training.")

    def search_learn_rate(self):
        """LR range test: grow the LR geometrically each step and log losses."""
        if self.use_cuda:
            self.backbone = torch.nn.DataParallel(self.backbone, device_ids=self.gpu_list)
            self.head = torch.nn.DataParallel(self.head, device_ids=self.gpu_list)
            cudnn.benchmark = True
        print(self.optimizer)
        # Multiplier that spans five orders of magnitude over one epoch.
        lr_mult = (1 / 1e-5) ** (1 / self.train_data_loader.__len__())
        self.backbone.train()
        self.head.train()
        self.reset_meter()
        train_data_batch = self.train_data_loader.__len__()
        batch_iterator = iter(self.train_data_loader)
        for step in range(train_data_batch):
            start = time.time()

            images, targets = next(batch_iterator)
            self.batch_inference(images, targets)

            end = time.time()
            batch_time = end - start
            eta = int(batch_time * ((train_data_batch - step) + (self.max_epoch - self.epoch) * train_data_batch))
            for g in self.optimizer.param_groups:
                g['lr'] = (g['lr'] * lr_mult)
            if (step + 1) % 10 == 0:
                print_infos = 'Epoch:{}/{} || Epochiter: {}/{} || Batchtime: {:.4f} s || ETA: {} || ' \
                    .format(self.epoch, self.max_epoch, step, train_data_batch,
                            batch_time, str(datetime.timedelta(seconds=eta)))
                print_infos += 'acc_top1: {:>.4f}, acc_top5: {:>.4f}, total_loss: {:>.4f}( {:>.4f})'.format(
                    self.accuracy_top_1.avg, self.accuracy_top_5.avg,
                    self.total_losses_meter.val, self.total_losses_meter.avg)
                # BUGFIX: the attribute is `loss_meters` (see init_meter);
                # `self.loss_meter` raised AttributeError here.
                for index, criterion_name in enumerate(self.criterions.keys()):
                    print_infos = print_infos + f", {criterion_name}: {self.loss_meters[index].val:>.4f}" \
                                                f"({self.loss_meters[index].avg:>.4f})"
                print(print_infos)
                self.writer.add_scalar('loss/loss', self.total_losses_meter.val, step)
                self.writer.add_scalar('loss/total_loss', self.total_losses_meter.val, step)
                for index, criterion_name in enumerate(self.criterions.keys()):
                    self.writer.add_scalar(f'loss/{criterion_name}', self.loss_meters[index].val,
                                           step)
                self.writer.add_scalar('acc/acc_top1', self.accuracy_top_1.val, step)
                self.writer.add_scalar('acc/acc_top5', self.accuracy_top_5.val, step)
                self.writer.add_scalar('learning_rate', self.optimizer.param_groups[0]['lr'], step)
            if (step + 1) % 100 == 0:
                print(self.optimizer)
        torch.cuda.empty_cache()
        self.writer.close()
        print("Finished training.")

    def train_epoch(self):
        """Iterate once over the training loader, logging metrics and
        parameter histograms to TensorBoard."""
        train_data_batch = self.train_data_loader.__len__()
        batch_iterator = iter(self.train_data_loader)
        for step in range(train_data_batch):
            start = time.time()

            images, targets = next(batch_iterator)
            self.batch_inference(images, targets)

            end = time.time()
            batch_time = end - start
            eta = int(batch_time * ((train_data_batch - step) + (self.max_epoch - self.epoch) * train_data_batch))
            if step % 20 == 0:
                print_infos = 'Epoch:{}/{} || Epochiter: {}/{} || Batchtime: {:.4f} s || ETA: {} || ' \
                    .format(self.epoch, self.max_epoch, step, train_data_batch,
                            batch_time, str(datetime.timedelta(seconds=eta)))
                print_infos += ' acc_top1: {:>.4f}, acc_top5: {:>.4f}, total_loss: {:>.4f}( {:>.4f})'.format(
                    self.accuracy_top_1.avg, self.accuracy_top_5.avg,
                    self.total_losses_meter.val, self.total_losses_meter.avg)
                for index, criterion_name in enumerate(self.criterions.keys()):
                    print_infos = print_infos + f", {criterion_name}: {self.loss_meters[index].val:>.4f}" \
                                                f"({self.loss_meters[index].avg:>.4f})"
                print(print_infos)
            if step % 100 == 0:
                # Histogram logging is expensive; only every 100 steps.
                for name, param in self.backbone.named_parameters():
                    self.writer.add_histogram(name, param.clone().cpu().data.numpy(),
                                              step + self.epoch * train_data_batch)
                for name, param in self.head.named_parameters():
                    self.writer.add_histogram(
                        name,
                        param.clone().cpu().data.numpy(),
                        step + self.epoch * train_data_batch)
                self.writer.add_scalar('loss/total_loss', self.total_losses_meter.val, step + self.epoch * train_data_batch)
                for index, criterion_name in enumerate(self.criterions.keys()):
                    self.writer.add_scalar(f'loss/{criterion_name}', self.loss_meters[index].val,
                                           step + self.epoch * train_data_batch)
                self.writer.add_scalar('acc/acc_top1', self.accuracy_top_1.val, step + self.epoch * train_data_batch)
                self.writer.add_scalar('acc/acc_top5', self.accuracy_top_5.val, step + self.epoch * train_data_batch)
        print("Total train loss:", self.total_losses_meter.avg)

    def save_model(self, save_head=True, save_optimizer=True):
        """Checkpoint the model when the epoch loss improves or is < 2.0."""
        if self.total_losses_meter.avg < self.lowest_train_loss or self.total_losses_meter.avg < 2.0:
            # NOTE(review): `.module` only exists after DataParallel wrapping
            # (len(gpu_list) > 1), but this unwraps whenever use_cuda is set —
            # confirm the single-GPU save path.
            state = {
                'backbone': self.backbone.module.state_dict() if self.use_cuda else self.backbone.state_dict(),
                'loss': self.total_losses_meter.avg,
                'epoch': self.epoch + 1
            }
            if save_optimizer:
                state.update({'optimizer': self.optimizer.state_dict()})
            model_name = self.backbone_name
            if save_head:
                state.update({'head': self.head.module.state_dict() if self.use_cuda else self.head.state_dict()})
                model_name = '_'.join([self.backbone_name, self.head_name])
            if not os.path.exists('./checkpoints'):
                os.makedirs('./checkpoints')
            save_path = './checkpoints/{}_{}_{:.04f}.pth'.format(model_name, self.epoch,
                                                                 self.total_losses_meter.avg)
            torch.save(state, save_path)
        if self.total_losses_meter.avg < self.lowest_train_loss:
            self.lowest_train_loss = self.total_losses_meter.avg

    def batch_inference(self, images, targets=None, backward=True):
        """Forward (and optionally backward) one batch.

        Returns backbone features in eval mode / when targets is None,
        otherwise head outputs after updating meters (and, if `backward`,
        the optimizer).
        """
        if self.use_cuda:
            images = images.cuda()
            if targets is not None:
                targets = targets.cuda()
        if (not self.backbone.training and not self.head.training) or targets is None:
            features = self.backbone(images)
            return features
        features = self.backbone(images)
        outputs = self.head(features, targets.long())
        total_loss = 0

        losses = self.loss_forward(self.criterions, features, outputs, targets)
        accuracy_top_1, accuracy_top_5 = accuracy(outputs, targets, (1, 5))
        total_loss = torch.stack(losses).mul(self.loss_weights).sum()

        if backward:
            self.optimizer.zero_grad()
            total_loss.backward()
            apply_weight_decay(self.backbone)
            apply_weight_decay(self.head)
            self.optimizer.step()

        losses_value = []
        for index, criterion_name in enumerate(self.criterions.keys()):
            losses_value.append(losses[index].item())
        total_loss_value = total_loss.item()
        accuracy_top_1_value = accuracy_top_1.item()
        accuracy_top_5_value = accuracy_top_5.item()

        for index, criterion_name in enumerate(self.criterions.keys()):
            self.loss_meters[index].update(losses_value[index], targets.size(0))
        self.total_losses_meter.update(total_loss_value, targets.size(0))
        self.accuracy_top_1.update(accuracy_top_1_value, targets.size(0))
        self.accuracy_top_5.update(accuracy_top_5_value, targets.size(0))
        return outputs
def train(epoch,
          model,
          criterion,
          optimizer,
          trainloader,
          use_gpu,
          freeze_bn=False):
    """One training epoch with a single criterion; BN may be frozen via the
    `freeze_bn` flag or the global `args.freeze_bn` switch."""
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    if freeze_bn or args.freeze_bn:
        # Put every batch-norm layer into eval mode.
        model.apply(set_bn_to_eval)

    stamp = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        data_time.update(time.time() - stamp)

        if use_gpu:
            imgs = imgs.cuda()
            pids = pids.cuda()

        outputs = model(imgs)
        if isinstance(outputs, tuple):
            # Multiple outputs: apply the criterion at each supervision level.
            loss = DeepSupervision(criterion, outputs, pids)
        else:
            loss = criterion(outputs, pids)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - stamp)
        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))

        stamp = time.time()
# Exemplo n.º 8
# 0
def test(model,
         queryloader,
         galleryloader,
         use_gpu,
         ranks=[1, 5, 10, 20],
         return_distmat=False):
    """Evaluate a ReID model: extract query/gallery features, build the
    pairwise squared-euclidean distance matrix and report CMC / mAP.

    Args:
        model: feature extractor; ``model(imgs)`` returns a 2-D batch of
            feature vectors.
        queryloader / galleryloader: iterables of ``(imgs, pids, camids)``.
        use_gpu (bool): move image batches to CUDA when True.
        ranks (list[int]): CMC ranks to print (never mutated).
        return_distmat (bool): return the full distance matrix instead of
            the rank-1 score.

    Returns:
        numpy distance matrix if ``return_distmat`` else the rank-1 CMC.
    """
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        # --- query feature extraction ---
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)  # forward-pass time only

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        # --- gallery feature extraction ---
        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch))

    # Squared euclidean distances: ||q||^2 + ||g||^2 - 2 * q @ g^T.
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # Fix: the positional addmm_(beta, alpha, mat1, mat2) form is deprecated
    # and removed in current PyTorch; the keyword form computes the same
    # in-place update distmat = 1*distmat + (-2) * qf @ gf.t().
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    if return_distmat:
        return distmat
    return cmc[0]
Exemplo n.º 9
0
    # Optionally warm-start from a checkpoint file before matching.
    if args.load_weights and check_isfile(args.load_weights):
        load_pretrained_weights(model, args.load_weights)

    # Multi-GPU wrapper when CUDA is in use; leaves the model untouched otherwise.
    model = nn.DataParallel(model).cuda() if use_gpu else model

    print('Matching {} ...'.format(args.test_set))
    queryloader = testloader_dict['query']
    galleryloader = testloader_dict['test']
    
    
    
    
    
#     distmat, q_pids, g_pids, q_camids, g_camids = run(model, queryloader, galleryloader, use_gpu, return_distmat=True)
    # Inline gallery-feature extraction (eval mode, gradients disabled).
    batch_time = AverageMeter()
    model.eval()
    with torch.no_grad():
        
        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids, _) in enumerate(tqdm(galleryloader)):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            # batch_time measures the forward pass only, not data loading.
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
Exemplo n.º 10
0
def run(model,
        queryloader,
        galleryloader,
        use_gpu,
        ranks=[1, 5, 10, 20],
        return_distmat=False):
    """Extract query/gallery features and compute distance matrices.

    Builds the raw squared-euclidean distance matrix, then applies optional
    post-processing controlled by local 0/1 flags (feature normalization,
    DBA, query expansion, VOC-ReID, re-ranking).

    Returns:
        ``(distmat, q_pids, g_pids, q_camids, g_camids, distmat_np, feats)``
        — raw numpy distance matrix, id/camera arrays, the post-processed
        distance matrix, and concatenated query+gallery features (numpy).
    """
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids, _) in enumerate(tqdm(queryloader)):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)  # extend() appends the whole sequence at once
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)  # query image features
        q_pids = np.asarray(q_pids)  # query image ids
        q_camids = np.asarray(q_camids)  # query camera ids

        print('Extracted features for query set, obtained {}-by-{} matrix'.
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids,
                        _) in enumerate(tqdm(galleryloader)):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)  # gallery image features
        g_pids = np.asarray(g_pids)  # gallery image ids
        g_camids = np.asarray(g_camids)  # gallery camera ids

        print('Extracted features for gallery set, obtained {}-by-{} matrix'.
              format(gf.size(0), gf.size(1)))

    print('=> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(
        batch_time.avg, args.test_batch_size))

    # Squared euclidean distances: ||q||^2 + ||g||^2 - 2 * q @ g^T.
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # NOTE(review): positional addmm_(beta, alpha, ...) is deprecated in
    # newer PyTorch — confirm against the torch version pinned here.
    distmat.addmm_(1, -2, qf, gf.t())  # distance matrix from the features
    distmat = distmat.numpy()
    '''补充'''
    # Extra-processing switches (0 = off, 1 = on).
    feat_norm = 0  #1 # this one should be enabled
    do_DBA = 0  # apparently ruiyan does not do this?

    feats = torch.cat([qf, gf])

    if feat_norm:
        feats = torch.nn.functional.normalize(feats, dim=1, p=2)
        print("The test feature is normalized")
    if do_DBA:
        feats = database_aug(feats, top_k=6)
    '''后处理'''
    # Post-processing switches (0 = off, 1 = on).
    query_expansion = 0  #1
    TRACK_AUG = 0
    USE_VOC = 0
    do_rerank = 1  #1
    TRACK_RERANK = 0

    if query_expansion:
        print('before query_expansion', qf.shape)
        qf = average_query_expansion(qf, feats, top_k=6)  # post-process: query expansion
        print('Query expansion done.')
        print('after query_expansion', qf.shape)
    # if TRACK_AUG:
    #     gf = track_aug(gf, self.dataset.test_tracks, self.img_paths[self.num_query:]) # post-process: track_aug
    if USE_VOC:
        print('using VOC-ReID')
        # cam_dist = np.load(self.cfg.TEST.CAM_DIST_PATH)  # load precomputed camera-ReID distance matrix
        # ori_dist = np.load(self.cfg.TEST.ORI_DIST_PATH)  # orientation ReID
    else:
        cam_dist = None
        ori_dist = None
    if do_rerank:  # post-process: re-ranking (uses camera ReID and orientation ReID) [50, 15, 0.5]
        distmat_np = re_ranking(qf,
                                gf,
                                k1=50,
                                k2=15,
                                lambda_value=0.5,
                                USE_VOC=USE_VOC,
                                cam_dist=cam_dist,
                                ori_dist=ori_dist)
        print('re-ranking done')
    else:  # without re-ranking
        print('no re-ranking')
        distmat_np, indices = comput_distmat(qf, gf)  # distance matrix directly from qf/gf
        distmat_np = distmat_np.cpu().numpy()  # hence the _np suffix
    indices_np = np.argsort(distmat_np,
                            axis=1)  # per-row ranking: sort columns, rows stay fixed
    # if TRACK_RERANK:
    #     rerank_indice_by_track(indices_np, self.img_paths[self.num_query:], self.dataset.test_tracks)
    print('post-process done')
    '''后处理结束'''
    #     print(type(distmat),type(distmat_np))
    print((distmat == distmat_np).all())

    return distmat, q_pids, g_pids, q_camids, g_camids, distmat_np, feats.cpu(
    ).numpy()
Exemplo n.º 11
0
    def _eval_epoch(self, epoch):
        """Evaluate encoder/decoder for one epoch over ``self.eval_loader``.

        Computes classification loss, reconstruction loss and accuracy,
        logs the epoch averages to TensorBoard, and returns the average
        accuracy.

        Args:
            epoch (int): epoch index used as the TensorBoard global step.

        Returns:
            float: average classification accuracy over the eval set.
        """
        self.encoder.eval()
        self.decoder.eval()

        accs = AverageMeter()
        losses_clsf = AverageMeter()
        losses_recons = AverageMeter()
        for batch_idx, (inputs, targets) in enumerate(self.eval_loader):
            bs = inputs.size(0)
            if USE_GPU:
                inputs = inputs.cuda()
                targets = targets.cuda()
            outputs, fms = self.encoder(inputs, return_fm=True)
            loss_clsf = self.criterion_ce(outputs, targets)
            _, preds = outputs.max(dim=1)
            acc = preds.eq(targets).sum().float() / bs
            accs.update(acc.item(), bs)
            losses_clsf.update(loss_clsf.item(), bs)

            img_recons = self.decoder(fms[3], scale_factor=self.decoder_scale_factor,
                                      out_size=self.decoder_output_size)
            if self.dsae:
                # DSAE mode reconstructs an intermediate feature map
                loss_recons = self.criterion_mse(img_recons, fms[2])  # reconstruction loss
            else:
                loss_recons = self.criterion_mse(img_recons, inputs)  # reconstruction loss
            # Fix: record the Python float, not the live tensor, so the
            # meter (and writer below) don't keep autograd graphs / CUDA
            # tensors alive — consistent with _train_epoch.
            losses_recons.update(loss_recons.item(), bs)

        acc_avg = accs.avg
        loss_c_avg = losses_clsf.avg
        loss_r_avg = losses_recons.avg
        self.writer.add_scalar('Loss/eval/Classification', loss_c_avg, global_step=epoch)
        self.writer.add_scalar('Loss/eval/Reconstruction', loss_r_avg, global_step=epoch)
        self.writer.add_scalar('Accuracy/eval', acc_avg, global_step=epoch)
        print('-Eval- Epoch: {}, Loss(C|R): {:.4f} | {:.4f}, Accuracy: {:.2%}'.format(
            epoch, loss_c_avg, loss_r_avg, acc_avg))

        return acc_avg
Exemplo n.º 12
0
    def _train_epoch(self, epoch):
        """Train encoder+decoder for one epoch over ``self.train_loader``.

        Optimizes a weighted sum of classification (cross-entropy) and
        reconstruction (MSE) losses: ``gamma * CE + (1 - gamma) * MSE``.
        Epoch averages are written to TensorBoard and printed.

        Args:
            epoch (int): epoch index used as the TensorBoard global step.
        """
        self.encoder.train()
        self.decoder.train()

        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses_clsf = AverageMeter()
        losses_recons = AverageMeter()
        accs = AverageMeter()

        end = time.time()
        for batch_idx, (inputs, targets) in enumerate(self.train_loader):
            data_time.update(time.time() - end)

            if USE_GPU:
                inputs = inputs.cuda()
                targets = targets.cuda()

            bs = inputs.size(0)
            # === Forward
            outputs, fms = self.encoder(inputs, return_fm=True)
            img_recons = self.decoder(fms[3], scale_factor=self.decoder_scale_factor,
                                      out_size=self.decoder_output_size)
            if self.dsae:
                # DSAE mode reconstructs an intermediate feature map
                loss_recons = self.criterion_mse(img_recons, fms[2])  # reconstruction loss
            else:
                loss_recons = self.criterion_mse(img_recons, inputs)

            losses_recons.update(loss_recons.item(), bs)

            loss_clsf = self.criterion_ce(outputs, targets)  # classification loss
            _, preds = outputs.max(dim=1)
            acc = preds.eq(targets).sum().float() / bs
            accs.update(acc.item(), bs)
            losses_clsf.update(loss_clsf.item(), bs)

            # === Backward
            # gamma trades off classification vs. reconstruction objectives.
            loss_all = self.gamma * loss_clsf + (1 - self.gamma) * loss_recons
            self.optimizer.zero_grad()
            loss_all.backward()
            # Clip gradient norm to 5 for stability before stepping.
            nn.utils.clip_grad_norm_(self.trainable_params, max_norm=5., norm_type=2)
            self.optimizer.step()

            # print(batch_idx, '; loss:', loss.item())
            batch_time.update(time.time() - end)
            end = time.time()

            # Release CUDA memory
            torch.cuda.empty_cache()
        self.scheduler.step()

        acc_avg = accs.avg
        loss_c_avg = losses_clsf.avg
        loss_r_avg = losses_recons.avg
        self.writer.add_scalar('Loss/train/Classification', loss_c_avg, global_step=epoch)
        self.writer.add_scalar('Loss/train/Reconstruction', loss_r_avg, global_step=epoch)
        print(
            '-Train- Epoch: {}, Lr: {:.5f}, Time: {:.1f}s, Data: {:.1f}s, '
            'Loss(C|R): {:.4f} | {:.4f}, Acc: {:.2%}'.format(
                epoch, self.lr, batch_time.sum, data_time.sum, loss_c_avg, loss_r_avg, acc_avg))
Exemplo n.º 13
0
def test(model,
         queryloader,
         galleryloader,
         batch_size,
         use_gpu,
         ranks=[1, 5, 10],
         return_distmat=False):
    """Evaluate a ReID model on 4-tuple loaders and report CMC / mAP.

    Args:
        model: feature extractor; ``model(imgs)`` returns 2-D features.
        queryloader / galleryloader: iterables yielding
            ``(imgs, pids, camids, _)`` batches.
        batch_size (int): test batch size, printed for timing context.
        use_gpu (bool): move image batches to CUDA when True.
        ranks (list[int]): CMC ranks to print (never mutated).
        return_distmat (bool): kept for signature compatibility — this
            variant always returns ``(cmc[0], distmat)``.

    Returns:
        tuple: (rank-1 CMC score, numpy distance matrix).
    """
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        # --- query feature extraction ---
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids, _) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)  # forward-pass time only

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print('Extracted features for query set, obtained {}-by-{} matrix'.
              format(qf.size(0), qf.size(1)))

        # --- gallery feature extraction ---
        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids, _) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print('Extracted features for gallery set, obtained {}-by-{} matrix'.
              format(gf.size(0), gf.size(1)))

    print('=> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(
        batch_time.avg, batch_size))

    # Squared euclidean distances: ||q||^2 + ||g||^2 - 2 * q @ g^T.
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # Fix: the positional addmm_(beta, alpha, mat1, mat2) form is deprecated
    # and removed in current PyTorch; the keyword form computes the same
    # in-place update distmat = 1*distmat + (-2) * qf @ gf.t().
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    #distmat = re_ranking(qf, gf, k1=50, k2=15, lambda_value=0.3)
    print('Computing CMC and mAP')
    # cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, target_names)
    # NOTE(review): the trailing 10 appears to replace a target_names arg —
    # confirm against this project's evaluate() signature.
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, 10)

    print('Results ----------')
    print('mAP: {:.1%}'.format(mAP))
    print('CMC curve')
    for r in ranks:
        print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))
    print('------------------')

    return cmc[0], distmat
Exemplo n.º 14
0
def main():
    #GENERAL
    torch.cuda.empty_cache()
    root = "/home/kuru/Desktop/veri-gms-master_noise/"
    train_dir = '/home/kuru/Desktop/veri-gms-master_noise/VeRispan/image_train/'
    source = {'verispan'}
    target = {'verispan'}
    workers = 4
    height = 240
    width = 240
    train_size = 32
    train_sampler = 'RandomSampler'

    #AUGMENTATION
    random_erase = True
    jitter = True
    aug = True

    #OPTIMIZATION
    opt = 'adam'
    lr = 0.0003
    weight_decay = 5e-4
    momentum = 0.9
    sgd_damp = 0.0
    nesterov = True
    warmup_factor = 0.01
    warmup_method = 'linear'

    #HYPERPARAMETER
    max_epoch = 80
    start = 0
    train_batch_size = 16
    test_batch_size = 50

    #SCHEDULER
    lr_scheduler = 'multi_step'
    stepsize = [30, 60]
    gamma = 0.1

    #LOSS
    margin = 0.3
    num_instances = 4
    lambda_tri = 1

    #MODEL
    #arch = 'resnet101'
    arch = 'resnet101_ibn_a'
    no_pretrained = False

    #TEST SETTINGS
    load_weights = '/home/kuru/Desktop/veri-gms-master/IBN-Net_pytorch0.4.1/resnet101_ibn_a.pth'
    #load_weights = None
    start_eval = 0
    eval_freq = -1

    #MISC
    use_gpu = True
    print_freq = 10
    seed = 1
    resume = ''
    save_dir = '/home/kuru/Desktop/veri-gms-master_noise/spanningtree_verinoise_101_stride2/'
    gpu_id = 0, 1
    vis_rank = True
    query_remove = True
    evaluate = False

    dataset_kwargs = {
        'source_names': source,
        'target_names': target,
        'root': root,
        'height': height,
        'width': width,
        'train_batch_size': train_batch_size,
        'test_batch_size': test_batch_size,
        'train_sampler': train_sampler,
        'random_erase': random_erase,
        'color_jitter': jitter,
        'color_aug': aug
    }
    transform_kwargs = {
        'height': height,
        'width': width,
        'random_erase': random_erase,
        'color_jitter': jitter,
        'color_aug': aug
    }

    optimizer_kwargs = {
        'optim': opt,
        'lr': lr,
        'weight_decay': weight_decay,
        'momentum': momentum,
        'sgd_dampening': sgd_damp,
        'sgd_nesterov': nesterov
    }

    lr_scheduler_kwargs = {
        'lr_scheduler': lr_scheduler,
        'stepsize': stepsize,
        'gamma': gamma
    }

    use_gpu = torch.cuda.is_available()
    log_name = 'log_test.txt' if evaluate else 'log_train.txt'
    sys.stdout = Logger(osp.join(save_dir, log_name))
    print('Currently using GPU ', gpu_id)
    cudnn.benchmark = True

    print('Initializing image data manager')
    #dataset = init_imgreid_dataset(root='/home/kuru/Desktop/veri-gms-master/', name='veri')
    dataset = init_imgreid_dataset(
        root='/home/kuru/Desktop/veri-gms-master_noise/', name='verispan')
    train = []
    num_train_pids = 0
    num_train_cams = 0
    print(len(dataset.train))

    for img_path, pid, camid, subid, countid in dataset.train:
        #print(img_path)
        path = img_path[56 + 6:90 + 6]
        #print(path)
        folder = path[1:4]
        #print(folder)
        #print(img_path, pid, camid,subid,countid)
        pid += num_train_pids
        camid += num_train_cams
        newidd = 0
        train.append((path, folder, pid, camid, subid, countid))
        #print(train)
        #break

    num_train_pids += dataset.num_train_pids
    num_train_cams += dataset.num_train_cams

    pid = 0
    pidx = {}
    for img_path, pid, camid, subid, countid in dataset.train:
        path = img_path[56 + 6:90 + 6]

        folder = path[1:4]
        pidx[folder] = pid
        pid += 1
    #print(pidx)

    sub = []
    final = 0
    xx = dataset.train
    newids = []
    print(train[0:2])
    train2 = {}
    for k in range(0, 770):
        for img_path, pid, camid, subid, countid in dataset.train:
            if k == pid:
                newid = final + subid
                sub.append(newid)
                #print(pid,subid,newid)
                newids.append(newid)
                train2[img_path] = newid
                #print(img_path, pid, camid, subid, countid, newid)

        final = max(sub)
        #print(final)
    print(len(newids), final)

    #train=train2
    #print(train2)
    train3 = []
    for img_path, pid, camid, subid, countid in dataset.train:
        #print(img_path,pid,train2[img_path])
        path = img_path[56 + 6:90 + 6]
        #print(path)
        folder = path[1:4]
        newid = train2[img_path]
        #print((path, folder, pid, camid, subid, countid,newid ))
        train3.append((path, folder, pid, camid, subid, countid, newid))

    train = train3

    # for (path, folder, pid, camid, subid, countid,newid) in train:
    #     print(path, folder)

    path = '/home/kuru/Desktop/adhi/veri-final-draft-master_noise/gmsNoise776/'
    pkl = {}
    #pkl[0] = pickle.load('/home/kuru/Desktop/veri-gms-master/gms/620.pkl')

    entries = os.listdir(path)
    for name in entries:
        f = open((path + name), 'rb')
        ccc = (path + name)
        #print(ccc)
        if name == 'featureMatrix.pkl':
            s = name[0:13]
        else:
            s = name[0:3]
        #print(s)
        #with open (ccc,"rb") as ff:
        #    pkl[s] = pickle.load(ff)
        #print(pkl[s])
        pkl[s] = pickle.load(f)
        f.close
        #print(len(pkl))

    print('=> pickle indexing')

    data_index = search(pkl)
    print(len(data_index))

    # with open('cids.pkl', 'rb') as handle:
    #     b = pickle.load(handle)
    #     #print(b)

    # with open('index.pkl', 'rb') as handle:
    #     c = pickle.load(handle)

    transform_t = train_transforms(**transform_kwargs)
    #print(train[0],train[10])

    # train4=[]
    # for path, folder, pid, camid, subid, countid,newid  in train:
    #     if countid > 3:
    #         train4.append((path, folder, pid, camid, subid, countid,newid ))

    # print(len(train4))
    # train=train4

    #data_tfr = vd(pkl_file='index.pkl', dataset = train, root_dir='/home/kuru/Desktop/veri-gms-master/VeRi/image_train/', transform=transform_t)
    data_tfr = vdspan(
        pkl_file='index_veryspan_noise.pkl',
        dataset=train,
        root_dir=
        '/home/kuru/Desktop/veri-gms-master_noise/VeRispan/image_train/',
        transform=transform_t)
    #print(data_tfr)
    #print(trainloader)
    #data_tfr2=list(data_tfr)

    df2 = []
    data_tfr_old = data_tfr
    for (img, label, index, pid, cid, subid, countid, newid) in data_tfr:
        #print("datframe",(label))
        #print(countid)
        if countid > 4:
            #print(countid)
            df2.append((img, label, index, pid, cid, subid, countid, newid))
    print("filtered final trainset length", len(df2))

    data_tfr = df2

    # with open('df2noise_ex.pkl', 'wb') as handle:
    #     b = pickle.dump(df2, handle, protocol=pickle.HIGHEST_PROTOCOL)

    # with open('df2noise.pkl', 'rb') as handle:
    #     df2 = pickle.load(handle)
    # data_tfr=df2
    # for (img,label,index,pid, cid,subid,countid,newid) in data_tfr :
    #     print("datframe",(label))

    #data_tfr = vdspansort( dataset = train, root_dir='/home/kuru/Desktop/veri-gms-master_noise/VeRispan/image_train/', transform=transform_t)

    #trainloader = DataLoader(df2, sampler=None,batch_size=train_batch_size, shuffle=True, num_workers=workers,pin_memory=True, drop_last=True)
    trainloader = DataLoader(data_tfr,
                             sampler=None,
                             batch_size=train_batch_size,
                             shuffle=True,
                             num_workers=workers,
                             pin_memory=True,
                             drop_last=True)

    for batch_idx, (img, label, index, pid, cid, subid, countid,
                    newid) in enumerate(trainloader):
        #print("trainloader",batch_idx, (label,index,pid, cid,subid,countid,newid))
        print("trainloader", batch_idx, (label))
        break

    print('Initializing test data manager')
    dm = ImageDataManager(use_gpu, **dataset_kwargs)
    testloader_dict = dm.return_dataloaders()

    print('Initializing model: {}'.format(arch))
    model = models.init_model(name=arch,
                              num_classes=num_train_pids,
                              loss={'xent', 'htri'},
                              pretrained=not no_pretrained,
                              last_stride=2)
    print('Model size: {:.3f} M'.format(count_num_param(model)))

    if load_weights is not None:
        print("weights loaded")
        load_pretrained_weights(model, load_weights)

    #checkpoint = torch.load('/home/kuru/Desktop/veri-gms-master/logg/model.pth.tar-19')
    #model._load_from_state_dict(checkpoint['state_dict'])
    #model.load_state_dict(checkpoint['state_dict'])

    #optimizer.load_state_dict(checkpoint['optimizer'])
    #print(checkpoint['epoch'])
    #print(checkpoint['rank1'])
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    print(torch.cuda.device_count())
    model = nn.DataParallel(model).cuda() if use_gpu else model
    optimizer = init_optimizer(model, **optimizer_kwargs)

    #optimizer = init_optimizer(model)
    #optimizer.load_state_dict(checkpoint['optimizer'])

    scheduler = init_lr_scheduler(optimizer, **lr_scheduler_kwargs)
    # scheduler = WarmupMultiStepLR(optimizer, STEPS, GAMMA,
    #                               WARMUP_FACTOR,
    #                               WARMUP_EPOCHS, WARMUP_METHOD)

    criterion_xent = CrossEntropyLoss(num_classes=num_train_pids,
                                      use_gpu=use_gpu,
                                      label_smooth=True)
    criterion_htri = TripletLoss(margin=margin)
    ranking_loss = nn.MarginRankingLoss(margin=margin)

    if evaluate:
        print('Evaluate only')

        for name in target:
            print('Evaluating {} ...'.format(name))
            queryloader = testloader_dict[name]['query']
            galleryloader = testloader_dict[name]['gallery']
            _, distmat = test(model,
                              queryloader,
                              galleryloader,
                              train_batch_size,
                              use_gpu,
                              return_distmat=True)

            if vis_rank:
                visualize_ranked_results(distmat,
                                         dm.return_testdataset_by_name(name),
                                         save_dir=osp.join(
                                             save_dir, 'ranked_results', name),
                                         topk=20)
        return

    time_start = time.time()
    ranklogger = RankLogger(source, target)

    # # checkpoint = torch.load('/home/kuru/Desktop/market_all/ibna_model/model.pth.tar-79')
    # # model.load_state_dict(checkpoint['state_dict'])
    # # optimizer.load_state_dict(checkpoint['optimizer'])
    # # print(checkpoint['epoch'])
    # # start_epoch=checkpoint['epoch']
    # # start=start_epoch

    # checkpoint = torch.load('/home/kuru/Desktop/veri-gms-master/spanningtreeveri/model.pth.tar-2')
    # model.load_state_dict(checkpoint['state_dict'])
    # optimizer.load_state_dict(checkpoint['optimizer'])
    # print(checkpoint['epoch'])
    # start_epoch=checkpoint['epoch']
    # start=start_epoch

    ##start_epoch=resume_from_checkpoint('/home/kuru/Desktop/veri-gms-master/logg/model.pth.tar-20', model, optimizer=None)
    print('=> Start training')

    for epoch in range(start, max_epoch):
        print(epoch, scheduler.get_lr()[0])
        #print( torch.cuda.memory_allocated(0))
        losses = AverageMeter()
        #xent_losses = AverageMeter()
        htri_losses = AverageMeter()
        accs = AverageMeter()
        batch_time = AverageMeter()
        xent_losses = AverageMeter()

        model.train()
        for p in model.parameters():
            p.requires_grad = True  # open all layers

        end = time.time()
        for batch_idx, (img, label, index, pid, cid, subid, countid,
                        newid) in enumerate(trainloader):
            trainX, trainY = torch.zeros(
                (train_batch_size * 3, 3, height, width),
                dtype=torch.float32), torch.zeros((train_batch_size * 3),
                                                  dtype=torch.int64)
            #pids = torch.zeros((batch_size*3), dtype = torch.int16)
            #batchcount=0
            for i in range(train_batch_size):
                if (countid[i] > 4):
                    #batchcount=batchcount+1
                    #print("dfdsfs")
                    labelx = label[i]
                    indexx = index[i]
                    cidx = pid[i]
                    if indexx > len(pkl[labelx]) - 1:
                        indexx = len(pkl[labelx]) - 1

                    #maxx = np.argmax(pkl[labelx][indexx])
                    a = pkl[labelx][indexx]
                    minpos = np.argmin(ma.masked_where(a == 0, a))

                    # print(len(a))
                    # print(a)
                    # print(ma.masked_where(a==0, a))
                    # print(labelx,index,pid,cidx,minpos)
                    # print(np.array(data_index).shape)
                    # print(data_index[cidx][1])
                    pos_dic = data_tfr_old[data_index[cidx][1] + minpos]
                    #print('posdic', pos_dic)

                    neg_label = int(labelx)
                    while True:
                        neg_label = random.choice(range(1, 770))
                        #print(neg_label)
                        if neg_label is not int(labelx) and os.path.isdir(
                                os.path.join(
                                    '/home/kuru/Desktop/veri-gms-master_noise/veriNoise_train_spanning_folder',
                                    strint(neg_label))) is True:
                            break
                    negative_label = strint(neg_label)
                    neg_cid = pidx[negative_label]
                    neg_index = random.choice(
                        range(0, len(pkl[negative_label])))
                    #print(negative_label,neg_cid,neg_index,data_index[neg_cid] )
                    neg_dic = data_tfr_old[data_index[neg_cid][1] + neg_index]
                    #print('negdic', neg_dic)
                    trainX[i] = img[i]
                    trainX[i + train_batch_size] = pos_dic[0]
                    trainX[i + (train_batch_size * 2)] = neg_dic[0]
                    trainY[i] = cidx
                    trainY[i + train_batch_size] = pos_dic[3]
                    trainY[i + (train_batch_size * 2)] = neg_dic[3]
                    # trainY[i+train_batch_size] = pos_dic[7]
                    # trainY[i+(train_batch_size*2)] = neg_dic[7]
                #break
                # else:
                #     print("skiped",countid[i],subid[i],label[i])
            #break
            #print(batchcount)
            trainX = trainX.cuda()
            trainY = trainY.cuda()
            outputs, features = model(trainX)
            xent_loss = criterion_xent(outputs[0:train_batch_size],
                                       trainY[0:train_batch_size])
            htri_loss = criterion_htri(features, trainY)
            # centerloss= CENTER_LOSS_WEIGHT * center_criterion(features, trainY)

            #tri_loss = ranking_loss(features)
            #ent_loss = xent_loss(outputs[0:batch_size], trainY[0:batch_size], num_train_pids)

            #loss = htri_loss+xent_loss + centerloss
            loss = htri_loss + xent_loss

            optimizer.zero_grad()
            #optimizer_center.zero_grad()
            loss.backward()
            optimizer.step()
            # for param in center_criterion.parameters():
            #     param.grad.data *= (1. /CENTER_LOSS_WEIGHT)
            # optimizer_center.step()

            for param_group in optimizer.param_groups:
                #print(param_group['lr'] )
                lrrr = str(param_group['lr'])

            batch_time.update(time.time() - end)
            losses.update(loss.item(), trainY.size(0))
            htri_losses.update(htri_loss.item(), trainY.size(0))
            xent_losses.update(xent_loss.item(), trainY.size(0))
            accs.update(
                accuracy(outputs[0:train_batch_size],
                         trainY[0:train_batch_size])[0])

            if (batch_idx) % 50 == 0:
                print('Train ', end=" ")
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'TriLoss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'XLoss {xloss.val:.4f} ({xloss.avg:.4f})\t'
                      'OveralLoss {oloss.val:.4f} ({oloss.avg:.4f})\t'
                      'Acc {acc.val:.2f} ({acc.avg:.2f})\t'
                      'lr {lrrr} \t'.format(
                          epoch + 1,
                          batch_idx + 1,
                          len(trainloader),
                          batch_time=batch_time,
                          loss=htri_losses,
                          xloss=xent_losses,
                          oloss=losses,
                          acc=accs,
                          lrrr=lrrr,
                      ))

            end = time.time()

        # del loss
        # del htri_loss
        # del xent_loss
        # del htri_losses
        # del losses
        # del outputs
        # del features
        # del accs
        # del trainX
        # del trainY

        scheduler.step()
        print('=> Test')
        save_checkpoint(
            {
                'state_dict': model.state_dict(),
                #'rank1': rank1,
                'epoch': epoch + 1,
                'arch': arch,
                'optimizer': optimizer.state_dict(),
            },
            save_dir)
        GPUtil.showUtilization()
        print(torch.cuda.memory_allocated(), torch.cuda.memory_cached())
        for name in target:
            print('Evaluating {} ...'.format(name))
            queryloader = testloader_dict[name]['query']
            galleryloader = testloader_dict[name]['gallery']
            rank1, distmat = test(model, queryloader, galleryloader,
                                  test_batch_size, use_gpu)
            ranklogger.write(name, epoch + 1, rank1)
            rank2, distmat2 = test_rerank(model, queryloader, galleryloader,
                                          test_batch_size, use_gpu)
            ranklogger.write(name, epoch + 1, rank2)
        del queryloader
        del galleryloader
        del distmat
        print(torch.cuda.memory_allocated(), torch.cuda.memory_cached())
        torch.cuda.empty_cache()

        if (epoch + 1) == max_epoch:
            #if (epoch + 1) % 10 == 0:
            print('=> Test')
            save_checkpoint(
                {
                    'state_dict': model.state_dict(),
                    'rank1': rank1,
                    'epoch': epoch + 1,
                    'arch': arch,
                    'optimizer': optimizer.state_dict(),
                }, save_dir)
            for name in target:
                print('Evaluating {} ...'.format(name))
                queryloader = testloader_dict[name]['query']
                galleryloader = testloader_dict[name]['gallery']
                rank1, distmat = test(model, queryloader, galleryloader,
                                      test_batch_size, use_gpu)
                ranklogger.write(name, epoch + 1, rank1)
                # del queryloader
                # del galleryloader
                # del distmat

                if vis_rank:
                    visualize_ranked_results(
                        distmat,
                        dm.return_testdataset_by_name(name),
                        save_dir=osp.join(save_dir, 'ranked_results', name),
                        topk=20)
Exemplo n.º 15
0
    def train(self, fixbase=False):
        """
        Train the main net and/or the hardness-predictor (HP) net for one epoch.

        Which nets are trained is decided from ``self.epoch`` against the
        schedule in ``self.args`` (main-net training window, finetuning window,
        and the HP-net window shifted by ``hp_epoch_offset``).

        :param fixbase: Is this a fixbase epoch? (not referenced inside this
            method)
        :return: tuple ``(losses_main.avg, losses_hp.avg)`` — mean main-net and
            HP-net losses over the epoch.
        """
        losses_main = AverageMeter()
        losses_hp = AverageMeter()
        # Schedule flags derived from the current epoch.
        train_main = not self.args.train_hp_only and self.epoch < self.args.main_net_train_epochs
        train_main_finetuning = (not self.args.train_hp_only
                                 and self.epoch >= self.args.max_epoch -
                                 self.args.main_net_finetuning_epochs)
        # True exactly on the first finetuning epoch; triggers rejector update.
        rejection_epoch = (not self.args.train_hp_only
                           and self.epoch == self.args.max_epoch -
                           self.args.main_net_finetuning_epochs)
        train_hp = (self.args.hp_epoch_offset <= self.epoch <
                    self.args.hp_net_train_epochs + self.args.hp_epoch_offset)
        num_batch = len(self.trainloader)

        if rejection_epoch:
            self.update_rejector_thresholds()
        if self.args.hp_epoch_offset == self.epoch:
            self.update_hp_calibrator_thresholds()

        # Set train/eval modes. NOTE(review): the `losses` alias picked here is
        # never read again below — confirm whether it is still needed.
        if train_main or train_main_finetuning:
            self.model_main.train()
            losses = losses_main
        else:
            self.model_main.eval()
            losses = losses_hp

        if train_hp:
            self.model_hp.train()
        else:
            self.model_hp.eval()

        # For saving results to compute mean calibration thresholds.
        positive_logits_sum = torch.zeros(self.dm.num_attributes)
        negative_logits_sum = torch.zeros(self.dm.num_attributes)
        positive_num = torch.zeros(self.dm.num_attributes)
        negative_num = torch.zeros(self.dm.num_attributes)
        if self.use_gpu:
            positive_logits_sum = positive_logits_sum.cuda()
            negative_logits_sum = negative_logits_sum.cuda()
            positive_num = positive_num.cuda()
            negative_num = negative_num.cuda()

        for batch_idx, (imgs, labels, _) in enumerate(self.trainloader):

            if self.use_gpu:
                imgs, labels = imgs.cuda(), labels.cuda()
            if self.use_bbs:
                # When body-part bounding boxes are used, the label tensor
                # carries attribute labels followed by visibility labels.
                visibility_labels = labels[:, self.dm.num_attributes:]
                labels = labels[:, :self.dm.num_attributes]
                assert labels.shape == visibility_labels.shape
            else:
                visibility_labels = None
            # Run the batch through both nets.
            label_prediciton_probs = self.model_main(imgs)
            label_predicitons_logits = self.criterion_main.logits(
                label_prediciton_probs.detach())

            # Accumulate per-attribute logit sums split by ground truth, for
            # the mean calibration thresholds computed after the loop.
            labels_bool = labels > 0.5  # TODO: make nicer
            positive_logits_sum += label_predicitons_logits[labels_bool].sum(0)
            negative_logits_sum += label_predicitons_logits[~labels_bool].sum(
                0)
            positive_num += labels_bool.sum(0, dtype=torch.float)
            negative_num += (~labels_bool).sum(0, dtype=torch.float)

            if not self.args.use_confidence:
                hardness_predictions = self.model_hp(imgs)
            if train_main or train_main_finetuning:
                if not self.args.use_confidence:
                    hardness_predictions_logits = self.criterion_hp.logits(
                        hardness_predictions.detach())
                    hardness_predictions_logits = self.criterion_hp.broadcast(
                        hardness_predictions_logits)
                elif train_main_finetuning:
                    if self.args.f1_calib:
                        decision_thresholds = self.f1_calibration_thresholds
                    else:
                        decision_thresholds = None
                    # Confidence-based hardness: 1 - confidence of main net.
                    hardness_predictions_logits = 1 - metrics.get_confidence(
                        label_predicitons_logits,
                        decision_thresholds).detach()
                if self.args.no_hp_feedback or not train_hp:
                    main_net_weights = label_prediciton_probs.new_ones(
                        label_prediciton_probs.shape)
                else:
                    # Make a detached version of the hp scores for computing the main loss.
                    main_net_weights = hardness_predictions_logits
                    if self.args.use_bbs_feedback:
                        main_net_weights *= visibility_labels
                if train_main_finetuning:
                    # Zero-out (reject) samples the rejector filters out.
                    select = self.rejector(hardness_predictions_logits)
                    main_net_weights = main_net_weights * select
                # Compute main loss, gradient and optimize main net.
                loss_main = self.criterion_main(label_prediciton_probs, labels,
                                                main_net_weights)
                self.optimizer_main.zero_grad()
                loss_main.backward()
                nn.utils.clip_grad_norm_(self.model_main.parameters(),
                                         max_norm=10.0)
                self.optimizer_main.step()

                losses_main.update(loss_main.item(), labels.size(0))

            if train_hp and not self.args.use_confidence:
                # Compute HP loss, gradient and optimize HP net.
                # The label predictions are calibrated.
                loss_hp = self.criterion_hp(
                    hardness_predictions,
                    self.hp_calibrator(label_predicitons_logits), labels,
                    visibility_labels)
                self.optimizer_hp.zero_grad()
                loss_hp.backward()
                nn.utils.clip_grad_norm_(self.model_hp.parameters(),
                                         max_norm=10.0)
                self.optimizer_hp.step()

                losses_hp.update(loss_hp.item(), labels.size(0))
            # Print progress.
            # NOTE(review): module-level ``args`` is used here while the rest of
            # the method uses ``self.args`` — confirm this is intended.
            if (batch_idx + 1) % args.print_freq == 0:
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Main loss {loss.avg:.4f}\t'
                      'HP-Net loss {hp_loss.avg:.4f}'.format(
                          self.epoch + 1,
                          batch_idx + 1,
                          num_batch,
                          loss=losses_main,
                          hp_loss=losses_hp))
        # Final epoch summary.
        # NOTE(review): ``batch_idx`` would be undefined here if the train
        # loader yields no batches.
        print('Epoch: [{0}][{1}/{2}]\t'
              'Main loss {loss.avg:.4f}\t'
              'HP-Net loss {hp_loss.avg:.4f}'.format(self.epoch + 1,
                                                     batch_idx + 1,
                                                     num_batch,
                                                     loss=losses_main,
                                                     hp_loss=losses_hp))

        # Update HP calibrator thresholds (mean thresholds are only used if the option is selected in args)
        positive_logits_sum /= positive_num
        negative_logits_sum /= negative_num
        self.update_hp_calibrator_thresholds(
            (positive_logits_sum + negative_logits_sum) / 2)

        return losses_main.avg, losses_hp.avg
Exemplo n.º 16
0
def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader,
          use_gpu):
    """Run one training epoch with cross-entropy and/or hard-triplet losses.

    When ``args.htri_only`` is set, only the triplet loss on the features is
    optimized; otherwise the sum of cross-entropy (on logits) and triplet (on
    features) losses is used. Progress is printed every ``args.print_freq``
    batches.
    """
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()

    def _supervised(criterion, preds, targets):
        # Deeply-supervised models return a tuple of predictions.
        if isinstance(preds, tuple):
            return DeepSupervision(criterion, preds, targets)
        return criterion(preds, targets)

    tick = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        data_time.update(time.time() - tick)

        if use_gpu:
            imgs = imgs.cuda()
            pids = pids.cuda()

        outputs, features = model(imgs)
        if args.htri_only:
            loss = _supervised(criterion_htri, features, pids)
        else:
            xent_loss = _supervised(criterion_xent, outputs, pids)
            htri_loss = _supervised(criterion_htri, features, pids)
            loss = xent_loss + htri_loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - tick)
        losses.update(loss.item(), pids.size(0))

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                      epoch + 1,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses))

        tick = time.time()
Exemplo n.º 17
0
    def _evaluate(self,
                  epoch,
                  dataset_name='',
                  query_loader=None,
                  gallery_loader=None,
                  dist_metric='euclidean',
                  normalize_feature=False,
                  visrank=False,
                  visrank_topk=10,
                  save_dir='',
                  use_metric_cuhk03=False,
                  ranks=[1, 5, 10, 20],
                  rerank=False):
        """Extract query/gallery features, compute distances and report CMC/mAP.

        :param epoch: current epoch number (not referenced inside this method).
        :param dataset_name: name used for logging and visrank output paths.
        :param dist_metric: metric name passed to ``compute_distance_matrix``.
        :param normalize_feature: L2-normalize features before distances.
        :param visrank: if True, save visualizations of ranked results.
        :param visrank_topk: number of top matches to visualize.
        :param use_metric_cuhk03: use the CUHK03-style evaluation protocol.
        :param ranks: CMC ranks to print.
        :param rerank: apply person re-ranking to the distance matrix.
        :return: rank-1 accuracy (``cmc[0]``).
        """
        batch_time = AverageMeter()

        def _feature_extraction(data_loader):
            # Collect CPU features, person ids and camera ids from a loader.
            f_, pids_, camids_ = [], [], []
            for batch_idx, data in enumerate(data_loader):
                imgs, pids, camids = self._parse_data_for_eval(data)
                if self.use_gpu:
                    imgs = imgs.cuda()
                end = time.time()
                features = self._extract_features(imgs)
                batch_time.update(time.time() - end)
                features = features.data.cpu()
                f_.append(features)
                pids_.extend(pids)
                camids_.extend(camids)
            f_ = torch.cat(f_, 0)
            pids_ = np.asarray(pids_)
            camids_ = np.asarray(camids_)
            return f_, pids_, camids_

        print('Extracting features from query set ...')
        qf, q_pids, q_camids = _feature_extraction(query_loader)
        print('Done, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))

        print('Extracting features from gallery set ...')
        gf, g_pids, g_camids = _feature_extraction(gallery_loader)
        print('Done, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))

        print('Speed: {:.4f} sec/batch'.format(batch_time.avg))

        if normalize_feature:
            print('Normalzing features with L2 norm ...')
            qf = F.normalize(qf, p=2, dim=1)
            gf = F.normalize(gf, p=2, dim=1)

        print(
            'Computing distance matrix with metric={} ...'.format(dist_metric))
        distmat = compute_distance_matrix(qf, gf, dist_metric)
        distmat = distmat.numpy()

        if rerank:
            # Refine distances with query-query and gallery-gallery matrices.
            print('Applying person re-ranking ...')
            distmat_qq = compute_distance_matrix(qf, qf, dist_metric)
            distmat_gg = compute_distance_matrix(gf, gf, dist_metric)
            distmat = re_ranking(distmat, distmat_qq, distmat_gg)

        print('Computing CMC and mAP ...')
        cmc, mAP = evaluate_rank(distmat,
                                 q_pids,
                                 g_pids,
                                 q_camids,
                                 g_camids,
                                 use_metric_cuhk03=use_metric_cuhk03)

        print('** Results **')
        print('mAP: {:.1%}'.format(mAP))
        print('CMC curve')
        for r in ranks:
            print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))

        if visrank:
            visualize_ranked_results(
                distmat,
                self.datamanager.return_query_and_gallery_by_name(
                    dataset_name),
                self.datamanager.data_type,
                width=self.datamanager.width,
                height=self.datamanager.height,
                save_dir=osp.join(save_dir, 'visrank_' + dataset_name),
                topk=visrank_topk)

        return cmc[0]
Exemplo n.º 18
0
def run(model,
        queryloader,
        galleryloader,
        use_gpu,
        ranks=[1, 5, 10, 20],
        return_distmat=False):
    """Extract query and gallery features and build a squared-Euclidean
    distance matrix.

    Returns ``(distmat, q_pids, g_pids, q_camids, g_camids)`` where ``distmat``
    is a numpy array of shape (num_query, num_gallery).
    """
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():

        def _extract(loader):
            # Gather CPU features plus person/camera ids for one loader.
            feats, ids, cams = [], [], []
            for imgs, pids, camids, _ in loader:
                if use_gpu:
                    imgs = imgs.cuda()
                tic = time.time()
                out = model(imgs)
                batch_time.update(time.time() - tic)
                feats.append(out.data.cpu())
                ids.extend(pids)
                cams.extend(camids)
            return torch.cat(feats, 0), np.asarray(ids), np.asarray(cams)

        qf, q_pids, q_camids = _extract(queryloader)
        print('Extracted features for query set, obtained {}-by-{} matrix'.
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = _extract(galleryloader)
        print('Extracted features for gallery set, obtained {}-by-{} matrix'.
              format(gf.size(0), gf.size(1)))

    print('=> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(
        batch_time.avg, args.test_batch_size))

    # Squared Euclidean distances: ||q||^2 + ||g||^2 - 2 * q . g
    m, n = qf.size(0), gf.size(0)
    distmat = (torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n)
               + torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t())
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    return distmat, q_pids, g_pids, q_camids, g_camids
Exemplo n.º 19
0
def train(epoch, model, criterion_xent, criterion_htri, optimizer, trainloader,
          use_gpu):
    """Run one epoch of weighted cross-entropy + hard-triplet training.

    All layers are unfrozen first; the total loss is
    ``args.lambda_xent * xent + args.lambda_htri * htri``. Progress is printed
    every ``args.print_freq`` batches.
    """
    xent_losses = AverageMeter()
    htri_losses = AverageMeter()
    accs = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()

    model.train()
    # Make every parameter trainable (undo any previous layer freezing).
    for param in model.parameters():
        param.requires_grad = True

    def _supervised(criterion, preds, targets):
        # Deeply-supervised models return a tuple/list of predictions.
        if isinstance(preds, (tuple, list)):
            return DeepSupervision(criterion, preds, targets)
        return criterion(preds, targets)

    tick = time.time()
    for batch_idx, (imgs, pids, _, _) in enumerate(trainloader):
        data_time.update(time.time() - tick)

        if use_gpu:
            imgs = imgs.cuda()
            pids = pids.cuda()

        outputs, features = model(imgs)
        xent_loss = _supervised(criterion_xent, outputs, pids)
        htri_loss = _supervised(criterion_htri, features, pids)
        loss = args.lambda_xent * xent_loss + args.lambda_htri * htri_loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - tick)

        xent_losses.update(xent_loss.item(), pids.size(0))
        htri_losses.update(htri_loss.item(), pids.size(0))
        accs.update(accuracy(outputs, pids)[0])

        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}/{1}][{2}/{3}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.4f} ({data_time.avg:.4f})\t'
                  'Xent {xent.val:.4f} ({xent.avg:.4f})\t'
                  'Htri {htri.val:.4f} ({htri.avg:.4f})\t'
                  'Acc {acc.val:.2f} ({acc.avg:.2f})\t'.format(
                      epoch + 1,
                      args.max_epoch,
                      batch_idx + 1,
                      len(trainloader),
                      batch_time=batch_time,
                      data_time=data_time,
                      xent=xent_losses,
                      htri=htri_losses,
                      acc=accs))

        tick = time.time()
Exemplo n.º 20
0
def testPec(model,
            Vec,
            queryloader,
            galleryloader,
            train_query_loader,
            train_gallery_loader,
            test_batch,
            loss_type,
            euclidean_distance_loss,
            epoch,
            use_metric_cuhk03=False,
            ranks=[1, 5, 10, 20],
            return_distmat=False):
    """Evaluate a model whose forward pass is ``model(imgs, Vec)``.

    Extracts query and gallery features, builds a distance matrix according to
    ``loss_type`` (squared Euclidean for losses listed in
    ``euclidean_distance_loss``, negative cosine similarity for ``'angle'``),
    then computes and prints CMC and mAP.

    :param model: network evaluated in no-grad mode; called as
        ``model(imgs, Vec)``.
    :param Vec: auxiliary vector forwarded to the model with every batch.
    :param train_query_loader: unused here; kept for call compatibility.
    :param train_gallery_loader: unused here; kept for call compatibility.
    :param test_batch: batch size, used only in the timing printout.
    :param loss_type: key selecting the distance computation.
    :param euclidean_distance_loss: collection of loss names that imply
        Euclidean distance.
    :param epoch: epoch number, used only in the result printouts.
    :param use_metric_cuhk03: use the CUHK03-style evaluation protocol.
    :param ranks: CMC ranks to print.
    :param return_distmat: if True, return the distance matrix instead of
        ``(cmc[0], mAP)``.
    :raises KeyError: if ``loss_type`` is neither Euclidean nor ``'angle'``.
    """
    batch_time = AverageMeter()

    model.eval()
    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, _, pids, camids) in enumerate(queryloader):
            imgs = imgs.cuda()

            end = time.time()
            features = model(imgs, Vec)
            batch_time.update(time.time() - end)
            features = features.data.cpu()

            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))
        print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
            batch_time.avg, test_batch))

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, _, pids, camids) in enumerate(galleryloader):
            imgs = imgs.cuda()

            end = time.time()
            features = model(imgs, Vec)
            batch_time.update(time.time() - end)
            features = features.data.cpu()

            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))
        print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
            batch_time.avg, test_batch))

    print("Start compute distmat.")
    if loss_type in euclidean_distance_loss:
        # Squared Euclidean distances: ||q||^2 + ||g||^2 - 2 * q . g
        m, n = qf.size(0), gf.size(0)
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                  torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        # Use the keyword addmm_ signature; the positional
        # (beta, alpha, mat1, mat2) form is deprecated and removed in recent
        # PyTorch releases (and the keyword form is already used elsewhere in
        # this file).
        distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
        distmat = distmat.numpy()
    elif loss_type == 'angle':
        # Negative cosine similarity as a distance; 1e-5 avoids div-by-zero.
        vec_dot = torch.matmul(qf, gf.t())
        qf_len = qf.norm(dim=1, keepdim=True)
        gf_len = gf.norm(dim=1, keepdim=True)
        vec_len = torch.matmul(qf_len, gf_len.t()) + 1e-5
        distmat = -torch.div(vec_dot, vec_len).numpy()
    else:
        raise KeyError("Unsupported loss: {}".format(loss_type))
    print("Compute distmat done.")
    print("distmat shape:", distmat.shape)
    print("Start computing CMC and mAP")
    start_time = time.time()
    cmc, mAP = evaluate(distmat,
                        q_pids,
                        g_pids,
                        q_camids,
                        g_camids,
                        use_metric_cuhk03=use_metric_cuhk03,
                        use_cython=False)
    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Evaluate test data time (h:m:s): {}.".format(elapsed))
    print("Test data results ----------")
    print("Epoch {} temAP: {:.2%}".format(epoch, mAP))
    print("CMC curve")
    for r in ranks:
        print("Epoch {} teRank-{:<3}: {:.2%}".format(epoch, r, cmc[r - 1]))
    print("------------------")

    if return_distmat:
        return distmat
    return cmc[0], mAP
Exemplo n.º 21
0
def test_vehicleid(model,
                   queryloader,
                   galleryloader,
                   train_query_loader,
                   train_gallery_loader,
                   test_batch,
                   loss_type,
                   euclidean_distance_loss,
                   epoch,
                   use_metric_cuhk03=False,
                   ranks=[1, 5, 10, 20],
                   return_distmat=False):
    """Evaluate with the VehicleID-style one-shot protocol.

    Extracts query/gallery features and computes CMC/mAP via
    ``cmc_common_oneshot_v2``. Returns ``(cmc[0], mAP)``.
    """
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():

        def _extract(loader):
            # Gather CPU features and person ids for one loader.
            feats, ids = [], []
            for imgs, _, pids in loader:
                imgs = imgs.cuda()
                tic = time.time()
                out = model(imgs)
                batch_time.update(time.time() - tic)
                feats.append(out.data.cpu())
                ids.extend(pids)
            return torch.cat(feats, 0), np.asarray(ids)

        qf, q_pids = _extract(queryloader)
        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))
        print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
            batch_time.avg, test_batch))

        gf, g_pids = _extract(galleryloader)
        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))
        print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
            batch_time.avg, test_batch))

    print("Start computing CMC and mAP")
    start_time = time.time()
    cmc, mAP = cmc_common_oneshot_v2(qf.numpy(),
                                     q_pids,
                                     gf.numpy(),
                                     g_pids,
                                     repeat=1,
                                     topk=50)
    elapsed = str(datetime.timedelta(seconds=round(time.time() - start_time)))
    print("Evaluate test data time (h:m:s): {}.".format(elapsed))
    print("Test data results ----------")
    print("Epoch {} temAP: {:.2%}".format(epoch, mAP))
    print("CMC curve")
    for r in ranks:
        print("Epoch {} teRank-{:<3}: {:.2%}".format(epoch, r, cmc[r - 1]))
    print("------------------")

    return cmc[0], mAP
def _extract_reid_features(model, dataloader, use_gpu, batch_time):
    """Run `model` over `dataloader` and collect per-image features and labels.

    Each batch yields (imgs, pids, camids). Only the model forward pass is
    timed into `batch_time`. Returns:
        features: [N, D] float tensor on CPU,
        pids:     ndarray of person ids,
        camids:   ndarray of camera ids.
    """
    feats, all_pids, all_camids = [], [], []
    for imgs, pids, camids in dataloader:
        if use_gpu:
            imgs = imgs.cuda()

        end = time.time()
        features = model(imgs)
        batch_time.update(time.time() - end)

        feats.append(features.data.cpu())
        all_pids.extend(pids)
        all_camids.extend(camids)
    return torch.cat(feats, 0), np.asarray(all_pids), np.asarray(all_camids)


def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
    """Evaluate `model` on the query/gallery split and print CMC / mAP.

    Args:
        model: feature-extractor network; `model(imgs)` returns a [B, D] tensor.
        queryloader, galleryloader: iterables of (imgs, pids, camids) batches.
        use_gpu: move image batches to CUDA before the forward pass.
        ranks: CMC ranks to report (read-only; default is safe despite being
            a mutable list).

    Returns:
        Rank-1 CMC accuracy (float).
    """
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = _extract_reid_features(
            model, queryloader, use_gpu, batch_time)
        print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = _extract_reid_features(
            model, galleryloader, use_gpu, batch_time)
        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch))

    # Squared Euclidean distances: ||q||^2 + ||g||^2 - 2 * q . g
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # NOTE: the old positional form `addmm_(1, -2, qf, gf.t())` is deprecated
    # and removed in modern PyTorch; beta/alpha must be keyword arguments.
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r-1]))
    print("------------------")

    return cmc[0]
    def train(self, epoch, record_file=None, loss_process='mean', func='L1'):
        criterion = nn.CrossEntropyLoss().cuda()

        # Various measurements for the discrepancy
        dis_dict = {
            'L1': self.discrepancy,
            'MSE': self.discrepancy_mse,
            'Cosine': self.discrepancy_cos,
            'SWD': self.discrepancy_slice_wasserstein
        }

        self.G.train()
        self.C.train()
        torch.cuda.manual_seed(1)
        batch_time = AverageMeter()
        data_time = AverageMeter()

        batch_num = min(
            len(self.datasets.data_loader_A), 
            len(self.datasets.data_loader_B)
        )

        end = time.time()

        for batch_idx, data in enumerate(self.datasets):
            data_time.update(time.time() - end)
            img_t = data['T']
            img_s = data['S']
            label_s = data['S_label']

            if img_s.size()[0] < self.batch_size or img_t.size()[0] < self.batch_size:
                break

            img_s = img_s.cuda()
            img_t = img_t.cuda()

            imgs_st = torch.cat((img_s, img_t), dim=0)

            label_s = label_s.long().cuda()

            # Step1: update the whole network using source data
            self.reset_grad()
            feat_s = self.G(img_s)
            outputs_s = self.C(feat_s)

            loss_s = []
            for index_tr in range(self.num_classifiers_train):
                loss_s.append(criterion(outputs_s[index_tr], label_s))

            if loss_process == 'mean':
                loss_s = torch.stack(loss_s).mean()
            else:
                loss_s = torch.stack(loss_s).sum()

            loss_s.backward()
            self.opt_g.step()
            self.opt_c.step()

            # Step2: update the classifiers using target data
            self.reset_grad()
            feat_st = self.G(imgs_st)
            outputs_st = self.C(feat_st)
            outputs_s = [
                outputs_st[0][:self.batch_size], 
                outputs_st[1][:self.batch_size]
            ]
            outputs_t = [
                outputs_st[0][self.batch_size:], 
                outputs_st[1][self.batch_size:]
            ]

            loss_s = []
            loss_dis = []
            for index_tr in range(self.num_classifiers_train):
                loss_s.append(criterion(outputs_s[index_tr], label_s))

            if loss_process == 'mean':
                loss_s = torch.stack(loss_s).mean()
            else:
                loss_s = torch.stack(loss_s).sum()

            for index_tr in range(self.num_classifiers_train):
                for index_tre in range(index_tr + 1, self.num_classifiers_train):
                    loss_dis.append(dis_dict[func](outputs_t[index_tr], outputs_t[index_tre]))

            if loss_process == 'mean':
                loss_dis = torch.stack(loss_dis).mean()
            else:
                loss_dis = torch.stack(loss_dis).sum()

            loss = loss_s - loss_dis

            loss.backward()
            self.opt_c.step()

            # Step3: update the generator using target data
            self.reset_grad()

            for index in range(self.num_k+1):
                loss_dis = []
                feat_t = self.G(img_t)
                outputs_t = self.C(feat_t)

                for index_tr in range(self.num_classifiers_train):
                    for index_tre in range(index_tr + 1, self.num_classifiers_train):
                        loss_dis.append(dis_dict[func](outputs_t[index_tr], outputs_t[index_tre]))

                if loss_process == 'mean':
                    loss_dis = torch.stack(loss_dis).mean()
                else:
                    loss_dis = torch.stack(loss_dis).sum()

                loss_dis.backward()
                self.opt_g.step()
                self.reset_grad()

            batch_time.update(time.time() - end)

            if batch_idx % self.interval == 0:
                print('Train Epoch: {} [{}/{}]\t '
                      'Loss: {:.6f}\t '
                      'Discrepancy: {:.6f} \t '
                      'Lr C: {:.6f}\t'
                      'Lr G: {:.6f}\t'
                      'Time: {:.3f}({:.3f})\t'
                      .format(epoch + 1, batch_idx,
                              batch_num, loss_s.data,
                              loss_dis.data,
                              self.opt_c.param_groups[0]['lr'],
                              self.opt_g.param_groups[0]['lr'],
                              batch_time.val, batch_time.avg))

                if record_file:
                    record = open(record_file, 'a')
                    record.write('Dis Loss: {}, Cls Loss: {}, Lr C: {}, Lr G: {} \n'
                                 .format(loss_dis.data.cpu().numpy(),
                                         loss_s.data.cpu().numpy(),
                                         self.opt_c.param_groups[0]['lr'],
                                         self.opt_g.param_groups[0]['lr']))
                    record.close()