Example #1
    def forward(self, batch):
        data_time = time.time() - self._last_time
        output = self.model(batch[0])
        # The generic self.criterion is replaced by a two-head loss:
        # multi-label BCE over the first 26 targets (category head) and
        # MSE over the next 3 targets (dimension head), down-weighted by 10.
        loss_cat = torch.nn.BCEWithLogitsLoss()(output[0][0], batch[1][0][:26])
        loss_dim = torch.nn.MSELoss()(output[1][0], batch[1][0][26:29])
        loss = loss_cat + loss_dim / 10

        if self.multi_class:
            mAP = self.calculate_mAP(output, batch[1])
            self._preverse_for_show = [loss.detach(), data_time, mAP]
        else:
            prec1, prec5 = accuracy(output,
                                    batch[1],
                                    topk=(1,
                                          min(5,
                                              self.config.dataset.num_class)))
            self._preverse_for_show = [
                loss.detach(), data_time,
                prec1.detach(),
                prec5.detach()
            ]
        return loss
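The single-label branch calls an accuracy helper that is not part of these examples. Below is a minimal sketch of the conventional top-k precision implementation such a call usually expects; the function body is an assumption, not the original helper.

# Hypothetical sketch of the top-k accuracy helper used above (assumed, not from the source).
import torch

def accuracy(output, target, topk=(1,)):
    """Return precision@k for each k in topk, as percentages."""
    maxk = max(topk)
    batch_size = target.size(0)

    # Indices of the maxk highest-scoring classes per sample, shape (maxk, batch).
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res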
Example #2
    def forward(self, batch):
        data_time = time.time() - self._last_time
        output = self.model(batch[0])
        loss = self.criterion(output, batch[1])
        if self.multi_class:
            mAP = self.calculate_mAP(output, batch[1])
            self._preverse_for_show = [loss.detach(), data_time, mAP]
        else:
            prec1, prec5 = accuracy(output, batch[1], topk=(1, min(5, self.config.dataset.num_class)))
            self._preverse_for_show = [loss.detach(), data_time, prec1.detach(), prec5.detach()]
        return loss
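The multi-class branch in both forward() variants calls calculate_mAP, which is also not shown. A possible implementation for multi-hot targets is sketched below with sklearn's average_precision_score, assuming output is a single (batch, num_class) score tensor as in this example; the body and target layout are assumptions, not the original method.

# Hypothetical sketch of calculate_mAP for multi-label targets (assumed, not from the source).
import numpy as np
import torch
from sklearn.metrics import average_precision_score

def calculate_mAP(output, target):
    """Mean average precision over classes, assuming multi-hot (batch, num_class) targets."""
    scores = torch.sigmoid(output).detach().cpu().numpy()
    labels = target.detach().cpu().numpy()
    aps = []
    for c in range(labels.shape[1]):
        if labels[:, c].sum() > 0:  # skip classes with no positives in this batch
            aps.append(average_precision_score(labels[:, c], scores[:, c]))
    return float(np.mean(aps)) if aps else 0.0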
Example #3
    def evaluate(self):
        batch_time = AverageMeter(0)
        losses = AverageMeter(0)
        if self.multi_class:
            mAPs = AverageMeter(0)
        else:
            top1 = AverageMeter(0)
            top5 = AverageMeter(0)

        if self.inference_only:
            spatial_crops = self.config.get('evaluate', {}).get('spatial_crops', 1)
            temporal_samples = self.config.get('evaluate', {}).get('temporal_samples', 1)
        else:
            spatial_crops = 1
            temporal_samples = 1
        dup_samples = spatial_crops * temporal_samples

        self.model.cuda().eval()
        test_loader = self.data_loaders['test']
        test_len = len(test_loader)
        end = time.time()
        for iter_idx in range(test_len):
            inputs = self.get_batch('test')
            isizes = inputs[0].shape

            if self.config.net.model_type == '2D':
                inputs[0] = inputs[0].view(
                    isizes[0] * dup_samples, -1, isizes[2], isizes[3])
            else:
                inputs[0] = inputs[0].view(
                    isizes[0], isizes[1], dup_samples, -1, isizes[3], isizes[4])
                inputs[0] = inputs[0].permute(0, 2, 1, 3, 4, 5).contiguous()
                inputs[0] = inputs[0].view(
                    isizes[0] * dup_samples, isizes[1], -1, isizes[3], isizes[4])

            output = self.model(inputs[0])
            osizes = output.shape

            output = output.view((osizes[0] // dup_samples, -1, osizes[1]))
            output = torch.mean(output, 1)

            loss = self.criterion(output, inputs[1])
            num = inputs[0].size(0)
            losses.update(loss.item(), num)
            if self.multi_class:
                mAP = self.calculate_mAP(output, inputs[1])
                mAPs.update(mAP, num)
            else:
                prec1, prec5 = accuracy(output, inputs[1], topk=(1, min(5, self.config.dataset.num_class)))
                top1.update(prec1.item(), num)
                top5.update(prec5.item(), num)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if iter_idx % self.config.trainer.print_freq == 0:
                self.logger.info(
                    'Test: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
                        iter_idx, test_len, batch_time=batch_time))

        total_num = torch.Tensor([losses.count]).cuda()
        loss_sum = torch.Tensor([losses.avg * losses.count]).cuda()

        if self.config.gpus > 1:
            all_reduce(total_num, False)
            all_reduce(loss_sum, False)
        final_loss = loss_sum.item() / total_num.item()

        if self.multi_class:
            mAP_sum = torch.Tensor([mAPs.avg * mAPs.count]).cuda()
            if self.config.gpus > 1:
                all_reduce(mAP_sum)
            final_mAP = mAP_sum.item() / total_num.item()
            self.logger.info(' * mAP {:.3f}\tLoss {:.3f}\ttotal_num={}'.format(
                final_mAP, final_loss, total_num.item()))
            metric = Top1Metric(final_mAP, 0, final_loss)
        else:
            top1_sum = torch.Tensor([top1.avg * top1.count]).cuda()
            top5_sum = torch.Tensor([top5.avg * top5.count]).cuda()
            if self.config.gpus > 1:
                all_reduce(top1_sum, False)
                all_reduce(top5_sum, False)
            final_top1 = top1_sum.item() / total_num.item()
            final_top5 = top5_sum.item() / total_num.item()
            self.logger.info(' * Prec@1 {:.3f}\tPrec@5 {:.3f}\tLoss {:.3f}\ttotal_num={}'.format(
                final_top1, final_top5, final_loss, total_num.item()))
            metric = Top1Metric(final_top1, final_top5, final_loss)

        self.model.cuda().train()
        return metric
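Both evaluate() variants accumulate statistics with an AverageMeter whose definition lies outside these examples. A minimal sketch compatible with the calls above (update(value, n), .val, .avg, .count) follows; the constructor argument is kept only to match the AverageMeter(0) call sites and is otherwise ignored, so this is an assumption rather than the original class.

# Hypothetical sketch of AverageMeter (assumed; the real class is not shown in these examples).
class AverageMeter(object):
    """Tracks the latest value and a running sum/count/average."""

    def __init__(self, length=0):
        # 'length' mirrors the AverageMeter(0) call sites above; this sketch ignores it.
        self.length = length
        self.reset()

    def reset(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count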
Example #4
    def evaluate(self):
        global predict_mode
        output_data = []
        batch_time = AverageMeter(0)
        losses = AverageMeter(0)
        if self.multi_class:
            mAPs = AverageMeter(0)
        else:
            top1 = AverageMeter(0)
            top5 = AverageMeter(0)

        if self.inference_only:
            spatial_crops = self.config.get('evaluate',
                                            {}).get('spatial_crops', 1)
            temporal_samples = self.config.get('evaluate',
                                               {}).get('temporal_samples', 1)
        else:
            spatial_crops = 1
            temporal_samples = 1
        dup_samples = spatial_crops * temporal_samples

        self.model.cuda().eval()
        test_loader = self.data_loaders['test']
        test_len = len(test_loader)
        end = time.time()
        for iter_idx in range(test_len):
            inputs = self.get_batch('test')
            isizes = inputs[0].shape

            if self.config.net.model_type == '2D':
                inputs[0] = inputs[0].view(isizes[0] * dup_samples, -1,
                                           isizes[2], isizes[3])
            else:
                inputs[0] = inputs[0].view(isizes[0], isizes[1], dup_samples,
                                           -1, isizes[3], isizes[4])
                inputs[0] = inputs[0].permute(0, 2, 1, 3, 4, 5).contiguous()
                inputs[0] = inputs[0].view(isizes[0] * dup_samples, isizes[1],
                                           -1, isizes[3], isizes[4])

            output = self.model(inputs[0])

            osizes = output[0].shape
            output[0] = output[0].view(
                (osizes[0] // dup_samples, -1, osizes[1]))
            output[0] = torch.mean(output[0], 1)
            osizes = output[1].shape
            output[1] = output[1].view(
                (osizes[0] // dup_samples, -1, osizes[1]))
            output[1] = torch.mean(output[1], 1)
            # Concatenate category scores and dimension predictions into one
            # comma-separated row per sample and collect them for later output.
            out_data = [
                ','.join(str(v) for v in cat_row + dim_row)
                for cat_row, dim_row in zip(output[0].tolist(), output[1].tolist())
            ]
            output_data.extend(out_data)

            # The generic self.criterion is replaced here by a two-head loss:
            # multi-label BCE over the first 26 targets (category head) and
            # MSE over the next 3 targets (dimension head), down-weighted by 10.
            loss_cat = torch.nn.BCEWithLogitsLoss()(output[0][0],
                                                    inputs[1][0][:26])
            loss_dim = torch.nn.MSELoss()(output[1][0], inputs[1][0][26:29])
            loss = loss_cat + loss_dim / 10

            num = inputs[0].size(0)
            losses.update(loss.item(), num)
            if self.multi_class:
                mAP = self.calculate_mAP(output, inputs[1])
                mAPs.update(mAP, num)
            else:
                prec1, prec5 = accuracy(
                    output,
                    inputs[1],
                    topk=(1, min(5, self.config.dataset.num_class)))
                top1.update(prec1.item(), num)
                top5.update(prec5.item(), num)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if iter_idx % self.config.trainer.print_freq == 0:
                self.logger.info(
                    'Test: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})'
                    .format(iter_idx, test_len, batch_time=batch_time))

        total_num = torch.Tensor([losses.count]).cuda()
        loss_sum = torch.Tensor([losses.avg * losses.count]).cuda()

        if self.config.gpus > 1:
            all_reduce(total_num, False)
            all_reduce(loss_sum, False)
        final_loss = loss_sum.item() / total_num.item()

        if self.multi_class:
            mAP_sum = torch.Tensor([mAPs.avg * mAPs.count]).cuda()
            if self.config.gpus > 1:
                all_reduce(mAP_sum)
            final_mAP = mAP_sum.item() / total_num.item()
            self.logger.info(' * mAP {:.3f}\tLoss {:.3f}\ttotal_num={}'.format(
                final_mAP, final_loss, total_num.item()))
            metric = Top1Metric(final_mAP, 0, final_loss)
        else:
            top1_sum = torch.Tensor([top1.avg * top1.count]).cuda()
            top5_sum = torch.Tensor([top5.avg * top5.count]).cuda()
            if self.config.gpus > 1:
                all_reduce(top1_sum, False)
                all_reduce(top5_sum, False)
            final_top1 = top1_sum.item() / total_num.item()
            final_top5 = top5_sum.item() / total_num.item()
            self.logger.info(
                ' * Prec@1 {:.3f}\tPrec@5 {:.3f}\tLoss {:.3f}\ttotal_num={}'.
                format(final_top1, final_top5, final_loss, total_num.item()))
            metric = Top1Metric(final_top1, final_top5, final_loss)

        self.model.cuda().train()
        if not predict_mode:
            return metric
        else:
            return metric, output_data
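Both evaluate() variants wrap their results in a Top1Metric, whose definition is not included in these examples. A plausible minimal container matching the three positional arguments used above is sketched below; it is an assumption, not the original class.

# Hypothetical sketch of Top1Metric (assumed; matches the positional usage above).
from collections import namedtuple

# In the multi-class branch, mAP is passed in the 'top1' slot and 'top5' is set to 0.
Top1Metric = namedtuple('Top1Metric', ['top1', 'top5', 'loss'])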