Code Example #1
File: inference.py  Project: ZJCV/TRN
def compute_on_dataset(model, data_loader, device):
    results_dict = {}
    cate_acc_dict = {}
    acc_top1 = list()
    acc_top5 = list()

    for batch in tqdm(data_loader):
        images, targets = batch
        cpu_device = torch.device("cpu")

        with torch.no_grad():
            outputs = model(images.to(device)).to(cpu_device)
            # outputs = torch.stack([o.to(cpu_device) for o in outputs])

            topk_list = topk_accuracy(outputs, targets, topk=(1, 5))
            acc_top1.append(topk_list[0].item())
            acc_top5.append(topk_list[1].item())

            outputs = outputs.numpy()
            preds = np.argmax(outputs, 1)
            targets = targets.numpy()
            # Per-class bookkeeping: results_dict counts samples seen per
            # ground-truth class, cate_acc_dict counts correct predictions.
            for target, pred in zip(targets, preds):
                results_dict.update(
                    {str(target): results_dict.get(str(target), 0) + 1})
                cate_acc_dict.update({
                    str(target):
                    cate_acc_dict.get(str(target), 0) + int(target == pred)
                })

    return results_dict, cate_acc_dict, acc_top1, acc_top5
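Every example on this page calls topk_accuracy without showing its definition. The sketch below illustrates a standard implementation of such a helper, assuming outputs are raw class scores of shape (N, num_classes) and targets are integer class indices of shape (N,); the actual helper in the ZJCV projects may differ in detail (for example, it could return percentages rather than fractions).

import torch

def topk_accuracy(outputs, targets, topk=(1,)):
    """Assumed interface: one fractional accuracy tensor per requested k."""
    maxk = max(topk)
    batch_size = targets.size(0)

    # Indices of the maxk highest-scoring classes, transposed to (maxk, N).
    _, pred = outputs.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()
    # correct[i, j] is True when the (i+1)-th ranked prediction of sample j
    # equals the target label.
    correct = pred.eq(targets.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k / batch_size)
    return res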
Code Example #2
File: jester.py  Project: ZJCV/Non-local
    def evaluate_train(self, output_dict: dict, targets: torch.Tensor):
        assert isinstance(output_dict, dict) and 'probs' in output_dict.keys()

        probs = output_dict['probs']
        res = topk_accuracy(probs, targets, topk=self.topk)

        acc_dict = dict()
        for i in range(len(self.topk)):
            acc_dict[f'top{self.topk[i]}'] = res[i]  # keys like 'top1', 'top5'
        return acc_dict
Code Example #3
    def evaluate_test(self, outputs, targets):
        outputs = outputs.to(device=self.device)
        targets = targets.to(device=self.device)

        res = topk_accuracy(outputs, targets, topk=self.topk)
        self.topk_list.append(torch.stack(res))
        preds = torch.argmax(outputs, dim=1)
        # Convert on the CPU; Tensor.numpy() raises for CUDA tensors.
        for target, pred in zip(targets.cpu().numpy(), preds.cpu().numpy()):
            self.cate_num_dict.update(
                {str(target): self.cate_num_dict.get(str(target), 0) + 1})
            self.cate_acc_dict.update({
                str(target):
                self.cate_acc_dict.get(str(target), 0) + int(target == pred)
            })
Code Example #4
File: jester.py  Project: ZJCV/Non-local
    def evaluate_test(self, output_dict: dict, targets: torch.Tensor):
        assert isinstance(output_dict, dict) and 'probs' in output_dict.keys()
        probs = output_dict['probs']
        outputs = probs.to(device=self.device)
        targets = targets.to(device=self.device)

        res = topk_accuracy(outputs, targets, topk=self.topk)
        self.topk_list.append(torch.stack(res))
        preds = torch.argmax(outputs, dim=1)
        # Convert on the CPU; Tensor.numpy() raises for CUDA tensors.
        for target, pred in zip(targets.cpu().numpy(), preds.cpu().numpy()):
            self.cate_num_dict.update(
                {str(target): self.cate_num_dict.get(str(target), 0) + 1})
            self.cate_acc_dict.update({
                str(target):
                self.cate_acc_dict.get(str(target), 0) + int(target == pred)
            })
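The two evaluate_test variants above only accumulate state (self.topk_list, self.cate_num_dict, self.cate_acc_dict). A hypothetical reduction step, not shown in the original classes, could turn that state into final metrics; the method name get and the return layout below are assumptions for illustration.

    def get(self):
        # Average the per-batch top-k accuracies: one value per k in self.topk.
        topk_acc = torch.mean(torch.stack(self.topk_list), dim=0)
        # Per-class accuracy: correct predictions divided by samples seen.
        cate_acc = {
            cls: self.cate_acc_dict.get(cls, 0) / num
            for cls, num in self.cate_num_dict.items()
        }
        return topk_acc, cate_acc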
Code Example #5
File: fusion_test.py  Project: ZJCV/TSM
def compute_on_dataset(rgb_model, rgb_data_loader, rgbdiff_model,
                       rgbdiff_data_loader, device):
    results_dict = {}
    cate_acc_dict = {}
    acc_top1 = list()
    acc_top5 = list()

    cpu_device = torch.device("cpu")
    rgb_data_loader_iter = iter(rgb_data_loader)
    rgbdiff_data_loader_iter = iter(rgbdiff_data_loader)
    for i in tqdm(range(len(rgb_data_loader))):
        outputs_list = list()

        images, targets = next(rgb_data_loader_iter)
        outputs = rgb_model(images.to(device=device,
                                      non_blocking=True)).to(cpu_device)
        outputs_list.append(outputs)

        images, targets = next(rgbdiff_data_loader_iter)
        outputs = rgbdiff_model(images.to(device=device,
                                          non_blocking=True)).to(cpu_device)
        outputs_list.append(outputs)
        outputs = torch.mean(torch.stack(outputs_list), dim=0)

        topk_list = topk_accuracy(outputs, targets, topk=(1, 5))
        acc_top1.append(topk_list[0].item())
        acc_top5.append(topk_list[1].item())

        outputs = outputs.numpy()
        preds = np.argmax(outputs, 1)
        targets = targets.numpy()
        for target, pred in zip(targets, preds):
            results_dict.update(
                {str(target): results_dict.get(str(target), 0) + 1})
            cate_acc_dict.update({
                str(target):
                cate_acc_dict.get(str(target), 0) + int(target == pred)
            })

    return results_dict, cate_acc_dict, acc_top1, acc_top5
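Code example #5 (like #1) returns per-batch accuracy lists and per-class counters. A hypothetical caller, reusing the parameter names from the signature above, could average the per-batch values into dataset-level scores:

results_dict, cate_acc_dict, acc_top1, acc_top5 = compute_on_dataset(
    rgb_model, rgb_data_loader, rgbdiff_model, rgbdiff_data_loader, device)

# Dataset-level top-k accuracy as the mean of per-batch values
# (approximate if the final batch is smaller than the others).
print('top-1: {:.4f}  top-5: {:.4f}'.format(np.mean(acc_top1), np.mean(acc_top5)))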
Code Example #6
File: trainer.py  Project: ZJCV/SlowFast
def do_train(args, cfg, arguments, data_loader, model, criterion, optimizer,
             lr_scheduler, checkpointer, device, logger):
    meters = MetricLogger()
    summary_writer = None

    if is_master_proc():
        logger.info("Start training ...")
        if args.use_tensorboard:
            from torch.utils.tensorboard import SummaryWriter
            summary_writer = SummaryWriter(
                log_dir=os.path.join(cfg.OUTPUT.DIR, 'tf_logs'))

    model.train()
    start_iter = arguments['iteration']
    max_iter = cfg.TRAIN.MAX_ITER

    synchronize()
    start_training_time = time.time()
    end = time.time()

    for iteration, (images, targets) in enumerate(data_loader, start_iter):
        synchronize()
        iteration = iteration + 1
        arguments["iteration"] = iteration

        images = images.to(device)
        targets = targets.to(device)

        outputs = model(images)
        loss = criterion(outputs, targets)
        # compute top-k accuracy
        topk_list = topk_accuracy(outputs, targets, topk=(1, 5))
        meters.update(loss=loss / len(targets),
                      acc_1=topk_list[0],
                      acc_5=topk_list[1])

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        lr_scheduler.step()

        if iteration % len(data_loader) == 0 and hasattr(
                data_loader.batch_sampler, "set_epoch"):
            data_loader.batch_sampler.set_epoch(iteration)

        batch_time = time.time() - end
        end = time.time()
        meters.update(time=batch_time)
        if is_master_proc():
            if iteration % args.log_step == 0:
                eta_seconds = meters.time.global_avg * (max_iter - iteration)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                logger.info(
                    meters.delimiter.join([
                        "iter: {iter:06d}",
                        "lr: {lr:.5f}",
                        '{meters}',
                        "eta: {eta}",
                        'mem: {mem}M',
                    ]).format(
                        iter=iteration,
                        lr=optimizer.param_groups[0]['lr'],
                        meters=str(meters),
                        eta=eta_string,
                        mem=round(torch.cuda.max_memory_allocated() / 1024.0 /
                                  1024.0),
                    ))
                if summary_writer:
                    global_step = iteration
                    for name, meter in meters.meters.items():
                        summary_writer.add_scalar('{}/avg'.format(name),
                                                  float(meter.avg),
                                                  global_step=global_step)
                        summary_writer.add_scalar('{}/global_avg'.format(name),
                                                  meter.global_avg,
                                                  global_step=global_step)
                    summary_writer.add_scalar('lr',
                                              optimizer.param_groups[0]['lr'],
                                              global_step=global_step)

            if not args.stop_save and iteration % args.save_step == 0:
                checkpointer.save("model_{:06d}".format(iteration),
                                  **arguments)
            if (not args.stop_eval and args.eval_step > 0
                    and iteration % args.eval_step == 0
                    and iteration != max_iter):
                eval_results = do_evaluation(cfg,
                                             model,
                                             device,
                                             iteration=iteration)
                if summary_writer:
                    for key, value in eval_results.items():
                        summary_writer.add_scalar(f'eval/{key}',
                                                  value,
                                                  global_step=iteration)
                model.train()

    if is_master_proc() and not args.stop_eval:
        logger.info('Start final evaluating...')
        torch.cuda.empty_cache()  # speed up evaluating after training finished
        eval_results = do_evaluation(cfg, model, device)

        if summary_writer:
            for key, value in eval_results.items():
                summary_writer.add_scalar(f'eval/{key}',
                                          value,
                                          global_step=iteration)
            summary_writer.close()
        checkpointer.save("model_final", **arguments)
    # compute training time
    total_training_time = int(time.time() - start_training_time)
    total_time_str = str(datetime.timedelta(seconds=total_training_time))
    if is_master_proc():
        logger.info("Total training time: {} ({:.4f} s / it)".format(
            total_time_str, total_training_time / max_iter))
    return model
Code Example #7
    def evaluate_train(self, outputs, targets):
        return topk_accuracy(outputs, targets, topk=self.topk)