Example #1
def main():
    global args
    args = parse_args()

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True
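    # build the test dataset from its config; test_mode selects deterministic test-time sampling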
    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))

    if args.out is None or not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be specified and be a .pkl file.')

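    # reuse previously dumped results when args.out exists; otherwise run distributed inference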
    if osp.exists(args.out):
        outputs = mmcv.load(args.out)
    else:
        if args.launcher == 'none':
            raise NotImplementedError(
                'Only distributed testing is supported; '
                'please set --launcher to pytorch')
        else:
            distributed = True
            init_dist(args.launcher, **cfg.dist_params)

        model = build_detector(cfg.model,
                               train_cfg=None,
                               test_cfg=cfg.test_cfg)
        data_loader = build_dataloader(dataset,
                                       imgs_per_gpu=1,
                                       workers_per_gpu=1,
                                       dist=distributed,
                                       shuffle=False)

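        # load weights onto CPU first so checkpoint deserialization does not spike GPU memory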
        load_checkpoint(model, args.checkpoint, map_location='cpu')
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False,
            find_unused_parameters=find_unused_parameters)

        outputs = multiple_test(model, data_loader)

        rank, _ = get_dist_info()
        if rank == 0:
            print('writing results to {}'.format(args.out))
            mmcv.dump(outputs, args.out)

    eval_type = args.eval
    if eval_type:
        print('Starting evaluation: {}'.format(eval_type))

        result_file = args.out + '.csv'
        results2csv(dataset, outputs, result_file)

        ava_eval(result_file, eval_type, args.label_file, args.ann_file,
                 args.exclude_file)
Example #2
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if cfg.checkpoint_config is not None:
        # save mmaction version in checkpoints as meta data
        cfg.checkpoint_config.meta = dict(mmact_version=__version__,
                                          config=cfg.text)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    model = build_recognizer(cfg.model,
                             train_cfg=cfg.train_cfg,
                             test_cfg=cfg.test_cfg)

    train_dataset = get_trimmed_dataset(cfg.data.train)
    val_dataset = get_trimmed_dataset(cfg.data.val)
    datasets = []
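    # pick the dataset matching each workflow stage ('train' or 'val'), preserving workflow order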
    for flow in cfg.workflow:
        assert flow[0] in ['train', 'val']
        if flow[0] == 'train':
            datasets.append(train_dataset)
        else:
            datasets.append(val_dataset)
    train_network(model,
                  datasets,
                  cfg,
                  distributed=distributed,
                  validate=args.validate,
                  logger=logger)
Example #3
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    # pass arg of fcn testing
    if args.fcn_testing:
        cfg.model.update({'fcn_testing': True})
        cfg.model['cls_head'].update({'fcn_testing': True})

    # for regular testing
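    # (three-crop testing presumably feeds 256x256 inputs, giving an 8x8 final feature map instead of 7x7)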
    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))

    if args.launcher == 'none':
        raise NotImplementedError("By default, we use distributed testing, so that launcher should be pytorch")
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    model = build_recognizer(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=1,
        dist=distributed,
        shuffle=False)

    load_checkpoint(model, args.checkpoint, map_location='cpu')
    model = MMDistributedDataParallel(model.cuda())
    outputs = multi_test(model, data_loader)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

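        # gather ground-truth labels in dataset order for the accuracy metrics below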
        gt_labels = []
        for i in range(len(dataset)):
            ann = dataset.get_ann_info(i)
            gt_labels.append(ann['label'])

        if args.use_softmax:
            print("Averaging scores over {} clips with softmax".format(
                outputs[0].shape[0]))
            results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
        else:
            print("Averaging scores over {} clips without softmax "
                  "(i.e., raw)".format(outputs[0].shape[0]))
            results = [res.mean(axis=0) for res in outputs]
        top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
        mean_acc = mean_class_accuracy(results, gt_labels)
        print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100))
        print("Top-1 Accuracy = {:.02f}".format(top1 * 100))
        print("Top-5 Accuracy = {:.02f}".format(top5 * 100))
Example #4
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir

    if is_valid(args.resume_from):
        cfg.resume_from = args.resume_from

    if is_valid(args.load_from):
        cfg.load_from = args.load_from

    if is_valid(args.load2d_from):
        cfg.model.backbone.pretrained = args.load2d_from
        cfg.model.backbone.pretrained2d = True

    if args.num_videos is not None:
        assert args.num_videos > 0
        cfg.data.videos_per_gpu = args.num_videos

    if cfg.checkpoint_config is not None:
        cfg.checkpoint_config.meta = dict(mmact_version=__version__,
                                          config=cfg.text)

    if args.data_dir is not None:
        cfg = update_data_paths(cfg, args.data_dir)

    if getattr(cfg.model, 'masked_num', None) is not None \
            and cfg.model.masked_num > 0:
        assert cfg.data.videos_per_gpu > cfg.model.masked_num
        cfg.data.videos_per_gpu -= cfg.model.masked_num

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # init logger before other steps
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))

    # set random seeds
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)

    train_dataset = get_trimmed_dataset(cfg.data.train)
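    # parameter names listed here are skipped when loading weights (repo-specific);
    # BN num_batches_tracked buffers are always excluded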
    ignores = ['num_batches_tracked']
    if args.ignores is not None and len(args.ignores) > 0:
        ignores += args.ignores

    model = build_recognizer(cfg.model,
                             train_cfg=cfg.train_cfg,
                             test_cfg=cfg.test_cfg)
    if args.sync_bn:
        logger.info('Enabled SyncBatchNorm')
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)

    train_network(model,
                  train_dataset,
                  cfg,
                  distributed=distributed,
                  validate=args.validate,
                  logger=logger,
                  ignores=tuple(ignores))
Example #5
def main():
    global args
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # must use fcn testing
    cfg.model.update({'fcn_testing': True})
    cfg.model['cls_head'].update({'fcn_testing': True})

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if args.testfile != '':
        cfg.data.test.ann_file = args.testfile

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))

    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    assert distributed, "We only support distributed testing"

    model = build_recognizer(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=1,
        dist=distributed,
        shuffle=False)

    load_checkpoint(model, args.checkpoint, map_location='cpu')

    find_unused_parameters = cfg.get('find_unused_parameters', False)
    model = MMDistributedDataParallel(
        model.cuda(),
        device_ids=[torch.cuda.current_device()],
        broadcast_buffers=False,
        find_unused_parameters=find_unused_parameters)

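    # run inference on every rank; multi_test collects one score array per sample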
    outputs = multi_test(model, data_loader)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

        gt_labels = []
        for i in range(len(dataset)):
            ann = dataset.get_ann_info(i)
            gt_labels.append(ann['label'])

        if args.use_softmax:
            print("Averaging scores over {} clips with softmax".format(
                outputs[0].shape[0]))
            results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
        else:
            print("Averaging scores over {} clips without softmax "
                  "(i.e., raw)".format(outputs[0].shape[0]))
            results = [res.mean(axis=0) for res in outputs]
        top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
        mean_acc = mean_class_accuracy(results, gt_labels)
        print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100))
        print("Top-1 Accuracy = {:.02f}".format(top1 * 100))
        print("Top-5 Accuracy = {:.02f}".format(top5 * 100))
Example #6
def main():
    args = parse_args()

    assert args.out, 'Please specify the output path for results'

    if not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    if cfg.model.get('necks', None) is not None:
        cfg.model.necks.aux_head_config = None

    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8
    if args.fcn_testing:
        cfg.model['cls_head'].update({'fcn_testing': True})
        cfg.model.update({'fcn_testing': True})
    if args.flip:
        cfg.model.update({'flip': True})

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))

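    # --ignore_cache forces a fresh inference pass; otherwise cached results are loaded from args.out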
    if args.ignore_cache and args.out is not None:
        if not distributed:
            if args.gpus == 1:
                model = build_recognizer(cfg.model,
                                         train_cfg=None,
                                         test_cfg=cfg.test_cfg)
                load_checkpoint(model,
                                args.checkpoint,
                                strict=True,
                                map_location='cpu')
                model = MMDataParallel(model, device_ids=[0])

                data_loader = build_dataloader(
                    dataset,
                    imgs_per_gpu=1,
                    workers_per_gpu=cfg.data.workers_per_gpu,
                    num_gpus=1,
                    dist=False,
                    shuffle=False)
                outputs = single_test(model, data_loader)
            else:
                model_args = cfg.model.copy()
                model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
                model_type = getattr(recognizers, model_args.pop('type'))

                outputs = parallel_test(model_type,
                                        model_args,
                                        args.checkpoint,
                                        dataset,
                                        _data_func,
                                        range(args.gpus),
                                        workers_per_gpu=args.proc_per_gpu)
        else:
            data_loader = build_dataloader(
                dataset,
                imgs_per_gpu=1,
                workers_per_gpu=cfg.data.workers_per_gpu,
                dist=distributed,
                shuffle=False)
            model = build_recognizer(cfg.model,
                                     train_cfg=None,
                                     test_cfg=cfg.test_cfg)
            load_checkpoint(model,
                            args.checkpoint,
                            strict=True,
                            map_location='cpu')
            model = MMDistributedDataParallel(model.cuda())
            outputs = multi_gpu_test(model, data_loader, args.tmpdir)
    else:
        try:
            if distributed:
                rank, _ = get_dist_info()
                if rank == 0:
                    outputs = mmcv.load(args.out)
            else:
                outputs = mmcv.load(args.out)
        except Exception as e:
            raise FileNotFoundError(
                'Cached results not found at {}'.format(args.out)) from e

    rank, _ = get_dist_info()
    if args.out:
        if rank == 0:
            print('writing results to {}'.format(args.out))
            mmcv.dump(outputs, args.out)
            gt_labels = []
            for i in range(len(dataset)):
                ann = dataset.get_ann_info(i)
                gt_labels.append(ann['label'])

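            # unpack the stacked score rows into a flat list and truncate to the number of labeled samples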
            results = []
            for res in outputs:
                res_list = [res[i] for i in range(res.shape[0])]
                results += res_list
            results = results[:len(gt_labels)]
            print('Number of results: {}'.format(len(results)))
            top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
            mean_acc = mean_class_accuracy(results, gt_labels)
            non_mean_acc = non_mean_class_accuracy(results, gt_labels)
            if args.log:
                with open(args.log, 'w') as f:
                    f.write(f'Testing ckpt from {args.checkpoint}\n')
                    f.write(f'Testing config from {args.config}\n')
                    f.write('Mean Class Accuracy = {:.04f}\n'.format(
                        mean_acc * 100))
                    f.write('Top-1 Accuracy = {:.04f}\n'.format(top1 * 100))
                    f.write('Top-5 Accuracy = {:.04f}\n'.format(top5 * 100))
            else:
                print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100))
                print("Top-1 Accuracy = {:.02f}".format(top1 * 100))
                print("Top-5 Accuracy = {:.02f}".format(top5 * 100))
                print("Non mean Class Accuracy", non_mean_acc)
                print('saving non_mean acc')
Example #7
def main():
    global args
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    cfg = mmcv.Config.fromfile(args.config)
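    # resolve the checkpoint path: latest.pth by default, or epoch_<N>.pth when an epoch number is given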
    if args.checkpoint is None:
        args.checkpoint = os.path.join(cfg.work_dir, 'latest.pth')
    else:
        args.checkpoint = os.path.join(cfg.work_dir,
                                       'epoch_%d.pth' % int(args.checkpoint))

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    # pass arg of fcn testing
    if args.fcn_testing:
        cfg.model.update({'fcn_testing': True})
        cfg.model['cls_head'].update({'fcn_testing': True})

    # for regular testing
    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))

    if args.launcher == 'none':
        raise NotImplementedError(
            'Only distributed testing is supported; '
            'please set --launcher to pytorch')
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    model = build_recognizer(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=1,
                                   dist=distributed,
                                   shuffle=False)

    load_checkpoint(model, args.checkpoint, map_location='cpu')

    find_unused_parameters = cfg.get('find_unused_parameters', False)
    model = MMDistributedDataParallel(
        model.cuda(),
        device_ids=[torch.cuda.current_device()],
        broadcast_buffers=False,
        find_unused_parameters=find_unused_parameters)

    outputs = multi_test(model, data_loader, cfg.work_dir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

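        # also dump (prediction, label) pairs keyed by clip path for offline analysis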
        data_path = []
        gt_labels = []
        pre = []
        for i in range(len(dataset)):
            ann = dataset.get_ann_info(i)
            gt_labels.append(ann['label'])
            data_path.append(ann['path'])
            pre.append(outputs[i].mean(axis=0))

        save_data = {
            path: [p, g]
            for path, p, g in zip(data_path, pre, gt_labels)
        }
        with open(os.path.join(cfg.work_dir, 'test.pkl'), 'wb') as f:
            pickle.dump(save_data, f)

        if args.use_softmax:
            print("Averaging scores over {} clips with softmax".format(
                outputs[0].shape[0]))
            results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
        else:
            print("Averaging scores over {} clips without softmax "
                  "(i.e., raw)".format(outputs[0].shape[0]))
            results = [res.mean(axis=0) for res in outputs]
        top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
        mean_acc = mean_class_accuracy(results, gt_labels)
        with open(os.path.join(cfg.work_dir, 'test_result.txt'), 'w') as f:
            f.write('model: ' + args.checkpoint + '\n')
            f.write('Mean Class Accuracy = {:.04f}\n'.format(mean_acc * 100))
            f.write('Top-1 Accuracy = {:.04f}\n'.format(top1 * 100))
            f.write('Top-5 Accuracy = {:.04f}\n'.format(top5 * 100))

        print("Mean Class Accuracy = {:.04f}".format(mean_acc * 100))
        print("Top-1 Accuracy = {:.04f}".format(top1 * 100))
        print("Top-5 Accuracy = {:.04f}".format(top5 * 100))
Example #8
def main():
    global args
    args = parse_args()

    # if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
    #     raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # must use fcn testing
    cfg.model.update({'fcn_testing': True})
    cfg.model['cls_head'].update({'fcn_testing': True})

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if args.testfile != '':
        cfg.data.test.ann_file = args.testfile

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))

    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    assert distributed, "We only support distributed testing"

    model = build_recognizer(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=1,
        dist=distributed,
        shuffle=False)

    load_checkpoint(model, args.checkpoint, map_location='cpu')

    find_unused_parameters = cfg.get('find_unused_parameters', False)
    model = MMDistributedDataParallel(
        model.cuda(),
        device_ids=[torch.cuda.current_device()],
        broadcast_buffers=False,
        find_unused_parameters=find_unused_parameters)

    outputs = multi_test(model, data_loader)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('writing results to {}'.format(args.out))
        # mmcv.dump(outputs, args.out)

        # stack per-clip scores from all samples into one (N, 226) array
        results_numpyformat = np.vstack(outputs)
        np.save("res", results_numpyformat)

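        # record each sample's clip path so predictions can be matched to rows in the CSV template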
        # gt_labels = []
        image_name = []
        for i in range(len(dataset)):
            ann = dataset.get_ann_info(i)
            image_name.append(ann['path'])
            # gt_labels.append(ann['label'])
        print(image_name[0])
        if args.use_softmax:
            print("Averaging scores over {} clips with softmax".format(
                outputs[0].shape[0]))
            results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
        else:
            print("Averaging scores over {} clips without softmax "
                  "(i.e., raw)".format(outputs[0].shape[0]))
            results = [res.mean(axis=0) for res in outputs]

        prediction = pd.read_csv('predictions.csv', header=None)
        if not os.path.exists(args.out):
            os.mkdir(args.out)
        pbar = tqdm(total=len(image_name))
        for i in range(len(results)):
            pred_class = np.argsort(results[i])[-1:].item()
            # use "_depth" instead of "_color" when testing the depth stream
            index = prediction[(prediction[0] == image_name[i].replace(
                "_color", ""))].index.item()
            prediction.iloc[index, 1] = pred_class
            pbar.update(1)
        pbar.close()
        prediction.to_csv(os.path.join(args.out, 'predictions.csv'),
                          header=False, index=False)
Example #9
def main():
    global args
    args = parse_args()

    # if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
    #     raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    # pass arg of fcn testing
    if args.fcn_testing:
        cfg.model.update({'fcn_testing': True})
        cfg.model['cls_head'].update({'fcn_testing': True})

    # for regular testing
    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    # dataset[0]

    if args.launcher == 'none':
        # raise NotImplementedError("By default, we use distributed testing, so that launcher should be pytorch")
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    model = build_recognizer(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=1,
                                   dist=distributed,
                                   shuffle=False)

    load_checkpoint(model, args.checkpoint, map_location='cpu')

    # find_unused_parameters = cfg.get('find_unused_parameters', False)
    # model = MMDistributedDataParallel(
    #     model.cuda(),
    #     device_ids=[torch.cuda.current_device()],
    #     broadcast_buffers=False,
    #     find_unused_parameters=find_unused_parameters)
    model = MMDataParallel(model,
                           device_ids=range(args.test_gpus,
                                            args.test_gpus + 1))
    outputs = multi_test(model, data_loader)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('writing results to {}'.format(args.out))

        # mmcv.dump(outputs, args.out)

        results_numpyformat = np.vstack(outputs)
        np.save(os.path.join(args.out, args.name), results_numpyformat)

        # gt_labels = []
        image_name = []
        for i in range(len(dataset)):
            ann = dataset.get_ann_info(i)
            image_name.append(ann['path'])
            # gt_labels.append(ann['label'])

        if args.use_softmax:
            print("Averaging scores over {} clips with softmax".format(
                outputs[0].shape[0]))
            results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
        else:
            print("Averaging scores over {} clips without softmax "
                  "(i.e., raw)".format(outputs[0].shape[0]))
            results = [res.mean(axis=0) for res in outputs]
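        # write each predicted class into the matching row of the template CSV, keyed by clip path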
        prediction = pd.read_csv('predictions_testphase.csv', header=None)
        if not os.path.exists(args.out):
            os.mkdir(args.out)
        pbar = tqdm(total=len(image_name))
        for i in range(len(results)):
            pred_class = np.argsort(results[i])[-1:].item()
            index = prediction[(prediction[0] == image_name[i].replace(
                "_{}".format(args.color_or_depth), ""))].index.item()
            prediction.iloc[index, 1] = pred_class
            pbar.update(1)
        pbar.close()
        prediction.to_csv(os.path.join(args.out,
                                       '{}_predictions.csv'.format(args.name)),
                          header=False,
                          index=False)