Code example #1
def main():
    '''Plot per-class accuracy.'''
    global args
    args = parse_args()

    rgb_cfg = mmcv.Config.fromfile(args.rgb_config)
    flow_cfg = mmcv.Config.fromfile(args.flow_config)

    rgb_info = pickle.load(
        open(os.path.join(rgb_cfg.work_dir, 'test.pkl'), 'rb'))
    flow_info = pickle.load(
        open(os.path.join(flow_cfg.work_dir, 'test.pkl'), 'rb'))

    path_dir = os.path.dirname(rgb_cfg.data_root)
    classInd = [
        x.split(' ')
        for x in open(path_dir + '/annotations/classInd.txt', 'r').readlines()
    ]
    classInd = {int(x[0]): x[1] for x in classInd}

    # check that both result sets contain the same samples
    for k in rgb_info.keys():
        assert k in flow_info

    ratio = 1.5
    gt_labels = []
    out_pred = []
    for k in rgb_info.keys():
        gt_labels.append(rgb_info[k][1])
        out_pred.append(
            np.argmax(softmax(rgb_info[k][0] + ratio * flow_info[k][0],
                              dim=0)))

    plot_info = {}
    out_pred = np.array(out_pred)
    gt_labels = np.array(gt_labels)
    all_right = out_pred == gt_labels
    for k, v in classInd.items():
        mask = gt_labels == k
        p = sum(all_right[mask]) / sum(mask)
        plot_info[v.strip()] = p

    save_path = path_dir + '/two_stream_bar.png'
    plot(plot_info, save_path)
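Both this example and several below rely on a softmax(x, dim=...) helper and, here, a plot(...) bar-chart function that are defined elsewhere in the original scripts. A minimal numpy/matplotlib sketch of what they plausibly look like (illustrative only, not the project's actual code):

import numpy as np
import matplotlib
matplotlib.use('Agg')  # render off-screen; we only save to disk
import matplotlib.pyplot as plt


def softmax(x, dim=0):
    '''Numerically stable softmax along the given axis of a numpy array.'''
    x = np.asarray(x, dtype=np.float64)
    x = x - x.max(axis=dim, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=dim, keepdims=True)


def plot(plot_info, save_path):
    '''Draw per-class accuracy as a horizontal bar chart and save it.'''
    names = list(plot_info.keys())
    values = [plot_info[n] for n in names]
    plt.figure(figsize=(8, max(4, 0.25 * len(names))))
    plt.barh(names, values)
    plt.xlabel('accuracy')
    plt.tight_layout()
    plt.savefig(save_path)
    plt.close()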
Code example #2
def main():

    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))

    rgb_results = args.rgb  # "/home/lixun/Desktop/HAR/mmaction/work_dirs/predictions_hmdb51_split1/rgb.pkl"
    flow_results = args.flow  #  "/home/lixun/Desktop/HAR/mmaction/work_dirs/predictions_hmdb51_split1/flow.pkl"
    outputs_rgb = mmcv.load(rgb_results)
    outputs_flow = mmcv.load(flow_results)

    outputs = [
        outputs_rgb[i] + outputs_flow[i] for i in range(len(outputs_rgb))
    ]

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

    gt_labels = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        gt_labels.append(ann['label'])

    if args.use_softmax:
        print("Averaging score over {} clips with softmax".format(
            outputs[0].shape[0]))
        results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
    else:
        print("Averaging score over {} clips without softmax (ie, raw)".format(
            outputs[0].shape[0]))
        results = [res.mean(axis=0) for res in outputs]
    top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
    mean_acc = mean_class_accuracy(results, gt_labels)

    print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100))
    print("Top-1 Accuracy = {:.02f}".format(top1 * 100))
    print("Top-5 Accuracy = {:.02f}".format(top5 * 100))
Code example #3
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_recognizer(cfg.model,
                                 train_cfg=None,
                                 test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint, strict=True)
        model = MMDataParallel(model, device_ids=[0])
        a = dataset[1]  # fetch one sample as a sanity check of the dataset

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(recognizers, model_args.pop('type'))
        outputs = parallel_test(model_type,
                                model_args,
                                args.checkpoint,
                                dataset,
                                _data_func,
                                range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

    gt_labels = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        gt_labels.append(ann['label'])

    if args.use_softmax:
        print("Averaging score over {} clips with softmax".format(
            outputs[0].shape[0]))
        results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
    else:
        print("Averaging score over {} clips without softmax (ie, raw)".format(
            outputs[0].shape[0]))
        results = [res.mean(axis=0) for res in outputs]

    import datetime

    currentDT = datetime.datetime.now()

    with open('data/nturgbd/nturgbd_val_split_generalization_rawframes.txt'
              ) as f:
        video_names = [l.strip().split(' ')[0] for l in f.readlines()]

    with open(
            osp.join(args.checkpoint + '.result_%s.pkl' %
                     currentDT.strftime("%Y-%m-%d_%H:%M:%S")), 'wb') as f:
        pickle.dump([results, gt_labels, video_names], f)
    top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
    mean_acc = mean_class_accuracy(results, gt_labels)
    print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100))
    print("Top-1 Accuracy = {:.02f}".format(top1 * 100))
    print("Top-5 Accuracy = {:.02f}".format(top5 * 100))
Code example #4
File: test_recognizer.py  Project: volpepe/mmaction
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    # pass arg of fcn testing
    if args.fcn_testing:
        cfg.model.update({'fcn_testing': True})
        cfg.model['cls_head'].update({'fcn_testing': True})

    # for regular testing
    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))

    if args.launcher == 'none':
        raise NotImplementedError(
            "By default we use distributed testing, so the launcher should be 'pytorch'")
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    model = build_recognizer(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=1,
        dist=distributed,
        shuffle=False)

    load_checkpoint(model, args.checkpoint, map_location='cpu')
    model = MMDistributedDataParallel(model.cuda())
    outputs = multi_test(model, data_loader)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

        gt_labels = []
        for i in range(len(dataset)):
            ann = dataset.get_ann_info(i)
            gt_labels.append(ann['label'])

        if args.use_softmax:
            print("Averaging score over {} clips with softmax".format(outputs[0].shape[0]))
            results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
        else:
            print("Averaging score over {} clips without softmax (ie, raw)".format(outputs[0].shape[0]))
            results = [res.mean(axis=0) for res in outputs]
        top1, top5 = top_k_accuracy(results, gt_labels, k=(1,5))
        mean_acc = mean_class_accuracy(results, gt_labels)
        print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100))
        print("Top-1 Accuracy = {:.02f}".format(top1 * 100))
        print("Top-5 Accuracy = {:.02f}".format(top5 * 100))
Code example #5
File: test_twostream.py  Project: hushunda/mmaction
def main():
    global args
    args = parse_args()

    rgb_cfg = mmcv.Config.fromfile(args.rgb_config)
    flow_cfg = mmcv.Config.fromfile(args.flow_config)
    if args.multi:
        '''Save the accuracy of every checkpoint.'''

        rgb_infoes = []
        rgb_files = glob.glob(os.path.join(rgb_cfg.work_dir, 'test_*.pkl'))
        # map epoch index -> result file, so results are read in epoch order
        idx_file = {
            int(os.path.basename(x).split('_')[1].split('.')[0]): x
            for x in rgb_files
        }
        for idx in sorted(idx_file.keys()):
            rgb_infoes.append(pickle.load(open(idx_file[idx], 'rb')))

        flow_infoes = []
        flow_files = glob.glob(os.path.join(flow_cfg.work_dir, 'test_*.pkl'))
        idx_file = {
            int(os.path.basename(x).split('_')[1].split('.')[0]): x
            for x in flow_files
        }
        for idx in sorted(idx_file.keys()):
            flow_infoes.append(pickle.load(open(idx_file[idx], 'rb')))
        # fuse rgb + flow scores for each checkpoint
        ratio = 1.5
        out_info = []
        for rgb_info, flow_info in zip(rgb_infoes, flow_infoes):
            gt_labels = []
            out_pred = []
            for k in rgb_info.keys():
                gt_labels.append(rgb_info[k][1])
                out_pred.append(rgb_info[k][0][0] + ratio * flow_info[k][0][0])
            # top1, top5 = top_k_accuracy(out_pred, gt_labels, k=(1, 5))
            mean_acc = mean_class_accuracy(out_pred, gt_labels)
            out_info.append(mean_acc)
        with open(os.path.join(flow_cfg.work_dir, 'two_stream.pkl'), 'wb') as f:
            pickle.dump(out_info, f)

    else:
        rgb_info = pickle.load(open(os.path.join(rgb_cfg.work_dir,'test.pkl'),'rb'))
        flow_info = pickle.load(open(os.path.join(flow_cfg.work_dir,'test.pkl'),'rb'))

        # check that both result sets contain the same samples
        for k in rgb_info.keys():
            assert k in flow_info

        ratio = float(args.ratio)
        gt_labels = []
        out_pred = []
        for k in rgb_info.keys():
            gt_labels.append(rgb_info[k][1])
            if args.way == 'avg':
                # out_pred.append(softmax(rgb_info[k][0], dim=0) + ratio * softmax(flow_info[k][0], dim=0))
                out_pred.append(rgb_info[k][0] + ratio * flow_info[k][0])
            elif args.way == 'max':
                # keep whichever stream is more confident
                rgb = softmax(rgb_info[k][0], dim=0)
                flow = softmax(flow_info[k][0], dim=0)
                score = rgb if rgb.max() > flow.max() else flow
                out_pred.append(score)
            else:
                raise NotImplementedError('unknown fusion way: ' + args.way)

        top1, top5 = top_k_accuracy(out_pred, gt_labels, k=(1, 5))
        mean_acc = mean_class_accuracy(out_pred, gt_labels)
        print("Mean Class Accuracy = {:.04f}".format(mean_acc * 100))
        print("Top-1 Accuracy = {:.04f}".format(top1 * 100))
        print("Top-5 Accuracy = {:.04f}".format(top5 * 100))
Code example #6
def main():
    global args
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # must use fcn testing
    cfg.model.update({'fcn_testing': True})
    cfg.model['cls_head'].update({'fcn_testing': True})

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if args.testfile != '':
        cfg.data.test.ann_file = args.testfile

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))

    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    assert distributed, "We only support distributed testing"

    model = build_recognizer(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=1,
        dist=distributed,
        shuffle=False)

    load_checkpoint(model, args.checkpoint, map_location='cpu')

    find_unused_parameters = cfg.get('find_unused_parameters', False)
    model = MMDistributedDataParallel(
        model.cuda(),
        device_ids=[torch.cuda.current_device()],
        broadcast_buffers=False,
        find_unused_parameters=find_unused_parameters)

    outputs = multi_test(model, data_loader)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

        gt_labels = []
        for i in range(len(dataset)):
            ann = dataset.get_ann_info(i)
            gt_labels.append(ann['label'])

        if args.use_softmax:
            print("Averaging score over {} clips with softmax".format(outputs[0].shape[0]))
            results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
        else:
            print("Averaging score over {} clips without softmax (ie, raw)".format(outputs[0].shape[0]))
            results = [res.mean(axis=0) for res in outputs]
        top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
        mean_acc = mean_class_accuracy(results, gt_labels)
        print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100))
        print("Top-1 Accuracy = {:.02f}".format(top1 * 100))
        print("Top-5 Accuracy = {:.02f}".format(top5 * 100))
Code example #7
File: test_recognizer.py  Project: ioir123ju/mmaction
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_recognizer(cfg.model,
                                 train_cfg=None,
                                 test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint, strict=True)
        model = MMDataParallel(model, device_ids=[0])

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs = single_test(model, data_loader)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(recognizers, model_args.pop('type'))
        outputs = parallel_test(model_type,
                                model_args,
                                args.checkpoint,
                                dataset,
                                _data_func,
                                range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

    gt_labels = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        gt_labels.append(ann['label'])

    if args.use_softmax:
        print("Averaging score over {} clips with softmax".format(
            outputs[0].shape[0]))
        results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
    else:
        print("Averaging score over {} clips without softmax (ie, raw)".format(
            outputs[0].shape[0]))
        results = [res.mean(axis=0) for res in outputs]
    top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
    mean_acc = mean_class_accuracy(results, gt_labels)
    print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100))
    print("Top-1 Accuracy = {:.02f}".format(top1 * 100))
    print("Top-5 Accuracy = {:.02f}".format(top5 * 100))
Code example #8
def main():
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    if args.gpus == 1:
        model = build_recognizer(cfg.model,
                                 train_cfg=None,
                                 test_cfg=cfg.test_cfg)
        load_checkpoint(model, args.checkpoint, strict=True)
        model = MMDataParallel(model, device_ids=[1])

        params = list(model.parameters())
        # last fully-connected layer weights, as numpy, for CAM computation
        weight_softmax = np.squeeze(params[-2].data.cpu().numpy())

        data_loader = build_dataloader(
            dataset,
            imgs_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            num_gpus=1,
            dist=False,
            shuffle=False)
        outputs, inputs = single_test(model, data_loader)
    else:
        model_args = cfg.model.copy()
        model_args.update(train_cfg=None, test_cfg=cfg.test_cfg)
        model_type = getattr(recognizers, model_args.pop('type'))
        outputs = parallel_test(model_type,
                                model_args,
                                args.checkpoint,
                                dataset,
                                _data_func,
                                range(args.gpus),
                                workers_per_gpu=args.proc_per_gpu)

    #print(len(features_blobs))
    #print(features_blobs[0].size())

    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

    num_videos = len(outputs)
    class_name = 'YoYo'
    os.mkdir('data/CAM_imgs/' + class_name)

    for k in range(0, num_videos):
        os.mkdir('data/CAM_imgs/' + class_name + '/CAMs_{:02d}'.format(k))
        idx = get_top_5_index("tools/results.pkl",
                              k)  # change the dir of results.pkl to tools/
        conv_feat = pickle.load(open(
            "tools/hook_features/feat_{:02d}.pkl".format(k), 'rb'),
                                encoding='utf-8')
        conv_feat = conv_feat.cpu().numpy()
        CAMs = returnCAM(
            conv_feat, weight_softmax,
            [idx[0]
             ])  # generate class activation mapping for the top1 prediction
        single_input = inputs[k].numpy()
        writeCAMs(class_name, CAMs, single_input, k)

    gt_labels = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        gt_labels.append(ann['label'])

    if args.use_softmax:
        print("Averaging score over {} clips with softmax".format(
            outputs[0].shape[0]))
        results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
    else:
        print("Averaging score over {} clips without softmax (ie, raw)".format(
            outputs[0].shape[0]))
        results = [res.mean(axis=0) for res in outputs]
    top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
    mean_acc = mean_class_accuracy(results, gt_labels)
    print("Mean Class Accuracy = {:.02f}".format(mean_acc * 100))
    print("Top-1 Accuracy = {:.02f}".format(top1 * 100))
    print("Top-5 Accuracy = {:.02f}".format(top5 * 100))
Code example #9
        for i in range(5):
            ret, frame = cap.read()
            if not ret:
                break
            images.append(frame)
        imgs = data.get_data(images)
        print("start model recongnize")
        with torch.no_grad():
            output = model(return_loss=False, **imgs)
            outputs.append(output)

        use_softmax = True
        if use_softmax:
            print("Averaging score over {} clips with softmax".format(
                outputs[0].shape[0]))
            results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
        else:
            print("Averaging score over {} clips without softmax (ie, raw)".format(
                outputs[0].shape[0]))
            results = [res.mean(axis=0) for res in outputs]
        pred = int(np.argmax(results, axis=1))
        print(pred)
        print(results[0][pred])
        print(gt_labels[pred])
        text_color = color_val('green')
        for image in images:
            cv2.putText(image, str(gt_labels[pred]), (100, 100),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, text_color)
            cv2.imshow('image', image)
        cv2.waitKey(0)
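This fragment is the body of a live-capture loop; the surrounding setup is not shown, but it is presumably along these lines (illustrative):

import cv2

cap = cv2.VideoCapture(0)  # webcam index, or a path to a video file
outputs = []
while cap.isOpened():
    images = []
    # ...per-iteration body shown above: read 5 frames, preprocess with
    # data.get_data, run the recognizer, and overlay the predicted label...
    break  # placeholder so the sketch terminates
cap.release()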
Code example #10
File: test_recognizer.py  Project: hushunda/mmaction
def main():
    global args
    args = parse_args()

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    cfg = mmcv.Config.fromfile(args.config)
    if args.checkpoint is None:
        args.checkpoint = os.path.join(cfg.work_dir, 'latest.pth')
    else:
        args.checkpoint = os.path.join(cfg.work_dir,
                                       'epoch_%d.pth' % int(args.checkpoint))

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    # pass arg of fcn testing
    if args.fcn_testing:
        cfg.model.update({'fcn_testing': True})
        cfg.model['cls_head'].update({'fcn_testing': True})

    # for regular testing
    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))

    if args.launcher == 'none':
        raise NotImplementedError(
            "By default we use distributed testing, so the launcher should be 'pytorch'")
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    model = build_recognizer(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=1,
                                   dist=distributed,
                                   shuffle=False)

    load_checkpoint(model, args.checkpoint, map_location='cpu')

    find_unused_parameters = cfg.get('find_unused_parameters', False)
    model = MMDistributedDataParallel(
        model.cuda(),
        device_ids=[torch.cuda.current_device()],
        broadcast_buffers=False,
        find_unused_parameters=find_unused_parameters)

    outputs = multi_test(model, data_loader, cfg.work_dir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)

        data_path = []
        gt_labels = []
        pre = []
        for i in range(len(dataset)):
            ann = dataset.get_ann_info(i)
            gt_labels.append(ann['label'])
            data_path.append(ann['path'])
            pre.append(outputs[i].mean(axis=0))

        save_data = {
            path: [p, g]
            for path, p, g in zip(data_path, pre, gt_labels)
        }
        with open(os.path.join(cfg.work_dir, 'test.pkl'), 'wb') as f:
            pickle.dump(save_data, f)

        if args.use_softmax:
            print("Averaging score over {} clips with softmax".format(
                outputs[0].shape[0]))
            results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
        else:

            print("Averaging score over {} clips without softmax (ie, raw)".
                  format(outputs[0].shape[0]))
            results = [res.mean(axis=0) for res in outputs]
        top1, top5 = top_k_accuracy(results, gt_labels, k=(1, 5))
        mean_acc = mean_class_accuracy(results, gt_labels)
        with open(os.path.join(cfg.work_dir, 'test_result.txt'), 'w') as f:
            f.writelines('model is :' + args.checkpoint + '\n')
            f.writelines("Mean Class Accuracy = {:.04f}".format(mean_acc *
                                                                100) + '\n')
            f.writelines("Top-1 Accuracy = {:.04f}".format(top1 * 100) + '\n')
            f.writelines("Top-5 Accuracy = {:.04f}".format(top5 * 100) + '\n')

        print("Mean Class Accuracy = {:.04f}".format(mean_acc * 100))
        print("Top-1 Accuracy = {:.04f}".format(top1 * 100))
        print("Top-5 Accuracy = {:.04f}".format(top5 * 100))
Code example #11
def main():
    global args
    args = parse_args()

    # if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
    #     raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # must use fcn testing
    cfg.model.update({'fcn_testing': True})
    cfg.model['cls_head'].update({'fcn_testing': True})

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    if args.testfile != '':
        cfg.data.test.ann_file = args.testfile

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))

    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    assert distributed, "We only support distributed testing"

    model = build_recognizer(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=1,
        dist=distributed,
        shuffle=False)

    load_checkpoint(model, args.checkpoint, map_location='cpu')

    find_unused_parameters = cfg.get('find_unused_parameters', False)
    model = MMDistributedDataParallel(
        model.cuda(),
        device_ids=[torch.cuda.current_device()],
        broadcast_buffers=False,
        find_unused_parameters=find_unused_parameters)

    outputs = multi_test(model, data_loader)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('writing results to {}'.format(args.out))
        # mmcv.dump(outputs, args.out)

        results_numpyformat = np.zeros((1, 226))
        for i in range(len(outputs)):
            results_numpyformat = np.row_stack(
                (results_numpyformat, outputs[i]))
        np.save("res", results_numpyformat[1:, :])

        # gt_labels = []
        image_name = []
        for i in range(len(dataset)):
            ann = dataset.get_ann_info(i)
            image_name.append(ann['path'])
            # gt_labels.append(ann['label'])
        print(image_name[0])
        if args.use_softmax:
            print("Averaging score over {} clips with softmax".format(outputs[0].shape[0]))
            results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
        else:
            print("Averaging score over {} clips without softmax (ie, raw)".format(outputs[0].shape[0]))
            results = [res.mean(axis=0) for res in outputs]

        prediction = pd.read_csv('predictions.csv', header=None)
        if not os.path.exists(args.out):
            os.mkdir(args.out)
        pbar = tqdm(total=len(image_name))
        for i in range(len(results)):
            pred_class = np.argsort(results[i])[-1:].item()
            # index = prediction[(prediction[0] == image_name[i].replace("_depth", ""))].index.item()
            index = prediction[(prediction[0] == image_name[i].replace(
                "_color", ""))].index.item()
            prediction.iloc[index, 1] = pred_class
            pbar.update(1)
        prediction.to_csv(os.path.join(args.out, 'predictions.csv'),
                          header=False,
                          index=False)
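As an aside, the incremental np.row_stack accumulation used here can be collapsed into one call, which also removes the need for the dummy zero row and the [1:, :] slice:

results_numpyformat = np.vstack(outputs)  # equivalent to the loop above
np.save("res", results_numpyformat)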
Code example #12
def main():
    global args
    args = parse_args()

    # if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
    #     raise ValueError('The output file must be a pkl file.')

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.data.test.test_mode = True

    # pass arg of fcn testing
    if args.fcn_testing:
        cfg.model.update({'fcn_testing': True})
        cfg.model['cls_head'].update({'fcn_testing': True})

    # for regular testing
    if cfg.data.test.oversample == 'three_crop':
        cfg.model.spatial_temporal_module.spatial_size = 8

    dataset = obj_from_dict(cfg.data.test, datasets, dict(test_mode=True))
    # dataset[0]

    if args.launcher == 'none':
        # raise NotImplementedError("By default, we use distributed testing, so that launcher should be pytorch")
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    model = build_recognizer(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    data_loader = build_dataloader(dataset,
                                   imgs_per_gpu=1,
                                   workers_per_gpu=1,
                                   dist=distributed,
                                   shuffle=False)

    load_checkpoint(model, args.checkpoint, map_location='cpu')

    # find_unused_parameters = cfg.get('find_unused_parameters', False)
    # model = MMDistributedDataParallel(
    #     model.cuda(),
    #     device_ids=[torch.cuda.current_device()],
    #     broadcast_buffers=False,
    #     find_unused_parameters=find_unused_parameters)
    model = MMDataParallel(model,
                           device_ids=range(args.test_gpus,
                                            args.test_gpus + 1))
    outputs = multi_test(model, data_loader)
    # print(outputs)
    # print(type(outputs))
    # print(len(outputs))

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('writing results to {}'.format(args.out))

        # mmcv.dump(outputs, args.out)

        results_numpyformat = np.zeros((1, 226))
        for i in range(len(outputs)):
            results_numpyformat = np.row_stack(
                (results_numpyformat, outputs[i]))
        np.save(os.path.join(args.out, args.name), results_numpyformat[1:, :])

        # gt_labels = []
        image_name = []
        for i in range(len(dataset)):
            ann = dataset.get_ann_info(i)
            image_name.append(ann['path'])
            # gt_labels.append(ann['label'])

        if args.use_softmax:
            print("Averaging score over {} clips with softmax".format(
                outputs[0].shape[0]))
            results = [softmax(res, dim=1).mean(axis=0) for res in outputs]
        else:
            print("Averaging score over {} clips without softmax (ie, raw)".
                  format(outputs[0].shape[0]))
            results = [res.mean(axis=0) for res in outputs]
        prediction = pd.read_csv('predictions_testphase.csv', header=None)
        if not os.path.exists(args.out):
            os.mkdir(args.out)
        pbar = tqdm(total=len(image_name))
        for i in range(len(results)):
            pred_class = np.argsort(results[i])[-1:].item()
            # import pdb;pdb.set_trace()
            # print(pred_class)
            # print(image_name[i])

            index = prediction[(prediction[0] == image_name[i].replace(
                "_{}".format(args.color_or_depth), ""))].index.item()
            # print("{}:{}".format(index,pred_class))
            prediction.iloc[index, 1] = pred_class
            pbar.update(1)
            # if i==5:
            #     break
        prediction.to_csv(os.path.join(args.out,
                                       '{}_predictions.csv'.format(args.name)),
                          header=False,
                          index=False)