Example #1
    def test_ignore_pixel(self):
        def test_img(p,pixels):
            # fold the unique pixel values of the image at path p into the running set
            img=cv2.imread(p)
            new_pixels=np.unique(img)
            pixels=pixels.union(new_pixels)
            return pixels

        # 'FBMS','cdnet2014','segtrackv2', 'BMCnet', 'DAVIS2016', 'DAVIS2017'
        for dataset in ['DAVIS2016','DAVIS2017']:
            self.config.dataset=dataset
            for split in ['train','val']:
                pixels=set()
                xxx_dataset=get_dataset(self.config,split)
                # N=min(100,len(xxx_dataset))
                N=len(xxx_dataset)
                for i in trange(N):
                    main,aux,gt=xxx_dataset.__get_path__(i)
                    if isinstance(gt,str):
                        pixels=test_img(gt,pixels)
                    else:
                        for x in gt:
                            pixels=test_img(x,pixels)

                print(dataset,split,pixels)
        self.assertTrue(True)
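
As a side note, np.unique can also report how often each value occurs, which helps spot stray anti-aliased pixels in a mask. A minimal standalone sketch of the same check (unique_label_values is a hypothetical helper, not part of the project):

import cv2
import numpy as np

def unique_label_values(path):
    # map each pixel value in the mask at `path` to its frequency
    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    values, counts = np.unique(img, return_counts=True)
    return dict(zip(values.tolist(), counts.tolist()))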
Example #2
    def test_motion_diff(self):
        """
        load model and show model output
        """
        #config_txt=os.path.expanduser('~/tmp/logs/motion/motion_diff/cdnet2014/test/2020-09-25___19-16-21/config.txt')
        config_txt=os.path.expanduser('~/tmp/logs/motion/motion_diff/FBMS/test/2020-09-25___18-52-18/config.txt')
        config=load_config(config_txt)

        model=get_model(config)
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        model.to(device)
        model.eval()
        for split in ['train','val']:
            xxx_loader=get_dataset(config,split)
            dataset_loader=td.DataLoader(dataset=xxx_loader,
                                         batch_size=1,
                                         shuffle=False,
                                         drop_last=False,
                                         num_workers=2)
            for data in dataset_loader:
                images,origin_labels,resize_labels=prepare_input_output(data=data,device=device,config=config)
                motionseg_show_images(images,origin_labels,[])

                outputs=model(images)
                predict=outputs['masks'][0]
                motionseg_show_images([],[],predict)
                break
        self.assertTrue(True)
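
Since this loop only visualizes predictions, the forward pass could be wrapped in torch.no_grad() to skip gradient bookkeeping and save memory; a minimal adjustment using the same variables as above:

                with torch.no_grad():
                    outputs=model(images)
                predict=outputs['masks'][0]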
Example #3
    def test_fbms_pgm(self):
        for dataset in ['FBMS']:
            self.config.dataset=dataset
            xxx_dataset=get_dataset(self.config,'train')
            root=xxx_dataset.config.root_path

            # note: recursive=True has no effect here since the patterns use '*', not '**'
            ppm_files=glob.glob(os.path.join(root,
                                             'Trainingset',
                                             '*',
                                             'GroundTruth',
                                             '*.ppm'),recursive=True)

            pgm_files=glob.glob(os.path.join(root,
                                             'Trainingset',
                                             '*',
                                             'GroundTruth',
                                             '*.pgm'),recursive=True)

            in_count=noin_count=0
            for pgm in pgm_files:
                ppm=pgm.replace('.pgm','.ppm')
                if ppm in ppm_files:
                    in_count+=1
                else:
                    if noin_count==0:
                        print(pgm,ppm)
                    noin_count+=1

            print(root)
            print('in_count={}, noin_count={}, pgm={}, ppm={}'.format(in_count,noin_count,len(pgm_files),len(ppm_files)))
            self.assertTrue(True)
Example #4
def evaluation_davis(result_root_path, dataset_name='DAVIS2017', split='val'):
    config = get_default_config()
    config.dataset = dataset_name
    config = fine_tune_config(config)
    dataset = get_dataset(config, split)
    N = len(dataset)

    sum_f = sum_p = sum_r = 0
    sum_tp = sum_fp = sum_tn = sum_fn = 0
    for idx in trange(N):
        img1_path, img2_path, gt_path = dataset.__get_path__(idx)
        save_path = dataset.get_result_path(result_root_path, img1_path)

        assert os.path.exists(gt_path), gt_path
        assert os.path.exists(save_path), save_path

        gt_img = cv2.imread(gt_path, cv2.IMREAD_GRAYSCALE)
        pred_img = cv2.imread(save_path, cv2.IMREAD_GRAYSCALE)

        tp = np.sum(np.logical_and(gt_img > 0, pred_img > 0))
        tn = np.sum(np.logical_and(gt_img == 0, pred_img == 0))
        fp = np.sum(np.logical_and(gt_img == 0, pred_img > 0))
        fn = np.sum(np.logical_and(gt_img > 0, pred_img == 0))

        # an empty frame counts as perfect: with no foreground to find (tp+fn==0)
        # or nothing predicted (tp+fp==0), the metric defaults to 1
        if tp + fn == 0:
            r = 1
        else:
            r = tp / (tp + fn)

        if tp + fp == 0:
            p = 1
        else:
            p = tp / (tp + fp)

        if p + r == 0:
            f = 1
        else:
            f = 2 * p * r / (p + r)

        sum_f += f
        sum_p += p
        sum_r += r

        sum_tp += tp
        sum_fp += fp
        sum_tn += tn
        sum_fn += fn
    overall_precision = sum_tp / (sum_tp + sum_fp + 1e-5)
    overall_recall = sum_tp / (sum_tp + sum_fn + 1e-5)
    overall_fmeasure = 2 * overall_precision * overall_recall / (
        overall_precision + overall_recall + 1e-5)

    print('tp={},tn={},fp={},fn={}'.format(sum_tp, sum_tn, sum_fp, sum_fn))
    print('precision={},recall={}'.format(overall_precision, overall_recall))
    print('overall fmeasure is {}'.format(overall_fmeasure))

    print('mean precision={}, recall={}, fmeasure={}'.format(
        sum_p / N, sum_r / N, sum_f / N))
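
Note that the two reported aggregates answer different questions: the 'overall' numbers are micro-averaged (computed from the summed confusion counts), while the 'mean' numbers are macro-averaged (per-frame metrics averaged over N). They diverge when foreground area varies between frames. As an illustrative example with two frames: frame A with tp=90, fp=10, fn=0 gives p=0.9, r=1.0, and frame B with tp=1, fp=0, fn=9 gives p=1.0, r=0.1; micro-averaging then yields p=91/101≈0.90 and r=91/100=0.91, while macro-averaging yields p=0.95 and r=0.55.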
Example #5
def test(config):
    model = get_load_convert_model(config)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    if config.dataset.upper() in ['DAVIS2017', 'DAVIS2016']:
        if config.app == 'test':
            split_set = ['val']
        elif config.app == 'benchmark':
            split_set = ['test-dev', 'test-challenge']
        else:
            assert False

        for split in split_set:
            save_dir = os.path.join(os.path.expanduser('~/tmp/result'),
                                    config.dataset, split, config.note)
            xxx_dataset = get_dataset(config, split)
            xxx_loader = td.DataLoader(dataset=xxx_dataset,
                                       batch_size=1,
                                       shuffle=False,
                                       num_workers=2,
                                       pin_memory=True)
            tqdm_step = tqdm(xxx_loader, desc='steps', leave=False)
            for step, data in enumerate(tqdm_step):
                frames = data['images']
                main_path = data['main_path'][0]
                height, width, _ = data['shape']
                height, width = height[0], width[0]

                save_path = xxx_dataset.get_result_path(save_dir, main_path)
                assert save_path != main_path
                images = [img.to(device).float() for img in frames]
                outputs = model.forward(images)
                result_mask = F.interpolate(outputs['masks'][0],
                                            size=(height, width),
                                            mode='nearest')
                # print(result_mask.shape) # (batch_size,2,height,width)
                np_mask = np.squeeze(
                    np.argmax(result_mask.data.cpu().numpy(),
                              axis=1)).astype(np.uint8)
                # print(np_mask.shape) # (height,width)

                os.makedirs(os.path.dirname(save_path), exist_ok=True)
                print(f'save image to {save_path}')
                cv2.imwrite(save_path, np_mask)

            if split == 'val':
                args = edict()
                args.davis_path = os.path.expanduser('~/cvdataset/DAVIS')
                args.set = 'val'
                args.task = 'unsupervised'
                args.results_path = save_dir

                # from davis_benchmark import benchmark
                # benchmark(args)
                print('please run davis_benchmark {}'.format(args))
    else:
        assert False, 'not supported dataset for test'
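
A note on the resize-then-argmax order in this example: because mode='nearest' only copies logit values, interpolating the two-channel output and then taking the argmax yields the same mask as taking the argmax at model resolution and resizing the mask itself; a smoothing mode such as 'bilinear' would instead blend logits across object boundaries before the argmax.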
Example #6
    def compare_fbms_3dmotion(self):
        def filter_gt_files(gt_files):
            # keep only the first ground-truth file from each directory;
            # the for-else clause runs only when the inner loop exits without break
            new_gt_files=[]
            for f in gt_files:
                for new_f in new_gt_files:
                    if os.path.dirname(f)==os.path.dirname(new_f):
                        break
                else:
                    new_gt_files.append(f)

            return new_gt_files

        images={}
        files={}
        for dataset in ['FBMS','FBMS-3D']:
            for split in ['train','val']:
                self.config.dataset=dataset
                xxx_dataset=get_dataset(self.config,split)
                files[dataset+'/'+split]=xxx_dataset.gt_files=filter_gt_files(xxx_dataset.gt_files)
                xxx_dataset.img_files=[]
                images[dataset+'/'+split]=[]
                assert len(xxx_dataset)<=30
                for i in range(len(xxx_dataset)):
                    frame_images,labels,main_file,aux_file,gt_file=xxx_dataset.__get_image__(i)
                    images[dataset+'/'+split].append(frame_images[0])
                    images[dataset+'/'+split].append(labels[0])

        for split in ['train','val']:
            if split=='train':
                N=58
            else:
                N=60

            assert N==len(images['FBMS/'+split]),'len {} is {}'.format(split,len(images['FBMS/'+split]))
            for i in range(0,N,2):
                x1=images['FBMS/'+split][i]
                y1=images['FBMS/'+split][i+1]
                x2=images['FBMS-3D/'+split][i]
                y2=images['FBMS-3D/'+split][i+1]
                # uint8 subtraction wraps around, so compare element-wise instead
                if np.any(x1!=x2) or np.any(y1!=y2):
                    print(files['FBMS/'+split][i//2])
                    print(files['FBMS-3D/'+split][i//2])
                    show_images(images=[x1,y1,x2,y2],titles=['x1','y1','x2','y2'])
Example #7
def compute_fps(config, idx):
    batch_size = config.batch_size
    model = get_load_convert_model(config)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    split = 'val'
    xxx_dataset = get_dataset(config, split)
    xxx_loader = td.DataLoader(dataset=xxx_dataset,
                               batch_size=batch_size,
                               shuffle=False,
                               num_workers=batch_size,
                               pin_memory=True)

    tqdm_step = tqdm(xxx_loader, desc='steps', leave=False)
    counter = 0.0
    total_time = 0.0

    for step, data in enumerate(tqdm_step):
        frames = data['images']
        images = [img.to(device).float() for img in frames]
        start_time = time.time()
        outputs = model.forward(images)
        total_time += (time.time() - start_time)
        counter += outputs['masks'][0].shape[0]

        if counter > 1000:
            break
    fps = counter / total_time
    print(
        f'{idx}: {config.backbone_name} {config.share_backbone} {config.attention_type} fps={fps}'
    )

    fps_summary_file = os.path.expanduser('~/tmp/result/fps.json')
    # 'r+' requires the file to exist; seek/truncate below rewrite it in place
    with open(fps_summary_file, 'r+') as f:
        try:
            fps_summary = json.load(f)
        except json.JSONDecodeError:
            # empty or corrupt summary file: start fresh
            fps_summary = dict()
        finally:
            f.seek(0)
            fps_summary[config.note + '-' + str(idx) + '-' +
                        config.backbone_name + '-' +
                        str(config.share_backbone) + '-' +
                        config.attention_type] = fps
            json.dump(fps_summary, f)
            f.truncate()
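
One timing caveat: CUDA kernels launch asynchronously, so wrapping model.forward in time.time() alone can under-report GPU work. A hedged adjustment to the timing loop, using the same variables as above:

        if torch.cuda.is_available():
            torch.cuda.synchronize()   # drain pending GPU work before starting the clock
        start_time = time.time()
        outputs = model.forward(images)
        if torch.cuda.is_available():
            torch.cuda.synchronize()   # ensure the forward pass has actually finished
        total_time += time.time() - start_time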
Example #8
    def test_dataset(self):
        def test_img(p):
            try:
                img=cv2.imread(p)
            except Exception as e:
                print(dataset,p,e)
            else:
                self.assertIsNotNone(img,p)

        # 'FBMS','cdnet2014','segtrackv2', 'BMCnet'
        for dataset in ['FBMS','FBMS-3D']:
            self.config.dataset=dataset
            for split in ['train','val']:
                xxx_dataset=get_dataset(self.config,split)
                # N=min(10,len(xxx_dataset))
                N=len(xxx_dataset)
                for i in trange(N):
                    main,aux,gt=xxx_dataset.__get_path__(i)
                    for p in [main,aux,gt]:
                        if isinstance(p,str):
                            test_img(p)
                        else:
                            for x in p:
                                test_img(x)
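
Note that cv2.imread typically does not raise on a missing or unreadable file; it returns None, so the assertIsNotNone in the else branch carries the actual check, while the try/except only guards against unexpected errors surfacing from the OpenCV bindings.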
Example #9
def get_dist_module(config):
    model = get_model(config)

    # support for cpu/gpu
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    if config.use_sync_bn:
        torch.cuda.set_device(config.gpu)
        model.cuda(config.gpu)
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        model = DDP(model,
                    find_unused_parameters=True,
                    device_ids=[config.gpu])
    else:
        model.to(device)

    if config.net_name == 'motion_diff' or not config.net_name.startswith(
            'motion'):
        seg_loss_fn = torch.nn.BCEWithLogitsLoss()
    elif config.loss_name in ['iou', 'dice']:
        # the IoU/Dice losses do not support ignore_index
        assert config.dataset not in ['cdnet2014', 'all', 'all2', 'all3']
        assert config.ignore_pad_area == 0
        if config.loss_name == 'iou':
            seg_loss_fn = jaccard_loss
        else:
            seg_loss_fn = dice_loss
    else:
        seg_loss_fn = torch.nn.CrossEntropyLoss(ignore_index=255)

    if config.use_sync_bn:
        seg_loss_fn = seg_loss_fn.cuda(config.gpu)

    optimizer_params = [{
        'params': [p for p in model.parameters() if p.requires_grad]
    }]

    if config.optimizer == 'adam':
        optimizer = torch.optim.Adam(optimizer_params,
                                     lr=config['init_lr'],
                                     amsgrad=False)
    else:
        assert config.init_lr > 1e-3
        optimizer = torch.optim.SGD(optimizer_params,
                                    lr=config['init_lr'],
                                    momentum=0.9,
                                    weight_decay=1e-4)

    dataset_loaders = {}
    for split in ['train', 'val']:
        xxx_dataset = get_dataset(config, split)

        if config.use_sync_bn and split == 'train':
            xxx_sampler = torch.utils.data.DistributedSampler(xxx_dataset)
        else:
            xxx_sampler = None

        batch_size = config.batch_size if split == 'train' else 1

        if split == 'train':
            xxx_loader = td.DataLoader(dataset=xxx_dataset,
                                       batch_size=batch_size,
                                       shuffle=(xxx_sampler is None),
                                       drop_last=True,
                                       num_workers=2,
                                       sampler=xxx_sampler,
                                       pin_memory=True)
        else:
            xxx_loader = td.DataLoader(dataset=xxx_dataset,
                                       batch_size=batch_size,
                                       shuffle=False,
                                       num_workers=2,
                                       pin_memory=True)
        dataset_loaders[split] = xxx_loader

    return model, seg_loss_fn, optimizer, dataset_loaders
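
When the DistributedSampler is active, the training loop that consumes these loaders should call set_epoch once per epoch so each process reshuffles differently every epoch. A minimal sketch under that assumption (num_epochs is a hypothetical name, not part of the function above):

model, seg_loss_fn, optimizer, dataset_loaders = get_dist_module(config)
train_loader = dataset_loaders['train']
for epoch in range(num_epochs):
    sampler = train_loader.sampler
    if isinstance(sampler, torch.utils.data.DistributedSampler):
        sampler.set_epoch(epoch)  # reseed the per-process shuffle
    for data in train_loader:
        pass  # forward/backward step goes here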
Example #10
    # avoid opening too many files in the dataloader workers
    torch.multiprocessing.set_sharing_strategy('file_system')
    # detect NaN in the loss
    torch.autograd.set_detect_anomaly(True)
    # set the root dir for downloaded model weights
    torch.hub.set_dir(os.path.expanduser('~/.torch/models'))

    parser = get_parser()
    args = parser.parse_args()

    config = update_default_config(args)

    if args.app == 'dataset':
        dataset_loaders = {}
        for split in ['train', 'val']:
            xxx_dataset = get_dataset(config, split)

            dataset_size = len(xxx_dataset)
            for idx in range(dataset_size):
                xxx_dataset.__getitem__(idx)

            if config.use_sync_bn and split == 'train':
                xxx_sampler = torch.utils.data.DistributedSampler(xxx_dataset)
            else:
                xxx_sampler = None

            batch_size = config.batch_size if split == 'train' else 1

            if split == 'train':
                # same loader arguments as the train split in Example #9
                xxx_loader = td.DataLoader(dataset=xxx_dataset,
                                           batch_size=batch_size,
                                           shuffle=(xxx_sampler is None),
                                           drop_last=True,
                                           num_workers=2,
                                           sampler=xxx_sampler,
                                           pin_memory=True)
Example #11
    def test_duration(self):
        def get_shape(img_file):
            if img_file.endswith(('ppm','pgm')):
                img=pbm.imread(img_file)
            else:
                img=cv2.imread(img_file)

            return img.shape[:2]

        def statistic(imgs,v,min_duration,max_duration,min_shape,max_shape):
            duration=len(imgs)
            if duration==0:
                print(v,duration)
                return min_duration,max_duration,min_shape,max_shape

            shape=get_shape(imgs[0])

            if min_duration is None:
                max_duration=min_duration=duration
                min_shape=max_shape=shape
            else:
                min_duration=min(min_duration,duration)
                max_duration=max(max_duration,duration)

                if shape[0]*shape[1]>max_shape[0]*max_shape[1]:
                    max_shape=shape

                if shape[0]*shape[1]<min_shape[0]*min_shape[1]:
                    min_shape=shape

            print(v,duration,shape)

            return min_duration,max_duration,min_shape,max_shape

        for dataset in ['FBMS','FBMS-3D']:
            self.config.dataset=dataset
            xxx_dataset=get_dataset(self.config,'train')
            root=xxx_dataset.config.root_path
            min_duration=None
            max_duration=None
            min_shape=max_shape=None

            clips=glob.glob(os.path.join(root,'*','*'),recursive=False)
            for v in clips:
                imgs=glob.glob(os.path.join(v,'*.jpg'))
                min_duration,max_duration,min_shape,max_shape=statistic(imgs,v,min_duration,max_duration,min_shape,max_shape)

            print(dataset,min_duration,max_duration,min_shape,max_shape)

        for dataset in ['DAVIS2016','DAVIS2017']:
            self.config.dataset=dataset
            xxx_dataset=get_dataset(self.config,'train')
            root=xxx_dataset.config.root_path
            min_duration=None
            max_duration=None
            min_shape=max_shape=None

            clips=[]
            for txt_file in glob.glob(os.path.join(root,'ImageSets','201*','*.txt')):
                with open(txt_file,'r') as f:
                    clips+=f.readlines()

            clips=[c.strip() for c in clips]

            for v in clips:
                imgs=glob.glob(os.path.join(root,'JPEGImages','480p',v,'*.jpg'))
                min_duration,max_duration,min_shape,max_shape=statistic(imgs,v,min_duration,max_duration,min_shape,max_shape)

            print(dataset,min_duration,max_duration,min_shape,max_shape)

        for dataset in ['segtrackv2']:
            self.config.dataset=dataset
            xxx_dataset=get_dataset(self.config,'train')
            root=xxx_dataset.config.root_path
            min_duration=None
            max_duration=None
            min_shape=max_shape=None   # reset shapes too, so values from the previous dataset cannot leak in

            clips=glob.glob(os.path.join(root,'JPEGImages','*'),recursive=False)
            print(clips)
            for v in clips:
                imgs=[]
                for fmt in ['png','bmp']:
                    imgs+=glob.glob(os.path.join(v,'*.'+fmt))

                min_duration,max_duration,min_shape,max_shape=statistic(imgs,v,min_duration,max_duration,min_shape,max_shape)
            print(dataset,min_duration,max_duration,min_shape,max_shape)
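
One caveat for all three loops above: glob.glob returns paths in arbitrary filesystem order, so imgs[0] is not guaranteed to be a clip's first frame. The shape statistics are unaffected because every frame in a clip shares one shape, but any code that depends on frame order should wrap the glob result in sorted() first.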