# Example #1 (score: 0)
def test_model(model, save):
    """Evaluate *model* on the DAVIS 2016 and DAVIS 2017 validation sets.

    Args:
        model: Segmentation model handed to ``evaluator.evaluate``.
        save: Forwarded as the third positional argument of
            ``evaluation.VOSEvaluator`` — presumably controls whether
            predictions are written to disk (TODO confirm against the
            evaluator's signature).
    """
    nframes = 128  # sample length passed to the dataset constructors

    def image_read(path):
        # Read one RGB frame and normalize it with ImageNet statistics.
        pic = Image.open(path)
        transform = tv.transforms.Compose([
            tv.transforms.ToTensor(),
            tv.transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD)
        ])
        return transform(pic)

    def label_read(path):
        # Read one annotation image as a LongTensor of label indices.
        pic = Image.open(path)
        transform = tv.transforms.Compose([LabelToLongTensor()])
        return transform(pic)

    datasets = {
        'DAVIS16_val':
        DAVIS17V2(config['davis_path'], '2016', 'val', image_read, label_read,
                  nframes),
        'DAVIS17_val':
        DAVIS17V2(config['davis_path'], '2017', 'val', image_read, label_read,
                  nframes)
    }

    for key, dataset in datasets.items():
        # Both original branches constructed an identical evaluator and only
        # the print message differed; build it once so a future dataset key
        # cannot leave `evaluator` unbound.
        evaluator = evaluation.VOSEvaluator(dataset, 'cuda', save)
        print("\n-- {} dataset initialization started.".format(key.split('_')[0]))
        # os.path.join with a single argument returns it unchanged, so use
        # the configured path directly.
        result_fpath = config['output_path']
        evaluator.evaluate(model, os.path.join(result_fpath, key))
# Example #2 (score: 0)
def forward(model,
            path,
            prediction_path,
            debug_sequences_dict=None,
            save_prediction=False):
    """Run *model* over the dataset rooted at *path* and write evaluation
    results under ``<path>/<this script's name>/<dataset key>``.

    Args:
        model: Segmentation model handed to ``evaluator.evaluate``.
        path: Root directory of the input sequences.
        prediction_path: Unused here; kept for interface compatibility.
        debug_sequences_dict: Optional mapping of dataset key -> sequences to
            debug; defaults to an empty tuple per dataset.
        save_prediction: Unused here; kept for interface compatibility.
    """
    size = (480, 854)  # fallback label size when no annotation file exists
    nframes = 128

    def image_read(frame_path):
        # Frame -> normalized float tensor (ImageNet mean/std).
        frame = Image.open(frame_path)
        preprocess = tv.transforms.Compose([
            tv.transforms.ToTensor(),
            tv.transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD)
        ])
        return preprocess(frame)

    def label_read(anno_path):
        # A missing annotation becomes an all-255 map (the ignore label).
        if not os.path.exists(anno_path):
            return torch.LongTensor(1, *size).fill_(255)
        anno = Image.open(anno_path)
        return tv.transforms.Compose([LabelToLongTensor()])(anno)

    # RANDATA signature mirrors the project's other dataset constructors
    # (root_path, image_set, readers, joint_transform, samplelen, start_frame).
    datasets = {
        "new-botinok":
        RANDATA(root_path=path,
                image_set='val',
                image_read=image_read,
                anno_read=label_read,
                joint_transform=None,
                samplelen=nframes,
                start_frame="first")
    }

    if debug_sequences_dict is None:
        debug_sequences_dict = dict.fromkeys(datasets, ())

    script_name = os.path.splitext(os.path.basename(__file__))[0]
    for key, dataset in datasets.items():
        evaluator = evaluation.VOSEvaluator(dataset, 'cuda', 'all',
                                            True, False,
                                            debug_sequences_dict.get(key))
        evaluator.evaluate(model, os.path.join(path, script_name, key))
# Example #3 (score: 0)
def test_model(model, debug_sequences_dict=None, save_predictions=False, prediction_path=None):
    """Evaluate *model* on the DAVIS 2016/2017 and YouTube-VOS splits,
    enabling softmax aggregation only on the multi-target sets.

    Args:
        model: Segmentation model; its ``update_with_softmax_aggregation``
            attribute is toggled per dataset before evaluation.
        debug_sequences_dict: Optional mapping of dataset key -> sequences to
            debug; defaults to an empty tuple per dataset.
        save_predictions: Unused here; kept for interface compatibility.
        prediction_path: Unused here; kept for interface compatibility.
    """
    nframes = 128

    def image_read(path):
        # RGB frame -> ImageNet-normalized tensor.
        transform = tv.transforms.Compose([
            tv.transforms.ToTensor(),
            tv.transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD)])
        return transform(Image.open(path))

    def label_read(path):
        # Annotation image -> LongTensor of label indices.
        return tv.transforms.Compose([LabelToLongTensor()])(Image.open(path))

    davis_root = config['davis17_path']
    ytvos_root = config['ytvos_path']
    datasets = {
        'DAVIS16_train': DAVIS17V2(davis_root, '2016', 'train', image_read, label_read, None, nframes),
        'DAVIS16_val': DAVIS17V2(davis_root, '2016', 'val', image_read, label_read, None, nframes),
        'DAVIS17_val': DAVIS17V2(davis_root, '2017', 'val', image_read, label_read, None, nframes),
        'YTVOS_jval': YTVOSV2(ytvos_root, 'train', 'val_joakim', 'JPEGImages', image_read, label_read,
                              None, nframes),
        'YTVOS_val': YTVOSV2(ytvos_root, 'valid', None, 'JPEGImages', image_read, label_read,
                             None, nframes)
    }
    multitarget_sets = ('DAVIS17_val', 'YTVOS_jval', 'YTVOS_val')

    if debug_sequences_dict is None:
        debug_sequences_dict = dict.fromkeys(datasets, ())

    # Per-dataset values of VOSEvaluator's 4th and 5th positional arguments
    # (semantics live in the evaluator; only these two keys deviate).
    special_flags = {'YTVOS_val': (True, False), 'DAVIS17_val': (True, True)}

    result_root = os.path.join(config['output_path'],
                               os.path.splitext(os.path.basename(__file__))[0])
    for key, dataset in datasets.items():
        flag_a, flag_b = special_flags.get(key, (False, True))
        evaluator = evaluation.VOSEvaluator(dataset, 'cuda', 'all', flag_a, flag_b,
                                            debug_sequences_dict.get(key))
        # Only apply the multitarget aggregation if we have mult. targets
        model.update_with_softmax_aggregation = key in multitarget_sets
        evaluator.evaluate(model, os.path.join(result_root, key))
# Example #4 (score: 0)
def test_model(model,
               debug_sequences_dict=None,
               save_predictions=False,
               prediction_path=None):
    """Evaluate *model* on DAVIS 2016/2017 and YouTube-VOS validation splits.

    Softmax aggregation is switched on for the multi-target sets only.

    Args:
        model: Segmentation model; its ``update_with_softmax_aggregation``
            attribute is set per dataset before evaluation.
        debug_sequences_dict: Optional mapping of dataset key -> sequences to
            debug; defaults to an empty tuple per dataset.
        save_predictions: Unused here; kept for interface compatibility.
        prediction_path: Unused here; kept for interface compatibility.
    """
    nframes = 128

    def image_read(path):
        # Equivalent to composing ToTensor then Normalize.
        to_tensor = tv.transforms.ToTensor()
        normalize = tv.transforms.Normalize(mean=IMAGENET_MEAN,
                                            std=IMAGENET_STD)
        return normalize(to_tensor(Image.open(path)))

    def label_read(path):
        # A one-element Compose just applies the single transform.
        return LabelToLongTensor()(Image.open(path))

    datasets = {
        "DAVIS16_train":
        DAVIS17V2(config["davis17_path"], "2016", "train", image_read,
                  label_read, None, nframes),
        "DAVIS16_val":
        DAVIS17V2(config["davis17_path"], "2016", "val", image_read,
                  label_read, None, nframes),
        "DAVIS17_val":
        DAVIS17V2(config["davis17_path"], "2017", "val", image_read,
                  label_read, None, nframes),
        "YTVOS_jval":
        YTVOSV2(config["ytvos_path"], "train", "val_joakim", "JPEGImages",
                image_read, label_read, None, nframes),
        "YTVOS_val":
        YTVOSV2(config["ytvos_path"], "valid", None, "JPEGImages",
                image_read, label_read, None, nframes),
    }
    multitarget_sets = ("DAVIS17_val", "YTVOS_jval", "YTVOS_val")

    if debug_sequences_dict is None:
        debug_sequences_dict = dict.fromkeys(datasets, ())

    result_fpath = os.path.join(
        config["output_path"],
        os.path.splitext(os.path.basename(__file__))[0])

    for key, dataset in datasets.items():
        # 4th/5th VOSEvaluator positional arguments vary per dataset.
        if key == "YTVOS_val":
            flags = (True, False)
        elif key == "DAVIS17_val":
            flags = (True, True)
        else:
            flags = (False, True)
        evaluator = evaluation.VOSEvaluator(dataset, "cuda", "all", *flags,
                                            debug_sequences_dict.get(key))
        # Only apply the multitarget aggregation if we have mult. targets
        model.update_with_softmax_aggregation = key in multitarget_sets
        evaluator.evaluate(model, os.path.join(result_fpath, key))