def build_dataset(dataset_list,
                  transform=None,
                  target_transform=None,
                  is_train=True):
    """Build the datasets named in ``dataset_list`` via the catalog.

    Args:
        dataset_list: non-empty iterable of dataset names registered in
            ``DatasetCatalog``.
        transform: image transform forwarded to every dataset factory.
        target_transform: target transform forwarded to every factory.
        is_train: when True, return one (possibly concatenated) training
            dataset wrapped in a list; when False, return the individual
            datasets so each can be evaluated separately.

    Returns:
        list: ``[dataset]`` for training, or the per-name datasets for
        evaluation.
    """
    assert len(dataset_list) > 0
    datasets = []
    for dataset_name in dataset_list:
        data = DatasetCatalog.get(dataset_name)
        args = data['args']
        factory = _DATASETS[data['factory']]
        args['transform'] = transform
        args['target_transform'] = target_transform
        if factory == VOCDataset:
            # Difficult boxes are only kept for evaluation.
            args['keep_difficult'] = not is_train
        elif factory in (COCODataset, MyDataset):
            # Both factories take the same flag: drop annotation-free
            # images during training only (merged duplicated branches).
            args['remove_empty'] = is_train
        dataset = factory(**args)
        datasets.append(dataset)
    # for testing, return a list of datasets
    if not is_train:
        return datasets
    dataset = datasets[0]
    if len(datasets) > 1:
        dataset = ConcatDataset(datasets)

    return [dataset]
# --- Example #2 ---
def build_dataset(dataset_list,
                  transform=None,
                  target_transform=None,
                  is_train=True) -> Dataset:
    """Build the datasets named in ``dataset_list`` via the catalog.

    Args:
        dataset_list: non-empty iterable of registered dataset names.
        transform: image transform forwarded to every dataset factory.
        target_transform: target transform forwarded to every factory.
        is_train: when True, return one (possibly concatenated) training
            dataset wrapped in a list; when False, return the individual
            datasets for evaluation.

    Returns:
        A list of torch ``Dataset`` instances (length 1 when training).
    """
    assert dataset_list, "dataset_list should not be empty"
    datasets = []
    for dataset_name in dataset_list:
        data = DatasetCatalog.get(dataset_name)
        args = data["args"]
        factory = _DATASETS[data["factory"]]
        args["transform"] = transform
        args["target_transform"] = target_transform
        if factory == VOCDataset:
            # Difficult boxes are only kept for evaluation.
            args["keep_difficult"] = not is_train
        elif factory in (XVIEWCOCODataset, UCBCOCODataset, COCODataset):
            # All COCO-style factories take the same flag: drop
            # annotation-free images during training only
            # (merged three duplicated branches).
            args["remove_empty"] = is_train
        dataset = factory(**args)
        datasets.append(dataset)
    # for testing, return a list of datasets
    if not is_train:
        return datasets
    dataset = datasets[0]
    if len(datasets) > 1:
        dataset = ConcatDataset(datasets)

    return [dataset]
# --- Example #3 ---
def build_dataset(dataset_list,
                  transform=None,
                  target_transform=None,
                  is_test=False,
                  split=False,
                  split_val_size=10):
    """Build the datasets named in ``dataset_list`` via the catalog.

    Args:
        dataset_list: non-empty iterable of registered dataset names.
        transform: image transform forwarded to every dataset factory.
        target_transform: target transform forwarded to every factory.
        is_test: when True, return the individual datasets so each can
            be evaluated separately.
        split: when True (and not testing), split the combined dataset
            into train/val via ``get_train_val_splits``.
        split_val_size: size argument forwarded to the splitter.

    Returns:
        A list of datasets when ``is_test``; a (train, val) split when
        ``split``; otherwise the single combined dataset.
    """
    assert len(dataset_list) > 0
    datasets = []
    for dataset_name in dataset_list:
        data = DatasetCatalog.get(dataset_name)
        args = data['args']
        factory = _DATASETS[data['factory']]
        args['transform'] = transform
        args['target_transform'] = target_transform
        if factory == VOCDataset or factory == VOCModelDataset:
            # Difficult boxes are only kept for evaluation.
            args['keep_difficult'] = is_test
        elif factory == COCODataset:
            # Drop annotation-free images during training only.
            args['remove_empty'] = not is_test
        dataset = factory(**args)
        datasets.append(dataset)
    # for testing, return a list of datasets
    if is_test:
        return datasets
    # Combine first, then split once — removes the duplicated
    # ``if split: return ...`` from both branches of the original.
    if len(datasets) > 1:
        dataset = DetectionConcatDataset(datasets)
    else:
        dataset = datasets[0]
    if split:
        return get_train_val_splits(dataset, split_val_size)

    return dataset
# --- Example #4 ---
def build_dataset(dataset_list,
                  transform=None,
                  target_transform=None,
                  is_test=False):
    """Instantiate every catalog dataset named in ``dataset_list``.

    Returns the per-name datasets as a list when ``is_test``; otherwise
    a single dataset (concatenated when more than one was built).
    The ``voc_2012_trainval`` split is deliberately skipped.
    """
    assert len(dataset_list) > 0
    built = []
    for name in dataset_list:
        # This particular split is intentionally excluded.
        if name == 'voc_2012_trainval':
            continue
        entry = DatasetCatalog.get(name)
        factory = _DATASETS[entry['factory']]
        kwargs = entry['args']
        kwargs['transform'] = transform
        kwargs['target_transform'] = target_transform
        if factory == VOCDataset:
            kwargs['keep_difficult'] = is_test
        elif factory == COCODataset:
            kwargs['remove_empty'] = not is_test
        built.append(factory(**kwargs))
    # Evaluation wants the datasets kept separate.
    if is_test:
        return built
    return ConcatDataset(built) if len(built) > 1 else built[0]
# --- Example #5 ---
def build_dataset(dataset_list, transform=None, target_transform=None, is_test=False):
    """Instantiate every catalog dataset named in ``dataset_list``.

    Factories are resolved by name from this module's global namespace.
    Returns the per-name datasets as a list when ``is_test``; otherwise
    a single dataset (concatenated when more than one was built).
    """
    assert len(dataset_list) > 0
    built = []
    for name in dataset_list:
        entry = DatasetCatalog.get(name)
        kwargs = entry['args']
        # Look the factory class up by name in this module.
        factory = globals()[entry['factory']]
        kwargs['transform'] = transform
        kwargs['target_transform'] = target_transform
        built.append(factory(**kwargs))
    # Evaluation wants the datasets kept separate.
    if is_test:
        return built
    return ConcatDataset(built) if len(built) > 1 else built[0]
# --- Example #6 ---
def build_dataset(base_path: str,
                  dataset_list,
                  transform=None,
                  target_transform=None,
                  is_train=True):
    """Instantiate every catalog dataset named in ``dataset_list``.

    ``base_path`` is forwarded to ``DatasetCatalog.get`` to locate the
    data on disk. Returns the per-name datasets as a list when not
    training; otherwise a single (possibly concatenated) dataset
    wrapped in a list.
    """
    assert len(dataset_list) > 0
    built = []
    for name in dataset_list:
        entry = DatasetCatalog.get(base_path, name)
        kwargs = entry['args']
        factory = _DATASETS[entry['factory']]
        kwargs['transform'] = transform
        kwargs['target_transform'] = target_transform
        built.append(factory(**kwargs))
    # Evaluation wants the datasets kept separate.
    if not is_train:
        return built
    combined = ConcatDataset(built) if len(built) > 1 else built[0]
    return [combined]