def main():
    args = parse_opts()
    set_random_seed(args.random_seed)

    logger_name = "retrieval_data"
    retrieval_dataset_name_suffix = "-retrieval"
    logger = setup_logger(logger_name, None)
    data_path = get_data_path()

    script_path = os.path.expanduser(os.path.dirname(os.path.abspath(__file__)))
    target_path = os.path.join(script_path, "cnnimageretrieval-pytorch", "data")
    mkdir(target_path)

    dataset_train = build_dataset_by_name(data_path, args.dataset_train,
                                          eval_scale=args.dataset_train_scale,
                                          logger_prefix=logger_name)
    retrieval_dataset_train_name = dataset_train.get_name() + retrieval_dataset_name_suffix

    dataset_val = build_dataset_by_name(data_path, args.dataset_val,
                                        eval_scale=args.dataset_val_scale,
                                        logger_prefix=logger_name)
    retrieval_dataset_val_name = dataset_val.get_name() + retrieval_dataset_name_suffix


    datasets_test = []
    retrieval_dataset_test_names = []
    if args.datasets_test:
        if len(args.datasets_test_scale) == 1:
            datasets_test_scale = args.datasets_test_scale * len(args.datasets_test)
        else:
            datasets_test_scale = args.datasets_test_scale
        assert len(args.datasets_test) == len(datasets_test_scale), "Arg datasets-test-scale should have length 1 or the same length as datasets-test"

        for dataset_name, scale in zip(args.datasets_test, datasets_test_scale):
            dataset = build_dataset_by_name(data_path, dataset_name,
                                            eval_scale=scale,
                                            logger_prefix=logger_name)
            retrieval_dataset_test_names.append(dataset.get_name() + retrieval_dataset_name_suffix)
            datasets_test.append(dataset)

    # create the retrieval dataset; append a crop suffix to the names when using random crops
    if args.num_random_crops_per_image > 0:
        crop_suffix = f"-rndCropPerImage{args.num_random_crops_per_image}"
        retrieval_dataset_train_name = retrieval_dataset_train_name + crop_suffix
        retrieval_dataset_val_name = retrieval_dataset_val_name + crop_suffix
        retrieval_dataset_test_names = [name + crop_suffix for name in retrieval_dataset_test_names]

    prepare_dataset(target_path,
                    retrieval_dataset_train_name, dataset_train,
                    retrieval_dataset_val_name, dataset_val,
                    args.iou_pos_threshold, args.iou_neg_threshold,
                    args.num_queries_image_to_image,
                    logger,
                    retrieval_dataset_test_names=retrieval_dataset_test_names, datasets_test=datasets_test,
                    num_random_crops_per_image=args.num_random_crops_per_image)
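
A minimal sketch of the command-line options this script appears to expect, reconstructed from the args accessed above; parse_opts() is defined elsewhere in the repo, so the flag types and defaults below are assumptions for illustration only.

import argparse

def parse_opts_sketch():
    # hypothetical stand-in for parse_opts(); option names mirror the attributes used in main()
    parser = argparse.ArgumentParser(description="Convert an OS2D dataset into retrieval format")
    parser.add_argument("--random-seed", type=int, default=42)
    parser.add_argument("--dataset-train", type=str)
    parser.add_argument("--dataset-train-scale", type=float, default=None)
    parser.add_argument("--dataset-val", type=str)
    parser.add_argument("--dataset-val-scale", type=float, default=None)
    parser.add_argument("--datasets-test", nargs="*", default=[])
    parser.add_argument("--datasets-test-scale", nargs="*", type=float, default=[None])
    parser.add_argument("--num-random-crops-per-image", type=int, default=0)
    parser.add_argument("--iou-pos-threshold", type=float, default=0.7)
    parser.add_argument("--iou-neg-threshold", type=float, default=0.3)
    parser.add_argument("--num-queries-image-to-image", type=int, default=0)
    return parser.parse_args()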
Example No. 2
def build_os2d_dataset_by_name(name, data_path=None):
    if data_path is None:
        data_path = cfg.DATA_DIR

    dataset = build_dataset_by_name(data_path,
                                    name,
                                    eval_scale=None,
                                    cache_images=False)
    return Os2dDataset(dataset)
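
A hypothetical usage sketch of the wrapper above; the dataset name is only an assumed example, and data_path falls back to cfg.DATA_DIR when omitted.

# hypothetical call for illustration; "grozi-val-new-cl" is an assumed dataset name
os2d_dataset = build_os2d_dataset_by_name("grozi-val-new-cl")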
Example No. 3
def main():
    target_object_size = 240
    data_path = get_data_path()
    logger = setup_logger("get_dataset_scales", None)

    for name in DATASET_LIST:
        dataset = build_dataset_by_name(data_path, name, eval_scale=None, logger_prefix="get_dataset_scales")

        image_sizes_by_id = get_image_sizes(dataset)
        average_size, median, q10, q90 = compute_average_object_size(dataset.gtboxframe, image_sizes_by_id)
        print("Average size of object = {0:0.2f} for image size = {1}".format(average_size, dataset.image_size))
        print("Median = {0:0.2f}, q10 = {1:0.2f}, q90 = {2:0.2f}".format(median, q10, q90))
        print("To get objects to size {0}, images should be of size {1:d}".format(target_object_size, int(dataset.image_size * target_object_size / median)))
Example No. 4
def build_detection_dataset_by_name(data_path,
                                    name,
                                    transforms,
                                    class_ids=None,
                                    cache_images=False,
                                    ignore_labels=False):
    dataset_os2d = build_dataset_by_name(data_path,
                                         name,
                                         cache_images=cache_images,
                                         no_image_reading=False)

    dataset = DatasetDetection(dataset_os2d,
                               transforms,
                               class_ids=class_ids,
                               ignore_labels=ignore_labels)
    return dataset
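
A hypothetical usage sketch of the detection wrapper above; the data path, dataset name, and the transforms argument are assumptions for illustration.

# hypothetical call; pass the repo's actual transform pipeline instead of None
detection_dataset = build_detection_dataset_by_name("data", "grozi-val-new-cl",
                                                    transforms=None,
                                                    cache_images=False)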
Example No. 5
def main():
    args = parse_opts()
    set_random_seed(args.random_seed)
    crop_suffix = f"-rndCropPerImage{args.num_random_crops_per_image}"

    logger_name = "retrieval_data"
    retrieval_dataset_name_suffix = "-retrieval"
    logger = setup_logger(logger_name, None)
    data_path = get_data_path()

    script_path = os.path.expanduser(os.path.dirname(
        os.path.abspath(__file__)))
    target_path = os.path.join(script_path, "cnnimageretrieval-pytorch",
                               "data")
    mkdir(target_path)

    dataset_train = build_dataset_by_name(data_path,
                                          args.dataset_train,
                                          eval_scale=args.dataset_train_scale,
                                          logger_prefix=logger_name)
    retrieval_dataset_train_name = dataset_train.get_name() + retrieval_dataset_name_suffix

    dataset_val = build_dataset_by_name(data_path,
                                        args.dataset_val,
                                        eval_scale=args.dataset_val_scale,
                                        logger_prefix=logger_name)
    retrieval_dataset_val_name = dataset_val.get_name() + retrieval_dataset_name_suffix

    # initialize before the if-block so the later checks work when no test datasets are given
    datasets_test = []
    retrieval_dataset_test_names = []
    if args.datasets_test:
        if len(args.datasets_test_scale) == 1:
            datasets_test_scale = args.datasets_test_scale * len(args.datasets_test)
        else:
            datasets_test_scale = args.datasets_test_scale
        assert len(args.datasets_test) == len(datasets_test_scale), \
            "Arg datasets-test-scale should have length 1 or the same length as datasets-test"

        for dataset_name, scale in zip(args.datasets_test,
                                       datasets_test_scale):
            dataset = build_dataset_by_name(data_path,
                                            dataset_name,
                                            eval_scale=scale,
                                            logger_prefix=logger_name)
            retrieval_dataset_test_names.append(dataset.get_name() +
                                                retrieval_dataset_name_suffix)
            datasets_test.append(dataset)

    # prepare data images for train and val
    tgt_image_path_trainval = os.path.join(target_path, "train",
                                           retrieval_dataset_train_name, "ims")
    mkdir(tgt_image_path_trainval)
    logger.info(
        f"Train set {retrieval_dataset_train_name} with no random crops")
    db_images_train = save_cropped_boxes(dataset_train,
                                         tgt_image_path_trainval,
                                         extension="")

    # create val subset: add all boxes from images that have at least one validation box (can add some boxes from train as distractors)
    logger.info(f"Val set {retrieval_dataset_val_name} with no random crops")
    db_images_val = save_cropped_boxes(dataset_val,
                                       tgt_image_path_trainval,
                                       extension="")

    # prepare data images for trainval with crops
    tgt_image_path_trainval_randcrops = os.path.join(
        target_path, "train", retrieval_dataset_train_name + crop_suffix,
        "ims")
    mkdir(tgt_image_path_trainval_randcrops)

    logger.info(
        f"Train set {retrieval_dataset_train_name} with {args.num_random_crops_per_image} crops per image"
    )
    db_images_train_randomCrops = save_cropped_boxes(
        dataset_train,
        tgt_image_path_trainval_randcrops,
        extension="",
        num_random_crops_per_image=args.num_random_crops_per_image)

    # create val subset: add all boxes from images that have at least one validation box (can add some boxes from train as distractors)
    logger.info(
        f"Val set {retrieval_dataset_val_name} with {args.num_random_crops_per_image} crops per image"
    )
    db_images_val_randomCrops = save_cropped_boxes(
        dataset_val,
        tgt_image_path_trainval_randcrops,
        extension="",
        num_random_crops_per_image=args.num_random_crops_per_image)

    # prepare data images for test
    dbs_images_test = {}
    if datasets_test:
        for dataset_test, dataset_name in zip(datasets_test,
                                              retrieval_dataset_test_names):
            tgt_image_path_test = os.path.join(
                target_path, "test", dataset_name, "jpg"
            )  # the folder name should always be "test" - from cirtorch
            mkdir(tgt_image_path_test)
            logger.info(f"Eval dataset: {dataset_name}")
            dbs_images_test[dataset_name] = save_cropped_boxes(
                dataset_test, tgt_image_path_test)

            # prepare data images for test with random crops
            tgt_image_path_test = os.path.join(
                target_path, "test", dataset_name + crop_suffix, "jpg"
            )  # the folder name should always be "test" - from cirtorch
            mkdir(tgt_image_path_test)
            logger.info(f"Eval dataset: {dataset_name + crop_suffix}")
            dbs_images_test[dataset_name + crop_suffix] = save_cropped_boxes(
                dataset_test,
                tgt_image_path_test,
                num_random_crops_per_image=args.num_random_crops_per_image)

    # save GT images from train
    db_classes_train = save_class_images(
        dataset_train,
        os.path.join(target_path, "train", retrieval_dataset_train_name,
                     "ims"),
        extension="")
    db_classes_train_randomCrops = save_class_images(
        dataset_train,
        os.path.join(target_path, "train",
                     retrieval_dataset_train_name + crop_suffix, "ims"),
        extension="")

    # save GT images from val
    db_classes_val = save_class_images(
        dataset_val,
        os.path.join(target_path, "train", retrieval_dataset_train_name,
                     "ims"),
        extension="")
    db_classes_val_randomCrops = save_class_images(
        dataset_val,
        os.path.join(target_path, "train",
                     retrieval_dataset_train_name + crop_suffix, "ims"),
        extension="")

    # save GT images for testing
    dbs_classes_test = {}
    if args.datasets_test:
        for dataset_test, dataset_name in zip(datasets_test,
                                              retrieval_dataset_test_names):
            dbs_classes_test[dataset_name] = save_class_images(
                dataset_test,
                os.path.join(target_path, "test", dataset_name, "jpg"))
            dbs_classes_test[dataset_name + crop_suffix] = save_class_images(
                dataset_test,
                os.path.join(target_path, "test", dataset_name + crop_suffix,
                             "jpg"))

    # merge databases
    logger.info(
        f"Processing trainval set from {retrieval_dataset_train_name} and {retrieval_dataset_val_name}"
    )
    db_train = create_train_database_queries(
        db_images_train,
        db_classes_train,
        iou_pos_threshold=args.iou_pos_threshold,
        iou_neg_threshold=args.iou_neg_threshold,
        logger=logger)
    db_val = create_train_database_queries(
        db_images_val,
        db_classes_val,
        iou_pos_threshold=args.iou_pos_threshold,
        iou_neg_threshold=args.iou_neg_threshold,
        logger=logger)

    logger.info(
        f"Processing trainval set from {retrieval_dataset_train_name} and {retrieval_dataset_val_name} with {args.num_random_crops_per_image} random crops"
    )
    db_train_randomCrops = create_train_database_queries(
        db_images_train_randomCrops,
        db_classes_train_randomCrops,
        iou_pos_threshold=args.iou_pos_threshold,
        iou_neg_threshold=args.iou_neg_threshold,
        logger=logger)
    db_val_randomCrops = create_train_database_queries(
        db_images_val_randomCrops,
        db_classes_val_randomCrops,
        iou_pos_threshold=args.iou_pos_threshold,
        iou_neg_threshold=args.iou_neg_threshold,
        logger=logger)

    dbs_test = {}
    if args.datasets_test:
        for dataset_name in retrieval_dataset_test_names:
            logger.info(f"Processing test set {dataset_name}")
            dbs_test[dataset_name] = create_test_database_queries(
                dbs_images_test[dataset_name],
                dbs_classes_test[dataset_name],
                iou_pos_threshold=args.iou_pos_threshold,
                iou_neg_threshold=args.iou_neg_threshold,
                logger=logger)

            logger.info(f"Processing test set {dataset_name + crop_suffix}")
            dbs_test[dataset_name + crop_suffix] = create_test_database_queries(
                dbs_images_test[dataset_name + crop_suffix],
                dbs_classes_test[dataset_name + crop_suffix],
                iou_pos_threshold=args.iou_pos_threshold,
                iou_neg_threshold=args.iou_neg_threshold,
                logger=logger)

    # save trainval to disk
    db_trainval = {"train": db_train, "val": db_val}
    db_fn = os.path.join(target_path, "train", retrieval_dataset_train_name,
                         f"{retrieval_dataset_train_name}.pkl")
    with open(db_fn, "wb") as f:
        pickle.dump(db_trainval, f)

    # save train separately for whitening
    db_fn = os.path.join(target_path, "train", retrieval_dataset_train_name,
                         f"{retrieval_dataset_train_name}-whiten.pkl")
    with open(db_fn, "wb") as f:
        pickle.dump(db_train, f)

    # save trainval with random crops to disk
    db_trainval_randomCrops = {
        "train": db_train_randomCrops,
        "val": db_val_randomCrops
    }
    db_fn = os.path.join(target_path, "train",
                         retrieval_dataset_train_name + crop_suffix,
                         f"{retrieval_dataset_train_name}{crop_suffix}.pkl")
    with open(db_fn, "wb") as f:
        pickle.dump(db_trainval_randomCrops, f)

    db_fn = os.path.join(target_path, "train",
                         retrieval_dataset_train_name + crop_suffix,
                         f"{retrieval_dataset_train_name}{crop_suffix}-whiten.pkl")
    with open(db_fn, "wb") as f:
        pickle.dump(db_train_randomCrops, f)

    # save test to disk
    if args.datasets_test:
        for dataset_name in retrieval_dataset_test_names:
            db_fn = os.path.join(target_path, "test", dataset_name,
                                 f"gnd_{dataset_name}.pkl")
            with open(db_fn, "wb") as f:
                pickle.dump(dbs_test[dataset_name], f)

            # save test with random crops to disk
            db_fn = os.path.join(target_path, "test",
                                 dataset_name + crop_suffix,
                                 f"gnd_{dataset_name}{crop_suffix}.pkl")
            with open(db_fn, "wb") as f:
                pickle.dump(dbs_test[dataset_name + crop_suffix], f)
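
A minimal sketch of reading one of the pickled test databases back, e.g. for a quick sanity check; the helper name is hypothetical and the file must have been written by the script above.

import os
import pickle

def load_test_db(target_path, dataset_name):
    # read the ground-truth pickle written for a given test dataset
    db_fn = os.path.join(target_path, "test", dataset_name, f"gnd_{dataset_name}.pkl")
    with open(db_fn, "rb") as f:
        return pickle.load(f)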