# Exemplo n.º 1
def plain_register_dataset():
    """Register the 2019 train/val splits and attach their COCO metadata.

    Relies on module-level TRAIN_JSON/TRAIN_PATH/VAL_JSON/VAL_PATH and
    CATEGORIES_NAMES.
    """
    splits = (
        ("train_2019", TRAIN_JSON, TRAIN_PATH),
        ("val_2019", VAL_JSON, VAL_PATH),
    )
    for split_name, ann_json, img_root in splits:
        # Bind loop variables as lambda defaults so the lazy loader does not
        # suffer from late binding.
        DatasetCatalog.register(
            split_name,
            lambda j=ann_json, r=img_root, n=split_name: load_coco_json(j, r, n))
        MetadataCatalog.get(split_name).set(thing_classes=CATEGORIES_NAMES,
                                            json_file=ann_json,
                                            image_root=img_root)
def plain_register_dataset():
    """Register custom COCO-format train/val splits under coco_my_* names.

    thing_classes is deliberately not set: the original author noted that
    Chinese class names cannot be displayed, so they are left disabled.
    """
    for ds_name, ann_json, img_dir in (("coco_my_train", TRAIN_JSON, TRAIN_PATH),
                                       ("coco_my_val", VAL_JSON, VAL_PATH)):
        # Lazy loader; defaults bind the loop variables at definition time.
        DatasetCatalog.register(
            ds_name, lambda j=ann_json, r=img_dir: load_coco_json(j, r))
        MetadataCatalog.get(ds_name).set(
            evaluator_type='coco',  # evaluate with the COCO protocol
            json_file=ann_json,
            image_root=img_dir)
# Exemplo n.º 3
def plain_register_dataset():
    """Register the binary (pos/neg) CCDP 2020 train/val splits."""
    for split_name, ann_json, img_root in (
            ("CCDP_train_2020", TRAIN_JSON, TRAIN_PATH),
            ("CCDP_val_2020", VAL_JSON, VAL_PATH)):
        # Defaults capture the loop variables so each lazy loader is independent.
        DatasetCatalog.register(
            split_name,
            lambda j=ann_json, r=img_root, n=split_name: load_coco_json(j, r, n))
        MetadataCatalog.get(split_name).set(thing_classes=["pos", "neg"],
                                            json_file=ann_json,
                                            image_root=img_root)
# Exemplo n.º 4
def plain_register_dataset():
    """Register the coco_my_train / coco_my_val splits with CLASS_NAMES.

    Original note (translated): enabling thing_classes is optional but Chinese
    class names cannot be displayed, so be careful when they are non-ASCII.
    """
    for ds_name, ann_json, img_dir in (("coco_my_train", TRAIN_JSON, TRAIN_PATH),
                                       ("coco_my_val", VAL_JSON, VAL_PATH)):
        DatasetCatalog.register(
            ds_name, lambda j=ann_json, r=img_dir: load_coco_json(j, r))
        MetadataCatalog.get(ds_name).set(thing_classes=CLASS_NAMES,
                                         evaluator_type='coco',  # COCO-style evaluation
                                         json_file=ann_json,
                                         image_root=img_dir)
# Exemplo n.º 5
def plain_register_dataset():
    """Register the iecas THz 2019 train/val splits (classes: gun, phone)."""
    for split_name, ann_json, img_root in (
            ("iecas_THz_2019_train", TRAIN_JSON, TRAIN_PATH),
            ("iecas_THz_2019_val", VAL_JSON, VAL_PATH)):
        # Bind loop variables via defaults so each lambda loads its own split.
        DatasetCatalog.register(
            split_name,
            lambda j=ann_json, r=img_root, n=split_name: load_coco_json(j, r, n))
        MetadataCatalog.get(split_name).set(
            thing_classes=["gun", "phone"],
            json_file=ann_json,
            image_root=img_root)
# Exemplo n.º 6
def register_coco_instances(name, metadata, json_file, image_root):
    """
    Register a dataset stored in COCO's json annotation format for instance
    detection, instance segmentation and keypoint detection (Type 1 and 2 in
    http://cocodataset.org/#format-data; `instances*.json` and
    `person_keypoints*.json` in the dataset).

    Use this as a template when registering new datasets of the same format.

    Args:
        name (str): the name that identifies a dataset, e.g. "coco_2014_train".
        metadata (dict): extra metadata associated with this dataset.  You can
            leave it as an empty dict.
        json_file (str): path to the json instance annotation file.
        image_root (str or path-like): directory which contains all the images.
    """
    assert isinstance(name, str), name
    assert isinstance(json_file, (str, os.PathLike)), json_file
    assert isinstance(image_root, (str, os.PathLike)), image_root

    def _load():
        # Lazily parse the json; "blind" is preserved as an extra
        # per-annotation key.
        return load_coco_json(json_file, image_root, name,
                              extra_annotation_keys=["blind"])

    # 1. register a function which returns dicts
    DatasetCatalog.register(name, _load)

    # 2. Optionally, add metadata about this dataset,
    # since they might be useful in evaluation, visualization or logging
    MetadataCatalog.get(name).set(json_file=json_file,
                                  image_root=image_root,
                                  evaluator_type="coco",
                                  **metadata)
def register_coco_instances_with_points(name, metadata, json_file, image_root):
    """
    Register a dataset in COCO's json annotation format for instance
    segmentation with point annotation.

    The point annotation json does not have a "segmentation" field; instead it
    carries "point_coords" and "point_labels" fields, which are loaded as
    extra annotation keys.

    Args:
        name (str): the name that identifies a dataset, e.g. "coco_2014_train".
        metadata (dict): extra metadata associated with this dataset.  You can
            leave it as an empty dict.
        json_file (str): path to the json instance annotation file.
        image_root (str or path-like): directory which contains all the images.
    """
    assert isinstance(name, str), name
    assert isinstance(json_file, (str, os.PathLike)), json_file
    assert isinstance(image_root, (str, os.PathLike)), image_root

    def _load():
        # Keep the point-supervision fields on each annotation dict.
        return load_coco_json(json_file, image_root, name,
                              ["point_coords", "point_labels"])

    # 1. register a function which returns dicts
    DatasetCatalog.register(name, _load)

    # 2. Optionally, add metadata about this dataset,
    # since they might be useful in evaluation, visualization or logging
    MetadataCatalog.get(name).set(json_file=json_file,
                                  image_root=image_root,
                                  evaluator_type="coco",
                                  **metadata)
# Exemplo n.º 8
def custom_dataset_dict(json_file, img_root, name, subset='train'):
    """Load COCO-format dataset dicts for *name* without touching the catalogs.

    Behaves like register_coco_instances' loader but returns the dicts
    directly.  The *subset* argument is currently unused; any record-filtering
    logic (e.g. omitting classes) belongs between loading and returning.
    """
    records = load_coco_json(json_file, img_root, name)
    # (insert filtering of `records` here if needed)
    return records
# Exemplo n.º 9
    def _register_datasets(self, ):
        """Register the base-training query/support splits with detectron2's catalogs."""
        # Query-train and support both reuse the class list from the copied
        # COCO-2017 train metadata.
        base_classes = MetadataCatalog.get("coco_2017_train_copy").thing_classes

        # Query: Train
        DatasetCatalog.register(
            "coco_note_base_training_query_train",
            lambda: self.get_query_set_base_training_train())
        MetadataCatalog.get("coco_note_base_training_query_train").set(
            thing_classes=base_classes, evaluator_type='coco')

        # Support: Train + Validation
        DatasetCatalog.register("coco_note_base_training_support",
                                lambda: self.get_support_set_base_training())
        MetadataCatalog.get("coco_note_base_training_support").set(
            thing_classes=base_classes, evaluator_type='coco')

        # Query: Validation — loads a fixed MSCOCO-2017 val annotation file
        # from an absolute scratch path.
        val_json = ("/scratch/ssd001/home/skhandel/FewshotDetection/WSASOD/"
                    "data/data_utils/data/MSCOCO2017/annotations/instances_val2017.json")
        val_images = ("/scratch/ssd001/home/skhandel/FewshotDetection/WSASOD/"
                      "data/data_utils/data/MSCOCO2017/images/val2017/")
        DatasetCatalog.register(
            "coco_note_base_training_query_val",
            lambda: load_coco_json(val_json, val_images,
                                   "coco_note_base_training_query_val"))
        MetadataCatalog.get("coco_note_base_training_query_val").set(
            evaluator_type='coco', json_file=val_json)
# Exemplo n.º 10
def plain_register_dataset():
    """Register the rectal_tumor train/val splits with COCO evaluation metadata."""
    for ds_name, ann_json, img_dir in (
            ("rectal_tumor_train", TRAIN_JSON, TRAIN_PATH),
            ("rectal_tumor_val", VAL_JSON, VAL_PATH)):
        # Defaults bind the loop variables so each lazy loader is independent.
        DatasetCatalog.register(
            ds_name, lambda j=ann_json, r=img_dir: load_coco_json(j, r))
        MetadataCatalog.get(ds_name).set(thing_classes=CLASS_NAMES,
                                         evaluator_type='coco',
                                         json_file=ann_json,
                                         image_root=img_dir)
# Exemplo n.º 11
def main():
    """CLI entry point: load a dataset (coco or voc) and print its class histogram."""
    parser = argparse.ArgumentParser(
        description='This script support analyzing data.')
    parser.add_argument('--data_dir', type=str, default=None, required=True,
                        help='path to annotation files directory.')
    parser.add_argument('--format', type=str, default='coco',
                        help='format to analyze.(coco or voc)')
    args = parser.parse_args()

    # Route detectron2's logger to stdout at DEBUG verbosity.
    logger = logging.getLogger('detectron2')
    logger.setLevel(logging.DEBUG)
    stream_handler = logging.StreamHandler(stream=sys.stdout)
    stream_handler.setLevel(logging.DEBUG)
    logger.addHandler(stream_handler)

    if args.format == 'coco':
        ann_path = os.path.join(args.data_dir, 'annotations/train.json')
        img_path = os.path.join(args.data_dir, 'images')
        # '1' is the throwaway dataset name used for the metadata lookup below.
        dataset_dicts = load_coco_json(ann_path, img_path, '1')
        class_names = MetadataCatalog.get('1').thing_classes
    elif args.format == 'voc':
        dataset_dicts, class_names = load_voc_instances(args.data_dir)
    else:
        raise Exception("only support coco or voc format")

    print_instances_class_histogram(dataset_dicts, class_names)
# Exemplo n.º 12
def register_cityscapes(root="datasets"):
    """Register cityscapes (converted to COCO format) train/val instance-seg splits.

    Assumes pre-defined datasets live under *root* (default `./datasets`).
    """
    for split in ("train", "val"):
        ds_name = 'cityscapes_coco_fine_instance_seg_' + split
        # Bind split/name via defaults so the deferred loader is correct.
        DatasetCatalog.register(
            ds_name,
            lambda s=split, n=ds_name: load_coco_json(
                os.path.join(root, 'cityscape-coco/coco_ann/instance_%s.json' % s),
                os.path.join(root, 'cityscape-coco/coco_img/%s' % s),
                n))
        MetadataCatalog.get(ds_name).set(evaluator_type="coco")
# Exemplo n.º 13
def main():
    """Train a detector on the fruit dataset, then visualize predictions on
    three random validation images.

    Relies on module-level config: ds_train, ds_validation, path_ds,
    path_ds_meta, ds_file_base, path_config, model_weights, path_output.
    """
    setup_logger()

    for d in [ds_train, ds_validation]:
        # BUG FIX: the original lambda bound `d=d` but then hard-coded
        # ds_train for both paths, registering the *training* annotations and
        # images under the validation name too. Use the bound split name.
        DatasetCatalog.register(
            os.path.join(path_ds, d),
            lambda d=d: load_coco_json(os.path.join(path_ds_meta, d + ds_file_base),
                                       os.path.join(path_ds, d)))
        # Update class names
        MetadataCatalog.get(os.path.join(path_ds, d)).set(
            thing_classes=["apple_green", "apple_red", "clementine",
                           "cucumber", "garlic", "lemon", "onion"])
    project_metadata = MetadataCatalog.get(os.path.join(path_ds, ds_train))

    cfg = get_cfg()
    cfg.merge_from_file(path_config)
    cfg.DATASETS.TRAIN = (os.path.join(path_ds, ds_train), )
    cfg.DATASETS.TEST = ()   # no metrics implemented for this dataset
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = model_weights  # initialize from model zoo
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR (original: 0.00025)
    cfg.SOLVER.MAX_ITER = 1000  # you may need to train longer for a practical dataset
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128   # faster, and good enough for this small dataset (default: 512)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 7  # Update number of classes
    cfg.OUTPUT_DIR = path_output

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Switch to the freshly trained weights for inference.
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7   # Set the testing threshold for this model
    # BUG FIX: was os.path.join(path_ds + ds_validation) — string concatenation
    # inside a one-argument join can never match the registered dataset key
    # os.path.join(path_ds, d) unless path_ds happens to end in a separator.
    cfg.DATASETS.TEST = (os.path.join(path_ds, ds_validation), )
    predictor = DefaultPredictor(cfg)

    dataset_dicts = load_coco_json(os.path.join(path_ds_meta, ds_validation + ds_file_base),
                                   os.path.join(path_ds, ds_validation))
    for d in random.sample(dataset_dicts, 3):  # Show n validation images
        im = cv2.imread(d["file_name"])
        outputs = predictor(im)
        v = Visualizer(im[:, :, ::-1], metadata=project_metadata, scale=1)
        v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
        cv2.imshow('object_detection', v.get_image()[:, :, ::-1])
        cv2.waitKey()
# Exemplo n.º 14
def checkout_dataset_annotation(name="val_2019"):
    """Interactively display ground-truth annotations drawn on each image.

    NOTE(review): this loads TRAIN_JSON/TRAIN_PATH even though the default
    *name* refers to the validation split — the metadata lookup and the data
    loaded may disagree; confirm against the registration code.
    """
    records = load_coco_json(TRAIN_JSON, TRAIN_PATH, name)
    for record in records:
        image = cv2.imread(record["file_name"])
        drawer = Visualizer(image[:, :, ::-1],
                            metadata=MetadataCatalog.get(name),
                            scale=1.5)
        rendered = drawer.draw_dataset_dict(record)
        cv2.imshow('show', rendered.get_image()[:, :, ::-1])
        cv2.waitKey(0)  # wait for a key press before the next image
# Exemplo n.º 15
def register_dataset_instances(name, json_file, image_root):
    """Register *name* with DatasetCatalog and attach COCO metadata.

    Args:
        name: dataset identifier used by both catalogs.
        json_file: path to the COCO-format annotation json.
        image_root: directory holding the images.
    """
    def _loader():
        # Deferred: parsed only when the dataset is actually requested.
        return load_coco_json(json_file, image_root, name)

    DatasetCatalog.register(name, _loader)
    MetadataCatalog.get(name).set(json_file=json_file,
                                  image_root=image_root,
                                  evaluator_type="coco")
# Exemplo n.º 16
def checkout_dataset_annotation(json_file, dataset_dir, name="MHPv1_foot"):
    """Display every image of *name* with its ground-truth annotations drawn.

    Args:
        json_file: COCO-format annotation json to load.
        dataset_dir: image root directory.
        name: registered dataset name used for the metadata lookup.
    """
    records = load_coco_json(json_file, dataset_dir, name)
    for record in records:
        image = cv2.imread(record["file_name"])
        drawer = Visualizer(image[:, :, ::-1],
                            metadata=MetadataCatalog.get(name),
                            scale=1.5)
        rendered = drawer.draw_dataset_dict(record)
        cv2.imshow('show', rendered.get_image()[:, :, ::-1])
        cv2.waitKey(0)  # block until a key press
# Exemplo n.º 17
def checkout_dataset_annotation(name="coco_my_train"):
    """Print every training record and display it with annotations drawn."""
    records = load_coco_json(TRAIN_JSON, TRAIN_PATH)
    print(len(records))
    for idx, record in enumerate(records, 0):
        print(record)
        image = cv2.imread(record["file_name"])
        drawer = Visualizer(image[:, :, ::-1],
                            metadata=MetadataCatalog.get(name), scale=1.5)
        rendered = drawer.draw_dataset_dict(record)
        cv2.imshow('show', rendered.get_image()[:, :, ::-1])
        cv2.waitKey(0)  # wait for a key press before the next image
# Exemplo n.º 18
def register_isaid():
    """Register the iSAID train/val splits (annotations are in COCO format)."""
    data_dir = "/home/an1/detectron2/datasets/isaid"

    for split in ["train", "val"]:
        ds_name = "isaid_" + split
        # `d=split` binds the split at definition time for the lazy loader.
        DatasetCatalog.register(
            ds_name,
            lambda d=split: load_coco_json(
                json_file=data_dir + '/annotations/instances_{}.json'.format(d),
                image_root=data_dir + '/images/{}/'.format(d),
                dataset_name="isaid_{}".format(d)))
        # Record where the raw annotation file lives on the metadata object.
        MetadataCatalog.get(ds_name).json_file = os.path.join(
            data_dir, "annotations", "instances_{}.json".format(split))
# Exemplo n.º 19
def checkout_dataset_annotation(name="rectal_tumor_val"):
    """Render ground-truth annotations for up to 201 validation images into ./out."""
    records = load_coco_json(VAL_JSON, VAL_PATH)
    print(len(records))
    meta = MetadataCatalog.get(name)
    for idx, record in enumerate(records, 0):
        image = cv2.imread(record["file_name"])
        drawer = Visualizer(image[:, :, ::-1], metadata=meta, scale=1)
        rendered = drawer.draw_dataset_dict(record)
        # Save instead of showing; the last 14 chars of the path serve as the
        # output file name.
        cv2.imwrite('out/' + record['file_name'][-14:],
                    rendered.get_image()[:, :, ::-1])
        if idx == 200:  # cap the number of rendered images
            break
# Exemplo n.º 20
def register_distill_coco_instances(name, metadata, json_file, image_root):
    """Register a COCO-format dataset whose annotations carry a 'score' field.

    Same contract as detectron2's register_coco_instances, but the loader
    keeps the extra per-annotation 'score' key (used for distillation).
    """
    assert isinstance(name, str), name
    assert isinstance(json_file, (str, os.PathLike)), json_file
    assert isinstance(image_root, (str, os.PathLike)), image_root

    def _loader():
        # Preserve per-box scores by requesting the extra 'score' key.
        return load_coco_json(json_file, image_root, name,
                              extra_annotation_keys=['score'])

    # 1. register a function which returns dicts
    DatasetCatalog.register(name, _loader)

    # 2. Optionally, add metadata about this dataset,
    # since they might be useful in evaluation, visualization or logging
    MetadataCatalog.get(name).set(json_file=json_file,
                                  image_root=image_root,
                                  evaluator_type="coco",
                                  **metadata)
    def test(self):
        """Round-trip a dummy dataset through COCO json: register it, dump it
        to a temp json file, reload it, and verify the decoded segmentation
        mask equals the original."""
        # Make a dummy dataset.
        mask = make_mask()
        DatasetCatalog.register("test_dataset",
                                lambda: make_dataset_dicts(mask))
        MetadataCatalog.get("test_dataset").set(thing_classes=["test_label"])

        # Dump to json.
        json_dict = convert_to_coco_dict("test_dataset")
        with tempfile.TemporaryDirectory() as tmpdir:
            json_file_name = os.path.join(tmpdir, "test.json")
            with open(json_file_name, "w") as f:
                json.dump(json_dict, f)
            # Load from json. Empty image_root is fine: only annotations are
            # inspected below, not pixel data.
            dicts = load_coco_json(json_file_name, "")

        # Check the loaded mask matches the original.
        anno = dicts[0]["annotations"][0]
        loaded_mask = pycocotools.mask.decode(anno["segmentation"])
        self.assertTrue(np.array_equal(loaded_mask, mask))
# Exemplo n.º 22
def checkout_dataset_annotation(name="coco_my_train"):
    """Draw ground-truth annotations for every training image and save to ./out."""
    records = load_coco_json(TRAIN_JSON, TRAIN_PATH)
    for idx, record in enumerate(records, 0):
        img_path = record["file_name"]
        print("img_path: {}".format(img_path))
        image = cv2.imread(img_path)

        drawer = Visualizer(image[:, :, ::-1],
                            metadata=MetadataCatalog.get(name),
                            scale=1.5)
        rendered = drawer.draw_dataset_dict(record)

        # Save (rather than display) the visualization, named after the
        # source image's basename.
        cv2.imwrite('./out/' + str(os.path.basename(img_path)),
                    rendered.get_image()[:, :, ::-1])
# Exemplo n.º 23
# Top-level inference script: configure a Faster R-CNN model, load trained
# weights, and display predictions for every validation image.
# Change model by selecting a new one from the model zoo and updating following 2 lines
# https://github.com/facebookresearch/detectron2/blob/master/MODEL_ZOO.md
path_config = os.path.join("detectron2", "detectron2_repo", "configs", "COCO-Detection",
                           "faster_rcnn_R_101_FPN_3x.yaml")
model_weights = "detectron2://COCO-Detection/faster_rcnn_R_101_FPN_3x/137851257/model_final_f6e8b1.pkl"

# Update class names
# NOTE(review): this metadata key is built with string concatenation
# (path_ds + ds_train) while cfg.DATASETS.TEST below uses
# os.path.join(path_ds, ds_validation) — confirm both forms resolve to the
# same registered dataset names.
project_metadata = MetadataCatalog.get(path_ds + ds_train).set(thing_classes=["apple_green", "apple_red", "clementine",
                                                                              "cucumber", "garlic", "lemon", "onion"]
                                                               )
cfg = get_cfg()
cfg.merge_from_file(path_config)
cfg.OUTPUT_DIR = path_output
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 7
# Use previously trained weights; keep only confident detections.
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7   # Set the testing threshold for this model
cfg.DATASETS.TEST = (os.path.join(path_ds, ds_validation), )
predictor = DefaultPredictor(cfg)

# Run inference over the whole validation set and show each result in a window.
dataset_dicts = load_coco_json(os.path.join(path_ds_meta, ds_validation + ds_file_base),
                               os.path.join(path_ds, ds_validation))
# for d in random.sample(dataset_dicts, 3):
for d in dataset_dicts:
    im = cv2.imread(d["file_name"])
    outputs = predictor(im)
    v = Visualizer(im[:, :, ::-1], metadata=project_metadata, scale=1)
    v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    cv2.imshow('object_detection', v.get_image()[:, :, ::-1])
    cv2.waitKey()
from detectron2.structures import Boxes, BoxMode, Instances
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
from detectron2.data.datasets.coco import load_coco_json

# Dataset layout: one shared image directory for train and val; annotation
# jsons appear (from their filenames) to be VOC 2019 splits converted to
# COCO format — TODO confirm.
DATASET_ROOT = '/home/dasen/chennan/detectron2/data'
ANN_ROOT = os.path.join(DATASET_ROOT, 'annotations')

TRAIN_PATH = os.path.join(DATASET_ROOT, 'images')
VAL_PATH = os.path.join(DATASET_ROOT, 'images')

TRAIN_JSON = os.path.join(ANN_ROOT, 'voc_2019_trainval.json')
VAL_JSON = os.path.join(ANN_ROOT, 'voc_2019_test.json')

# Lazily register the validation split under the name "coco_my_val".
DatasetCatalog.register("coco_my_val",
                        lambda: load_coco_json(VAL_JSON, VAL_PATH))


def create_instances(predictions, image_size):
    ret = Instances(image_size)

    score = np.asarray([x["score"] for x in predictions])
    chosen = (score > args.conf_threshold).nonzero()[0]
    score = score[chosen]
    bbox = np.asarray([predictions[i]["bbox"] for i in chosen])
    bbox = BoxMode.convert(bbox, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)

    labels = np.asarray(
        [dataset_id_map(predictions[i]["category_id"]) for i in chosen])

    ret.scores = score