Ejemplo n.º 1
0
def plain_register_dataset():
    """Register the football train/val splits with their class metadata."""
    splits = (("football", TRAIN_JSON, TRAIN_PATH),
              ("footballval", VAL_JSON, VAL_PATH))
    for name, json_file, image_root in splits:
        # Bind the loop variables as lambda defaults to avoid late binding.
        DatasetCatalog.register(
            name,
            lambda json_file=json_file, image_root=image_root, name=name:
            load_coco_json(json_file, image_root, name))
        MetadataCatalog.get(name).set(thing_classes=["ball", "man"],
                                      json_file=json_file,
                                      image_root=image_root)
Ejemplo n.º 2
0
def plain_register_dataset():
    """Register the 2019 train/val COCO splits and attach their metadata."""

    def _register(name, json_path, img_root):
        # Lazy loader plus metadata registration in one place.
        DatasetCatalog.register(
            name, lambda: load_coco_json(json_path, img_root, name))
        MetadataCatalog.get(name).set(thing_classes=CATEGORIES_NAMES,
                                      json_file=json_path,
                                      image_root=img_root)

    _register("train_2019", TRAIN_JSON, TRAIN_PATH)
    _register("val_2019", VAL_JSON, VAL_PATH)
Ejemplo n.º 3
0
def setup_register_load_inputs(cfg):
    """Register the Denmark train/val COCO datasets and load their dicts.

    Args:
        cfg: a detectron2 config whose ``DATASETS.TRAIN[0]`` and
            ``DATASETS.VALIDATION[0]`` give the dataset names to register.

    Returns:
        tuple: ``(train_json, val_json)`` — the lists of dataset dicts
        returned by ``load_coco_json`` for the two splits.
    """
    outpath = Path(r'/datadrive/denmark-data/preprocessed')
    im_tiles_path = outpath / r'images/train2016'
    val_tiles_path = outpath / r'images/val2016'
    train_coco_instances_path = outpath / r'annotations/instances_train2016.json'
    val_coco_instances_path = outpath / r'annotations/instances_val2016.json'

    # Register each val and test set if there are more than one.
    register_coco_instances(cfg.DATASETS.TRAIN[0], {}, str(train_coco_instances_path), str(im_tiles_path))
    register_coco_instances(cfg.DATASETS.VALIDATION[0], {}, str(val_coco_instances_path), str(val_tiles_path))
#     register_coco_instances(cfg.DATASETS.TEST[0], {}, test_coco_instances_path, str(next(tiles_path.glob("*jpeg*"))))

    # Bug fix: these loaded dicts were previously computed and silently
    # discarded; return them so callers can inspect or reuse them.
    train_json = load_coco_json(str(train_coco_instances_path), str(im_tiles_path))
    val_json = load_coco_json(str(val_coco_instances_path), str(val_tiles_path))
    return train_json, val_json
Ejemplo n.º 4
0
def plain_register_dataset():
    """Register the IECAS THz 2019 train and val splits (classes: gun, phone)."""
    for split, json_path, root in (
            ("iecas_THz_2019_train", TRAIN_JSON, TRAIN_PATH),
            ("iecas_THz_2019_val", VAL_JSON, VAL_PATH),
    ):
        # Defaults pin the current loop values inside the lazy loader.
        DatasetCatalog.register(
            split,
            lambda json_path=json_path, root=root, split=split:
            load_coco_json(json_path, root, split))
        MetadataCatalog.get(split).set(
            thing_classes=["gun", "phone"],
            json_file=json_path,
            image_root=root)
Ejemplo n.º 5
0
def register_coco_instances(name, metadata, json_file, image_root):
    """Register a COCO-json dataset for instance/keypoint tasks.

    Supports type 1 and 2 of http://cocodataset.org/#format-data, i.e.
    ``instances*.json`` and ``person_keypoints*.json`` files. Use this as a
    template when registering new datasets.

    Args:
        name (str): identifier of the dataset, e.g. "coco_2014_train".
        metadata (dict): extra metadata for the dataset; may be empty.
        json_file (str): path to the json instance annotation file.
        image_root (str): directory which contains all the images.
    """
    # 1. A callable producing the dataset dicts; evaluated lazily on first use.
    loader = lambda: load_coco_json(json_file, image_root, name)
    DatasetCatalog.register(name, loader)

    # 2. Attach metadata, which is useful for evaluation, visualization
    # and logging.
    MetadataCatalog.get(name).set(evaluator_type="coco",
                                  json_file=json_file,
                                  image_root=image_root,
                                  **metadata)
Ejemplo n.º 6
0
 def load_annotations():
     """Load the COCO json for this dataset, keeping the DensePose fields."""
     # extra_annotation_keys preserves the dp_* entries on each annotation.
     return load_coco_json(json_file=annotations_fpath,
                           image_root=images_root,
                           dataset_name=dataset_data.name,
                           extra_annotation_keys=DENSEPOSE_KEYS)
Ejemplo n.º 7
0
 def train_coco_data(self, coco_json):
     """Register *coco_json* as a one-class mask dataset and train on it.

     Args:
         coco_json (str): path to a COCO-format annotation file; images are
             read from ``self.train_data_path``.
     """
     dataset_name = "mask_train_data"
     # Bug fix: DatasetCatalog.register raises if the name already exists,
     # so guard against re-registration when this method is called twice.
     if dataset_name not in DatasetCatalog.list():
         DatasetCatalog.register(
             dataset_name,
             lambda: load_coco_json(json_file=coco_json,
                                    image_root=self.train_data_path))
     MetadataCatalog.get(dataset_name).set(
         json_file=coco_json,
         image_root=self.train_data_path,
         evaluator_type="coco",
         thing_classes=["rightmask"],
         # COCO category id 1 maps to contiguous class index 0.
         thing_dataset_id_to_contiguous_id={1: 0})
     cfg = get_cfg()
     cfg.merge_from_file(
         "/home/detectron2/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"
     )
     cfg.DATASETS.TRAIN = (dataset_name, )
     cfg.DATASETS.TEST = (dataset_name, )
     cfg.DATALOADER.NUM_WORKERS = 2
     cfg.MODEL.WEIGHTS = "/home/detectron2/train_data/model_final_280758.pkl"
     cfg.SOLVER.IMS_PER_BATCH = 2
     cfg.SOLVER.BASE_LR = 0.01  # learning rate
     cfg.SOLVER.MAX_ITER = 300  # maximum number of iterations
     cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
     cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
     os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
     print("模型存储路径" + cfg.OUTPUT_DIR)
     trainer = DefaultTrainer(cfg)
     trainer.resume_or_load(resume=False)
     trainer.train()  # start training
Ejemplo n.º 8
0
def train_model(dataset):
    """Export *dataset* to COCO, fine-tune a Mask R-CNN on it, return a Model.

    Args:
        dataset: a dataset object exposing ``categories`` (list of dicts with
            a ``name`` key) and accepted by ``export_dataset``.

    Returns:
        Model: wrapper around the trained ``DefaultPredictor``.
    """
    # Export the dataset to COCO format.
    export_file, image_dir = export_dataset(dataset)

    # Register it as a COCO dataset in the Detectron2 framework.
    # Bug fix: a bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # catch Exception instead (register raises when the name already exists).
    try:
        register_coco_instances('my_dataset', {}, export_file, image_dir)
    except Exception:
        print('Dataset was already registered')
    MetadataCatalog.get('my_dataset').set(
        thing_classes=[c['name'] for c in dataset.categories])
    segments_metadata = MetadataCatalog.get('my_dataset')
    print(segments_metadata)

    # Configure the training run.
    cfg = get_cfg()
    cfg.merge_from_file(
        model_zoo.get_config_file(
            'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml'))
    cfg.DATASETS.TRAIN = ('my_dataset', )
    cfg.DATASETS.TEST = ()
    cfg.INPUT.MASK_FORMAT = 'bitmask'
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml'
    )  # Let training initialize from model zoo
    cfg.SOLVER.IMS_PER_BATCH = 4  # 4
    cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
    cfg.SOLVER.MAX_ITER = 6000  # 300 iterations seems good enough for this toy dataset; you may need to train longer for a practical dataset
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512  # faster, and good enough for this toy dataset (default: 512)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(
        dataset.categories)  # number of categories
    #     cfg.MODEL.DEVICE = 'cuda'
    print('Max iter is ', cfg.SOLVER.MAX_ITER)
    # Start the training.
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Switch the config to inference on the freshly trained weights.
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, 'model_final.pth')
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set the testing threshold for this model
    cfg.DATASETS.TEST = ('my_dataset', )
    cfg.TEST.DETECTIONS_PER_IMAGE = 1000

    built_model = build_model(cfg)  # returns a torch.nn.Module
    DetectionCheckpointer(built_model).load(
        cfg.MODEL.WEIGHTS)  # capture trained model
    checkpointer = DetectionCheckpointer(
        built_model, save_dir="/content/gdrive/My Drive/Colab Notebooks")
    checkpointer.save("model_final")  # save to output/model_999.pth

    predictor = DefaultPredictor(cfg)
    model = Model(predictor)

    return model
Ejemplo n.º 9
0
def register_dataset_instances(name, metadate, json_file, image_root):
    """Register *name* in DatasetCatalog and set its COCO metadata.

    Args:
        name (str): dataset identifier.
        metadate (dict): extra metadata splatted into the MetadataCatalog
            entry (parameter name kept for backward compatibility).
        json_file (str): path to the COCO-format annotation file.
        image_root (str): directory containing the images.
    """
    loader = lambda: load_coco_json(json_file, image_root, name)
    DatasetCatalog.register(name, loader)
    MetadataCatalog.get(name).set(evaluator_type="coco",
                                  json_file=json_file,
                                  image_root=image_root,
                                  **metadate)
    def __init__(self, args):
        """Register the Validation split named by *args* and build a
        DefaultPredictor from the configured model checkpoint.

        Reads from ``args``: datasetname, datasetpath, FULL_LABEL_CLASSES,
        modelname, modelbasefolder, modelfilename.
        """
        self.args = args
        use_cuda = True  # NOTE(review): assigned but never read below

        #         for d in ["Training", "Validation"]:
        #             DatasetCatalog.register("mywaymo1_" + d, lambda d=d: load_coco_json(self.args.datasetpath + d + "/annotations.json", self.args.datasetpath + d + "/"))

        # Only the Validation split is registered here.
        d = "Validation"
        self.datasetname = args.datasetname + d
        allregistereddataset = DatasetCatalog.list()  #return List[str]
        print(allregistereddataset)
        # Avoid double registration: register only if the name is new.
        if self.datasetname not in allregistereddataset:
            jsonpath = os.path.join(self.args.datasetpath, d,
                                    'annotations.json')
            DatasetCatalog.register(
                self.datasetname,
                lambda d=d: load_coco_json(
                    jsonpath, os.path.join(self.args.datasetpath, d)))
            print("Registered dataset: ", self.datasetname)

        #FULL_LABEL_CLASSES = ['unknown', 'vehicle', 'pedestrian', 'sign', 'cyclist']#['ignored-regions', 'pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle','bus',  'motor', 'others']
        MetadataCatalog.get(
            self.datasetname).set(thing_classes=args.FULL_LABEL_CLASSES)
        #         for d in ["Training", "Validation"]:
        #             MetadataCatalog.get("mywaymo1_" + d).set(thing_classes=FULL_LABEL_CLASSES)

        cfg = get_cfg()
        #cfg.OUTPUT_DIR='./output' #'./output_x101'
        # Base config comes from the model zoo COCO-Detection family.
        cfg.merge_from_file(
            model_zoo.get_config_file(
                "COCO-Detection/" + self.args.modelname +
                ".yaml"))  #faster_rcnn_X_101_32x8d_FPN_3x
        #cfg.DATASETS.TRAIN = ("mywaymo1_Training",)
        cfg.DATASETS.TEST = (self.datasetname, )
        cfg.DATALOADER.NUM_WORKERS = 1  #2
        #cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml")  # Let training initialize from model zoo
        #cfg.MODEL.WEIGHTS = os.path.join('/home/010796032/PytorchWork', "fasterrcnn_x101_fpn_model_final_68b088.pkl")#using the local
        #cfg.MODEL.WEIGHTS = os.path.join('/home/010796032/PytorchWork/output', "model_final.pth")
        # Weights come from a locally stored checkpoint, not the model zoo.
        cfg.MODEL.WEIGHTS = os.path.join(
            self.args.modelbasefolder,
            self.args.modelfilename)  #model_0159999.pth
        cfg.SOLVER.IMS_PER_BATCH = 4
        cfg.SOLVER.LR_SCHEDULER_NAME = 'WarmupCosineLR'
        cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
        cfg.SOLVER.MAX_ITER = 80000  # 140000    # you may need to train longer for a practical dataset
        cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  #512#128   # faster, and good enough for this toy dataset (default: 512)
        cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(
            args.FULL_LABEL_CLASSES
        )  #5 #12  # Kitti has 9 classes (including donot care)

        cfg.TEST.EVAL_PERIOD = 5000

        self.cfg_detectron2 = cfg

        self.predictor = DefaultPredictor(cfg)
Ejemplo n.º 11
0
def get_xview2_dicts(data_dir, split):
    """Load xView2 dicts for *split*, rewriting each record so it carries
    both a pre- and a post-disaster image path.

    The original ``file_name`` is kept as ``pre_file_name``; the post path is
    derived by replacing the 'pre' token with 'post' in the file name.
    """
    ann_path = os.path.join(data_dir, 'annotations', split + '.json')
    img_dir = os.path.join(data_dir, 'images', split)
    records = load_coco_json(ann_path, img_dir, dataset_name='xView2_' + split)
    for record in records:
        tokens = record['file_name'].split('_')
        post_name = '_'.join('post' if t == 'pre' else t for t in tokens)
        post_name = os.path.basename(post_name)
        record['post_file_name'] = os.path.join(data_dir, 'images',
                                                split + '_post', post_name)
        record['pre_file_name'] = record['file_name']
        del record['file_name']
    return records
Ejemplo n.º 12
0
def load_ssigns(json_file, dataset_name=None):
    """Load an ssigns annotation file that uses COCO's json format.

    Args:
        json_file (str): full path to the ssigns json annotation file.
        dataset_name (str): optional dataset name (e.g. "ssigns_train");
            when given, "thing_classes" is stored in the dataset's metadata.

    Returns:
        list[dict]: dicts in Detectron2 standard format (see
        `Using Custom Datasets </tutorials/datasets.html>`_).

    Notes:
        1. Image files are not read; results lack the "image" field.
    """
    # Empty image_root: file_name entries in the json are used as-is.
    return load_coco_json(json_file, "", dataset_name)
Ejemplo n.º 13
0
def compare_gt_coco(cfg, annotation_file, weight, dest_dir, score_thres_test = 0.7, num_sample = 10):
    """Write side-by-side ground-truth vs. prediction images for a random
    sample of a COCO dataset.

    Args:
        cfg: detectron2 config; its DATASETS.TEST entries name the datasets.
        annotation_file (str): COCO json whose file_name entries are full paths
            (image_root is passed as '').
        weight (str | None): optional checkpoint path overriding cfg weights.
        dest_dir (str): directory where the merged images are written.
        score_thres_test (float): ROI score threshold for the predictor.
        num_sample (int): maximum number of images to sample.
    """
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = score_thres_test
    if weight is not None:
        cfg.MODEL.WEIGHTS = weight

    predictor = DefaultPredictor(cfg)
    # NOTE(review): the dataset is loaded under DATASETS.TEST[1] but metadata
    # below is fetched for DATASETS.TEST[0] — confirm this mismatch is intended.
    dataset_list_dict = load_coco_json(annotation_file,
                                    image_root = '',
                                    dataset_name = cfg.DATASETS.TEST[1])

    # Sample without replacement, or take everything if the set is small.
    if len(dataset_list_dict) > num_sample:
        sample = random.sample(range(len(dataset_list_dict)), num_sample)
    else:
        sample = range(len(dataset_list_dict))
    for s in sample:
        img_dict = dataset_list_dict[s]
        print(img_dict['file_name'])
        img = read_image(img_dict['file_name'],format = 'BGR')
        h, w = img_dict['height'], img_dict['width']
        # Draw the ground-truth annotations.
        v_gt = Visualizer(img[:, :, ::-1],
                                metadata=MetadataCatalog.get(cfg.DATASETS.TEST[0]),
                                scale=0.5)
        v_gt = v_gt.draw_dataset_dict(img_dict)

        # Run the predictor on the same image.
        outputs = predictor(img)

        # Visualize the prediction result.
        v_pd = Visualizer(img[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TEST[0]), scale=1.2)
        v_pd = v_pd.draw_instance_predictions(outputs["instances"].to("cpu"))

        # Resize both renderings back to the original image size.
        gt = cv2.resize(v_gt.get_image()[:, :, ::-1], (w,h))
        pd = cv2.resize(v_pd.get_image()[:, :, ::-1], (w,h))

        # Stack ground truth (left) and prediction (right) horizontally.
        merge_img = np.hstack((gt, pd))
        result_name = os.path.join(dest_dir, os.path.split(img_dict['file_name'])[1])
        cv2.imwrite(result_name, merge_img)
Ejemplo n.º 14
0
def build_players_loader(json_file, image_root, batch_size):
    """Build an inference DataLoader over a COCO-format players dataset.

    NOTE(review): ``batch_size`` is forwarded as the loader's ``num_workers``;
    every batch still contains exactly one image (see the batch sampler) —
    confirm the parameter name is intentional.
    """
    player_dicts = load_coco_json(json_file, image_root)
    mapped = MapDataset(DatasetFromList(player_dicts), PlayerMapper())

    # One image per worker: the standard setup when reporting inference
    # time in papers.
    sampler = samplers.InferenceSampler(len(mapped))
    batch_sampler = torch.utils.data.sampler.BatchSampler(
        sampler, 1, drop_last=False)

    return torch.utils.data.DataLoader(
        mapped,
        num_workers=batch_size,
        batch_sampler=batch_sampler,
        collate_fn=trivial_batch_collator,
    )
Ejemplo n.º 15
0
def register_fiber_instances(name, metadata, json_file, image_root):
    """Register a COCO-json dataset for fiber detection (mask, keypoint and
    fiber width detection).

    Args:
        name (str): dataset identifier, e.g. "coco_2014_train".
        metadata (dict): extra metadata for this dataset; may be empty.
        json_file (str): path to the json instance annotation file.
        image_root (str): directory which contains all the images.
    """
    # The fiber-specific per-annotation fields to keep when loading.
    extra_keys = ["fiberwidth", "fiberlength"]
    DatasetCatalog.register(
        name,
        lambda: load_coco_json(
            json_file, image_root, name, extra_annotation_keys=extra_keys))
    MetadataCatalog.get(name).set(evaluator_type="coco",
                                  json_file=json_file,
                                  image_root=image_root,
                                  **metadata)
Ejemplo n.º 16
0
def register_dataset(
        # This function is necessary!
        # See https://stackoverflow.com/questions/19837486/python-lambda-in-a-loop
        image_root,
        instances_json,
        panoptic_root,
        panoptic_json,
        sem_seg_root,
        metadata):
    """Register the panoptic dataset named by the enclosing ``prefix``,
    merging instance, semantic and panoptic annotations on load.

    Closes over ``prefix``, ``extra_keys`` and ``sog_metadata`` from the
    surrounding scope.
    """
    def _load():
        # Combine the three annotation sources into one panoptic dict list.
        instances = load_coco_json(instances_json, image_root, prefix, extra_keys)
        sem = load_sem_seg(sem_seg_root, image_root)
        pan = load_pan_seg(panoptic_root, image_root)
        return merge_to_panoptic_sog(instances, sem, pan)

    DatasetCatalog.register(prefix, _load)
    MetadataCatalog.get(prefix).set(
        panoptic_root=panoptic_root,
        image_root=image_root,
        panoptic_json=panoptic_json,
        sem_seg_root=sem_seg_root,
        json_file=instances_json,  # TODO rename
        evaluator_type="coco_panoptic_seg",
        **sog_metadata)
Ejemplo n.º 17
0
# Visualize end-effector predictions over the validation set.
cfg.DATALOADER.NUM_WORKERS = 2
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=True)
# NOTE(review): WEIGHTS is set after the trainer was constructed, so the
# trainer above resumed from its own config — confirm this ordering.
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set the testing threshold for this model

predictor = DefaultPredictor(cfg)

# Store metadata for the validation split.
#MetadataCatalog.get("endeffector_val").set(thing_classes=["endeffector"])
end_metadata = MetadataCatalog.get("endeffector_val")

# Visualize predictions image by image.
from detectron2.utils.visualizer import ColorMode
dataset_dicts = load_coco_json(
    "endeffector_val/annotations/json_annotation_val.json",
    "endeffector_val/images", "endeffector_val")
#dataset_dicts = register_coco_instances("endeffector_val", {}, "endeffector_val/annotations/json_annotation_val.json", "endeffector_val/images")
for d in dataset_dicts:
    im = cv2.imread(d["file_name"])
    outputs = predictor(im)
    v = Visualizer(
        im[:, :, ::-1],
        metadata=end_metadata,
        scale=0.8,
        instance_mode=ColorMode.
        IMAGE_BW  # remove the colors of unsegmented pixels
    )
    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    # Show each result; blocks until a key is pressed.
    cv2.imshow('image', out.get_image()[:, :, ::-1])
    cv2.waitKey(0)
Ejemplo n.º 18
0
        if i == 0 and value == 1:
            counts.append(0)
        counts.append(len(list(elements)))
    compressed_rle = maskutil.frPyObjects(rle,
                                          rle.get('size')[0],
                                          rle.get('size')[1])
    compressed_rle['counts'] = str(compressed_rle['counts'], encoding='utf-8')
    return compressed_rle


# Set up inference for the VOC dataset trained in COCO json format.
setup_logger()
train_path = "train_images/"
json_file = os.path.join(train_path, "pascal_train.json")
# convert COCO format to Detectron2 format
register_coco_instances("VOC_dataset", {}, json_file, train_path)
dataset_dicts = load_coco_json(json_file, train_path, "VOC_dataset")
VOC_metadata = MetadataCatalog.get("VOC_dataset")

cfg = get_cfg()
cfg.merge_from_file(
    "configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml")
# Load the locally trained checkpoint from OUTPUT_DIR.
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set the testing threshold for this model
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 20  # Pascal VOC has 20 classes
cfg.DATASETS.TEST = ("VOC_dataset", )
predictor = DefaultPredictor(cfg)

# Ground truth for the held-out test set, evaluated via pycocotools.
test_dir = "test_images/"
json_file = os.path.join(test_dir, "test.json")
cocoGt = COCO(json_file)
result_dir = 'test_result'
Ejemplo n.º 19
0
def main(args):
    """Train Mask R-CNN on the Pascal VOC (COCO-json) dataset, then run the
    trained model back over the training images and report COCO segm metrics.

    Reads from ``args``: resume (checkpoint path or falsy), gpu_num,
    batch_size_per_gpu, lr, num_iters.
    """
    setup_logger()
    train_path = "./datas/train_images/"
    json_file = "./datas/pascal_train.json"
    # convert COCO format to Detectron2 format
    register_coco_instances("VOC_dataset", {}, json_file, train_path)
    dataset_dicts = load_coco_json(json_file, train_path, "VOC_dataset")

    VOC_metadata = MetadataCatalog.get("VOC_dataset")

    os.makedirs('train_results', exist_ok=True)
    # ============ train ===========
    cfg = get_cfg()
    cfg.merge_from_file(
        model_zoo.get_config_file(
            "COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml"))
    if (args.resume):
        cfg.MODEL.WEIGHTS = args.resume
    cfg.DATASETS.TRAIN = ("VOC_dataset", )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 4
    cfg.SOLVER.IMS_PER_BATCH = args.gpu_num * args.batch_size_per_gpu
    cfg.SOLVER.BASE_LR = args.lr
    cfg.SOLVER.MAX_ITER = args.num_iters
    cfg.SOLVER.LR_SCHEDULER_NAME = "WarmupCosineLR"
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, and good enough for this toy dataset
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 20

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    tr = Trainer(cfg)
    tr.resume_or_load(resume=False)
    tr.train()

    # ============= training results ===========
    # Fresh config for inference on the just-trained weights.
    cfg = get_cfg()
    cfg.merge_from_file(
        model_zoo.get_config_file(
            "COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml"))
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set the testing threshold for this model
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 20
    cfg.DATASETS.TEST = ("VOC_dataset", )
    predictor = DefaultPredictor(cfg)
    json_file = "./datas/pascal_train.json"
    cocoGt = COCO(json_file)

    # Run the model over every training image, saving visualizations and
    # collecting COCO-format detection results.
    coco_dt_train = []
    for imgid in cocoGt.imgs:
        filename = cocoGt.loadImgs(ids=imgid)[0]['file_name']
        print('predicting ' + filename)
        im = cv2.imread(train_path + filename)  # load image
        outputs = predictor(im)  # run inference of your model

        output_path = os.path.join('train_results', filename)
        v = Visualizer(
            im[:, :, ::-1],
            metadata=VOC_metadata,
            scale=3,
        )
        v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
        cv2.imwrite(output_path, v.get_image()[:, :, ::-1])

        anno = outputs["instances"].to("cpu").get_fields()
        masks = anno['pred_masks'].numpy()
        categories = anno['pred_classes'].numpy()
        scores = anno['scores'].numpy()

        n_instances = len(scores)
        if len(categories) > 0:  # If any objects are detected in this image
            for i in range(n_instances):  # Loop all instances
                # save information of the instance in a dictionary then append on coco_dt list
                pred = {}
                pred[
                    'image_id'] = imgid  # this imgid must be same as the key of test.json
                # COCO category ids are 1-based; predicted classes are 0-based.
                pred['category_id'] = int(categories[i]) + 1
                # save binary mask to RLE, e.g. 512x512 -> rle
                pred['segmentation'] = binary_mask_to_rle(masks[i, :, :])
                pred['score'] = float(scores[i])
                coco_dt_train.append(pred)

    with open("train_result.json", "w") as f:
        json.dump(coco_dt_train, f)

    cocoDt = cocoGt.loadRes("train_result.json")

    imgIds = sorted(cocoGt.getImgIds())

    # Standard COCO segmentation evaluation over the training set.
    cocoEval = COCOeval(cocoGt, cocoDt, 'segm')
    cocoEval.params.imgIds = imgIds
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
Ejemplo n.º 20
0
# Build a Faster R-CNN predictor and load the NYU test dataset for visualization.
cfg = get_cfg()
cfg.merge_from_file(
    "../../detectron2/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml"
)
# cfg.MODEL.WEIGHTS = "/w/jinlinyi/faster_rcnn_models/model_0044999.pth"  # initialize from model zoo
cfg.MODEL.WEIGHTS = 'model_final_68b088.pkl'
save_dir = '/y/jinlinyi/nyu_test/debug'

os.makedirs(save_dir, exist_ok=True)
cfg.NUM_GPUS = 1
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set the testing threshold for this model
cfg.DATASETS.TEST = ("nyu", )
predictor = DefaultPredictor(cfg)
from detectron2.utils.visualizer import ColorMode
from detectron2.data.datasets import load_coco_json
# Load NYU dataset dicts directly from the json (registers "nyu" metadata).
dataset_dicts = load_coco_json(nyu_json_path, nyu_img_path, dataset_name="nyu")
suncg_metadata = MetadataCatalog.get("nyu")  #.set(evaluator_type='coco')
# dataset_dicts = get_dicts("test")
#  = MetadataCatalog.get("nyu")
# evaluators = DatasetEvaluators(COCOEvaluator("suncg/validation", cfg, True, save_dir),)
# res = cls.test(cfg, model, evaluators)
# import pdb;pdb.set_trace()
for idx, d in enumerate(tqdm(random.sample(dataset_dicts, 300))):
    im = cv2.imread(d["file_name"])
    # im = im[:, :, ::-1]
    outputs = predictor(im)
    # import pdb;pdb.set_trace()
    v = Visualizer(
        im[:, :, ::-1],
        metadata=suncg_metadata,
        scale=0.8,
Ejemplo n.º 21
0
    "obj365_train": (TRAIN_PATH, TRAIN_JSON),
    "obj365_val": (VAL_PATH, VAL_JSON)
}

rootdir = "/home/houbowei/adet/"
# Load the Objects365 tiny-train annotations; bind under a distinct name so
# the path, the file handle and the parsed json are not all called `obj`.
_obj_path = os.path.join(rootdir, "./datasets/obj365/objects365_Tiny_train.json")
with open(_obj_path, 'r') as _fh:
    obj = json.load(_fh)
    things = [0] * 65
# Objects365-tiny category ids start at 301; map them onto indices 0..64.
for cat in obj['categories']:
    things[cat['id'] - 301] = cat['name']

thing_dataset_id_to_contiguous_id = {i + 301: i for i in range(65)}

# NOTE(review): both loaders pass "coco" as dataset_name rather than the
# registered names — confirm this is intended.
DatasetCatalog.register("obj365_train",
                        lambda: load_coco_json(TRAIN_JSON, TRAIN_PATH, "coco"))
DatasetCatalog.register("obj365_val",
                        lambda: load_coco_json(VAL_JSON, VAL_PATH, "coco"))

for _name, _json, _root in (("obj365_val", VAL_JSON, VAL_PATH),
                            ("obj365_train", TRAIN_JSON, TRAIN_PATH)):
    MetadataCatalog.get(_name).set(
        evaluator_type='coco',
        json_file=_json,
        image_root=_root,
        thing_classes=things,
        thing_dataset_id_to_contiguous_id=thing_dataset_id_to_contiguous_id)
Ejemplo n.º 22
0
 parser.add_argument('--opts', default=None, nargs=argparse.REMAINDER)
 args = parser.parse_args()
 # Create the output directory; ignore if it already exists.
 try:
     os.mkdir(args.output_path)
 except FileExistsError:
     pass
 annotations_dir = os.path.join(args.dataset_path, "annotations")
 images_dir = os.path.join(args.dataset_path, "images")
 # Register train/val splits from the COCO-format annotation files.
 register_coco_instances(
     "custom_dataset_train", {},
     os.path.join(annotations_dir, "instances_train.json"), images_dir)
 register_coco_instances(
     "custom_dataset_val", {},
     os.path.join(annotations_dir, "instances_val.json"), images_dir)
 train_json = load_coco_json(os.path.join(annotations_dir,
                                          "instances_train.json"),
                             image_root=images_dir,
                             dataset_name="custom_dataset_train")
 val_json = load_coco_json(os.path.join(annotations_dir,
                                        "instances_val.json"),
                           image_root=images_dir,
                           dataset_name="custom_dataset_val")
 cfg = get_cfg()
 cfg.merge_from_file(args.config_file)
 cfg.DATASETS.TRAIN = ("custom_dataset_train", )
 cfg.DATASETS.TEST = ("custom_dataset_val", )
 cfg.TEST.EVAL_PERIOD = 300
 # Expected-result guard: bbox AP 38.5 with +/- 0.2 tolerance.
 cfg.TEST.EXPECTED_RESULTS = [['bbox', 'AP', 38.5, 0.2]]
 cfg.SOLVER.CHECKPOINT_PERIOD = 300
 #cfg.SOLVER.IMS_PER_BATCH = 2
 cfg.SOLVER.MAX_ITER = args.max_iterations
 # Fall back to the model-zoo checkpoint unless the user supplied weights.
 cfg.MODEL.WEIGHTS = "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl" if not args.weights_path else args.weights_path
Ejemplo n.º 23
0
def get_metadata():
    """Return the metadata dict for the sfdet dataset.

    Contains "thing_classes": the 23 class codes in contiguous-id order.
    """
    return {
        "thing_classes": [
            'io', 'lo', 'tl', 'rn', 'ro', 'wo', 'ors', 'sc1', 'sc0', 'p26',
            'p20', 'p23', 'ps', 'pne', 'pg', 'pn', 'po', 'pl', 'pm', 'p10',
            'p11', 'p19', 'p5'
        ],
    }


# Split name -> (image directory, annotation json) for the sfdet dataset.
SPLITS = {
    "sfdet_train": ("/home/youtian/data/ft_det/ft_det_cleanedup/images",
                    "/home/youtian/data/ft_det/ft_det_cleanedup/train.json"),
    "sfdet_val": ("/home/youtian/data/ft_det/ft_det_cleanedup/images",
                  "/home/youtian/data/ft_det/ft_det_cleanedup/valid.json"),
}

for key, (image_root, json_file) in SPLITS.items():
    # Assume pre-defined datasets live in `./datasets`.

    # Lambda defaults freeze the current loop values (avoids the classic
    # late-binding-closure bug in loops).
    DatasetCatalog.register(
        key,
        lambda key=key, json_file=json_file, image_root=image_root:
        load_coco_json(json_file, image_root, key),
    )

    MetadataCatalog.get(key).set(json_file=json_file,
                                 image_root=image_root,
                                 **get_metadata())
Ejemplo n.º 24
0
    ("coco/train2014", "coco/annotations/densepose_train2014.json"),
    "densepose_coco_2014_minival":
    ("coco/val2014", "coco/annotations/densepose_minival2014.json"),
    "densepose_coco_2014_minival_100": (
        "coco/val2014",
        "coco/annotations/densepose_minival2014_100.json",
    ),
    "densepose_coco_2014_valminusminival": (
        "coco/val2014",
        "coco/annotations/densepose_valminusminival2014.json",
    ),
}

# Per-annotation fields to keep when loading DensePose COCO jsons.
DENSEPOSE_KEYS = ["dp_x", "dp_y", "dp_I", "dp_U", "dp_V", "dp_masks"]

for key, (image_root, json_file) in SPLITS.items():
    # Assume pre-defined datasets live in `./datasets`.
    json_file = os.path.join("datasets", json_file)
    image_root = os.path.join("datasets", image_root)

    # Lambda defaults freeze the loop values to avoid late-binding closures.
    DatasetCatalog.register(
        key,
        lambda key=key, json_file=json_file, image_root=image_root:
        load_coco_json(
            json_file, image_root, key, extra_annotation_keys=DENSEPOSE_KEYS),
    )

    MetadataCatalog.get(key).set(json_file=json_file,
                                 image_root=image_root,
                                 **get_densepose_metadata())
Ejemplo n.º 25
0
# import some common libraries
import numpy as np
import os, json, cv2, random

# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog

# if your dataset is in COCO format, this cell can be replaced by the following three lines:
from detectron2.data.datasets import load_coco_json
from detectron2.data.datasets.coco import convert_to_coco_json
# Paths for the Digi trash dataset (single class: "trash").
_DIGI_TRAIN_JSON = "./data/20210204_Digi_train.json"
_DIGI_TRAIN_ROOT = "./data/20210204_Digi_generated/images/"
_DIGI_VAL_JSON = "./data/20210204_Digi_val.json"
_DIGI_VAL_ROOT = "./data/20210204_Digi_generated/valid_images/"

train_data = load_coco_json(_DIGI_TRAIN_JSON, _DIGI_TRAIN_ROOT, "train")
val_data = load_coco_json(_DIGI_VAL_JSON, _DIGI_VAL_ROOT, "val")

# convert_to_coco_json(dataset_name=train_data, output_file="./data/20210204_Digi_coco_train.json", allow_cached=True)
# convert_to_coco_json(dataset_name=val_data, output_file="./data/20210204_Digi_coco_val.json", allow_cached=True)

for _split, _json, _root in (("train", _DIGI_TRAIN_JSON, _DIGI_TRAIN_ROOT),
                             ("val", _DIGI_VAL_JSON, _DIGI_VAL_ROOT)):
    # Defaults pin the loop values inside the lazy loader.
    DatasetCatalog.register(
        _split,
        lambda _json=_json, _root=_root, _split=_split:
        load_coco_json(_json, _root, _split))
    MetadataCatalog.get(_split).set(thing_classes=["trash"],
                                    json_file=_json,
                                    image_root=_root)

Ejemplo n.º 26
0
if __name__ == "__main__":
    # Smoke-test the COCO json dataset loader:
    #
    #     python -m detectron2.data.datasets.coco \
    #         path/to/json path/to/image_root dataset_name
    #
    # "dataset_name" must already be registered (e.g. "coco_2014_minival_100").
    from detectron2.utils.logger import setup_logger
    from detectron2.utils.visualizer import Visualizer
    import sys

    logger = setup_logger(name=__name__)
    dataset_name = sys.argv[3]
    assert dataset_name in DatasetCatalog.list()
    meta = MetadataCatalog.get(dataset_name)

    dicts = load_coco_json(sys.argv[1], sys.argv[2], dataset_name)
    logger.info("Done loading {} samples.".format(len(dicts)))

    # Render every sample's annotations into the output directory.
    out_dir = "coco-data-vis"
    os.makedirs(out_dir, exist_ok=True)
    for record in dicts:
        img = np.array(Image.open(record["file_name"]))
        drawn = Visualizer(img, metadata=meta).draw_dataset_dict(record)
        out_path = os.path.join(out_dir, os.path.basename(record["file_name"]))
        drawn.save(out_path)
Ejemplo n.º 27
0
# Register train/val COCO splits and visualize a few training samples.
print(os.getcwd())
register_coco_instances(
    "coco_train_2", {},
    "./Coco/detectron2/datasets/coco/annotations/instances_train.json",
    "./Coco/detectron2/datasets/coco/train")
register_coco_instances(
    "coco_val_2", {},
    "./Coco/detectron2/datasets/coco/annotations/instances_val.json",
    "./Coco/detectron2/datasets/coco/val")

# import random
#from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import builtin, load_coco_json

dataset_dicts = load_coco_json(
    "./Coco/detectron2/datasets/coco/annotations/instances_train.json",
    "./Coco/detectron2/datasets/coco/train")
print(dataset_dicts)
coco_train_metadata = MetadataCatalog.get("coco_train_2")
# Sanity-check three random samples by drawing their annotations.
for d in random.sample(dataset_dicts, 3):
    img = cv2.imread(d["file_name"])
    visualizer = Visualizer(img[:, :, ::-1],
                            metadata=coco_train_metadata,
                            scale=0.5)
    vis = visualizer.draw_dataset_dict(d)
    cv2_imshow(vis.get_image()[:, :, ::-1])

from detectron2.engine import DefaultTrainer
from detectron2.config import get_cfg
# print(os.getcwd())
#os.chdir(r"/home/xuyifei/anaconda3/envs/detectron2/lib/python3.7/site-packages/detectron2")
Ejemplo n.º 28
0
 def load_asis(*args, **kwargs):
     """Load a COCO json and collapse every annotation to a single class.

     Thin wrapper over ``load_coco_json`` (same arguments) that rewrites
     each annotation's ``category_id`` to 0, making the dataset
     class-agnostic.  Returns the list of dataset dicts, mutated in place.
     """
     dataset_dicts = load_coco_json(*args, **kwargs)
     # Plain loops instead of a nested list comprehension used purely for
     # its side effects — the built lists were discarded anyway.
     for record in dataset_dicts:
         for ann in record["annotations"]:
             ann["category_id"] = 0
     return dataset_dicts
Ejemplo n.º 29
0
# Paths to the COCO-format Waymo annotations and image roots.
train_annotation_json = os.path.join(BaseFolder, "Training/annotations.json")
train_images = os.path.join(BaseFolder, "Training")
# train_annotation_json='/data/cmpe295-liu/Waymo/WaymoCOCOtest/Training/annotations.json'
# train_images='/data/cmpe295-liu/Waymo/WaymoCOCOtest/Training'

# val_annotation_json='/data/cmpe295-liu/Waymo/WaymoCOCOsmall/Validation/annotations.json'
# val_images='/data/cmpe295-liu/Waymo/WaymoCOCOsmall/Validation'
# NOTE(review): the two assignments below overwrite the Training paths
# just set above with Validation paths — presumably they were meant to be
# val_annotation_json / val_images.  Behavior kept as-is; confirm before
# relying on train_annotation_json downstream.
train_annotation_json = os.path.join(BaseFolder, "Validation/annotations.json")
train_images = os.path.join(BaseFolder, "Validation")
#dataset_dicts = load_coco_json(train_annotation_json,train_images, "mywaymo_dataset_train", extrakeys)

for d in ["Training", "Validation"]:
    #DatasetCatalog.register("myuav1_" + d, lambda d=d: load_mycoco_json("/data/cmpe295-liu/UAVision/VisDrone2019-DET-" + d + "/annotations.json", "/data/cmpe295-liu/UAVision/VisDrone2019-DET-" + d + "/images", extrakeys))
    # `d=d` binds the loop variable at definition time (late-binding
    # closure pitfall).  os.path.join replaces raw string concatenation,
    # which silently produced wrong paths whenever BaseFolder lacked a
    # trailing slash (the os.path.join calls above imply it has none).
    DatasetCatalog.register(
        "mywaymo1_" + d,
        lambda d=d: load_coco_json(
            os.path.join(BaseFolder, d, "annotations.json"),
            os.path.join(BaseFolder, d)))

FULL_LABEL_CLASSES = [
    'unknown', 'vehicle', 'pedestrian', 'sign', 'cyclist'
]  #['ignored-regions', 'pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle','bus',  'motor', 'others']
for d in ["Training", "Validation"]:
    MetadataCatalog.get("mywaymo1_" + d).set(thing_classes=FULL_LABEL_CLASSES)
# from detectron2.data.datasets import register_coco_instances
# register_coco_instances("myuav1_train", {}, "/data/cmpe295-liu/UAVision/VisDrone2019-DET-train/annotations.json", "/data/cmpe295-liu/UAVision/VisDrone2019-DET-train/images")

cfg = get_cfg()
cfg.OUTPUT_DIR = './output_waymo'  #'./output_x101'
cfg.merge_from_file(
    model_zoo.get_config_file(
        "COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml")
)  #faster_rcnn_R_101_FPN_3x.yaml, faster_rcnn_X_101_32x8d_FPN_3x
 def register_coco_instances(name, metadata, classRemapping, json_file, image_root):
     """Register a COCO-json dataset and its metadata under ``name``.

     This variant additionally threads ``classRemapping`` through to
     ``load_coco_json``; ``metadata`` keys are forwarded verbatim into
     the dataset's MetadataCatalog entry.
     """
     def _loader():
         # Deferred so the json is only parsed when the dataset is used.
         return load_coco_json(json_file, image_root, None,
                               classRemapping=classRemapping)

     DatasetCatalog.register(name, _loader)
     MetadataCatalog.get(name).set(
         json_file=json_file,
         image_root=image_root,
         evaluator_type="coco",
         **metadata,
     )