from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.data.datasets import register_coco_instances
from detectron2.utils.visualizer import Visualizer
import cv2
import numpy as np
from PIL import ImageFont, ImageDraw, Image  # to render Korean (Hangul) fonts
from Acheck_TR import Acheck_TR

cfg = get_cfg()
yaml = "./configs/COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x.yaml"
weight = './output/model_final_cascade_with_hair/mask_rcnn_R_101_C4_3x/model_0029999.pth'
cfg.merge_from_file(yaml)
cfg.DATALOADER.NUM_WORKERS = 2
# cfg.MODEL.WEIGHTS = "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x/137849600/model_final_f10217.pkl"  # initialize from model zoo
## (load pretrained weights)
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3  # 3 classes (Kong, Lee, Huh)
register_coco_instances("Acheck", {}, "./Acheck_hair.json", "./img_hair")
MetadataCatalog.get("Acheck").thing_classes = ["Kong", "Lee", "Huh"]
Acheck_metadata = MetadataCatalog.get("Acheck")
cfg.MODEL.WEIGHTS = weight
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8  # set the testing threshold for this model
cfg.DATASETS.TEST = ("Acheck", )
predictor = DefaultPredictor(cfg)


def image_test(start, end):
    for i in range(start, end):
        k = "./test_images/{}.jpg".format(i)
        im = cv2.imread(k)
        outputs = predictor(im)
        v = Visualizer(im[:, :, ::-1],
                       metadata=Acheck_metadata,
                       scale=1.0)  # call closed here; the scale value is assumed (the snippet was truncated)
        v = v.draw_instance_predictions(outputs["instances"].to("cpu"))


# Example 2
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()
    logger = setup_logger()
    logger.info("Command line arguments: " + str(args))
    os.makedirs(args.output, exist_ok=True)

    # Register custom dataset for export
    dataset = "dandelions_v3"
    register_coco_instances(
        f"{dataset}_test", {},
        f"/home/appuser/detectron2_repo/datasets/{dataset}/test/instances_plants_test2021.json",
        f"/home/appuser/detectron2_repo/datasets/{dataset}/test/plants_test2021"
    )
    register_coco_instances(
        f"{dataset}_val", {},
        f"/home/appuser/detectron2_repo/datasets/{dataset}/validate/instances_plants_validate2021.json",
        f"/home/appuser/detectron2_repo/datasets/{dataset}/validate/plants_validate2021"
    )

    cfg = setup_cfg(args)

    # create a torch model
    torch_model = build_model(cfg)
    DetectionCheckpointer(torch_model).resume_or_load(cfg.MODEL.WEIGHTS)
    torch_model.eval()
    #det_file_1 = data_folder + 'val_thermal_only_predictions_IOU50_'+time+'_with_logits.json'#'val_thermal_only_predictions_IOU50_day.json'#
    #det_file_2 = data_folder + 'val_early_fusion_predictions_IOU50_'+time+'_with_logits.json'
    path_1 = '../../../Datasets/FLIR/' + data_set + '/resized_RGB/'
    path_2 = '../../../Datasets/FLIR/' + data_set + '/thermal_8_bit/'
    out_folder = 'out/box_comparison/'
    #train_json_path = '../../../Datasets/'+dataset+'/train/thermal_annotations_4_channel_no_dogs.json'

    val_json_path = '../../../Datasets/' + dataset + '/val/' + val_file_name
    val_folder = '../../../Datasets/FLIR/val/thermal_8_bit'

    if not os.path.exists(out_folder):
        os.mkdir(out_folder)

    # Register dataset
    dataset = 'FLIR_val'
    register_coco_instances(dataset, {}, val_json_path, val_folder)
    FLIR_metadata = MetadataCatalog.get(dataset)
    dataset_dicts = DatasetCatalog.get(dataset)

    # Create config
    cfg = get_cfg()
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.OUTPUT_DIR = out_folder
    cfg.merge_from_file(
        "./configs/COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml")
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set threshold for this model
    cfg.MODEL.WEIGHTS = "detectron2://COCO-Detection/faster_rcnn_R_101_FPN_3x/137851257/model_final_f6e8b1.pkl"
    #cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "good_model/out_model_iter_32000.pth")
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 80
    cfg.DATASETS.TEST = (dataset, )
    cfg.INPUT.FORMAT = 'BGR'
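    # Everything needed for a standalone evaluation is now in place; a hedged
    # sketch of running it (this pass is not part of the original snippet):
    from detectron2.engine import DefaultPredictor
    from detectron2.evaluation import COCOEvaluator, inference_on_dataset
    from detectron2.data import build_detection_test_loader

    predictor = DefaultPredictor(cfg)
    evaluator = COCOEvaluator(dataset, output_dir=out_folder)  # writes coco_instances_results.json
    val_loader = build_detection_test_loader(cfg, dataset)
    print(inference_on_dataset(predictor.model, val_loader, evaluator))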
# Example 4
import cv2
import numpy as np
import os
import json
import argparse
from os import listdir
from os.path import isfile, join
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.data.datasets import register_coco_instances

parser = argparse.ArgumentParser()
parser.add_argument("--test_model", default="model_final.pkl")
args = parser.parse_args()
test_files = os.listdir('./test')
test_files.sort(key=lambda x: int(x[:-4]))  # numeric sort by filename stem; also avoids shadowing builtin dir()
register_coco_instances("dataset_train", {}, "./dataset_coco/annotations/train.json", "./dataset_coco/images")

imgs = [f for f in test_files if isfile(join('./test/', f))]

cfg = get_cfg()
cfg.merge_from_file(
    "./configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml"
)
cfg.DATASETS.TEST = ("dataset_train",)
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0
cfg.MODEL.WEIGHTS = os.path.join('./output', args.test_model)
cfg.MODEL.DEVICE = "cuda"
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 10

predictor = DefaultPredictor(cfg)
print(MetadataCatalog.get('dataset_train'))
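
# A minimal inference loop over the sorted test images (a sketch; the summary
# format below is an assumption, not part of the original):
results = []
for name in imgs:
    im = cv2.imread(join('./test/', name))
    outputs = predictor(im)
    instances = outputs["instances"].to("cpu")
    results.append({"file_name": name, "num_detections": len(instances)})
print(json.dumps(results[:3], indent=2))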
# Example 5
    max_train_iter = 15000
    ims_per_batch = 6
    num_labels = 13

    work_root = "/home/user/qunosen/2_project/4_train/2_zhuchao/6_d2_final_train/2_train/1_train_2020_9_17/test6"
    model_name = "mask_rcnn_R_50_FPN_3x.yaml"
    log_file = os.path.join(work_root, "log_dat.txt")
    regist_train_name = "zc_train_data"
    regist_val_name = 'zc_val_1data'

    train_json_path = "/home/user/qunosen/2_project/4_train/2_zhuchao/6_d2_final_train/1_data/1_data_2020_9_17/new_set_13/train.json"
    val_json_path = "/home/user/qunosen/2_project/4_train/2_zhuchao/6_d2_final_train/1_data/1_data_2020_9_17/new_set_13/val.json"
    train_images_dir = "/home/user/qunosen/2_project/4_train/2_zhuchao/6_d2_final_train/1_data/1_data_2020_9_17/new_set_13/train"
    val_images_dir = "/home/user/qunosen/2_project/4_train/2_zhuchao/6_d2_final_train/1_data/1_data_2020_9_17/new_set_13/val"

    register_coco_instances(regist_train_name, {}, train_json_path,
                            train_images_dir)
    register_coco_instances(regist_val_name, {}, val_json_path, val_images_dir)

    train_metadata = MetadataCatalog.get(regist_train_name)
    val_metadata = MetadataCatalog.get(regist_val_name)
    dataset_dict = DatasetCatalog.get(regist_train_name)

    cfg = get_cfg()
    cfg.merge_from_file(
        model_zoo.get_config_file(
            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    cfg.DATASETS.TRAIN = (regist_train_name, )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = num_workers
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
import cv2
#from google.colab.patches import cv2_imshow

# detectron2 utilities
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog

from detectron2.data.datasets import register_coco_instances

# register dataset
register_coco_instances("plates", {}, "./plates_coco/annotations.json",
                        "./plates_coco/")

plates_metadata = MetadataCatalog.get("plates")
dataset_dicts = DatasetCatalog.get("plates")

from detectron2.engine import DefaultTrainer
from detectron2.config import get_cfg
import torch, os

cfg = get_cfg()
cfg.merge_from_file(
    "./detectron2_repo/configs/COCO-Detection/retinanet_R_50_FPN_3x.yaml")

cfg.DATASETS.TRAIN = ("plates", )
cfg.DATASETS.TEST = ()
cfg.DATALOADER.NUM_WORKERS = 2
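
# This snippet also stops before training starts. Note that RetinaNet reads its
# class count from MODEL.RETINANET, not MODEL.ROI_HEADS; a hedged continuation
# (class count and solver values are assumptions):
from detectron2 import model_zoo

cfg.MODEL.RETINANET.NUM_CLASSES = 1  # assumed: a single "plate" class
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/retinanet_R_50_FPN_3x.yaml")
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = 0.00025
cfg.SOLVER.MAX_ITER = 1000

os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()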
# Example 7
    return trainer.train()


if __name__ == "__main__":

    # register_coco_instances("my_dataset_train", {}, "datasets/waterbody/annotations/instancesonly_filtered_train.json", "datasets/waterbody/train/image")
    # register_coco_instances("my_dataset_val", {}, "datasets/waterbody/annotations/instancesonly_filtered_val.json", "datasets/waterbody/val/image")
    # # configfile = "/deekongai/data/backupfiles/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml"
    # configfile = "/deekongai/configs/Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml"

    # register_coco_instances("my_dataset_train", {}, "datasets/bridge/annotations/instances_train.json", "datasets/bridge/data")
    # register_coco_instances("my_dataset_val", {}, "datasets/bridge/annotations/instances_val.json", "datasets/bridge/data")
    # # configfile = "/deekongai/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml"
    # configfile = "/deekongai/configs/COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml"

    register_coco_instances("my_dataset_train", {}, "/deekongai/data/0geofen/shipdetection/data/annotations/instances_train.json", "/deekongai/data/0geofen/shipdetection/data/trainimage")
    register_coco_instances("my_dataset_val", {}, "/deekongai/data/0geofen/shipdetection/data/annotations/instances_val.json", "/deekongai/data/0geofen/shipdetection/data/trainimage")
    # configfile = "/deekongai/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml"
    configfile = "/deekongai/configs/COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml"


    #visualize training data
    my_dataset_train_metadata = MetadataCatalog.get("my_dataset_train")
    dataset_dicts = DatasetCatalog.get("my_dataset_train")
    for d in random.sample(dataset_dicts, 3):
        img = cv2.imread(d["file_name"])
        visualizer = Visualizer(img[:, :, ::-1], metadata=my_dataset_train_metadata, scale=0.5)
        vis = visualizer.draw_dataset_dict(d)
        cv2.imwrite(d["file_name"].split('/')[-1].replace('tif','jpg'),vis.get_image()[:, :, ::-1])

    # # model train

# the opening line of this import was lost in extraction; names assumed from
# the matching snippet later in this collection
from detectron2.evaluation import (
    inference_on_dataset,
    print_csv_format,
)
from detectron2.modeling import build_model
from detectron2.solver import build_lr_scheduler, build_optimizer
from detectron2.utils.events import (
    CommonMetricPrinter,
    EventStorage,
    JSONWriter,
    TensorboardXWriter,
)

import numpy as np

from detectron2.data.datasets import register_coco_instances
register_coco_instances(
    "bridge_dataset_train", {},
    "/home/asd/Mission/GaoFen/bridge_new/data/data_wenhe_gai/aug/train/coco/bridge_train_cocostyle.json",
    "/home/asd/Mission/GaoFen/bridge_new/data/data_wenhe_gai/aug/train/coco/bridge_train"
)
register_coco_instances(
    "208_test", {},
    "/home/asd/Mission/GaoFen/bridge_new/data/data_aug_coco/test_data_coco/bridge_test_cocostyle.json",
    "/home/asd/Mission/GaoFen/bridge_new/data/data_aug_coco/test_data_coco/bridge_test"
)
register_coco_instances(
    "laji_test", {},
    "/home/asd/Mission/GaoFen/bridge_new/data/data_wenhe_gai/aug/test/coco/bridge_test_cocostyle.json",
    "/home/asd/Mission/GaoFen/bridge_new/data/data_wenhe_gai/aug/test/coco/bridge_test"
)

logger = logging.getLogger("桥梁训练器")  # logger name means "bridge trainer"
# Example 9
    get_ipython().system('unzip -o $dataset -d .')


# In[9]:


DATASET_NAME = DATASET_DIR = dataset.replace('.zip', '')


# In[11]:


register_coco_instances(f"{DATASET_NAME}_train", {
}, f"{DATASET_DIR}/train/annotations.json", f"{DATASET_DIR}/train/")
register_coco_instances(f"{DATASET_NAME}_valid", {
}, f"{DATASET_DIR}/valid/annotations.json", f"{DATASET_DIR}/valid/")


# In[12]:


_dataset_metadata = MetadataCatalog.get(f"{DATASET_NAME}_train")
_dataset_metadata.thing_colors = [cc['color']
                                  for cc in builtin_meta.COCO_CATEGORIES]


# In[13]:

# Example 10
from detectron2.structures import BoxMode
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.utils.logger import setup_logger
setup_logger()

import itertools
from itertools import groupby
import matplotlib.pyplot as plt
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as maskutil
from pycocotools import mask as maskUtils

register_coco_instances("T", {},
                        "gdrive/My Drive/HW3/dataset/train/pascal_train.json",
                        "gdrive/My Drive/HW3/dataset/train/train_images")

# metadata
train_metadata = MetadataCatalog.get("T")
test_metadata = MetadataCatalog.get("V")

# dataset dictionary
train_dataset_dicts = DatasetCatalog.get("T")
test_dataset_dicts = DatasetCatalog.get("V")

cfg = get_cfg()
cfg.merge_from_file(
    "gdrive/MyDrive/HW3/configs/mask_rcnn_X_101_32x8d_FPN_3x.yaml")

# load dataset
# Example 11

def show_example(dataset_dicts, metadata, num=3):
    for d in random.sample(dataset_dicts, num):
        img = cv2.imread(d["file_name"])
        visualizer = Visualizer(img[:, :, ::-1], metadata=metadata, scale=0.5)
        vis = visualizer.draw_dataset_dict(d)
        plt.imshow(vis.get_image()[:, :, ::-1])
        plt.show()


setup_logger()
train_path = "train_images/"
json_file = os.path.join(train_path, "pascal_train.json")
# convert COCO format to Detectron2 format
register_coco_instances("VOC_dataset", {}, json_file, train_path)
dataset_dicts = load_coco_json(json_file, train_path, "VOC_dataset")

VOC_metadata = MetadataCatalog.get("VOC_dataset")
show_example(dataset_dicts, VOC_metadata, 3)

os.makedirs('train_results', exist_ok=True)
# ============ train ===========
cfg = get_cfg()
cfg.merge_from_file(
    "configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml")
cfg.DATASETS.TRAIN = ("VOC_dataset", )
cfg.DATASETS.TEST = ()
cfg.DATALOADER.NUM_WORKERS = 0
cfg.MODEL.WEIGHTS = "X-101-32x8d.pkl"  # pre-trained model file location
cfg.SOLVER.IMS_PER_BATCH = 2
# Example 12
def main():
    """ Mask RCNN Object Detection with Detectron2 """
    rospy.init_node("mask_rcnn", anonymous=True)
    bridge = CvBridge()
    start_time = time.time()
    image_counter = 0

    register_coco_instances(
        "train_set", {},
        "/home/labuser/ros_ws/src/odhe_ros/arm_camera_dataset/train/annotations.json",
        "/home/labuser/ros_ws/src/odhe_ros/arm_camera_dataset/train")
    register_coco_instances(
        "test_set", {},
        "/home/labuser/ros_ws/src/odhe_ros/arm_camera_dataset/test/annotations.json",
        "/home/labuser/ros_ws/src/odhe_ros/arm_camera_dataset/test")

    train_metadata = MetadataCatalog.get("train_set")
    print(train_metadata)
    dataset_dicts_train = DatasetCatalog.get("train_set")

    test_metadata = MetadataCatalog.get("test_set")
    print(test_metadata)
    dataset_dicts_test = DatasetCatalog.get("test_set")

    cfg = get_cfg()
    cfg.merge_from_file(
        model_zoo.get_config_file(
            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    cfg.DATASETS.TRAIN = ("train_set")
    cfg.DATASETS.TEST = ()  # no metrics implemented for this dataset
    cfg.DATALOADER.NUM_WORKERS = 4
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
    )  # initialize from model zoo
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.01
    cfg.SOLVER.MAX_ITER = 1000  # 300 iterations seems good enough, but you can certainly train longer
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, and good enough for this toy dataset
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 5  # 5 classes (Plate, Carrot, Celery, Pretzel, Gripper)

    # Temporary Solution. If I train again I think I can use the dynamically set path again
    cfg.MODEL.WEIGHTS = os.path.join(
        cfg.OUTPUT_DIR,
        "/home/labuser/ros_ws/src/odhe_ros/arm_camera_dataset/output/model_final.pth"
    )
    # cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.4  # set the testing threshold for this model
    cfg.DATASETS.TEST = ("test_set")
    predictor = DefaultPredictor(cfg)

    class_names = MetadataCatalog.get("train_set").thing_classes

    # Set up custom cv2 visualization parameters
    # Classes: [name, id]
    #               -
    #          [Plate,   0]
    #          [Carrot,  1]
    #          [Celery,  2]
    #          [Pretzel, 3]
    #          [Gripper, 4]

    # Colors = [blue, green, red]
    color_plate = [0, 255, 0]  # green
    color_carrot = [255, 200, 0]  # blue
    color_celery = [0, 0, 255]  # red
    color_pretzel = [0, 220, 255]  # yellow
    color_gripper = [204, 0, 150]  # purple
    colors = list([
        color_plate, color_carrot, color_celery, color_pretzel, color_gripper
    ])

    alpha = 0.4  # overlay opacity used for mask blending below

    run = maskRCNN()
    while not rospy.is_shutdown():
        # Get images
        img = run.get_img()

        if img is not None:
            outputs = predictor(img)
            predictions = outputs["instances"].to("cpu")

            # Get results
            unsorted = run.getResult(predictions, class_names)

            # Sort detections by x and y
            detections = run.sort_detections(unsorted)

            result = Result()
            for i in range(len(detections)):
                result.class_ids.append(detections[i][0])
                result.class_names.append(detections[i][1])
                result.scores.append(detections[i][2])
                result.boxes.append(detections[i][3])
                result.masks.append(detections[i][4])

            # Visualize using detectron2 built in visualizer
            # v = Visualizer(im[:, :, ::-1],
            #             metadata=train_metadata,
            #             scale=1.0
            #             # instance_mode=ColorMode.IMAGE_BW   # remove the colors of unsegmented pixels
            # )
            # v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
            # im = v.get_image()[:, :, ::-1]
            # im_msg = bridge.cv2_to_imgmsg(im, encoding="bgr8")

            # Visualize using custom cv2 code
            if result is not None:
                result_cls = result.class_names
                result_clsId = result.class_ids
                result_scores = result.scores
                result_masks = result.masks

                # Create copies of the original image
                im = img.copy()
                output = img.copy()

                # Initialize lists
                masks = []
                masks_indices = []
                for i in range(len(result_clsId)):
                    # Obtain current object mask as a numpy array (black and white mask of single object)
                    current_mask = bridge.imgmsg_to_cv2(result_masks[i])

                    # Find current mask indices
                    mask_indices = np.where(current_mask == 255)

                    # Add to mask indices list
                    if len(masks_indices) > len(result_clsId):
                        masks_indices = []
                    else:
                        masks_indices.append(mask_indices)

                    # Add to mask list
                    if len(masks) > len(result_clsId):
                        masks = []
                    else:
                        masks.append(current_mask)

                if len(masks) > 0:
                    # Create composite mask
                    composite_mask = sum(masks)

                    # Clip composite mask between 0 and 255
                    composite_mask = composite_mask.clip(0, 255)

                for i in range(len(result_clsId)):
                    # Select correct object color
                    color = colors[result_clsId[i]]

                    # Change the color of the current mask object
                    im[masks_indices[i][0], masks_indices[i][1], :] = color

                # Apply alpha scaling to image to adjust opacity
                cv2.addWeighted(im, alpha, output, 1 - alpha, 0, output)

                for i in range(len(result_clsId)):
                    # Draw Bounding boxes
                    start_point = (result.boxes[i].x_offset,
                                   result.boxes[i].y_offset)
                    end_point = (result.boxes[i].x_offset +
                                 result.boxes[i].width,
                                 result.boxes[i].y_offset +
                                 result.boxes[i].height)
                    start_point2 = (result.boxes[i].x_offset + 2,
                                    result.boxes[i].y_offset + 2)
                    end_point2 = (result.boxes[i].x_offset +
                                  result.boxes[i].width - 2,
                                  result.boxes[i].y_offset + 12)
                    color = colors[result_clsId[i]]
                    box_thickness = 1

                    name = result_cls[i]
                    score = result_scores[i]
                    conf = round(score.item() * 100, 1)
                    string = str(name) + ":" + str(conf) + "%"
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    org = (result.boxes[i].x_offset + 2,
                           result.boxes[i].y_offset + 10)
                    fontScale = .3
                    text_thickness = 1
                    output = cv2.rectangle(output, start_point, end_point,
                                           color, box_thickness)
                    output = cv2.rectangle(output, start_point2, end_point2,
                                           color, -1)  # Text box
                    output = cv2.putText(output, string, org, font, fontScale,
                                         [0, 0, 0], text_thickness,
                                         cv2.LINE_AA, False)

                im_rgb = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)
                im_msg = bridge.cv2_to_imgmsg(im_rgb, encoding="rgb8")

                ##### The entire goal of the below code is to get N random points on the mask in 3D
                ##### and publish on cloud samples topic for GPD
                item_ids = result_clsId
                idx = [i for i, e in enumerate(item_ids) if 0 < e < 4]  # food classes only (Carrot, Celery, Pretzel)
                numFoodItems = len(idx)

                mask = bridge.imgmsg_to_cv2(result_masks[idx[0]])
                coord = cv2.findNonZero(
                    mask)  # Coordinates of the mask that are on the food item

                # Pick 3 random points on the object mask
                sample_list = list()
                for ii in range(3):
                    point = Point()
                    x = random.choice(
                        coord[:, 0, 1])  # x and y reversed for some reason
                    y = random.choice(
                        coord[:, 0, 0])  # x and y reversed for some reason
                    depth = (run.depth_array[y, x]) / 1000
                    # Deproject pixels and depth to 3D coordinates (camera frame)
                    X, Y, Z = run.convert_depth_to_phys_coord_using_realsense(
                        y, x, depth, run.cam_info)
                    # print("(x,y,z) to convert: ("+str(y)+", "+str(x)+", "+str(depth)+")")
                    # print("(X,Y,Z) converted: ("+str(X)+", "+str(Y)+", "+str(Z)+")")
                    point.x = X
                    point.y = Y
                    point.z = Z
                    sample_list.append(point)

                # print(sample_list)

                cam_source = Int64()
                cam_source.data = 0

                cloud_source = CloudSources()
                cloud_source.cloud = run.pointCloud
                cloud_source.camera_source = [cam_source]
                view_point = Point()
                view_point.x = 0.640
                view_point.y = 0.828
                view_point.z = 0.505
                # view_point.x = 0; view_point.y = 0; view_point.z = 0
                cloud_source.view_points = [view_point]

                cloud_samples = CloudSamples()
                cloud_samples.cloud_sources = cloud_source
                cloud_samples.samples = sample_list

                # Print publish info
                # print(type(cloud_source.cloud))
                # print(cloud_source.camera_source)
                # print(cloud_source.view_points)
                # print("")
                # print(type(cloud_samples.cloud_sources))
                # print(cloud_samples.samples)
                # print("-------------------------\n")

            # Display Image Counter
            # image_counter = image_counter + 1
            # if (image_counter % 11) == 10:
            #     rospy.loginfo("Images detected per second=%.2f", float(image_counter) / (time.time() - start_time))

            run.publish(im_msg, result, cloud_samples)

    return 0
# Example 13
    """
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    if cfg.TEST.AUG.ENABLED:
        trainer.register_hooks([
            hooks.EvalHook(0,
                           lambda: trainer.test_with_TTA(cfg, trainer.model))
        ])
    return trainer.train()


if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    print("Command Line Args:", args)
    register_coco_instances(
        "COCO_test_vertebre", {},
        "./vertebre_data/datasets/coco/annotations/test_segmentation.json",
        "./vertebre_data/datasets/coco/images")
    register_coco_instances(
        "COCO_train_vertebre", {},
        "./vertebre_data/datasets/coco/annotations/train_segmentation.json",
        "./vertebre_data/datasets/coco/images")
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args, ),
    )
# Example 14
    def register_dataset(self):
        for d in ["train", "test"]:
            register_coco_instances(f"bladder_{d}", {},
                                    f"Data/Vezica/{d}.json",
                                    f"Data/Vezica/{d}")
# Example 15
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog, DatasetCatalog

# if your dataset is in COCO format, this cell can be replaced by the following three lines:
from detectron2.data.datasets import register_coco_instances
# used for training
from detectron2.engine import DefaultTrainer

# model config for this training/test
from config.default import get_cfg_defaults
from utils import default_argument_parser

register_coco_instances("my_dataset_train", {},
                        "dataset/coco/annotations/pascal_train.json",
                        "dataset/coco/train2017")
dataset_metadata = MetadataCatalog.get("my_dataset_train")
# get the actual internal representation: the catalog stores information about the datasets and how to obtain them, and the internal format uses one dict to represent the annotations of one image.
dataset_dicts = DatasetCatalog.get("my_dataset_train")
print(dataset_metadata)
# print(dataset_dicts)

# parse argument from cli
args = default_argument_parser().parse_args()

# configuration
ep_config = get_cfg_defaults()
if args.experiment_file is not None:
    ep_config.merge_from_file(
        args.experiment_file)  # configuration for this experiment
# Example 16
    CityscapesInstanceEvaluator,
    CityscapesSemSegEvaluator,
    COCOEvaluator,
    COCOPanopticEvaluator,
    DatasetEvaluators,
    LVISEvaluator,
    PascalVOCDetectionEvaluator,
    SemSegEvaluator,
    verify_results,
)
from detectron2.modeling import GeneralizedRCNNWithTTA

# Register Custom Dataset
from detectron2.data.datasets import register_coco_instances

register_coco_instances("CIHP_train", {}, "../../data/msrcnn_finetune_annotations/CIHP_train.json",
                        "../../data/instance-level_human_parsing/Training/Images")
register_coco_instances("CIHP_val", {}, "../../data/msrcnn_finetune_annotations/CIHP_val.json",
                        "../../data/instance-level_human_parsing/Validation/Images")
register_coco_instances("demo_train", {}, "../../demo/annotations/demo_train.json",
                        "../../demo/img")
register_coco_instances("demo_val", {}, "../../demo/annotations/demo_val.json",
                        "../../demo/img")


class Trainer(DefaultTrainer):
    """
    We use the "DefaultTrainer" which contains pre-defined default logic for
    standard training workflow. They may not work for you, especially if you
    are working on a new research project. In that case you can use the cleaner
    "SimpleTrainer", or write your own training loop. You can use
    "tools/plain_train_net.py" as an example.
# Example 17
# import some common libraries
import numpy as np
import os, json, cv2, random
from google.colab.patches import cv2_imshow

# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog

from detectron2.data.datasets import register_coco_instances

# register trainset
register_coco_instances("dataset_train0", {}, "train_patch_0.json", "/content/thre_whole_patches")
register_coco_instances("dataset_val0", {}, "val_patch_0.json", "/content/thre_whole_patches")

register_coco_instances("dataset_train1", {}, "train_patch_1.json", "/content/thre_whole_patches")
register_coco_instances("dataset_val1", {}, "val_patch_1.json", "/content/thre_whole_patches")

register_coco_instances("dataset_train2", {}, "train_patch_2.json", "/content/thre_whole_patches")
register_coco_instances("dataset_val2", {}, "val_patch_2.json", "/content/thre_whole_patches")

print("done")

"""### first"""

cfg = get_cfg()
# cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml"))
cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/retinanet_R_101_FPN_3x.yaml"))
# Example 18
from pygame import mixer  # Load the popular external library
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.data.datasets import register_coco_instances

register_coco_instances("my_dataset_train", {}, "train/_annotations.coco.json", "train/imagenes")
register_coco_instances("my_dataset_val", {}, "valid/_annotations.coco.json", "valid/imagenes")
register_coco_instances("my_dataset_test", {}, "test/_annotations.coco.json", "test/imagenes")
my_dataset_train_metadata = MetadataCatalog.get("my_dataset_train")
dataset_dicts = DatasetCatalog.get("my_dataset_train")


from detectron2.utils.visualizer import Visualizer

a1=1
a2=2
b1=1
b2=2
cord1,cord2,ord1,ord2=100,200,100,200
valorgrisimagen=0
valorgrisimagen1=0
# Train path
train_path = '../../../Datasets/'+ dataset +'/train/'
train_folder = '../../../Datasets/FLIR/train/'
#train_json_path = '../../../Datasets/'+dataset+'/train/thermal_annotations_4class.json'
train_json_path = '../../../Datasets/'+dataset+'/train/thermal_annotations_4_channel_no_dogs_3_class.json'
#train_json_path = '../../../Datasets/'+dataset+'/train/thermal_annotations.json'
# Validation path
val_path = '../../../Datasets/'+ dataset +'/val/'
val_folder = '../../../Datasets/FLIR/val/'
#val_json_path = '../../../Datasets/'+dataset+'/val/thermal_annotations_4class.json'
val_json_path = '../../../Datasets/'+dataset+'/val/thermal_annotations_4_channel_no_dogs_3_class.json'
print(train_json_path)

# Register dataset
dataset_train = 'FLIR_train'
register_coco_instances(dataset_train, {}, train_json_path, train_folder)
FLIR_metadata_train = MetadataCatalog.get(dataset_train)
dataset_dicts_train = DatasetCatalog.get(dataset_train)

# Test on validation set
dataset_test = 'FLIR_val'
register_coco_instances(dataset_test, {}, val_json_path, val_folder)
FLIR_metadata_test = MetadataCatalog.get(dataset_test)
dataset_dicts_test = DatasetCatalog.get(dataset_test)

model = 'faster_rcnn_R_101_FPN_3x'

#files_names = [f for f in listdir(train_path) if isfile(join(train_path, f))]

out_folder = 'output_mid_fusion_prob_1110'
out_model_path = os.path.join(out_folder, 'out_model_final.pth')
# Example 20
def test_recognition(model_yaml,
                     checkpoint,
                     dataset,
                     annotations,
                     imagedir,
                     outdir=None,
                     use_rpn=False,
                     record_individual_scores=False):
    """
    Computes detections and uses them to compute recognition accuracies.

    Arguments:
        model_yaml: Path to model config in yaml format.
        checkpoint: Path to the checkpoint file holding the model weights.
        dataset: Dataset name, used to name output files.
        annotations: Path to ground truth annotations (json file with COCO-style annotations).
        imagedir: Path to image directory.
        outdir: Directory where output files are stored. When None is passed, the output directory specified in the model's config file is used.
        use_rpn: If True, the region proposal network of the model is used instead of bounding boxes from the ground truth.
        record_individual_scores: Forwarded to detection2accuracy() to record per-detection scores.
    """

    # Register testset
    register_coco_instances(dataset, {}, annotations, imagedir)

    # Load model
    cfg = get_cfg()
    with open(model_yaml) as f:
        cfg = cfg.load_cfg(f)

    print("Numer of classes: {}".format(cfg.MODEL.ROI_HEADS.NUM_CLASSES))

    cfg.DATASETS.TEST = (dataset, )
    model = build_model(cfg)

    # Create outdir
    if outdir is None:
        outdir = cfg.OUTPUT_DIR

    pathlib.Path(outdir).mkdir(exist_ok=True)

    print("Evaluation output directory: " + outdir)

    # Create data loader
    if not use_rpn:
        # returns a list of dicts. Every entry in the list corresponds to one sample, represented by a dict.
        dataset_dicts = detectron2.data.get_detection_dataset_dicts(dataset)

        # add proposal boxes
        for i, s in enumerate(dataset_dicts):
            s["proposal_boxes"] = np.array(
                [ann["bbox"] for ann in dataset_dicts[i]["annotations"]]
            )  # np.array([[xmin, ymin, xmax, ymax],[xmin, ymin, xmax, ...], ...]) # kx4 matrix for k proposed bounding boxes
            s["proposal_objectness_logits"] = np.full(
                (s["proposal_boxes"].shape[0], ),
                10)  # logit of 10 is 99.999...%
            s["proposal_bbox_mode"] = detectron2.structures.BoxMode.XYWH_ABS  # 1 # (x0, y0, w, h) in absolute floating points coordinates

        print("Proposal boxes added.")

        model.proposal_generator = None  # deactivate such that precomputed proposals are used
        print(
            "Region proposal deactivated, ground truth bounding boxes are used."
        )

        val_loader = build_detection_test_loader(
            dataset_dicts,
            mapper=DatasetMapper(is_train=False,
                                 augmentations=[],
                                 image_format=cfg.INPUT.FORMAT,
                                 precomputed_proposal_topk=500))
    else:
        val_loader = build_detection_test_loader(cfg, dataset)

    # load model state (weights) from checkpoint
    DetectionCheckpointer(model).load(checkpoint)

    # evaluate detections
    evaluator = COCOEvaluator(dataset, ("bbox", ), False, output_dir=outdir)
    result = inference_on_dataset(model, val_loader, evaluator)
    print_csv_format(result)

    with open(os.path.join(outdir, "evaluation_" + dataset + ".json"),
              "w") as outfile:
        json.dump(result, outfile)

    # compute accuracies
    detection2accuracy(detections=os.path.join(outdir,
                                               "coco_instances_results.json"),
                       groundtruth=annotations,
                       outdir=outdir,
                       record_individual_scores=record_individual_scores)
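
# A hypothetical invocation of the function above (all paths are placeholders,
# not from the original):
test_recognition(
    model_yaml="configs/my_model.yaml",
    checkpoint="output/model_final.pth",
    dataset="my_testset",
    annotations="datasets/my_testset/annotations.json",
    imagedir="datasets/my_testset/images",
    use_rpn=False,  # use ground-truth boxes as proposals
)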
# Example 21
import torch
from detectron2.engine import DefaultTrainer
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data.datasets import register_coco_instances

#parser = argparse.ArgumentParser(description='Input parameters need to be Specified for hypertuning')
#parser.add_argument('--lr', default=0.001, type=float, help='Learning rate parameter')
#args = parser.parse_args()
#lr = args.lr

train_json = "/share/datasets/coco/scratch/annotations/instances_train.json"
val_json = "/share/datasets/coco/scratch/annotations/instances_validation.json"
test_json = "/share/datasets/coco/scratch/annotations/instances_test.json"

img_dir = "/share/datasets/coco/images/"
register_coco_instances("scratch_train", {}, train_json, img_dir)
register_coco_instances("scratch_val", {}, val_json, img_dir)
register_coco_instances("scratch_test", {}, val_json, img_dir)

cfg = get_cfg()
cfg.merge_from_file(
    model_zoo.get_config_file(
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.DATASETS.TRAIN = ("scratch_train", )
cfg.DATASETS.TEST = ("scratch_val", )
cfg.DATALOADER.NUM_WORKERS = 4
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
)  # Let training initialize from model zoo
cfg.SOLVER.IMS_PER_BATCH = 1
cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR

# the opening line of this import was lost in extraction
from detectron2.evaluation import (
    SemSegEvaluator,
    inference_on_dataset,
    print_csv_format,
)
from detectron2.modeling import build_model
from detectron2.solver import build_lr_scheduler, build_optimizer
from detectron2.utils.events import (
    CommonMetricPrinter,
    EventStorage,
    JSONWriter,
    TensorboardXWriter,
)

from detectron2.data.datasets import register_coco_instances
register_coco_instances(
    "bridge_dataset_train", {},
    "/home/asd/Mission/GaoFen/bridge_new/data/data_aug_coco/coco/bridge_train_cocostyle.json",
    "/home/asd/Mission/GaoFen/bridge_new/data/data_aug_coco/coco/bridge_train")
register_coco_instances(
    "bridge_dataset_test", {},
    "/home/asd/Mission/GaoFen/bridge_new/data/test_data_coco/bridge_test_cocostyle.json",
    "/home/asd/Mission/GaoFen/bridge_new/data/test_data_coco/bridge_test")

logger = logging.getLogger("detectron2")

from contextlib import contextmanager

from detectron2.utils.comm import get_world_size, is_main_process

from tqdm import tqdm

from PIL import Image
    parser.add_argument("--output", required=True, help="output directory")
    parser.add_argument("--dataset",
                        help="name of the dataset",
                        default="coco_2017_val")
    parser.add_argument("--conf-threshold",
                        default=0.5,
                        type=float,
                        help="confidence threshold")
    args = parser.parse_args()
    from detectron2.data.datasets import register_coco_instances
    # register_coco_instances('asparagus_train_rotated', {'_background_': 0, 'clump': 1, 'stalk': 2, 'spear': 3, 'bar': 4} , "../datasets/coco/annotations/train_rotated_637/annotations.json", "../datasets/coco/annotations/train_rotated_637")
    register_coco_instances(
        'asparagus_val', {
            '_background_': 0,
            'clump': 1,
            'stalk': 2,
            'spear': 3,
            'bar': 4
        }, "../datasets/coco/annotations/val_ASABE/annotations.json",
        "../datasets/coco/annotations/val_ASABE")
    logger = setup_logger()

    with PathManager.open(args.input, "r") as f:
        predictions = json.load(f)

    pred_by_image = defaultdict(list)
    for p in predictions:
        pred_by_image[p["image_id"]].append(p)

    dicts = list(DatasetCatalog.get(args.dataset))
    metadata = MetadataCatalog.get(args.dataset)
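
    # With predictions grouped per image, a hedged threshold-and-count pass
    # over a few samples (not in the original) could look like:
    for dic in dicts[:3]:
        preds = pred_by_image[dic["image_id"]]
        keep = [p for p in preds if p["score"] >= args.conf_threshold]
        print(dic["file_name"], "->", len(keep), "predictions above threshold")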
# Example 24
# import some common libraries
import numpy as np
import os, json, cv2, random

# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog

from detectron2.data.datasets import register_coco_instances

register_coco_instances(
    "custom_coco_val", {},
    "/home/jinsuby/Desktop/PycharmProjects/data/coco/annotations/instances_val2017.json",
    "/home/jinsuby/Desktop/PycharmProjects/data/coco/val2017")

coco_test_metadata = MetadataCatalog.get("custom_coco_val")
coco_test_dataset_dicts = DatasetCatalog.get("custom_coco_val")

# Inference should use the config with parameters that are used in training
# cfg now already contains everything we've set previously. We changed it a little bit for inference:

cfg = get_cfg()
"""
cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml"))
cfg.DATASETS.TRAIN = ("Kia_region_coco_trainval",)
cfg.DATALOADER.NUM_WORKERS = 2
"""
if __name__ == '__main__':
	
	cfg = get_cfg()
	cfg.merge_from_file('../detectron2-ResNeSt/configs/COCO-Detection/'
						'faster_cascade_rcnn_R_50_FPN_syncbn_range-scale_1x.yaml')
	
	cfg.MODEL.DEVICE = "cpu"
	
	cfg.MODEL.BACKBONE.NAME = 'CheXNet' # Change to CheXNet
	# I freeze it manually in __init__
	cfg.MODEL.BACKBONE.FREEZE_AT = 2
	
	# prepare dataset
	register_coco_instances("train", {},
							DATASET_DIR + 'annotations/anno_train.json',
							DATASET_DIR + 'train/', )
	register_coco_instances("valid", {},
							DATASET_DIR + 'annotations/anno_val.json',
							DATASET_DIR + 'val/', )
	cfg.DATASETS.TRAIN = ("train",)
	cfg.DATASETS.TEST = ("valid",)
	
	# assume H < W, scale to (1200 x ?) (1200 < ? < 2000)
	cfg.INPUT.MIN_SIZE_TRAIN = (1200, 1400)
	cfg.INPUT.MAX_SIZE_TRAIN = 2000
	cfg.INPUT.MIN_SIZE_TEST = (1200,)
	cfg.INPUT.MAX_SIZE_TEST = 2000
	cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING = "range"
	
	# Image Size after Crop: H * [0.8 ~ 1] x W * [0.8 ~ 1]
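	# The crop described above corresponds to this detectron2 config (a sketch;
	# values taken from the comment, not from the original code):
	cfg.INPUT.CROP.ENABLED = True
	cfg.INPUT.CROP.TYPE = "relative_range"
	cfg.INPUT.CROP.SIZE = [0.8, 0.8]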
# Example 26
# from detectron2.data.datasets import register_coco_instances
# register_coco_instances("my_dataset_train", {}, "json_annotation_train.json", "path/to/image/dir")
# register_coco_instances("my_dataset_val", {}, "json_annotation_val.json", "path/to/image/dir")

import os
import numpy as np
import json
from detectron2.structures import BoxMode
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import register_coco_instances
from detectron2.data.datasets import load_coco_json

import detectron2  # bind the top-level package name for the __file__ lookup below
print(detectron2.data.datasets.__file__)
print(os.getcwd())
register_coco_instances(
    "coco_train_2", {},
    "./Coco/detectron2/datasets/coco/annotations/instances_train.json",
    "./Coco/detectron2/datasets/coco/train")
register_coco_instances(
    "coco_val_2", {},
    "./Coco/detectron2/datasets/coco/annotations/instances_val.json",
    "./Coco/detectron2/datasets/coco/val")

# import random
#from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets import builtin, load_coco_json

dataset_dicts = load_coco_json(
    "./Coco/detectron2/datasets/coco/annotations/instances_train.json",
    "./Coco/detectron2/datasets/coco/train")
print(dataset_dicts)
coco_train_metadata = MetadataCatalog.get("coco_train_2")
# Example 27
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2021-01-20
# @Author  : Yifer Huang
# @File    : dataset.py
# @Desc    : dataset registration helpers based on detectron2.data.datasets

from detectron2.data.datasets import register_coco_instances

# NOTICE: ILSVRC 2016 DET = ILSVRC 2015 DET = ILSVRC 2014 DET + ILSVRC 2013 DET

# VIDOR
register_coco_instances(
    'vidor_train_16',
    {},
    'datasets/vidor/d2_train_16.json',
    'datasets/vidor/frames@16'
)
register_coco_instances(
    'vidor_test_16',
    {},
    'datasets/vidor/d2_test_16.json',
    'datasets/vidor/frames@16'
)
register_coco_instances(
    'vidor_train_32',
    {},
    'datasets/vidor/d2_train_32.json',
    'datasets/vidor/frames@32'
)
register_coco_instances(
    'vidor_test_32',  # completion assumed from the naming pattern above; the snippet was truncated here
    {},
    'datasets/vidor/d2_test_32.json',
    'datasets/vidor/frames@32'
)
# Example 28
if __name__ == "__main__":
    parser = default_argument_parser()

    # Extra Configurations for dataset names and paths
    parser.add_argument("--dataset_name",          default="", help="The Dataset Name")
    parser.add_argument("--json_annotation_train", default="", metavar="FILE", help="The path to the training set JSON annotation")
    parser.add_argument("--image_path_train",      default="", metavar="FILE", help="The path to the training set image folder")
    parser.add_argument("--json_annotation_val",   default="", metavar="FILE", help="The path to the validation set JSON annotation")
    parser.add_argument("--image_path_val",        default="", metavar="FILE", help="The path to the validation set image folder")

    args = parser.parse_args()
    print("Command Line Args:", args)
    
    # Register Datasets 
    dataset_name = args.dataset_name
    register_coco_instances(f"{dataset_name}-train", {}, 
                            args.json_annotation_train, 
                            args.image_path_train)

    register_coco_instances(f"{dataset_name}-val",   {}, 
                            args.json_annotation_val,   
                            args.image_path_val)

    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
from detectron2.data.datasets import register_coco_instances

register_coco_instances("baseball_setup", {},
                        "./baseball_2019_setup_without_0/result.json",
                        "./baseball_2019_setup_without_0/")

from detectron2.engine import DefaultTrainer
from detectron2.config import get_cfg
import os

cfg = get_cfg()
print(cfg)
cfg.merge_from_file(
    "./detectron2_repo/configs/COCO-Detection/retinanet_R_101_FPN_3x.yaml")
cfg.DATASETS.TRAIN = ("baseball_setup", )
cfg.DATASETS.TEST = ()
cfg.DATALOADER.NUM_WORKERS = 4
cfg.MODEL.WEIGHTS = "detectron2://COCO-Detection/retinanet_R_101_FPN_3x/190397697/model_final_971ab9.pkl"  # initialize from model zoo
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = 0.00005
cfg.OUTPUT_DIR = cfg.OUTPUT_DIR + '_setup'
cfg.SOLVER.MAX_ITER = 10000  # 300 iterations seems good enough, but you can certainly train longer
cfg.MODEL.RETINANET.BATCH_SIZE_PER_IMAGE = 128  # faster, and good enough for this toy dataset
cfg.MODEL.RETINANET.NUM_CLASSES = 3  # 3 classes

os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
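# the snippet stops just short of kicking off training; the missing final call is simply:
trainer.train()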
# Example 30
#cfg.MODEL.WEIGHTS = "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl"  # initialize from model zoo
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = 0.02
cfg.SOLVER.MAX_ITER = 10000  # 300 iterations seems good enough, but you can certainly train longer
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 300  # faster, and good enough for this toy dataset
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2  # 2 classes
cfg.MODEL.WEIGHTS = os.path.join("model_final_final_good.pth")
# cfg.MODEL.WEIGHTS = os.path.join("/content/drive/My Drive/THINGSWENEED/model_final_final_good.pth")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.4  # set the testing threshold for this model
cfg.DATASETS.TEST = ("test")
predictor = DefaultPredictor(cfg)

# we don't know why this is needed
# register_coco_instances("Clutterpics", {}, "clutterpics.json" ,"Clutterpics")
# register_coco_instances("Clutterpics", {},"clutterpics-lesslite.json","Clutterpics")
register_coco_instances("Clutterpics", {}, "clutterpics-lite.json",
                        "Clutterpics")
plastic_metadata = MetadataCatalog.get("Clutterpics")
# oddly, this call needs to run for the classes to show in the visualiser
DatasetCatalog.get("Clutterpics")
"""
import random
from detectron2.utils.visualizer import Visualizer

for i, d in enumerate(random.sample(dataset_dicts, 3)):
    img = cv2.imread(d["file_name"])
    visualizer = Visualizer(img[:, :, ::-1], metadata=plastic_metadata, scale=0.5)
    vis = visualizer.draw_dataset_dict(d)
    cv2.imwrite('{}.png'.format(i),vis.get_image()[:, :, ::-1])
    # cv2_imshow(vis.get_image()[:, :, ::-1])
# """