# %% -------------------- SETUP LOGGER
setup_logger(output=output_dir)

# %% -------------------- REGISTER DATASETS AND METADATA
thing_classes = [
    "Aortic enlargement", "Atelectasis", "Calcification", "Cardiomegaly",
    "Consolidation", "ILD", "Infiltration", "Lung Opacity", "Nodule/Mass",
    "Other lesion", "Pleural effusion", "Pleural thickening", "Pneumothorax",
    "Pulmonary fibrosis"
]

# lambda is an anonymous function; registration stores the loader and calls it lazily
# train dataset
DatasetCatalog.register(
    "train", lambda:
    get_train_detectron_dataset(IMAGE_DIR, train_gt_dataframe, EXTERNAL_DIR +
                                "/512", external_gt_dataframe))
MetadataCatalog.get("train").set(thing_classes=thing_classes)

# validation dataset
DatasetCatalog.register(
    "validation",
    lambda: get_train_detectron_dataset(IMAGE_DIR, val_gt_dataframe))
MetadataCatalog.get("validation").set(thing_classes=thing_classes)

# %% -------------------- CONFIGURATIONS
cfg = get_cfg()

# add the augmentation dictionary to the configuration as a CfgNode (CN)
cfg.aug_kwargs = CN(flag.get("aug_kwargs"))
Example #2
        record["height"] = height
        record["width"] = width
      
        annos = v["regions"]
        objs = []
        for _, anno in annos.items():
            assert not anno["region_attributes"]
            anno = anno["shape_attributes"]
            px = anno["all_points_x"]
            py = anno["all_points_y"]
            poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)]
            poly = list(itertools.chain.from_iterable(poly))

            obj = {
                "bbox": [np.min(px), np.min(py), np.max(px), np.max(py)],
                "bbox_mode": BoxMode.XYXY_ABS,
                "segmentation": [poly],
                "category_id": 0,
                "iscrowd": 0
            }
            objs.append(obj)
        record["annotations"] = objs
        dataset_dicts.append(record)
    return dataset_dicts

for d in ["train", "val"]:
    DatasetCatalog.register("balloon_" + d, lambda d=d: get_balloon_dicts("balloon/" + d))
    MetadataCatalog.get("balloon_" + d).set(thing_classes=["balloon"])
balloon_metadata = MetadataCatalog.get("balloon_train")
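The lambda d=d idiom in the loop above matters: a default argument freezes the current loop value, whereas a bare lambda would be evaluated with whatever d holds at call time ("val" for both datasets). A standalone sketch of the pitfall, with no detectron2 dependency:

splits = ["train", "val"]
late = [lambda: s for s in splits]       # every lambda closes over the same variable
bound = [lambda s=s: s for s in splits]  # the default argument captures each value
print([f() for f in late])               # ['val', 'val']
print([f() for f in bound])              # ['train', 'val']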

Example #3
WORKERS = int(os.getenv("NUM_WORKERS"))  # NUM_WORKERS must be set in the environment, or int(None) raises

# %% --------------------
# DYNAMIC
train_gt_dataframe = MERGED_DIR + "/wbf_merged/90_percent_train/object_detection/90_percent" \
                                  "/train_df_0.csv"
val_gt_dataframe = MERGED_DIR + "/wbf_merged/90_percent_train/object_detection/10_percent" \
                                "/holdout_df_0.csv"

# %% -------------------- SETUP LOGGER
setup_logger(output=DETECTRON2_DIR + "faster_rcnn/outputs/current/")

# %% -------------------- DATASET
# lambda is an anonymous function; registration stores the loader and calls it lazily
# train dataset
DatasetCatalog.register("train", lambda: get_train_detectron_dataset(IMAGE_DIR, train_gt_dataframe))
MetadataCatalog.get("train").set(
    thing_classes=["Aortic enlargement", "Atelectasis", "Calcification", "Cardiomegaly",
                   "Consolidation", "ILD", "Infiltration", "Lung Opacity", "Nodule/Mass",
                   "Other lesion", "Pleural effusion", "Pleural thickening", "Pneumothorax",
                   "Pulmonary fibrosis"])

# validation dataset
DatasetCatalog.register("validation",
                        lambda: get_train_detectron_dataset(IMAGE_DIR, val_gt_dataframe))
MetadataCatalog.get("validation").set(
    thing_classes=["Aortic enlargement", "Atelectasis", "Calcification", "Cardiomegaly",
                   "Consolidation", "ILD", "Infiltration", "Lung Opacity", "Nodule/Mass",
                   "Other lesion", "Pleural effusion", "Pleural thickening", "Pneumothorax",
                   "Pulmonary fibrosis"])
Example #4
import os

from detectron2.config import get_cfg
from detectron2 import model_zoo
from detectron2.engine import DefaultTrainer
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
from loader import get_lesion_dicts

if __name__ == "__main__":
    # Register lesion dataset
    for d in ["train", "val"]:
        DatasetCatalog.register("lesion_" + d,
                                lambda d=d: get_lesion_dicts("lesion/" + d))
        MetadataCatalog.get("lesion_" + d).set(thing_classes=["lesion"])
    lesion_metadata = MetadataCatalog.get("lesion_train")

    # Model configuration
    cfg = get_cfg()
    cfg.merge_from_file(
        model_zoo.get_config_file(
            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    cfg.DATASETS.TRAIN = ("lesion_train", )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 1
    cfg.MODEL.WEIGHTS = "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl"
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025
    cfg.SOLVER.MAX_ITER = 10
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
Example #5
def registerDataset(data_name, d, data, classes):
    DatasetCatalog.register(data_name, lambda d=d: data)
    MetadataCatalog.get(data_name).set(thing_classes=classes)

    return MetadataCatalog.get(data_name)
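Unlike the lazy-loader pattern in the other examples, this helper receives already-built data, so the dicts exist before registration and the lambda merely returns them. A hypothetical call (my_train_dicts is a placeholder list of detectron2-format dicts):

my_metadata = registerDataset("my_train", "train", my_train_dicts, ["cat", "dog"])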
Example #6
def register_hat(name, dirname, split, year):
    DatasetCatalog.register(name, lambda: load_voc_instances(dirname, split))
    MetadataCatalog.get(name).set(thing_classes=CLASS_NAMES,
                                  dirname=dirname,
                                  year=year,
                                  split=split)
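A hypothetical registration call (the directory is a placeholder; CLASS_NAMES and load_voc_instances come from the surrounding module):

register_hat("hat_train", "datasets/hat_voc", "train", 2012)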
Example #7
from detectron2.engine import DefaultTrainer
from detectron2.config import get_cfg
from detectron2 import model_zoo
import json  # needed for json.load below
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
# from detectron2.engine import DefaultPredictor
# from detectron2.utils.visualizer import Visualizer
# from detectron2.evaluation import COCOEvaluator

with open(r'E:\PyProjects\Faster-RCNN\Database\data32\train\data_test.txt',
          'r') as in_file:
    dataset_dicts = json.load(in_file)
# with open(r'E:\PyProjects\Faster-RCNN\Database\data32\val\data_test.txt', 'r') as in_file:
#     testset_dicts = json.load(in_file)

DatasetCatalog.register("mol_train", lambda d="train": dataset_dicts)
MetadataCatalog.get("mol_train").set(thing_classes=["L", "R"])
# DatasetCatalog.register("mol_val" , lambda d="val": testset_dicts)
# MetadataCatalog.get("mol_val").set(thing_classes=["L", "R"])
mol_metadata = MetadataCatalog.get("mol_train")

cfg = get_cfg()
cfg.merge_from_file(
    model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"))
cfg.DATASETS.TRAIN = ("mol_train", )
cfg.DATASETS.TEST = ()
cfg.DATALOADER.NUM_WORKERS = 0
cfg.MODEL.WEIGHTS = "E:\PyProjects\Faster-RCNN\Results\data24\output\model_final.pth"  # Let training initialize from model zoo
# cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml")  # Let training initialize from model zoo
cfg.SOLVER.IMS_PER_BATCH = 1
cfg.SOLVER.BASE_LR = 0.005  # pick a good LR
Example #8
    with open(json_file) as f:
        imgs_anns = json.load(f)
    for idx, (filename, image_dicts) in enumerate(imgs_anns.items()):
        record = {}
        record["file_name"] = filename
        record["id"] = image_dicts["image id"]
        record["height"] = image_dicts["image size"]["height"]
        record["width"] = image_dicts["image size"]["width"]
        images.append(record)
    attrDict["images"] = images
    attrDict["annotations"] = annotations
    attrDict["type"] = "instances"
    return attrDict

# "test": ("/root/data/gvision/dataset/predict/17_01/image_annos/s0.5_17_01_split_test.json",pv_split_test_impath),
DatasetCatalog.register("test" ,lambda:get_test_dicts("/root/data/gvision/dataset/raw_data",annos_file="test_14_15_17.json"))
MetadataCatalog.get("test").set(thing_classes=['visible body','full body','head','vehicle'])

cfg = get_cfg()
cfg.merge_from_file("/root/data/gvision/detectron2-master/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
# cfg.merge_from_file("/root/data/gvision/detectron2-master/configs/COCO-InstanceSegmentation/my_mask_rcnn_R_50_FPN_3x.yaml")
cfg.SOLVER.BASE_LR = 0.02  # pick a good LR
cfg.SOLVER.IMS_PER_BATCH = 2 * 5  # batch_size = 2*5; iters_in_one_epoch = dataset_imgs / batch_size
ITERS_IN_ONE_EPOCH = int(9254 / cfg.SOLVER.IMS_PER_BATCH)
cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS = False
cfg.TEST.DETECTIONS_PER_IMAGE = 500
# ITERS_IN_ONE_EPOCH = int(61 / cfg.SOLVER.IMS_PER_BATCH)
# Checkpoint file names are numbered from this value minus 1; save a checkpoint once per epoch
cfg.SOLVER.CHECKPOINT_PERIOD = ITERS_IN_ONE_EPOCH
# cfg.SOLVER.MAX_ITER = 18260
cfg.SOLVER.MAX_ITER = ITERS_IN_ONE_EPOCH * 20  # 300 iterations is enough for a toy dataset; a practical dataset needs longer training
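The epoch arithmetic above, made explicit: with 9254 training images and IMS_PER_BATCH = 10, one epoch is int(9254 / 10) = 925 iterations, so MAX_ITER = 925 * 20 = 18500 covers roughly 20 epochs and a checkpoint is written once per epoch. A quick sanity check:

assert cfg.SOLVER.IMS_PER_BATCH == 10
assert ITERS_IN_ONE_EPOCH == 925
assert cfg.SOLVER.MAX_ITER == 18500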
Example #9
import PIL
import matplotlib.pyplot as plt
import cv2
import numpy as np

ROOT = os.path.abspath('../../')
DATA_FOLDER = 'data/plates_with_json'
CONFIG = 'config'
WEIGHTS = 'weights'
DEVICE = 'cuda'

sys.path.append(ROOT)

from data_handler.dataset_handler import get_carplate_dicts

DatasetCatalog.register("carplate_train",
                        lambda x='train': get_carplate_dicts(x, ROOT))
DatasetCatalog.register("carplate_val",
                        lambda x='val': get_carplate_dicts(x, ROOT))
MetadataCatalog.get("carplate_val").set(thing_classes=["carplate"])
# carplate_metadata = MetadataCatalog.get("carplate_train")

MetadataCatalog.get("carplate_val").set(evaluator_type='coco')

cfg = get_cfg()
cfg.merge_from_file(os.path.join(ROOT, CONFIG, "mask_rcnn_R_50_FPN_3x.yaml"))
cfg.DATASETS.TRAIN = ("carplate", )
cfg.DATASETS.TEST = ()
cfg.DATALOADER.NUM_WORKERS = 2
cfg.MODEL.DEVICE = DEVICE
cfg.MODEL.WEIGHTS = os.path.join(
    ROOT, WEIGHTS,
Example #10
    "static_object.bicycle_rack", "vehicle.bicycle", "vehicle.bus.bendy",
    "vehicle.bus.rigid", "vehicle.car", "vehicle.construction", "vehicle.ego",
    "vehicle.emergency.ambulance", "vehicle.emergency.police",
    "vehicle.motorcycle", "vehicle.trailer", "vehicle.truck"
]
categories = [
    'human.pedestrian', 'vehicle.car', 'vehicle.bus', 'vehicle.truck',
    'vehicle.cycle', 'vehicle.cycle.withrider'
]
# dataset = 'nuimages_mini'
# version = 'v1.0-train'
dataset = 'nuimages'
version = 'v1.0-mini'
get_dicts = lambda p=root_path, c=categories: load_nuimages_dicts(
    path=p, version=version, categories=c)
DatasetCatalog.register(dataset, get_dicts)
MetadataCatalog.get(dataset).thing_classes = categories
MetadataCatalog.get(dataset).evaluator_type = "coco"
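Assigning thing_classes and evaluator_type as plain attributes, as above, is equivalent to the .set(...) form used in the other examples; Metadata.set(**kwargs) simply assigns each keyword and returns the metadata object, so the two lines collapse to one:

MetadataCatalog.get(dataset).set(thing_classes=categories, evaluator_type="coco")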

dataset_dicts = load_nuimages_dicts(root_path, version, categories)
print(MetadataCatalog.get(dataset))
print(dataset_dicts)

for d in dataset_dicts:
    img = cv2.imread(d["file_name"])
    print(d)
    #print(MetadataCatalog.get(dataset))
    if len(d['annotations']) == 0:
        continue
    visualizer = Visualizer(img[:, :, ::-1],
                            MetadataCatalog.get(dataset),
Example #11
def getInstances():
    setup_logger()
    basePath = os.getcwd()

    # TODO: some or all of this may not be necessary
    annotationTrainDicts = getPickleFile(basePath, "annotations_Train.txt")
    annotationValidateDicts = getPickleFile(basePath,
                                            "annotations_Validation.txt")
    annotationDicts = [annotationTrainDicts, annotationValidateDicts]

    dirNameSet = set()
    maskTypeSet = set()
    for annotationDictList in annotationDicts:
        for annotationDict in annotationDictList:
            parentDirName = os.path.split(
                os.path.split(annotationDict['file_name'])[0])[-1]
            if parentDirName not in dirNameSet:
                dirNameSet.add(parentDirName)
            if isinstance(annotationDict['annotations'][0], list):
                fileMaskType = 'polygon'
            elif isinstance(annotationDict['annotations'][0], dict):
                fileMaskType = 'bitmask'
            else:
                fileMaskType = ''
            assert fileMaskType, 'The annotation dict annotations did not match the expected pattern for polygon or bitmask encoding. Check your annotation creation.'
            if fileMaskType not in maskTypeSet:
                maskTypeSet.add(fileMaskType)
    assert len(
        maskTypeSet
    ) == 1, "The number of detected mask types is not 1, check your annotation creation and file choice."
    # dirNameSet should return {'Train', 'Validation'}
    assert 'Train' in dirNameSet and 'Validation' in dirNameSet, 'You are missing either a Train or Validation directory in your annotations'
    dirnames = [
        'Train', 'Validation'
    ]  # After confirming these directories exist, force the order to match annotationDicts

    rawImage, scaleBarMicronsPerPixel, setupOptions = importRawImageAndScale()

    if not setupOptions.isVerticalSubSection and setupOptions.tiltAngle != 0:
        # Correct for tilt angle, this is equivalent to multiplying the measured length, but is more convenient here
        scaleBarMicronsPerPixel = scaleBarMicronsPerPixel / np.sin(
            np.deg2rad(setupOptions.tiltAngle))
    npImage = np.array(rawImage)

    if npImage.ndim < 3:
        if npImage.ndim == 2:
            # Assuming a grayscale image, copy it into all 3 color channels
            npImage = np.repeat(npImage[:, :, np.newaxis], 3, axis=2)
        else:
            print('The imported rawImage has fewer than 2 dimensions; check the input file.')
            quit()

    if setupOptions.tiltAngle == 0:
        nanowireStr = 'TopDownNanowires'
        for d in range(len(dirnames)):
            if nanowireStr + "_" + dirnames[d] not in DatasetCatalog.list():
                DatasetCatalog.register(nanowireStr + "_" + dirnames[d],
                                        lambda d=d: annotationDicts[d])
            MetadataCatalog.get(nanowireStr + "_" + dirnames[d]).set(
                thing_classes=setupOptions.classNameList)

    else:

        nanowireStr = 'VerticalNanowires'
        for d in range(len(dirnames)):
            if nanowireStr + "_" + dirnames[d] not in DatasetCatalog.list():
                DatasetCatalog.register(nanowireStr + "_" + dirnames[d],
                                        lambda d=d: annotationDicts[d])
            MetadataCatalog.get(nanowireStr + "_" + dirnames[d]).set(
                thing_classes=["VerticalNanowires"])

    DatasetCatalog.get(nanowireStr + '_Train')  # invoke the registered loader once (result discarded)
    nanowire_metadata = MetadataCatalog.get(nanowireStr + "_Train")

    cfg = get_cfg()
    cfg.MODEL.DEVICE = 'cpu'
    # add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library
    cfg.merge_from_file(
        model_zoo.get_config_file(
            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    cfg.MODEL.WEIGHTS = setupOptions.modelPath
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512  # (default: 512, balloon test used 128)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = setupOptions.numClasses  # number of classes (1 when only VerticalNanowires is detected)
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set threshold for this model
    cfg.TEST.DETECTIONS_PER_IMAGE = 2000  # Increased from COCO default, should never have more than 2000 wires per image (default: 100)

    predictor = DefaultPredictor(cfg)

    return predictor, npImage, scaleBarMicronsPerPixel * 1000, setupOptions, nanowire_metadata
Example #12
        train = []
        test = []

        for r in dataset_dicts:
            if r['video_name'] in train_vids:
                train.append(r)
            else:
                test.append(r)

        print("train: {} test: {}".format(len(train), len(test)))
        with open(pickle_path, 'wb') as f:
            pickle.dump((train, test), f, pickle.HIGHEST_PROTOCOL)
    else:
        with open(pickle_path, 'rb') as f:
            (train, test) = pickle.load(f)

    DatasetCatalog.register("nflimpact_train", lambda: train)
    DatasetCatalog.register("nflimpact_test", lambda: test)

    classes = ["helmet"]
    MetadataCatalog.get("nflimpact_train").set(thing_classes=classes)
    MetadataCatalog.get("nflimpact_test").set(thing_classes=classes)

    cfg = new_model_cfg()
    cfg.DATASETS.TRAIN = ("nflimpact_train", )
    cfg.DATASETS.TEST = ("nflimpact_test", )

    trainer = NflImpactTrainer(cfg)
    trainer.resume_or_load(resume=True)
    trainer.train()
Example #13
        # obj = {
        # "bbox": ann,
        # "bbox_mode": BoxMode.XYXY_ABS,
        # "category_id": 1
        # }
        # objs.append(obj)

        record["annotations"] = objs
        dataset_dicts.append(record)

    return dataset_dicts


DatasetCatalog.register(
    "nusc_train",
    lambda d="train": get_dicts(
        "/mnt/nfs/scratch1/pmallya/nusc_kitti/val/image_2/",
        "./output/ground.json"))
DatasetCatalog.register(
    "nusc_infer",
    lambda d="infer": get_dicts(
        "/mnt/nfs/scratch1/pmallya/nusc_kitti/val/image_2/",
        "./output/infer.json"))
# MetadataCatalog.get("nusc_train").set(thing_classes=["barrier", "bicycle", "bus", "car", "construction_vehicle", "motorcycle", "pedestrian", "traffic_cone", "trailer", "truck", "void"])
MetadataCatalog.get("nusc_train").set(thing_classes=["car", "pedestrian"])
MetadataCatalog.get("nusc_infer").set(thing_classes=["car", "pedestrian"])
nusc_metadata_ground = MetadataCatalog.get("nusc_train")
nusc_metadata_infer = MetadataCatalog.get("nusc_infer")

nusc_dicts_ground = get_dicts(
    "/mnt/nfs/scratch1/pmallya/nusc_kitti/val/image_2/",
Example #14
    import cv2
    import os
    from detectron2.data import MetadataCatalog, DatasetCatalog
    from detectron2.utils.visualizer import Visualizer
    from detectron2.evaluation import COCOEvaluator, inference_on_dataset, LVISEvaluator
    from detectron2.data import build_detection_test_loader
    from detectron2.config import get_cfg
    from detectron2 import model_zoo
    from detectron2.engine import DefaultTrainer

    thing_classes = ["Car", "Pedestrian"]

    #DatasetCatalog.register("mots", get_MOTS_dicts)
    #MetadataCatalog.get("mots").set(thing_classes=thing_classes)

    DatasetCatalog.register("kitti-mots", get_KITTI_MOTS_dicts)
    MetadataCatalog.get("kitti-mots").set(thing_classes=thing_classes)

    #dataset_dicts = get_MOTS_dicts()

    # We train the model starting from COCO-pretrained weights.
    cfg = get_cfg()
    cfg.merge_from_file(
        model_zoo.get_config_file(
            "COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml"))
    cfg.DATASETS.TRAIN = ("kitti-mots", )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        "COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml"
    )  # Let training initialize from model zoo
Example #15

def get_caltech_dicts(split):
    json_file = split + '_annos.json'
    with open(json_file) as f:
        imgs_anns = json.load(f)

    for i in range(len(imgs_anns)):
        for j in range(len(imgs_anns[i]['annotations'])):
            imgs_anns[i]['annotations'][j]['bbox_mode'] = BoxMode.XYWH_ABS
        imgs_anns[i]['proposal_bbox_mode'] = BoxMode.XYXY_ABS

    return imgs_anns


# register caltech dataset
for d in ["train", "test"]:
    DatasetCatalog.register("caltech_" + d, lambda d=d: get_caltech_dicts(d))
    MetadataCatalog.get("caltech_" + d).set(thing_classes=["person"])
caltech_metadata = MetadataCatalog.get("caltech_train")

cfg = get_cfg()
cfg.merge_from_file("./configs/faster_rcnn.yaml")
# Training starts from a COCO-Detection checkpoint. To train from scratch, comment this line out.
cfg.MODEL.WEIGHTS = "detectron2://COCO-Detection/faster_rcnn_R_50_C4_1x/137257644/model_final_721ade.pkl"

os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()
Example #16
#-----------------------------------------------------#

myDictGetters = getters.DictionaryGetters(baseDirectory=baseDirectory,
                                          trainDirectory=trainDirectory,
                                          valDirectory=valDirectory)

SharkClassDictionary = myDictGetters.getSharkClassDictionary()
ClassList = myDictGetters.getClassList()


#-----------------------------------------------------#
#                  Register to Catalogs
#-----------------------------------------------------#
for d in ["train", "val"]:
  # Register shark_train and shark_val
  DatasetCatalog.register("shark_" + d, lambda d=d: myDictGetters.getSharkDicts(d))
  MetadataCatalog.get("shark_" + d).set(thing_classes=ClassList)

shark_metadata = MetadataCatalog.get("shark_train")



#-----------------------------------------------------#
#                 Create the config
#-----------------------------------------------------#
modelLink, modelOutputFolderName, meta_arch_override = ModelPaths.GetModelLinks(parser.parse_args().model)

# modelLink = ""
# modelOutputFolderName = ""
# if(parser.parse_args().model == 0):
#   modelLink = "COCO-Detection/retinanet_R_50_FPN_1x.yaml"
Example #17
File: task_a.py (project: EddieMG/T4M5)
                       instance_mode=ColorMode.IMAGE)
        v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
        cv2.imwrite(
            os.path.join(SAVE_PATH, 'Inference_' + model_name + '_inf_' +
                         str(i) + '.png'),
            v.get_image()[:, :, ::-1])


if __name__ == '__main__':
    # Loading data
    print('Loading data')
    dataloader_train = KITTIMOTS_Dataloader()
    dataloader_val = KITTIMOTS_Dataloader()

    def kitti_train():
        return dataloader_train.get_dicts(train_flag=True)

    def kitti_val():
        return dataloader_val.get_dicts(train_flag=False)

    DatasetCatalog.register('KITTIMOTS_train', kitti_train)
    MetadataCatalog.get('KITTIMOTS_train').set(
        thing_classes=list(KITTI_CATEGORIES.keys()))
    DatasetCatalog.register('KITTIMOTS_val', kitti_val)
    MetadataCatalog.get('KITTIMOTS_val').set(
        thing_classes=list(KITTI_CATEGORIES.keys()))

    task_a("MaskRCNN_R_50_FPN_Cityscapes_Aug3",
           "Cityscapes/mask_rcnn_R_50_FPN.yaml",
           augmentation=True)
Example #18
def register_flying_chairs(name, metadata, json_file, image_root):
    DatasetCatalog.register(
        name, lambda: load_flying_chairs_json(name, json_file, image_root))
    MetadataCatalog.get(name).set(**metadata)
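A hypothetical call (paths and metadata values are placeholders; load_flying_chairs_json is assumed from the surrounding module):

register_flying_chairs(
    "flying_chairs_train",
    {"thing_classes": ["chair"]},
    "datasets/flying_chairs/train.json",
    "datasets/flying_chairs/images",
)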
Example #19
def experiment_1(exp_name, model_file):

    print('Running Task B experiment', exp_name)
    SAVE_PATH = os.path.join('./results_week_6_task_b', exp_name)
    os.makedirs(SAVE_PATH, exist_ok=True)

    # Loading data
    print('Loading data')
    kittiloader = KittiMots()

    def rkitti_train():
        return kittiloader.get_dicts(flag='train',
                                     method='complete',
                                     percentage=1.0)

    def rkitti_val():
        return kittiloader.get_dicts(flag='val')

    def rkitti_test():
        return kittiloader.get_dicts(flag='test')

    DatasetCatalog.register('KITTI_train', rkitti_train)
    MetadataCatalog.get('KITTI_train').set(
        thing_classes=list(KITTI_CATEGORIES.keys()))
    DatasetCatalog.register('KITTI_val', rkitti_val)
    MetadataCatalog.get('KITTI_val').set(
        thing_classes=list(KITTI_CATEGORIES.keys()))
    DatasetCatalog.register('KITTI_test', rkitti_test)
    MetadataCatalog.get('KITTI_test').set(
        thing_classes=list(KITTI_CATEGORIES.keys()))

    # Load model and configuration
    print('Loading Model')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    cfg.DATASETS.TRAIN = ('KITTI_train', )
    cfg.DATASETS.TEST = ('KITTI_val', )
    cfg.DATALOADER.NUM_WORKERS = 4
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = SAVE_PATH
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.00025
    cfg.SOLVER.MAX_ITER = 4000
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    cfg.TEST.SCORE_THRESH = 0.5

    # Training
    print('Training')
    trainer = DefaultTrainer(cfg)
    val_loss = ValidationLoss(cfg)
    trainer.register_hooks([val_loss])
    trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]  # swap the last two hooks so ValidationLoss runs before the writer and its losses get logged
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Evaluation
    print('Evaluating')
    cfg.DATASETS.TEST = ('KITTI_test', )
    evaluator = COCOEvaluator('KITTI_test', cfg, False, output_dir=SAVE_PATH)
    trainer.model.load_state_dict(val_loss.weights)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])
    print('Plotting losses')
    plot_validation_loss(cfg, cfg.SOLVER.MAX_ITER, exp_name, SAVE_PATH,
                         'validation_loss.png')

    # Qualitative results: visualize some results
    print('Getting qualitative results')
    predictor = DefaultPredictor(cfg)
    predictor.model.load_state_dict(trainer.model.state_dict())
    inputs = rkitti_test()
    inputs = [inputs[i] for i in TEST_INFERENCE_VALUES]
    for i, input in enumerate(inputs):
        file_name = input['file_name']
        print('Prediction on image ' + file_name)
        img = cv2.imread(file_name)
        outputs = predictor(img)
        v = Visualizer(img[:, :, ::-1],
                       metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
                       scale=0.8,
                       instance_mode=ColorMode.IMAGE)
        v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
        cv2.imwrite(
            os.path.join(SAVE_PATH,
                         'Inference_' + exp_name + '_inf_' + str(i) + '.png'),
            v.get_image()[:, :, ::-1])
Example #20
    base_path + str(i).zfill(4) + '.txt'
    for i in [2, 6, 7, 8, 10, 13, 14, 16, 18]
]

train_paths.append(
    "/home/group00/mcv/datasets/MOTSChallenge/train/instances_txt/0002.txt")
train_paths.append(
    "/home/group00/mcv/datasets/MOTSChallenge/train/instances_txt/0005.txt")
train_paths.append(
    "/home/group00/mcv/datasets/MOTSChallenge/train/instances_txt/0009.txt")
train_paths.append(
    "/home/group00/mcv/datasets/MOTSChallenge/train/instances_txt/0011.txt")

# train dataset
for d in [train_paths]:
    DatasetCatalog.register('train_kitti-mots',
                            lambda d=d: get_kitti_dataset_train(d))
    MetadataCatalog.get('train_kitti-mots').set(
        thing_classes=['Pedestrian', 'Car'])

# val dataset
for d in [val_paths]:
    DatasetCatalog.register('val_kitti-mots',
                            lambda d=d: get_kitti_dataset_val(d))
    MetadataCatalog.get('val_kitti-mots').set(
        thing_classes=['Pedestrian', 'Car'])

# models = ["mask_rcnn_R_50_FPN_3x.yaml","mask_rcnn_R_50_C4_3x.yaml","mask_rcnn_R_50_DC5_3x.yaml","mask_rcnn_R_101_FPN_3x.yaml",  "mask_rcnn_R_101_C4_3x.yaml","mask_rcnn_R_101_DC5_3x.yaml"]
models = ["Cityscapes/mask_rcnn_R_50_FPN.yaml"]

for to_evaluate in models:
    print('* ' * 30, to_evaluate, '* ' * 30)
Example #21
    "84e535cb81d1c5294b0f76de15edfa18_PANSHARPEN.tif",
    "e944ba762190c66444bd657b13809275_PANSHARPEN.tif"
]


def my_dataset():
    with open(training_data, "r") as fin:
        data = json.load(fin)
    filt_data = []
    for d in data:
        if os.path.basename(d["file_name"]) not in BAD:
            filt_data.append(d)
    return filt_data


DatasetCatalog.register("custom_dataset", my_dataset)
metadata = MetadataCatalog.get("custom_dataset")

cfg = get_cfg()
cfg.merge_from_file(
    "configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml")

cfg.DATASETS.TRAIN = ("custom_dataset", )
cfg.DATASETS.TEST = ()
cfg.DATALOADER.NUM_WORKERS = 4

cfg.MODEL.WEIGHTS = "/wdata/pretrained_backbone.pkl"
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = 0.0015  # pick a good LR
cfg.SOLVER.MAX_ITER = 20000
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 64
Example #22
                "bbox_mode":
                BoxMode.XYWH_ABS,
                "segmentation":
                pycocotools.mask.encode(
                    b_a
                ),  #cfg.INPUT.MASK_FORMAT must be set to bitmask if using the default data loader with such format.
                "category_id":
                0,
            }]

        dataset_dicts.append(record)
    return dataset_dicts  # Returns a dict of all images with their respective descriptions
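For the pycocotools.mask.encode(b_a) call above to succeed, the binary mask must be a Fortran-ordered uint8 array. A minimal sketch of preparing one, assuming binary_mask is a hypothetical HxW boolean array:

import numpy as np
import pycocotools.mask as mask_util

b_a = np.asfortranarray(binary_mask.astype(np.uint8))  # encode() requires F-order uint8
rle = mask_util.encode(b_a)                            # COCO run-length encoding dict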


for d in ["train", "test"]:
    DatasetCatalog.register("logo_" + d,
                            lambda d=d: get_logos(path + d + "set.txt"))
    MetadataCatalog.get("logo_" + d).set(thing_classes=["logo"])

model = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(model))
cfg.INPUT.MASK_FORMAT = 'bitmask'
cfg.DATASETS.TRAIN = ("logo_train", )  # Train with the logos dataset
cfg.DATASETS.TEST = ()  # Train with the logos dataset

cfg.DATALOADER.NUM_WORKERS = 2
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
    model)  # Let training initialize from model zoo
cfg.SOLVER.IMS_PER_BATCH = 1
cfg.SOLVER.BASE_LR = 0.001  # pick a good LR
Example #23
def train(model_name: str,
          results_path: str,
          train_idx: List[int],
          test_idx: List[int],
          train_annotations: OrderedDict,
          test_annotations: OrderedDict,
          lr: float = 0.0025,
          max_it: int = 500,
          img_per_batch: int = 16,
          batch_size: int = 512,
          num_freeze: int = 1) -> None:

    if Path(results_path).exists():
        shutil.rmtree(results_path)

    os.makedirs(results_path, exist_ok=True)

    for catalog_type in ['train', 'test']:
        catalog = f'aic19_{catalog_type}'
        if catalog in DatasetCatalog.list():
            DatasetCatalog.remove(catalog)

        if catalog_type == 'train':
            DatasetCatalog.register(
                catalog,
                lambda d=catalog_type: get_dicts(train_idx, train_annotations))
        else:
            DatasetCatalog.register(
                catalog,
                lambda d=catalog_type: get_dicts(test_idx, test_annotations))

        MetadataCatalog.get(catalog).set(thing_classes=['Car'])

    cfg = get_cfg()
    cfg.OUTPUT_DIR = results_path
    cfg.merge_from_file(model_zoo.get_config_file(model_name))

    cfg.DATASETS.TRAIN = ('aic19_train', )
    cfg.DATASETS.TEST = ()

    cfg.DATALOADER.NUM_WORKERS = 16

    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_name)
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    #cfg.MODEL.BACKBONE.FREEZE_AT = 1
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = batch_size
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1

    cfg.INPUT.MAX_SIZE_TEST = 1200
    cfg.INPUT.MAX_SIZE_TRAIN = 1200

    cfg.SOLVER.IMS_PER_BATCH = img_per_batch
    cfg.SOLVER.BASE_LR = lr
    cfg.SOLVER.MAX_ITER = max_it
    cfg.SOLVER.STEPS = []

    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()

    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, 'model_final.pth')

    evaluator = COCOEvaluator('aic19_test',
                              cfg,
                              False,
                              output_dir=results_path)
    val_loader = build_detection_test_loader(cfg, "aic19_test")
    print(inference_on_dataset(trainer.model, val_loader, evaluator))
Example #24
def register_pascal_voc(name, dirname, split, year, class_names=CLASS_NAMES):
    DatasetCatalog.register(name, lambda: load_voc_instances(dirname, split, class_names))
    MetadataCatalog.get(name).set(
        thing_classes=list(class_names), dirname=dirname, year=year, split=split
    )
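A hypothetical registration of the standard VOC splits with this helper (the root directory is a placeholder):

for year, split in [(2007, "trainval"), (2007, "test"), (2012, "trainval")]:
    register_pascal_voc("voc_{}_{}".format(year, split), "datasets/VOC{}".format(year), split, year)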
Example #25
            lambda d=d: get_COCO14_modal_dicts(
                "instances_val.json",
                "/Users/lizhixuan/PycharmProjects/amodal_dataset/KINS/image/testing"
            ))
        MetadataCatalog.get("KINS_" + d).set(thing_classes=[
            'cyclist', 'pedestrian', "no", 'car', "tram", "truck", "van",
            "misc"
        ])
    print("REGISTERED!")


if __name__ == '__main__':
    for d in ["val"]:
        DatasetCatalog.register(
            "KINS_" + d,
            lambda d=d: get_COCO14_modal_dicts(
                "instances_val.json",
                "/Users/lizhixuan/PycharmProjects/amodal_dataset/KINS/image/testing"
            ))
        MetadataCatalog.get("KINS_" + d).set(thing_classes=[
            'cyclist', 'pedestrian', "no", 'car', "tram", "truck", "van",
            "misc"
        ])
    kins_metadata = MetadataCatalog.get("KINS_val")

    dataset_dicts = get_COCO14_modal_dicts(
        "instances_val.json",
        "/Users/lizhixuan/PycharmProjects/amodal_dataset/KINS/image/testing")

    for i in range(len(dataset_dicts)):
        print("[ {} / {} ] saving KINS gt vis".format(i, len(dataset_dicts)))
        d = dataset_dicts[i]
Example #26
    return dataset_dicts


# Register dataset and metadata catalogues
# Older detectron2 versions kept the registry in the private _REGISTERED dict;
# newer releases expose DatasetCatalog.list() and DatasetCatalog.remove() instead.
DatasetCatalog._REGISTERED.clear()
# del DatasetCatalog._REGISTERED['weapons_train']
# del DatasetCatalog._REGISTERED['weapons_val']
print(DatasetCatalog._REGISTERED)
print(len(DatasetCatalog._REGISTERED))

entriesToRemove = ('weapons_train', 'weapons_val')
for k in entriesToRemove:
    DatasetCatalog._REGISTERED.pop(k, None)

for d in ["train", "val"]:
    DatasetCatalog.register("weapons_" + d, lambda d=d: create_dataset_dicts(
        train_df if d == "train" else test_df, classes))
    MetadataCatalog.get("weapons_" + d).set(thing_classes=classes)

statement_metadata = MetadataCatalog.get("weapons_train")


class CocoTrainer(DefaultTrainer):

    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):

        if output_folder is None:
            os.makedirs("coco_eval", exist_ok=True)
            output_folder = "coco_eval"
            # output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        return COCOEvaluator(dataset_name, cfg, False, output_folder)
Example #27
PREDEFINED_SPLITS_DATASET = {
    "obj365_train": (TRAIN_PATH, TRAIN_JSON),
    "obj365_val": (VAL_PATH, VAL_JSON)
}

rootdir = "/home/houbowei/adet/"
json_path = os.path.join(rootdir, "./datasets/obj365/objects365_Tiny_train.json")
with open(json_path, 'r') as f:
    obj = json.load(f)
things = [0] * 65
for cat in obj['categories']:
    things[cat['id'] - 301] = cat['name']

thing_dataset_id_to_contiguous_id = {i + 301: i for i in range(65)}
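The mapping above shifts the Objects365-Tiny category ids 301..365 down to the contiguous range 0..64 that the classifier head expects:

assert thing_dataset_id_to_contiguous_id[301] == 0
assert thing_dataset_id_to_contiguous_id[365] == 64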

DatasetCatalog.register("obj365_train",
                        lambda: load_coco_json(TRAIN_JSON, TRAIN_PATH, "coco"))
DatasetCatalog.register("obj365_val",
                        lambda: load_coco_json(VAL_JSON, VAL_PATH, "coco"))

MetadataCatalog.get('obj365_val').set(
    evaluator_type='coco',
    json_file=VAL_JSON,
    image_root=VAL_PATH,
    thing_classes=things,
    thing_dataset_id_to_contiguous_id=thing_dataset_id_to_contiguous_id)
MetadataCatalog.get('obj365_train').set(
    evaluator_type='coco',
    json_file=TRAIN_JSON,
    image_root=TRAIN_PATH,
    thing_classes=things,
    thing_dataset_id_to_contiguous_id=thing_dataset_id_to_contiguous_id)
Example #28
def register_all_cityscapes_panoptic(root):
    meta = {}
    # The following metadata maps contiguous ids from [0, #thing categories +
    # #stuff categories) to their names and colors. We have to replicate the
    # same names and colors under both "thing_*" and "stuff_*" because the
    # current visualization function in D2 handles thing and stuff classes
    # differently due to a heuristic used in Panoptic FPN. We keep the same
    # naming to enable reusing existing visualization functions.
    thing_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
    thing_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
    stuff_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
    stuff_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]

    meta["thing_classes"] = thing_classes
    meta["thing_colors"] = thing_colors
    meta["stuff_classes"] = stuff_classes
    meta["stuff_colors"] = stuff_colors

    # There are three types of ids in cityscapes panoptic segmentation:
    # (1) category id: like semantic segmentation, it is the class id for each
    #   pixel. Since there are some classes not used in evaluation, the category
    #   id is not always contiguous and thus we have two set of category ids:
    #       - original category id: category id in the original dataset, mainly
    #           used for evaluation.
    #       - contiguous category id: [0, #classes), in order to train the classifier
    # (2) instance id: this id is used to differentiate different instances from
    #   the same category. For "stuff" classes, the instance id is always 0; for
    #   "thing" classes, the instance id starts from 1 and 0 is reserved for
    #   ignored instances (e.g. crowd annotation).
    # (3) panoptic id: this is the compact id that encode both category and
    #   instance id by: category_id * 1000 + instance_id.
    thing_dataset_id_to_contiguous_id = {}
    stuff_dataset_id_to_contiguous_id = {}

    for k in CITYSCAPES_CATEGORIES:
        if k["isthing"] == 1:
            thing_dataset_id_to_contiguous_id[k["id"]] = k["trainId"]
        else:
            stuff_dataset_id_to_contiguous_id[k["id"]] = k["trainId"]

    meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
    meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id

    for key, (image_dir, gt_dir, gt_json) in _RAW_CITYSCAPES_PANOPTIC_SPLITS.items():
        image_dir = os.path.join(root, image_dir)
        gt_dir = os.path.join(root, gt_dir)
        gt_json = os.path.join(root, gt_json)

        DatasetCatalog.register(
            key, lambda x=image_dir, y=gt_dir, z=gt_json: load_cityscapes_panoptic(x, y, z, meta)
        )
        MetadataCatalog.get(key).set(
            panoptic_root=gt_dir,
            image_root=image_dir,
            panoptic_json=gt_json,
            gt_dir=gt_dir.replace("cityscapes_panoptic_", ""),
            evaluator_type="cityscapes_panoptic_seg",
            ignore_label=255,
            label_divisor=1000,
            **meta,
        )
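The panoptic id convention described above (category_id * 1000 + instance_id, matching label_divisor=1000) decodes with simple integer arithmetic; a minimal sketch:

def decode_panoptic_id(panoptic_id, label_divisor=1000):
    # invert category_id * label_divisor + instance_id
    return panoptic_id // label_divisor, panoptic_id % label_divisor

assert decode_panoptic_id(26001) == (26, 1)  # Cityscapes "car" (id 26), instance 1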
Example #29
                         np.max(py)],
                "bbox_mode": BoxMode.XYXY_ABS,
                "segmentation": [poly],
                "category_id": 0,
                # "Things" are well-defined countable objects,
                # while "stuff" is amorphous something with a different label than the background.
            }
            objs.append(obj)
        record["annotations"] = objs
        dataset_dicts.append(record)
    return dataset_dicts


for d in ["train", "val"]:
    DatasetCatalog.register("renner_" + d,
                            lambda d=d: get_balloon_dicts("surface_img/" + d))
    MetadataCatalog.get("renner_" + d).set(thing_classes=["renner"])
renner_metadata = MetadataCatalog.get("renner_train")
# dataset_dicts = get_balloon_dicts("surface_img/train")
cfg = get_cfg()
cfg.MODEL.DEVICE = 'cpu'

cfg.merge_from_file(
    model_zoo.get_config_file(
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.DATASETS.TRAIN = ("renner_train", )
cfg.DATASETS.TEST = ()
cfg.DATALOADER.NUM_WORKERS = 2
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
)  # Let training initialize from model zoo
Example #30
from scripts.utils import Trainer  # custom trainer

config_path = r"/custom_configs/retinanet_ambulance.yaml"

# Start the logger for verbose output (e.g. loss printing)
setup_logger()

# Data preparation
dataset_name = 'toy_ambulance'
ratio = 0.8
data_path = Path(r"/home/jsieb/Downloads/data/drone2go")

dataset_name_train = dataset_name + '_train'
dataset_name_test = dataset_name + '_test'

DatasetCatalog.register(dataset_name,
                        lambda: retinanet_dataloader(input_path=data_path))
DatasetCatalog.register(
    dataset_name_train, lambda: retinanet_dataloader(
        input_path=data_path, mode='train', ratio=ratio))
DatasetCatalog.register(
    dataset_name_test, lambda: retinanet_dataloader(
        input_path=data_path, mode='test', ratio=ratio))
# Reserved by detectron2:
# thing_classes, thing_colors, stuff_classes, stuff_colors, keypoint_names, keypoint_flip_map, keypoint_connection_rules
MetadataCatalog.get(dataset_name).set(thing_classes=['ambulance'])
MetadataCatalog.get(dataset_name_train).set(thing_classes=['ambulance'])
MetadataCatalog.get(dataset_name_test).set(thing_classes=['ambulance'])
#MetadataCatalog.get(dataset_name).data_folder = data_path

#dataset_name = dataset_name_train
data = DatasetCatalog.get(dataset_name)