Example #1
    #     plt.imshow(vis.get_image()[:, :, ::-1])

    trainer.resume_or_load(resume=False)
    trainer.train()

    output_dir = cfg['OUTPUT_DIR']
    save_dir = os.path.join(output_dir, 'result')
    os.makedirs(save_dir, exist_ok=True)

    ##### Model testing
    cfg.MODEL.WEIGHTS = os.path.join(output_dir, "model_final.pth")
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.2  # set the testing threshold for this model
    cfg.DATASETS.TEST = (regist_val_name, )
    predictor = DefaultPredictor(cfg)

    evaluator = COCOEvaluator(regist_val_name, cfg, False, output_dir=save_dir)
    val_loader = build_detection_test_loader(cfg, regist_val_name)
    my_eval = inference_on_dataset(trainer.model, val_loader, evaluator)

    log_file = os.path.join(save_dir, "log.txt")
    with open(log_file, "a") as f:
        print(my_eval, file=f)
    print("Evaluation results:\n {}".format(my_eval))

    test_dir = val_images_dir
    imgs_list = [
        os.path.join(test_dir, file_name) for file_name in os.listdir(test_dir)
        if file_name.endswith(".jpg") or file_name.endswith(".png")
        or file_name.endswith(".bmp") or file_name.endswith(".jpeg")
    ]

    for d in imgs_list:
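        # The snippet is truncated here; a plausible loop body, as a sketch
        # (assuming cv2, Visualizer and MetadataCatalog are imported):
        im = cv2.imread(d)
        outputs = predictor(im)
        v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(regist_val_name))
        vis = v.draw_instance_predictions(outputs["instances"].to("cpu"))
        cv2.imwrite(os.path.join(save_dir, os.path.basename(d)),
                    vis.get_image()[:, :, ::-1])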
Example #2
    def evaluate(self, trainer, output_val):
        evaluator = COCOEvaluator("dataset_val",
                                  self.cfg,
                                  False,
                                  output_dir=output_val)
        trainer.test(self.cfg, trainer.model, evaluators=evaluator)
Example #3
    torch_model = build_model(cfg)
    DetectionCheckpointer(torch_model).resume_or_load(cfg.MODEL.WEIGHTS)
    torch_model.eval()

    # get a sample data
    data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
    first_batch = next(iter(data_loader))

    # convert and save model
    if args.export_method == "caffe2_tracing":
        exported_model = export_caffe2_tracing(cfg, torch_model, first_batch)
    elif args.export_method == "scripting":
        exported_model = export_scripting(torch_model)
    elif args.export_method == "tracing":
        exported_model = export_tracing(torch_model, first_batch)

    # run evaluation with the converted model
    if args.run_eval:
        assert exported_model is not None, (
            "Python inference is not implemented for "
            f"export_method={args.export_method}, format={args.format}.")
        logger.info(
            "Running evaluation ... this takes a long time if you export to CPU."
        )
        dataset = cfg.DATASETS.TEST[0]
        data_loader = build_detection_test_loader(cfg, dataset)
        # NOTE: hard-coded evaluator. change to the evaluator for your dataset
        evaluator = COCOEvaluator(dataset, output_dir=args.output)
        metrics = inference_on_dataset(exported_model, data_loader, evaluator)
        print_csv_format(metrics)
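    # The above mirrors detectron2's tools/deploy/export_model.py; a typical
    # invocation (paths assumed) would be:
    #   python export_model.py --config-file cfg.yaml --export-method tracing \
    #       --format torchscript --output ./deploy --run-eval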
Example #4
    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        if output_folder is None:
            makedirs("coco_eval", exist_ok=True)
            output_folder = "coco_eval"
        return COCOEvaluator(dataset_name, cfg, False, output_folder)
Example #5
    @classmethod
    def build_evaluator(cls, cfg, dataset_name):
        return COCOEvaluator(dataset_name, output_dir=cfg.OUTPUT_DIR)
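Examples #4 and #5 show the two COCOEvaluator calling conventions that recur on this page: the older positional form COCOEvaluator(dataset_name, cfg, distributed, output_dir) and the current keyword form, where cfg is no longer required and the evaluation tasks are inferred from the dataset. A minimal side-by-side sketch, assuming a registered dataset "my_val":

evaluator = COCOEvaluator("my_val", cfg, False, "./output/")   # legacy signature
evaluator = COCOEvaluator("my_val", output_dir="./output/")    # current signature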
Example #6
cfg_target.DATASETS.TRAIN = ("dataset_train_real", )
cfg_target.INPUT.MIN_SIZE_TRAIN = (0, )
cfg_target.DATALOADER.NUM_WORKERS = 0
cfg_target.SOLVER.IMS_PER_BATCH = 2

cfg_target2 = get_cfg()
cfg_target2.DATASETS.TRAIN = ("dataset_train_real2", )
cfg_target2.INPUT.MIN_SIZE_TRAIN = (0, )
cfg_target2.DATALOADER.NUM_WORKERS = 0
cfg_target2.SOLVER.IMS_PER_BATCH = 2

do_train(cfg_source, cfg_target, cfg_target2, model)

#test Hololens
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
evaluator = COCOEvaluator("dataset_test_real",
                          cfg_source,
                          False,
                          output_dir="./output/")
val_loader = build_detection_test_loader(cfg_source, "dataset_test_real")
inference_on_dataset(model, val_loader, evaluator)

#test GoPro
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
evaluator = COCOEvaluator("dataset_test_real2",
                          cfg_source,
                          False,
                          output_dir="./output/")
val_loader = build_detection_test_loader(cfg_source, "dataset_test_real2")
inference_on_dataset(model, val_loader, evaluator)
Example #7
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 8
cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(classes)

cfg.TEST.EVAL_PERIOD = 500
# cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
cfg.MODEL.WEIGHTS = "../output/model_final.pth"
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.85

os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

trainer = CocoTrainer(cfg)
trainer.resume_or_load(resume=False)

predictor = DefaultPredictor(cfg)

evaluator = COCOEvaluator("weapons_val", cfg, False, output_dir="./output/")
val_loader = build_detection_test_loader(cfg, "weapons_val")
inference_on_dataset(trainer.model, val_loader, evaluator)

# experiment_folder = './output/model_iter4000_lr0005_wf1_date2020_03_20__05_16_45'
experiment_folder = './output/'


def load_json_arr(json_path):
    lines = []
    with open(json_path, 'r') as f:
        for line in f:
            lines.append(json.loads(line))
    return lines
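
# Hypothetical usage of load_json_arr (assumes matplotlib.pyplot as plt):
# plot the training-loss curve from the metrics.json that detectron2 writes
# into the experiment folder.
experiment_metrics = load_json_arr(os.path.join(experiment_folder, 'metrics.json'))
plt.plot(
    [x['iteration'] for x in experiment_metrics if 'total_loss' in x],
    [x['total_loss'] for x in experiment_metrics if 'total_loss' in x])
plt.xlabel('iteration')
plt.ylabel('total_loss')
plt.show()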

Example #8
def train_task(model_name, model_file):
    path = os.path.join(SAVE_PATH, 'train_task', model_name)
    os.makedirs(path, exist_ok=True)
    # Load Data
    print('Loading Data.')
    dataloader = KITTI_Dataloader()
    def kitti_train(): return dataloader.get_dicts(train_flag=True)
    def kitti_test(): return dataloader.get_dicts(train_flag=False)
    DatasetCatalog.register("KITTI_train", kitti_train)
    MetadataCatalog.get("KITTI_train").set(thing_classes=[k for k,_ in CATEGORIES.items()])
    DatasetCatalog.register("KITTI_test", kitti_test)
    MetadataCatalog.get("KITTI_test").set(thing_classes=[k for k,_ in CATEGORIES.items()])

    # Load MODEL and configure train hyperparameters
    print('Loading Model.')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    cfg.DATASETS.TRAIN = ('KITTI_train',)
    cfg.DATASETS.TEST = ('KITTI_test',)
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.OUTPUT_DIR = SAVE_PATH
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.00025
    cfg.SOLVER.MAX_ITER = NUM_IMGS // cfg.SOLVER.IMS_PER_BATCH + 1 
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 9

    # TRAIN!!
    print('Training.......')
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg) 
    trainer.resume_or_load(resume=False)
    trainer.train()
    print('Training Done.')

    # EVAL
    print('Evaluating......')
    # cfg.TEST.KEYPOINT_OKS_SIGMAS  # no-op: bare attribute access has no effect
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, 'model_final.pth')
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    predictor = DefaultPredictor(cfg)
    dataset_dicts = kitti_test()
    for i,d in enumerate(random.sample(dataset_dicts, 5)):    
        im = cv2.imread(d['file_name'])
        outputs = predictor(im)
        v = Visualizer(im[:, :, ::-1],
                   metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
                   scale=0.8, 
                   instance_mode=ColorMode.IMAGE)
        v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
        cv2.imwrite(os.path.join(path, 'Evaluation_' + model_name + '_trained_' + str(i) + '.png'), v.get_image()[:, :, ::-1])
    print('COCO EVALUATOR....')
    evaluator = COCOEvaluator('KITTI_test', cfg, False, output_dir="./output/")
    trainer.test(cfg, trainer.model, evaluators=[evaluator])

    # Loading training and test examples
    inference_dataloader = Inference_Dataloader(MIT_DATA_DIR)
    inference_dataset = inference_dataloader.load_data()

    # Qualitative results: visualize some prediction results on MIT_split dataset
    for i, img_path in enumerate([i for i in inference_dataset['test'] if 'inside_city' in i][:20]):
        img = cv2.imread(img_path)
        outputs = predictor(img)
        v = Visualizer(
            img[:, :, ::-1],
            metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),

            scale=0.8, 
            instance_mode=ColorMode.IMAGE)
        v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
        cv2.imwrite(os.path.join(path, 'Inference_' + model_name + '_trained_' + str(i) + '.png'), v.get_image()[:, :, ::-1])
    
    """
    val_loader = build_detection_test_loader(cfg, 'KITTI_test')
    inference_on_dataset(trainer.model, val_loader, evaluator)
    """
    print('DONE!!')
Example #9
cfg.DATASETS.TEST = ("basepath_Val", )
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
predictor = DefaultPredictor(cfg)

p1 = '/content/Synthetic Train Set - Detection & Recognition/Train/Image'
x = 0
for i in os.listdir(p1):
    x += len(os.listdir(os.path.join(p1, i)))

print(x)

from detectron2.utils.visualizer import ColorMode
dataset_dicts = get_dicts("/content/Synthetic Train Set - Detection & Recognition/Val")
for d in random.sample(dataset_dicts, 3):    
    im = cv2.imread(d["file_name"])
    outputs = predictor(im)
    v = Visualizer(im[:, :, ::-1],
                   metadata=My_metadata, 
                   scale=0.8, 
                   instance_mode=ColorMode.IMAGE_BW   # remove the colors of unsegmented pixels
    )
    v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    cv2_imshow(v.get_image()[:, :, ::-1])

from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader
evaluator = COCOEvaluator("basepath_Val", cfg, False, output_dir="./output/")
val_loader = build_detection_test_loader(cfg, "basepath_Val")
inference_on_dataset(trainer.model, val_loader, evaluator)

print(dataset_dicts[0])
Example #10
def retrain_detector(settings):
    """
    settings: properties to be used in the retraining process

    Splits the COCO-formatted data located in annotation_path, then trains and 
    evaluates a Detectron2 model from scratch. The resulting model is saved in 
    the model_path/ folder. 

    Returns an object mapping different AP (average precision) metrics to the 
    model's scores. 
    """

    if len(settings) == 0:
        settings["trainSplit"] = 0.7
        settings["learningRate"] = 0.005
        settings["maxIters"] = 100

    base_path = "annotation_data/"
    coco_path = os.path.join(base_path, "coco")
    output_path = os.path.join(base_path, "output")
    annotation_path = os.path.join(coco_path, "coco_results.json")
    train_path = os.path.join(coco_path, "train.json")
    test_path = os.path.join(coco_path, "test.json")

    # 1) Split coco json file into train and test using cocosplit code
    # Adapted from https://github.com/akarazniewicz/cocosplit/blob/master/cocosplit.py
    with open(annotation_path, "rt", encoding="UTF-8") as annotations_file:

        # Extract info from json
        coco = json.load(annotations_file)
        info = coco["info"]
        licenses = coco["licenses"]
        images = coco["images"]
        annotations = coco["annotations"]
        categories = coco["categories"]

        # Remove images without annotations
        images_with_annotations = set(
            map(lambda a: int(a["image_id"]), annotations))
        images = list(
            filter(lambda i: i["id"] in images_with_annotations, images))

        # Split images and annotations
        x_images, y_images = train_test_split(
            images, train_size=settings["trainSplit"])
        x_ids = list(map(lambda i: int(i["id"]), x_images))
        x_annots = list(
            filter(lambda a: int(a["image_id"]) in x_ids, annotations))
        y_ids = list(map(lambda i: int(i["id"]), y_images))
        y_annots = list(
            filter(lambda a: int(a["image_id"]) in y_ids, annotations))

        # Save to file
        def save_coco(file, info, licenses, images, annotations, categories):
            with open(file, 'wt', encoding="UTF-8") as coco:
                json.dump(
                    {
                        "info": info,
                        "licenses": licenses,
                        "images": images,
                        "annotations": annotations,
                        "categories": categories
                    },
                    coco,
                    indent=2,
                    sort_keys=True)

        save_coco(train_path, info, licenses, x_images, x_annots, categories)
        save_coco(test_path, info, licenses, y_images, y_annots, categories)

    # 2) Use train/test files to retrain detector
    dataset_name = "annotation_coco"
    image_dir = base_path + "rgb/"
    train_data = dataset_name + "_train"
    test_data = dataset_name + "_test"

    DatasetCatalog.clear()
    MetadataCatalog.clear()
    register_coco_instances(train_data, {}, train_path, image_dir)
    register_coco_instances(test_data, {}, test_path, image_dir)

    MetadataCatalog.get(train_data)
    coco_yaml = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"

    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(coco_yaml))
    cfg.DATASETS.TRAIN = (train_data, )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(categories)
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        coco_yaml)  # Let training initialize from model zoo
    cfg.OUTPUT_DIR = output_path
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = settings["learningRate"]  # Make sure LR is good
    cfg.SOLVER.MAX_ITER = settings[
        "maxIters"]  # 300 is good for small datasets

    # Train
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Move model to most recent model folder
    model_dir = os.path.join(base_path, "model")
    model_names = os.listdir(model_dir)
    # Get highest x for model/vx
    model_dirs = list(
        filter(lambda n: os.path.isdir(os.path.join(model_dir, n)),
               model_names))
    model_nums = list(map(lambda x: int(x.split("v")[1]), model_dirs))
    last_model_num = max(model_nums)
    # Move the trained model into the newest version folder
    model_path = os.path.join(model_dir, "v" + str(last_model_num))
    new_model_path = os.path.join(model_path, "model_999.pth")
    old_model_path = os.path.join(output_path, "model_final.pth")
    os.replace(old_model_path, new_model_path)

    # Evaluate
    evaluator = COCOEvaluator(test_data, ("bbox", "segm"),
                              False,
                              output_dir="../../annotation_data/output/")
    val_loader = build_detection_test_loader(cfg, test_data)
    inference = inference_on_dataset(trainer.model, val_loader, evaluator)

    # inference keys: bbox, segm
    # bbox and segm keys: AP, AP50, AP75, APs, APm, APl, AP-category1, ...
    inference_json = json.loads(json.dumps(inference).replace("NaN", "null"))
    return inference_json
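
# Hypothetical usage: an empty settings dict falls back to the defaults set at
# the top of retrain_detector (70/30 split, LR 0.005, 100 iterations):
#   metrics = retrain_detector({})
#   print(metrics["bbox"]["AP"], metrics["segm"]["AP"])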
Example #11
def evaluate(cfg):
    # Quantitative results: compute AP
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=True)  # load cfg.MODEL.WEIGHTS / last checkpoint before evaluating
    evaluator = COCOEvaluator('MIT_split_test', cfg, False)
    val_loader = build_detection_test_loader(cfg, 'MIT_split_test')
    inference_on_dataset(trainer.model, val_loader, evaluator)
Example #12
def task_b(model_name, model_file, percentage, augmentation=False):

    try:
        dataloader_train_v_r = Virtual_Real_KITTI()

        def virtual_real_kitti():
            return dataloader_train_v_r.get_dicts(percentage)

        DatasetCatalog.register('VirtualReal', virtual_real_kitti)
        MetadataCatalog.get('VirtualReal').set(
            thing_classes=list(KITTI_CATEGORIES.keys()))
    except Exception:
        print("VirtualReal already defined!")

    model_name = model_name + '_inference'
    print('Running task B for model', model_name)

    SAVE_PATH = os.path.join('./results_week_6_task_b', model_name)
    os.makedirs(SAVE_PATH, exist_ok=True)

    # Load model and configuration
    print('Loading Model')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    cfg.DATASETS.TRAIN = ('VirtualReal', )
    cfg.DATASETS.TEST = ('KITTIMOTS_test', )
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = SAVE_PATH

    #load saved model
    '''checkpoint = '/home/grupo04/jobs_w6/results_week_6_task_b/MaskRCNN_R_50_FPN_Cityscapes_2_inference/model_final.pth'
    last_checkpoint = torch.load(checkpoint)
    new_path = checkpoint.split('.')[0] + '_modified.pth'
    last_checkpoint['iteration'] = -1
    torch.save(last_checkpoint, new_path)
    cfg.MODEL.WEIGHTS = new_path'''

    #load a model from the detectron2 model zoo
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)

    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.00025
    cfg.SOLVER.MAX_ITER = 1000
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    cfg.TEST.SCORE_THRESH = 0.5  # note: not a built-in detectron2 config key
    print(cfg)
    # Training

    print('Training')
    if augmentation:
        print("data augmentation")
        trainer = OurTrainer(cfg)
    else:
        print("NO data augmentation")
        trainer = DefaultTrainer(cfg)

    val_loss = ValidationLoss(cfg)
    trainer.register_hooks([val_loss])
    # swap the last two hooks so the ValidationLoss hook runs before the writer
    trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Evaluation
    print('Evaluating')
    evaluator = COCOEvaluator('KITTIMOTS_test',
                              cfg,
                              False,
                              output_dir='./output')
    trainer.model.load_state_dict(val_loss.weights)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])
    print('Plotting losses')
    draw_loss(cfg, cfg.SOLVER.MAX_ITER, model_name, SAVE_PATH)

    # Qualitative results: visualize some results
    print('Getting qualitative results')
    predictor = DefaultPredictor(cfg)
    predictor.model.load_state_dict(trainer.model.state_dict())
    inputs = kitti_val()
    inputs = inputs[:20] + inputs[-20:]
    for i, input in enumerate(inputs):
        file_name = input['file_name']
        print('Prediction on image ' + file_name)
        img = cv2.imread(file_name)
        outputs = predictor(img)
        v = Visualizer(img[:, :, ::-1],
                       metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
                       scale=0.8,
                       instance_mode=ColorMode.IMAGE)
        v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
        cv2.imwrite(
            os.path.join(SAVE_PATH, 'Inference_' + model_name + '_inf_' +
                         str(i) + '.png'),
            v.get_image()[:, :, ::-1])
Example #13
    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "coco_eval")
            os.makedirs(output_folder, exist_ok=True)
        return COCOEvaluator(dataset_name, cfg, False, output_folder)
Example #14
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Look at training curves in tensorboard:
    # %load_ext tensorboard
    # %tensorboard --logdir output/run_rdd/
else:
    # Inference & evaluation using the trained model
    # Now, let's run inference with the trained model on the validation dataset.
    # First, let's create a predictor using the model we just trained:
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set a custom testing threshold for this model
    cfg.DATASETS.TEST = ("rdd2020_val", )
    predictor = DefaultPredictor(cfg)
    trainer.resume_or_load(resume=True)

    # Then, we randomly select several samples to visualize the prediction results.
    from detectron2.utils.visualizer import ColorMode
    from detectron2.evaluation import COCOEvaluator, DatasetEvaluators, inference_on_dataset
    from detectron2.data import build_detection_test_loader

    evaluator = COCOEvaluator("rdd2020_val", cfg, False, "coco_eval")
    val_loader = build_detection_test_loader(cfg, "rdd2020_val")
    eval_results = inference_on_dataset(trainer.model, val_loader,
                                        DatasetEvaluators([evaluator]))
    # another equivalent way is to use trainer.test
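    # e.g., as a sketch: eval_results = trainer.test(cfg, trainer.model,
    #                                                evaluators=[evaluator])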
    print(eval_results)

# Empty the GPU Memory
torch.cuda.empty_cache()
Example #15
'''
@Description:
@version: 
@Author: 周耀海 [email protected]
@LastEditTime: 2020-07-23 17:25:51
'''
from detectron2.engine import DefaultTrainer
from detectron2.config import get_cfg
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader
from detectron2.data.datasets import register_coco_instances

cfg = get_cfg()

# Datasets must be registered before the trainer builds its data loaders
register_coco_instances(
    "coco_train_2", {},
    "./Coco/detectron2/datasets/coco/annotations/instances_train.json",
    "./Coco/detectron2/datasets/coco/train")
register_coco_instances(
    "coco_val_2", {},
    "./Coco/detectron2/datasets/coco/annotations/instances_val.json",
    "./Coco/detectron2/datasets/coco/val")

cfg.DATASETS.TRAIN = ("coco_train_2", )
cfg.DATASETS.TEST = ("coco_val_2", )
trainer = DefaultTrainer(cfg)  # note: cfg.MODEL.WEIGHTS is unset, so the model starts from random init

evaluator = COCOEvaluator("coco_val_2", cfg, True, output_dir="./output")
val_loader = build_detection_test_loader(cfg, "coco_val_2")
print(inference_on_dataset(trainer.model, val_loader, evaluator))
Example #16
if __name__ == '__main__':
    # Training
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Inference and Evaluation
    from detectron2.data import build_detection_test_loader
    from detectron2.evaluation import COCOEvaluator, inference_on_dataset

    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, 'model_final.pth')
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set a custom testing threshold for this model
    cfg.DATASETS.TEST = ('debris_dataset_test', )
    predictor = DefaultPredictor(cfg)
    evaluator = COCOEvaluator('debris_dataset_test',
                              cfg,
                              False,
                              output_dir="./output/")
    val_loader = build_detection_test_loader(cfg, "debris_dataset_test")
    inference_on_dataset(trainer.model, val_loader, evaluator)

    from detectron2.utils.visualizer import Visualizer, ColorMode
    import random

    test_metadata = MetadataCatalog.get('debris_dataset_train')
    dataset_dicts = get_dataset(test_data_path)
    for d in random.sample(dataset_dicts, 10):
        im = cv2.imread(d['file_name'])
        outputs = predictor(im)
        v = Visualizer(im[:, :, ::-1], metadata=test_metadata, scale=0.5)
        out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
        cv2.imshow('img', out.get_image()[:, :, ::-1])
        cv2.waitKey(0)
Example #17
    # OUTPUT
    cfg.OUTPUT_DIR = OUTPUT_DIR
    cfg.SEED = 998  # for reproducibility

    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
    print(cfg)

    metadata = MetadataCatalog.get("train")
    dataset_dicts = DatasetCatalog.get("train")

    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=True)

    evaluator = COCOEvaluator("train",
                              cfg,
                              False,
                              output_dir=os.path.join(cfg.OUTPUT_DIR,
                                                      "inference"))
    val_loader = build_detection_test_loader(cfg, "train")
    inference_on_dataset(trainer.model, val_loader, evaluator)

    predictor = DefaultPredictor(cfg)
    for d in random.sample(dataset_dicts, 3):
        img = cv2.imread(d["file_name"])
        outputs = predictor(img)
        print(d["file_name"])
        visualizer = Visualizer(img[:, :, ::-1], metadata=metadata, scale=1)
        visualizer = visualizer.draw_instance_predictions(
            outputs["instances"].to("cpu"))
        visualizer = visualizer.get_image()[:, :, ::-1]
Example #18
    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    plt.figure()

    #cv2_imshow(out.get_image()[:, :, ::-1])
    im_i = out.get_image()[:, :, ::-1]
    b, g, r = cv2.split(im_i)
    image_rgb_i = cv2.merge([r, g, b])
    plt.imshow(image_rgb_i)
    plt.show()

### eval
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader
evaluator = COCOEvaluator("my_dataset_val", ("bbox", ),
                          False,
                          output_dir="./output/")
val_loader = build_detection_test_loader(cfg, "my_dataset_val")
print(inference_on_dataset(trainer.model, val_loader, evaluator))

###
###########
# Inference with a keypoint detection model
#cfg = get_cfg()   # get a fresh new config
#cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
#cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set threshold for this model
#cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")
#predictor = DefaultPredictor(cfg)
#outputs = predictor(im)
#v = Visualizer(im[:,:,::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
#out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
Example #19
    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        if output_folder is None:
            output_folder = str(Path(cfg.OUTPUT_DIR) / "inference")
        return COCOEvaluator(dataset_name, cfg, True, output_folder)
Example #20
    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "evals")
            os.makedirs(output_folder, exist_ok=True)
        return COCOEvaluator(dataset_name, cfg, False, output_folder)
Example #21
                    cfg.TEST.EVAL_PERIOD, self.model,
                    build_detection_test_loader(self.cfg,
                                                self.cfg.DATASETS.TEST[0],
                                                DatasetMapper(self.cfg,
                                                              True))))
            return hooks

    # Training

    trainer = MyTrainer(cfg)
    trainer.resume_or_load(resume=False)
    last_results = trainer.train()

    with open(f'{OUTPUT_DIR}/model_trained_results_last.pkl', 'wb') as f:
        pickle.dump(last_results, file=f)

    # Evaluate
    print('Evaluating...')

    evaluator = COCOEvaluator("val_kitti-mots", (
        "segm",
        "bbox",
    ),
                              False,
                              output_dir=OUTPUT_DIR)
    val_loader = build_detection_test_loader(cfg, "val_kitti-mots")
    results = inference_on_dataset(trainer.model, val_loader, evaluator)

    with open(f'{OUTPUT_DIR}/model_trained_results.pkl', 'wb') as f:
        pickle.dump(results, file=f)
Example #22
def task_b_MOTS_and_KITTI_training(model_name, model_file):
    # model_name = model_name + '_inference'
    print('Running task B for model', model_name)

    SAVE_PATH = os.path.join('./results_week_5_task_c', model_name)
    os.makedirs(SAVE_PATH, exist_ok=True)

    # Load model and configuration
    print('Loading Model')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    cfg.DATASETS.TRAIN = ('MOTS_KITTI_train', )
    cfg.DATASETS.TEST = ('KITTIMOTS_val', )
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = SAVE_PATH
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.00025
    cfg.SOLVER.LR_SCHEDULER_NAME = "WarmupCosineLR"
    #hyperparameters
    #cfg.SOLVER.LR_POLICY = 'steps_with_decay'
    #cfg.SOLVER.STEPS = [0, 1000, 2000]
    #cfg.SOLVER.GAMMA = 0.1
    #cfg.DATASETS.TRAIN.USE_FLIPPED = True  # this one doesn't work
    #cfg.MODEL.RPN.IOU_THRESHOLDS = [0.1, 0.9]  # defaults: 0.3 and 0.7
    #cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]]#default: [[32, 64, 128, 256, 512]]
    #cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.5, 1.0, 2.0]]
    #End of hyperparameters playing
    cfg.SOLVER.MAX_ITER = 1000
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    cfg.TEST.SCORE_THRESH = 0.5
    print(cfg)
    # Training
    print('Training')
    trainer = DefaultTrainer(cfg)
    val_loss = ValidationLoss(cfg)
    trainer.register_hooks([val_loss])
    trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Evaluation
    print('Evaluating')
    evaluator = COCOEvaluator('KITTIMOTS_val',
                              cfg,
                              False,
                              output_dir=SAVE_PATH)
    trainer.model.load_state_dict(val_loss.weights)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])
    print('Plotting losses')
    plot_validation_loss(cfg, cfg.SOLVER.MAX_ITER, model_name, SAVE_PATH)

    # Qualitative results: visualize some results
    print('Getting qualitative results')
    predictor = DefaultPredictor(cfg)
    predictor.model.load_state_dict(trainer.model.state_dict())
    inputs = kitti_val()
    # inputs = inputs[:20] + inputs[-20:]
    inputs = inputs[220:233] + inputs[1995:2100]
    for i, input in enumerate(inputs):
        file_name = input['file_name']
        print('Prediction on image ' + file_name)
        img = cv2.imread(file_name)
        outputs = predictor(img)
        v = Visualizer(img[:, :, ::-1],
                       metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
                       scale=0.8,
                       instance_mode=ColorMode.IMAGE)
        v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
        cv2.imwrite(
            os.path.join(SAVE_PATH, 'Inference_' + model_name + '_inf_' +
                         str(i) + '.png'),
            v.get_image()[:, :, ::-1])
Example #23
            imgs_anns[i]['annotations'][j]['bbox_mode'] = BoxMode.XYWH_ABS
        imgs_anns[i]['proposal_bbox_mode'] = BoxMode.XYXY_ABS

    return imgs_anns


for d in ["train", "test"]:
    DatasetCatalog.register("caltech_" + d, lambda d=d: get_caltech_dicts(d))
    MetadataCatalog.get("caltech_" + d).set(thing_classes=["person"])
caltech_metadata = MetadataCatalog.get("caltech_train")

cfg = get_cfg()
cfg.merge_from_file("./configs/frcn_dt.yaml")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = .0  # set to 0 to achieve smaller miss rate
predictor = DefaultPredictor(cfg)
evaluator = COCOEvaluator("caltech_test", cfg, False, output_dir='./output/')
val_loader = build_detection_test_loader(cfg, "caltech_test")
model = build_model(cfg)
ckpt = Checkpointer(model)
ckpt.load(os.path.join(cfg.OUTPUT_DIR, "model_0049999.pth"))
# inference_on_dataset(model, val_loader, evaluator)  # compute mAP

# convert to caltech data eval format
res = {}
with torch.no_grad():
    model.eval()
    for inputs in tqdm(val_loader):
        outputs = model(inputs)
        for i, o in zip(inputs, outputs):
            fn = i['file_name']
            idx = fn.rfind('/') + 1
Example #24
def task_a_KITTI_training(model_name, model_file):
    #model_name = model_name + '_inference'
    print('Running task A for model', model_name)

    SAVE_PATH = os.path.join('./results_week_5_task_a', model_name)
    os.makedirs(SAVE_PATH, exist_ok=True)

    # Load model and configuration
    print('Loading Model')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    cfg.DATASETS.TRAIN = ('KITTIMOTS_train', )
    cfg.DATASETS.TEST = ('MOTS_train', )
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = SAVE_PATH
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.00025
    cfg.SOLVER.MAX_ITER = 1000
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    cfg.TEST.SCORE_THRESH = 0.5

    # Training
    print('Training')
    trainer = DefaultTrainer(cfg)
    val_loss = ValidationLoss(cfg)
    trainer.register_hooks([val_loss])
    trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Evaluation
    print('Evaluating')
    evaluator = COCOEvaluator('MOTS_train', cfg, False, output_dir=SAVE_PATH)
    trainer.model.load_state_dict(val_loss.weights)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])
    print('Plotting losses')
    plot_validation_loss(cfg, cfg.SOLVER.MAX_ITER, model_name, SAVE_PATH)

    # Qualitative results: visualize some results
    print('Getting qualitative results')
    predictor = DefaultPredictor(cfg)
    predictor.model.load_state_dict(trainer.model.state_dict())
    inputs = mots_train()
    inputs = inputs[:20] + inputs[-20:]
    for i, input in enumerate(inputs):
        file_name = input['file_name']
        print('Prediction on image ' + file_name)
        img = cv2.imread(file_name)
        outputs = predictor(img)
        v = Visualizer(img[:, :, ::-1],
                       metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
                       scale=0.8,
                       instance_mode=ColorMode.IMAGE)
        v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
        cv2.imwrite(
            os.path.join(SAVE_PATH, 'Inference_' + model_name + '_inf_' +
                         str(i) + '.png'),
            v.get_image()[:, :, ::-1])
Example #25
    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        return COCOEvaluator(dataset_name, cfg, distributed=False, output_dir=output_folder)
Example #26
cfg.SOLVER.BASE_LR = lr  # pick a good LR
cfg.SOLVER.MAX_ITER = 4000
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # only has one class (scratch)
cfg.TEST.EVAL_PERIOD = 500

os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
#trainer = CocoTrainer(cfg)
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()

cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set a custom testing threshold for this model

from detectron2.data import build_detection_test_loader
evaluator = COCOEvaluator("scratch_test", cfg, False, output_dir="./output/")
predictor = DefaultPredictor(cfg)
val_loader = build_detection_test_loader(cfg, "scratch_test")
#print(inference_on_dataset(trainer.model, val_loader, evaluator))
# another equivalent way is to use trainer.test

with open(test_json) as f:
    data = json.load(f)
dice = 0
l = 0
for i in tqdm(range(len(data['images']))):
    h = data['images'][i]['height']
    w = data['images'][i]['width']
    mask = np.zeros((h, w), dtype='uint8')
    for j in range(len(data['annotations'])):
        if data['annotations'][j]['image_id'] == data['images'][i]['id']:
Example #27
    elif args.run_type == "test":
        print("Beginning evaluation run")
        if args.train_time is None:
            print("ERROR: Specify the time at which training took place to load results from")
            exit(1)
        cfg = setup_cfg(args, output_file, mask_head=mask_head)
        if not os.path.exists(cfg.MODEL.WEIGHTS):
            print("ERROR: specified path to weights doesn't exist, exiting")
            exit(1)
        cfg.MODEL.ROI_MASK_HEAD.NAME = mask_head
        trainer = DefaultTrainer(cfg)
        trainer.resume_or_load(resume=False)
        predictor = DefaultPredictor(cfg)
        eval_output_dir = os.path.join(cfg.OUTPUT_DIR, "eval_output")
        os.makedirs(eval_output_dir, exist_ok=True)
        evaluator = COCOEvaluator(dataset_name_val, cfg, False, output_dir=eval_output_dir)
        val_loader = build_detection_test_loader(cfg, dataset_name_val)
        print(inference_on_dataset(trainer.model, val_loader, evaluator))
        dataset_dicts = DatasetCatalog.get(dataset_name_val)
        dataset_metadata = MetadataCatalog.get(dataset_name_val)

        for d in random.sample(dataset_dicts, 3):
            im = cv2.imread(d["file_name"])
            outputs = predictor(im)
            v = Visualizer(im[:, :, ::-1], metadata=dataset_metadata, scale=0.5,
                    instance_mode=ColorMode.IMAGE_BW)
            out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
            window = "prediction"
            cv2.imshow(window, out.get_image()[:,:,::-1])
            cv2.waitKey(0)
            cv2.destroyAllWindows()
Example #28
    cfg.DATASETS.VAL = (dataset + '_val', )
    cfg.DATASETS.TEST = (dataset + '_test', )
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(thing_classes)
    cfg.SOLVER.IMS_PER_BATCH = 2

    cfg.SOLVER.BASE_LR = args.lr
    cfg.SOLVER.MAX_ITER = args.iter
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = args.batch  # faster, and good enough for the tutorial dataset (default: 512)

    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()

    ###-------INFERENCE AND EVALUATION---------------------------
    cfg.MODEL.WEIGHTS = os.path.join(
        cfg.OUTPUT_DIR, "model_final.pth")  # path to the model we just trained
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set the testing threshold for this model

    ### MAP #####
    #We can also evaluate its performance using AP metric implemented in COCO API.
    evaluator = COCOEvaluator(dataset + '_val',
                              cfg,
                              False,
                              output_dir=cfg.OUTPUT_DIR)
    val_loader = build_detection_test_loader(cfg, dataset + '_val')
    print('---------------------------------------------------------')
    print(model)
    print(inference_on_dataset(trainer.model, val_loader, evaluator))
    print('---------------------------------------------------------')
Example #29
    @classmethod
    def build_evaluator(cls, cfg, dataset_name):
        output_folder = cfg.OUTPUT_DIR
        return COCOEvaluator(dataset_name, cfg, False, output_folder)
Example #30
    @classmethod
    def build_evaluator(cls, cfg, dataset_name):
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluators = [COCOEvaluator(dataset_name, cfg, True, output_folder)]
        return DatasetEvaluators(evaluators)
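
All of the build_evaluator overrides above plug into the same mechanism: DefaultTrainer.test() calls this classmethod once for every dataset in cfg.DATASETS.TEST when no explicit evaluators are passed. A minimal self-contained sketch (class and folder names assumed):

import os
from detectron2.engine import DefaultTrainer
from detectron2.evaluation import COCOEvaluator

class EvalTrainer(DefaultTrainer):
    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        # invoked by DefaultTrainer.test() for each entry in cfg.DATASETS.TEST
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        return COCOEvaluator(dataset_name, output_dir=output_folder)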