Example #1
def main():
    args = parse_args()

    with open(args.config, "r") as f:
        config = yaml.safe_load(f)
    if "classes" not in config:
        raise Exception("Could not find class names")
    n_classes = len(config["classes"])
    classes = config["classes"]

    for d in ["train"]:
        DatasetCatalog.register("custom_" + d, lambda d=d: get_annotated_dataset(args.annotator_root, args.data_folders))
        MetadataCatalog.get("custom_" + d).set(thing_classes=classes)
    custom_metadata = MetadataCatalog.get("custom_train")

    cfg = get_cfg()
    cfg.merge_from_file(args.model_config)
    cfg.DATASETS.TRAIN = ("custom_train",)
    cfg.DATASETS.TEST = ()   # no metrics implemented for this dataset
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = args.initial_weights
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025
    cfg.SOLVER.MAX_ITER = args.max_iter
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128   # faster, and good enough for this toy dataset
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = n_classes

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg) 
    trainer.resume_or_load(resume=False)
    trainer.train()
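A note on the `lambda d=d: ...` pattern above: when datasets are registered in a loop, binding the loop variable as a default argument freezes its current value; a plain closure would see only the loop's final value. A minimal sketch of the idiom, using a hypothetical `get_dicts(split)` loader:

from detectron2.data import DatasetCatalog

def get_dicts(split):
    # hypothetical loader; returns a list of Detectron2 dataset dicts for the split
    return []

for d in ["train", "val"]:
    # d=d captures the current split; without it every lambda would use the last value of d
    DatasetCatalog.register("custom_" + d, lambda d=d: get_dicts(d))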
Example #2
def train(*args):

    prepare_dataset()

    # D2 configuration
    cfg = get_cfg()
    cfg.merge_from_file(
        model_zoo.get_config_file(
            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    cfg.DATASETS.TRAIN = ("balloon_train", )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
    )  # Let training initialize from model zoo
    cfg.SOLVER.IMS_PER_BATCH = args[0]  # IMS_PER_BATCH must be divisible by the number of workers (a D2 assertion)
    cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
    cfg.SOLVER.MAX_ITER = 300  # 300 iterations seems good enough for this toy dataset; you may need to train longer for a practical dataset
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, and good enough for this toy dataset (default: 512)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # only one class (balloon)
    cfg.OUTPUT_DIR = os.environ['SM_OUTPUT_DATA_DIR']  # SageMaker output dir; TODO check that this config works fine

    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
Example #3
def model_configuration(model_url, learning_rate, max_iter):

    model_a = os.path.join("COCO-InstanceSegmentation", model_url)
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_a))
    cfg.DATASETS.TRAIN = ("kitti-mots", )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        model_a)  # Let training initialize from model zoo, using the same model as the merged config
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = learning_rate
    cfg.SOLVER.MAX_ITER = max_iter
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
    # cfg.INPUT.MASK_FORMAT = 'rle'
    # cfg.INPUT.MASK_FORMAT='bitmask'

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    print('Training...')
    trainer.train()

    # EVALUATION
    print('Evaluating...')
    evaluator = COCOEvaluator("kitti-mots", cfg, False, output_dir="./Search/")
    val_loader = build_detection_test_loader(cfg, "kitti-mots")
    results = inference_on_dataset(trainer.model, val_loader, evaluator)
    print(results)

    return results
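The evaluation block above (COCOEvaluator, build_detection_test_loader, inference_on_dataset) is the standard Detectron2 pattern and recurs in several examples below. A minimal reusable sketch; the helper name and the default output directory are assumptions, not part of any example:

from detectron2.data import build_detection_test_loader
from detectron2.evaluation import COCOEvaluator, inference_on_dataset

def evaluate_model(cfg, model, dataset_name, output_dir="./eval/"):
    # COCO-style evaluation of `model` on a registered dataset
    evaluator = COCOEvaluator(dataset_name, cfg, False, output_dir=output_dir)
    loader = build_detection_test_loader(cfg, dataset_name)
    return inference_on_dataset(model, loader, evaluator)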
Example #4
def task_b(model_name, model_file):
    save_path = Path("output/task_b") / model_name
    os.makedirs(save_path, exist_ok=True)

    cfg = base_cfg(model_file, save_path)
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.00025
    cfg.SOLVER.MAX_ITER = 1000
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    cfg.TEST.SCORE_THRESH = 0.5

    trainer = DefaultTrainer(cfg)
    val_loss = ValidationLoss(cfg)
    trainer.register_hooks([val_loss])
    # swap the last two hooks so ValidationLoss runs before the PeriodicWriter logs its metrics
    trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]
    trainer.resume_or_load(resume=False)
    trainer.train()

    evaluator = COCOEvaluator("kitti-mots-val",
                              cfg,
                              False,
                              output_dir=save_path)
    trainer.model.load_state_dict(val_loss.weights)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])
    plot_validation_loss(cfg, cfg.SOLVER.MAX_ITER, model_name, save_path)

    get_qualitative_results(cfg, save_path)
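`ValidationLoss` is not defined in this snippet. A minimal sketch of such a hook, assuming the common community pattern of computing losses on `cfg.DATASETS.TEST` after each training step; the variant used above evidently also snapshots the best weights into `val_loss.weights`, which this sketch omits:

import torch
import detectron2.utils.comm as comm
from detectron2.data import build_detection_train_loader
from detectron2.engine import HookBase

class ValidationLoss(HookBase):
    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg.clone()
        # draw batches from the validation split instead of the training split
        self.cfg.DATASETS.TRAIN = cfg.DATASETS.TEST
        self._loader = iter(build_detection_train_loader(self.cfg))

    def after_step(self):
        data = next(self._loader)
        with torch.no_grad():
            # in training mode the model returns a dict of losses
            loss_dict = self.trainer.model(data)
            losses = sum(loss_dict.values())
            assert torch.isfinite(losses).all(), loss_dict
            loss_dict_reduced = {"val_" + k: v.item()
                                 for k, v in comm.reduce_dict(loss_dict).items()}
            if comm.is_main_process():
                self.trainer.storage.put_scalars(
                    total_val_loss=sum(loss_dict_reduced.values()),
                    **loss_dict_reduced)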
Example #5
def train_model():
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    cfg.DATASETS.TRAIN = ("balloon_train",)
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 2

    # Let training initialize from model zoo
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR

    # 300 iterations seems good enough for this toy dataset;
    #   you may need to train longer for a practical dataset
    cfg.SOLVER.MAX_ITER = 300

    # faster, and good enough for this toy dataset (default: 512)
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # only has one class (balloon)

    cfg.OUTPUT_DIR = gcfg.get_ou_dir
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
Example #6
def mAPtest(yaml, weight):
    from detectron2.config import get_cfg
    from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_test_loader
    from detectron2.data.datasets import register_coco_instances
    from detectron2.engine import DefaultPredictor, DefaultTrainer
    from detectron2.evaluation import COCOEvaluator, inference_on_dataset

    cfg = get_cfg()
    cfg.merge_from_file(yaml)
    cfg.DATALOADER.NUM_WORKERS = 2
    # cfg.MODEL.WEIGHTS = "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x/137849600/model_final_f10217.pkl"  # initialize from model zoo
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3  # 3 classes (Kong, Lee, Huh)

    register_coco_instances("Acheck_test", {}, "./Acheck_hair_test.json",
                            "./img_hair_test")
    MetadataCatalog.get("Acheck_test").thing_classes = ["Kong", "Lee", "Huh"]
    Acheck_metadata = MetadataCatalog.get("Acheck_test")
    dataset_dicts = DatasetCatalog.get("Acheck_test")
    cfg.DATASETS.TRAIN = ("Acheck_test", )

    cfg.MODEL.WEIGHTS = weight
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8  # set the testing threshold for this model
    cfg.DATASETS.TEST = ("Acheck_test", )
    predictor = DefaultPredictor(cfg)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)

    evaluator = COCOEvaluator("Acheck_test", cfg, False, "./output/")
    val_loader = build_detection_test_loader(cfg, "Acheck_test")
    inference_on_dataset(trainer.model, val_loader, evaluator)
Example #7
def train_model(trainset_name: str, learning_rate: float, num_iteration: int,
                batch_per_image: int, num_classes: int):
    cfg = get_cfg()
    cfg.MODEL.DEVICE = "cpu"
    cfg.merge_from_file(
        model_zoo.get_config_file(
            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    cfg.DATASETS.TRAIN = (trainset_name, )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
    )  # Let training initialize from model zoo
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = learning_rate  # pick a good LR
    cfg.SOLVER.MAX_ITER = num_iteration  # 300 iterations seems good enough for this toy dataset; you will need to train longer for a practical dataset
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = batch_per_image  # faster, and good enough for this toy dataset (default: 512)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = num_classes  # (see https://detectron2.readthedocs.io/tutorials/datasets.html#update-the-config-for-new-datasets)

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Inference should use the config with parameters that are used in training
    # cfg now already contains everything we've set previously. We changed it a little bit for inference:
    cfg.MODEL.WEIGHTS = os.path.join(
        cfg.OUTPUT_DIR, "model_final.pth")  # path to the model we just trained
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set a custom testing threshold
    predictor = DefaultPredictor(cfg)
    return predictor
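The returned predictor can then be applied to single BGR images, as in the Detectron2 tutorial; a brief usage sketch with illustrative arguments and a placeholder image path:

import cv2

# "my_dataset_train" must already be registered in the DatasetCatalog
predictor = train_model("my_dataset_train", 0.00025, 300, 128, num_classes=1)
im = cv2.imread("input.jpg")  # placeholder path; DefaultPredictor expects a BGR image
outputs = predictor(im)
print(outputs["instances"].pred_classes)
print(outputs["instances"].pred_boxes)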
Example #8
def train_detectron2():
    dataset_storage = {
        'synthetic': generate_synthetic_datasets(),
        'real': generate_real_datasets()
    }

    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    cfg.DATASETS.TRAIN = (SYNTHETIC_DATASET_NAME[MODES[0]], REAL_DATASET_NAME[MODES[0]])
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl"
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
    cfg.SOLVER.MAX_ITER = 500
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, and good enough for this toy dataset (default: 512)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(dataset_storage["synthetic"][MODES[0]]['unit_classes'])
    cfg.OUTPUT_DIR = str(OUTPUT_PATH)

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()

    return cfg
Example #9
def testing():
    name = "SYNTHIA_test"  # "HoliCity_train" "HoliCity_valid"
    assign_global(name)
    cache_pth = f"/home/dxl/Code/PlanarReconstruction/data/SYNTHIA_test_coco_format.json"
    register_coco_instances(name=_name,
                            metadata={'thing_classes': ["P"]},
                            json_file=cache_pth,
                            image_root="/home/dxl/Data/PlaneRecover")

    cfg__ = make_cfg()

    # begin train
    trainer = DefaultTrainer(cfg__)
    # trainer.resume_or_load(resume=True)
    # trainer.train()

    # predict and visualization
    # extract_roi_feature(cfg__)
    # build_predictor_vis(cfg__)
    # evaluator_metric(cfg__)

    # evaluating validation data set
    trainer.resume_or_load(resume=True)
    evaluator, val_loader = build_evaluator(cfg__)
    inference_on_dataset(trainer.model, val_loader, evaluator)
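`make_cfg` and `build_evaluator` are project helpers that are not shown. A plausible minimal sketch of `build_evaluator`, assuming `cfg.DATASETS.TEST` names the registered dataset:

from detectron2.data import build_detection_test_loader
from detectron2.evaluation import COCOEvaluator

def build_evaluator(cfg):
    # pair a COCO evaluator with a test loader for the first test dataset
    dataset_name = cfg.DATASETS.TEST[0]
    evaluator = COCOEvaluator(dataset_name, cfg, False, output_dir=cfg.OUTPUT_DIR)
    val_loader = build_detection_test_loader(cfg, dataset_name)
    return evaluator, val_loader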
Example #10
def task_a(model_name, model_file, checkpoint=None, evaluate=True, visualize=True):
    print('Running task A for model', model_name)
    if checkpoint:
        SAVE_PATH = os.path.join('./results_week_5_task_a', model_name + '_wCheckpoint')
    else:
        SAVE_PATH = os.path.join('./results_week_5_task_a', model_name)
    os.makedirs(SAVE_PATH, exist_ok=True)

    # Loading data
    print('Loading data')
    dataloader = MOTS_Dataloader(dataset='motschallenge')
    def mots_train(): return dataloader.get_dicts(train_flag=True)
    def mots_val(): return dataloader.get_dicts(train_flag=False)
    DatasetCatalog.register('MOTS_train', mots_train)
    MetadataCatalog.get('MOTS_train').set(thing_classes=list(MOTS_CATEGORIES.keys()))
    DatasetCatalog.register('MOTS_val', mots_val)
    MetadataCatalog.get('MOTS_val').set(thing_classes=list(MOTS_CATEGORIES.keys()))

    # Load model and configuration
    print('Loading Model')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    model_training_metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0]) # Store current model training metadata
    cfg.DATASETS.TRAIN = ('MOTS_train', )
    cfg.DATASETS.TEST = ('MOTS_val', )
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = SAVE_PATH
    if checkpoint:
        print('Using Checkpoint')
        cfg.MODEL.WEIGHTS = checkpoint
        cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    else:
        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)
    
    if evaluate:
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)

        # Evaluation
        print('Evaluating')
        evaluator = COCOEvaluator('MOTS_val', cfg, False, output_dir=SAVE_PATH)
        trainer = DefaultTrainer(cfg)
        trainer.test(cfg, model, evaluators=[evaluator])

    if visualize:
        # Qualitative results: visualize some results
        print('Getting qualitative results')
        predictor = DefaultPredictor(cfg)
        inputs = mots_val()
        inputs = inputs[:20] + inputs[-20:]
        for i, input in enumerate(inputs):
            img = cv2.imread(input['file_name'])
            outputs = predictor(img)
            v = Visualizer(
                img[:, :, ::-1],
                metadata=model_training_metadata,
                scale=0.8,
                instance_mode=ColorMode.IMAGE)
            v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
            cv2.imwrite(os.path.join(SAVE_PATH, 'Inference_' + model_name + '_inf_' + str(i) + '.png'), v.get_image()[:, :, ::-1])
Example #11
def train(train_dir, name_data, json_dir, config, resume_status, iteration,
          batch, lr):
    cfg = get_cfg()
    cfg.merge_from_file("./configs/" + config)
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = lr
    cfg.SOLVER.WARMUP_ITERS = 1200
    cfg.SOLVER.MAX_ITER = iteration
    cfg.SOLVER.GAMMA = 0.05
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = batch
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
    cfg.DATASETS.TRAIN = (name_data, )
    cfg.DATASETS.TEST = ()

    try:
        register_coco_instances(name_data, {}, json_dir,
                                train_dir)  # Train data
    except ValueError:
        print("Data already registerd. Continue.")

    if resume_status:
        cfg.MODEL.WEIGHTS = os.path.join('output', 'model_final.pth')

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=resume_status)
    trainer.train()
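An alternative to catching the re-registration ValueError: recent Detectron2 releases expose DatasetCatalog.list() and DatasetCatalog.remove(), so the guard can be explicit (same names as above):

from detectron2.data import DatasetCatalog

if name_data in DatasetCatalog.list():  # left over from a previous run
    DatasetCatalog.remove(name_data)
register_coco_instances(name_data, {}, json_dir, train_dir)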
Example #12
def main():
    args = parser.parse_args()
    register_coco_instances(args.dataset, {}, args.label, args.file)  # training dataset
    register_coco_instances(args.test_dataset, {}, args.test_label, args.test_file)  # testing dataset

    ### set metadata
    MetadataCatalog.get(args.test_dataset).evaluator_type="coco"
    DatasetCatalog.get(args.test_dataset)

    ### cfg setting
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(args.model))  
    cfg.DATASETS.TRAIN = (args.dataset,)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = args.num_class  # excavator, dump_truck, cement_truck
    cfg.MODEL.WEIGHTS = args.weight 
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7   # set the testing threshold for this model
    cfg.DATASETS.TEST = (args.test_dataset,)

    ### trainer setting
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)  # resume_or_load expects a bool; the weights come from cfg.MODEL.WEIGHTS

    ### evaluation setting
    evaluator = COCOEvaluator(args.test_dataset, cfg, False, output_dir="./output/")
    val_loader = build_detection_test_loader(cfg, args.test_dataset)
    inference_on_dataset(trainer.model, val_loader, evaluator)
Example #13
def main(args):

    dataset_train = "faces_train"
    dataset_validation = "faces_validation"
    cfg = setup(args)
    cfg.DATASETS.TRAIN = (dataset_train, )
    ### TODO:
    ### - Test with validation set.
    ###
    # cfg.DATASETS.TEST = (dataset_validation,)
    cfg.DATASETS.TEST = ()

    train_data = args.train_data
    train_annots = args.train_annotations
    validation_data = args.validation_data
    validation_annots = args.validation_annotations

    DatasetCatalog.register(dataset_train,
                            lambda: get_dataset_dict(train_annots, train_data))
    MetadataCatalog.get(dataset_train).set(thing_classes=["face"])
    DatasetCatalog.register(
        dataset_validation,
        lambda: get_dataset_dict(validation_annots, validation_data))
    MetadataCatalog.get(dataset_validation).set(thing_classes=["face"])
    faces_metadata = MetadataCatalog.get(dataset_train)

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
Example #14
def Train():
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    # register train dataset
    register_coco_instances("custom", {},
                            "datasets/traindata/midv500_coco.json",
                            "datasets/traindata/")
    custom_metadata = MetadataCatalog.get("custom")
    dataset_dicts = DatasetCatalog.get("custom")

    # set cfg
    cfg = get_cfg()
    cfg.merge_from_file(
        "configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x.yaml")
    cfg.DATASETS.TRAIN = ("custom", )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 4
    cfg.MODEL.WEIGHTS = 'model_final_maskrcnn_dc5.pkl'
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.02
    cfg.SOLVER.MAX_ITER = 1000
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1

    # training
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
Example #15
def inference(config_file, coco_to_kitti_dict):
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(config_file))
    cfg.DATALOADER.NUM_WORKERS = 4
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(config_file)
    cfg.DATASETS.TRAIN = ("kitti_mots_train", )
    cfg.DATASETS.TEST = ("kitti_mots_test", )
    cfg.SOLVER.IMS_PER_BATCH = 8
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)

    evaluator = COCOEvaluator("kitti_mots_test",
                              cfg,
                              False,
                              output_dir="./output/")
    val_loader = build_detection_test_loader(cfg, "kitti_mots_test")
    inference_on_dataset(trainer.model, val_loader, evaluator)

    # note: this reads COCOEvaluator's private state in order to post-filter predictions
    preds = evaluator._predictions

    filtered_preds = filter_preds(preds, coco_to_kitti_dict)
    evaluator._predictions = filtered_preds

    evaluator.evaluate()
Example #16
def Train():
    register_coco_instances(
        "custom", {}, "datasets/coco/annotations/instances_train2017.json",
        "datasets/coco/train2017")
    custom_metadata = MetadataCatalog.get("custom")
    dataset_dicts = DatasetCatalog.get("custom")
    for d in random.sample(dataset_dicts, 3):
        img = cv2.imread(d["file_name"])
        visualizer = Visualizer(img[:, :, ::-1],
                                metadata=custom_metadata,
                                scale=1)
        vis = visualizer.draw_dataset_dict(d)
        cv2.imshow('Sample', vis.get_image()[:, :, ::-1])
        cv2.waitKey()

    cfg = get_cfg()
    cfg.merge_from_file(
        "configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml")
    cfg.DATASETS.TRAIN = ("custom", )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 4
    cfg.MODEL.WEIGHTS = 'model_final_3c3198.pkl'
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.0001

    cfg.SOLVER.MAX_ITER = 150000
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
Example #17
def test_model(path,
               model,
               weights,
               dataset,
               action_type='test',
               mode="full",
               visualize=False):
    dataset_name = os.path.basename(path)
    test = bottle_loader.register_dataset(path, dataset_name, action_type,
                                          mode)
    bottle_loader.register_dataset(path, dataset, 'train', mode)
    cfg_test = gen_cfg_test(dataset, model, dataset_name)
    cfg = gen_cfg_train(model, weights, dataset)
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.9
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    evaluator = COCOEvaluator("%s_%s" % (dataset_name, action_type),
                              cfg_test,
                              False,
                              output_dir="./output_%s/" % (dataset))
    val_loader = build_detection_test_loader(cfg_test,
                                             "%s_%s" % (dataset, 'train'))
    result = inference_on_dataset(trainer.model, val_loader, evaluator)

    # Visualize the test results
    if visualize:
        visualize_images_dict(
            dataset_name, test,
            MetadataCatalog.get('%s_%s' % (dataset, 'train')), cfg,
            dataset_name)
    return result
Example #18
def Train():
    register_coco_instances(
        "custom", {}, "/home/lsc/datasets/butterfly/Annotations/train.json",
        "/home/lsc/datasets/butterfly/TrainData/JPEGImages")
    custom_metadata = MetadataCatalog.get("custom")
    dataset_dicts = DatasetCatalog.get("custom")

    cfg = get_cfg()
    cfg.merge_from_file(
        "/home/lsc/detectron2/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml"
    )
    cfg.DATASETS.TRAIN = ("custom", )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 8
    cfg.MODEL.WEIGHTS = 'detectron2://COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x/139173657/model_final_68b088.pkl'
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.02
    cfg.SOLVER.MAX_ITER = (500)
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = (128)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 94

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
Example #19
def main():
    # log
    setup_logger(output='./output/log.txt')

    # train args
    args = parse()

    # model configurations
    cfg = get_cfg()

    cfg.merge_from_file(args.config)

    register_coco_instances("val_data", {}, args.val_json_path,
                            args.val_img_dir)
    cfg.DATASETS.TRAIN = ("val_data", )
    cfg.DATASETS.TEST = ("val_data", )

    cfg.MODEL.ROI_HEADS.NUM_CLASSES = args.num_class
    cfg.MODEL.WEIGHTS = args.pretrain

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

    # build the trainer and load weights (no training is actually run here)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=True)

    # validation
    evaluator = COCOEvaluator("val_data",
                              cfg,
                              False,
                              output_dir=cfg.OUTPUT_DIR)
    val_loader = build_detection_test_loader(cfg, "val_data")
    inference_on_dataset(trainer.model, val_loader, evaluator)
Example #20
def train(args, mode, _appcfg):
    name = "hmd"
    for subset in ["train", "val"]:
        metadata = load_and_register_dataset(name, subset, _appcfg)

    cfg = config.get_cfg()
    cfg.merge_from_file(
        "/aimldl-cod/external/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
    )
    cfg.DATASETS.TRAIN = ("hmd_train", "hmd_val")
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl"  # initialize from model zoo
    # cfg.MODEL.WEIGHTS = "/codehub/apps/detectron2/release/model_final.pth"
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025
    # cfg.SOLVER.MAX_ITER = 350000    # 300 iterations seems good enough, but you can certainly train longer
    # cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128   # faster, and good enough for this toy dataset
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512  # the Detectron2 default
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3  # 3 classes

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
Example #21
def validation(cfg: DictConfig) -> None:
    """
    Transfer learning using pretrained models from detectron2 model zoo.

    :param cfg: the configuration dictionary of dataset_model.
    :type cfg: omegaconf.dictconfig.DictConfig.
    :return: None
    """
    log.info('--- Start Validation ---')
    val_dataset_dicts, val_dataset_metadata = register_custom_coco_dataset(
        cfg=cfg, process='val')
    visualizing_coco_dataset(dataset_dicts=val_dataset_dicts,
                             dataset_metadata=val_dataset_metadata,
                             num_ims=cfg.validation.show_images)
    model_cfg: CfgNode = get_model_configs(cfg=cfg, process='val')
    evaluator = COCOEvaluator(dataset_name=cfg.name + '_val',
                              cfg=model_cfg,
                              distributed=False,
                              output_dir=os.getcwd())
    val_loader = build_detection_test_loader(cfg=model_cfg,
                                             dataset_name=cfg.name + '_val')
    trainer: DefaultTrainer = DefaultTrainer(model_cfg)
    trainer.resume_or_load(resume=True)
    inference_on_dataset(model=trainer.model,
                         data_loader=val_loader,
                         evaluator=evaluator)
    log.info('--- Validation Done ---')
Example #22
def train(train_flag, resume_load=False):
    # note: relies on a module-level `cfg` defined elsewhere
    # trainer= Trainer(cfg)
    trainer = DefaultTrainer(cfg) 
    trainer.resume_or_load(resume_load)
    if train_flag:
        trainer.train()
    return trainer
Example #23
def task_b_MOTS_training(model_name, model_file):
    #model_name = model_name + '_inference'
    print('Running task B for model', model_name)

    SAVE_PATH = os.path.join('./results_week_5_task_b', model_name)
    os.makedirs(SAVE_PATH, exist_ok=True)

    # Load model and configuration
    print('Loading Model')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    cfg.DATASETS.TRAIN = ('MOTS_train',)
    cfg.DATASETS.TEST = ('KITTIMOTS_val',)
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = SAVE_PATH
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.00025
    cfg.SOLVER.MAX_ITER = 200
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    cfg.TEST.SCORE_THRESH = 0.5

    # Training
    print('Training')
    trainer = DefaultTrainer(cfg)
    val_loss = ValidationLoss(cfg)
    trainer.register_hooks([val_loss])
    # swap the last two hooks so ValidationLoss runs before the PeriodicWriter logs its metrics
    trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Evaluation
    print('Evaluating')
    evaluator = COCOEvaluator('KITTIMOTS_val', cfg, False, output_dir=SAVE_PATH)
    trainer.model.load_state_dict(val_loss.weights)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])
    print('Plotting losses')
    draw_loss(cfg, cfg.SOLVER.MAX_ITER, model_name, SAVE_PATH)

    # Qualitative results: visualize some results
    print('Getting qualitative results')
    predictor = DefaultPredictor(cfg)
    predictor.model.load_state_dict(trainer.model.state_dict())
    inputs = kitti_val()
    #inputs = inputs[:20] + inputs[-20:]
    inputs = inputs[220:233] + inputs[1995:2100]
    for i, input in enumerate(inputs):
        file_name = input['file_name']
        print('Prediction on image ' + file_name)
        img = cv2.imread(file_name)
        outputs = predictor(img)
        v = Visualizer(
            img[:, :, ::-1],
            metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
            scale=0.8,
            instance_mode=ColorMode.IMAGE)
        v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
        cv2.imwrite(os.path.join(SAVE_PATH, 'Inference_' + model_name + '_inf_' + str(i) + '.png'), v.get_image()[:, :, ::-1])
Example #24
def train_coco_data(self, coco_json):
    dataset_name = "mask_train_data"
    DatasetCatalog.register(
        dataset_name,
        lambda: load_coco_json(json_file=coco_json,
                               image_root=self.train_data_path))
    MetadataCatalog.get(dataset_name).set(
        json_file=coco_json,
        image_root=self.train_data_path,
        evaluator_type="coco",
        thing_classes=["rightmask"],
        thing_dataset_id_to_contiguous_id={1: 0})
    cfg = get_cfg()
    cfg.merge_from_file(
        "/home/detectron2/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"
    )
    cfg.DATASETS.TRAIN = (dataset_name, )
    cfg.DATASETS.TEST = (dataset_name, )
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = "/home/detectron2/train_data/model_final_280758.pkl"
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.01  # learning rate
    cfg.SOLVER.MAX_ITER = 300  # maximum number of iterations
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    print("Model save path: " + cfg.OUTPUT_DIR)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()  # start training
Example #25
def train_iobjectspy_voc(train_data_path, train_config_path, weight_path,
                         max_iter, out_dir, register_train_name,
                         ml_set_tracking_path, experiment_id,
                         ml_experiment_tag):
    cfg = get_cfg()
    cfg.merge_from_file(train_config_path)
    cfg.DATASETS.TRAIN = (register_train_name, )
    cfg.DATASETS.TEST = ()  # no metrics implemented for this dataset
    cfg.MODEL.WEIGHTS = weight_path  # initialize from model zoo
    if max_iter != -1:
        cfg.SOLVER.MAX_ITER = max_iter
    num_class = get_class_num(train_data_path)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = num_class  # get classes from sda
    cfg.OUTPUT_DIR = out_dir
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=True)
    trainer.train()
    try:
        import mlflow as ml
        # set up mlflow tracking
        ml.set_tracking_uri(ml_set_tracking_path)
        # manage runs via separate experiment IDs; this level is best named after the project, e.g. iobjectspy_faster_rcnn_dota
        ml.set_experiment(experiment_id)
        # tag the run with its experiment id
        ml.set_tag('experiment_id', ml_experiment_tag)
        ml.log_param('lr', cfg.SOLVER.BASE_LR)
        ml.log_param('max_iter', cfg.SOLVER.MAX_ITER)
        ml.log_param('ims_per_batch', cfg.SOLVER.IMS_PER_BATCH)
    except Exception:
        pass  # mlflow logging is optional
Example #26
def main(args):

    # first register the datasets I will use
    register_coco_instances("my_dataset_train", {}, "training.json", "../data/ade20k/full_data/images/training/")
    register_coco_instances("my_dataset_val", {}, "validation.json", "../data/ade20k/full_data/images/validation/")

    # this is just the default cfg
    cfg = get_cfg()
    # depending on the yaml file, the cfg is updated accordingly
    cfg.merge_from_file("../detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml")

    # These are some task-specific changes I made for training on the ade20k dataset
    cfg.DATASETS.TRAIN = ("my_dataset_train",)
    cfg.DATASETS.TEST = ()  # no metrics implemented for this dataset
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml")
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 150  # 150 classes
    cfg.SOLVER.IMS_PER_BATCH = 16  # this is the default


    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

    # I highly suggest reading the source code of DefaultTrainer again if you forget why this is done.
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
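This snippet keeps IMS_PER_BATCH at the default 16 and leaves BASE_LR to the merged yaml. If you shrink the batch to fit memory, the linear scaling rule is a common heuristic; a sketch, assuming the 0.02 @ 16 images reference used by many of the COCO baseline configs:

# linear scaling rule (a heuristic, not a guarantee)
REF_LR, REF_IMS_PER_BATCH = 0.02, 16  # reference values from the COCO baselines
cfg.SOLVER.IMS_PER_BATCH = 4
cfg.SOLVER.BASE_LR = REF_LR * cfg.SOLVER.IMS_PER_BATCH / REF_IMS_PER_BATCH  # 0.005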
Example #27
def prepare_for_training(N_iter,
                         output_dir,
                         train_dataset_name,
                         N_classes,
                         start_training=False,
                         gpu_avail=True,
                         model_type="COCO-Detection/faster_rcnn_R_50_C4_1x.yaml"):
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_type))
    cfg.OUTPUT_DIR = output_dir
    cfg.DATASETS.TRAIN = (train_dataset_name,)
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_type)  # Let training initialize from model zoo
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
    cfg.SOLVER.GAMMA = 0.99  # lr decay factor
    cfg.SOLVER.STEPS = list(range(1000, N_iter, 1000))  # decay steps
    cfg.SOLVER.WARMUP_ITERS = 500  # warmup steps
    cfg.SOLVER.MAX_ITER = N_iter    # 300 iterations seems good enough for this toy dataset; you may need to train longer for a practical dataset
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128   # faster, and good enough for this toy dataset (default: 512)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = N_classes  # e.g. 4 classes

    if not gpu_avail:
        cfg.MODEL.DEVICE = 'cpu'

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)

    if start_training:
        trainer.train()

    return trainer, cfg    
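A possible call to the helper above; all argument values are illustrative and the dataset is assumed to be registered already:

trainer, cfg = prepare_for_training(
    N_iter=3000,
    output_dir="./output",
    train_dataset_name="my_dataset_train",  # must already be in the DatasetCatalog
    N_classes=4,
    start_training=False,  # build everything but defer training
    gpu_avail=False)
trainer.train()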
Example #28
def train(args):
    dataset_name = "dataset_train"
    register_coco_instances(dataset_name, {}, args.annotations_path,
                            args.images_dir)
    cfg = get_cfg()
    if args.type.lower() == "maskrcnn":
        cfg.merge_from_file(
            model_zoo.get_config_file(
                "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    else:
        cfg.merge_from_file(
            model_zoo.get_config_file(
                "COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
        setKeypoints(dataset_name)
        cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 14
    cfg.DATASETS.TRAIN = (dataset_name, )
    cfg.DATASETS.TEST = ()
    cfg.INPUT.MASK_FORMAT = 'bitmask'
    cfg.DATALOADER.NUM_WORKERS = 2
    setWeights(args, cfg)
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
    cfg.SOLVER.MAX_ITER = 500  # 300 iterations seems good enough for this toy dataset; you may need to train longer for a practical dataset
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, and good enough for this toy dataset (default: 512)
    setNumClasses(cfg)
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
Example #29
def trainILSVRC(args):
    register_ILSVRC()
    yaml_path, outdir, weights_name = get_cfg_info()
    cfg = setup(yaml_path, outdir, weights_name)
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=True)
    return trainer.train()
Example #30
def train(config, data_path):
    """Train the Mask-RCNN for the given configuration and the given data"""
    register_data(data_path, prefix='yeast_cells_')
    os.makedirs(config.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(config)
    trainer.resume_or_load(resume=True)
    trainer.train()
    return trainer