def train_model(trainset_name: str, learning_rate: float, num_iteration: int,
                batch_per_image: int, num_classes: int):
    cfg = get_cfg()
    cfg.MODEL.DEVICE = "cpu"
    cfg.merge_from_file(
        model_zoo.get_config_file(
            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    cfg.DATASETS.TRAIN = (trainset_name, )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
    )  # Let training initialize from model zoo
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = learning_rate  # pick a good LR
    cfg.SOLVER.MAX_ITER = num_iteration  # 300 iterations seems good enough for this toy dataset; you will need to train longer for a practical dataset
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = batch_per_image  # faster, and good enough for this toy dataset (default: 512)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = num_classes  # (see https://detectron2.readthedocs.io/tutorials/datasets.html#update-the-config-for-new-datasets)

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Inference should use the same config and parameters used during training.
    # cfg already contains everything set above; we change it slightly for inference:
    cfg.MODEL.WEIGHTS = os.path.join(
        cfg.OUTPUT_DIR, "model_final.pth")  # path to the model we just trained
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set a custom testing threshold
    predictor = DefaultPredictor(cfg)
    return predictor
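
A minimal usage sketch for the helper above, assuming a dataset was already registered (e.g. via register_coco_instances) under the name passed in; the dataset name, image path, and hyperparameters here are placeholders:

import cv2
predictor = train_model("my_dataset", learning_rate=0.00025, num_iteration=300,
                        batch_per_image=128, num_classes=3)
outputs = predictor(cv2.imread("example.jpg"))
print(outputs["instances"].pred_classes)
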
def train_detectron2():
    dataset_storage = {
        'synthetic': generate_synthetic_datasets(),
        'real': generate_real_datasets()
    }

    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    cfg.DATASETS.TRAIN = (SYNTHETIC_DATASET_NAME[MODES[0]], REAL_DATASET_NAME[MODES[0]])
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl"
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
    cfg.SOLVER.MAX_ITER = 500
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, and good enough for this toy dataset (default: 512)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(dataset_storage["synthetic"][MODES[0]]['unit_classes'])
    cfg.OUTPUT_DIR = str(OUTPUT_PATH)

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()

    return cfg
Example #3
def train(train_dir, name_data, json_dir, config, resume_status, iteration,
          batch, lr):
    cfg = get_cfg()
    cfg.merge_from_file("./configs/" + config)
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = lr
    cfg.SOLVER.WARMUP_ITERS = 1200
    cfg.SOLVER.MAX_ITER = iteration
    cfg.SOLVER.GAMMA = 0.05
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = batch
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
    cfg.DATASETS.TRAIN = (name_data, )
    cfg.DATASETS.TEST = ()

    try:
        register_coco_instances(name_data, {}, json_dir,
                                train_dir)  # Train data
    except ValueError:
        print("Data already registerd. Continue.")

    if resume_status:
        cfg.MODEL.WEIGHTS = os.path.join('output', 'model_final.pth')

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=resume_status)
    trainer.train()
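
A hypothetical invocation of the function above; every path and value is a placeholder, and the config name must exist under ./configs/:

train(train_dir="data/train_images", name_data="my_train",
      json_dir="data/annotations/train.json",
      config="mask_rcnn_R_50_FPN_3x.yaml",
      resume_status=False, iteration=3000, batch=128, lr=0.00025)
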
Example #4
def task_b_MOTS_training(model_name, model_file):
    #model_name = model_name + '_inference'
    print('Running task B for model', model_name)

    SAVE_PATH = os.path.join('./results_week_5_task_b', model_name)
    os.makedirs(SAVE_PATH, exist_ok=True)

    # Load model and configuration
    print('Loading Model')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    cfg.DATASETS.TRAIN = ('MOTS_train',)
    cfg.DATASETS.TEST = ('KITTIMOTS_val',)
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = SAVE_PATH
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.00025
    cfg.SOLVER.MAX_ITER = 200
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    cfg.TEST.SCORE_THRESH = 0.5

    # Training
    print('Training')
    trainer = DefaultTrainer(cfg)
    val_loss = ValidationLoss(cfg)
    trainer.register_hooks([val_loss])
    # Swap the last two hooks so the ValidationLoss hook runs before the default writer
    trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Evaluation
    print('Evaluating')
    evaluator = COCOEvaluator('KITTIMOTS_val', cfg, False, output_dir=SAVE_PATH)
    trainer.model.load_state_dict(val_loss.weights)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])
    print('Plotting losses')
    draw_loss(cfg, cfg.SOLVER.MAX_ITER, model_name, SAVE_PATH)

    # Qualitative results: visualize some results
    print('Getting qualitative results')
    predictor = DefaultPredictor(cfg)
    predictor.model.load_state_dict(trainer.model.state_dict())
    inputs = kitti_val()
    #inputs = inputs[:20] + inputs[-20:]
    inputs = inputs[220:233] + inputs[1995:2100]
    for i, sample in enumerate(inputs):
        file_name = sample['file_name']
        print('Prediction on image ' + file_name)
        img = cv2.imread(file_name)
        outputs = predictor(img)
        v = Visualizer(
            img[:, :, ::-1],
            metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
            scale=0.8,
            instance_mode=ColorMode.IMAGE)
        v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
        cv2.imwrite(os.path.join(SAVE_PATH, 'Inference_' + model_name + '_inf_' + str(i) + '.png'), v.get_image()[:, :, ::-1])
Example #5
def train(train_flag, resume_load=False):
    # trainer = Trainer(cfg)
    trainer = DefaultTrainer(cfg)  # relies on a module-level cfg
    trainer.resume_or_load(resume=resume_load)
    if train_flag:
        trainer.train()
    return trainer
def Train():
    register_coco_instances(
        "custom", {}, "/home/lsc/datasets/butterfly/Annotations/train.json",
        "/home/lsc/datasets/butterfly/TrainData/JPEGImages")
    custom_metadata = MetadataCatalog.get("custom")
    dataset_dicts = DatasetCatalog.get("custom")

    cfg = get_cfg()
    cfg.merge_from_file(
        "/home/lsc/detectron2/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml"
    )
    cfg.DATASETS.TRAIN = ("custom", )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 8
    cfg.MODEL.WEIGHTS = 'detectron2://COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x/139173657/model_final_68b088.pkl'
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.02
    cfg.SOLVER.MAX_ITER = 500
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 94

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
def train_iobjectspy_voc(train_data_path, train_config_path, weight_path,
                         max_iter, out_dir, register_train_name,
                         ml_set_tracking_path, experiment_id,
                         ml_experiment_tag):
    cfg = get_cfg()
    cfg.merge_from_file(train_config_path)
    cfg.DATASETS.TRAIN = (register_train_name, )
    cfg.DATASETS.TEST = ()  # no metrics implemented for this dataset
    cfg.MODEL.WEIGHTS = weight_path  # initialize from model zoo
    if max_iter != -1:
        cfg.SOLVER.MAX_ITER = max_iter
    num_class = get_class_num(train_data_path)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = num_class  # get classes from sda
    cfg.OUTPUT_DIR = out_dir
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=True)
    trainer.train()
    try:
        import mlflow as ml
        # Set up mlflow tracking
        ml.set_tracking_uri(ml_set_tracking_path)
        # Manage runs via experiment ids; it is best to name this level after
        # the project, e.g. iobjectspy_faster_rcnn_dota
        ml.set_experiment(experiment_id)
        # Tag the run
        ml.set_tag('experiment_id', ml_experiment_tag)
        ml.log_param('lr', cfg.SOLVER.BASE_LR)
        ml.log_param('max_iter', cfg.SOLVER.MAX_ITER)
        ml.log_param('ims_per_batch', cfg.SOLVER.IMS_PER_BATCH)
    except Exception:
        pass
Example #8
def train(*args):

    prepare_dataset()

    # D2 configuration
    cfg = get_cfg()
    cfg.merge_from_file(
        model_zoo.get_config_file(
            "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    cfg.DATASETS.TRAIN = ("balloon_train", )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
    )  # Let training initialize from model zoo
    cfg.SOLVER.IMS_PER_BATCH = args[
        0]  # number ims_per_batch should be divisible by number of workers. D2 assertion.
    cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
    cfg.SOLVER.MAX_ITER = 300  # 300 iterations seems good enough for this toy dataset; you may need to train longer for a practical dataset
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, and good enough for this toy dataset (default: 512)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # only has one class (balloon)
    cfg.OUTPUT_DIR = os.environ[
        'SM_OUTPUT_DATA_DIR']  # TODO check that this config works fine

    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
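
Because cfg.OUTPUT_DIR is read from the SM_OUTPUT_DATA_DIR environment variable, running this function outside SageMaker requires setting that variable first; a minimal sketch (the path is a placeholder):

import os
os.environ.setdefault("SM_OUTPUT_DATA_DIR", "./output")
os.makedirs(os.environ["SM_OUTPUT_DATA_DIR"], exist_ok=True)
train(2)  # args[0] becomes SOLVER.IMS_PER_BATCH
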
def Train():
    register_coco_instances(
        "custom", {}, "datasets/coco/annotations/instances_train2017.json",
        "datasets/coco/train2017")
    custom_metadata = MetadataCatalog.get("custom")
    dataset_dicts = DatasetCatalog.get("custom")
    for d in random.sample(dataset_dicts, 3):
        img = cv2.imread(d["file_name"])
        visualizer = Visualizer(img[:, :, ::-1],
                                metadata=custom_metadata,
                                scale=1)
        vis = visualizer.draw_dataset_dict(d)
        cv2.imshow('Sample', vis.get_image()[:, :, ::-1])
        cv2.waitKey()

    cfg = get_cfg()
    cfg.merge_from_file(
        "configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml")
    cfg.DATASETS.TRAIN = ("custom", )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 4
    cfg.MODEL.WEIGHTS = 'model_final_3c3198.pkl'
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.0001

    cfg.SOLVER.MAX_ITER = 150000
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
Example #10
def main():
    args = parse_args()

    with open(args.config, "r") as f:
        config = yaml.safe_load(f)
    if "classes" not in config:
        raise Exception("Could not find class names")
    n_classes = len(config["classes"])
    classes = config["classes"]

    for d in ["train"]:
        DatasetCatalog.register("custom_" + d, lambda d=d: get_annotated_dataset(args.annotator_root, args.data_folders))
        MetadataCatalog.get("custom_" + d).set(thing_classes=classes)
    custom_metadata = MetadataCatalog.get("custom_train")

    cfg = get_cfg()
    cfg.merge_from_file(args.model_config)
    cfg.DATASETS.TRAIN = ("custom_train",)
    cfg.DATASETS.TEST = ()   # no metrics implemented for this dataset
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = args.initial_weights
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025
    cfg.SOLVER.MAX_ITER = args.max_iter
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128   # faster, and good enough for this toy dataset
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(classes)

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg) 
    trainer.resume_or_load(resume=False)
    trainer.train()
def model_configuration(model_url, learning_rate, max_iter):

    model_a = os.path.join("COCO-InstanceSegmentation", model_url)
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_a))
    cfg.DATASETS.TRAIN = ("kitti-mots", )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        model_a)  # initialize from the checkpoint matching the chosen config
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = learning_rate
    cfg.SOLVER.MAX_ITER = max_iter
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
    # cfg.INPUT.MASK_FORMAT = 'rle'
    # cfg.INPUT.MASK_FORMAT='bitmask'

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    print('Training...')
    trainer.train()

    # EVALUATION
    print('Evaluating...')
    evaluator = COCOEvaluator("kitti-mots", cfg, False, output_dir="./Search/")
    val_loader = build_detection_test_loader(cfg, "kitti-mots")
    results = inference_on_dataset(trainer.model, val_loader, evaluator)
    print(results)

    return results
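
A hypothetical call, assuming the "kitti-mots" dataset has already been registered (the yaml name is resolved relative to the model zoo's COCO-InstanceSegmentation folder):

results = model_configuration("mask_rcnn_X_101_32x8d_FPN_3x.yaml",
                              learning_rate=0.00025, max_iter=1000)
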
Example #12
def task_b(model_name, model_file):
    save_path = Path("output/task_b") / model_name
    os.makedirs(save_path, exist_ok=True)

    cfg = base_cfg(model_file, save_path)
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.00025
    cfg.SOLVER.MAX_ITER = 1000
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    cfg.TEST.SCORE_THRESH = 0.5

    trainer = DefaultTrainer(cfg)
    val_loss = ValidationLoss(cfg)
    trainer.register_hooks([val_loss])
    # Swap the last two hooks so the ValidationLoss hook runs before the default writer
    trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]
    trainer.resume_or_load(resume=False)
    trainer.train()

    evaluator = COCOEvaluator("kitti-mots-val",
                              cfg,
                              False,
                              output_dir=save_path)
    trainer.model.load_state_dict(val_loss.weights)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])
    plot_validation_loss(cfg, cfg.SOLVER.MAX_ITER, model_name, save_path)

    get_qualitative_results(cfg, save_path)
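
The ValidationLoss hook used here and in the earlier task_b_MOTS_training example is not defined in these snippets. Below is a minimal sketch of the common community pattern for such a hook, which runs the model on validation batches after each step and logs the losses; the class used above evidently also tracks the best weights (val_loss.weights), which is omitted here:

import torch
import detectron2.utils.comm as comm
from detectron2.data import build_detection_train_loader
from detectron2.engine import HookBase

class ValidationLoss(HookBase):
    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg.clone()
        # Use a training-style loader on the validation set so the model
        # returns losses rather than predictions
        self.cfg.DATASETS.TRAIN = cfg.DATASETS.TEST
        self._loader = iter(build_detection_train_loader(self.cfg))

    def after_step(self):
        data = next(self._loader)
        with torch.no_grad():
            loss_dict = self.trainer.model(data)
            loss_dict_reduced = {"val_" + k: v.item()
                                 for k, v in comm.reduce_dict(loss_dict).items()}
            if comm.is_main_process():
                self.trainer.storage.put_scalars(
                    total_val_loss=sum(loss_dict_reduced.values()),
                    **loss_dict_reduced)
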
Example #13
def main(args):

    dataset_train = "faces_train"
    dataset_validation = "faces_validation"
    cfg = setup(args)
    cfg.DATASETS.TRAIN = (dataset_train, )
    ### TODO:
    ### - Test with validation set.
    ###
    # cfg.DATASETS.TEST = (dataset_validation,)
    cfg.DATASETS.TEST = ()

    train_data = args.train_data
    train_annots = args.train_annotations
    validation_data = args.validation_data
    validation_annots = args.validation_annotations

    DatasetCatalog.register(dataset_train,
                            lambda: get_dataset_dict(train_annots, train_data))
    MetadataCatalog.get(dataset_train).set(thing_classes=["face"])
    DatasetCatalog.register(
        dataset_validation,
        lambda: get_dataset_dict(validation_annots, validation_data))
    MetadataCatalog.get(dataset_validation).set(thing_classes=["face"])
    faces_metadata = MetadataCatalog.get(dataset_train)

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
Example #14
def train(args, mode, _appcfg):
    name = "hmd"
    for subset in ["train", "val"]:
        metadata = load_and_register_dataset(name, subset, _appcfg)

    cfg = config.get_cfg()
    cfg.merge_from_file(
        "/aimldl-cod/external/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
    )
    cfg.DATASETS.TRAIN = ("hmd_train", "hmd_val")
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl"  # initialize from model zoo
    # cfg.MODEL.WEIGHTS = "/codehub/apps/detectron2/release/model_final.pth"
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025
    # cfg.SOLVER.MAX_ITER = 350000    # 300 iterations seems good enough, but you can certainly train longer
    # cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128   # faster, and good enough for this toy dataset
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512  # the default value
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3  # 3 classes

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
Example #15
def Train():
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    # register train dataset
    register_coco_instances("custom", {},
                            "datasets/traindata/midv500_coco.json",
                            "datasets/traindata/")
    custom_metadata = MetadataCatalog.get("custom")
    dataset_dicts = DatasetCatalog.get("custom")

    # set cfg
    cfg = get_cfg()
    cfg.merge_from_file(
        "configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x.yaml")
    cfg.DATASETS.TRAIN = ("custom", )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 4
    cfg.MODEL.WEIGHTS = 'model_final_maskrcnn_dc5.pkl'
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.02
    cfg.SOLVER.MAX_ITER = 1000
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1

    # training
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
def train(args):
    dataset_name = "dataset_train"
    register_coco_instances(dataset_name, {}, args.annotations_path,
                            args.images_dir)
    cfg = get_cfg()
    if args.type.lower() == "maskrcnn":
        cfg.merge_from_file(
            model_zoo.get_config_file(
                "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    else:
        cfg.merge_from_file(
            model_zoo.get_config_file(
                "COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
        setKeypoints(dataset_name)
        cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 14
    cfg.DATASETS.TRAIN = (dataset_name, )
    cfg.DATASETS.TEST = ()
    cfg.INPUT.MASK_FORMAT = 'bitmask'
    cfg.DATALOADER.NUM_WORKERS = 2
    setWeights(args, cfg)
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
    cfg.SOLVER.MAX_ITER = 500  # 300 iterations seems good enough for this toy dataset; you may need to train longer for a practical dataset
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, and good enough for this toy dataset (default: 512)
    setNumClasses(cfg)
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
Example #17
def main(args):

    # first register the datasets I will use
    register_coco_instances("my_dataset_train", {}, "training.json", "../data/ade20k/full_data/images/training/")
    register_coco_instances("my_dataset_val", {}, "validation.json", "../data/ade20k/full_data/images/validation/")

    # start from a default cfg
    cfg = get_cfg()
    # merging a yaml file updates the cfg accordingly
    cfg.merge_from_file("../detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml")

    # Task-specific changes made for training on the ade20k dataset
    cfg.DATASETS.TRAIN = ("my_dataset_train",)
    cfg.DATASETS.TEST = ()  # no metrics implemented for this dataset
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml") 
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 150  # 150 classes
    cfg.SOLVER.IMS_PER_BATCH = 16  # the default


    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

    # I highly suggest reading the source code of DefaultTrainer again if you forget why this is done.
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
Example #18
def prepare_for_training(N_iter,
                         output_dir,
                         train_dataset_name,
                         N_classes,
                         start_training=False,
                         gpu_avail=True,
                         model_type="COCO-Detection/faster_rcnn_R_50_C4_1x.yaml"):
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_type))
    cfg.OUTPUT_DIR = output_dir
    cfg.DATASETS.TRAIN = (train_dataset_name,)
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_type)  # Let training initialize from model zoo
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
    cfg.SOLVER.GAMMA = 0.99  # lr decay factor
    cfg.SOLVER.STEPS = list(range(1000, N_iter, 1000))  # decay steps
    cfg.SOLVER.WARMUP_ITERS = 500  # warmup steps
    cfg.SOLVER.MAX_ITER = N_iter    # 300 iterations seems good enough for this toy dataset; you may need to train longer for a practical dataset
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128   # faster, and good enough for this toy dataset (default: 512)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = N_classes  # number of classes

    if not gpu_avail:
        cfg.MODEL.DEVICE = 'cpu'

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)

    if start_training:
        trainer.train()

    return trainer, cfg
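
Because prepare_for_training can defer training, a typical two-step usage might look like this (dataset name and values are placeholders):

trainer, cfg = prepare_for_training(N_iter=3000, output_dir="./output",
                                    train_dataset_name="my_train",
                                    N_classes=4, start_training=False)
# ... inspect cfg or the trainer here ...
trainer.train()
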
Example #19
 def train_coco_data(self, coco_json):
     dataset_name = "mask_train_data"
     DatasetCatalog.register(
         dataset_name,
         lambda: load_coco_json(json_file=coco_json,
                                image_root=self.train_data_path))
     MetadataCatalog.get(dataset_name).set(
         json_file=coco_json,
         image_root=self.train_data_path,
         evaluator_type="coco",
         thing_classes=["rightmask"],
         thing_dataset_id_to_contiguous_id={1: 0})
     cfg = get_cfg()
     cfg.merge_from_file(
         "/home/detectron2/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"
     )
     cfg.DATASETS.TRAIN = (dataset_name, )
     cfg.DATASETS.TEST = (dataset_name, )
     cfg.DATALOADER.NUM_WORKERS = 2
     cfg.MODEL.WEIGHTS = "/home/detectron2/train_data/model_final_280758.pkl"
     cfg.SOLVER.IMS_PER_BATCH = 2
     cfg.SOLVER.BASE_LR = 0.01  # learning rate
     cfg.SOLVER.MAX_ITER = 300  # maximum number of iterations
     cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
     cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
     os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
     print("Model save path: " + cfg.OUTPUT_DIR)
     trainer = DefaultTrainer(cfg)
     trainer.resume_or_load(resume=False)
     trainer.train()  # start training
Example #20
def train_model():
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    cfg.DATASETS.TRAIN = ("balloon_train",)
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 2

    # Let training initialize from model zoo
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR

    # 300 iterations seems good enough for this toy dataset;
    #   you may need to train longer for a practical dataset
    cfg.SOLVER.MAX_ITER = 300

    # faster, and good enough for this toy dataset (default: 512)
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # only has one class (ballon)

    cfg.OUTPUT_DIR = gcfg.get_ou_dir
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
Example #21
def train(config, data_path):
    """Train the Mask-RCNN for the given configuration and the given data"""
    register_data(data_path, prefix='yeast_cells_')
    os.makedirs(config.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(config)
    trainer.resume_or_load(resume=True)
    trainer.train()
    return trainer
Example #22
def main(setupoptions: SetupOptions):
    baseStr = 'VerticalNanowires_noSn_16'

    maskType = setDatasetAndMetadata(baseStr, setupoptions)
    configurator = setConfigurator(setupoptions, baseStr, maskType)
    trainer = DefaultTrainer(configurator)
    trainer.resume_or_load(resume=setupoptions.continueTraining)
    trainer.train()
Example #23
def train_model(dataset):
    # Export the dataset to COCO format
    export_file, image_dir = export_dataset(dataset)

    # Register it as a COCO dataset in the Detectron2 framework
    try:
        register_coco_instances('my_dataset', {}, export_file, image_dir)
    except Exception:
        print('Dataset was already registered')
    dataset_dicts = load_coco_json(export_file, image_dir)
    MetadataCatalog.get('my_dataset').set(
        thing_classes=[c['name'] for c in dataset.categories])
    segments_metadata = MetadataCatalog.get('my_dataset')
    print(segments_metadata)

    # Configure the training run
    cfg = get_cfg()
    cfg.merge_from_file(
        model_zoo.get_config_file(
            'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml'))
    cfg.DATASETS.TRAIN = ('my_dataset', )
    cfg.DATASETS.TEST = ()
    cfg.INPUT.MASK_FORMAT = 'bitmask'
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml'
    )  # Let training initialize from model zoo
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
    cfg.SOLVER.MAX_ITER = 6000  # you may need to train longer for a practical dataset
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512  # the default value
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(
        dataset.categories)  # number of categories
    #     cfg.MODEL.DEVICE = 'cuda'
    print('Max iter is ', cfg.SOLVER.MAX_ITER)
    # Start the training
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Return the model
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, 'model_final.pth')
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set the testing threshold for this model
    cfg.DATASETS.TEST = ('my_dataset', )
    cfg.TEST.DETECTIONS_PER_IMAGE = 1000

    built_model = build_model(cfg)  # returns a torch.nn.Module
    DetectionCheckpointer(built_model).load(
        cfg.MODEL.WEIGHTS)  #capture trained model
    checkpointer = DetectionCheckpointer(
        built_model, save_dir="/content/gdrive/My Drive/Colab Notebooks")
    checkpointer.save("model_final")  # writes model_final.pth to save_dir

    predictor = DefaultPredictor(cfg)
    model = Model(predictor)

    return model
def main(args):

    # Register DeepSports dataset
    register_coco_instances("deepsports_train", {}, os.path.join(args.dataset_path,"train.json"),
                            os.path.join(args.dataset_path,"train"))
    register_coco_instances("deepsports_val", {}, os.path.join(args.dataset_path,"val.json"),
                            os.path.join(args.dataset_path,"val"))
    register_coco_instances("deepsports_test", {},
                            os.path.join(args.dataset_path,"test_nolabels.json"),
                            os.path.join(args.dataset_path,"test"))

    # Load config file and adjust where needed
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    cfg.INPUT.MASK_FORMAT = 'bitmask'
    cfg.DATASETS.TRAIN = ("deepsports_train",)
    cfg.DATASETS.TEST = ("deepsports_val",)
    cfg.DATALOADER.NUM_WORKERS = 4
    cfg.SOLVER.IMS_PER_BATCH = args.bs
    cfg.SOLVER.BASE_LR = args.lr
    cfg.SOLVER.MAX_ITER = args.iter
    cfg.SOLVER.STEPS = []
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2  # two classes: human and ball
    cfg.MODEL.WEIGHTS = ""  # train from scratch; transfer learning is prohibited!
    print(cfg)

    # Create output dir
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

    if args.predict:
        # load model and weights
        model = build_model(cfg)
        DetectionCheckpointer(model).load(args.predict)
        # make dataloader
        test_loader = build_detection_test_loader(cfg, "deepsports_test")
        # predict
        outputs = []
        model.eval()
        with torch.no_grad():
            for batch in tqdm(test_loader):
                pred = model(batch)
                pred = batch_to_dict(pred, batch) # convert pred to serializable format
                outputs.extend(pred)

        with open('prediction.json', 'w') as fout:
            json.dump(outputs, fout)

        # exit
        sys.exit()

    # Train
    trainer = DefaultTrainer(cfg) 
    trainer.resume_or_load(resume=False)
    trainer.train()
    evaluator = COCOEvaluator("deepsports_val", ("segm",), False, output_dir="./output/")
    val_loader = build_detection_test_loader(cfg, "deepsports_val")
    print(inference_on_dataset(trainer.model, val_loader, evaluator))
    def train(self, dataset_path, max_iterations=30000):
        DatasetCatalog.register('training_dataset', lambda: data.to_coco(dataset_path))
        self.cfg.DATASETS.TRAIN = ('training_dataset',)
        self.cfg.DATASETS.TEST = ()
        self.cfg.SOLVER.MAX_ITER = max_iterations

        trainer = DefaultTrainer(self.cfg)
        trainer.resume_or_load(resume=False)
        trainer.train()
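
For reference, a loader registered through DatasetCatalog.register, such as the data.to_coco(dataset_path) callable above, is expected to return a list of dicts in detectron2's standard dataset format; a minimal illustrative sketch with placeholder values:

from detectron2.structures import BoxMode

def toy_loader():
    return [{
        "file_name": "img_0.jpg",   # path to the image
        "image_id": 0,
        "height": 480,
        "width": 640,
        "annotations": [{
            "bbox": [10.0, 20.0, 110.0, 220.0],
            "bbox_mode": BoxMode.XYXY_ABS,
            "category_id": 0,
        }],
    }]
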
    def train(self, training_frames, network, get_images):

        if training_frames <= 0:
            raise ValueError(
                "The number of input frames must be bigger than 0")

        self.cfg.OUTPUT_DIR = f'../datasets/detectron2/{network}'

        self.generate_datasets(training_frames, get_images)

        retinanet_path = "COCO-Detection/retinanet_R_101_FPN_3x.yaml"
        faster_rcnn_path = "COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml"
        if detectron2.__version__ == "0.1":
            if network == 'faster_rcnn':
                self.cfg.merge_from_file(
                    pkg_resources.resource_filename(
                        "detectron2.model_zoo",
                        os.path.join("configs", faster_rcnn_path)))
                self.cfg.MODEL.WEIGHTS = model_zoo.ModelZooUrls.get(
                    faster_rcnn_path)
            if network == 'retinanet':
                self.cfg.merge_from_file(
                    pkg_resources.resource_filename(
                        "detectron2.model_zoo",
                        os.path.join("configs", retinanet_path)))
                self.cfg.MODEL.WEIGHTS = model_zoo.ModelZooUrls.get(
                    retinanet_path)
        else:
            if network == 'faster_rcnn':
                self.cfg.merge_from_file(
                    model_zoo.get_config_file(faster_rcnn_path))
                self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
                    faster_rcnn_path)
            if network == 'retinanet':
                self.cfg.merge_from_file(
                    model_zoo.get_config_file(retinanet_path))
                self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
                    retinanet_path)

        self.cfg.DATASETS.TRAIN = ('train_set', )
        self.cfg.DATASETS.TEST = ('val_set', )
        self.cfg.DATALOADER.NUM_WORKERS = 1
        self.cfg.SOLVER.IMS_PER_BATCH = 1
        self.cfg.SOLVER.BASE_LR = 0.001
        self.cfg.SOLVER.MAX_ITER = 2000
        self.cfg.SOLVER.STEPS = (1000, 2000)
        self.cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
        self.cfg.MODEL.DEVICE = 'cuda'

        if not os.path.isfile(
                os.path.join(self.cfg.OUTPUT_DIR, 'model_final.pth')):

            os.makedirs(self.cfg.OUTPUT_DIR, exist_ok=True)

            trainer = DefaultTrainer(self.cfg)
            trainer.resume_or_load(resume=False)
            trainer.train()
Example #27
def main():
    # for d in ["train", "val"]:
    thing_classes = np.load('./dataset/thing_classes.npy', allow_pickle=True)
    for d in ["trainval"]:
        DatasetCatalog.register(
            "aihub_" + d,
            lambda d=d: get_dataset_dicts(f"dataset/{d}_dataframe.csv"))
        MetadataCatalog.get("aihub_" +
                            d).set(thing_classes=list(thing_classes))

    aihub_metadata = MetadataCatalog.get("aihub_trainval")

    cfg = get_cfg()
    cfg.merge_from_file(
        model_zoo.get_config_file("COCO-Detection/retinanet_R_50_FPN_3x.yaml"))
    cfg.DATASETS.TRAIN = ("aihub_trainval", )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 12
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        "COCO-Detection/retinanet_R_50_FPN_3x.yaml")
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.0001
    cfg.SOLVER.MAX_ITER = 3000000
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 32
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1031

    # RetinaNet
    cfg.MODEL.RETINANET.NUM_CLASSES = 1031
    cfg.MODEL.RETINANET.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"]
    cfg.MODEL.RETINANET.NUM_CONVS = 4
    cfg.MODEL.RETINANET.IOU_THRESHOLDS = [0.4, 0.5]
    cfg.MODEL.RETINANET.IOU_LABELS = [0, -1, 1]
    cfg.MODEL.RETINANET.PRIOR_PROB = 0.01
    cfg.MODEL.RETINANET.SCORE_THRESH_TEST = 0.05
    cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST = 1000
    cfg.MODEL.RETINANET.NMS_THRESH_TEST = 0.5

    # Weights on (dx, dy, dw, dh) for normalizing Retinanet anchor regression targets
    cfg.MODEL.RETINANET.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0)

    # Loss parameters
    cfg.MODEL.RETINANET.FOCAL_LOSS_GAMMA = 2.0
    cfg.MODEL.RETINANET.FOCAL_LOSS_ALPHA = 0.25
    cfg.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA = 0.1
    # Options are: "smooth_l1", "giou"
    cfg.MODEL.RETINANET.BBOX_REG_LOSS_TYPE = "smooth_l1"
    cfg.MODEL.RPN.POST_NMS_TOPK_TRAIN = 1500
    cfg.DATALOADER.ASPECT_RATIO_GROUPING = False
    # One of BN, SyncBN, FrozenBN, GN
    # Only supports GN until unshared norm is implemented
    cfg.MODEL.RETINANET.NORM = ""

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
Example #28
 def train(self):
     trainer = DefaultTrainer(self.cfg)
     trainer.resume_or_load(resume=False)
     if os.path.exists(self.output):
         shutil.rmtree(self.output)  # needs: import shutil
     os.makedirs(self.output)
     trainer.train()
     return trainer
Example #29
def d2_run():

    train_metadata, val_metadata = register_dataset()
    cfg = set_up()
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()

    output_dir = cfg.OUTPUT_DIR
    save_dir = os.path.join(output_dir, 'result')
    if not os.path.exists(save_dir): os.makedirs(save_dir)

    cfg.MODEL.WEIGHTS = os.path.join(output_dir, "model_final.pth")
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.2  # set the testing threshold for this model
    cfg.DATASETS.TEST = (regist_val_name, )

    predictor = DefaultPredictor(cfg)

    test_dir = val_images_dir
    imgs_list = [
        os.path.join(test_dir, file_name) for file_name in os.listdir(test_dir)
        if file_name.endswith(".jpg") or file_name.endswith(".png")
        or file_name.endswith(".bmp") or file_name.endswith(".jpeg")
    ]

    for d in imgs_list:
        im = cv2.imread(d)
        outputs = predictor(im)
        instances = outputs["instances"]
        pred_classes = (instances.pred_classes.cpu().tolist()
                        if len(instances) else None)
        print(pred_classes)

        v = Visualizer(im[:, :, ::-1],
                       metadata=train_metadata,
                       scale=0.9,
                       instance_mode=ColorMode.IMAGE_BW)
        v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
        predict_file = os.path.join(
            save_dir,
            os.path.splitext(os.path.basename(d))[0] + "_predict.png")

        cv2.imwrite(predict_file, v.get_image()[:, :, ::-1])

        if os.path.exists(predict_file): print("Done: %s" % predict_file)

    ### evaluate
    evaluator = COCOEvaluator(regist_val_name,
                              cfg,
                              False,
                              output_dir="./output/")
    val_loader = build_detection_test_loader(cfg, regist_val_name)
    my_eval = inference_on_dataset(trainer.model, val_loader, evaluator)
    print(my_eval)
    log = ("%s evaluate: \n" % (model_name), my_eval)
    print(log, file=open(log_file, "a"))
Example #30
 def train_predictor(self):
     self.cfg.SOLVER.CHECKPOINT_PERIOD = 2000
     self.cfg.SOLVER.IMS_PER_BATCH = 2
     self.cfg.SOLVER.BASE_LR /= 2.
     self.predictor = DefaultPredictor(self.cfg)
     self.cfg.OUTPUT_DIR = '/scratch/users/zzweng/output/coco/classagnostic'
     print(self.cfg.OUTPUT_DIR)
     os.makedirs(self.cfg.OUTPUT_DIR, exist_ok=True)
     trainer = DefaultTrainer(self.cfg)
     trainer.resume_or_load(resume=False)
     trainer.train()