Example #1
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = num_workers
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
    cfg.SOLVER.IMS_PER_BATCH = ims_per_batch
    cfg.SOLVER.BASE_LR = model_lr
    cfg.SOLVER.MAX_ITER = max_train_iter
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = bach_size_per_img
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = num_labels
    cfg.OUTPUT_DIR = "{}/maskrcnn_{}_{}_{}_{}_{}".format(work_root,num_labels,max_train_iter,bach_size_per_img,ims_per_batch,model_lr)
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

    ####### Training

    # trainer = CustomTrainer(cfg)
    trainer = DefaultTrainer(cfg)
    train_data_loader = trainer.build_train_loader(cfg)

    ######## Visualization
    # data_iter = iter(train_data_loader)
    # batch = next(data_iter)
    # rows, cols = 2, 2
    # plt.figure(figsize=(20,20))
    #
    # for i, per_image in enumerate(batch[:4]):
    #
    #     plt.subplot(rows, cols, i+1)
    #
    #     # Pytorch tensor is in (C, H, W) format
    #     img = per_image["image"].permute(1, 2, 0).cpu().detach().numpy()
    #     img = utils.convert_image_to_rgb(img, cfg.INPUT.FORMAT)
    #     plt.imshow(img)
    #     plt.axis("off")
    # plt.show()
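
The CustomTrainer referenced in the commented-out trainer = CustomTrainer(cfg) line above is not included in this example. Below is a minimal sketch of what such a subclass often looks like, assuming it only adds a COCO evaluator; the class body and the "eval" output folder are assumptions, not the original code.

from detectron2.engine import DefaultTrainer
from detectron2.evaluation import COCOEvaluator

class CustomTrainer(DefaultTrainer):
    # DefaultTrainer leaves build_evaluator unimplemented, so a subclass
    # typically supplies one (the output folder name here is illustrative).
    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "eval")
        return COCOEvaluator(dataset_name, cfg, False, output_dir=output_folder)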
Example #2
import os
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.data import build_detection_train_loader
from detectron2.engine import DefaultTrainer

# Registration of 'my_dataset' / 'test_dataset' and the train_mapper function
# are assumed to be defined elsewhere; they are not shown in this snippet.
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml"))
cfg.DATALOADER.NUM_WORKERS = 2
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml")  # Let training initialize from model zoo
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
cfg.SOLVER.MAX_ITER = 1000    # 1000 iterations is enough for this toy dataset; you may need to train longer for a practical dataset
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256   # faster, and good enough for this toy dataset (default: 512)
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # only has one class (balloon)
cfg.DATASETS.TRAIN = ('my_dataset',)
cfg.DATASETS.TEST = ()
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
# DefaultTrainer builds its train loader inside __init__, so reassigning
# trainer.build_train_loader afterwards has no effect; subclass it instead
# so the custom train_mapper is actually used.
class MapperTrainer(DefaultTrainer):
    @classmethod
    def build_train_loader(cls, cfg):
        return build_detection_train_loader(cfg, mapper=train_mapper)

trainer = MapperTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()

# Dump the resolved config to a YAML file (cfg.dump() returns the config as a YAML string)
with open('test.yaml', 'w') as f:
    f.write(cfg.dump())
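
The dumped YAML can later be loaded back into a fresh config if needed. A small usage sketch, reusing the test.yaml file written above:

from detectron2.config import get_cfg

cfg_reloaded = get_cfg()
cfg_reloaded.merge_from_file("test.yaml")  # restores the values dumped above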

from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader
evaluator = COCOEvaluator("test_dataset", cfg, False, output_dir="./output/")
val_loader = build_detection_test_loader(cfg, "test_dataset")
inference_on_dataset(trainer.model, val_loader, evaluator)
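
As a follow-up to the evaluation above, here is a minimal inference sketch using Detectron2's DefaultPredictor. The image path and score threshold are placeholders, and the weights are assumed to be the model_final.pth that the trainer writes to cfg.OUTPUT_DIR.

import cv2
from detectron2.engine import DefaultPredictor

cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")  # weights saved by the trainer
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # confidence threshold for reporting detections
predictor = DefaultPredictor(cfg)

im = cv2.imread("sample.jpg")  # placeholder path; DefaultPredictor expects a BGR image by default
outputs = predictor(im)
print(outputs["instances"].pred_classes, outputs["instances"].pred_boxes)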