def visual_iobjectspy_voc(train_data_path, train_config_path, image_path,
                          register_val_name, model_path, show_images):
    """Visualize predictions of a trained detectron2 model on sample images.

    Picks up to 3 random images from ``image_path``, runs the predictor on
    each, and writes the annotated result into ``show_images``.

    Args:
        train_data_path: dataset root; used to derive the number of classes.
        train_config_path: path to the detectron2 YAML config file.
        image_path: directory containing images to run inference on.
        register_val_name: name of the registered validation dataset.
        model_path: path to the trained model weights.
        show_images: output directory for the annotated images.
    """
    cfg = get_cfg()
    cfg.merge_from_file(train_config_path)
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = model_path  # initialize from trained weights
    cfg.SOLVER.MAX_ITER = 300  # 300 iterations seems good enough, but you can certainly train longer
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, and good enough for this toy dataset
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = get_class_num(train_data_path)  # get classes from sda

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    # cv2.imwrite silently fails when the target directory is missing.
    os.makedirs(show_images, exist_ok=True)
    cfg.DATASETS.TEST = (register_val_name, )
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7
    predictor = DefaultPredictor(cfg)

    from detectron2.utils.visualizer import ColorMode

    pic_names = os.listdir(image_path)
    balloon_metadata = MetadataCatalog.get("iobjectspy_voc")
    # random.sample raises ValueError when the population is smaller than
    # the sample size, so cap the sample at the number of files available.
    for d in random.sample(pic_names, min(3, len(pic_names))):
        im = cv2.imread(os.path.join(image_path, d))
        if im is None:
            # Not a readable image (cv2.imread returns None) — skip it.
            continue
        outputs = predictor(im)
        print(outputs["instances"].pred_classes)
        print(outputs["instances"].pred_boxes)

        v = Visualizer(im[:, :, ::-1],
                       balloon_metadata,
                       scale=0.8,
                       instance_mode=ColorMode.IMAGE)
        v = v.draw_instance_predictions(outputs['instances'].to("cpu"))
        cv2.imwrite(os.path.join(show_images, d), v.get_image()[:, :, ::-1])
def train_iobjectspy_voc(train_data_path, train_config_path, weight_path,
                         max_iter, out_dir, register_train_name,
                         ml_set_tracking_path, experiment_id,
                         ml_experiment_tag):
    """Train a detectron2 model and log run parameters to mlflow (best-effort).

    Args:
        train_data_path: dataset root; used to derive the number of classes.
        train_config_path: path to the detectron2 YAML config file.
        weight_path: initial weights (model zoo or a previous checkpoint).
        max_iter: solver iteration count; pass -1 to keep the config's value.
        out_dir: output directory for checkpoints and logs.
        register_train_name: name of the registered training dataset.
        ml_set_tracking_path: mlflow tracking URI.
        experiment_id: mlflow experiment name (recommended: the project name,
            e.g. iobjectspy_faster_rcnn_dota).
        ml_experiment_tag: value stored under the 'experiment_id' tag.
    """
    cfg = get_cfg()
    cfg.merge_from_file(train_config_path)
    cfg.DATASETS.TRAIN = (register_train_name, )
    cfg.DATASETS.TEST = ()  # no metrics implemented for this dataset
    cfg.MODEL.WEIGHTS = weight_path  # initialize from model zoo
    if max_iter != -1:  # -1 means "keep MAX_ITER from the config file"
        cfg.SOLVER.MAX_ITER = max_iter
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = get_class_num(train_data_path)  # get classes from sda
    cfg.OUTPUT_DIR = out_dir
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=True)
    trainer.train()
    # mlflow logging is deliberately best-effort: a missing mlflow install or
    # an unreachable tracking server must not fail a completed training run.
    try:
        import mlflow as ml
        ml.set_tracking_uri(ml_set_tracking_path)
        # Group runs by experiment; use the project name as the experiment id.
        ml.set_experiment(experiment_id)
        ml.set_tag('experiment_id', ml_experiment_tag)
        ml.log_param('lr', cfg.SOLVER.BASE_LR)
        ml.log_param('max_iter', cfg.SOLVER.MAX_ITER)
        ml.log_param('epoch', cfg.SOLVER.IMS_PER_BATCH)
    except Exception:
        # Was a bare `except:`; never swallow SystemExit/KeyboardInterrupt.
        pass
def inference_detectron2(train_data_path, train_config_path, image_path,
                         tile_size, tile_offset_size, register_val_name,
                         model_path, outpath):
    """Run tiled inference over every image in a directory.

    For each file in ``image_path``, calls ``_estimation_img`` which slides
    tiles of ``tile_size`` with ``tile_offset_size`` over the image and
    writes the detections to ``<outpath>/<image name>.txt``.

    Args:
        train_data_path: dataset root; used to derive classes and class names.
        train_config_path: path to the detectron2 YAML config file.
        image_path: directory containing the images to process.
        tile_size: side length of each inference tile.
        tile_offset_size: stride/overlap offset between tiles.
        register_val_name: name of the registered validation dataset.
        model_path: path to the trained model weights.
        outpath: directory that receives one result .txt per image.
    """
    cfg = get_cfg()
    cfg.merge_from_file(train_config_path)
    cfg.MODEL.WEIGHTS = model_path  # initialize from trained weights
    category_name = get_classname(train_data_path)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = get_class_num(train_data_path)  # get classes from sda

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    # Result files are written here; create it up front.
    os.makedirs(outpath, exist_ok=True)
    cfg.DATASETS.TEST = (register_val_name, )
    predictor = DefaultPredictor(cfg)

    for d in os.listdir(image_path):
        # splitext keeps everything up to the LAST dot, so images whose
        # names contain dots (e.g. "tile.001.png") no longer collide on
        # the same output name the way `split('.')[0]` made them.
        base = os.path.splitext(d)[0]
        _estimation_img(os.path.join(image_path, d),
                        os.path.join(outpath, base + ".txt"),
                        base, tile_size, tile_offset_size,
                        predictor, category_name)
# Example #4
def setup(args):
    """
    Create configs and perform basic setups.

    Merges the YAML config and any CLI overrides, registers the Pascal-VOC
    datasets found under ``args.train_data_path``, fixes the class count and
    output directory, then freezes the config.

    Args:
        args: parsed command-line namespace providing ``train_config_path``,
            ``opts``, ``train_data_path``, ``weight_path`` and ``out_dir``.

    Returns:
        A frozen detectron2 CfgNode, after ``default_setup`` has run.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.train_config_path)
    cfg.merge_from_list(args.opts)

    train_data_path = args.train_data_path
    # basename is portable across path separators (split("/") is not on
    # Windows), and normpath strips a trailing slash that would otherwise
    # yield an empty dataset name.
    data_path_name = os.path.basename(os.path.normpath(train_data_path))
    class_names = get_classname(train_data_path)
    register_all_pascal_voc(train_data_path=train_data_path, class_names=class_names)
    register_train_name = data_path_name + '_trainval'
    cfg.DATASETS.TRAIN = (register_train_name,)
    cfg.DATASETS.TEST = ()  # no metrics implemented for this dataset
    cfg.MODEL.WEIGHTS = args.weight_path
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = get_class_num(train_data_path)  # get classes from sda
    cfg.OUTPUT_DIR = args.out_dir
    cfg.freeze()
    default_setup(cfg, args)
    return cfg