Code Example #1
    def get_predictor(cls):
        ''' load trained model'''

        with cls.lock:
            # check if model is already loaded
            if cls.predictor:
                return cls.predictor

            os.environ['TENSORPACK_FP16'] = 'true'
        
            # create a mask r-cnn model
            mask_rcnn_model = ResNetFPNModel(True)

            try:
                model_dir = os.environ['SM_MODEL_DIR']
            except KeyError:
                model_dir = '/opt/ml/model'

            try:
                cls.pretrained_model = os.environ['PRETRAINED_MODEL']
            except KeyError:
                pass

            # file path to previously trained Mask R-CNN model
            latest_trained_model = ""
            model_search_path = os.path.join(model_dir, "model-*.index" )
            for model_file in glob.glob(model_search_path):
                if model_file > latest_trained_model:
                    latest_trained_model = model_file

            trained_model = latest_trained_model
            print(f'Using model: {trained_model}')

            # fixed resnet50 backbone weights
            cfg.BACKBONE.WEIGHTS = os.path.join(cls.pretrained_model)
            cfg.MODE_FPN = True
            cfg.MODE_MASK = True

            # calling DetectionDataset() gets the number of COCO categories
            # and saves it in the configuration
            DetectionDataset()
            finalize_configs(is_training=False)

            # Create an inference model
            # PredictConfig takes a model, input tensors and output tensors
            cls.predictor = OfflinePredictor(PredictConfig(
                model=mask_rcnn_model,
                session_init=get_model_loader(trained_model),
                input_names=['images', 'orig_image_dims'],
                output_names=[
                    'generate_{}_proposals_topk_per_image/boxes'.format('fpn' if cfg.MODE_FPN else 'rpn'),
                    'generate_{}_proposals_topk_per_image/scores'.format('fpn' if cfg.MODE_FPN else 'rpn'),
                    'fastrcnn_all_scores',
                    'output/boxes',
                    'output/scores',
                    'output/labels',
                    'output/masks'
                ]))
            return cls.predictor
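The OfflinePredictor returned here is called with NumPy arrays in the order of input_names and returns arrays in the order of output_names. Below is a minimal usage sketch, assuming a batch of one BGR image read with OpenCV, that 'orig_image_dims' takes one (height, width, channels) row per image, and a hypothetical MaskRcnnService class owning get_predictor(); verify the exact preprocessing against the training code.

import cv2
import numpy as np

predictor = MaskRcnnService.get_predictor()  # hypothetical class that owns get_predictor()
img = cv2.imread('test.jpg', cv2.IMREAD_COLOR)
images = np.expand_dims(img.astype('float32'), axis=0)                         # (1, H, W, 3)
orig_image_dims = np.expand_dims(np.array(img.shape, dtype='int32'), axis=0)   # (1, 3)

outputs = predictor(images, orig_image_dims)
# outputs follow output_names: proposal boxes/scores, all class scores,
# then the final boxes, scores, labels and masks
boxes, scores, labels, masks = outputs[3], outputs[4], outputs[5], outputs[6]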
Code Example #2
def setup_predict_config(config, gpu=True):
    # gpu may arrive as a bool or as a string flag (e.g. 'False' from a CLI);
    # disable CUDA when it evaluates to false
    if str(gpu).lower() in ('false', '0'):
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    if config:
        cfg.update_args(config)
    register_coco(cfg.DATA.BASEDIR)  # add COCO datasets to the registry
    register_ic(cfg.DATA.BASEDIR)

    finalize_configs(is_training=False)
    cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS
Code Example #3
 def _init_model(self):
     logger.set_logger_dir("/tmp/test_log/", 'd')
     from dataset import DetectionDataset
     from train import ResNetFPNTrackModel
     # init tensorpack model
     cfg.freeze(False)
     model = ResNetFPNTrackModel()
     DetectionDataset()  # initialize the config with information from our dataset
     finalize_configs(is_training=False)
     return model
Code Example #4
    def __init__(self, name, need_network=True, need_img=True, model="best"):
        super().__init__(name=name, is_deterministic=True)
        self._resizer = CustomResize(cfg.PREPROC.TEST_SHORT_EDGE_SIZE,
                                     cfg.PREPROC.MAX_SIZE)
        self._prev_box = None
        self._ff_gt_feats = None
        self._need_network = need_network
        self._need_img = need_img
        self._rotated_bbox = None

        if need_network:
            logger.set_logger_dir(
                "/tmp/test_log_/" + str(random.randint(0, 10000)), 'd')
            if model == "best":
                load = "train_log/hard_mining3/model-1360500"
            elif model == "nohardexamples":
                load = "train_log/condrcnn_all_2gpu_lrreduce2/model-1200500"
            elif model == "newrpn":
                load = "train_log/newrpn1/model"
            elif model == "resnet50_nohardexamples":
                load = "train_log/condrcnn_all_resnet50/model-1200500"
                cfg.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 6, 3]
            elif model == "resnet50":
                load = "train_log/hard_mining3_resnet50/model-1360500"
                cfg.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 6, 3]
            elif model == "gotonly":
                load = "train_log/hard_mining3_onlygot/model-1361000"
            elif model.startswith("checkpoint:"):
                load = model.replace("checkpoint:", "")
            else:
                assert False, ("unknown model", model)
            from dataset import DetectionDataset
            # init tensorpack model
            # cfg.freeze(False)
            DetectionDataset()  # initialize the config with information from our dataset

            cfg.EXTRACT_GT_FEATURES = True
            cfg.MODE_TRACK = False
            extract_model = ResNetFPNModel()
            extract_ff_feats_cfg = PredictConfig(
                model=extract_model,
                session_init=get_model_loader(load),
                input_names=['image', 'roi_boxes'],
                output_names=['rpn/feature'])
            finalize_configs(is_training=False)
            self._extract_func = OfflinePredictor(extract_ff_feats_cfg)

            cfg.EXTRACT_GT_FEATURES = False
            cfg.MODE_TRACK = True
            cfg.USE_PRECOMPUTED_REF_FEATURES = True
            self._pred_func = self._make_pred_func(load)
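The two predictors above split tracking into a feature-extraction pass over the first frame and a tracking pass over later frames. The following hypothetical helper sketches how _extract_func might be called to cache the reference features; the one-image, one-box shapes are assumptions, so check the tracker code for the exact 'roi_boxes' format.

    def _precompute_ref_features(self, first_frame, gt_box):
        """Hypothetical helper: cache reference features of the first-frame box."""
        import numpy as np
        gt_box = np.asarray(gt_box, dtype=np.float32).reshape(1, 4)   # [x1, y1, x2, y2]
        # the predictor returns one array per output name; here only 'rpn/feature'
        self._ff_gt_feats = self._extract_func(first_frame, gt_box)[0]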
Code Example #5
File: train.py Project: lg-lab/Mating_Detector
    def __init__(self, path):
        finalize_configs(is_training=False)

        self.model = ResNetFPNModel()
        self.pred_func = OfflinePredictor(
            PredictConfig(
                model=self.model,
                session_init=get_model_loader(path),
                input_names=self.model.get_inference_tensor_names()[0],
                output_names=self.model.get_inference_tensor_names()[1]))

        cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS
        COCODetection(None, None)  # Only to load the class names into caches
Code Example #6
    def get_predictor(cls):
        ''' load trained model'''

        with cls.lock:
            # check if model is already loaded
            if cls.predictor:
                return cls.predictor

            # create a mask r-cnn model
            mask_rcnn_model = ResNetFPNModel()

            try:
                model_dir = os.environ['SM_MODEL_DIR']
            except KeyError:
                model_dir = '/opt/ml/model'

            try:
                cls.pretrained_model = os.environ['PRETRAINED_MODEL']
            except KeyError:
                pass

            # file path to previously trained Mask R-CNN model
            latest_trained_model = ""
            model_search_path = os.path.join(model_dir, "model-*.index")
            for model_file in glob.glob(model_search_path):
                if model_file > latest_trained_model:
                    latest_trained_model = model_file

            trained_model = latest_trained_model[:-6]  # strip the '.index' suffix to get the checkpoint prefix
            print(f'Using model: {trained_model}')

            # fixed resnet50 backbone weights
            cfg.BACKBONE.WEIGHTS = os.path.join(cls.pretrained_model)
            cfg.MODE_FPN = True
            cfg.MODE_MASK = True
            cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS
            finalize_configs(is_training=False)

            # Create an inference model
            # PredictConfig takes a model, input tensors and output tensors
            input_tensors = mask_rcnn_model.get_inference_tensor_names()[0]
            output_tensors = mask_rcnn_model.get_inference_tensor_names()[1]

            cls.predictor = OfflinePredictor(
                PredictConfig(model=mask_rcnn_model,
                              session_init=get_model_loader(trained_model),
                              input_names=input_tensors,
                              output_names=output_tensors))
            return cls.predictor
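Note that the string comparison model_file > latest_trained_model picks the lexicographically largest filename, which goes wrong once the step count gains a digit (for example, 'model-999999.index' sorts after 'model-1000000.index'). A sketch of a numerically robust alternative, assuming the 'model-<step>.index' naming used above:

import glob
import os
import re

def find_latest_checkpoint(model_dir):
    """Return the checkpoint prefix with the highest global step, or None."""
    candidates = []
    for index_file in glob.glob(os.path.join(model_dir, "model-*.index")):
        match = re.search(r"model-(\d+)\.index$", index_file)
        if match:
            candidates.append((int(match.group(1)), index_file))
    if not candidates:
        return None
    _, latest = max(candidates)        # compare by step number, not by string
    return latest[:-len(".index")]     # strip '.index' to get the prefix get_model_loader() expects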
Code Example #7
def init_predictor():
    register_coco(cfg.DATA.BASEDIR)
    MODEL = ResNetFPNModel()
    finalize_configs(is_training=False)

    predcfg = PredictConfig(
        model=MODEL,
        #session_init=SmartInit("/home/jetson/Documents/trained_model/500000_17/checkpoint"),
        session_init=SmartInit(
            "/home/jetson/Documents/trained_model/255000_04.01/checkpoint"),
        input_names=MODEL.get_inference_tensor_names()[0],
        output_names=MODEL.get_inference_tensor_names()[1])

    predictor = OfflinePredictor(predcfg)

    return predictor
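A minimal usage sketch of the predictor returned by init_predictor(). It assumes the single input tensor is the raw BGR image and that the first three outputs are the final boxes, scores and labels, as in the other examples here; tensorpack's FasterRCNN example also provides a predict_image() helper in eval.py that additionally handles resizing and mask postprocessing. The image path is hypothetical.

import cv2

predictor = init_predictor()
img = cv2.imread("test.jpg", cv2.IMREAD_COLOR)   # hypothetical image path
boxes, scores, labels = predictor(img)[:3]
for box, score, label in zip(boxes, scores, labels):
    print(label, score, box)   # box is [x1, y1, x2, y2] in pixel coordinates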
Code Example #8
File: model.py Project: atalwalkar/determined-1
    def build_model(self, trainer_type: str) -> tp.ModelDesc:
        cfg.DATA.NUM_WORKERS = self.context.get_hparam("num_workers")
        cfg.MODE_MASK = True
        cfg.MODE_FPN = True
        if not self.context.get_hparam("is_gcs"):
            cfg.DATA.BASEDIR = "/rcnn-data/COCO/DIR"
        cfg.TRAIN.LR_SCHEDULE = [240000, 320000, 360000]  # "2x" schedule in Detectron.
        cfg.TRAIN.BASE_LR = 1e-2 * self.context.get_experiment_config().get("optimizations").get(
            "aggregation_frequency"
        )
        cfg.TRAIN.WARMUP = self.context.get_hparam("warmup_iterations")
        cfg.TRAIN.GRADIENT_CLIP = self.context.get_hparam("gradient_clipping")
        cfg.TRAINER = trainer_type
        self.trainer_type = trainer_type

        finalize_configs(is_training=True)  # type: ignore

        return DeterminedResNetFPNModel()
Code Example #9
def evaluate_rcnn(model_name, paper_arxiv_id, cfg_list, model_file):
    evaluator = COCOEvaluator(
        root=COCO_ROOT, model_name=model_name, paper_arxiv_id=paper_arxiv_id
    )
    category_id_to_coco_id = {
        v: k for k, v in COCODetection.COCO_id_to_category_id.items()
    }

    cfg.update_config_from_args(cfg_list)  # TODO backup/restore config
    finalize_configs(False)
    MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()
    predcfg = PredictConfig(
        model=MODEL,
        session_init=SmartInit(model_file),
        input_names=MODEL.get_inference_tensor_names()[0],
        output_names=MODEL.get_inference_tensor_names()[1],
    )
    predictor = OfflinePredictor(predcfg)

    def xyxy_to_xywh(box):
        box[2] -= box[0]
        box[3] -= box[1]
        return box

    df = get_eval_dataflow("coco_val2017")
    df.reset_state()
    for img, img_id in tqdm.tqdm(df, total=len(df)):
        results = predict_image(img, predictor)
        res = [
            {
                "image_id": img_id,
                "category_id": category_id_to_coco_id.get(
                    int(r.class_id), int(r.class_id)
                ),
                "bbox": xyxy_to_xywh([round(float(x), 4) for x in r.box]),
                "score": round(float(r.score), 3),
            }
            for r in results
        ]
        evaluator.add(res)
        if evaluator.cache_exists:
            break

    evaluator.save()
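The nested xyxy_to_xywh() converts the model's [x1, y1, x2, y2] corners into COCO's [x, y, width, height] format, mutating the list in place. A standalone copy to illustrate the arithmetic:

def xyxy_to_xywh(box):
    # same in-place conversion as inside evaluate_rcnn() above
    box[2] -= box[0]
    box[3] -= box[1]
    return box

print(xyxy_to_xywh([10.0, 20.0, 110.0, 70.0]))   # -> [10.0, 20.0, 100.0, 50.0]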
Code Example #10
def config_setup(data_config: DataConfig):
    # config_yaml_path = os.path.join(os.path.abspath(cfg.PROJECT_ROOT), 'train_config/default.yaml')
    # cfg.to_yaml(output_path=config_yaml_path)

    if data_config is None:
        data_config = DataConfig(image_data_basedir=None)
        data_config.pop_with_default()

    set_config_v1(data_config=data_config)

    arrange_multiprocess()

    train_args = add_args()
    train_args = maybe_overwrite_config(train_args)

    register_coco_format(data_config=data_config)
    is_horovod_ = cfg.TRAINER == 'horovod'

    _setup_logging(train_args.logdir, is_horovod_)

    # TODO: what does freeze do?
    finalize_configs(is_training=True)

    return train_args, is_horovod_
Code Example #11
File: predict.py Project: zhaorui8/tensorpack
        nargs='+')
    parser.add_argument('--compact', help='Save a model to .pb')
    parser.add_argument('--serving', help='Save a model to serving file')

    args = parser.parse_args()
    if args.config:
        cfg.update_args(args.config)
    register_coco(cfg.DATA.BASEDIR)  # add COCO datasets to the registry
    MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()

    if not tf.test.is_gpu_available():
        from tensorflow.python.framework import test_util
        assert get_tf_version_tuple() >= (1, 7) and test_util.IsMklEnabled(), \
            "Inference requires either GPU support or MKL support!"
    assert args.load
    finalize_configs(is_training=False)

    if args.predict or args.visualize:
        cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS

    if args.visualize:
        do_visualize(MODEL, args.load)
    else:
        predcfg = PredictConfig(
            model=MODEL,
            session_init=get_model_loader(args.load),
            input_names=MODEL.get_inference_tensor_names()[0],
            output_names=MODEL.get_inference_tensor_names()[1])

        if args.compact:
            ModelExporter(predcfg).export_compact(args.compact, optimize=False)
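One way to consume the .pb written by export_compact() is to load it as a frozen TF1 graph and run it by tensor name. This sketch assumes the exported graph keeps the tensor names used elsewhere in these examples ('image', 'output/boxes', 'output/scores', 'output/labels') and uses hypothetical file paths; inspect the .pb if your names differ.

import cv2
import numpy as np
import tensorflow as tf

with tf.io.gfile.GFile("frozen_model.pb", "rb") as f:     # hypothetical path
    graph_def = tf.compat.v1.GraphDef()
    graph_def.ParseFromString(f.read())

graph = tf.Graph()
with graph.as_default():
    tf.import_graph_def(graph_def, name="")

with tf.compat.v1.Session(graph=graph) as sess:
    img = cv2.imread("test.jpg", cv2.IMREAD_COLOR).astype(np.float32)
    boxes, scores, labels = sess.run(
        ["output/boxes:0", "output/scores:0", "output/labels:0"],
        feed_dict={"image:0": img})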
Code Example #12
File: train.py Project: xiaolaozai/tensorpack
    args = parser.parse_args()
    if args.config:
        cfg.update_args(args.config)
    register_coco(cfg.DATA.BASEDIR)  # add COCO datasets to the registry

    # Setup logger ...
    is_horovod = cfg.TRAINER == 'horovod'
    if is_horovod:
        hvd.init()
        logger.info("Horovod Rank={}, Size={}".format(hvd.rank(), hvd.size()))

    if not is_horovod or hvd.rank() == 0:
        logger.set_logger_dir(args.logdir, 'd')
    logger.info("Environment Information:\n" + collect_env_info())

    finalize_configs(is_training=True)

    # Compute the training schedule from the number of GPUs ...
    stepnum = cfg.TRAIN.STEPS_PER_EPOCH
    # warmup is step based, lr is epoch based
    init_lr = cfg.TRAIN.WARMUP_INIT_LR * min(8. / cfg.TRAIN.NUM_GPUS, 1.)
    warmup_schedule = [(0, init_lr), (cfg.TRAIN.WARMUP, cfg.TRAIN.BASE_LR)]
    warmup_end_epoch = cfg.TRAIN.WARMUP * 1. / stepnum
    lr_schedule = [(int(warmup_end_epoch + 0.5), cfg.TRAIN.BASE_LR)]

    factor = 8. / cfg.TRAIN.NUM_GPUS
    for idx, steps in enumerate(cfg.TRAIN.LR_SCHEDULE[:-1]):
        mult = 0.1**(idx + 1)
        lr_schedule.append(
            (steps * factor // stepnum, cfg.TRAIN.BASE_LR * mult))
    logger.info("Warm Up Schedule (steps, value): " + str(warmup_schedule))
Code Example #13
File: data.py Project: leeshien/mytensorpack
    img_per_shard = num_imgs // num_shards
    img_range = (shard * img_per_shard, (shard + 1) *
                 img_per_shard if shard + 1 < num_shards else num_imgs)

    # no filter for training
    ds = DataFromListOfDict(roidbs[img_range[0]:img_range[1]],
                            ["file_name", "image_id"])

    def f(fname):
        im = cv2.imread(fname, cv2.IMREAD_COLOR)
        assert im is not None, fname
        return im

    ds = MapDataComponent(ds, f, 0)
    # Evaluation itself may be multi-threaded, therefore don't add prefetch here.
    return ds


if __name__ == "__main__":
    import os
    from tensorpack.dataflow import PrintData
    from config import finalize_configs

    register_coco(os.path.expanduser("~/data/coco"))
    finalize_configs()
    ds = get_train_dataflow()
    ds = PrintData(ds, 10)
    TestDataSpeed(ds, 50000).start()
    for _ in ds:
        pass
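The shard arithmetic at the top of Code Example #13 gives every shard num_imgs // num_shards images and lets the last shard absorb the remainder. A small worked example with assumed values:

num_imgs, num_shards = 10, 3
img_per_shard = num_imgs // num_shards          # 3
for shard in range(num_shards):
    img_range = (shard * img_per_shard,
                 (shard + 1) * img_per_shard if shard + 1 < num_shards else num_imgs)
    print(shard, img_range)                     # (0, 3), (3, 6), (6, 10)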
Code Example #14
                                 6):  # i.e., the TensorFlow version should be at least 1.6.
        # https://github.com/tensorflow/tensorflow/issues/14657
        logger.warn(
            "TF<1.6 has a bug which may lead to crash in FasterRCNN training if you're unlucky."
        )

    args = parser.parse_args()  # parse the command-line arguments.
    if args.config:  # update the config with any overrides.
        cfg.update_args(args.config)

    # use ResNetFPNModel when FPN is enabled in the config, otherwise ResNetC4Model.
    # FPN (Feature Pyramid Network) is a pyramid approach used in object detection; see the paper.
    MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()
    if args.visualize or args.evaluate or args.predict:  # visualization, evaluation, or prediction
        assert args.load  # a trained checkpoint is required; error otherwise.
        finalize_configs(is_training=False)  # finalize and freeze the config (including GPU settings) for inference.

        if args.predict or args.visualize:  # when predicting or visualizing
            cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS  # only keep confident results for visualization.

        if args.visualize:  # visualization mode
            visualize(MODEL, args.load)  # draw predictions using the loaded model.
        else:  # otherwise build an inference predictor
            pred = OfflinePredictor(
                PredictConfig(
                    model=MODEL,
                    session_init=get_model_loader(args.load),
                    input_names=MODEL.get_inference_tensor_names()[0],
                    output_names=MODEL.get_inference_tensor_names()[1]))
            if args.evaluate:
                assert args.evaluate.endswith('.json'), args.evaluate
Code Example #15
    def get_predictor(cls):
        """load trained model"""

        with cls.lock:
            # check if model is already loaded
            if cls.predictor:
                return cls.predictor

            os.environ["TENSORPACK_FP16"] = "true"

            # create a mask r-cnn model
            mask_rcnn_model = ResNetFPNModel(True)

            try:
                model_dir = os.environ["SM_MODEL_DIR"]
            except KeyError:
                model_dir = "/opt/ml/model"
            try:
                resnet_arch = os.environ["RESNET_ARCH"]
            except KeyError:
                resnet_arch = "resnet50"

            # file path to previously trained Mask R-CNN model
            latest_trained_model = ""
            model_search_path = os.path.join(model_dir, "model-*.index")
            for model_file in glob.glob(model_search_path):
                if model_file > latest_trained_model:
                    latest_trained_model = model_file

            trained_model = latest_trained_model
            print(f"Using model: {trained_model}")

            # backbone and FPN/mask configuration
            cfg.MODE_FPN = True
            cfg.MODE_MASK = True
            if resnet_arch == "resnet101":
                cfg.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 23, 3]
            else:
                cfg.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 6, 3]

            cfg_prefix = "CONFIG__"
            for key, value in dict(os.environ).items():
                if key.startswith(cfg_prefix):
                    attr_name = key[len(cfg_prefix):]
                    attr_name = attr_name.replace("__", ".")
                    value = eval(value)  # parse the string into a Python literal (e.g. "0.5" -> 0.5)
                    print(f"update config: {attr_name}={value}")
                    nested_var = cfg
                    attr_list = attr_name.split(".")
                    for attr in attr_list[0:-1]:
                        nested_var = getattr(nested_var, attr)
                    setattr(nested_var, attr_list[-1], value)

            # calling DetectionDataset() gets the number of COCO categories
            # and saves it in the configuration
            DetectionDataset()
            finalize_configs(is_training=False)

            # Create an inference model
            # PredictConfig takes a model, input tensors and output tensors
            cls.predictor = OfflinePredictor(
                PredictConfig(
                    model=mask_rcnn_model,
                    session_init=get_model_loader(trained_model),
                    input_names=["images", "orig_image_dims"],
                    output_names=[
                        "generate_{}_proposals_topk_per_image/boxes".format(
                            "fpn" if cfg.MODE_FPN else "rpn"),
                        "generate_{}_proposals_topk_per_image/scores".format(
                            "fpn" if cfg.MODE_FPN else "rpn"),
                        "fastrcnn_all_scores",
                        "output/boxes",
                        "output/scores",
                        "output/labels",
                        "output/masks",
                    ],
                ))
            return cls.predictor
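The CONFIG__ loop above maps environment variables of the form CONFIG__A__B onto cfg.A.B, with the value parsed by eval(). A usage sketch with hypothetical values for two config keys that appear in these examples:

import os

# must be set before get_predictor() runs
os.environ["CONFIG__TEST__RESULT_SCORE_THRESH"] = "0.5"              # -> cfg.TEST.RESULT_SCORE_THRESH
os.environ["CONFIG__RPN__ANCHOR_SIZES"] = "(32, 64, 128, 256, 512)"  # -> cfg.RPN.ANCHOR_SIZES
# values are eval()'d, so they must be valid Python literals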
Code Example #16
        help="A list of KEY=VALUE to overwrite those defined in config.py",
        nargs='+')

    # if get_tf_version_tuple() < (1, 6):
    # https://github.com/tensorflow/tensorflow/issues/14657
    #    logger.warn("TF<1.6 has a bug which may lead to crash in FasterRCNN training if you're unlucky.")

    args = parser.parse_args()
    if args.config:
        cfg.update_args(args.config)

    assert args.load
    training_is = False
    finalize_configs(is_training=training_is)  # IMPORTANT: freeze the config for inference
    cfg.TRAIN.BASE_LR = 0.001
    cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS
    is_training = training_is
    image_P = tf.placeholder(tf.float32, (800, 1067, 3), 'image')
    anchor_labels = tf.placeholder(tf.int32, (None, None, cfg.RPN.NUM_ANCHOR),
                                   'anchor_labels')
    anchor_boxes = tf.placeholder(tf.float32,
                                  (None, None, cfg.RPN.NUM_ANCHOR, 4),
                                  'anchor_boxes')
    gt_boxes = tf.placeholder(tf.float32, (None, 4), 'gt_boxes')
    gt_labels = tf.placeholder(tf.int64, (None, ), 'gt_labels')

    image = preprocess(image_P)
Code Example #17
        out_path = os.path.join(VISPATH, "result_{}.jpeg".format(jj))
        if not os.path.exists(os.path.dirname(out_path)):
            os.makedirs(os.path.dirname(out_path))
        assert cv2.imwrite(out_path, vis)

        if jj > maxvis:
            break


if __name__ == "__main__":
    # visualize augmented data
    # Follow README.md to set necessary environment variables, then
    # CUDA_VISIBLE_DEVICES=0 VISPATH=<your-save-path> AUGTYPE='strong' python data.py
    cfg.DATA.NUM_WORKERS = 0
    register_coco(os.path.expanduser(os.environ["DATADIR"]))
    finalize_configs(True)

    cfg.DATA.TRAIN = ("coco_unlabeled2017", )
    cfg.TRAIN.AUGTYPE = os.environ["AUGTYPE"]
    VISPATH = os.environ["VISPATH"]
    VISPATH = os.path.join(os.environ["VISPATH"], str(cfg.TRAIN.AUGTYPE))
    if os.path.isdir(VISPATH):
        shutil.rmtree(VISPATH)
    os.makedirs(VISPATH)
    cfg.TRAIN.CONFIDENCE = 0.5
    visualize_dataflow2(cfg, VISPATH)
Code Example #18
    def get_predictor(cls):
        ''' load trained model'''

        with cls.lock:
            # check if model is already loaded
            if cls.predictor:
                return cls.predictor

            os.environ['TENSORPACK_FP16'] = 'true'

            # create a mask r-cnn model
            mask_rcnn_model = ResNetFPNModel(True)

            try:
                model_dir = os.environ['SM_MODEL_DIR']
            except KeyError:
                model_dir = '/opt/ml/model'

            try:
                cls.pretrained_model = os.environ['PRETRAINED_MODEL']
            except KeyError:
                pass

            try:
                div = int(eval(os.environ['divisor']))
            except KeyError:
                div = 1

            rpn_anchor_stride = int(16 / div)
            rpn_anchor_sizes = (int(32 / div), int(64 / div), int(128 / div),
                                int(256 / div), int(512 / div))

            try:
                rpn_anchor_stride = int(eval(os.environ['rpnanchor_stride']))
            except KeyError:
                pass

            try:
                nms_topk = int(eval(os.environ['NMS_TOPK']))
            except KeyError:
                nms_topk = 2

            try:
                nms_thresh = eval(os.environ['NMS_THRESH'])
            except KeyError:
                nms_thresh = 0.7

            try:
                results_per_img = eval(os.environ['res_perimg'])
            except KeyError:
                results_per_img = 400

            # file path to previously trained Mask R-CNN model
            latest_trained_model = ""
            model_search_path = os.path.join(model_dir, "model-*.index")
            for model_file in glob.glob(model_search_path):
                if model_file > latest_trained_model:
                    latest_trained_model = model_file

            trained_model = latest_trained_model
            print(f'Using model: {trained_model}')

            # fixed resnet50 backbone weights
            cfg.BACKBONE.WEIGHTS = os.path.join(cls.pretrained_model)
            cfg.MODE_FPN = True
            cfg.MODE_MASK = True
            cfg.RPN.ANCHOR_STRIDE = rpn_anchor_stride
            cfg.RPN.ANCHOR_SIZES = rpn_anchor_sizes
            cfg.RPN.TEST_PRE_NMS_TOPK = int(6000 * nms_topk)
            cfg.RPN.TEST_POST_NMS_TOPK = int(1000 * nms_topk)
            cfg.RPN.TEST_PER_LEVEL_NMS_TOPK = int(1000 * nms_topk)
            # testing -----------------------
            cfg.TEST.FRCNN_NMS_THRESH = nms_thresh
            cfg.TEST.RESULT_SCORE_THRESH = 0.05
            cfg.TEST.RESULT_SCORE_THRESH_VIS = 0.2  # only visualize confident results
            cfg.TEST.RESULTS_PER_IM = results_per_img

            # calling DetectionDataset() gets the number of COCO categories
            # and saves it in the configuration
            DetectionDataset()
            finalize_configs(is_training=False)

            # Create an inference model
            # PredictConfig takes a model, input tensors and output tensors
            cls.predictor = OfflinePredictor(
                PredictConfig(
                    model=mask_rcnn_model,
                    session_init=get_model_loader(trained_model),
                    input_names=['images', 'orig_image_dims'],
                    output_names=[
                        'generate_{}_proposals_topk_per_image/boxes'.format(
                            'fpn' if cfg.MODE_FPN else 'rpn'),
                        'generate_{}_proposals_topk_per_image/scores'.format(
                            'fpn' if cfg.MODE_FPN else 'rpn'),
                        'fastrcnn_all_scores', 'output/boxes', 'output/scores',
                        'output/labels', 'output/masks'
                    ]))
            return cls.predictor
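A small worked example of the environment-driven overrides above, with assumed values: divisor=2 halves the anchor stride and every anchor size, NMS_TOPK=2 doubles the proposal budgets, and the remaining variables keep their defaults.

div, nms_topk = 2, 2
print(int(16 / div))                                          # ANCHOR_STRIDE -> 8
print(tuple(int(s / div) for s in (32, 64, 128, 256, 512)))   # ANCHOR_SIZES -> (16, 32, 64, 128, 256)
print(int(6000 * nms_topk), int(1000 * nms_topk))             # PRE/POST NMS topk -> 12000, 2000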
Code Example #19
File: train.py Project: tobyma/tensorpack
                                           "This argument is the path to the output json evaluation file")
    parser.add_argument('--predict', help="Run prediction on a given image. "
                                          "This argument is the path to the input image file")

    if get_tf_version_number() < 1.6:
        # https://github.com/tensorflow/tensorflow/issues/14657
        logger.warn("TF<1.6 has a bug which may lead to crash in FasterRCNN training if you're unlucky.")

    args = parser.parse_args()
    cfg.update_args(args.config)

    MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()

    if args.visualize or args.evaluate or args.predict:
        assert args.load
        finalize_configs(is_training=False)

        if args.predict or args.visualize:
            cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS

        if args.visualize:
            assert not cfg.MODE_FPN, "FPN visualize is not supported!"
            visualize(args.load)
        else:
            pred = OfflinePredictor(PredictConfig(
                model=MODEL,
                session_init=get_model_loader(args.load),
                input_names=MODEL.get_inference_tensor_names()[0],
                output_names=MODEL.get_inference_tensor_names()[1]))
            if args.evaluate:
                assert args.evaluate.endswith('.json'), args.evaluate
Code Example #20
    def get_predictor(cls):
        """load trained model"""

        with cls.lock:
            # check if model is already loaded
            if cls.predictor:
                return cls.predictor

            # create a mask r-cnn model
            mask_rcnn_model = ResNetFPNModel()

            try:
                model_dir = os.environ["SM_MODEL_DIR"]
            except KeyError:
                model_dir = "/opt/ml/model"

            try:
                resnet_arch = os.environ["RESNET_ARCH"]
            except KeyError:
                resnet_arch = "resnet50"

            # file path to previously trained Mask R-CNN model
            latest_trained_model = ""
            model_search_path = os.path.join(model_dir, "model-*.index")
            for model_file in glob.glob(model_search_path):
                if model_file > latest_trained_model:
                    latest_trained_model = model_file

            trained_model = latest_trained_model[:-6]  # strip the '.index' suffix to get the checkpoint prefix
            print(f"Using model: {trained_model}")

            cfg.MODE_FPN = True
            cfg.MODE_MASK = True
            if resnet_arch == "resnet101":
                cfg.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 23, 3]
            else:
                cfg.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 6, 3]

            cfg_prefix = "CONFIG__"
            for key, value in dict(os.environ).items():
                if key.startswith(cfg_prefix):
                    attr_name = key[len(cfg_prefix) :]
                    attr_name = attr_name.replace("__", ".")
                    value = eval(value)  # parse the string into a Python literal (e.g. "0.5" -> 0.5)
                    print(f"update config: {attr_name}={value}")
                    nested_var = cfg
                    attr_list = attr_name.split(".")
                    for attr in attr_list[0:-1]:
                        nested_var = getattr(nested_var, attr)
                    setattr(nested_var, attr_list[-1], value)

            cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS
            cfg.DATA.BASEDIR = "/data"
            cfg.DATA.TRAIN = "coco_train2017"
            cfg.DATA.VAL = "coco_val2017"
            register_coco(cfg.DATA.BASEDIR)
            finalize_configs(is_training=False)

            # Create an inference model
            # PredictConfig takes a model, input tensors and output tensors
            input_tensors = mask_rcnn_model.get_inference_tensor_names()[0]
            output_tensors = mask_rcnn_model.get_inference_tensor_names()[1]

            cls.predictor = OfflinePredictor(
                PredictConfig(
                    model=mask_rcnn_model,
                    session_init=get_model_loader(trained_model),
                    input_names=input_tensors,
                    output_names=output_tensors,
                )
            )
            return cls.predictor
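All of the get_predictor() variants in this listing assume a surrounding class that lazily builds a single predictor behind a lock, so concurrent serving requests share one TensorFlow graph. A minimal sketch of that pattern with a hypothetical class name:

import threading

class ModelService:
    lock = threading.Lock()
    predictor = None
    pretrained_model = None

    @classmethod
    def get_predictor(cls):
        with cls.lock:                      # one thread builds, the rest reuse
            if cls.predictor is None:
                cls.predictor = cls._build_predictor()
            return cls.predictor

    @classmethod
    def _build_predictor(cls):
        raise NotImplementedError           # replaced by the OfflinePredictor setup shown above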