def _load_file(self, f):
    # catalog lookup
    if f.startswith("catalog://"):
        paths_catalog = import_file(
            "maskrcnn_benchmark.config.paths_catalog", self.cfg.PATHS_CATALOG, True
        )
        catalog_f = paths_catalog.ModelCatalog.get(f[len("catalog://") :])
        self.logger.info("{} points to {}".format(f, catalog_f))
        f = catalog_f
    # download url files
    if f.startswith("http"):
        # if the file is a url path, download it and cache it
        cached_f = cache_url(f)
        self.logger.info("url {} cached in {}".format(f, cached_f))
        f = cached_f
    # convert Caffe2 checkpoint from pkl
    if f.endswith(".pkl"):
        return load_c2_format(self.cfg, f)
    # load native detectron.pytorch checkpoint
    loaded = super(DetectronCheckpointer, self)._load_file(f)
    if "model" not in loaded:
        loaded = dict(model=loaded)
    return loaded
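
For context, _load_file is invoked by the base Checkpointer.load method. A typical call site, sketched after maskrcnn_benchmark's training script (model, optimizer, scheduler, and output_dir are assumed to already exist), looks like:

from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer

checkpointer = DetectronCheckpointer(
    cfg, model, optimizer, scheduler, output_dir, save_to_disk=True
)
# load() resolves catalog:// and http:// paths through _load_file above
extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT)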
Example #2
    def _load_file(self, f):
        # catalog lookup
        if f.startswith("catalog://"):
            from maskrcnn_benchmark.config import paths_catalog
            catalog_f = paths_catalog.ModelCatalog.get(f[len("catalog://"):])
            self.logger.info("{} points to {}".format(f, catalog_f))
            f = catalog_f
        # download url files
        if f.startswith("http"):
            # if the file is a url path, download it and cache it
            cached_f = cache_url(f)
            self.logger.info("url {} cached in {}".format(f, cached_f))
            f = cached_f
        # convert Caffe2 checkpoint from pkl
        if f.endswith(".pkl"):
            return load_c2_format(self.cfg, f)
        # load native detectron.pytorch checkpoint
        loaded = super(DetectronCheckpointer, self)._load_file(f)
        if "model" not in loaded:
            loaded = dict(model=loaded)
        if self.cfg is not None:
            from .c2_model_loading import _rename_conv_weights_for_deformable_conv_layers
            _rename_conv_weights_for_deformable_conv_layers(
                loaded['model'], self.cfg)
        return loaded
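
The renaming hook exists because a deformable convolution module wraps the plain conv, so its weight lives under a different parameter name than in the Caffe2 checkpoint. A minimal sketch of the idea (the key pattern and the renaming below are assumptions for illustration, not this fork's actual code; STAGE_WITH_DCN is the config flag used by DCN-enabled maskrcnn_benchmark variants):

import re

def rename_dcn_weights_sketch(state_dict, cfg):
    # hypothetical: for each ResNet stage flagged as using deformable conv,
    # remap "conv2.weight" keys to "conv2.conv.weight" so the checkpoint
    # lines up with the wrapped module's parameter names
    for stage, with_dcn in enumerate(cfg.MODEL.RESNETS.STAGE_WITH_DCN, start=1):
        if not with_dcn:
            continue
        pattern = re.compile(r"(layer{}\..*\.conv2)\.(weight|bias)".format(stage))
        for old_key in list(state_dict.keys()):
            new_key = pattern.sub(r"\1.conv.\2", old_key)
            if new_key != old_key:
                state_dict[new_key] = state_dict.pop(old_key)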
Example #3
    def load_checkpoint(self, weights, is_restore=False):

        t_start = time.time()

        if weights.endswith(".pkl"):
            # for caffe2 model
            from maskrcnn_benchmark.utils.c2_model_loading import \
                load_c2_format
            loaded = load_c2_format(self.cfg, weights)
        else:
            loaded = torch.load(weights, map_location=torch.device("cpu"))

        t_io_end = time.time()
        if "model" not in loaded:
            loaded = dict(model=loaded)

        self.state.model = load_model(self.state.model,
                                      loaded['model'],
                                      self.logger,
                                      is_restore=is_restore)

        if "optimizer" in loaded:
            self.state.optimizer.load_state_dict(loaded['optimizer'])
        if "iteration" in loaded:
            self.state.iteration = loaded['iteration']
        if "scheduler" in loaded:
            self.state.scheduler.load_state_dict(loaded["scheduler"])
        del loaded

        t_end = time.time()
        self.logger.info("Load checkpoint from file {}, "
                         "Time usage:\n\tIO: {}, restore snapshot: {}".format(
                             weights, t_io_end - t_start, t_end - t_io_end))
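
For reference, the checkpoint layout consumed above would come from a writer along these lines (a hypothetical counterpart, not this codebase's actual save routine):

import torch

def save_checkpoint(state, path):
    # write the keys load_checkpoint looks for:
    # "model", "optimizer", "iteration", "scheduler"
    torch.save({
        "model": state.model.state_dict(),
        "optimizer": state.optimizer.state_dict(),
        "iteration": state.iteration,
        "scheduler": state.scheduler.state_dict(),
    }, path)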
Example #4
    def _load_file(self, f):
        # catalog lookup
        if f.startswith("catalog://"):
            paths_catalog = import_file(
                "maskrcnn_benchmark.config.paths_catalog",
                self.cfg.PATHS_CATALOG, True)
            catalog_f = paths_catalog.ModelCatalog.get(f[len("catalog://"):])
            self.logger.info("{} points to {}".format(f, catalog_f))
            f = catalog_f
        # download url files
        if f.startswith("http"):
            # if the file is a url path, download it and cache it
            cached_f = cache_url(f)
            self.logger.info("url {} cached in {}".format(f, cached_f))
            f = cached_f
        # convert Caffe2 checkpoint from pkl
        if f.endswith(".pkl"):
            return load_c2_format(self.cfg, f)
        # load native detectron.pytorch checkpoint
        loaded = super(DetectronCheckpointer, self)._load_file(f)

        if "model" not in loaded:
            loaded = dict(model=loaded)
        return loaded
Example #5
    def _load_file(self, f):
        # f: catalog://ImageNetPretrained/MSRA/R-50
        if f.startswith("catalog://"):
            # import paths_catalog.py as a module; its path is set in the config
            # and may be anywhere on the filesystem, so a plain import won't work
            paths_catalog = import_file(
                "maskrcnn_benchmark.config.paths_catalog",
                self.cfg.PATHS_CATALOG,  # absolute path to paths_catalog.py
                True)

            # 'https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/MSRA/R-50.pkl'
            # look up the URL of the pretrained model
            catalog_f = paths_catalog.ModelCatalog.get(f[len("catalog://"):])
            self.logger.info("{} points to {}".format(f, catalog_f))
            f = catalog_f

        # download url files
        if f.startswith("http"):
            # if the file is a url path, download it and cache it
            cached_f = cache_url(f)
            self.logger.info("url {} cached in {}".format(f, cached_f))
            f = cached_f

        # convert Caffe2 checkpoint from pkl
        if f.endswith(".pkl"):
            return load_c2_format(self.cfg, f)

        # load native detectron.pytorch checkpoint
        loaded = super(DetectronCheckpointer, self)._load_file(f)
        if "model" not in loaded:
            loaded = dict(model=loaded)

        return loaded
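
The dynamic import that makes this possible can be sketched with importlib (a minimal sketch; the real import_file in maskrcnn_benchmark.utils.imports may differ in detail):

import importlib.util
import sys

def import_file(module_name, file_path, make_importable=False):
    # load a module from an arbitrary filesystem path; a plain "import"
    # statement only searches sys.path, so it cannot find this file
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    if make_importable:
        sys.modules[module_name] = module
    return module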
Example #6
def _load_file(cfg):
    f = cfg.MODEL.WEIGHT
    # catalog lookup
    if f.startswith("catalog://"):
        paths_catalog = import_file("maskrcnn_benchmark.config.paths_catalog",
                                    cfg.PATHS_CATALOG, True)
        catalog_f = paths_catalog.ModelCatalog.get(f[len("catalog://"):])

        f = catalog_f
    # download url files
    if f.startswith("http"):
        # if the file is a url path, download it and cache it
        cached_f = cache_url(f)

        f = cached_f
    # convert Caffe2 checkpoint from pkl
    if f.endswith(".pkl"):
        return load_c2_format(cfg, f)
    # assumed fallback, mirroring the other examples: load a native
    # PyTorch checkpoint when the file is not a Caffe2 pkl
    return torch.load(f, map_location=torch.device("cpu"))
Example #7
    def _load_file(self, f):
        # catalog lookup
        if f.startswith("catalog://"):
            paths_catalog = import_file(
                "maskrcnn_benchmark.config.paths_catalog", self.cfg.PATHS_CATALOG, True
            )
            catalog_f = paths_catalog.ModelCatalog.get(f[len("catalog://") :])
            self.logger.info("{} points to {}".format(f, catalog_f))
            f = catalog_f
        # download url files
        if f.startswith("http"):
            # if the file is a url path, download it and cache it
            cached_f = cache_url(f)
            self.logger.info("url {} cached in {}".format(f, cached_f))
            f = cached_f
        # convert Caffe2 checkpoint from pkl
        if f.endswith(".pkl"):
            return load_c2_format(self.cfg, f)
        # load native detectron.pytorch checkpoint
        loaded = super(DetectronCheckpointer, self)._load_file(f)
        if "model" not in loaded:
            loaded = dict(model=loaded)
        return loaded
Example #8
import torch as t

from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.utils.c2_model_loading import load_c2_format

path = "FAST_RCNN_MLP_DIM2048_FPN_DIM512.pkl"
config_name = 'modified_for_conversion_e2e_faster_rcnn_X-101-64x4d-FPN_1x_MLP_2048_FPN_512.yaml'
base_path = '/private/home/meetshah/detectron/vmb/configs/visual_genome_vqa/c2/'
cfg.merge_from_file(base_path + config_name)

_d = load_c2_format(cfg, path)
newdict = _d
t.save(newdict, "model_final.pth")
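
Since load_c2_format returns a dict with a "model" key (as the other examples show), the converted file can be sanity-checked by loading it back:

state = t.load("model_final.pth")
print(sorted(state["model"].keys())[:5])  # first few parameter names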
Example #9
    default="~/.torch/models/_detectron_35858933_12_2017_baselines_e2e_mask_rcnn_R-50-FPN_1x.yaml.01_48_14.DzEQe4wC_output_train_coco_2014_train%3Acoco_2014_valminusminival_generalized_rcnn_model_final.pkl",
    help="path to detectron pretrained weight(.pkl)",
    type=str,
)
parser.add_argument(
    "--save_path",
    default="./pretrained_model/no_last_layers.pth",
    help="path to save the converted model",
    type=str,
)
parser.add_argument(
    "--cfg",
    default="../ configs/e2e_mask_rcnn_R_50_FPN_1x.yaml",
    help="path to config file",
    type=str,
)

args = parser.parse_args()
DETECTRON_PATH = os.path.expanduser(args.pretrained_path)
print('detectron path: {}'.format(DETECTRON_PATH))

cfg.merge_from_file(args.cfg)
_d = load_c2_format(cfg, DETECTRON_PATH)
newdict = _d

newdict['model'] = removekey(_d['model'],
                             ['cls_score.bias', 'cls_score.weight', 'bbox_pred.bias', 'bbox_pred.weight'])
torch.save(newdict, args.save_path)
print('saved to {}.'.format(args.save_path))
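
A typical invocation would be along the lines of python trim_detectron_model.py --pretrained_path <weights.pkl> --save_path ./pretrained_model/no_last_layers.pth --cfg <config.yaml> (the script name is assumed). Dropping the cls_score and bbox_pred weights removes the class-specific output layers, so the remaining backbone and head weights can be fine-tuned on a dataset with a different number of classes.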