Ejemplo n.º 1
0
 def __init__(self, desc=None, weight_file=None, pb_file=None):
     """Init GraphGetter: build a model description from a TF graph.

     :param desc: model description dict (resolved via ModelZoo) or an
         already-built model object.
     :param weight_file: optional weight/checkpoint file passed to ModelZoo;
         ignored when a pb_file is given.
     :param pb_file: optional frozen TensorFlow GraphDef file. When given,
         the graph and its Const weights are taken from the pb file instead
         of tracing the source model.
     """
     super(GraphGetter, self).__init__()
     if isinstance(desc, dict):
         src_model = ModelZoo().get_model(desc)
     else:
         src_model = desc
     weights = OrderedDict()
     if is_tf_backend():
         import tensorflow.compat.v1 as tf
         from tensorflow.python.framework import tensor_util
         tf.reset_default_graph()
         # NOTE(review): input shape is hard-coded NHWC 224x224x3 — assumes
         # an ImageNet-style classification model; confirm for other uses.
         data_shape = (1, 224, 224, 3)
         x = tf.ones(data_shape)
         if pb_file:
             graph_def = tf.GraphDef()
             # BUG FIX: the original called f.read() AFTER the `with` block
             # had closed the file; parse while the handle is still open.
             with tf.io.gfile.GFile(pb_file, 'rb') as f:
                 graph_def.ParseFromString(f.read())
             graph = tf.Graph()
             with graph.as_default():
                 tf.import_graph_def(graph_def, name='')
             # Weights come from the pb's Const nodes, so drop weight_file.
             weight_file = None
             wts = [n for n in graph_def.node if n.op == 'Const']
             for n in wts:
                 weights[n.name] = tensor_util.MakeNdarray(
                     n.attr['value'].tensor)
         else:
             # Trace the source model once to populate the default graph.
             src_model(x, self.training)
             graph = tf.get_default_graph()
         desc = graph2desc(graph)
         tf.reset_default_graph()
     self.model = ModelZoo().get_model(desc, weight_file)
     if weights:
         self.model.load_checkpoint_from_numpy(weights)
Ejemplo n.º 2
0
 def __init__(self, desc, from_graph=None, weight_file=None):
     """Build the base model from a description (or a parsed graph) and
     run the deformation pipeline: decode conditions, deform, clear props.
     """
     super(Deformation, self).__init__()
     props = deepcopy(desc.get('props')) or {}
     self._desc = {"props": props}
     if from_graph:
         getter = GraphGetter(desc, weight_file)
         self.model = getter.model
     else:
         self.model = ModelZoo().get_model(desc, weight_file)
     self._apply_names()
     self.get_search_space()
     self.decode()
     self.deform()
     self.props.clear()
Ejemplo n.º 3
0
 def _init_model(self):
     """Load model desc from save path and parse to model.

     :return: the initialized (possibly cuda/DataParallel-wrapped) model.
     :raises Exception: when neither a model nor a description is available.
     """
     model = self.trainer.model
     if self.trainer.config.is_detection_trainer:
         model_desc = self.trainer.model_desc
     else:
         model_desc = self._get_model_desc()
     if model_desc:
         ModelConfig.model_desc = model_desc
     pretrained_model_file = self._get_pretrained_model_file()
     if not model:
         if not model_desc:
             raise Exception(
                 "Failed to Init model, can not get model description.")
         model = ModelZoo.get_model(model_desc, pretrained_model_file)
     if model:
         if zeus.is_torch_backend():
             import torch
             if self.trainer.use_cuda:
                 model = model.cuda()
             if General._parallel and General.devices_per_trainer > 1:
                 # BUG FIX: wrap the local (possibly cuda-moved) model,
                 # not the stale self.trainer.model reference.
                 model = torch.nn.DataParallel(model)
         if zeus.is_tf_backend():
             if pretrained_model_file:
                 # Make the pretrained checkpoint visible in the worker dir.
                 model_folder = os.path.dirname(pretrained_model_file)
                 FileOps.copy_folder(model_folder,
                                     self.trainer.get_local_worker_path())
     return model
Ejemplo n.º 4
0
 def __init__(self,
              model_name=None,
              pretrained_model_file=None,
              downsamples=(4, 14, 27, 46),
              residauls=(7, 10, 17, 20, 23, 30, 33, 36, 39, 42, 49, 52),
              model=None,
              in_channels=None,
              out_channels=None,
              num_classes=None,
              **kwargs):
     """Init PruneGetter and prune the model by channel mask codes.

     :param model_name: model type name resolved via ModelZoo (with kwargs).
     :param pretrained_model_file: optional weight file for ModelZoo.
     :param downsamples: conv indices treated as downsample convs
         (immutable tuple defaults replace the original mutable lists).
     :param residauls: conv indices treated as residual-join convs
         (parameter name kept misspelled for backward compatibility).
     :param model: pre-built model to prune when model_name is None.
     :param in_channels: optional input-channel mask code.
     :param out_channels: optional output-channel mask code.
     :param num_classes: optional new class count for the final Linear.
     :raises ValueError: when a supplied mask is shorter than the total
         channel count it must cover.
     """
     super(PruneGetter, self).__init__()
     self.model = model
     if model_name is not None:
         model_desc = dict(type=model_name)
         model_desc.update(kwargs)
         self.model = ModelZoo().get_model(model_desc,
                                           pretrained_model_file)
     self.num_classes = num_classes
     convs = [
         module for name, module in self.model.named_modules()
         if isinstance(module, torch.nn.Conv2d)
     ]
     self.in_channels_size = sum([conv.in_channels for conv in convs])
     self.out_channels_size = sum([conv.out_channels for conv in convs])
     # BUG FIX: messages said "getter than"; the masks must cover every
     # channel, i.e. their length must be at least the channel count.
     if in_channels and len(in_channels) < self.in_channels_size:
         raise ValueError(
             "in channels mask length should be no less than {}".format(
                 self.in_channels_size))
     if out_channels and len(out_channels) < self.out_channels_size:
         raise ValueError(
             "out channels mask length should be no less than {}".format(
                 self.out_channels_size))
     in_channels_code = self.define_props('in_channels', in_channels)
     out_channels_code = self.define_props('out_channels', out_channels)
     if not in_channels_code and not out_channels_code:
         # All-ones masks keep every channel, i.e. no actual pruning.
         logging.info(
             "channels_code is null. use 1 as default, this model will not pruned."
         )
         in_channels_code = [1] * self.in_channels_size
         out_channels_code = [1] * self.out_channels_size
     # TODO check downsample auto
     self.downsamples = downsamples
     self.residauls = residauls
     self.model = self._prune(self.model, in_channels_code,
                              out_channels_code)
Ejemplo n.º 5
0
def _get_model(args):
    """Load the model described by the CLI args and prepare it for eval."""
    from zeus.model_zoo import ModelZoo
    net = ModelZoo.get_model(args.model_desc, args.model)
    if not vega.is_torch_backend():
        return net
    if args.device == "GPU":
        net = net.cuda()
    net.eval()
    return net
Ejemplo n.º 6
0
 def __init__(self, desc, weight_file=None):
     """Build the base model, derive its search space, and deform it."""
     super(Deformation, self).__init__()
     self.search_space = OrderedDict()
     self.conditions = deque()
     inner_desc = deepcopy(desc.get('desc'))
     self.desc = inner_desc if inner_desc else desc
     self.model = ModelZoo().get_model(desc, weight_file)
     self.get_search_space(self.model)
     self.decode()
     self.deform()
     self.props.clear()
Ejemplo n.º 7
0
 def load_model(self):
     """Resolve this worker's saved desc/weights and instantiate the model."""
     if not self.model_desc and not self.weights_file:
         folder = self.get_local_worker_path(self.step_name, self.worker_id)
         self.weights_file = FileOps.join_path(
             folder, 'model_{}.pth'.format(self.worker_id))
         self.model_desc = FileOps.join_path(
             folder, 'desc_{}.json'.format(self.worker_id))
     # Fall back to the global config when the desc has no module layout.
     if 'modules' not in self.model_desc:
         self.model_desc = ModelConfig.model_desc
     self.model = ModelZoo.get_model(self.model_desc, self.weights_file)
Ejemplo n.º 8
0
class Deformation(Module):
    """Get output layer by layer names and connect into a OrderDict."""

    def __init__(self, desc, from_graph=None, weight_file=None):
        """Build the base model and run the deformation pipeline."""
        super(Deformation, self).__init__()
        props = deepcopy(desc.get('props')) or {}
        self._desc = {"props": props}
        if from_graph:
            self.model = GraphGetter(desc, weight_file).model
        else:
            self.model = ModelZoo().get_model(desc, weight_file)
        self._apply_names()
        self.get_search_space()
        self.decode()
        self.deform()
        self.props.clear()

    def deform(self):
        """Deform modules; concrete subclasses must override this."""
        raise NotImplementedError

    def decode(self):
        """Propagate condition values: copy props[source] onto props[target]."""
        for target, source in self.conditions:
            value = self.props.get(source)
            if value:
                self.props[target] = value

    def to_desc(self, recursion=True):
        """Convert to model desc (props override the model's own desc)."""
        base = self.model.to_desc()
        return dict(base, **self._desc)

    def state_dict(self):
        """Return the wrapped model's state dict."""
        return self.model.state_dict()

    def get_search_space(self):
        """Collect search space and conditions by traversing the model."""
        space = DeformationSearchSpace()
        space._traversal(self.model)
        self.search_space = space.search_space
        self.conditions = space.conditions
Ejemplo n.º 9
0
    def load_model(self):
        """Locate this worker's saved desc/weights and build the model."""
        self.saved_folder = self.get_local_worker_path(self.step_name, self.worker_id)
        if not self.model_desc:
            desc_name = 'desc_{}.json'.format(self.worker_id)
            self.model_desc = FileOps.join_path(self.saved_folder, desc_name)
        if not self.weights_file:
            if zeus.is_torch_backend():
                weight_name = 'model_{}.pth'.format(self.worker_id)
                self.weights_file = FileOps.join_path(self.saved_folder, weight_name)
            elif zeus.is_ms_backend():
                # MindSpore checkpoints are discovered by name pattern.
                for entry in os.listdir(self.saved_folder):
                    if entry.startswith("CKP") and entry.endswith(".ckpt"):
                        self.weights_file = FileOps.join_path(self.saved_folder, entry)

        # Fall back to the global config when the desc has no module layout.
        if 'modules' not in self.model_desc:
            self.model_desc = ModelConfig.model_desc
        self.model = ModelZoo.get_model(self.model_desc, self.weights_file)
Ejemplo n.º 10
0
 def get_space(self, desc):
     """Get model and input."""
     model_desc = PipeStepConfig.model.model_desc
     model = ModelZoo().get_model(
         dict(type='PruneDeformation', desc=model_desc))
     hyperparams = [
         dict(key='network.props.{}'.format(name),
              type="BINARY_CODE",
              range=[code])
         for name, code in model.search_space.items()
     ]
     hyperparams.append(
         dict(key='network.deformation',
              type="CATEGORY",
              range=['PruneDeformation']))
     logging.info("Prune Search Space: {}".format(hyperparams))
     return {"hyperparameters": hyperparams}
Ejemplo n.º 11
0
 def get_space(self, desc):
     """Get model and input."""
     model_desc = PipeStepConfig.model.model_desc
     model = ModelZoo().get_model(
         dict(type='BackboneDeformation', desc=model_desc))
     space_len = len(model.search_space)
     times = random.randint(3, 5)
     params = []
     # Same binary-code range for both deformation properties.
     for prop in ("doublechannel", "downsample"):
         params.append(
             dict(key="network.props.{}".format(prop),
                  type="BINARY_CODE",
                  range=[space_len, times]))
     params.append(
         dict(key='network.deformation',
              type="CATEGORY",
              range=['BackboneDeformation']))
     logging.info("Backbone Search Space: {}".format(params))
     return {"hyperparameters": params}
Ejemplo n.º 12
0
 def _init_model(self):
     """Load model desc from save path and parse to model.

     :raises Exception: when neither a model nor a description is available.
     """
     model = self.trainer.model
     if self.trainer.config.is_detection_trainer:
         model_desc = self.trainer.model_desc or self._get_model_desc()
     else:
         model_desc = self._get_model_desc()
     pretrained_model_file = self._get_pretrained_model_file()
     if not model:
         if not model_desc:
             raise Exception(
                 "Failed to Init model, can not get model description.")
         model = ModelZoo.get_model(model_desc, pretrained_model_file,
                                    ModelConfig.head)
     if not model:
         return model
     self.trainer.model_desc = model.desc
     if zeus.is_torch_backend():
         import torch
         if self.trainer.use_cuda:
             model = model.cuda()
         if General._parallel and General.devices_per_trainer > 1:
             model = torch.nn.DataParallel(model)
     return model
Ejemplo n.º 13
0
 def load_model(self):
     """Resolve this worker's model description and weights, then build it."""
     self.saved_folder = self.get_local_worker_path(self.step_name,
                                                    self.worker_id)
     if not self.model_desc:
         desc_path = FileOps.join_path(
             self.saved_folder, 'desc_{}.json'.format(self.worker_id))
         model_config = Config(desc_path)
         # Fall back to the global config when the saved desc is unusable.
         if "type" not in model_config and "modules" not in model_config:
             model_config = ModelConfig.model_desc
         self.model_desc = model_config
     if not self.weights_file:
         if zeus.is_torch_backend():
             self.weights_file = FileOps.join_path(
                 self.saved_folder, 'model_{}.pth'.format(self.worker_id))
         elif zeus.is_ms_backend():
             # Pick up a MindSpore checkpoint from the worker folder.
             for entry in os.listdir(self.saved_folder):
                 if entry.endswith(".ckpt"):
                     self.weights_file = FileOps.join_path(
                         self.saved_folder, entry)
         elif zeus.is_tf_backend():
             self.weights_file = FileOps.join_path(
                 self.saved_folder, 'model_{}'.format(self.worker_id))
     self.model = ModelZoo.get_model(self.model_desc, self.weights_file)
Ejemplo n.º 14
0
class PruneGetter(Module):
    """Get output layer by layer names and connect into a OrderDict."""

    def __init__(self,
                 model_name=None,
                 pretrained_model_file=None,
                 downsamples=(4, 14, 27, 46),
                 residauls=(7, 10, 17, 20, 23, 30, 33, 36, 39, 42, 49, 52),
                 model=None,
                 in_channels=None,
                 out_channels=None,
                 num_classes=None,
                 **kwargs):
        """Init PruneGetter and prune the model by channel mask codes.

        :param model_name: model type name resolved via ModelZoo (with kwargs).
        :param pretrained_model_file: optional weight file for ModelZoo.
        :param downsamples: conv indices treated as downsample convs
            (immutable tuple defaults replace the original mutable lists).
        :param residauls: conv indices treated as residual-join convs
            (parameter name kept misspelled for backward compatibility).
        :param model: pre-built model to prune when model_name is None.
        :param in_channels: optional input-channel mask code.
        :param out_channels: optional output-channel mask code.
        :param num_classes: optional new class count for the final Linear.
        :raises ValueError: when a supplied mask is shorter than the total
            channel count it must cover.
        """
        super(PruneGetter, self).__init__()
        self.model = model
        if model_name is not None:
            model_desc = dict(type=model_name)
            model_desc.update(kwargs)
            self.model = ModelZoo().get_model(model_desc,
                                              pretrained_model_file)
        self.num_classes = num_classes
        convs = [
            module for name, module in self.model.named_modules()
            if isinstance(module, torch.nn.Conv2d)
        ]
        self.in_channels_size = sum([conv.in_channels for conv in convs])
        self.out_channels_size = sum([conv.out_channels for conv in convs])
        # BUG FIX: messages said "getter than"; the masks must cover every
        # channel, i.e. their length must be at least the channel count.
        if in_channels and len(in_channels) < self.in_channels_size:
            raise ValueError(
                "in channels mask length should be no less than {}".format(
                    self.in_channels_size))
        if out_channels and len(out_channels) < self.out_channels_size:
            raise ValueError(
                "out channels mask length should be no less than {}".format(
                    self.out_channels_size))
        in_channels_code = self.define_props('in_channels', in_channels)
        out_channels_code = self.define_props('out_channels', out_channels)
        if not in_channels_code and not out_channels_code:
            # All-ones masks keep every channel, i.e. no actual pruning.
            logging.info(
                "channels_code is null. use 1 as default, this model will not pruned."
            )
            in_channels_code = [1] * self.in_channels_size
            out_channels_code = [1] * self.out_channels_size
        # TODO check downsample auto
        self.downsamples = downsamples
        self.residauls = residauls
        self.model = self._prune(self.model, in_channels_code,
                                 out_channels_code)

    def load_state_dict(self, state_dict, strict=True):
        """Call subclass load_state_dict function."""
        return self.model.load_state_dict(state_dict, strict)

    def state_dict(self, destination=None, prefix='', keep_vars=False):
        """Call subclass state_dict function."""
        return self.model.state_dict(destination, prefix, keep_vars)

    def _prune(self, model, in_channels_code, out_channels_code):
        """Prune convs, batch norms, and the final linear layer in place.

        Masks are consumed from the code lists conv-by-conv; downsample and
        residual-join convs reuse cached masks so shapes stay consistent
        across skip connections.
        """
        logging.info("Start to Prune Model.")
        convs = [
            module for name, module in model.named_modules()
            if isinstance(module, torch.nn.Conv2d)
        ]
        org_convs = copy.deepcopy(convs)
        batch_norms = [
            module for name, module in model.named_modules()
            if isinstance(module, torch.nn.BatchNorm2d)
        ]
        pre_end_mask_code = None
        block_start_mask = None
        for idx, conv in enumerate(convs):
            end_mask = None
            if idx == 0:
                start_mask = None
            else:
                pre_out_channels = org_convs[idx - 1].out_channels
                if conv.in_channels == pre_out_channels and pre_end_mask_code:
                    start_mask = pre_end_mask_code
                else:
                    start_mask = in_channels_code[:conv.in_channels]
                # cache downsample conv mask code
                if not block_start_mask:
                    block_start_mask = start_mask
                if idx in self.downsamples:
                    # downsample conv
                    start_mask = block_start_mask
                    block_start_mask = None
                    end_mask = pre_end_mask_code
                elif idx in self.residauls:
                    # Identify jump node
                    end_mask = block_start_mask
                    block_start_mask = None
                else:
                    in_channels_code = in_channels_code[conv.in_channels:]
            end_mask = end_mask or out_channels_code[:conv.out_channels]
            out_channels_code = out_channels_code[conv.out_channels:]
            # all code is 0, default 1
            PruneConv2DFilter(conv).filter(end_mask, start_mask)
            batch_norm = batch_norms[idx]
            PruneBatchNormFilter(batch_norm).filter(end_mask)
            pre_end_mask_code = end_mask
        linear = [
            module for name, module in model.named_modules()
            if isinstance(module, torch.nn.Linear)
        ][-1]
        if self.num_classes:
            linear.out_features = self.num_classes
        PruneLinearFilter(linear).filter(pre_end_mask_code)
        # remove the redundant code and calculate the length
        if in_channels_code:
            self.in_channels_size = self.in_channels_size - len(
                in_channels_code)
        if out_channels_code:
            self.out_channels_size = self.out_channels_size - len(
                out_channels_code)
        return model
Ejemplo n.º 15
0
Archivo: cam.py Proyecto: ylfzr/vega
def _get_model(args):
    """Load the model from desc/weight files and move it to GPU in eval mode."""
    from zeus.model_zoo import ModelZoo
    net = ModelZoo.get_model(args.model_desc_file, args.model_weights_file)
    net = net.cuda()
    net.eval()
    return net