Beispiel #1
0
def get_normalized_recipe(recipe):
    """Return a ThinningRecipe whose module/parameter keys are normalized.

    Only the key names are rewritten (via utils.normalize_module_name);
    the mapped values are carried over untouched.
    """
    norm = utils.normalize_module_name
    normalized_modules = {norm(name): val for name, val in recipe.modules.items()}
    normalized_params = {norm(name): val for name, val in recipe.parameters.items()}
    return ThinningRecipe(modules=normalized_modules,
                          parameters=normalized_params)
Beispiel #2
0
    def successors_f(self,
                     node_name,
                     successors_types,
                     done_list=None,
                     logging=None,
                     denorm_names=True):
        """Returns a list of <op>'s successors, if they match the <successors_types> criteria.

        Traverse the graph, starting at node <node_name>, and search for successor
        nodes, that have one of the node types listed in <successors_types>.
        If none is found, then return an empty list.

        <node_name> and the returned list of successors are strings (node names);
        set denorm_names=False to keep the internal (normalized) names.
        """
        node_name = utils.normalize_module_name(node_name)
        node = self.find_op(node_name)
        node_is_an_op = True
        if node is None:
            node_is_an_op = False
            node = self.find_param(node_name)
            if node is None:
                msglogger.warning(
                    "successors_f: Could not find node {}".format(node_name))
                return []

        # `done_list` tracks visited nodes across the recursion; a fresh list
        # is created per top-level call (never a mutable default argument).
        if done_list is None:
            done_list = []
        done_list.append(node_name)

        if not isinstance(successors_types, list):
            successors_types = [successors_types]

        # Only op nodes carry a 'type' to match against, and we require that
        # this is not the first node in the search (len(done_list) > 1).
        if node_is_an_op and node['type'] in successors_types and len(done_list) > 1:
            return [
                utils.denormalize_module_name(self._src_model, node_name)
                if denorm_names else node_name
            ]

        # The outgoing-edge scan is identical for op and data nodes, so one
        # expression replaces the original's two duplicated branches.
        succs = [
            edge.dst for edge in self.edges
            if edge.src == node_name and edge.dst not in done_list
        ]

        ret = []
        for successor in succs:
            ret += self.successors_f(successor, successors_types, done_list,
                                     logging, denorm_names)
        return ret
Beispiel #3
0
 def named_params_layers(self):
     """Yield (layer_name, param_name, param) triples for the source model.

     The layer name is obtained by dropping the parameter name's last
     component (e.g. 'weight'), normalizing the remainder, and then
     denormalizing it against the source model.
     """
     for param_name, param in self._src_model.named_parameters():
         layer_prefix = '.'.join(param_name.split('.')[:-1])
         normalized_name = utils.normalize_module_name(layer_prefix)
         sgraph_layer_name = utils.denormalize_module_name(
             self._src_model, normalized_name)
         yield sgraph_layer_name, param_name, param
Beispiel #4
0
    def predecessors_f(self,
                       node_name,
                       predecessors_types,
                       done_list=None,
                       logging=None,
                       denorm_names=True):
        """Returns a list of <op>'s predecessors, if they match the <predecessors_types> criteria.

        Traverse the graph backwards, starting at node <node_name>, and search
        for predecessor nodes whose type is listed in <predecessors_types>.
        Returns an empty list if none is found; set denorm_names=False to keep
        the internal (normalized) node names.
        """
        node_name = utils.normalize_module_name(node_name)
        node = self.find_op(node_name)
        node_is_an_op = True
        if node is None:
            node_is_an_op = False
            node = self.find_param(node_name)
            if node is None:
                msglogger.warning(
                    "predecessors_f: Could not find node {}".format(node_name))
                return []

        # `done_list` tracks visited nodes across the recursion; a fresh list
        # is created per top-level call (never a mutable default argument).
        if done_list is None:
            done_list = []
        done_list.append(node_name)

        if not isinstance(predecessors_types, list):
            predecessors_types = [predecessors_types]

        # Only op nodes carry a 'type' to match against, and we require that
        # this is not the first node in the search (len(done_list) > 1).
        if node_is_an_op and node['type'] in predecessors_types and len(done_list) > 1:
            return [
                utils.denormalize_module_name(self._src_model, node_name)
                if denorm_names else node_name
            ]

        # The incoming-edge scan is identical for op and data nodes, so one
        # expression replaces the original's two duplicated branches.
        preds = [
            edge.src for edge in self.edges
            if edge.dst == node_name and edge.src not in done_list
        ]

        ret = []
        for predecessor in preds:
            ret += self.predecessors_f(predecessor, predecessors_types,
                                       done_list, logging, denorm_names)
        return ret
def get_model_compute_budget(model, dataset, layers_to_prune=None):
    """Return the compute budget of the Convolution layers in an image-classifier.

    The budget is the sum of MAC counts of all Conv2d modules, as reported by
    a SummaryGraph built from a dataset-shaped dummy input.
    NOTE(review): `layers_to_prune` is currently unused -- confirm intent.
    """
    dummy_input = utils.get_dummy_input(dataset)
    g = SummaryGraph(model, dummy_input)
    total_macs = 0
    for name, module in model.named_modules():
        if not isinstance(module, torch.nn.Conv2d):
            continue
        # Use the SummaryGraph to obtain some other details of the models
        conv_op = g.find_op(normalize_module_name(name))
        assert conv_op is not None
        total_macs += conv_op['attrs']['MACs']
    del g
    return total_macs
Beispiel #6
0
    def load_state_dict(self, state, normalize_dataparallel_keys=False):
        """Load the scheduler's pruning masks from a checkpoint state dict.

        Args:
            state: checkpoint dict; must contain a 'masks_dict' entry mapping
                parameter names to mask tensors (or None).
            normalize_dataparallel_keys: if True, normalize the loaded mask
                keys (e.g. to strip DataParallel 'module.' prefixes).

        Raises:
            KeyError: if 'masks_dict' is missing from `state`.
        """
        try:
            loaded_masks = state['masks_dict']
        except KeyError:
            msglogger.error('could not load the CompressionScheduler state.'
                            ' masks_dict is missing from state')
            with contextlib.suppress(TypeError):
                msglogger.debug('Scheduler state keys are: {}'.format(', '.join(state)))
            raise

        if normalize_dataparallel_keys:
            loaded_masks = {normalize_module_name(k): v for k, v in loaded_masks.items()}
        device = model_device(self.model)
        # Iterate the maskers directly: the original left the iterated value
        # unused and looked each masker up again by name (redundant lookup).
        for name, masker in self.zeros_mask_dict.items():
            masker.mask = loaded_masks[name]
            if masker.mask is not None:
                masker.mask = masker.mask.to(device)
Beispiel #7
0
    def log_model_buffers(self, model, buffer_names, tag_prefix, epoch,
                          completed, total, freq):
        """Logs values of model buffers.

        Notes:
            1. Each buffer provided is logged in a separate CSV file
            2. Each CSV file is continuously updated during the run.
            3. In each call, a line is appended for each layer (i.e. module) containing the named buffers.
        """
        with ExitStack() as stack:
            # One append-mode CSV writer per requested buffer name; a header
            # row is written only when the file is created for the first time.
            writers = {}
            for buf_name in buffer_names:
                fname = self.get_fname(buf_name)
                is_new_file = not os.path.isfile(fname)
                fh = stack.enter_context(open(fname, 'a'))
                csv_writer = csv.writer(fh)
                if is_new_file:
                    csv_writer.writerow(
                        ['Layer', 'Epoch', 'Step', 'Total', 'Values'])
                writers[buf_name] = csv_writer

            for module_name, module in model.named_modules():
                for buffer_name in buffer_names:
                    try:
                        buf = getattr(module, buffer_name)
                    except AttributeError:
                        # This module doesn't own such a buffer -- skip it.
                        continue
                    if isinstance(buf, (list, torch.nn.ParameterList)):
                        values = [x for v in buf for x in v.view(-1).tolist()]
                    else:
                        values = buf.view(-1).tolist()
                    row_label = (utils.normalize_module_name(module_name)
                                 + '.' + buffer_name)
                    writers[buffer_name].writerow(
                        [row_label, epoch, completed, int(total)] + values)
Beispiel #8
0
    def make_fc(model, fc_module, g, name, seq_id, layer_id):
        """Build a SimpleNamespace describing a Linear layer.

        Args:
            model: the source model (kept for interface compatibility).
            fc_module: the Linear module (kept for interface compatibility).
            g: a SummaryGraph of the model, used to read op attributes/shapes.
            name: the layer's (denormalized) module name.
            seq_id: sequential position of this layer.
            layer_id: unique layer id.

        Returns:
            SimpleNamespace with type/name/id/t, volumes, MACs, and
            input/output feature-map dimensions.
        """
        fc = SimpleNamespace()
        fc.type = "Linear"
        fc.name = name
        fc.id = layer_id
        fc.t = seq_id

        # Use the SummaryGraph to obtain some other details of the models
        fc_op = g.find_op(normalize_module_name(name))
        assert fc_op is not None

        fc.weights_vol = fc_op['attrs']['weights_vol']
        fc.macs = fc_op['attrs']['MACs']
        fc.n_ofm = fc_op['attrs']['n_ofm']
        fc.n_ifm = fc_op['attrs']['n_ifm']
        # Removed a dead lookup: the original fetched the '.weight' parameter
        # via utils.utils.model_find_param() but never used the result.
        fc.ofm_h = g.param_shape(fc_op['outputs'][0])[0]
        fc.ofm_w = g.param_shape(fc_op['outputs'][0])[1]
        fc.ifm_h = g.param_shape(fc_op['inputs'][0])[0]
        fc.ifm_w = g.param_shape(fc_op['inputs'][0])[1]

        return fc
Beispiel #9
0
    def make_conv(model, conv_module, g, name, seq_id, layer_id):
        """Build a SimpleNamespace describing a Conv2D layer.

        Args:
            model: the source model (kept for interface compatibility).
            conv_module: the Conv2d module; kernel size and stride are read
                from it.
            g: a SummaryGraph of the model, used to read op attributes/shapes.
            name: the layer's (denormalized) module name.
            seq_id: sequential position of this layer.
            layer_id: unique layer id.

        Returns:
            SimpleNamespace with type/name/id/t, kernel/stride, volumes, MACs,
            and input/output feature-map dimensions (NCHW indices 2 and 3).
        """
        conv = SimpleNamespace()
        conv.type = "Conv2D"
        conv.name = name
        conv.id = layer_id
        conv.t = seq_id
        conv.k = conv_module.kernel_size[0]
        conv.stride = conv_module.stride

        # Use the SummaryGraph to obtain some other details of the models
        conv_op = g.find_op(normalize_module_name(name))
        assert conv_op is not None

        conv.weights_vol = conv_op['attrs']['weights_vol']
        conv.macs = conv_op['attrs']['MACs']
        conv.n_ofm = conv_op['attrs']['n_ofm']
        conv.n_ifm = conv_op['attrs']['n_ifm']
        # Removed a dead lookup: the original fetched the '.weight' parameter
        # via utils.utils.model_find_param() but never used the result.
        conv.ofm_h = g.param_shape(conv_op['outputs'][0])[2]
        conv.ofm_w = g.param_shape(conv_op['outputs'][0])[3]
        conv.ifm_h = g.param_shape(conv_op['inputs'][0])[2]
        conv.ifm_w = g.param_shape(conv_op['inputs'][0])[3]
        return conv
Beispiel #10
0
    def log_model_buffers(self, model, buffer_names, tag_prefix, epoch,
                          completed, total, freq):
        """Logs values of model buffers.

        Notes:
            1. Each buffer provided in 'buffer_names' is displayed in a separate table.
            2. Within each table, each value is displayed in a separate column.
        """
        # Per buffer name: collected rows (one per module owning that buffer),
        # and the widest value count seen (to size the table's columns).
        datas = {name: [] for name in buffer_names}
        maxlens = {name: 0 for name in buffer_names}
        for n, m in model.named_modules():
            for buffer_name in buffer_names:
                try:
                    p = getattr(m, buffer_name)
                except AttributeError:
                    # This module doesn't own such a buffer -- skip it.
                    continue
                data = datas[buffer_name]
                # ParameterList/list buffers are used as-is; tensor buffers
                # are flattened into a plain Python list.
                values = p if isinstance(
                    p,
                    (list, torch.nn.ParameterList)) else p.view(-1).tolist()
                # Row: "<normalized layer name>.<buffer name>" then the values.
                data.append([
                    utils.normalize_module_name(n) + '.' + buffer_name, *values
                ])
                maxlens[buffer_name] = max(maxlens[buffer_name], len(values))
        # NOTE(review): 'datas'/'maxlens' are built but never rendered here;
        # the table-display step appears to be outside this excerpt -- confirm.
Beispiel #11
0
 def find_op(self, lost_op_name):
     """Look up an op by name (normalized first); returns None if absent."""
     normalized = utils.normalize_module_name(lost_op_name)
     return self.ops.get(normalized, None)