Example 1
def create_summaries_pruning(pruning_op_vars: List[PruningOpVars]):
    """
    Create TensorBoard summary ops in the current graph for the
    given list of PruningOpVars.

    :param pruning_op_vars: the list of named tuples containing the masked input to
        each pruned op, whose sparsity will be recorded in TensorBoard.
    :return: the created summaries for the pruned op vars
    """
    summaries = []

    for op_vars in pruning_op_vars:
        try:
            # Use the built-in op when the TensorFlow version provides one
            zero_fraction = tf_compat.zero_fraction
        except Exception:
            # Fallback for versions without it: compute the zero fraction manually

            def zero_fraction(inp: tf_compat.Tensor):
                nonzero = tf_compat.cast(
                    tf_compat.reduce_sum(
                        tf_compat.cast(tf_compat.not_equal(inp, 0),
                                       tf_compat.int64)),
                    tf_compat.float32,
                )
                size = tf_compat.size(inp, out_type=tf_compat.float32)

                return 1 - tf_compat_div(nonzero, size)

        if is_prunable_op(op_vars.op):
            sum_op = tf_compat.summary.scalar(
                "Modifier_Pruning/{}".format(clean_tensor_name(op_vars.op)),
                zero_fraction(op_vars.masked),
            )
            summaries.append(sum_op)

    return summaries
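
A minimal usage sketch for the helper above, assuming `tf_compat` aliases `tensorflow.compat.v1`, that the pruning ops and summaries were built in the default graph, and that `pruning_op_vars` came from one of the graph-building helpers shown in the later examples; the log directory and step value are illustrative.

# Hypothetical usage: merge the per-op sparsity summaries and log them once
summaries = create_summaries_pruning(pruning_op_vars)
merged_summaries = tf_compat.summary.merge(summaries)

with tf_compat.Session() as sess:
    sess.run(tf_compat.global_variables_initializer())
    writer = tf_compat.summary.FileWriter("./tb_logs", sess.graph)
    writer.add_summary(sess.run(merged_summaries), global_step=0)
    writer.close()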
Example 2
    @staticmethod
    def model(
        op_tens: tf_compat.Tensor,
        ks_group: str,
        additional: str = None,
        trailing_slash: bool = False,
    ) -> str:
        """
        Create a model specific kernel sparsity scope in the tf graph.
        Use cases include the mask, threshold, and related variables used to
        induce sparsity, along with the ops that update those variables.

        :param op_tens: the op tensor to create the scope for
        :param ks_group: the group identifier the scope should be created under
        :param additional: any additional scope that should be added to the end
        :param trailing_slash: include a trailing forward slash if True, else False
        :return: the proper scope
        """
        op_name = clean_tensor_name(op_tens)
        scope = PruningScope._format(
            "{}_{}".format(op_name, PruningScope.NM_KS), ks_group)
        scope = PruningScope._format(scope,
                                     additional=additional,
                                     trailing_slash=trailing_slash)

        return scope
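
An illustrative sketch of how a scope produced by `PruningScope.model` might be used; `conv_op`, `conv_weight`, the group name, and the `additional` suffix are assumed names for this sketch, not part of the library.

# Hypothetical usage: derive a scope for an op and create a mask variable under it
scope = PruningScope.model(
    conv_op, "sparsity_group", additional="mask_update", trailing_slash=True
)
with tf_compat.name_scope(scope):
    mask = tf_compat.Variable(
        tf_compat.ones(conv_weight.get_shape()), trainable=False, name="mask"
    )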
Example 3
def test_op_var_name():
    graph = tf_compat.Graph()

    with graph.as_default():
        var = tf_compat.Variable(tf_compat.random_normal([64]),
                                 dtype=tf_compat.float32,
                                 name="test_var_name")
        name = clean_tensor_name(var)
        assert name == "test_var_name"
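
The test above relies on `clean_tensor_name` stripping the ":0" output index that TensorFlow appends to variable names. Below is a minimal, hypothetical re-implementation of just that behavior for illustration; the library's actual helper may clean additional suffixes.

import re

def clean_tensor_name_sketch(tens) -> str:
    # Accept either a tensor/variable or its name string
    name = tens if isinstance(tens, str) else tens.name
    # Drop the trailing ":<output index>", e.g. "test_var_name:0" -> "test_var_name"
    return re.sub(r":\d+$", "", name)

assert clean_tensor_name_sketch("test_var_name:0") == "test_var_name"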
Example 4
def test_get_ops_and_inputs_by_name_or_regex(
    net_const,
    var_names,
    expected_ops,
    expected_tens,
):
    with tf_compat.Graph().as_default() as graph:
        net_const()
        ops_and_inputs = get_ops_and_inputs_by_name_or_regex(var_names, graph)
        assert len(ops_and_inputs) == len(expected_ops)

        for op, inp in ops_and_inputs:
            assert op.name in expected_ops
            assert clean_tensor_name(inp.name) in expected_tens
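
An illustrative invocation of `get_ops_and_inputs_by_name_or_regex` on a hand-built graph; the layer names are assumptions, and the "re:.*" match-everything pattern is taken from the later examples rather than from this test.

with tf_compat.Graph().as_default() as graph:
    inp = tf_compat.placeholder(tf_compat.float32, [None, 16], name="inp")
    weight = tf_compat.Variable(tf_compat.random_normal([16, 8]), name="dense/weight")
    out = tf_compat.matmul(inp, weight, name="dense/matmul")

    # Match every variable name and collect (op, input tensor) pairs
    for op, tens in get_ops_and_inputs_by_name_or_regex(["re:.*"], graph):
        print(op.name, clean_tensor_name(tens.name))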
Example 5
    def create_ops(
        self,
        steps_per_epoch: int,
        global_step: tf_compat.Tensor,
        graph: tf_compat.Graph,
    ) -> Tuple[List[Union[tf_compat.Tensor, tf_compat.Operation]], Dict[str, Any]]:
        """
        Create the sparsity ops to modify the training graph according to the settings
        for the current instance.

        :param steps_per_epoch: the number of steps (batches) per training epoch
        :param global_step: the global step used while training
        :param graph: the graph to be modified
        :return: a tuple (list of ops, dict of named ops / tensors)
            to be run or used for modifying the training process.
        """
        mod_ops, mod_extras = super().create_ops(graph, None, None)
        start_step, end_step = self.start_end_steps(steps_per_epoch, after_optim=True)

        params = (
            self._params
            if self._params != ALL_TOKEN
            else [
                clean_tensor_name(var.name)
                for _, var in
                # Have ALL_TOKEN match to all variable names for now
                get_ops_and_inputs_by_name_or_regex(["re:.*"], graph)
            ]
        )

        with graph.as_default():
            update_op, prune_op_vars = create_ks_scheduled_constant_graph_ops(
                graph,
                global_step,
                params,
                start_step,
                end_step,
                self.ks_group,
            )

            if self.log_types == ALL_TOKEN or "tensorboard" in self.log_types:
                mod_extras[EXTRAS_KEY_SUMMARIES] = create_summaries_pruning(
                    prune_op_vars
                )

        mod_ops.append(update_op)
        self._prune_op_vars = prune_op_vars
        # self._update_ready = tf_compat.constant(False, name="nm_update_ready")

        return mod_ops, mod_extras
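
A hedged sketch of wiring `create_ops` into a TF1-style training loop; `modifier` stands for an instance of the class defining this method, and the model-building step and step counts are placeholders for whatever the surrounding training code provides.

with tf_compat.Graph().as_default() as graph:
    global_step = tf_compat.train.get_or_create_global_step()
    # ... build the model and its train_op here ...
    mod_ops, mod_extras = modifier.create_ops(
        steps_per_epoch=100, global_step=global_step, graph=graph
    )

    with tf_compat.Session() as sess:
        sess.run(tf_compat.global_variables_initializer())
        for _ in range(1000):
            # sess.run(train_op)  # optimizer step for the model
            sess.run(mod_ops)  # then apply / refresh the pruning masks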
Example 6
    def create_ops(
        self,
        steps_per_epoch: int,
        global_step: tf_compat.Tensor,
        graph: tf_compat.Graph,
    ) -> Tuple[List[Union[tf_compat.Tensor, tf_compat.Operation]], Dict[str, Any]]:
        """
        Create the sparsity ops to modify the training graph according to the settings
        for the current instance.

        :param steps_per_epoch: the number of steps (batches) per training epoch
        :param global_step: the global step used while training
        :param graph: the graph to be modified
        :return: a tuple (list of ops, dict of named ops / tensors)
            to be run or used for modifying the training process.
        """
        mod_ops, mod_extras = super().create_ops(graph, steps_per_epoch,
                                                 global_step)
        start_step, end_step = self.start_end_steps(steps_per_epoch,
                                                    after_optim=True)
        update_frequency_step = self.update_frequency_steps(steps_per_epoch)
        params = (
            self._params
            if self._params != ALL_TOKEN
            else [
                clean_tensor_name(var.name)
                for _, var in
                # Have ALL_TOKEN match to all variable names for now
                get_ops_and_inputs_by_name_or_regex(["re:.*"], graph)
            ]
        )

        with graph.as_default():
            (
                update_op,
                prune_op_vars,
                update_ready,
                sparsity,
            ) = get_or_create_ks_scheduled_graph_ops(
                graph,
                global_step,
                params,
                start_step,
                end_step,
                update_frequency_step,
                self._init_sparsity,
                self._final_sparsity,
                self.exponent,
                self._leave_enabled,
                self.ks_group,
                self._mask_creator,
            )

            if self.log_types == ALL_TOKEN or "tensorboard" in self.log_types:
                mod_extras[EXTRAS_KEY_SUMMARIES] = create_summaries_pruning(
                    prune_op_vars)

        mod_ops.append(update_op)
        self._prune_op_vars = prune_op_vars
        self._update_ready = update_ready
        self._sparsity = sparsity

        # Create and cache the mask initializers to be run
        # through initialize_session. When using the estimator,
        # the initialization is done as part of the init_fn of
        # the training scaffold object, at which point the graph can
        # no longer be changed (hence the creation and caching)
        masks = [op_vars.mask for op_vars in self._prune_op_vars]
        self._mask_initializer = (tf_compat.variables_initializer(masks)
                                  if masks else None)

        return mod_ops, mod_extras
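
A follow-up sketch of how the pieces cached above might be consumed after graph construction: run the cached mask initializer once after variable initialization and log the summaries returned in `mod_extras`. `modifier`, the direct access to its private `_mask_initializer` attribute, and the log directory are illustrative assumptions; in practice the library routes this through its own session-initialization helpers.

summaries = mod_extras.get(EXTRAS_KEY_SUMMARIES, [])
with graph.as_default():
    init_op = tf_compat.global_variables_initializer()
    merged = tf_compat.summary.merge(summaries) if summaries else None

with tf_compat.Session(graph=graph) as sess:
    sess.run(init_op)
    # Run the cached mask initializer created in create_ops, if one was made
    if modifier._mask_initializer is not None:
        sess.run(modifier._mask_initializer)
    if merged is not None:
        writer = tf_compat.summary.FileWriter("./tb_logs", sess.graph)
        writer.add_summary(sess.run(merged), global_step=0)
        writer.close()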