Example #1
    def create_sparsity_mask(
        self,
        tensor: tf_compat.Tensor,
        sparsity: tf_compat.Tensor,
    ) -> tf_compat.Tensor:
        """
        :param tensor: A tensor of a model layer's weights
        :param sparsity: the target sparsity to use for assigning the masks
        :return: A sparsity mask close to the set sparsity based on the values of
            the input tensor
        """
        abs_var = tf_compat.abs(tensor)  # Magnitudes of weights
        sparse_threshold_index = tf_compat.cast(
            tf_compat.round(
                tf_compat.cast(tf_compat.size(abs_var), tf_compat.float32) *
                sparsity),
            tf_compat.int32,
        )
        sparse_threshold_index = tf_compat.minimum(
            tf_compat.maximum(sparse_threshold_index, 0),
            tf_compat.size(tensor) - 1,
        )

        try:
            argsort = tf_compat.argsort
        except Exception:
            try:
                argsort = tf_compat.contrib.framework.argsort
            except Exception:
                raise RuntimeError(
                    "cannot find argsort function in tensorflow_v1, "
                    "currently unsupported")

        # produce tensor where each element is the index in sorted order of abs_var
        abs_var_flat = tf_compat.reshape(abs_var, [-1])
        element_ranks_flat = tf_compat.scatter_nd(
            tf_compat.expand_dims(argsort(abs_var_flat), 1),
            tf_compat.range(abs_var_flat.get_shape()[0].value),
            abs_var_flat.get_shape(),
        )
        element_ranks = tf_compat.reshape(element_ranks_flat,
                                          abs_var.get_shape())
        return tf_compat.cast(
            tf_compat.greater(element_ranks, sparse_threshold_index),
            tf_compat.float32,
        )
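For reference, a minimal standalone sketch of the same magnitude-masking mechanics, assuming tf_compat aliases tensorflow.compat.v1 and using a small made-up weight tensor; this is an illustration, not part of the library code above:

import tensorflow.compat.v1 as tf_compat

tf_compat.disable_eager_execution()

weights = tf_compat.constant([[0.1, -0.5, 0.02], [0.9, -0.03, 0.4]])
sparsity = tf_compat.constant(0.5)

abs_w = tf_compat.abs(weights)
threshold_index = tf_compat.cast(
    tf_compat.round(tf_compat.cast(tf_compat.size(abs_w), tf_compat.float32) * sparsity),
    tf_compat.int32,
)
flat = tf_compat.reshape(abs_w, [-1])
# rank of every element in ascending order of magnitude
ranks = tf_compat.scatter_nd(
    tf_compat.expand_dims(tf_compat.argsort(flat), 1),
    tf_compat.range(tf_compat.size(flat)),
    tf_compat.shape(flat),
)
mask = tf_compat.cast(
    tf_compat.greater(tf_compat.reshape(ranks, tf_compat.shape(abs_w)), threshold_index),
    tf_compat.float32,
)

with tf_compat.Session() as sess:
    print(sess.run(mask))  # zeros land on the lowest-magnitude weights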
Example #2
def conv_net():
    inp = tf_compat.placeholder(tf_compat.float32, [None, 28, 28, 1], name="inp")

    with tf_compat.name_scope("conv_net"):
        conv1 = _conv("conv1", inp, 1, 32, 3, 2, "SAME")
        conv2 = _conv("conv2", conv1, 32, 32, 3, 2, "SAME")
        avg_pool = tf_compat.reduce_mean(conv2, axis=[1, 2])
        reshape = tf_compat.reshape(avg_pool, [-1, 32])
        mlp = _fc("mlp", reshape, 32, 10, add_relu=False)

    out = tf_compat.sigmoid(mlp, name="out")

    return out, inp
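A hypothetical usage sketch for the graph builder above, assuming conv_net and its private _conv/_fc helpers are importable from the same module and that tf_compat aliases tensorflow.compat.v1:

import numpy as np
import tensorflow.compat.v1 as tf_compat

tf_compat.disable_eager_execution()

out, inp = conv_net()  # assumes conv_net and its helpers are in scope
with tf_compat.Session() as sess:
    sess.run(tf_compat.global_variables_initializer())
    batch = np.zeros((4, 28, 28, 1), dtype=np.float32)
    preds = sess.run(out, feed_dict={inp: batch})  # shape (4, 10), sigmoid outputs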
Example #3
    def _map_mask_to_tensor(
        self,
        grouped_mask: tf_compat.Tensor,
        original_tensor_shape: tf_compat.TensorShape,
    ) -> tf_compat.Tensor:
        """
        :param grouped_mask: A binary mask the size of a tensor from group_tensor
        :param original_tensor_shape: Shape of the original tensor grouped_mask
            derives from
        :return: The values from grouped_mask mapped to a tensor of size
            original_tensor_shape
        """
        (
            blocked_tens_shape,
            original_tensor_shape,
        ) = self._get_blocked_tens_shape_and_validate(original_tensor_shape)
        block_values_shape = [blocked_tens_shape[0], blocked_tens_shape[2]]
        # expand so every element has a corresponding value in the original tensor
        block_mask = tf_compat.reshape(grouped_mask, block_values_shape)
        block_mask = tf_compat.expand_dims(block_mask, 1)

        # Recover reduced dimension of block_mask, using tile instead of broadcast_to
        # for compatibility with older versions of tf
        block_mask_shape = [dim.value for dim in block_mask.shape]
        tile_shape = [
            int(block_dim / mask_dim)
            for (block_dim,
                 mask_dim) in zip(blocked_tens_shape, block_mask_shape)
        ]
        # equivalent to: tf_compat.broadcast_to(block_mask, blocked_tens_shape)
        tensor_mask_blocked = tf_compat.tile(block_mask, tile_shape)

        mask = tf_compat.reshape(tensor_mask_blocked, original_tensor_shape)
        # Undo channel / kernel transpose if applicable
        n_dims = len(original_tensor_shape)
        if n_dims >= 3:
            tens_trans_dims = [*range(2, n_dims), 0, 1]
            mask = tf_compat.transpose(mask, tens_trans_dims)
        return mask
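The tile call above substitutes for broadcast_to on older TensorFlow releases; a minimal standalone illustration of that equivalence, assuming tensorflow.compat.v1 and made-up shapes:

import tensorflow.compat.v1 as tf_compat

block_mask = tf_compat.constant([[1.0], [0.0]])          # one value per block, shape [2, 1]
tiled = tf_compat.tile(block_mask, [1, 4])               # shape [2, 4]
broadcast = tf_compat.broadcast_to(block_mask, [2, 4])   # identical result on newer TF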
Example #4
def _classifier(
    x_tens: tf_compat.Tensor,
    training: Union[bool, tf_compat.Tensor],
    num_classes: int,
    class_type: str,
    kernel_initializer,
    bias_initializer,
    beta_initializer,
    gamma_initializer,
) -> tf_compat.Tensor:
    with tf_compat.variable_scope("classifier", reuse=tf_compat.AUTO_REUSE):
        logits = pool2d(name="avgpool", x_tens=x_tens, type_="global_avg", pool_size=1)

        if num_classes:
            logits = tf_compat.layers.dropout(
                logits, 0.2, training=training, name="dropout"
            )
            logits = tf_compat.reshape(logits, [-1, int(logits.shape[3])])

            if class_type:
                if class_type == "single":
                    act = "softmax"
                elif class_type == "multi":
                    act = "sigmoid"
                else:
                    raise ValueError(
                        "unknown class_type given of {}".format(class_type)
                    )
            else:
                act = None

            logits = dense_block(
                "dense",
                logits,
                training,
                num_classes,
                include_bn=False,
                act=act,
                kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer,
                beta_initializer=beta_initializer,
                gamma_initializer=gamma_initializer,
            )

    return logits
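A minimal sketch of the pooled-feature reshape used above, with an illustrative 8-channel NHWC tensor standing in for the pool2d output (pool2d and dense_block come from the surrounding library and are not shown here):

import tensorflow.compat.v1 as tf_compat

pooled = tf_compat.zeros([4, 1, 1, 8])                        # NHWC output of a global average pool
flat = tf_compat.reshape(pooled, [-1, int(pooled.shape[3])])  # -> [4, 8], ready for the dense block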
Example #5
    def group_tensor(self, tensor: tf_compat.Tensor) -> tf_compat.Tensor:
        """
        :param tensor: The tensor to transform
        :return: The absolute mean values of the tensor grouped by blocks of
            shape self._block_shape
        """
        blocked_tens_shape, _ = self._get_blocked_tens_shape_and_validate(tensor.shape)
        # reorder so that in and out channel dimensions come before kernel
        n_dims = len(tensor.shape)
        if n_dims >= 3:
            tens_trans_dims = [n_dims - 2, n_dims - 1, *range(n_dims - 2)]
            tensor = tf_compat.transpose(tensor, tens_trans_dims)
        blocked_tens = tf_compat.reshape(tensor, blocked_tens_shape)
        reduced_blocks = self._grouping_op(tf_compat.abs(blocked_tens), 1, keepdims=True)
        return reduced_blocks
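A standalone illustration of the channel-first transpose applied above, assuming tensorflow.compat.v1 and an arbitrary 3x3 convolution kernel shape:

import tensorflow.compat.v1 as tf_compat

kernel = tf_compat.zeros([3, 3, 16, 32])              # [kernel_h, kernel_w, in_chan, out_chan]
n_dims = len(kernel.shape)
perm = [n_dims - 2, n_dims - 1, *range(n_dims - 2)]   # -> [2, 3, 0, 1]
channels_first = tf_compat.transpose(kernel, perm)    # shape [16, 32, 3, 3]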
Example #6
    def processor(self, file_path: tf_compat.Tensor, label: tf_compat.Tensor):
        """
        :param file_path: the path to the file to load an image from
        :param label: the label for the given image
        :return: a tuple containing the processed image and label
        """
        with tf_compat.name_scope("img_to_tensor"):
            img = tf_compat.read_file(file_path)

            # Decode and reshape the image to 3 dimensional tensor
            # Note: "expand_animations" not available for TF 1.13 and prior,
            # hence the reshape trick below
            img = tf_compat.image.decode_image(img)
            img_shape = tf_compat.shape(img)
            img = tf_compat.reshape(img,
                                    [img_shape[0], img_shape[1], img_shape[2]])
            img = tf_compat.cast(img, dtype=tf_compat.float32)

        if self.pre_resize_transforms:
            transforms = (self.pre_resize_transforms.train
                          if self.train else self.pre_resize_transforms.val)
            if transforms:
                with tf_compat.name_scope("pre_resize_transforms"):
                    for trans in transforms:
                        img = trans(img)

        if self._image_size:
            res_callable = resize((self.image_size, self.image_size))
            img = res_callable(img)

        if self.post_resize_transforms:
            transforms = (self.post_resize_transforms.train
                          if self.train else self.post_resize_transforms.val)
            if transforms:
                with tf_compat.name_scope("post_resize_transforms"):
                    for trans in transforms:
                        img = trans(img)

        return img, label
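A standalone sketch of the decode/reshape workaround used above, assuming tensorflow.compat.v1; a synthetic PNG stands in for the bytes returned by read_file:

import tensorflow.compat.v1 as tf_compat

tf_compat.disable_eager_execution()

png_bytes = tf_compat.image.encode_png(tf_compat.zeros([4, 4, 3], dtype=tf_compat.uint8))
img = tf_compat.image.decode_image(png_bytes)                             # static shape is unknown here
img_shape = tf_compat.shape(img)
img = tf_compat.reshape(img, [img_shape[0], img_shape[1], img_shape[2]])  # recover a 3-D tensor
img = tf_compat.cast(img, dtype=tf_compat.float32)

with tf_compat.Session() as sess:
    print(sess.run(tf_compat.shape(img)))  # [4 4 3]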
Example #7
def _classifier(
    x_tens: tf_compat.Tensor,
    training: Union[bool, tf_compat.Tensor],
    num_classes: int,
    class_type: str,
    kernel_initializer,
    bias_initializer,
    beta_initializer,
    gamma_initializer,
) -> tf_compat.Tensor:
    with tf_compat.variable_scope("classifier", reuse=tf_compat.AUTO_REUSE):
        if num_classes:
            if class_type:
                if class_type == "single":
                    final_act = "softmax"
                elif class_type == "multi":
                    final_act = "sigmoid"
                else:
                    raise ValueError(
                        "unknown class_type given of {}".format(class_type))
            else:
                final_act = None

            out = tf_compat.transpose(x_tens, [0, 3, 1, 2])
            out = tf_compat.reshape(out, [-1, 7 * 7 * 512])
            out = dense_block(
                "mlp_0",
                out,
                training,
                channels=4096,
                include_bn=False,
                include_bias=True,
                dropout_rate=0.5,
                kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer,
                beta_initializer=beta_initializer,
                gamma_initializer=gamma_initializer,
            )
            out = dense_block(
                "mlp_1",
                out,
                training,
                channels=4096,
                include_bn=False,
                include_bias=True,
                dropout_rate=0.5,
                kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer,
                beta_initializer=beta_initializer,
                gamma_initializer=gamma_initializer,
            )
            logits = dense_block(
                "mlp_2",
                out,
                training,
                channels=num_classes,
                include_bn=False,
                include_bias=True,
                act=final_act,
                dropout_rate=0.5,
                kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer,
                beta_initializer=beta_initializer,
                gamma_initializer=gamma_initializer,
            )
        else:
            logits = x_tens

    return logits
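A minimal sketch of the flatten step above, assuming tensorflow.compat.v1 and a 7x7x512 NHWC feature map as produced by a VGG-style backbone:

import tensorflow.compat.v1 as tf_compat

feats = tf_compat.zeros([2, 7, 7, 512])           # NHWC backbone output
out = tf_compat.transpose(feats, [0, 3, 1, 2])    # -> NCHW, shape [2, 512, 7, 7]
out = tf_compat.reshape(out, [-1, 7 * 7 * 512])   # -> [2, 25088], input to the dense blocks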
Example #8
def mnist_net(inputs: tf_compat.Tensor,
              num_classes: int = 10,
              act: str = None) -> tf_compat.Tensor:
    """
    A simple convolutional model created for the MNIST dataset

    :param inputs: the inputs tensor to create the network for
    :param num_classes: the number of classes to create the final layer for
    :param act: the final activation to use in the model,
        supported: [None, relu, sigmoid, softmax]
    :return: the logits output from the created network
    """
    if act not in [None, "sigmoid", "softmax"]:
        raise ValueError("unsupported value for act given of {}".format(act))

    with tf_compat.variable_scope(BASE_NAME_SCOPE, reuse=tf_compat.AUTO_REUSE):
        with tf_compat.variable_scope("blocks", reuse=tf_compat.AUTO_REUSE):
            x_tens = conv2d(
                name="conv0",
                x_tens=inputs,
                in_chan=1,
                out_chan=16,
                kernel=5,
                stride=1,
                padding="SAME",
                act="relu",
            )
            x_tens = conv2d(
                name="conv1",
                x_tens=x_tens,
                in_chan=16,
                out_chan=32,
                kernel=5,
                stride=2,
                padding="SAME",
                act="relu",
            )
            x_tens = conv2d(
                name="conv2",
                x_tens=x_tens,
                in_chan=32,
                out_chan=64,
                kernel=5,
                stride=1,
                padding="SAME",
                act="relu",
            )
            x_tens = conv2d(
                name="conv3",
                x_tens=x_tens,
                in_chan=64,
                out_chan=128,
                kernel=5,
                stride=2,
                padding="SAME",
                act="relu",
            )

        with tf_compat.variable_scope("classifier"):
            x_tens = tf_compat.reduce_mean(x_tens, axis=[1, 2])
            x_tens = tf_compat.reshape(x_tens, [-1, 128])
            x_tens = fc(name="fc",
                        x_tens=x_tens,
                        in_chan=128,
                        out_chan=num_classes)

        with tf_compat.variable_scope("logits"):
            logits = activation(x_tens, act)

    return logits
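A hypothetical usage sketch for mnist_net, assuming the function, BASE_NAME_SCOPE, and its conv2d/fc/activation helpers are importable from the module above and that tf_compat aliases tensorflow.compat.v1:

import numpy as np
import tensorflow.compat.v1 as tf_compat

tf_compat.disable_eager_execution()

inputs = tf_compat.placeholder(tf_compat.float32, [None, 28, 28, 1], name="inputs")
logits = mnist_net(inputs, num_classes=10, act="softmax")

with tf_compat.Session() as sess:
    sess.run(tf_compat.global_variables_initializer())
    batch = np.zeros((2, 28, 28, 1), dtype=np.float32)
    probs = sess.run(logits, feed_dict={inputs: batch})  # shape (2, 10)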