Example #1
0
File: resnet.py — Project: PIlotcnc/neural
def _basic_block(
    name: str,
    x_tens: tf_compat.Tensor,
    training: Union[bool, tf_compat.Tensor],
    out_channels: int,
    stride: int,
    kernel_initializer,
    bias_initializer,
    beta_initializer,
    gamma_initializer,
) -> tf_compat.Tensor:
    """
    Build a basic residual block: two 3x3 conv/BN layers plus a shortcut
    connection, followed by a ReLU on the summed output.

    :param name: the variable-scope name to create the block under
    :param x_tens: the input tensor to the block
    :param training: bool or Tensor selecting training vs inference mode
    :param out_channels: number of output channels for both conv layers
    :param stride: stride for the first conv (and the shortcut projection)
    :param kernel_initializer: initializer for the conv kernels
    :param bias_initializer: initializer for the conv biases
    :param beta_initializer: initializer for the batch norm beta variables
    :param gamma_initializer: initializer for the batch norm gamma variables
    :return: the output tensor of the block
    """
    inits = dict(
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        beta_initializer=beta_initializer,
        gamma_initializer=gamma_initializer,
    )

    with tf_compat.variable_scope(name, reuse=tf_compat.AUTO_REUSE):
        out = conv2d_block(
            "conv_bn_0",
            x_tens,
            training,
            out_channels,
            kernel_size=3,
            stride=stride,
            padding=1,
            **inits,
        )
        # Second conv has no activation; ReLU is applied after the residual add
        out = conv2d_block(
            "conv_bn_1",
            out,
            training,
            out_channels,
            kernel_size=3,
            padding=1,
            act=None,
            **inits,
        )

        # Project the shortcut only when the spatial size or channel count changes
        if stride > 1 or int(x_tens.shape[3]) != out_channels:
            shortcut = _identity_modifier(
                x_tens, training, out_channels, stride, **inits
            )
        else:
            shortcut = x_tens
        out = tf_compat.add(out, shortcut)

        out = activation(out, act="relu", name="act_out")

    return out
Example #2
0
def _inverted_bottleneck_block(
    name: str,
    x_tens: tf_compat.Tensor,
    training: Union[bool, tf_compat.Tensor],
    out_channels: int,
    exp_channels: int,
    stride: int,
    kernel_initializer,
    bias_initializer,
    beta_initializer,
    gamma_initializer,
) -> tf_compat.Tensor:
    """
    Build an inverted-bottleneck block: 1x1 expand -> 3x3 depthwise (strided)
    -> 1x1 compress, with a residual add when input and output shapes match.

    :param name: the variable-scope name to create the block under
    :param x_tens: the input tensor to the block
    :param training: bool or Tensor selecting training vs inference mode
    :param out_channels: number of channels after the compress conv
    :param exp_channels: number of channels in the expanded intermediate
    :param stride: stride for the depthwise spatial conv
    :param kernel_initializer: initializer for the conv kernels
    :param bias_initializer: initializer for the conv biases
    :param beta_initializer: initializer for the batch norm beta variables
    :param gamma_initializer: initializer for the batch norm gamma variables
    :return: the output tensor of the block
    """
    inits = dict(
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        beta_initializer=beta_initializer,
        gamma_initializer=gamma_initializer,
    )

    with tf_compat.variable_scope(name, reuse=tf_compat.AUTO_REUSE):
        expanded = conv2d_block(
            "expand",
            x_tens,
            training,
            exp_channels,
            kernel_size=1,
            act="relu6",
            **inits,
        )
        spatial = depthwise_conv2d_block(
            "spatial",
            expanded,
            training,
            exp_channels,
            kernel_size=3,
            stride=stride,
            act="relu6",
            **inits,
        )
        # Linear bottleneck: no activation on the compress conv
        out = conv2d_block(
            "compress",
            spatial,
            training,
            out_channels,
            kernel_size=1,
            act=None,
            **inits,
        )

        # Residual connection only when spatial size and channels are unchanged
        if stride == 1 and int(x_tens.shape[3]) == out_channels:
            out = tf_compat.add(out, x_tens)

    return out
Example #3
0
File: resnet.py — Project: PIlotcnc/neural
def _identity_modifier(
    x_tens: tf_compat.Tensor,
    training: Union[bool, tf_compat.Tensor],
    out_channels: int,
    stride: int,
    kernel_initializer,
    bias_initializer,
    beta_initializer,
    gamma_initializer,
) -> tf_compat.Tensor:
    """
    Project a residual shortcut with a strided 1x1 conv (no activation) so it
    matches the main branch's spatial size and channel count.

    :param x_tens: the shortcut input tensor to project
    :param training: bool or Tensor selecting training vs inference mode
    :param out_channels: number of output channels for the projection
    :param stride: stride of the 1x1 projection conv
    :param kernel_initializer: initializer for the conv kernel
    :param bias_initializer: initializer for the conv bias
    :param beta_initializer: initializer for the batch norm beta variables
    :param gamma_initializer: initializer for the batch norm gamma variables
    :return: the projected shortcut tensor
    """
    return conv2d_block(
        "identity",
        x_tens,
        training,
        out_channels,
        kernel_size=1,
        stride=stride,
        act=None,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        beta_initializer=beta_initializer,
        gamma_initializer=gamma_initializer,
    )
Example #4
0
File: vgg.py — Project: PIlotcnc/neural
    def create(
        self,
        name: str,
        x_tens: tf_compat.Tensor,
        training: Union[bool, tf_compat.Tensor],
        kernel_initializer,
        bias_initializer,
        beta_initializer,
        gamma_initializer,
    ) -> tf_compat.Tensor:
        """
        Create the section in the current graph and scope: a run of 3x3 conv
        blocks followed by a 2x2 max pool.

        :param name: the name for the scope to create the section under
        :param x_tens: the input tensor to the section
            (NOTE(review): original doc said "MobileNet architecture" though
            this lives in vgg.py — presumably a copy-paste; verify)
        :param training: bool or Tensor to specify if the model should be run
            in training or inference mode
        :param kernel_initializer: Initializer to use for the conv and
            fully connected kernels
        :param bias_initializer: Initializer to use for the bias in the fully connected
        :param beta_initializer: Initializer to use for the batch norm beta variables
        :param gamma_initializer: Initializer to use for the batch norm gamma variables
        :return: the output tensor from the section
        """
        with tf_compat.variable_scope(name, reuse=tf_compat.AUTO_REUSE):
            out = x_tens
            # Stack num_blocks identical 3x3 conv blocks, each feeding the next
            for block_index in range(self.num_blocks):
                out = conv2d_block(
                    name="block_{}".format(block_index),
                    x_tens=out,
                    training=training,
                    channels=self.out_channels,
                    kernel_size=3,
                    include_bn=self.use_batchnorm,
                    include_bias=True,
                    kernel_initializer=kernel_initializer,
                    bias_initializer=bias_initializer,
                    beta_initializer=beta_initializer,
                    gamma_initializer=gamma_initializer,
                )

            # Downsample by 2x at the end of the section
            out = pool2d(
                name="pool",
                x_tens=out,
                type_="max",
                pool_size=2,
                strides=2,
                padding="valid",
            )

        return out
Example #5
0
File: resnet.py — Project: PIlotcnc/neural
def _input(
    x_tens: tf_compat.Tensor,
    training: Union[bool, tf_compat.Tensor],
    kernel_initializer,
    bias_initializer,
    beta_initializer,
    gamma_initializer,
    simplified_arch: bool = False,
) -> tf_compat.Tensor:
    """
    Create the input stem for the graph.

    :param x_tens: the input tensor to the stem
    :param training: bool or Tensor selecting training vs inference mode
    :param kernel_initializer: initializer for the conv kernels
    :param bias_initializer: initializer for the conv biases
    :param beta_initializer: initializer for the batch norm beta variables
    :param gamma_initializer: initializer for the batch norm gamma variables
    :param simplified_arch: True for the simplified stem (single 3x3 conv,
        16 channels, no pooling); False for the standard stem
        (7x7 stride-2 conv, 64 channels, then 3x3 stride-2 max pool)
    :return: the output tensor of the stem
    """
    inits = dict(
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        beta_initializer=beta_initializer,
        gamma_initializer=gamma_initializer,
    )

    if simplified_arch:
        # Simplified stem: one 3x3 conv, no downsampling pool
        return conv2d_block(
            "input", x_tens, training, channels=16, kernel_size=3, **inits
        )

    out = conv2d_block(
        "input",
        x_tens,
        training,
        channels=64,
        kernel_size=7,
        stride=2,
        padding=3,
        **inits,
    )
    return pool2d(
        name="pool", x_tens=out, type_="max", pool_size=3, strides=2, padding=1
    )
Example #6
0
def _input(
    x_tens: tf_compat.Tensor,
    training: Union[bool, tf_compat.Tensor],
    kernel_initializer,
    bias_initializer,
    gamma_initializer,
) -> tf_compat.Tensor:
    """
    Create the input stem: a single 3x3 stride-2 conv block with 32 channels.

    :param x_tens: the input tensor to the stem
    :param training: bool or Tensor selecting training vs inference mode
    :param kernel_initializer: initializer for the conv kernel
    :param bias_initializer: initializer for the conv bias
    :param gamma_initializer: initializer for the batch norm gamma variables
    :return: the output tensor of the stem
    """
    return conv2d_block(
        "input",
        x_tens,
        training,
        channels=32,
        kernel_size=3,
        stride=2,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        gamma_initializer=gamma_initializer,
    )
Example #7
0
def _dw_sep_block(
    name: str,
    x_tens: tf_compat.Tensor,
    training: Union[bool, tf_compat.Tensor],
    out_channels: int,
    stride: int,
    kernel_initializer,
    bias_initializer,
    beta_initializer,
    gamma_initializer,
) -> tf_compat.Tensor:
    """
    Build a depthwise-separable conv block: a 3x3 depthwise conv followed by a
    1x1 pointwise conv.

    :param name: the variable-scope name to create the block under
    :param x_tens: the input tensor to the block
    :param training: bool or Tensor selecting training vs inference mode
    :param out_channels: number of output channels of the pointwise conv
    :param stride: stride for the depthwise conv
    :param kernel_initializer: initializer for the conv kernels
    :param bias_initializer: initializer for the conv biases
    :param beta_initializer: initializer for the batch norm beta variables
    :param gamma_initializer: initializer for the batch norm gamma variables
    :return: the output tensor of the block
    """
    with tf_compat.variable_scope(name, reuse=tf_compat.AUTO_REUSE):
        out = depthwise_conv2d_block(
            "depth",
            x_tens,
            training,
            int(x_tens.shape[3]),
            kernel_size=3,
            padding=1,
            stride=stride,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            # FIX: beta_initializer was previously dropped here although the
            # function receives it and the sibling depthwise call in
            # _inverted_bottleneck_block passes it through
            beta_initializer=beta_initializer,
            gamma_initializer=gamma_initializer,
        )
        out = conv2d_block(
            "point",
            out,
            training,
            out_channels,
            kernel_size=1,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            beta_initializer=beta_initializer,
            gamma_initializer=gamma_initializer,
        )

    return out
Example #8
0
def mobilenet_v2_const(
    x_tens: tf_compat.Tensor,
    training: Union[bool, tf_compat.Tensor],
    sec_settings: List[MobileNetV2Section],
    num_classes: int,
    class_type: str,
    kernel_initializer,
    bias_initializer,
    beta_initializer,
    gamma_initializer,
) -> tf_compat.Tensor:
    """
    Graph constructor for MobileNet V2 implementation.

    :param x_tens: The input tensor to the MobileNet architecture
    :param training: bool or Tensor to specify if the model should be run
        in training or inference mode
    :param sec_settings: The settings for each section in the MobileNet model
    :param num_classes: The number of classes to classify
    :param class_type: One of [single, multi, None] to support multi class training.
        Default single. If None, then will not add the fully connected at the end.
    :param kernel_initializer: Initializer to use for the conv and
        fully connected kernels
    :param bias_initializer: Initializer to use for the bias in the fully connected
    :param beta_initializer: Initializer to use for the batch norm beta variables
    :param gamma_initializer: Initializer to use for the batch norm gamma variables
    :return: the output tensor from the created graph
    """

    with tf_compat.variable_scope(BASE_NAME_SCOPE, reuse=tf_compat.AUTO_REUSE):
        out = x_tens

        # Chain each configured section, feeding the output of one into the next
        for sec_index, section in enumerate(sec_settings):
            out = section.create(
                name="section_{}".format(sec_index),
                x_tens=out,
                training=training,
                kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer,
                beta_initializer=beta_initializer,
                gamma_initializer=gamma_initializer,
            )

        # Final 1x1 feature-extraction conv (no activation) before the classifier
        out = conv2d_block(
            name="feat_extraction",
            x_tens=out,
            training=training,
            channels=1280,
            kernel_size=1,
            act=None,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            beta_initializer=beta_initializer,
            gamma_initializer=gamma_initializer,
        )

        logits = _classifier(
            out,
            training,
            num_classes,
            class_type,
            kernel_initializer,
            bias_initializer,
            beta_initializer,
            gamma_initializer,
        )

    return logits