Example #1
    def __init__(
            self,
            prev_layer,
            n_filter=32,
            filter_size=(3, 3),
            out_size=(30, 30),  # TODO: remove, unused by this implementation
            strides=(2, 2),
            padding='SAME',
            batch_size=None,  # TODO: remove, unused by this implementation
            act=None,
            W_init=tf.truncated_normal_initializer(stddev=0.02),
            b_init=tf.constant_initializer(value=0.0),
            W_init_args=None,  # TODO: Remove when TF <1.3 not supported
            b_init_args=None,  # TODO: Remove when TF <1.3 not supported
            name='decnn2d'):
        super(DeConv2d, self).__init__(prev_layer=prev_layer,
                                       act=act,
                                       W_init_args=W_init_args,
                                       b_init_args=b_init_args,
                                       name=name)

        logging.info(
            "DeConv2d %s: n_filters: %s strides: %s pad: %s act: %s" %
            (self.name, str(n_filter), str(strides), padding,
             self.act.__name__ if self.act is not None else 'No Activation'))

        if len(strides) != 2:
            raise ValueError(
                "len(strides) should be 2, DeConv2d and DeConv2dLayer are different."
            )

        conv2d_transpose = tf.layers.Conv2DTranspose(filters=n_filter,
                                                     kernel_size=filter_size,
                                                     strides=strides,
                                                     padding=padding,
                                                     activation=self.act,
                                                     kernel_initializer=W_init,
                                                     bias_initializer=b_init,
                                                     name=name)

        self.outputs = conv2d_transpose(self.inputs)
        # new_variables = conv2d_transpose.weights  # new_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)
        # new_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=self.name)  #vs.name)
        new_variables = get_collection_trainable(self.name)

        self._add_layers(self.outputs)
        self._add_params(new_variables)
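
A minimal usage sketch for the constructor above, assuming TensorLayer 1.x conventions: the class is exposed as tl.layers.DeConv2d, inputs are wrapped with tl.layers.InputLayer, and the placeholder shape and layer names are illustrative rather than taken from the snippet.

import tensorflow as tf
import tensorlayer as tl

# NHWC placeholder; with 'SAME' padding and stride 2 the transposed conv doubles H and W.
x = tf.placeholder(tf.float32, [None, 32, 32, 3])
net = tl.layers.InputLayer(x, name='input')
net = tl.layers.DeConv2d(net, n_filter=64, filter_size=(3, 3), strides=(2, 2),
                         act=tf.nn.relu, name='deconv2d_1')
print(net.outputs)  # expected shape: (?, 64, 64, 64)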
Example #2
    def __init__(self,
                 prev_layer,
                 n_filter=32,
                 filter_size=5,
                 stride=1,
                 dilation_rate=1,
                 act=None,
                 padding='SAME',
                 data_format="channels_last",
                 W_init=tf.truncated_normal_initializer(stddev=0.02),
                 b_init=tf.constant_initializer(value=0.0),
                 W_init_args=None,
                 b_init_args=None,
                 name='conv1d'):
        super(Conv1d, self).__init__(prev_layer=prev_layer,
                                     act=act,
                                     W_init_args=W_init_args,
                                     b_init_args=b_init_args,
                                     name=name)

        logging.info(
            "Conv1d %s: n_filter: %d filter_size: %s stride: %d pad: %s act: %s dilation_rate: %d"
            % (self.name, n_filter, filter_size, stride, padding,
               self.act.__name__ if self.act is not None else 'No Activation',
               dilation_rate))

        _conv1d = tf.layers.Conv1D(filters=n_filter,
                                   kernel_size=filter_size,
                                   strides=stride,
                                   padding=padding,
                                   data_format=data_format,
                                   dilation_rate=dilation_rate,
                                   activation=self.act,
                                   use_bias=(True if b_init else False),
                                   kernel_initializer=W_init,
                                   bias_initializer=b_init,
                                   name=name)

        # _conv1d.dtype = LayersConfig.tf_dtype   # unsupported; the layer uses the same dtype as its inputs
        self.outputs = _conv1d(self.inputs)
        # new_variables = _conv1d.weights  # new_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)
        # new_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=self.name)  #vs.name)
        new_variables = get_collection_trainable(self.name)

        self._add_layers(self.outputs)
        self._add_params(new_variables)
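
A minimal usage sketch under the same assumptions (tl.layers export, TF 1.x placeholders; the shapes and names below are illustrative):

import tensorflow as tf
import tensorlayer as tl

# [batch, width, channels] input for a channels_last 1-D convolution.
x = tf.placeholder(tf.float32, [None, 100, 16])
net = tl.layers.InputLayer(x, name='input')
net = tl.layers.Conv1d(net, n_filter=32, filter_size=5, stride=2,
                       act=tf.nn.relu, name='conv1d_1')
print(net.outputs)  # expected shape: (?, 50, 32) with 'SAME' padding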
Example #3
    def __init__(
            self,
            prev_layer,
            n_filter=32,
            filter_size=(3, 3, 3),
            strides=(2, 2, 2),
            padding='SAME',
            act=None,
            W_init=tf.truncated_normal_initializer(stddev=0.02),
            b_init=tf.constant_initializer(value=0.0),
            W_init_args=None,  # TODO: Remove when TF <1.3 not supported
            b_init_args=None,  # TODO: Remove when TF <1.3 not supported
            name='decnn3d'):
        super(DeConv3d, self).__init__(prev_layer=prev_layer,
                                       act=act,
                                       W_init_args=W_init_args,
                                       b_init_args=b_init_args,
                                       name=name)

        logging.info(
            "DeConv3d %s: n_filters: %s strides: %s pad: %s act: %s" %
            (self.name, str(n_filter), str(strides), padding,
             self.act.__name__ if self.act is not None else 'No Activation'))

        # with tf.variable_scope(name) as vs:
        nn = tf.layers.Conv3DTranspose(filters=n_filter,
                                       kernel_size=filter_size,
                                       strides=strides,
                                       padding=padding,
                                       activation=self.act,
                                       kernel_initializer=W_init,
                                       bias_initializer=b_init,
                                       name=name)

        self.outputs = nn(self.inputs)
        # new_variables = nn.weights  # tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)
        # new_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=self.name)  #vs.name)
        new_variables = get_collection_trainable(self.name)

        self._add_layers(self.outputs)
        self._add_params(new_variables)
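
A minimal usage sketch, again assuming a tl.layers.DeConv3d export and TF 1.x placeholders (illustrative shapes and names):

import tensorflow as tf
import tensorlayer as tl

# 5-D NDHWC volume; strides of (2, 2, 2) with 'SAME' padding double each spatial dimension.
x = tf.placeholder(tf.float32, [None, 8, 16, 16, 4])
net = tl.layers.InputLayer(x, name='input')
net = tl.layers.DeConv3d(net, n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2),
                         act=tf.nn.relu, name='deconv3d_1')
print(net.outputs)  # expected shape: (?, 16, 32, 32, 32)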
Example #4
    def __init__(
            self,
            prev_layer,
            n_filter=100,
            filter_size=3,
            strides=1,
            act=None,
            padding='valid',
            data_format='channels_last',
            dilation_rate=1,
            depth_multiplier=1,
            # activation=None,
            # use_bias=True,
            depthwise_init=None,
            pointwise_init=None,
            b_init=tf.zeros_initializer(),
            # depthwise_regularizer=None,
            # pointwise_regularizer=None,
            # bias_regularizer=None,
            # activity_regularizer=None,
            # depthwise_constraint=None,
            # pointwise_constraint=None,
            # W_init=tf.truncated_normal_initializer(stddev=0.1),
            # b_init=tf.constant_initializer(value=0.0),
            W_init_args=None,  # TODO: Remove when TF <1.3 not supported
            b_init_args=None,  # TODO: Remove when TF <1.3 not supported
            name='separable1d',
    ):
        super(SeparableConv1d, self).__init__(prev_layer=prev_layer,
                                              act=act,
                                              W_init_args=W_init_args,
                                              b_init_args=b_init_args,
                                              name=name)

        logging.info(
            "SeparableConv1d %s: n_filter: %d filter_size: %s strides: %s depth_multiplier: %d act: %s" % (
                self.name, n_filter, str(filter_size), str(strides), depth_multiplier, self.act.__name__
                if self.act is not None else 'No Activation'
            )
        )
        # with tf.variable_scope(name) as vs:
        nn = tf.layers.SeparableConv1D(
            filters=n_filter,
            kernel_size=filter_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            depth_multiplier=depth_multiplier,
            activation=self.act,
            use_bias=(True if b_init is not None else False),
            depthwise_initializer=depthwise_init,
            pointwise_initializer=pointwise_init,
            bias_initializer=b_init,
            # depthwise_regularizer=None,
            # pointwise_regularizer=None,
            # bias_regularizer=None,
            # activity_regularizer=None,
            # depthwise_constraint=None,
            # pointwise_constraint=None,
            # bias_constraint=None,
            trainable=True,
            name=name
        )

        self.outputs = nn(self.inputs)
        # new_variables = nn.weights
        # new_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=self.name)  #vs.name)
        new_variables = get_collection_trainable(self.name)

        self._add_layers(self.outputs)
        self._add_params(new_variables)
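
A minimal usage sketch under the same assumptions (tl.layers.SeparableConv1d export, TF 1.x placeholders, illustrative shapes and names):

import tensorflow as tf
import tensorlayer as tl

# Depthwise-then-pointwise 1-D convolution; 'valid' padding trims (filter_size - 1) steps.
x = tf.placeholder(tf.float32, [None, 100, 16])
net = tl.layers.InputLayer(x, name='input')
net = tl.layers.SeparableConv1d(net, n_filter=64, filter_size=3, strides=1,
                                depth_multiplier=2, act=tf.nn.relu, name='sep1d_1')
print(net.outputs)  # expected shape: (?, 98, 64)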
Example #5
    def __init__(self,
                 prev_layer,
                 groups=32,
                 epsilon=1e-06,
                 act=None,
                 data_format='channels_last',
                 name='groupnorm'):
        super(GroupNormLayer, self).__init__(prev_layer=prev_layer,
                                             act=act,
                                             name=name)

        logging.info(
            "GroupNormLayer %s: act: %s" %
            (self.name,
             self.act.__name__ if self.act is not None else 'No Activation'))

        shape = self.inputs.get_shape().as_list()
        if len(shape) != 4:
            raise Exception("GroupNormLayer only supports 4-D inputs (batches of 2D images).")

        if data_format == 'channels_last':
            channels = shape[-1]
            int_shape = tf.concat([
                tf.shape(self.inputs)[0:3],
                tf.convert_to_tensor([groups, channels // groups])
            ],
                                  axis=0)
        elif data_format == 'channels_first':
            channels = shape[1]
            int_shape = tf.concat([
                tf.shape(self.inputs)[0:1],
                tf.convert_to_tensor([groups, channels // groups]),
                tf.shape(self.inputs)[2:4]
            ],
                                  axis=0)
        else:
            raise ValueError(
                "data_format must be 'channels_last' or 'channels_first'.")

        if groups > channels:
            raise ValueError('Invalid groups %d for %d channels.' %
                             (groups, channels))
        if channels % groups != 0:
            raise ValueError(
                '%d channels is not commensurate with %d groups.' %
                (channels, groups))

        with tf.variable_scope(name):
            x = tf.reshape(self.inputs, int_shape)
            if data_format == 'channels_last':
                mean, var = tf.nn.moments(x, [1, 2, 4], keep_dims=True)
                gamma = tf.get_variable('gamma',
                                        channels,
                                        initializer=tf.ones_initializer())
                beta = tf.get_variable('beta',
                                       channels,
                                       initializer=tf.zeros_initializer())
            else:
                mean, var = tf.nn.moments(x, [2, 3, 4], keep_dims=True)
                gamma = tf.get_variable('gamma', [1, channels, 1, 1],
                                        initializer=tf.ones_initializer())
                beta = tf.get_variable('beta', [1, channels, 1, 1],
                                       initializer=tf.zeros_initializer())

            x = (x - mean) / tf.sqrt(var + epsilon)

            self.outputs = tf.reshape(x, tf.shape(self.inputs)) * gamma + beta
            self.outputs = self._apply_activation(self.outputs)

        variables = get_collection_trainable(self.name)

        self._add_layers(self.outputs)
        self._add_params(variables)
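
A minimal usage sketch, assuming the layer is exposed as tl.layers.GroupNormLayer (placeholder shape and names are illustrative); note that groups must evenly divide the channel count.

import tensorflow as tf
import tensorlayer as tl

# 4-D NHWC input with 64 channels, normalized in 16 groups of 4 channels each.
x = tf.placeholder(tf.float32, [None, 32, 32, 64])
net = tl.layers.InputLayer(x, name='input')
net = tl.layers.GroupNormLayer(net, groups=16, act=tf.nn.relu, name='groupnorm_1')
print(net.outputs)  # same shape as the input: (?, 32, 32, 64)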
Example #6
    def __init__(
        self,
        prev_layer,
        n_filter=32,
        filter_size=(3, 3),
        strides=(1, 1),
        act=None,
        padding='SAME',
        data_format='channels_last',
        dilation_rate=(1, 1),
        W_init=tf.truncated_normal_initializer(stddev=0.02),
        b_init=tf.constant_initializer(value=0.0),
        W_init_args=None,
        b_init_args=None,
        use_cudnn_on_gpu=None,
        name='conv2d',
    ):
        # if len(strides) != 2:
        #     raise ValueError("len(strides) should be 2, Conv2d and Conv2dLayer are different.")

        # try:
        #     pre_channel = int(layer.outputs.get_shape()[-1])

        # except Exception:  # if pre_channel is ?, it happens when using Spatial Transformer Net
        #     pre_channel = 1
        #     logging.info("[warnings] unknow input channels, set to 1")

        super(Conv2d, self).__init__(prev_layer=prev_layer,
                                     act=act,
                                     W_init_args=W_init_args,
                                     b_init_args=b_init_args,
                                     name=name)

        logging.info(
            "Conv2d %s: n_filter: %d filter_size: %s strides: %s pad: %s act: %s"
            % (self.name, n_filter, str(filter_size), str(strides), padding,
               self.act.__name__ if self.act is not None else 'No Activation'))
        # with tf.variable_scope(name) as vs:
        conv2d = tf.layers.Conv2D(
            # inputs=self.inputs,
            filters=n_filter,
            kernel_size=filter_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=self.act,
            use_bias=(False if b_init is None else True),
            kernel_initializer=W_init,  # None,
            bias_initializer=b_init,  # f.zeros_initializer(),
            kernel_regularizer=None,
            bias_regularizer=None,
            activity_regularizer=None,
            kernel_constraint=None,
            bias_constraint=None,
            trainable=True,
            name=name,
            # reuse=None,
        )
        self.outputs = conv2d(self.inputs)  # must be called before collecting ``new_variables``
        # new_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=self.name)  #vs.name)
        new_variables = get_collection_trainable(self.name)

        self._add_layers(self.outputs)
        self._add_params(new_variables)
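
A minimal usage sketch under the same assumptions (tl.layers.Conv2d export, TF 1.x placeholders, illustrative shapes and names):

import tensorflow as tf
import tensorlayer as tl

# Standard 2-D convolution on an NHWC placeholder.
x = tf.placeholder(tf.float32, [None, 28, 28, 1])
net = tl.layers.InputLayer(x, name='input')
net = tl.layers.Conv2d(net, n_filter=32, filter_size=(3, 3), strides=(1, 1),
                       act=tf.nn.relu, name='conv2d_1')
print(net.outputs)  # expected shape: (?, 28, 28, 32) with 'SAME' padding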