Example #1
    def call(self, inputs, params=None):
        if params is None or params.get(self.name + '/depthwise_kernel:0') is None:
            return super(layers.SeparableConv2D, self).call(inputs)
        else:
            depthwise_kernel = params.get(self.name + '/depthwise_kernel:0')
            pointwise_kernel = params.get(self.name + '/pointwise_kernel:0')
            bias = params.get(self.name + '/bias:0')
        # Apply the actual ops.
        if self.data_format == 'channels_last':
            strides = (1, ) + self.strides + (1, )
        else:
            strides = (1, 1) + self.strides
        outputs = nn.separable_conv2d(
            inputs,
            depthwise_kernel,
            pointwise_kernel,
            strides=strides,
            padding=self.padding.upper(),
            rate=self.dilation_rate,
            data_format=conv_utils.convert_data_format(self.data_format,
                                                       ndim=4))

        if self.use_bias:
            outputs = nn.bias_add(outputs,
                                  bias,
                                  data_format=conv_utils.convert_data_format(
                                      self.data_format, ndim=4))

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
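
Every example on this page funnels a Keras-style data_format string ('channels_first' / 'channels_last') through conv_utils.convert_data_format to get the TF-native layout string. A minimal sketch of just that mapping, assuming the internal TF2 import path for conv_utils (the module has moved between releases, so treat the import as an assumption):

    from tensorflow.python.keras.utils import conv_utils

    print(conv_utils.convert_data_format('channels_last', ndim=4))   # NHWC
    print(conv_utils.convert_data_format('channels_first', ndim=4))  # NCHW
    print(conv_utils.convert_data_format('channels_last', ndim=3))   # NWC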
Example #2
    def call(self, inputs):
        inputs_shape = array_ops.shape(inputs)
        batch_size = inputs_shape[0]
        if self.data_format == 'channels_first':
            w_axis = 2
        else:
            w_axis = 1

        width = inputs_shape[w_axis]
        kernel_w, = self.kernel_size
        stride_w, = self.strides

        if self.output_padding is None:
            out_pad_w = None
        else:
            out_pad_w, = self.output_padding

        # Infer the dynamic output shape:
        out_width = conv_utils.deconv_output_length(
            width,
            kernel_w,
            padding=self.padding,
            output_padding=out_pad_w,
            stride=stride_w,
            dilation=self.dilation_rate[0],
        )
        if self.data_format == 'channels_first':
            output_shape = (batch_size, self.filters, out_width)
        else:
            output_shape = (batch_size, out_width, self.filters)

        output_shape_tensor = array_ops.stack(output_shape)
        outputs = conv1d_transpose(
            inputs,
            self.kernel,
            output_shape_tensor,
            stride=stride_w,
            padding=self.padding.upper(),
            data_format=conv_utils.convert_data_format(self.data_format, ndim=3),
        )

        if not context.executing_eagerly():
            # Infer the static output shape:
            out_shape = self.compute_output_shape(inputs.shape)
            outputs.set_shape(out_shape)

        if self.use_bias:
            outputs = nn.bias_add(
                outputs,
                self.bias,
                data_format=conv_utils.convert_data_format(self.data_format, ndim=4),
            )

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
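
The dynamic shape inference in Example #2 comes from conv_utils.deconv_output_length. A quick worked check of its arithmetic (a sketch; same caveat about the conv_utils import path as above):

    from tensorflow.python.keras.utils import conv_utils

    # 'same' padding: out = in * stride
    print(conv_utils.deconv_output_length(10, 3, padding='same', stride=2))   # 20
    # 'valid' padding: out = in * stride + max(kernel - stride, 0)
    print(conv_utils.deconv_output_length(10, 3, padding='valid', stride=2))  # 21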
Example #3
    def build(self, input_shape):
        with tf.variable_scope(self.name):
            input_shape = TensorShape(input_shape)
            if self.data_format == 'channels_first':
                channel_axis = 1
            else:
                channel_axis = -1
            if input_shape[channel_axis].value is None:
                raise ValueError('The channel dimension of the inputs '
                                 'should be defined. Found `None`.')
            input_dim = int(input_shape[channel_axis])
            kernel_shape = self.kernel_size + (input_dim, self.filters)

            print("input_shape: {}".format(input_shape))
            self.kernel = sn_kernel(shape=kernel_shape, scope='kernel')
            self.bias = tf.get_variable(
                name='bias',
                shape=[self.filters],
                initializer=tf.initializers.zeros(dtype=self.dtype))
            self._convolution_op = Convolution(
                input_shape,
                filter_shape=self.kernel.get_shape(),
                dilation_rate=self.dilation_rate,
                strides=self.strides,
                padding=self.padding.upper(),
                data_format=conv_utils.convert_data_format(
                    self.data_format, self.rank + 2))
            self.built = True
Example #4
    def build_dilated_conv(self, input_shape, input_dim):
        """
        Define trainable kernel and bias variables for dilated conv layers
        Prepare conv operations
        """
        kernel_shape = self.kernel_size + (input_dim, self.filters)

        self.kernel = self.add_weight(name='kernel',
                                      shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint,
                                      trainable=True,
                                      dtype=self.dtype)
        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(self.filters, ),
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        trainable=True,
                                        dtype=self.dtype)
        else:
            self.bias = None

        for dilation in self.dilations:
            convolution_op = nn_ops.Convolution(
                input_shape,
                filter_shape=self.kernel.get_shape(),
                dilation_rate=dilation,
                strides=self.strides,
                padding="SAME",
                data_format=conv_utils.convert_data_format(
                    self.data_format, self.rank + 2))
            self.conv_ops.append(convolution_op)
Example #5
    def call(self, inputs):
        data_format = conv_utils.convert_data_format(self.data_format,
                                                     self.rank + 2)
        inputs, tf_data_format = K._preprocess_conv2d_input(
            inputs, self.data_format)

        inputs = tf.extract_image_patches(
            inputs,
            ksizes=(1, ) + K.int_shape(self.kernel)[:2] + (1, ),
            strides=(1, ) + self.strides + (1, ),
            rates=(1, ) + self.dilation_rate + (1, ),
            padding=self.padding.upper(),
        )

        kernel = K.reshape(self.kernel, (-1, self.filters))
        outputs = self.kernel_function([inputs, kernel])

        if self.data_format == 'channels_first':
            outputs = K.permute_dimensions(outputs, (0, 3, 1, 2))

        if self.use_bias:
            outputs = nn.bias_add(outputs, self.bias, data_format=data_format)

        if self.activation is not None:
            outputs = self.activation(outputs)
        return outputs
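
Example #5 implements a convolution-like layer by extracting image patches and applying a kernel function instead of a native conv op. The TF1 tf.extract_image_patches used there is exposed in TF2 as tf.image.extract_patches (with sizes= instead of ksizes=); a minimal sketch of the shape behaviour:

    import tensorflow as tf

    x = tf.random.normal((1, 4, 4, 3))
    patches = tf.image.extract_patches(x,
                                       sizes=(1, 2, 2, 1),
                                       strides=(1, 2, 2, 1),
                                       rates=(1, 1, 1, 1),
                                       padding='VALID')
    print(patches.shape)  # (1, 2, 2, 12): each 2x2x3 patch flattened to 12 values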
Example #6
    def build(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape)
        input_channel = self._get_input_channel(input_shape)
        kernel_shape = self.kernel_size + (input_channel, self.filters)

        self.kernel = self.add_weight(name='kernel',
                                      shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint,
                                      trainable=True,
                                      dtype=self.dtype)

        # Disable variable partitioning when creating the variable.
        partitioner = None
        try:
            if hasattr(self, '_scope') and self._scope:
                partitioner = self._scope.partitioner
                self._scope.set_partitioner(None)

            self.u = self.add_weight(
                name='sn_u',
                shape=(1, tf.reduce_prod(kernel_shape[:-1])),
                dtype=self.dtype,
                initializer=tf.keras.initializers.ones,
                synchronization=tf.VariableSynchronization.ON_READ,
                trainable=False,
                aggregation=tf.VariableAggregation.MEAN)
        finally:
            if partitioner:
                self._scope.set_partitioner(partitioner)

        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(self.filters, ),
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        trainable=True,
                                        dtype=self.dtype)
        else:
            self.bias = None
        channel_axis = self._get_channel_axis()
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_channel})

        self._build_conv_op_input_shape = input_shape
        self._build_input_channel = input_channel
        self._padding_op = self._get_padding_op()
        self._conv_op_data_format = conv_utils.convert_data_format(
            self.data_format, self.rank + 2)
        self._convolution_op = nn_ops.Convolution(
            input_shape,
            filter_shape=self.kernel.shape,
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=self._padding_op,
            data_format=self._conv_op_data_format)
        self.built = True
Example #7
    def __init__(
        self,
        wavelet,
        level=1,
        strides=2,
        rank=1,
        mode='symmetric',
        data_format=None,
        **kwargs
    ):
        L.Layer.__init__(self, **kwargs)
        Wavelet.__init__(self, wavelet, level)

        self.mode = mode
        self.rank = rank

        self.strides = conv_utils.normalize_tuple(strides, rank, "strides")
        self.data_format = conv_utils.normalize_data_format(data_format)
        self._channels_first = self.data_format == "channels_first"

        # All the convolutions will be permuted to channels last
        self.conv_format = conv_utils.convert_data_format(
            "channels_last", self.rank+2)

        self.filters = self.build_filters(rank, self.dec_lo, self.dec_hi)

        offset = 1 if self.p % 2 or self.p <= 2 else 2
        padding = self.rank * ([self.p-1, self.p-offset],)

        if self._channels_first:
            self.padding = tf.constant([[0, 0], [0, 0], *padding])
        else:
            self.padding = tf.constant([[0, 0], *padding, [0, 0]])
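
To make the padding arithmetic at the end of Example #7 concrete: for a length-4 decomposition filter (self.p == 4) at rank 1, the offset is 2, so the single spatial axis is padded by [3, 2]. A standalone sketch assuming channels_last (the filter length is a made-up value for illustration):

    import tensorflow as tf

    p, rank, offset = 4, 1, 2                      # offset = 1 if p % 2 or p <= 2 else 2
    padding = rank * ([p - 1, p - offset],)        # ([3, 2],)
    pad = tf.constant([[0, 0], *padding, [0, 0]])  # batch and channel axes unpadded
    print(pad.numpy())                             # [[0 0] [3 2] [0 0]]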
Example #8
    def build(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape)
        input_channel = self._get_input_channel(input_shape)
        kernel_shape = self.kernel_size + (input_channel, self.filters)

        self.kernel = self.add_weight(name='kernel',
                                      shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint,
                                      trainable=True,
                                      dtype=self.dtype)

        channel_axis = self._get_channel_axis()
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_channel})

        self._build_conv_op_input_shape = input_shape
        self._build_input_channel = input_channel
        self._padding_op = self._get_padding_op()
        self._conv_op_data_format = conv_utils.convert_data_format(
            self.data_format, self.rank + 2)
        self._convolution_op = nn_ops.Convolution(
            input_shape,
            filter_shape=self.kernel.shape,
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=self._padding_op,
            data_format=self._conv_op_data_format)

        self.mask = self.create_mask(self.mask_type, self.color_conditioning)

        self.built = True
Example #9
    def Conv2D(self, error, layer):
        """
            Todo: Propagate error through Conv2D layer
            :param error: Propagated Error Tensor from previous layer --> l-1
            :param layer: Current Layer for doing error Propagation --> l
            :return: Propagated Error for next layer --> l
        """


        if error is None:
            return None

        logging.info(f'Propagate Error {error.shape}')

        error = tf.nn.conv2d(input=error,
                             filters=layer.weights[0]**2,
                             strides=layer.strides,
                             # Padding must be uppercase here; the model stores it
                             # lowercase (see Conv(Layer)._get_padding_op).
                             padding=layer.padding.upper(),
                             # conv_utils.convert_data_format converts to 'NH...'.
                             data_format=convert_data_format(layer.data_format,
                                                             error.shape.ndims),
                             dilations=layer.dilation_rate)
        if self._debug[self.DEBUG.plot]:
            if not isinstance(error, np.ndarray):
                error = error.numpy()

            sns.kdeplot(error.flatten(), cumulative=False, bw=0.001, shade=True,
                        label=f"Error-{layer.__class__.__name__}")
            plt.legend()

        # Check if an activation function is placed within the layer.
        return self.Activation(error, layer)
Example #10
    def build(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape)
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape.dims[channel_axis].value is None:
            raise ValueError(
                'The channel dimension of the inputs should be defined. Found `None`.'
            )
        input_dim = int(input_shape[channel_axis])

        wshape = [
            self.kernel_size[0] * self.kernel_size[1] - 1, input_dim,
            self.filters
        ]
        W = self.add_weight(name='kernel',
                            shape=wshape,
                            initializer=self.kernel_initializer,
                            regularizer=self.kernel_regularizer,
                            constraint=self.kernel_constraint,
                            trainable=True,
                            dtype=self.dtype)

        self.kernel = array_ops.concat(
            (W[:wshape[0] // 2], array_ops.zeros(
                (1, wshape[1], wshape[2])), W[wshape[0] // 2:]),
            axis=0)
        self.kernel = array_ops.reshape(self.kernel, [
            self.kernel_size[0], self.kernel_size[1], input_dim, self.filters
        ])

        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(self.filters, ),
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        trainable=True,
                                        dtype=self.dtype)
        else:
            self.bias = None

        self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
        if self.padding == 'causal':
            op_padding = 'valid'
        else:
            op_padding = self.padding
        if not isinstance(op_padding, (list, tuple)):
            op_padding = op_padding.upper()

        self._convolution_op = nn_ops.Convolution(
            input_shape,
            filter_shape=self.kernel.shape,
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=op_padding,
            data_format=conv_utils.convert_data_format(self.data_format, 4))
        self.built = True
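
A quick way to convince yourself that the concat/reshape in Example #10 really pins the centre tap of each 3x3 kernel to zero (a standalone sketch with made-up channel counts):

    import tensorflow as tf

    input_dim, filters = 2, 5
    W = tf.ones((3 * 3 - 1, input_dim, filters))  # every tap except the centre
    kernel = tf.concat((W[:4], tf.zeros((1, input_dim, filters)), W[4:]), axis=0)
    kernel = tf.reshape(kernel, (3, 3, input_dim, filters))
    print(kernel[1, 1, 0, 0].numpy())             # 0.0: the centre weight is masked out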
Example #11
    def build(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape)
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape.dims[channel_axis].value is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')

        input_dim = int(input_shape[channel_axis])
        kernel_shape = self.kernel_size + (input_dim, self.filters)

        self.wn_g = self.add_weight(
            name='wn_g',
            shape=(self.filters, ),
            initializer=tf.keras.initializers.RandomUniform(1, 1),
            trainable=True,
            dtype=self.dtype)

        self.kernel = self.add_weight(name='kernel',
                                      shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint,
                                      trainable=True,
                                      dtype=self.dtype)

        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(self.filters, ),
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        trainable=True,
                                        dtype=self.dtype)
        else:
            self.bias = None

        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})

        if self.padding == 'causal':
            op_padding = 'valid'
        else:
            op_padding = self.padding
        if not isinstance(op_padding, (list, tuple)):
            op_padding = op_padding.upper()

        self._convolution_op = nn_ops.Convolution(
            input_shape,
            filter_shape=self.kernel.shape,
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=op_padding,
            data_format=conv_utils.convert_data_format(self.data_format,
                                                       self.rank + 2))

        self.built = True
Example #12
    def call(self, inputs, training=True):  # copied from the framework's source code
        inputs_shape = array_ops.shape(inputs)
        batch_size = inputs_shape[0]
        if self.data_format == 'channels_first':
            h_axis, w_axis = 2, 3
        else:
            h_axis, w_axis = 1, 2

        height, width = inputs_shape[h_axis], inputs_shape[w_axis]
        kernel_h, kernel_w = self.kernel_size
        stride_h, stride_w = self.strides

        if self.output_padding is None:
            out_pad_h = out_pad_w = None
        else:
            out_pad_h, out_pad_w = self.output_padding

        # Infer the dynamic output shape:
        out_height = conv_utils.deconv_output_length(height,
                                                     kernel_h,
                                                     padding=self.padding,
                                                     output_padding=out_pad_h,
                                                     stride=stride_h,
                                                     dilation=self.dilation_rate[0])
        out_width = conv_utils.deconv_output_length(width,
                                                    kernel_w,
                                                    padding=self.padding,
                                                    output_padding=out_pad_w,
                                                    stride=stride_w,
                                                    dilation=self.dilation_rate[1])
        if self.data_format == 'channels_first':
            output_shape = (batch_size, self.filters, out_height, out_width)
        else:
            output_shape = (batch_size, out_height, out_width, self.filters)

        output_shape_tensor = array_ops.stack(output_shape)
        outputs = backend.conv2d_transpose(
            inputs,
            self.compute_spectral_normal(training=training),  # self.kernel,
            output_shape_tensor,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)

        if not context.executing_eagerly():
            # Infer the static output shape:
            out_shape = self.compute_output_shape(inputs.shape)
            outputs.set_shape(out_shape)

        if self.use_bias:
            outputs = nn.bias_add(
                outputs,
                self.bias,
                data_format=conv_utils.convert_data_format(self.data_format, ndim=4))

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
Example #13
 def call(self, inputs, **kwargs):
     outputs = self.pool_function(
         inputs,
         self.pool_size,
         strides=self.strides,
         padding=self.padding.upper(),
         data_format=conv_utils.convert_data_format(self.data_format, 3))
     return outputs
Example #14
    def __init__(self,
                 rank,
                 filters,
                 kernel_size,
                 strides=1,
                 padding='valid',
                 data_format=None,
                 dilation_rate=1,
                 groups=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 trainable=True,
                 name=None,
                 conv_op=None,
                 **kwargs):
        super(Conv_ada, self).__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)
        self.rank = rank

        if isinstance(filters, float):
            filters = int(filters)
        self.filters = filters
        self.groups = groups or 1
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                      'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, rank, 'dilation_rate')

        self.activation = activations.get(activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(min_ndim=self.rank + 2)

        self._validate_init()
        self._is_causal = self.padding == 'causal'
        self._channels_first = self.data_format == 'channels_first'
        self._tf_data_format = conv_utils.convert_data_format(
            self.data_format, self.rank + 2)
Example #15
    def build(
        self,
        input_shape,
    ):
        input_shape = tf.TensorShape(input_shape)
        input_channel = self._get_input_channel(input_shape)
        kernel_shape = self.kernel_size + (input_channel, self.filters)

        self.kernel = self.add_weight(
            name="kernel",
            shape=kernel_shape,
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            trainable=True,
            dtype=self.dtype,
        )
        # Add a mask that zeroes the left half of the kernel so that only
        # future context is kept.
        left_kernel_dims = (self.future_context, input_channel, self.filters)
        left_kernel = tf.fill(dims=left_kernel_dims, value=0)
        right_kernel_dims = (self.future_context + 1, input_channel, self.filters)
        right_kernel = tf.fill(dims=right_kernel_dims, value=1)
        mask_kernel = tf.cast(tf.concat([left_kernel, right_kernel], axis=0), dtype=self.dtype)
        self.kernel = tf.multiply(self.kernel, mask_kernel)

        if self.use_bias:
            self.bias = self.add_weight(
                name="bias",
                shape=(self.filters,),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True,
                dtype=self.dtype,
            )
        else:
            self.bias = None
        channel_axis = self._get_channel_axis()
        self.input_spec = tf.keras.layers.InputSpec(ndim=self.rank + 2, axes={channel_axis: input_channel})

        self.make_conv_op_input_shape = input_shape
        self.make_input_channel = input_channel
        self._padding_op = self._get_padding_op()
        self._conv_op_data_format = conv_utils.convert_data_format(self.data_format, self.rank + 2)
        self._convolution_op = nn_ops.Convolution(
            input_shape,
            filter_shape=self.kernel.shape,
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=self._padding_op,
            data_format=self._conv_op_data_format,
        )
        self.built = True
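
For a concrete picture of the mask built in Example #15: with future_context = 2 the kernel width is 2 * future_context + 1 = 5, and the two leftmost taps are zeroed so only the current step and future context survive. A sketch with the channel dimensions collapsed to 1:

    import tensorflow as tf

    future_context = 2
    left = tf.fill(dims=(future_context, 1, 1), value=0)
    right = tf.fill(dims=(future_context + 1, 1, 1), value=1)
    mask = tf.cast(tf.concat([left, right], axis=0), tf.float32)
    print(tf.squeeze(mask).numpy())  # [0. 0. 1. 1. 1.]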
Example #16
    def call(self, inputs, **kwargs):
        M = self.kernel_size[0]
        N = self.kernel_size[1]
        # (D_mul, input_channel // groups, filters)
        W_shape = self.W.get_shape().as_list()
        # (M, N, input_channel // groups, filters)
        DoW_shape = (M, N, W_shape[1], W_shape[2])
        if M * N > 1:
            input_channel = W_shape[1] * self.groups

            D_diag = tf.tile(tf.reshape(tf.eye(M * N), (M * N, M * N, 1)),
                             (1, self.D_mul // (M * N), input_channel))
            if self.D_mul % (M * N) != 0:  # the cases when D_mul > MxN
                zeros = tf.zeros((M * N, self.D_mul % (M * N), input_channel))
                D_diag = tf.concat([D_diag, zeros], axis=1)

            ######################### Compute DoW #################
            # (M * N, D_mul, input_channel)
            D = self.D + D_diag
            # (D_mul, input_channel, filters // groups)
            W = tf.reshape(self.W, (self.D_mul, input_channel, -1))

            # einsum outputs (M * N, input_channel, filters // groups),
            # which is reshaped to
            # (M, N, input_channel // groups, filters)
            DoW = tf.reshape(tf.einsum('msi,sio->mio', D, W), DoW_shape)
            #######################################################
        else:
            # in this case D_mul == M*N
            # reshape from
            # (D_mul, input_channel // groups, filters)
            # to
            # (M, N, input_channel // groups, filters)
            DoW = tf.reshape(self.W, DoW_shape)

        data_format = conv_utils.convert_data_format(self.data_format, ndim=4)
        outputs = tf.nn.conv2d(inputs, DoW, strides=self.strides,
                               padding=self.padding.upper(),
                               data_format=data_format,
                               dilations=self.dilation_rate)

        if self.use_bias:
            if self.data_format == 'channels_first':
                if self.rank == 1:
                    # nn.bias_add does not accept a 1D input tensor.
                    bias = array_ops.reshape(self.bias, (1, self.filters, 1))
                    outputs += bias
                else:
                    outputs = nn.bias_add(outputs, self.bias, data_format='NCHW')
            else:
                outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
Example #17
 def convolutional_similarity_single(input_i: tf.Tensor, bias_i: tf.Tensor):
     # tf.nn.conv* likes its filters shaped `spatial_dims + (channels_in, channels_out)`,
     # e.g. conv2d filters are shaped `(H, W, C, filters)`.
     # Since I only want one filter, I insert an axis at -1.
     bias_i = tf.expand_dims(bias_i, axis=-1)
     conv_norm = tf.nn.convolution(input=input_i,
                                   filters=bias_i,
                                   strides=strides,
                                   padding=padding,
                                   data_format=convert_data_format('channels_last', ndim=ndim))
     return tf.abs(tf.squeeze(conv_norm, axis=-1))
Example #18
  def test_convert_data_format(self):
    self.assertEqual('NCDHW', conv_utils.convert_data_format(
        'channels_first', 5))
    self.assertEqual('NCHW', conv_utils.convert_data_format(
        'channels_first', 4))
    self.assertEqual('NCW', conv_utils.convert_data_format('channels_first', 3))
    self.assertEqual('NHWC', conv_utils.convert_data_format('channels_last', 4))
    self.assertEqual('NWC', conv_utils.convert_data_format('channels_last', 3))
    self.assertEqual('NDHWC', conv_utils.convert_data_format(
        'channels_last', 5))

    with self.assertRaises(ValueError):
      conv_utils.convert_data_format('invalid', 2)
Example #19
    def build(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape)
        input_channel = self._get_input_channel(input_shape)
        if input_channel % self.groups != 0:
            raise ValueError(
                'The number of input channels must be evenly divisible by the number '
                'of groups. Received groups={}, but the input has {} channels '
                '(full input shape is {}).'.format(self.groups, input_channel,
                                                   input_shape))
        kernel_shape = self.kernel_size + (
            input_channel // self.groups,
            self.filters,
        )

        self.kernel = self.add_weight(
            name='kernel',
            shape=kernel_shape,
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            trainable=True,
            dtype=self.dtype,
        )
        if self.use_bias:
            self.bias = self.add_weight(
                name='bias',
                shape=(self.filters, ),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True,
                dtype=self.dtype,
            )
        else:
            self.bias = None
        channel_axis = self._get_channel_axis()
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_channel})

        self._build_conv_op_input_shape = input_shape
        self._build_input_channel = input_channel
        self._padding_op = self._get_padding_op()
        self._conv_op_data_format = conv_utils.convert_data_format(
            self.data_format, self.rank + 2)
        self._convolution_op = Convolution(
            input_shape,
            filter_shape=self.kernel.shape,
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=self._padding_op,
            data_format=self._conv_op_data_format,
        )
        self.built = True
Example #20
    def build(self, input_shape):
        stack_size, input_dim = input_shape[-2:]
        if stack_size is None or input_dim is None:
            raise ValueError(
                "The two last dimensions of the inputs should be defined. Found `None`."
            )

        kernel_shape = (stack_size, *self.kernel_size, stack_size, input_dim,
                        self.filters)
        self.kernel = self.add_weight(name="kernel",
                                      shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint,
                                      trainable=True,
                                      dtype=self.dtype)

        if self.use_bias:
            bias_shape = (stack_size, self.filters)
            self.bias = self.add_weight(name="bias",
                                        shape=bias_shape,
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        trainable=True,
                                        dtype=self.dtype)
        else:
            self.bias = None

        kernel_mask = get_stack_kernel_mask(self.kernel_size, stack_size,
                                            self.mask_center)
        self.kernel_mask = tf.constant(kernel_mask,
                                       dtype=tf.float32,
                                       name="kernel_mask")

        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={
                                        -1: input_dim,
                                        -2: stack_size
                                    })

        strides = conv_utils.normalize_tuple(1, self.rank, "strides")
        dilation_rate = conv_utils.normalize_tuple(1, self.rank,
                                                   "dilation_rate")
        self._convolution_op = nn_ops.Convolution(
            input_shape=input_shape,
            filter_shape=tf.TensorShape(kernel_shape[1:]),
            dilation_rate=dilation_rate,
            strides=strides,
            padding=get_stack_padding(self.kernel_size),
            data_format=conv_utils.convert_data_format("channels_last",
                                                       self.rank + 2))
Example #21
 def call(self, inputs):
     if self.data_format == 'channels_last':
         pool_shape = (1, ) + self.pool_size + (1, )
         strides = (1, ) + self.strides + (1, )
     else:
         pool_shape = (1, 1) + self.pool_size
         strides = (1, 1) + self.strides
     outputs = self.pool_function(
         inputs,
         ksize=pool_shape,
         strides=strides,
         padding=self.padding.upper(),
         data_format=conv_utils.convert_data_format(self.data_format, 4))
     return outputs
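
Examples #21 and #22 (below) both hand-build the 4-tuple ksize/strides that the raw pooling ops expect. The same thing end to end with a public TF op, as a sketch:

    import tensorflow as tf

    x = tf.random.normal((1, 8, 8, 3))        # NHWC, i.e. channels_last
    y = tf.nn.max_pool2d(x,
                         ksize=(1, 2, 2, 1),  # (1,) + pool_size + (1,)
                         strides=(1, 2, 2, 1),
                         padding='VALID',
                         data_format='NHWC')
    print(y.shape)  # (1, 4, 4, 3)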
Example #22
 def call(self, inputs):
   if self.data_format == 'channels_last':
     pool_shape = (1,) + self.pool_size + (1,)
     strides = (1,) + self.strides + (1,)
   else:
     pool_shape = (1, 1) + self.pool_size
     strides = (1, 1) + self.strides
   outputs = self.pool_function(
       inputs,
       ksize=pool_shape,
       strides=strides,
       padding=self.padding.upper(),
       data_format=conv_utils.convert_data_format(self.data_format, 4))
   return outputs
Example #23
File: l0norm.py, Project: asim800/l0
    def build(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape)
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis].value is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = int(input_shape[channel_axis])
        kernel_shape = self.kernel_size + (input_dim, self.filters)

        self.kernel = self.add_weight(name='kernel',
                                      shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint,
                                      trainable=True,
                                      dtype=self.dtype)
        self.loc = self.add_weight(
            name='loc',
            shape=kernel_shape,
            initializer=tf.keras.initializers.RandomNormal(
                mean=self.loc_mean, stddev=self.loc_stddev, seed=None),
            regularizer=None,
            constraint=None,
            dtype=self.dtype,
            trainable=True)
        self.loc2 = self.loc.numpy()
        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(self.filters, ),
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        trainable=True,
                                        dtype=self.dtype)
        else:
            self.bias = None
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})
        self._convolution_op = nn_ops.Convolution(
            input_shape,
            filter_shape=self.kernel.get_shape(),
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=self.padding.upper(),
            data_format=conv_utils.convert_data_format(self.data_format,
                                                       self.rank + 2))
        self.built = True
Example #24
    def test_convert_data_format(self):
        self.assertEqual('NCDHW',
                         conv_utils.convert_data_format('channels_first', 5))
        self.assertEqual('NCHW',
                         conv_utils.convert_data_format('channels_first', 4))
        self.assertEqual('NCW',
                         conv_utils.convert_data_format('channels_first', 3))
        self.assertEqual('NHWC',
                         conv_utils.convert_data_format('channels_last', 4))
        self.assertEqual('NWC',
                         conv_utils.convert_data_format('channels_last', 3))
        self.assertEqual('NDHWC',
                         conv_utils.convert_data_format('channels_last', 5))

        with self.assertRaises(ValueError):
            conv_utils.convert_data_format('invalid', 2)
Example #25
    def call(self, inputs):
        # Apply the actual ops.
        if self.data_format != 'channels_last':
            raise ValueError("mpusim_separable_conv2d "
                             "requires NHWC data format")

        strides = (1,) + self.strides + (1,)

        outputs = mpusim_separable_conv2d_op_impl(
            inputs,
            self.depthwise_kernel,
            self.pointwise_kernel,
            strides,
            self.padding.upper(),
            self.dilation_rate,
            None,
            conv_utils.convert_data_format(self.data_format, ndim=4),
            self.activations_datatype_size_byte,
            self.weights_datatype_size_byte,
            self.results_datatype_size_byte,
            self.systolic_array_height,
            self.systolic_array_width,
            self.activation_fifo_depth,
            self.accumulator_array_height,
            self.log_file_output_dir,
            self.model_name)

        if self.use_bias:
            outputs = nn.bias_add(outputs,
                                  self.bias,
                                  data_format=conv_utils.convert_data_format(
                                      self.data_format, ndim=4))

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
Example #26
    def build(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape)
        input_channel = self._get_input_channel(input_shape)
        kernel_shape = self.kernel_size + (input_channel, self.filters)

        self.kernel_weights = self.add_weight(
            name='kernel_weights',
            shape=kernel_shape,
            initializer=tf.random_normal_initializer(mean=0.0, stddev=0.05),
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            trainable=True,
            dtype=self.dtype)
        self.kernel_log_scale = self.add_weight(name='kernel_log_scale',
                                                shape=(1, 1, 1, self.filters),
                                                initializer=tf.constant_initializer(value=0.),
                                                regularizer=None,
                                                constraint=None,
                                                trainable=True,
                                                dtype=self.dtype)
        if self.use_bias:
            self.bias = self.add_weight(
                name='bias',
                shape=(self.filters,),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True,
                dtype=self.dtype)
        else:
            self.bias = None

        channel_axis = self._get_channel_axis()
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_channel})

        self._build_conv_op_input_shape = input_shape
        self._build_input_channel = input_channel
        self._padding_op = self._get_padding_op()
        self._conv_op_data_format = conv_utils.convert_data_format(
            self.data_format, self.rank + 2)
        self._convolution_op = nn_ops.Convolution(
            input_shape,
            filter_shape=self.kernel_weights.shape,  # this layer defines kernel_weights, not kernel
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=self._padding_op,
            data_format=self._conv_op_data_format)
        self.built = True
Example #27
 def build(self, input_shape):
     input_shape = tensor_shape.TensorShape(input_shape)
     input_channel = self._get_input_channel(input_shape)
     kernel_shape = self.kernel_size + (input_channel, self.filters)
     if self.use_bias:
         pass
     else:
         self.bias = None
     channel_axis = self._get_channel_axis()
     self.input_spec = InputSpec(ndim=self.rank + 2,
                                 axes={channel_axis: input_channel})
     self._build_conv_op_input_shape = input_shape
     self._build_input_channel = input_channel
     self._padding_op = self._get_padding_op()
     self._conv_op_data_format = conv_utils.convert_data_format(
         self.data_format, self.rank + 2)
     self.built = True
Example #28
    def call(self, x, mask=None):
        """Layer functionality."""

        self.mem_input.assign_add(x)
        _, max_idxs = tf.nn.max_pool_with_argmax(
            self.mem_input,
            self.pool_size,
            self.strides,
            self.padding.upper(),
            conv_utils.convert_data_format(self.data_format, 4),
            include_batch_in_index=True)
        x_max = tf.scatter_nd(tf.reshape(max_idxs, (-1, 1)),
                              tf.ones(tf.size(max_idxs)),
                              [tf.reduce_prod(x.shape)])
        x_max = tf.reshape(x_max, x.shape)
        x_masked = x * x_max
        return MaxPooling2D.call(self, x_masked)
Example #29
    def call(self, inputs):
        if self.padding == 'causal':
            inputs = array_ops.pad(inputs, self._compute_causal_padding())
        if self.data_format == 'channels_last':
            strides = (1, ) + self.strides * 2 + (1, )
            spatial_start_dim = 1
        else:
            strides = (1, 1) + self.strides * 2
            spatial_start_dim = 2

        # Explicitly broadcast inputs and kernels to 4D.
        inputs = array_ops.expand_dims(inputs, spatial_start_dim)

        if self.common_kernel:
            # Need to replicate the kernel {channels} times over axis 1.
            dw_kernel = tf.tile(self.depthwise_kernel, (1, self.channels, 1))
            bias_kernel = tf.tile(self.bias, (self.channels, ))
        else:
            dw_kernel = self.depthwise_kernel
            bias_kernel = self.bias

        dw_kernel = array_ops.expand_dims(dw_kernel, 0)

        if self.padding == 'causal':
            op_padding = 'valid'
        else:
            op_padding = self.padding
        outputs = nn.depthwise_conv2d(
            inputs,
            dw_kernel,
            strides=strides,
            padding=op_padding.upper(),
            data_format=conv_utils.convert_data_format(self.data_format,
                                                       ndim=4))

        outputs = array_ops.squeeze(outputs, [spatial_start_dim])

        if self.use_bias:
            outputs = backend.bias_add(outputs,
                                       bias_kernel,
                                       data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)

        return outputs
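
The expand/squeeze trick in Example #29 (running a 1D depthwise convolution through the 2D op by inserting a dummy height axis) in isolation, as a sketch:

    import tensorflow as tf

    x = tf.random.normal((2, 10, 3))   # NWC input
    x4 = tf.expand_dims(x, 1)          # (2, 1, 10, 3): NHWC with height 1
    k = tf.random.normal((3, 3, 1))    # (kernel_w, channels, depth_multiplier)
    k4 = tf.expand_dims(k, 0)          # (1, 3, 3, 1)
    y = tf.nn.depthwise_conv2d(x4, k4, strides=(1, 1, 1, 1), padding='SAME')
    y = tf.squeeze(y, [1])             # back to (2, 10, 3)
    print(y.shape)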
Example #30
    def build(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape)
        input_channel = self._get_input_channel(input_shape)
        kernel_shape = self.kernel_size + (input_channel, self.filters)

        self.kernelA = self.add_weight(name='kernelA',
                                       shape=kernel_shape,
                                       initializer=self.kernel_initializer,
                                       regularizer=self.kernel_regularizer,
                                       constraint=self.kernel_constraint,
                                       trainable=True,
                                       dtype=self.dtype)

        self.kernelB = K.constant(self.kernelB_init_weight)
        self.kernel = K.transpose(
            K.dot(K.transpose(self.kernelA), self.kernelB))

        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(self.filters, ),
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        trainable=True,
                                        dtype=self.dtype)
        else:
            self.bias = None

        channel_axis = self._get_channel_axis()
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_channel})

        self._build_conv_op_input_shape = input_shape
        self._build_input_channel = input_channel
        self._padding_op = self._get_padding_op()
        self._conv_op_data_format = conv_utils.convert_data_format(
            self.data_format, self.rank + 2)
        self._convolution_op = nn_ops.Convolution(
            input_shape,
            filter_shape=self.kernel.shape,
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=self._padding_op,
            data_format=self._conv_op_data_format)
        self.built = True
Example #31
    def __init__(self, rank, filters, kernel_size, dtype, strides=1, padding='valid', data_format=None, dilation_rate=1,
                 groups=1, activation=None, use_bias=True,
                 kernel_initializer=ComplexGlorotUniform(), bias_initializer=Zeros(),
                 kernel_regularizer=None, bias_regularizer=None,  # TODO: Not yet working
                 activity_regularizer=None, kernel_constraint=None, bias_constraint=None,
                 trainable=True, name=None, conv_op=None, **kwargs):
        if kernel_regularizer is not None or bias_regularizer is not None:
            logger.warning("Regularizers are not implemented yet; these parameters will have no effect")
        super(ComplexConv, self).__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)
        self.rank = rank
        self.my_dtype = tf.dtypes.as_dtype(dtype)
        # I use no default dtype to make sure I don't forget to give it to my ComplexConv layers
        if isinstance(filters, float):
            filters = int(filters)
        self.filters = filters
        self.groups = groups or 1
        self.kernel_size = conv_utils.normalize_tuple(
            kernel_size, rank, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, rank, 'dilation_rate')

        self.activation = activations.get(activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(min_ndim=self.rank + 2)

        self._validate_init()
        self._is_causal = self.padding == 'causal'
        self._channels_first = self.data_format == 'channels_first'
        self._tf_data_format = conv_utils.convert_data_format(
            self.data_format, self.rank + 2)
    def build(self, input_shape):
        input_shape = tf.TensorShape(input_shape)
        channel_axis = self._get_channel_axis()
        if input_shape.dims[channel_axis].value is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = int(input_shape[channel_axis])
        kernel_shape = self.kernel_size + (input_dim, self.filters)

        self.kernel = self.add_weight(name='kernel',
                                      shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint,
                                      trainable=True,
                                      dtype=self.dtype)
        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(self.filters, ),
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        trainable=True,
                                        dtype=self.dtype)
        else:
            self.bias = None

        mean, variance = tf.nn.moments(self.kernel.value(), [0, 1, 2],
                                       keepdims=True)
        self.kernel.assign_sub(mean)
        self.kernel.assign(self.kernel.value() / (tf.sqrt(variance) + 1e-5))

        self.input_spec = tf.keras.layers.InputSpec(
            ndim=self.rank + 2, axes={channel_axis: input_dim})
        # The op-builder class lives in nn_ops; tf.nn only exposes the
        # lowercase tf.nn.convolution function.
        self._convolution_op = nn_ops.Convolution(
            input_shape,
            filter_shape=self.kernel.shape,
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=self._get_padding_op(),
            data_format=conv_utils.convert_data_format(self.data_format,
                                                       self.rank + 2))
        self.built = True
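
The moment-matching at the end of Example #31 standardizes each filter to zero mean and unit variance over its spatial and input-channel axes. A standalone check of that identity, as a sketch:

    import tensorflow as tf

    k = tf.Variable(tf.random.normal((3, 3, 4, 8), stddev=2.0))
    mean, variance = tf.nn.moments(k.value(), [0, 1, 2], keepdims=True)
    k.assign_sub(mean)
    k.assign(k.value() / (tf.sqrt(variance) + 1e-5))

    new_mean, new_var = tf.nn.moments(k.value(), [0, 1, 2])
    print(new_mean.numpy().round(5), new_var.numpy().round(3))  # ~0 and ~1 per filter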