Code example #1
File: conv.py Project: sbl1996/hanser
    def call(self, inputs):
        if self._horch_impl:
            # Subsample first, then convolve at stride 1 (see the sketch after
            # this example; this matches a strided convolution only for 1x1 kernels).
            inputs = inputs[:, ::2, ::2, :]
            outputs = backend.depthwise_conv2d(inputs,
                                               self.depthwise_kernel,
                                               strides=(1, 1),
                                               padding='valid',
                                               dilation_rate=(1, 1),
                                               data_format=self.data_format)
        else:
            outputs = backend.depthwise_conv2d(
                inputs,
                self.depthwise_kernel,
                strides=self.strides,
                padding=self.padding,
                dilation_rate=self.dilation_rate,
                data_format=self.data_format)

        if self.use_bias:
            outputs = backend.bias_add(outputs,
                                       self.bias,
                                       data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)

        return outputs
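A minimal sketch (assuming TensorFlow 2.x; not part of the snippet above) of why the `_horch_impl` branch is a special case rather than a drop-in replacement: subsampling with `[:, ::2, ::2, :]` and then convolving at stride 1 samples the input on a dilated grid, which agrees with a stride-2 depthwise convolution only for a 1x1 kernel.

import tensorflow as tf

x = tf.reshape(tf.range(16, dtype=tf.float32), (1, 4, 4, 1))
k = tf.ones((1, 1, 1, 1))  # 1x1 depthwise kernel, single channel

strided = tf.nn.depthwise_conv2d(x, k, strides=(1, 2, 2, 1), padding='VALID')
sliced = tf.nn.depthwise_conv2d(x[:, ::2, ::2, :], k,
                                strides=(1, 1, 1, 1), padding='VALID')
print(bool(tf.reduce_all(strided == sliced)))  # True only because k is 1x1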
Code example #2
    def call(self, x):
        x = K.depthwise_conv2d(x,
                               self.blur_kernel,
                               padding='same',
                               strides=(self.pool_size, self.pool_size))

        return x
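The layer's `self.blur_kernel` is built elsewhere; a plausible helper (an assumption, not the original code) is a normalized binomial filter tiled per channel, in the style of anti-aliased "BlurPool" downsampling.

import numpy as np
from tensorflow.keras import backend as K

def make_blur_kernel(channels, size=3):
    # Binomial (Pascal's triangle) taps; normalized so the blur preserves the mean.
    taps = {2: [1., 1.], 3: [1., 2., 1.], 5: [1., 4., 6., 4., 1.]}[size]
    k = np.outer(taps, taps)
    k /= k.sum()
    k = np.tile(k[:, :, None, None], (1, 1, channels, 1))  # (size, size, channels, 1)
    return K.constant(k, dtype='float32')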
Code example #3
    def call(self, input_tensor):
        """
        Calls the tensor for forward pass operation.

        :param input_tensor: The input dataset of 2D images with shape of `(batch_shape, rows, cols, channels)`.
        :return: 4D tensor with shape: `(batch_shape, rows, cols, input_image_channels * 4)` for 'concat' merge mode.
        4D tensor with shape: `(batch_shape, rows, cols, output_conv_filter)` for 'convolution' merge mode.
        """
        input_tensor = K.cast(tf.identity(input_tensor), tf.float32)
        result_tensors_list_img = []
        for direction, kernel in self.kernel_dic.items():
            res_sum = tf.identity(input_tensor)
            tensor = tf.identity(input_tensor)
            for i in range(self.seq_length):
                conv = K.depthwise_conv2d(x=tensor,
                                          depthwise_kernel=kernel * self.kernel_switch_dic[direction],
                                          padding='same')
                tensor = self.activation(conv)
                res_sum += tensor
            result_tensors_list_img.append(res_sum)
        result_tensors_list_img = K.concatenate(result_tensors_list_img,
                                                axis=-1)
        if self.merge_mode == 'convolution':
            result_tensors_list_img = K.conv2d(x=result_tensors_list_img,
                                               kernel=self.conv_kernel,
                                               padding='same')
            result_tensors_list_img = self.activation(result_tensors_list_img)
        return result_tensors_list_img
Code example #4
    def call(self, x):

        x = tf.nn.pool(x, (self.pool_size, self.pool_size),
                       strides=(1, 1), padding='SAME', pooling_type='MAX', data_format='NHWC')
        x = K.depthwise_conv2d(x, self.blur_kernel, padding='same', strides=(self.pool_size, self.pool_size))

        return x
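This variant is the "max-blur-pool" decomposition used in anti-aliased CNNs: a dense stride-1 max pool followed by a blurred, strided subsampling, so the downsampling happens on the low-passed signal rather than inside the max pool itself.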
Code example #5
    def call(self, inputs):
        x, kernel = inputs

        # Fold the batch dimension into the channels so one depthwise
        # convolution applies a different kernel to every sample.
        n, xh, xw, c = x.shape
        x = tf.transpose(x, (1, 2, 0, 3))
        x = K.reshape(x, (1, xh, xw, -1))

        _, kh, kw, _ = kernel.shape
        kernel = tf.transpose(kernel, (1, 2, 0, 3))
        kernel = K.reshape(kernel, (kh, kw, -1, 1))

        # https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d
        out = K.depthwise_conv2d(x, kernel)

        # Unfold back to (batch, out_height, out_width, channels).
        _, oh, ow, _ = out.shape
        out = K.reshape(out, (oh, ow, -1, c))
        out = tf.transpose(out, (2, 0, 1, 3))
        return out
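A standalone sketch (assuming TensorFlow 2.x; shapes are illustrative) of the fold/unfold trick above: moving the batch dimension into the channels lets a single depthwise convolution apply a different kernel to every sample.

import tensorflow as tf

n, h, w, c, k = 2, 8, 8, 3, 5
x = tf.random.normal((n, h, w, c))
kernels = tf.random.normal((n, k, k, c))            # one kernel per sample

x_folded = tf.reshape(tf.transpose(x, (1, 2, 0, 3)), (1, h, w, n * c))
k_folded = tf.reshape(tf.transpose(kernels, (1, 2, 0, 3)), (k, k, n * c, 1))

out = tf.nn.depthwise_conv2d(x_folded, k_folded,
                             strides=(1, 1, 1, 1), padding='VALID')
oh, ow = out.shape[1], out.shape[2]
out = tf.transpose(tf.reshape(out, (oh, ow, n, c)), (2, 0, 1, 3))
print(out.shape)  # (2, 4, 4, 3)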
Code example #6
File: Q_Conv2dNorm.py Project: AntelopeCub/MAinIIIT
        def normalize_inference():
            dconvs = K.depthwise_conv2d(inputs,
                                        self.depthwise_kernel,
                                        strides=self.strides,
                                        padding=self.padding,
                                        data_format='channels_last')
            dconvs = K.clip(dconvs,
                            min_value=-self.max_activity_signed,
                            max_value=self.max_activity_signed)

            convs = K.conv2d(dconvs,
                             self.kernel * self.w_scale,
                             strides=(1, 1),
                             padding=self.padding,
                             data_format='channels_last',
                             dilation_rate=self.dilation_rate)
            convs = K.bias_add(convs,
                               self.bias * self.w_scale,
                               data_format='channels_last')

            outputs = K.clip(convs, min_value=0, max_value=self.max_activity)
            # Round to the nearest value on a fixed-point grid with L_A[1]
            # fractional bits (nearest neighbor).
            return K.round(outputs * 2**self.L_A[1]) * 2**-self.L_A[1]
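A quick numeric sketch of that final rounding step: multiplying by 2**L_A[1], rounding, and scaling back snaps every activation to the nearest point on a fixed-point grid with L_A[1] fractional bits (here 3, so multiples of 1/8).

import numpy as np

frac_bits = 3                      # plays the role of self.L_A[1]
x = np.array([0.10, 0.33, 0.70])
q = np.round(x * 2**frac_bits) * 2**-frac_bits
print(q)                           # [0.125 0.375 0.75 ] -- multiples of 1/8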
Code example #7
def gaussian_kernel_layer(inputs, sigma=1):
	kfilter = get_kernel_filter(inputs.shape[-1], sigma)
	# Non-trainable Gaussian filter, applied as a depthwise convolution.
	kernel = tf.Variable(initial_value=kfilter, trainable=False, dtype=tf.float64)
	out = K.depthwise_conv2d(tf.cast(inputs, tf.float64), kernel, padding='same')
	return out
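`get_kernel_filter` is not shown in this snippet. A plausible sketch of such a helper (an assumption, not the original implementation) builds a normalized 2D Gaussian and tiles it into the `(size, size, channels, 1)` layout that `K.depthwise_conv2d` expects; examples #10 and #18 below would use the same shape.

import numpy as np

def get_kernel_filter(channels, sigma=1, size=5):
    ax = np.arange(size) - (size - 1) / 2.0
    xx, yy = np.meshgrid(ax, ax)
    g = np.exp(-(xx**2 + yy**2) / (2.0 * sigma**2))
    g /= g.sum()                                  # normalize to sum to 1
    g = g[:, :, None, None]                       # (size, size, 1, 1)
    return np.tile(g, (1, 1, channels, 1))        # (size, size, channels, 1)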
Code example #8
def scharr_edges(image, magnitude):
    """ Returns a tensor holding modified Scharr edge maps.

    Parameters
    ----------
    image: tensor
        Image tensor with shape [batch_size, h, w, d] and type float32. The image(s) must be 2x2
        or larger.
    magnitude: bool
        Boolean to determine if the edge magnitude or edge direction is returned

    Returns
    -------
    tensor
        Tensor holding edge maps for each channel. Returns a tensor with shape `[batch_size, h, w,
        d, 2]` where the last two dimensions hold `[[dy[0], dx[0]], [dy[1], dx[1]], ..., [dy[d-1],
        dx[d-1]]]` calculated using the Scharr filter.
    """

    # Define vertical and horizontal Scharr filters.
    static_image_shape = (image.shape.dims if get_backend() == "amd"
                          else image.get_shape())
    image_shape = K.shape(image)

    # 5x5 modified Scharr kernel ( reshape to (5,5,1,2) )
    matrix = np.array([[[[0.00070, 0.00070]], [[0.00520, 0.00370]],
                        [[0.03700, 0.00000]], [[0.00520, -0.0037]],
                        [[0.00070, -0.0007]]],
                       [[[0.00370, 0.00520]], [[0.11870, 0.11870]],
                        [[0.25890, 0.00000]], [[0.11870, -0.1187]],
                        [[0.00370, -0.0052]]],
                       [[[0.00000, 0.03700]], [[0.00000, 0.25890]],
                        [[0.00000, 0.00000]], [[0.00000, -0.2589]],
                        [[0.00000, -0.0370]]],
                       [[[-0.0037, 0.00520]], [[-0.1187, 0.11870]],
                        [[-0.2589, 0.00000]], [[-0.1187, -0.1187]],
                        [[-0.0037, -0.0052]]],
                       [[[-0.0007, 0.00070]], [[-0.0052, 0.00370]],
                        [[-0.0370, 0.00000]], [[-0.0052, -0.0037]],
                        [[-0.0007, -0.0007]]]])
    num_kernels = [2]
    kernels = K.constant(matrix, dtype='float32')
    kernels = K.tile(kernels, [1, 1, image_shape[-1], 1])

    # Use depth-wise convolution to calculate edge maps per channel.
    # Output tensor has shape [batch_size, h, w, d * num_kernels].
    pad_sizes = [[0, 0], [2, 2], [2, 2], [0, 0]]
    padded = pad(image, pad_sizes, mode='REFLECT')
    output = K.depthwise_conv2d(padded, kernels)

    if not magnitude:  # direction of edges
        # Reshape to [batch_size, h, w, d, num_kernels].
        shape = K.concatenate([image_shape, num_kernels], axis=0)
        output = K.reshape(output, shape=shape)
        output.set_shape(static_image_shape.concatenate(num_kernels))
        # atan(dy / dx) gives the edge direction per channel.
        output = tf.atan(output[:, :, :, :, 0] / output[:, :, :, :, 1])
    # magnitude of edges -- unified x & y edges don't work well with Neural Networks
    return output
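When `magnitude` is True the function returns the raw stacked maps with shape `[batch_size, h, w, d * 2]`. A hedged sketch of how a caller might reduce them to a per-channel edge magnitude (an assumption; this reduction is not part of the function above):

from tensorflow.keras import backend as K

def edge_magnitude(output, image_shape):
    # output: [b, h, w, d * 2] -> [b, h, w, d, 2], then L2 norm over (dy, dx).
    stacked = K.reshape(output, K.concatenate([image_shape, [2]], axis=0))
    return K.sqrt(K.sum(K.square(stacked), axis=-1))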
Code example #9
    def call(self, x):

        x = K.expand_dims(x, axis=-2)
        x = K.depthwise_conv2d(x,
                               self.blur_kernel,
                               padding='same',
                               strides=(self.pool_size, self.pool_size))
        x = K.squeeze(x, axis=-2)

        return x
Code example #10
def gaussian_kernel_layer(inputs, sigma=1):
	kfilter = get_kernel_filter(inputs.shape[-1], sigma)
	kfilter = tf.convert_to_tensor(np.float32(kfilter))
	# Non-trainable Gaussian filter, applied as a depthwise convolution.
	kernel = tf.Variable(initial_value=kfilter, trainable=False)
	out = K.depthwise_conv2d(inputs, kernel, padding='same')
	return out
Code example #11
def sobel_func(batch):
    batch = tf.image.rgb_to_grayscale(batch)

    # Expand the Sobel filter pair for each input channel; the depthwise
    # convolution then yields an X and a Y edge map per channel.
    filt = expandedSobel(batch)
    batch = K.depthwise_conv2d(batch, filt)

    resize = tf.keras.Sequential(
        [layers.experimental.preprocessing.Resizing(height=IMG_SIZE, width=IMG_SIZE)])
    batch = resize(batch)

    # Keep only the X edge map and restore the channel axis.
    batch = batch[:, :, :, 0]
    batch = tf.expand_dims(batch, axis=3)
    return batch
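`expandedSobel` is not shown here; a plausible sketch (an assumption, not the original helper) tiles the 3x3 Sobel x/y pair across the input's channels, producing the `(3, 3, channels, 2)` kernel that makes `K.depthwise_conv2d` emit an X and a Y edge map per channel.

import tensorflow as tf
from tensorflow.keras import backend as K

def expandedSobel(inputs):
    # Shape (3, 3, 1, 2): the last axis stacks the Sobel-x and Sobel-y filters.
    sobel = tf.constant([[[[1.0,  1.0]], [[0.0,  2.0]], [[-1.0,  1.0]]],
                         [[[2.0,  0.0]], [[0.0,  0.0]], [[-2.0,  0.0]]],
                         [[[1.0, -1.0]], [[0.0, -2.0]], [[-1.0, -1.0]]]])
    return K.tile(sobel, [1, 1, K.shape(inputs)[-1], 1])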
Code example #12
def laplacian_func(batch, filt=laplacianFilter):
    # Convert to grayscale and apply the Laplacian filter as a depthwise
    # convolution, then resize the edge map to the working resolution.
    batch = tf.image.rgb_to_grayscale(batch)
    laplacian = K.depthwise_conv2d(batch, filt)

    resize = tf.keras.Sequential(
        [layers.experimental.preprocessing.Resizing(height=IMG_SIZE, width=IMG_SIZE)])
    laplacian = resize(laplacian)
    return laplacian
Code example #13
    def call(self, x):

        x = tf.nn.pool(x, (self.pool_size, ),
                       strides=(1, ),
                       padding='SAME',
                       pooling_type='AVG',
                       data_format='NWC')
        x = K.expand_dims(x, axis=-2)
        x = K.depthwise_conv2d(x,
                               self.blur_kernel,
                               padding='same',
                               strides=(self.pool_size, self.pool_size))
        x = K.squeeze(x, axis=-2)

        return x
Code example #14
    def _depthwise_conv2d(cls, image: tf.Tensor, kernel: tf.Tensor) -> tf.Tensor:
        """ Perform a standardized depthwise convolution.

        Parameters
        ----------
        image: :class:`tf.Tensor`
            Batch of images (channels last) on which to perform the depthwise convolution
        kernel: :class:`tf.Tensor`
            convolution kernel

        Returns
        -------
        :class:`tf.Tensor`
            The output from the convolution
        """
        return K.depthwise_conv2d(image, kernel, strides=(1, 1), padding="valid")
Code example #15
File: Q_Conv2dNorm.py Project: AntelopeCub/MAinIIIT
        def training_phase():
            # Depthwise conv with soft ReLU
            dconvs = K.depthwise_conv2d(inputs,
                                        self.depthwise_kernel,
                                        strides=self.strides,
                                        padding=self.padding,
                                        data_format='channels_last')

            # Soft clip: identity on [-max_activity_signed, max_activity_signed],
            # slope 0.1 outside, so gradients keep flowing.
            factor2 = 0.9 * self.max_activity_signed
            dconvs = K.minimum(dconvs, 0.1 * dconvs + factor2)
            dconvs = K.maximum(dconvs, 0.1 * dconvs - factor2)

            # Pointwise-Conv
            convs = K.conv2d(dconvs,
                             self.kernel,
                             strides=(1, 1),
                             padding=self.padding,
                             data_format='channels_last',
                             dilation_rate=self.dilation_rate)
            convs = K.bias_add(convs, self.bias, data_format='channels_last')

            # Scaling
            scale1 = K.abs(self.max_activity_x /
                           (K.max(K.abs(convs), axis=(0, 1, 2)) + 1e-6))
            indizes = K.greater(scale1, self.max_scale)
            scale1 = self.w_scale * tf.to_float(indizes) + tf.to_float(
                ~indizes) * scale1

            scale2 = self.max_weight / (K.maximum(
                tf.abs(self.bias),
                tf.reduce_max(tf.abs(self.kernel), axis=(0, 1, 2))) + 1e-6)
            scale = K.minimum(scale1, scale2)

            self.add_update(
                K.moving_average_update(self.w_scale, scale, self.momentum),
                inputs)
            # Soft-clipped linear
            outputs = convs * self.w_scale
            outputs = tf.where(outputs <= 2**(-self.L_A[1] - 1),
                               tf.zeros_like(outputs), outputs)
            outputs = K.minimum(outputs, 0.1 * outputs + factor2)
            return outputs
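A quick numeric sketch of the soft clipping used twice above: with `factor2 = 0.9 * m`, taking `min(x, 0.1*x + factor2)` and then `max(x, 0.1*x - factor2)` is the identity on `[-m, m]` and has slope 0.1 outside it, so gradients keep flowing where a hard clip would be flat.

import numpy as np

m = 4.0
factor2 = 0.9 * m
x = np.array([-10.0, -4.0, 0.0, 4.0, 10.0])
y = np.maximum(np.minimum(x, 0.1 * x + factor2), 0.1 * x - factor2)
print(y)   # [-4.6 -4.   0.   4.   4.6]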
Code example #16
    def call(self, inputs, training=None):
        outputs = K.depthwise_conv2d(
            inputs,
            self.depthwise_kernel,
            strides=self.strides,
            padding=self.padding,
            dilation_rate=self.dilation_rate,
            data_format=self.data_format)

        if self.use_bias:
            outputs = K.bias_add(
                outputs, self.bias, data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)

        return outputs
Code example #17
    def call(self, inputs, training=None):
        if self.quant_mode not in [None, 'extrinsic', 'hybrid', 'intrinsic']:
            raise ValueError(
                "Invalid quantization mode. The 'quant_mode' argument must be "
                "one of 'extrinsic', 'intrinsic', 'hybrid' or None.")

        # set quantizer
        if isinstance(self.quantizer, list) and len(self.quantizer) == 3:
            quantizer_input = self.quantizer[0]
            quantizer_weight = self.quantizer[1]
            quantizer_output = self.quantizer[2]
        else:
            quantizer_input = self.quantizer
            quantizer_weight = self.quantizer
            quantizer_output = self.quantizer

        # quantize input
        if self.quant_mode in ['hybrid', 'intrinsic']:
            inputs = quantizer_input.quantize(inputs)
        # quantize kernel
        if self.quant_mode in ['hybrid', 'intrinsic']:
            quantized_depthwise_kernel = quantizer_weight.quantize(
                self.depthwise_kernel)

        # depthwise convolution 2D layer call
        if self.quant_mode == 'intrinsic':
            strides = (1, self.strides[0], self.strides[1], 1)
            dilation_rate = (1, self.dilation_rate[0], self.dilation_rate[1],
                             1)
            outputs = QuantizedDepthwiseConv2DCore(
                inputs, quantized_depthwise_kernel, strides, dilation_rate,
                self.padding, self.data_format, quantizer_output)
        elif self.quant_mode == 'hybrid':
            outputs = K.depthwise_conv2d(inputs,
                                         quantized_depthwise_kernel,
                                         strides=self.strides,
                                         padding=self.padding,
                                         dilation_rate=self.dilation_rate,
                                         data_format=self.data_format)
            outputs = quantizer_output.quantize(outputs)
        elif self.quant_mode in ['extrinsic', None]:
            outputs = K.depthwise_conv2d(inputs,
                                         self.depthwise_kernel,
                                         strides=self.strides,
                                         padding=self.padding,
                                         dilation_rate=self.dilation_rate,
                                         data_format=self.data_format)

        # add bias
        if self.use_bias:
            if self.quant_mode in ['hybrid', 'intrinsic']:
                quantized_bias = quantizer_weight.quantize(self.bias)

                outputs = K.bias_add(outputs,
                                     quantized_bias,
                                     data_format=self.data_format)
                outputs = quantizer_output.quantize(outputs)
            elif self.quant_mode in ['extrinsic', None]:
                outputs = K.bias_add(outputs,
                                     self.bias,
                                     data_format=self.data_format)

        # activation function
        if self.activation is not None:
            outputs = self.activation(outputs)
        # quantize output
        if self.quant_mode in ['extrinsic', 'hybrid', 'intrinsic']:
            outputs = quantizer_output.quantize(outputs)

        return outputs
Code example #18
def gaussian_kernel_layer(inputs, kfilter):
	# Non-trainable Gaussian filter, applied as a depthwise convolution.
	kernel = tf.Variable(initial_value=kfilter, trainable=False, dtype=tf.float64)
	out = K.depthwise_conv2d(tf.cast(inputs, tf.float64), kernel, padding='same')
	return out