Example #1
    def call(self, inputs, mask=None):
        '''
        We will be using the Keras conv2d method; essentially, all we have to
        do here is multiply the mask with the input X before we apply the
        convolutions. For the mask itself, we apply convolutions with all
        weights set to 1. Subsequently, we clip the mask values to between
        0 and 1.
        '''

        # Both image and mask must be supplied
        if type(inputs) is not list or len(inputs) != 2:
            raise Exception(
                'PartialConvolution2D must be called on a list of two tensors [img, mask]. Instead got: ' + str(inputs))

        # Padding done explicitly so that padding becomes part of the masked partial convolution
        images = K.spatial_2d_padding(inputs[0], self.pconv_padding, self.data_format)
        masks = K.spatial_2d_padding(inputs[1], self.pconv_padding, self.data_format)

        # Apply convolutions to mask
        mask_output = K.conv2d(
            masks, self.kernel_mask,
            strides=self.strides,
            padding='valid',
            data_format=self.data_format,
            dilation_rate=self.dilation_rate
        )

        # Apply convolutions to image
        img_output = K.conv2d(
            (images * masks), self.kernel,
            strides=self.strides,
            padding='valid',
            data_format=self.data_format,
            dilation_rate=self.dilation_rate
        )

        # Calculate the mask ratio on each pixel in the output mask
        mask_ratio = self.window_size / (mask_output + 1e-8)

        # Clip output to be between 0 and 1
        mask_output = K.clip(mask_output, 0, 1)

        # Remove ratio values where there are holes
        mask_ratio = mask_ratio * mask_output

        # Normalize image output
        img_output = img_output * mask_ratio

        # Apply bias only to the image (if chosen to do so)
        if self.use_bias:
            img_output = K.bias_add(
                img_output,
                self.bias,
                data_format=self.data_format)

        # Apply activations on the image
        if self.activation is not None:
            img_output = self.activation(img_output)

        return [img_output, mask_output]
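
The call() above relies on attributes the snippet does not show: kernel_mask, window_size and pconv_padding. Below is a minimal, hypothetical sketch of the build() step such a layer might pair with this call(), assuming a channels-last PConv2D layer; the attribute names follow the code above, everything else is an assumption.

    def build(self, input_shape):
        # Hypothetical sketch, not the original author's build(); it only
        # defines the attributes referenced by call() above.
        kh, kw = self.kernel_size
        input_dim = input_shape[0][-1]  # channels of the image input

        # Trainable image kernel and optional bias.
        self.kernel = self.add_weight(name='img_kernel',
                                      shape=(kh, kw, input_dim, self.filters),
                                      initializer='glorot_uniform')
        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(self.filters,),
                                        initializer='zeros')

        # Non-trainable all-ones kernel: convolving the mask with it counts
        # the valid (unmasked) pixels inside each window.
        self.kernel_mask = K.ones(shape=(kh, kw, input_dim, self.filters))

        # Elements per full window, used for the mask ratio, and the explicit
        # 'same'-style padding applied before the 'valid' convolution.
        self.window_size = kh * kw * input_dim
        self.pconv_padding = ((kh // 2, kh // 2), (kw // 2, kw // 2))
        self.built = True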
Example #2
def to_transcoder_input(x):
    # cars_pos and tavuk_pos are assumed to be position constants defined
    # elsewhere in the original source; 210 and 160 are the frame height/width.
    layers = []
    for i in range(10):
        tmp = K.expand_dims(x[1][:, :, i:i + 1], axis=1)
        tmp = K.spatial_2d_padding(
            tmp, ((cars_pos[i], 210 - cars_pos[i] - 1), (0, 0)))
        layers.append(tmp)
    for i in range(3):
        tmp = K.expand_dims(x[0][:, :, i:i + 1], axis=2)
        tmp = K.spatial_2d_padding(
            tmp, ((0, 0), (tavuk_pos, 160 - tavuk_pos - 1)))
        layers.append(tmp)
    print([l.shape for l in layers])
    return K.concatenate(layers, axis=3)

    def call(self, inputs):
        # Both image and mask must be supplied
        assert isinstance(inputs, list) and len(inputs) == 2

        # Padding done explicitly so that padding becomes part of the masked partial convolution
        images = K.spatial_2d_padding(inputs[0], self.pconv_padding,
                                      self.data_format)
        masks = K.spatial_2d_padding(inputs[1], self.pconv_padding,
                                     self.data_format)

        # Apply convolutions to image
        img_output = K.conv2d((images * masks),
                              self.kernel,
                              strides=self.strides,
                              padding='valid',
                              data_format=self.data_format,
                              dilation_rate=self.dilation_rate)

        # Apply convolutions to mask
        mask_output = K.conv2d(masks,
                               self.kernel_mask,
                               strides=self.strides,
                               padding='valid',
                               data_format=self.data_format,
                               dilation_rate=self.dilation_rate)

        # Calculate the mask ratio on each pixel in the output mask
        mask_ratio = self.window_size / (mask_output + 1e-8)
        # Clip output to be between 0 and 1
        mask_output = K.clip(mask_output, 0, 1)
        # Remove ratio values where there are holes
        mask_ratio = mask_ratio * mask_output
        # Normalize image output
        img_output = img_output * mask_ratio

        # Apply bias only to the image (if chosen to do so)
        if self.use_bias:
            img_output = K.bias_add(img_output,
                                    self.bias,
                                    data_format=self.data_format)

        # Apply activations on the image
        if self.activation is not None:
            img_output = self.activation(img_output)

        return [img_output, mask_output]
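
Since call() takes [img, mask] and returns [img_output, mask_output], the layer is wired with two inputs and two outputs. A hypothetical usage sketch (the constructor arguments and shapes are illustrative, not taken from the original source):

from keras.layers import Input
from keras.models import Model

img_in = Input(shape=(512, 512, 3))
mask_in = Input(shape=(512, 512, 3))

# PConv2D is the layer whose call() is shown above; its constructor is
# assumed to mirror Conv2D. It filters the image and also returns the
# updated mask, whose holes shrink with every layer.
img_out, mask_out = PConv2D(filters=64, kernel_size=(7, 7), strides=2)([img_in, mask_in])
model = Model(inputs=[img_in, mask_in], outputs=[img_out, mask_out])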
Example #4
def _upsample_neighbor_function(input_x):
    input_x_pad = K.spatial_2d_padding(input_x, padding=((2, 2), (2, 2)))
    x_length = K.int_shape(input_x)[1]
    y_length = K.int_shape(input_x)[2]
    output_x_list = []
    output_y_list = []
    for i_x in range(2, x_length + 2):
        for i_y in range(2, y_length + 2):
            output_y_list.append(input_x_pad[:, i_x-2:i_x+3, i_y-2:i_y+3, :])
        output_x_list.append(K.concatenate(output_y_list, axis=2))
        output_y_list = []
    return K.concatenate(output_x_list, axis=1)
Example #5
    def call(self, x):
        # Build a separable 2-D kernel from the 1-D filter self.a, normalize
        # it to sum to 1, and replicate it across the input channels.
        k = self.a
        k = k[:, None] * k[None, :]
        k = k / np.sum(k)
        k = np.tile(k[:, :, None, None], (1, 1, K.int_shape(x)[-1], 1))
        k = K.constant(k, dtype=K.floatx())

        # Pad explicitly, then filter each channel independently with a
        # 'VALID' depthwise convolution so the padding fully controls the
        # output size.
        x = K.spatial_2d_padding(x, padding=self.padding)
        x = tf.nn.depthwise_conv2d(
            x, k, strides=[1, self.strides[0], self.strides[1], 1], padding="VALID"
        )
        return x
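
self.a is the 1-D filter the layer was configured with; for blur/anti-aliasing layers of this shape a small binomial kernel is typical. A hypothetical __init__ showing how self.a, self.strides and self.padding could be set up for an odd-length kernel (names and defaults are assumptions, not the original author's code):

    def __init__(self, kernel=(1, 2, 1), strides=(1, 1), **kwargs):
        super().__init__(**kwargs)
        # 1-D binomial filter; call() turns it into a normalized 2-D kernel.
        self.a = np.asarray(kernel, dtype='float32')
        self.strides = strides
        # Symmetric padding that keeps the spatial size for stride 1 and an
        # odd kernel length.
        pad = len(kernel) // 2
        self.padding = ((pad, pad), (pad, pad))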
Example #6
def _upsample_neighbor_function(input_x):
    """
    """
    input_x_pad = K.spatial_2d_padding(input_x, padding=((2, 2), (2, 2)))
    x_length = K.int_shape(input_x)[1]
    y_length = K.int_shape(input_x)[2]
    output_x_list = []
    for i_x in range(2, x_length + 2):
        output_y_list = []  # N[g_i(x,y)]
        for i_y in range(2, y_length + 2):
            # 5×5 neighborhood of g_i centered at (x,y)
            output_y_list.append(input_x_pad[:, i_x - 2:i_x + 3,
                                             i_y - 2:i_y + 3, :])
        output_x_list.append(K.concatenate(output_y_list, axis=2))
    return K.concatenate(output_x_list, axis=1)
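
For reference, the same (batch, 5*H, 5*W, channels) layout can be produced without Python loops by extracting the 5x5 patches in one op and rearranging them. A sketch assuming TensorFlow 2 and a channels-last input with a static spatial shape:

import tensorflow as tf

def upsample_neighbor_patches(input_x):
    batch = tf.shape(input_x)[0]
    _, h, w, c = input_x.get_shape().as_list()
    # Each output position holds its flattened, zero-padded 5x5 neighborhood:
    # shape (batch, H, W, 5 * 5 * C).
    patches = tf.image.extract_patches(input_x,
                                       sizes=[1, 5, 5, 1],
                                       strides=[1, 1, 1, 1],
                                       rates=[1, 1, 1, 1],
                                       padding='SAME')
    # Un-flatten the patches and interleave them spatially so that the result
    # matches the concatenation layout of _upsample_neighbor_function.
    patches = tf.reshape(patches, [batch, h, w, 5, 5, c])
    patches = tf.transpose(patches, [0, 1, 3, 2, 4, 5])
    return tf.reshape(patches, [batch, 5 * h, 5 * w, c])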
Example #7
        def normalize_tensor_2d(X):
            # Local response normalization across channels; n, k, alpha and
            # beta are the LRN hyper-parameters captured from the enclosing
            # scope.

            X2 = K.square(X)

            half = n // 2

            # Permute so that the channel axis sits in a spatially padded
            # position, then zero-pad it by `half` on each side.
            extra_channels = K.spatial_2d_padding(
                K.permute_dimensions(X2, (1, 2, 3, 0)),
                padding=((0, 0), (half, half)))
            extra_channels = K.permute_dimensions(extra_channels, (3, 0, 1, 2))

            Xdims = K.int_shape(X)
            number_of_channels = int(Xdims[-1])

            # Sum the squared activations over a sliding window of n channels
            # and use the result to scale the input.
            scale = k
            for i in range(n):
                scale += alpha * extra_channels[:, :, :,
                                                i:(i + number_of_channels)]
            scale = scale**beta

            return (X / scale)
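
The loop above is local response normalization across channels. For an odd window size n = 2 * half + 1 it should agree with TensorFlow's built-in op, which can serve as a reference (a sketch, assuming NHWC float inputs):

import tensorflow as tf

def lrn_reference(X, n, k, alpha, beta):
    # Equivalent of normalize_tensor_2d for odd n: a window of n channels is
    # squared, summed, scaled by alpha, offset by k and raised to beta.
    return tf.nn.local_response_normalization(
        X, depth_radius=n // 2, bias=k, alpha=alpha, beta=beta)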
Example #8
    def call(self, inputs):
        return backend.spatial_2d_padding(inputs,
                                          padding=self.padding,
                                          data_format=self.data_format)
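
All of the examples above pass padding to K.spatial_2d_padding as ((top, bottom), (left, right)) tuples for the two spatial axes. A standalone sanity check with arbitrary values:

import numpy as np
from keras import backend as K

x = K.constant(np.zeros((1, 4, 4, 3)))                 # NHWC input
y = K.spatial_2d_padding(x, padding=((1, 1), (2, 2)))  # rows +1/+1, cols +2/+2
print(K.int_shape(y))                                  # -> (1, 6, 8, 3)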