Example #1
    def fn(x):
        #conv_even = K.conv2d(K.conv2d(x, even_kernel_3d),
        #K.permute_dimensions(even_kernel_3d, (1, 0, 2, 3)))
        #conv_odd = K.conv2d(K.conv2d(x, odd_kernel_3d),
        #K.permute_dimensions(odd_kernel_3d, (1, 0, 2, 3)))
        input_shape = K.shape(x)

        dim1 = conv_utils.conv_input_length(input_shape[1],
                                            5,
                                            padding=padding_mode,
                                            stride=2)
        dim2 = conv_utils.conv_input_length(input_shape[2],
                                            5,
                                            padding=padding_mode,
                                            stride=2)

        output_shape_a = (input_shape[0], dim1, input_shape[2], input_shape[3])
        output_shape_b = (input_shape[0], dim1, dim2, input_shape[3])

        # Transposed convolution along the first spatial dimension
        # (stride 2 over rows), producing the intermediate shape above.
        upconvolved = K.conv2d_transpose(x,
                                         kernel_3d,
                                         output_shape_a,
                                         strides=(2, 1),
                                         padding=padding_mode)
        # Second transposed convolution along the other spatial dimension
        # (stride 2 over columns), using the transposed kernel.
        upconvolved = K.conv2d_transpose(upconvolved,
                                         K.permute_dimensions(
                                             kernel_3d, (1, 0, 2, 3)),
                                         output_shape_b,
                                         strides=(1, 2),
                                         padding=padding_mode)

        return 4 * upconvolved
Example #2
def test_deconvolution_3d():
    num_samples = 6
    num_filter = 4
    stack_size = 2
    kernel_dim1 = 12
    kernel_dim2 = 10
    kernel_dim3 = 8

    for batch_size in [None, num_samples]:
        for border_mode in _convolution_border_modes:
            for subsample in [(1, 1, 1), (2, 2, 2)]:
                if border_mode == 'same' and subsample != (1, 1, 1):
                    continue

                dim1 = conv_input_length(kernel_dim1, 7,
                                         border_mode,
                                         subsample[0])
                dim2 = conv_input_length(kernel_dim2, 5,
                                         border_mode,
                                         subsample[1])
                dim3 = conv_input_length(kernel_dim3, 3,
                                         border_mode,
                                         subsample[2])
                layer_test(convolutional.Deconvolution3D,
                           kwargs={'filters': num_filter,
                                   'kernel_size': (7, 5, 3),
                                   'output_shape': (batch_size, num_filter, dim1, dim2, dim3),
                                   'padding': border_mode,
                                   'strides': subsample,
                                   'data_format': 'channels_first'},
                           input_shape=(num_samples, stack_size, kernel_dim1, kernel_dim2, kernel_dim3),
                           fixed_batch_size=True, tolerance=None)

                layer_test(convolutional.Deconvolution3D,
                           kwargs={'filters': num_filter,
                                   'kernel_size': (7, 5, 3),
                                   'output_shape': (batch_size, num_filter, dim1, dim2, dim3),
                                   'padding': border_mode,
                                   'strides': subsample,
                                   'data_format': 'channels_first',
                                   'kernel_regularizer': 'l2',
                                   'bias_regularizer': 'l2',
                                   'activity_regularizer': 'l2'},
                           input_shape=(num_samples, stack_size, kernel_dim1, kernel_dim2, kernel_dim3),
                           fixed_batch_size=True, tolerance=None)

                layer_test(convolutional.Deconvolution3D,
                           kwargs={'filters': num_filter,
                                   'kernel_size': (7, 5, 3),
                                   'output_shape': (num_filter, dim1, dim2, dim3),
                                   'padding': border_mode,
                                   'strides': subsample,
                                   'data_format': 'channels_first',
                                   'kernel_regularizer': 'l2',
                                   'bias_regularizer': 'l2',
                                   'activity_regularizer': 'l2'},
                           input_shape=(num_samples, stack_size, kernel_dim1, kernel_dim2, kernel_dim3), tolerance=None)
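To make the shape bookkeeping above concrete, here is a worked example with this test's numbers for the 'valid' / stride-(2, 2, 2) case (the derived values below are my own illustration, not assertions taken from the test):

# conv_input_length(output_length, filter_size, 'valid', stride)
#     == (output_length - 1) * stride + filter_size
dim1 = (12 - 1) * 2 + 7   # 29
dim2 = (10 - 1) * 2 + 5   # 23
dim3 = (8 - 1) * 2 + 3    # 17
# so output_shape is (batch_size, 4, 29, 23, 17) with 'channels_first'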
Example #3
    def get_output_shape_for_helper(self, input_shape, nb_filter, dim_ordering,
                                    nb_row, nb_col, border_mode, subsample):
        if dim_ordering == 'th':
            rows = input_shape[2]
            cols = input_shape[3]
        elif dim_ordering == 'tf':
            rows = input_shape[1]
            cols = input_shape[2]
        else:
            raise Exception('Invalid dim_ordering: ' + dim_ordering)

        rows = conv_input_length(rows, nb_row, border_mode, subsample[0])
        cols = conv_input_length(cols, nb_col, border_mode, subsample[1])

        if dim_ordering == 'th':
            return (input_shape[0], nb_filter, rows, cols)
        elif dim_ordering == 'tf':
            return (input_shape[0], rows, cols, nb_filter)
Example #4
    def get_output_shape_for(self, input_shape):
        if self.dim_ordering == 'th':
            rows = input_shape[2]
            cols = input_shape[3]
        elif self.dim_ordering == 'tf':
            rows = input_shape[1]
            cols = input_shape[2]
        else:
            raise Exception('Invalid dim_ordering: ' + self.dim_ordering)

        rows = conv_input_length(rows, self.nb_row, self.border_mode,
                                 self.subsample[0])
        cols = conv_input_length(cols, self.nb_col, self.border_mode,
                                 self.subsample[1])
        if self.dim_ordering == 'th':
            return (input_shape[0], self.nb_filter, rows, cols)
        elif self.dim_ordering == 'tf':
            return (input_shape[0], rows, cols, self.nb_filter)
        else:
            raise Exception('Invalid dim_ordering: ' + self.dim_ordering)
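As a worked illustration of this shape logic (the concrete numbers below are chosen for illustration and do not come from the example's source): with 'th' ordering, nb_row = nb_col = 5, border_mode='valid' and subsample=(2, 2), a (batch, channels, 14, 14) input is reported to yield a (batch, nb_filter, 31, 31) output, because conv_input_length inverts the usual convolution length formula:

# Illustrative values only:
# conv_input_length(14, 5, 'valid', 2) == (14 - 1) * 2 + 5 == 31
rows = (14 - 1) * 2 + 5   # 31
cols = (14 - 1) * 2 + 5   # 31
# => output shape (input_shape[0], nb_filter, 31, 31) for 'th' ordering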
Example #5
 def test_conv_input_length(self):
     self.assertEqual(3, conv_utils.conv_input_length(4, 2, 'same', 1))
     self.assertEqual(2, conv_utils.conv_input_length(2, 2, 'same', 2))
     self.assertEqual(4, conv_utils.conv_input_length(3, 2, 'valid', 1))
     self.assertEqual(4, conv_utils.conv_input_length(2, 2, 'valid', 2))
     self.assertEqual(3, conv_utils.conv_input_length(4, 2, 'full', 1))
     self.assertEqual(4, conv_utils.conv_input_length(3, 2, 'full', 2))
Example #6
 def test_conv_input_length(self):
     self.assertEqual(3, conv_utils.conv_input_length(4, 2, "same", 1))
     self.assertEqual(2, conv_utils.conv_input_length(2, 2, "same", 2))
     self.assertEqual(4, conv_utils.conv_input_length(3, 2, "valid", 1))
     self.assertEqual(4, conv_utils.conv_input_length(2, 2, "valid", 2))
     self.assertEqual(3, conv_utils.conv_input_length(4, 2, "full", 1))
     self.assertEqual(4, conv_utils.conv_input_length(3, 2, "full", 2))
Example #7
def deconv2d(x,
             kernel,
             output_shape,
             strides=(1, 1),
             border_mode='valid',
             dim_ordering='th',
             image_shape=None,
             filter_shape=None):
    '''2D deconvolution (transposed convolution).
    # Arguments
        x: input tensor.
        kernel: kernel tensor.
        output_shape: desired dimensions of output.
        strides: strides tuple.
        border_mode: string, "same" or "valid".
        dim_ordering: "tf" or "th".
            Whether to use Theano or TensorFlow dimension ordering
            in inputs/kernels/outputs.
    '''
    flip_filters = False
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))

    x = _preprocess_conv2d_input(x, dim_ordering)
    kernel = _preprocess_conv2d_kernel(kernel, dim_ordering)
    kernel = kernel.dimshuffle((1, 0, 2, 3))
    th_border_mode = _preprocess_border_mode(border_mode)
    np_kernel = kernel.eval()
    filter_shape = _preprocess_conv2d_filter_shape(dim_ordering, filter_shape)
    filter_shape = tuple(filter_shape[i] for i in (1, 0, 2, 3))

    op = T.nnet.abstract_conv.AbstractConv2d_gradInputs(
        imshp=output_shape,
        kshp=filter_shape,
        subsample=strides,
        border_mode=th_border_mode,
        filter_flip=not flip_filters)

    # Set output size as [None, None]
    output_size = [None] * 2

    # Check whether the height and width dimensions are None
    for i in [-2, -1]:
        if output_shape[i] is None:
            output_size[i] = conv_input_length(x.shape[i], filter_shape[i],
                                               border_mode, strides[i])
        else:
            output_size[i] = output_shape[i]

    conv_out = op(kernel, x, output_size)
    conv_out = _postprocess_conv2d_output(conv_out, x, border_mode, np_kernel,
                                          strides, dim_ordering)
    return conv_out
Example #8
import pytest

from keras.utils import conv_utils


def test_conv_input_length():
    assert conv_utils.conv_input_length(None, 7, 'same', 1) is None
    assert conv_utils.conv_input_length(112, 7, 'same', 1) == 112
    assert conv_utils.conv_input_length(112, 7, 'same', 2) == 223
    assert conv_utils.conv_input_length(28, 5, 'valid', 1) == 32
    assert conv_utils.conv_input_length(14, 5, 'valid', 2) == 31
    assert conv_utils.conv_input_length(36, 5, 'full', 1) == 32
    assert conv_utils.conv_input_length(18, 5, 'full', 2) == 31

    with pytest.raises(AssertionError):
        conv_utils.conv_output_length(18, 5, 'diagonal', 2)
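The assertions above follow from the inverse of the standard convolution output-length formula. For reference, here is a minimal sketch of what conv_input_length computes (written to match the assertions in this test; check the actual keras.utils.conv_utils source for the authoritative implementation):

def conv_input_length_sketch(output_length, filter_size, padding, stride):
    """Infer the input length that a convolution with the given filter size,
    padding and stride would need in order to produce `output_length` outputs."""
    if output_length is None:
        return None
    assert padding in {'same', 'valid', 'full'}
    if padding == 'same':
        pad = filter_size // 2
    elif padding == 'valid':
        pad = 0
    else:  # 'full'
        pad = filter_size - 1
    return (output_length - 1) * stride - 2 * pad + filter_size


# e.g. conv_input_length_sketch(112, 7, 'same', 2) == (112 - 1) * 2 - 6 + 7 == 223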
Example #9
def deconv2d(x,
             kernel,
             output_shape,
             strides=(1, 1),
             border_mode='valid',
             dim_ordering='default',
             image_shape=None,
             filter_shape=None):
    """2D deconvolution (i.e. transposed convolution).
    # Arguments
        x: input tensor.
        kernel: kernel tensor.
        output_shape: 1D int tensor for the output shape.
        strides: strides tuple.
        border_mode: string, `"same"` or `"valid"`.
        dim_ordering: `"tf"` or `"th"`.
            Whether to use Theano or TensorFlow dimension ordering
            for inputs/kernels/outputs.
    # Returns
        A tensor, result of transposed 2D convolution.
    # Raises
        ValueError: if `dim_ordering` is neither `tf` nor `th`.
    """
    if dim_ordering == 'default':
        dim_ordering = K.image_dim_ordering()
    if dim_ordering not in {'th', 'tf'}:
        raise ValueError('Unknown dim_ordering ' + str(dim_ordering))

    x = _preprocess_conv2d_input(x, dim_ordering)
    strides = (1, ) + strides + (1, )

    # Get output shape
    shape_b = tf.shape(x)[0]
    shape_h = output_shape[1]
    shape_w = output_shape[2]
    shape_c = output_shape[3]

    # print('Output Shape: ' + str(output_shape))
    # print('Input Shape: ' + str(x.get_shape()))

    # Compute output height if none
    if shape_h is None:
        shape_h = conv_input_length(
            tf.shape(x)[1], filter_shape[0], border_mode, strides[1])

    # Compute output width if none
    if shape_w is None:
        shape_w = conv_input_length(
            tf.shape(x)[2], filter_shape[1], border_mode, strides[2])

    # Compose output shape without Nones
    try:
        # tf.pack is the pre-1.0 TensorFlow name
        output_shape = tf.pack([shape_b, shape_h, shape_w, shape_c])
    except AttributeError:
        # tf.pack was renamed to tf.stack in TensorFlow 1.0
        output_shape = tf.stack([shape_b, shape_h, shape_w, shape_c])

    output_shape = _preprocess_deconv_output_shape(x, output_shape,
                                                   dim_ordering)
    kernel = _preprocess_conv2d_kernel(kernel, dim_ordering)
    kernel = tf.transpose(kernel, (0, 1, 3, 2))
    padding = _preprocess_border_mode(border_mode)

    x = tf.nn.conv2d_transpose(x,
                               kernel,
                               output_shape,
                               strides,
                               padding=padding)
    return _postprocess_conv2d_output(x, dim_ordering)
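A hypothetical call sketch (the shapes, names and values below are invented for illustration; they assume the deconv2d defined above running on the TensorFlow backend with 'tf' dim ordering, and are not taken from the original source):

import numpy as np
import tensorflow as tf

# Two samples of a 14 x 14 feature map with 8 channels ('tf' ordering).
x = tf.constant(np.random.random((2, 14, 14, 8)).astype('float32'))
# Kernel laid out as (rows, cols, in_channels, out_channels); deconv2d
# transposes it internally for tf.nn.conv2d_transpose.
kernel = tf.constant(np.random.random((3, 3, 8, 4)).astype('float32'))

# Stride 2 with 'same' padding upsamples 14 -> 28 per spatial dimension,
# so the fully specified output shape is (2, 28, 28, 4).
y = deconv2d(x, kernel,
             output_shape=(2, 28, 28, 4),
             strides=(2, 2),
             border_mode='same',
             dim_ordering='tf',
             filter_shape=(3, 3, 8, 4))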