Example #1
    def call(self, inputs):
        depthwise_conv_on_filters = []
        sliced_inputs = tf.split(inputs, self.input_dim, self.channel_axis)
        sliced_kernels = tf.split(self.depthwise_kernel, self.input_dim, 3)
        # See https://www.tensorflow.org/versions/r0.12/api_docs/python/array_ops/slicing_and_joining
        for i in range(self.input_dim):  # in Python 3, xrange was merged into range
            depthwise_conv_on_filters.append(K.conv3d(sliced_inputs[i],
                                                      sliced_kernels[i],
                                                      strides=self.strides,
                                                      padding=self.padding,
                                                      data_format=self.data_format,
                                                      dilation_rate=self.dilation_rate))

        depthwise_conv = K.concatenate(depthwise_conv_on_filters)
        pointwise_conv = K.conv3d(depthwise_conv, self.pointwise_kernel,
                                  strides=(1, 1, 1), padding=self.padding,
                                  data_format=self.data_format,
                                  dilation_rate=self.dilation_rate)

        outputs = pointwise_conv

        if self.bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
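
The split/convolve/concatenate pattern above is easier to see outside a layer. A minimal standalone sketch, assuming K is the Keras backend module and using made-up shapes (a 2-channel 8x8x8 volume, per-channel 3x3x3 kernels with depth multiplier 1, and a 1x1x1 pointwise kernel with 4 filters):

import numpy as np
import tensorflow as tf
from keras import backend as K  # or tensorflow.keras.backend, depending on the setup

x = K.constant(np.random.rand(1, 8, 8, 8, 2).astype('float32'))
depthwise_kernel = K.constant(np.random.rand(3, 3, 3, 2, 1).astype('float32'))
pointwise_kernel = K.constant(np.random.rand(1, 1, 1, 2, 4).astype('float32'))

# one 3-D convolution per input channel, then stitch the results back together
per_channel = [
    K.conv3d(x_c, k_c, strides=(1, 1, 1), padding='same', data_format='channels_last')
    for x_c, k_c in zip(tf.split(x, 2, axis=-1), tf.split(depthwise_kernel, 2, axis=3))
]
depthwise = K.concatenate(per_channel)                          # (1, 8, 8, 8, 2)
output = K.conv3d(depthwise, pointwise_kernel, padding='same')  # (1, 8, 8, 8, 4)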
Example #2
    def call(self, inputs):
        if self.rank == 1:
            if not self.Masked:
                outputs = K.conv1d(inputs,
                                   self.kernel,
                                   strides=self.strides[0],
                                   padding=self.padding,
                                   data_format=self.data_format,
                                   dilation_rate=self.dilation_rate[0])
            else:
                outputs = K.conv1d(inputs,
                                   self.kernel * self.kernel_mask,
                                   strides=self.strides[0],
                                   padding=self.padding,
                                   data_format=self.data_format,
                                   dilation_rate=self.dilation_rate[0])
        if self.rank == 2:
            if not self.Masked:
                outputs = K.conv2d(inputs,
                                   self.kernel,
                                   strides=self.strides,
                                   padding=self.padding,
                                   data_format=self.data_format,
                                   dilation_rate=self.dilation_rate)
            else:
                outputs = K.conv2d(inputs,
                                   self.kernel * self.kernel_mask,
                                   strides=self.strides,
                                   padding=self.padding,
                                   data_format=self.data_format,
                                   dilation_rate=self.dilation_rate)
        if self.rank == 3:
            if not self.Masked:
                outputs = K.conv3d(inputs,
                                   self.kernel,
                                   strides=self.strides,
                                   padding=self.padding,
                                   data_format=self.data_format,
                                   dilation_rate=self.dilation_rate)
            else:
                outputs = K.conv3d(inputs,
                                   self.kernel * self.kernel_mask,
                                   strides=self.strides,
                                   padding=self.padding,
                                   data_format=self.data_format,
                                   dilation_rate=self.dilation_rate)

        if self.use_bias:
            outputs = K.bias_add(outputs,
                                 self.bias,
                                 data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
Example #3
    def call(self, input_tensor, training=None):
        input_transposed = tf.transpose(input_tensor, [4, 0, 1, 2, 3, 5])
        input_shape = K.shape(input_transposed)
        input_tensor_reshaped = K.reshape(input_transposed, [
            input_shape[1] * input_shape[0], self.input_height, self.input_width, self.input_depth, self.input_num_atoms])
        input_tensor_reshaped.set_shape((None, self.input_height, self.input_width, self.input_depth, self.input_num_atoms))


        if self.upsamp_type == 'resize':
            # K.resize_images only takes two scale factors; for a 5-D volume the
            # matching backend call is K.resize_volumes (one factor per spatial dim).
            upsamp = K.resize_volumes(input_tensor_reshaped, self.scaling, self.scaling, self.scaling, 'channels_last')
            outputs = K.conv3d(upsamp, kernel=self.W, strides=(1, 1, 1), padding=self.padding, data_format='channels_last')
        elif self.upsamp_type == 'subpix':
            conv = K.conv3d(input_tensor_reshaped, kernel=self.W, strides=(1, 1, 1), padding='same',
                            data_format='channels_last')
            outputs = tf.depth_to_space(conv, self.scaling)
        else:
            batch_size = input_shape[1] * input_shape[0]

            # Infer the dynamic output shape:
            out_height = deconv_length(self.input_height, self.scaling, self.kernel_size, self.padding)
            out_width = deconv_length(self.input_width, self.scaling, self.kernel_size, self.padding)
            out_depth = deconv_length(self.input_depth, self.scaling, self.kernel_size, self.padding)
            output_shape = (batch_size, out_height, out_width, out_depth, self.num_capsule * self.num_atoms)

            outputs = K.conv3d_transpose(input_tensor_reshaped, self.W, output_shape,
                                         (self.scaling, self.scaling, self.scaling),
                                         padding=self.padding, data_format='channels_last')

        votes_shape = K.shape(outputs)
        _, conv_height, conv_width, conv_depth, _ = outputs.get_shape()

        votes = K.reshape(outputs, [input_shape[1], input_shape[0], votes_shape[1], votes_shape[2], votes_shape[3],
                                    self.num_capsule, self.num_atoms])
        votes.set_shape((None, self.input_num_capsule, conv_height.value, conv_width.value, conv_depth.value,
                         self.num_capsule, self.num_atoms))

        logit_shape = K.stack([
            input_shape[1], input_shape[0], votes_shape[1], votes_shape[2], votes_shape[3], self.num_capsule])
        biases_replicated = K.tile(self.b, [votes_shape[1], votes_shape[2], votes_shape[3], 1, 1])

        activations = update_routing(
            votes=votes,
            biases=biases_replicated,
            logit_shape=logit_shape,
            num_dims=7,
            input_dim=self.input_num_capsule,
            output_dim=self.num_capsule,
            num_routing=self.routings)

        return activations
Example #4
    def call(self, input_tensor, training=None):

        input_transposed = tf.transpose(input_tensor, [0, 3, 4, 1, 2])
        input_shape = K.shape(input_transposed)
        input_tensor_reshaped = K.reshape(input_transposed, [input_shape[0], 1, self.input_num_capsule * self.input_num_atoms, self.input_height, self.input_width])

        input_tensor_reshaped.set_shape((None, 1, self.input_num_capsule * self.input_num_atoms, self.input_height, self.input_width))

        # conv = Conv3D(input_tensor_reshaped, self.W, (self.strides, self.strides),
        #                 padding=self.padding, data_format='channels_first')

        conv = K.conv3d(input_tensor_reshaped, self.W, strides=(self.input_num_atoms, self.strides, self.strides), padding=self.padding, data_format='channels_first')

        votes_shape = K.shape(conv)
        _, _, _, conv_height, conv_width = conv.get_shape()
        conv = tf.transpose(conv, [0, 2, 1, 3, 4])
        votes = K.reshape(conv, [input_shape[0], self.input_num_capsule, self.num_capsule, self.num_atoms, votes_shape[3], votes_shape[4]])
        votes.set_shape((None, self.input_num_capsule, self.num_capsule, self.num_atoms, conv_height.value, conv_width.value))

        logit_shape = K.stack([input_shape[0], self.input_num_capsule, self.num_capsule, votes_shape[3], votes_shape[4]])
        biases_replicated = K.tile(self.b, [1, 1, conv_height.value, conv_width.value])

        activations = update_routing(
            votes=votes,
            biases=biases_replicated,
            logit_shape=logit_shape,
            num_dims=6,
            input_dim=self.input_num_capsule,
            output_dim=self.num_capsule,
            num_routing=self.routings)

        a2 = tf.transpose(activations, [0, 3, 4, 1, 2])
        return a2
Example #5
    def call(self, inputs):
        if self.rank == 1:
            outputs = K.conv1d(
                inputs,
                self.kernel,
                strides=self.strides[0],
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate[0])
        if self.rank == 2:
            outputs = K.conv2d(
                inputs,
                self.kernel,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)
        if self.rank == 3:
            outputs = K.conv3d(
                inputs,
                self.kernel,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)

        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
Example #6
 def cal_conv(self, inputs, kernel):
     if self.rank == 1:
         outputs = K.conv1d(
             inputs,
             kernel,
             strides=self.strides[0],
             padding=self.padding,
             data_format=self.data_format,
             dilation_rate=self.dilation_rate[0])
     if self.rank == 2:
         outputs = K.conv2d(
             inputs,
             kernel,
             strides=self.strides,
             padding=self.padding,
             data_format=self.data_format,
             dilation_rate=self.dilation_rate)
     if self.rank == 3:
         outputs = K.conv3d(
             inputs,
             kernel,
             strides=self.strides,
             padding=self.padding,
             data_format=self.data_format,
             dilation_rate=self.dilation_rate)
     return outputs
Example #7
    def call(self, inputs, training=None):
        def _l2normalize(v, eps=1e-12):
            return v / (K.sum(v**2)**0.5 + eps)

        def power_iteration(W, u):
            _u = u
            _v = _l2normalize(K.dot(_u, K.transpose(W)))
            _u = _l2normalize(K.dot(_v, W))
            return _u, _v

        if self.spectral_normalization:
            W_shape = self.kernel.shape.as_list()
            #Flatten the Tensor
            W_reshaped = K.reshape(self.kernel, [-1, W_shape[-1]])
            _u, _v = power_iteration(W_reshaped, self.u)
            #Calculate Sigma
            sigma = K.dot(_v, W_reshaped)
            sigma = K.dot(sigma, K.transpose(_u))
            #normalize it
            W_bar = W_reshaped / sigma
            #reshape weight tensor
            if training in {0, False}:
                W_bar = K.reshape(W_bar, W_shape)
            else:
                with tf.control_dependencies([self.u.assign(_u)]):
                    W_bar = K.reshape(W_bar, W_shape)

            # update weight
            self.kernel = W_bar

        if self.rank == 1:
            outputs = K.conv1d(inputs,
                               self.kernel,
                               strides=self.strides[0],
                               padding=self.padding,
                               data_format=self.data_format,
                               dilation_rate=self.dilation_rate[0])
        if self.rank == 2:
            outputs = K.conv2d(inputs,
                               self.kernel,
                               strides=self.strides,
                               padding=self.padding,
                               data_format=self.data_format,
                               dilation_rate=self.dilation_rate)
        if self.rank == 3:
            outputs = K.conv3d(inputs,
                               self.kernel,
                               strides=self.strides,
                               padding=self.padding,
                               data_format=self.data_format,
                               dilation_rate=self.dilation_rate)

        if self.use_bias:
            outputs = K.bias_add(outputs,
                                 self.bias,
                                 data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
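
For reference, the power-iteration block above approximates the spectral norm of the flattened kernel. With row vectors u and v as in the code, one iteration computes

    v \leftarrow \frac{u W^{\top}}{\lVert u W^{\top}\rVert_2}, \qquad
    u \leftarrow \frac{v W}{\lVert v W\rVert_2}, \qquad
    \sigma \approx v W u^{\top}, \qquad \bar{W} = W / \sigma,

so the kernel actually used by the convolution has an (approximately) unit largest singular value.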
Example #8
    def call(self, x, mask=None):
        # compute the candidate hidden state

        # legacy Keras 1 backend signature (border_mode / dim_ordering / volume_shape)
        input_shape = self.input_spec[0].shape
        conv_out = K.conv3d(x,
                            self.W,
                            strides=self.subsample,
                            border_mode=self.border_mode,
                            dim_ordering=self.dim_ordering,
                            volume_shape=input_shape,
                            filter_shape=self.W_shape)

        if self.dim_ordering == 'th':
            transform = conv_out + K.reshape(self.b,
                                             (1, self.nb_filter, 1, 1, 1))
        elif self.dim_ordering == 'tf':
            transform = conv_out + K.reshape(self.b,
                                             (1, 1, 1, 1, self.nb_filter))
        else:
            raise Exception('Invalid dim_ordering: ' + self.dim_ordering)

        transform = self.activation(transform)

        transform_gate = K.conv3d(x,
                                  self.W_gate,
                                  strides=self.subsample,
                                  border_mode=self.border_mode,
                                  dim_ordering=self.dim_ordering,
                                  volume_shape=input_shape,
                                  filter_shape=self.W_shape)

        if self.bias:
            if self.dim_ordering == 'th':
                transform_gate += K.reshape(self.b_gate,
                                            (1, self.nb_filter, 1, 1, 1))
            elif self.dim_ordering == 'tf':
                transform_gate += K.reshape(self.b_gate,
                                            (1, 1, 1, 1, self.nb_filter))
            else:
                raise Exception('Invalid dim_ordering: ' + self.dim_ordering)

        transform_gate = K.sigmoid(transform_gate)
        #transform_gate = self.activation(transform_gate)
        carry_gate = 1.0 - transform_gate

        return transform * transform_gate + x * carry_gate
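
The return value above is the usual highway-network combination of a candidate activation H(x) and a transform gate T(x); in the snippet's terms,

    y = H(x) \odot T(x) + x \odot (1 - T(x)), \qquad T(x) = \sigma(\mathrm{conv3d}(x, W_{gate}) + b_{gate}),

with H(x) the activated convolution of x with W, and the carry gate simply 1 - T(x).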
Example #9
def sf_conv3d_base(x):
    sf2, expand_tensor = x

    return K.conv3d(sf2,
                    expand_tensor,
                    strides=[1, 1, 1, 1, 1],
                    padding="valid",
                    data_format="channels_first")
Example #10
    def call(self, inputs, mask=None):
        # compute the candidate hidden state
        # Arguments
        '''
        x: Tensor or variable.
        kernel: kernel tensor.
        strides: strides tuple.
        padding: string, `"same"` or `"valid"`.
        data_format: string, `"channels_last"` or `"channels_first"`.
            Whether to use Theano or TensorFlow/CNTK data format
            for inputs/kernels/outputs.
        dilation_rate: tuple of 3 integers.
        '''
        transform = K.conv3d(inputs,
                             self.kernel,
                             strides=self.strides,
                             padding=self.padding,
                             data_format=self.data_format,
                             dilation_rate=self.dilation_rate)

        if self.use_bias:
            transform = K.bias_add(transform,
                                   self.bias,
                                   data_format=self.data_format)

        if self.activation is not None:
            transform = self.activation(transform)

        transform_gate = K.conv3d(inputs,
                                  self.kernel_gate,
                                  strides=self.strides,
                                  padding=self.padding,
                                  data_format=self.data_format,
                                  dilation_rate=self.dilation_rate)

        if self.use_bias:
            transform_gate = K.bias_add(transform_gate,
                                        self.bias_gate,
                                        data_format=self.data_format)

        transform_gate = K.sigmoid(transform_gate)

        carry_gate = 1.0 - transform_gate

        return transform * transform_gate + inputs * carry_gate
Example #11
def wavelet_transform(img, filters=None, levels=None):
    if levels is None:
        vimg = tf.pad(img, [(0, 0),
                            (0, 2**int(np.ceil(np.log2(K.int_shape(img)[1]))) -
                             K.int_shape(img)[1]),
                            (0, 2**int(np.ceil(np.log2(K.int_shape(img)[2]))) -
                             K.int_shape(img)[2]),
                            (0, 2**int(np.ceil(np.log2(K.int_shape(img)[3]))) -
                             K.int_shape(img)[3]), (0, 0)])
    else:
        vimg = img

    if filters is None:
        w = pywt.Wavelet('db4')
        dec_hi = np.array(w.dec_hi[::-1])
        dec_lo = np.array(w.dec_lo[::-1])
        filters = np.stack([
            dec_lo[None, None, :] * dec_lo[None, :, None] *
            dec_lo[:, None, None], dec_lo[None, None, :] *
            dec_lo[None, :, None] * dec_hi[:, None, None],
            dec_lo[None, None, :] * dec_hi[None, :, None] *
            dec_lo[:, None, None], dec_lo[None, None, :] *
            dec_hi[None, :, None] * dec_hi[:, None, None],
            dec_hi[None, None, :] * dec_lo[None, :, None] *
            dec_lo[:, None, None], dec_hi[None, None, :] *
            dec_lo[None, :, None] * dec_hi[:, None, None],
            dec_hi[None, None, :] * dec_hi[None, :, None] *
            dec_lo[:, None, None], dec_hi[None, None, :] *
            dec_hi[None, :, None] * dec_hi[:, None, None]
        ]).transpose((1, 2, 3, 0))[:, :, :, None, :]
        filters = K.constant(filters)
    if levels is None:
        print(K.int_shape(vimg)[1:4])
        levels = pywt.dwtn_max_level(K.int_shape(vimg)[1:4], 'db4')
        print(levels)

    t = vimg.shape[1]
    h = vimg.shape[2]
    w = vimg.shape[3]
    res = K.conv3d(vimg, filters, strides=(2, 2, 2), padding='same')
    if levels > 1:
        # recurse on the low-pass (approximation) channel only
        res = K.concatenate([
            wavelet_transform(res[:, :, :, :, :1], filters, levels=levels - 1),
            res[:, :, :, :, 1:]
        ], axis=-1)
    '''
    res = K.permute_dimensions(res, (0, 4, 1, 2, 3))
    res = K.reshape(res, (-1, 2, t // 2, h // 2, w // 2))
    res = K.permute_dimensions(res, (0, 2, 1, 3, 4))
    res = K.reshape(res, (-1, 1, t, h, w))
    res = K.permute_dimensions(res, (0, 2, 3, 4, 1))
    '''
    res = K.reshape(res, (-1, t, h, w, 1))
    #print('wt', levels, K.int_shape(img), K.int_shape(vimg), K.int_shape(filters), K.int_shape(res))
    return res
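
A minimal smoke test for wavelet_transform, assuming K is the Keras backend and PyWavelets (pywt) is installed; the 16x16x16 single-channel volume is a made-up shape chosen so that no padding is needed:

import numpy as np
from keras import backend as K  # or tensorflow.keras.backend

vol = K.constant(np.random.rand(1, 16, 16, 16, 1).astype('float32'))
coeffs = wavelet_transform(vol)  # the db4 filter bank is built internally via pywt
print(K.int_shape(coeffs))       # spatial dims preserved, coefficients packed into one channel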
Example #12
def _alphabeta_dtd(layer, R, beta, parameter2):
    print('_convolutional3d_alphabeta_dtd')

    alpha = 1 + beta

    X = layer.input + 1e-12

    if alpha != 0:
        Wp = K.maximum(layer.kernel, 1e-12)
        Zp = K.conv3d(X,
                      Wp,
                      strides=layer.strides,
                      padding=layer.padding,
                      data_format=layer.data_format)
        Salpha = alpha * (R / Zp)
        Calpha = K.conv3d_transpose(Salpha,
                                    Wp,
                                    K.shape(layer.input),
                                    strides=layer.strides,
                                    padding=layer.padding,
                                    data_format=layer.data_format)
    else:
        Calpha = 0

    if beta != 0:
        Wn = K.minimum(layer.kernel, -1e-12)
        Zn = K.conv3d(X,
                      Wn,
                      strides=layer.strides,
                      padding=layer.padding,
                      data_format=layer.data_format)
        Sbeta = -beta * (R / Zn)
        Cbeta = K.conv3d_transpose(Sbeta,
                                   Wn,
                                   K.shape(layer.input),
                                   strides=layer.strides,
                                   padding=layer.padding,
                                   data_format=layer.data_format)
    else:
        Cbeta = 0

    return X * (Calpha + Cbeta)
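
In words, the snippet above is an alpha-beta deep-Taylor/relevance rule: the positive and negative parts of the kernel redistribute the upper-layer relevance R separately, with conv3d computing the pre-activations z and conv3d_transpose acting as W^T. Per input element, and with alpha = 1 + beta as set above,

    R_i = x_i \left( \alpha \sum_j \frac{w_{ij}^{+} R_j}{z_j^{+}} - \beta \sum_j \frac{w_{ij}^{-} R_j}{z_j^{-}} \right), \qquad z_j^{\pm} = \sum_i x_i w_{ij}^{\pm}.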
Example #13
    def call(self, inputs):
        if self.rank == 1:
            expert_outputs = K.conv1d(inputs,
                                      self.expert_kernel,
                                      strides=self.strides[0],
                                      padding=self.padding,
                                      data_format=self.data_format,
                                      dilation_rate=self.dilation_rate[0])
        if self.rank == 2:
            expert_outputs = K.conv2d(inputs,
                                      self.expert_kernel,
                                      strides=self.strides,
                                      padding=self.padding,
                                      data_format=self.data_format,
                                      dilation_rate=self.dilation_rate)
        if self.rank == 3:
            expert_outputs = K.conv3d(inputs,
                                      self.expert_kernel,
                                      strides=self.strides,
                                      padding=self.padding,
                                      data_format=self.data_format,
                                      dilation_rate=self.dilation_rate)

        expert_outputs = K.reshape(expert_outputs,
                                   (-1, ) + self.o_shape[1:-1] +
                                   (self.n_filters, self.n_experts_per_filter))

        if self.use_expert_bias:
            expert_outputs = K.bias_add(expert_outputs,
                                        self.expert_bias,
                                        data_format=self.data_format)

        if self.expert_activation is not None:
            expert_outputs = self.expert_activation(expert_outputs)

        gating_outputs = tf.tensordot(
            inputs, self.gating_kernel,
            axes=self.rank + 1)  # samples x n_filters x n_experts_per_filter

        if self.use_gating_bias:
            gating_outputs = K.bias_add(gating_outputs,
                                        self.gating_bias,
                                        data_format=self.data_format)

        if self.gating_activation is not None:
            gating_outputs = self.gating_activation(gating_outputs)

        gating_outputs = K.reshape(gating_outputs,
                                   self.new_gating_outputs_shape)
        outputs = K.sum(expert_outputs * gating_outputs,
                        axis=-1,
                        keepdims=False)

        return outputs
Example #14
    def get_grad_tensor_3d(img_tensor, apply_gauss=True):

        grad_x = K.conv3d(img_tensor, SOBEL_X_3D, padding='same')
        grad_y = K.conv3d(img_tensor, SOBEL_Y_3D, padding='same')
        grad_z = K.conv3d(img_tensor, SOBEL_Z_3D, padding='same')
        grad_tensor = K.sqrt(grad_x * grad_x + grad_y * grad_y + grad_z*grad_z)
        grad_tensor = K.greater(grad_tensor, 100.0 * K.epsilon())
        grad_tensor = K.cast(grad_tensor, K.floatx())
        grad_tensor = K.clip(grad_tensor, K.epsilon(), 1.0)
        grad_map = K.sum(grad_tensor, axis=-1, keepdims=True)
        for i in range(n_classes):
            if i ==0:
                grad_tensor=grad_map[:]
            else:
                grad_tensor = K.concatenate([grad_tensor,grad_map], axis=-1)
        # del grad_map
        # grad_tensor = K.concatenate([grad_tensor, grad_tensor], axis=CHANNEL_AXIS)
        grad_tensor = K.greater(grad_tensor, 100.0 * K.epsilon())
        grad_tensor = K.cast(grad_tensor, K.floatx())
        if apply_gauss:
            grad_tensor = K.conv3d(grad_tensor, GAUSS_KERNEL_3D, padding='same')
        return grad_tensor
Example #15
    def var_rad_layer(x):
        in_img, in_vox = x

        tf_weights = tf.expand_dims(
            tf.stack([
                gkern_tf(d=3,
                         kernlen=k_dim,
                         nsigs=[
                             c_wid / vox_size[0, 0], c_wid / vox_size[0, 1],
                             c_wid / vox_size[0, 2]
                         ])
                for c_wid in np.linspace(min_width, max_width, gk_count)
            ], -1), -2)
        return K.conv3d(in_img, kernel=tf_weights, padding='same')
Example #16
def do_3d_convolution(feature_matrix,
                      kernel_matrix,
                      pad_edges=False,
                      stride_length_px=1):
    """Convolves 3-D feature maps with 3-D kernel.

    m = number of rows in kernel
    n = number of columns in kernel
    h = number of heights in kernel
    c = number of output feature maps (channels)

    :param feature_matrix: Input feature maps (numpy array).  Dimensions must be
        M x N x H x C or 1 x M x N x H x C.
    :param kernel_matrix: Kernel as numpy array.  Dimensions must be
        m x n x h x C x c.
    :param pad_edges: See doc for `do_2d_convolution`.
    :param stride_length_px: See doc for `do_2d_convolution`.
    :return: feature_matrix: Output feature maps (numpy array).  Dimensions will
        be 1 x M x N x H x c or
        1 x (M - m + 1) x (N - n + 1) x (H - h + 1) x c, depending on
        whether or not edges are padded.
    """

    error_checking.assert_is_numpy_array_without_nan(feature_matrix)
    error_checking.assert_is_numpy_array_without_nan(kernel_matrix)
    error_checking.assert_is_numpy_array(kernel_matrix, num_dimensions=5)
    error_checking.assert_is_boolean(pad_edges)
    error_checking.assert_is_integer(stride_length_px)
    error_checking.assert_is_geq(stride_length_px, 1)

    if len(feature_matrix.shape) == 4:
        feature_matrix = numpy.expand_dims(feature_matrix, axis=0)
    error_checking.assert_is_numpy_array(feature_matrix, num_dimensions=5)

    if pad_edges:
        padding_string = 'same'
    else:
        padding_string = 'valid'

    feature_tensor = K.conv3d(x=K.variable(feature_matrix),
                              kernel=K.variable(kernel_matrix),
                              strides=(stride_length_px, stride_length_px,
                                       stride_length_px),
                              padding=padding_string,
                              data_format='channels_last')

    return feature_tensor.eval(session=K.get_session())
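
A small usage sketch for the docstring above, with made-up shapes (a 4x4x4 single-channel volume and two 3x3x3 kernels); it assumes the TF1-style session that the function relies on via K.get_session():

import numpy

feature_matrix = numpy.random.rand(4, 4, 4, 1)    # M x N x H x C
kernel_matrix = numpy.random.rand(3, 3, 3, 1, 2)  # m x n x h x C x c
out = do_3d_convolution(feature_matrix, kernel_matrix,
                        pad_edges=False, stride_length_px=1)
print(out.shape)  # (1, 2, 2, 2, 2) with 'valid' padding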
Example #17
 def _compute_mask_output(self, mask_tensor):
     if self.layer.rank == 1:
         mask_output = K.conv1d(mask_tensor, self.mask_kernel,
                                self.layer.strides[0], self.layer.padding,
                                self.layer.data_format,
                                self.layer.dilation_rate[0])
     if self.layer.rank == 2:
         mask_output = K.conv2d(mask_tensor, self.mask_kernel,
                                self.layer.strides, self.layer.padding,
                                self.layer.data_format,
                                self.layer.dilation_rate)
     if self.layer.rank == 3:
         mask_output = K.conv3d(mask_tensor, self.mask_kernel,
                                self.layer.strides, self.layer.padding,
                                self.layer.data_format,
                                self.layer.dilation_rate)
     return mask_output
Example #18
def _z_dtd(layer, R, parameter1, parameter2):
    print('_convolutional3d_z_dtd')

    X = layer.input + 1e-12
    Z = K.conv3d(X,
                 layer.kernel,
                 strides=layer.strides,
                 padding=layer.padding,
                 data_format=layer.data_format)
    S = R / Z
    C = K.conv3d_transpose(S,
                           layer.kernel,
                           K.shape(layer.input),
                           strides=layer.strides,
                           padding=layer.padding,
                           data_format=layer.data_format)
    return X * C
Example #19
    def call(self, x, **kwargs):
        x = K.spatial_3d_padding(x, padding=self.padding)

        # emulate a depthwise conv3d by convolving each channel separately
        channels = x.shape[-1]
        x = K.concatenate(
            [
                K.conv3d(
                    x=x[:, :, :, :, i:i + 1],
                    kernel=self.blur_kernel[..., i:i + 1, :],
                    strides=self.pool_size,
                    padding='valid',
                ) for i in range(0, channels)
            ],
            axis=-1,
        )

        return x
Example #20
    def call(self, inputs):
        # Reuse the tied layer's kernel: swap its input/output channel axes and
        # flip the three spatial axes (a transposed-convolution style kernel).
        self.kernel = K.reverse(
            K.permute_dimensions(self.tied_to.get_weights()[0], (0, 1, 2, 4, 3)),
            axes=(0, 1, 2))

        outputs = K.conv3d(inputs,
                           self.kernel,
                           strides=self.strides,
                           padding=self.padding,
                           data_format=self.data_format,
                           dilation_rate=self.dilation_rate)

        if self.use_bias:
            outputs = K.bias_add(outputs,
                                 self.bias,
                                 data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
Example #21
 def call(self, inputs, training=None):
     def _l2normalize(v, eps=1e-12):
         return v / (K.sum(v ** 2) ** 0.5 + eps)
     def power_iteration(W, u):
         # According to the paper, we only need one power iteration step.
         _u = u
         _v = _l2normalize(K.dot(_u, K.transpose(W)))
         _u = _l2normalize(K.dot(_v, W))
         return _u, _v
     #Spectral Normalization
     W_shape = self.kernel.shape.as_list()
     #Flatten the Tensor
     W_reshaped = K.reshape(self.kernel, [-1, W_shape[-1]])
     _u, _v = power_iteration(W_reshaped, self.u)
     #Calculate Sigma
     sigma = K.dot(_v, W_reshaped)
     sigma = K.dot(sigma, K.transpose(_u))
     #normalize it
     W_bar = W_reshaped / sigma
     #reshape weight tensor
     if training in {0, False}:
         W_bar = K.reshape(W_bar, W_shape)
     else:
         # update the stored singular-vector estimate only while training
         with tf.control_dependencies([self.u.assign(_u)]):
             W_bar = K.reshape(W_bar, W_shape)

     outputs = K.conv3d(
             inputs,
             W_bar,
             strides=self.strides,
             padding=self.padding,
             data_format=self.data_format,
             dilation_rate=self.dilation_rate)
     if self.use_bias:
         outputs = K.bias_add(
             outputs,
             self.bias,
             data_format=self.data_format)
     if self.activation is not None:
         return self.activation(outputs)
     return outputs
Example #22
def depthwiseConvGPU(cube, k):
    '''
    Convolves a kernel depthwise (each z-plane independently)
    by using K.conv3d with an (x,y,1) 3d kernel

    Parameters
    ----------
    cube : ndarray.
        3d ndarray, (x, y, z).
    k : ndarray.
        2d ndarray (x, y) to be convolved depthwise.

    Returns
    -------
    cubeC : ndarray.
        3d ndarray (x, y, z) convolved with kernel depthwise.
    '''
    kT = K.variable(k.reshape(1, 1, k.shape[0], k.shape[1], 1))
    xT = K.variable(
        cube.reshape(1, 1, cube.shape[0], cube.shape[1], cube.shape[2]))
    out = K.conv3d(xT, kernel=kT, border_mode='same')  # Keras 1 backend keyword
    cubeC = out.eval()
    return np.squeeze(cubeC)
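
A quick usage sketch for the docstring above. Note that the call uses the legacy Keras 1 border_mode keyword and 'th'-style kernel shapes, so this only runs against a Keras 1 backend; the 3x3 box-blur kernel is just an illustrative choice:

import numpy as np

cube = np.random.rand(32, 32, 8).astype('float32')  # (x, y, z)
box = np.ones((3, 3), dtype='float32') / 9.0         # 2-D kernel applied to each z-plane
blurred = depthwiseConvGPU(cube, box)
print(blurred.shape)  # (32, 32, 8)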
Example #23
    def call(self, input_tensor, training=None):
        input_transposed = tf.transpose(input_tensor, [4, 0, 1, 2, 3, 5])

        input_shape = K.shape(input_transposed)

        input_tensor_reshaped = K.reshape(input_transposed, [
            input_shape[0] * input_shape[1], self.input_height,
            self.input_width, self.input_depth, self.input_num_atoms])

        input_tensor_reshaped.set_shape((None, self.input_height, self.input_width, self.input_depth, self.input_num_atoms))

        conv = K.conv3d(input_tensor_reshaped, self.W, (self.strides, self.strides, self.strides),
                        padding=self.padding, data_format='channels_last', dilation_rate=(1, 1, 1))

        votes_shape = K.shape(conv)
        _, conv_height, conv_width, conv_depth, _ = conv.get_shape()
        
        votes = K.reshape(conv, [input_shape[1], input_shape[0], votes_shape[1], votes_shape[2], votes_shape[3],
                                 self.num_capsule, self.num_atoms])
        votes.set_shape((None, self.input_num_capsule, conv_height.value, conv_width.value, conv_depth.value,
                         self.num_capsule, self.num_atoms))

        logit_shape = K.stack([
            input_shape[1], input_shape[0], votes_shape[1], votes_shape[2], votes_shape[3], self.num_capsule])

        biases_replicated = K.tile(self.b, [conv_height.value, conv_width.value, conv_depth.value, 1, 1])

        activations = update_routing(
            votes=votes,
            biases=biases_replicated,
            logit_shape=logit_shape,
            num_dims=7,
            input_dim=self.input_num_capsule,
            output_dim=self.num_capsule,
            num_routing=self.routings)

        return activations
Example #24
    def call(self, inputs, **kwargs):
        t1, t2, t3, t4, t5 = inputs
        # concatenation = keras.layers.Concatenate()
        layer_list = []
        for i in range(t1.shape[3]):
            layer_list.append(t1[:, :, :, i:i+1])
            layer_list.append(t2[:, :, :, i:i+1])
            layer_list.append(t3[:, :, :, i:i+1])
            layer_list.append(t4[:, :, :, i:i+1])
            layer_list.append(t5[:, :, :, i:i+1])


        tensor = keras.backend.stack(layer_list, axis=-1)
        # print(self.kernel.shape)
        tensor = K.conv3d(tensor, self.kernel, padding='same', strides=(1, 1, 5))
        # print(tensor.shape)

        # tensor = tf.layers.conv3d(tensor, t1.shape[3], (1, 1, 5), activation='relu', strides=(1, 1, 5), padding='same')
        # conv3D_layer = keras.layers.Conv3D(int(t1.shape[3]), (1, 1, 5), strides=(1, 1, 5), activation='relu', padding='same')
        # conv3D_layer.set_weights = self.add_weight(name='kernel', shape=(1, 1, 5, int(t1.shape[3])), initializer='uniform', trainable=True)
        # tensor = conv3D_layer(tensor)

        tensor = keras.backend.squeeze(tensor, axis=-2)
        return tensor
Example #25
    def call(self, inputs):
        if self.rank == 1:
            _conv = lambda x, k: K.conv1d(  # noqa: E731
                x,
                k,
                strides=self.strides[0],
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate[0],
            )
        if self.rank == 2:
            _conv = lambda x, k: K.conv2d(  # noqa: E731
                x,
                k,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate,
            )
        if self.rank == 3:
            _conv = lambda x, k: K.conv3d(  # noqa: E731
                x,
                k,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate,
            )

        def _both_call(inputs, convfunc):
            x, lo, hi = inputs
            output = _single_call(x, convfunc)
            out_lo, out_hi = _interval_call([lo, hi], convfunc)
            output = [output, out_lo, out_hi]
            return output

        def _interval_call(inputs, convfunc):
            lo, hi = inputs
            lo_neg = K.minimum(lo, 0.0)
            lo_pos = K.maximum(lo, 0.0)
            hi_neg = K.minimum(hi, 0.0)
            hi_pos = K.maximum(hi, 0.0)

            min_kernel_pos = K.maximum(self.kernel - self.min_kernel, 0.0)
            min_kernel_neg = K.minimum(self.kernel - self.min_kernel, 0.0)
            max_kernel_pos = K.maximum(self.kernel + self.max_kernel, 0.0)
            max_kernel_neg = K.minimum(self.kernel + self.max_kernel, 0.0)

            lo_out = (convfunc(lo_pos, min_kernel_pos) +
                      convfunc(hi_pos, min_kernel_neg) +
                      convfunc(lo_neg, max_kernel_pos) +
                      convfunc(hi_neg, max_kernel_neg))
            hi_out = (convfunc(lo_pos, max_kernel_neg) +
                      convfunc(hi_pos, max_kernel_pos) +
                      convfunc(lo_neg, min_kernel_neg) +
                      convfunc(hi_neg, min_kernel_pos))
            if self.use_bias:
                lo_out = K.bias_add(
                    lo_out,
                    self.bias - self.min_bias,
                    data_format=self.data_format,
                )
                hi_out = K.bias_add(
                    hi_out,
                    self.bias + self.max_bias,
                    data_format=self.data_format,
                )
            if self.activation is not None:
                lo_out = self.activation(lo_out)
                hi_out = self.activation(hi_out)
            output = [lo_out, hi_out]
            return output

        def _single_call(inputs, convfunc):
            x = inputs
            output = convfunc(x, self.kernel)
            if self.use_bias:
                output = K.bias_add(output,
                                    self.bias,
                                    data_format=self.data_format)
            if self.activation is not None:
                output = self.activation(output)
            return output

        if isinstance(inputs, list):
            if len(inputs) == 2:
                return _interval_call(inputs, _conv)
            elif len(inputs) == 3:
                return _both_call(inputs, _conv)
        else:
            return _single_call(inputs, _conv)
Example #26
    def call(self, inputs):
        assert isinstance(inputs, list), 'must input a list containing two tensors'
        assert len(inputs) == 2, 'must input two tensors, one for the real part' \
            + ' and the other for the imaginary part.'

        if self.rank == 1:
            outputs_real = K.conv1d(
                inputs[0],
                self.kernel[0],
                strides=self.strides[0],
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate[0])\
                    - K.conv1d(
                inputs[1],
                self.kernel[1],
                strides=self.strides[0],
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate[0])
            outputs_imag = K.conv1d(
                inputs[0],
                self.kernel[1],
                strides=self.strides[0],
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate[0])\
                + K.conv1d(
                inputs[1],
                self.kernel[0],
                strides=self.strides[0],
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate[0])
        if self.rank == 2:
            outputs_real = K.conv2d(
                inputs[0],
                self.kernel[0],
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)\
                - K.conv2d(
                inputs[1],
                self.kernel[1],
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)
            outputs_imag = K.conv2d(
                inputs[0],
                self.kernel[1],
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)\
                + K.conv2d(
                inputs[1],
                self.kernel[0],
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)
        if self.rank == 3:
            outputs_real = K.conv3d(
                inputs[0],
                self.kernel[0],
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)\
                - K.conv3d(
                inputs[1],
                self.kernel[1],
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)
            outputs_imag = K.conv3d(
                inputs[0],
                self.kernel[1],
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)\
                + K.conv3d(
                inputs[1],
                self.kernel[0],
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)

        if self.output_merge:
            outputs = K.concatenate((K.expand_dims(
                outputs_real, axis=1), K.expand_dims(outputs_imag, axis=1)),
                                    axis=1)
            if self.use_bias:
                outputs = K.bias_add(outputs,
                                     self.bias,
                                     data_format=self.data_format)

            if self.activation is not None:
                return self.activation(outputs)
            return outputs
        else:
            if self.use_bias:
                outputs_real = K.bias_add(outputs_real,
                                          self.bias[0],
                                          data_format=self.data_format)
                outputs_imag = K.bias_add(outputs_imag,
                                          self.bias[1],
                                          data_format=self.data_format)

            if self.activation is not None:
                return [
                    self.activation(outputs_real),
                    self.activation(outputs_imag)
                ]
            return [outputs_real, outputs_imag]
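
The real and imaginary outputs above follow directly from complex multiplication: writing the input as x_r + i x_i and the kernel pair as W_r + i W_i,

    (x_r + i x_i) * (W_r + i W_i) = (x_r * W_r - x_i * W_i) + i (x_r * W_i + x_i * W_r),

which is exactly how outputs_real and outputs_imag are assembled for each rank.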
Example #27
    def call(self, inputs, training=None):

        if self.spectral_norm:
            w_shape = K.shape(self.kernel)
            w_reshaped = K.reshape(self.kernel, (-1, w_shape[-1]))

            u_hat = self.u
            v_hat = None
            for i in range(1):
                """
                power iteration
                Usually iteration = 1 will be enough
                """
                v_ = K.dot(u_hat, K.transpose(w_reshaped))
                v_hat = l2_norm(v_)

                u_ = K.dot(v_hat, w_reshaped)
                u_hat = l2_norm(u_)

            sigma = K.dot(v_hat, w_reshaped)
            sigma = K.dot(sigma, K.transpose(u_hat))

            # normalize it
            w_bar = w_reshaped / sigma

            # reshape weight tensor
            if training in {0, False}:
                w_bar = K.reshape(w_bar, w_shape)
            else:
                with tf.control_dependencies([self.u.assign(u_)]):
                    w_bar = K.reshape(w_bar, w_shape)

            self.kernel = w_bar

        if self.rank == 1:
            outputs = K.conv1d(inputs,
                               self.kernel,
                               strides=self.strides[0],
                               padding=self.padding,
                               data_format=self.data_format,
                               dilation_rate=self.dilation_rate[0])
        if self.rank == 2:
            outputs = K.conv2d(inputs,
                               self.kernel,
                               strides=self.strides,
                               padding=self.padding,
                               data_format=self.data_format,
                               dilation_rate=self.dilation_rate)
        if self.rank == 3:
            outputs = K.conv3d(inputs,
                               self.kernel,
                               strides=self.strides,
                               padding=self.padding,
                               data_format=self.data_format,
                               dilation_rate=self.dilation_rate)

        if self.use_bias:
            outputs = K.bias_add(outputs,
                                 self.bias,
                                 data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
Example #28
    def call(self, x):
        #         print('input shape:',x.shape) # None, 16,12,512
        exp_x = K.expand_dims(x, axis=-1)
        #         print('expanded input shape:',exp_x.shape) # N, 16, 12, 512, 1
        # filters = Attention Heads
        c3d = K.conv3d(exp_x,
                       kernel=self.kernel_conv3d,
                       strides=(1, 1, self.i_shape[-1]),
                       padding='same',
                       data_format='channels_last')
        conv3d = K.bias_add(c3d, self.bias_conv3d)
        #         print('c3d shape:', c3d.shape)
        #         conv3d = kl.Conv3D(padding='same',filters=self.multiheads, kernel_size=(3,3,self.i_shape[-1]), strides=(1,1,self.i_shape[-1]),kernel_initializer='he_normal',activation='relu')(exp_x)
        #         print('conv3d shape:', conv3d.shape)
        conv3d = K.permute_dimensions(conv3d, pattern=(0, 4, 1, 2, 3))
        #         print('conv3d shape:', conv3d.shape)
        #         conv3d = K.flatten(conv3d)

        conv3d = K.squeeze(conv3d, axis=-1)
        conv3d = K.reshape(conv3d,
                           shape=(-1, self.multiheads,
                                  self.i_shape[1] * self.i_shape[2]))
        #         print('conv3d shape:', conv3d.shape)
        #         conv3d = K.expand_dims(conv3d,axis=1) # N, 1, 16, 12
        #         print('conv3d shape:', conv3d.shape)
        softmax_alpha = K.softmax(conv3d, axis=-1)  # attention map # N, 16x12
        #         print('softmax_alpha shape:', softmax_alpha.shape)
        softmax_alpha = K.reshape(softmax_alpha,
                                  shape=(-1, self.multiheads, self.i_shape[1],
                                         self.i_shape[2]))
        #         print('softmax_alpha shape:', softmax_alpha.shape)

        if not self.aggregate_channels:
            exp_softmax_alpha = K.expand_dims(
                softmax_alpha, axis=-1)  # for elementwise multiplication
            #         print('exp_softmax_alpha shape:', exp_softmax_alpha.shape)
            exp_softmax_alpha = K.permute_dimensions(exp_softmax_alpha,
                                                     pattern=(0, 2, 3, 1, 4))
            #         print('exp_softmax_alpha shape:', exp_softmax_alpha.shape)
            x_exp = K.expand_dims(x, axis=-2)
            #         print('x_exp shape:', x_exp.shape)
            u = multiply([exp_softmax_alpha, x_exp])
            #         print('u shape:', u.shape)
            u = K.reshape(u,
                          shape=(-1, self.i_shape[1], self.i_shape[2],
                                 u.shape[-1] * u.shape[-2]))
        else:
            exp_softmax_alpha = K.permute_dimensions(softmax_alpha,
                                                     pattern=(0, 2, 3, 1))
            exp_softmax_alpha = K.sum(exp_softmax_alpha, axis=-1)
            #             print('exp_softmax_alpha shape:', exp_softmax_alpha.shape)
            exp_softmax_alpha = K.expand_dims(exp_softmax_alpha, axis=-1)
            #             print('exp_softmax_alpha shape:', exp_softmax_alpha.shape)
            u = multiply([exp_softmax_alpha, x])
#             print('u shape:', u.shape)
        if self.concat_input_with_scaled:
            o = K.concatenate([u, x], axis=-1)
        else:
            o = u
#         print('o shape:', o.shape)
#         u = kl.Conv2D(activation='relu',filters=self.i_shape[-1],kernel_size=(1,1),padding='valid')(u)
#         u = self.gamma2 * u + x
#         print('u shape:', u.shape)
#         u = self.gamma2 * u
#         u = K.tanh(u)
#         print('u shape:', u.shape)
#         return [u, softmax_alpha]
#         self.out_features_shape = tuple(u.shape.as_list())
#         self.out_attention_maps_shape = tuple(softmax_alpha.shape.as_list())
#         print(self.out_features_shape, self.out_attention_maps_shape)

        return [o, softmax_alpha]