Example No. 1
    def call(self, inputs):
        """
        inputs: [batch, seq, windows_size, 1, filters]
        """
        values = inputs[:, :(self.seq - 1), :, :, :]
        query = inputs[:, (self.seq - 1), :, :, :]

        values = tf.reshape(values,
                            [-1, self.windows_size, 1, self.filters_in])

        values_out = K.conv2d(
            values,
            self.weight_value,
            strides=(1, 1),
            padding='valid',
            data_format="channels_last"
        )  # [batch*(seq-1), rest_window, 1, filters_out]
        temp_shape = values_out.get_shape().as_list()

        values_out = tf.reshape(values_out,
                                [-1, temp_shape[1] * temp_shape[3]])
        values_out = tf.reshape(
            values_out, [-1, (self.seq - 1), temp_shape[1] * temp_shape[3]])

        query_out = K.conv2d(query,
                             self.weight_query,
                             strides=(1, 1),
                             padding='valid',
                             data_format="channels_last"
                             )  # [batch, rest_window, 1, filters_out]
        query_out = tf.reshape(query_out, [-1, temp_shape[1] * temp_shape[3]])
        query_out = tf.expand_dims(query_out, 1)

        score = self.V(tf.nn.tanh(values_out + query_out))

        attention_weights = tf.nn.softmax(score, axis=1)  # [batch, seq-1, 1]

        values = tf.reshape(
            values, [-1, (self.seq - 1), self.windows_size * self.filters_in])

        context_vector = attention_weights * values

        context_vector = tf.reduce_sum(context_vector, axis=1)

        query = tf.reshape(query, [-1, self.windows_size * self.filters_in])

        out = tf.concat([context_vector, query], axis=-1)

        return out
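Example No. 1 implements additive (Bahdanau-style) attention over the first seq-1 window encodings, using the last step as the query. A minimal standalone sketch of the same scoring scheme; the shapes and the Dense scoring layer V below are illustrative, not taken from the original layer:

import tensorflow as tf

batch, seq, feat = 2, 5, 8
values = tf.random.normal((batch, seq - 1, feat))   # encodings of past steps
query = tf.random.normal((batch, 1, feat))          # encoding of the last step

V = tf.keras.layers.Dense(1)                        # scoring vector, like self.V
score = V(tf.nn.tanh(values + query))               # [batch, seq-1, 1]
weights = tf.nn.softmax(score, axis=1)              # attention over time
context = tf.reduce_sum(weights * values, axis=1)   # [batch, feat]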
Example No. 2
def check_the_config_valid(para, window_size, feature):
    initial_state = np.zeros((1, window_size, feature, 1))
    initial_state = tf.cast(initial_state, 'float32')
    initial_state = K.zeros_like(initial_state)

    channel = 1
    try:
        for i in range(para["preprocessing_layers"]):

            shape = (para["pre_kernel_width"], 1, channel,
                     para["pre_number_filters"])
            channel = para["pre_number_filters"]
            initial_state = K.conv2d(
                initial_state, array_ops.zeros(tuple(shape)),
                (para["pre_strides"], 1))
            # optional: dilation_rate=(para["pre_dilation_rate"], 1)

        for i in range(1, 4):
            assert len(para["eclstm_{}_recurrent_activation".format(i)]) == \
                   len(para["eclstm_{}_conv_activation".format(i)]) == \
                   len(para["eclstm_{}_number_filters".format(i)]) == \
                   len(para["eclstm_{}_kernel_width".format(i)]) == \
                   len(para["eclstm_{}_fusion".format(i)]), \
                "Architecture parameters of layer {} should have the same length".format(i)

            for j in range(
                    len(para["eclstm_{}_recurrent_activation".format(i)])):

                if para["eclstm_{}_recurrent_activation".format(i)][0] is None:
                    break
                if para["eclstm_{}_fusion".format(i)][j] == "early":
                    shape = (para["eclstm_{}_kernel_width".format(i)][j],
                             feature, channel,
                             para["eclstm_{}_number_filters".format(i)][j])
                    feature = 1
                    channel = para["eclstm_{}_number_filters".format(i)][j]
                else:
                    shape = (para["eclstm_{}_kernel_width".format(i)][j], 1,
                             channel,
                             para["eclstm_{}_number_filters".format(i)][j])
                    channel = para["eclstm_{}_number_filters".format(i)][j]
                initial_state = K.conv2d(
                    initial_state, array_ops.zeros(tuple(shape)),
                    (para["eclstm_{}_strides".format(i)], 1))
        print("valid Configuration!")
        return True
    except:
        print(
            "Invalid Configuration! Try smaller strides or kernel size or greater window size!"
        )
        return False
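A hypothetical usage sketch: the dict below only illustrates the keys the validator reads; every value is a placeholder, not taken from the original project.

para = {
    "preprocessing_layers": 1,
    "pre_kernel_width": 3,
    "pre_number_filters": 8,
    "pre_strides": 1,
}
for i in range(1, 4):
    para.update({
        "eclstm_{}_recurrent_activation".format(i): ["hard_sigmoid"],
        "eclstm_{}_conv_activation".format(i): ["tanh"],
        "eclstm_{}_number_filters".format(i): [16],
        "eclstm_{}_kernel_width".format(i): [3],
        "eclstm_{}_fusion".format(i): ["early"],
        "eclstm_{}_strides".format(i): 1,
    })

check_the_config_valid(para, window_size=32, feature=4)  # prints "Valid configuration!"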
Example No. 3
    def call(self, inputs):
        _, kernel_b = xnorize(self.kernel, self.H)
        _, inputs_b = xnorize(inputs)
        outputs = K.conv2d(inputs_b,
                           kernel_b,
                           strides=self.strides,
                           padding=self.padding,
                           data_format=self.data_format,
                           dilation_rate=self.dilation_rate)

        # calculate Wa and xa

        # kernel_a
        mask = K.reshape(
            self.kernel,
            (-1,
             self.filters))  # self.nb_row * self.nb_col * channels, filters
        kernel_a = K.stop_gradient(K.mean(K.abs(mask), axis=0))  # filters

        # inputs_a
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        mask = K.mean(K.abs(inputs), axis=channel_axis, keepdims=True)
        ones = K.ones(self.kernel_size + (1, 1))
        inputs_a = K.conv2d(mask,
                            ones,
                            strides=self.strides,
                            padding=self.padding,
                            data_format=self.data_format,
                            dilation_rate=self.dilation_rate
                            )  # nb_sample, 1, new_nb_row, new_nb_col
        if self.data_format == 'channels_first':
            outputs = outputs * K.stop_gradient(inputs_a) * K.expand_dims(
                K.expand_dims(K.expand_dims(kernel_a, 0), -1), -1)
        else:
            outputs = outputs * K.stop_gradient(inputs_a) * K.expand_dims(
                K.expand_dims(K.expand_dims(kernel_a, 0), 0), 0)

        if self.use_bias:
            outputs = K.bias_add(outputs,
                                 self.bias,
                                 data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
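The xnorize helper is not shown in this snippet. A plausible sketch, assuming the XNOR-Net convention of sign binarization with a mean-absolute-value scaling factor (the scale is discarded above, since kernel_a and inputs_a recompute it per filter and per spatial position):

from tensorflow.keras import backend as K

def xnorize(w, H=1.0):
    # scale: mean |w|; w_b: sign(w) scaled to +/- H
    scale = K.stop_gradient(K.mean(K.abs(w)))
    w_b = H * K.sign(w)
    return scale, w_b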
Example No. 4
    def call(self, inputs, training=None):
        def _l2normalize(v, eps=1e-12):
            return v / (K.sum(v**2)**0.5 + eps)

        def power_iteration(W, u):
            _u = u
            _v = _l2normalize(K.dot(_u, K.transpose(W)))
            _u = _l2normalize(K.dot(_v, W))
            return _u, _v

        if self.spectral_normalization:
            W_shape = self.kernel.shape.as_list()
            # Flatten the Tensor
            W_reshaped = K.reshape(self.kernel, [-1, W_shape[-1]])
            _u, _v = power_iteration(W_reshaped, self.u)
            # Calculate Sigma
            sigma = K.dot(_v, W_reshaped)
            sigma = K.dot(sigma, K.transpose(_u))
            # normalize it
            W_bar = W_reshaped / sigma
            # reshape weight tensor
            if training in {0, False}:
                W_bar = K.reshape(W_bar, W_shape)
            else:
                with tf.control_dependencies([self.u.assign(_u)]):
                    W_bar = K.reshape(W_bar, W_shape)

            # update weight
            self.kernel = W_bar

        if self.rank == 1:
            outputs = K.conv1d(inputs,
                               self.kernel,
                               strides=self.strides[0],
                               padding=self.padding,
                               data_format=self.data_format,
                               dilation_rate=self.dilation_rate[0])
        if self.rank == 2:
            outputs = K.conv2d(inputs,
                               self.kernel,
                               strides=self.strides,
                               padding=self.padding,
                               data_format=self.data_format,
                               dilation_rate=self.dilation_rate)
        if self.rank == 3:
            outputs = K.conv3d(inputs,
                               self.kernel,
                               strides=self.strides,
                               padding=self.padding,
                               data_format=self.data_format,
                               dilation_rate=self.dilation_rate)

        if self.use_bias:
            outputs = K.bias_add(outputs,
                                 self.bias,
                                 data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
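The power iteration above approximates the largest singular value of the flattened kernel. The same recurrence in plain numpy, run long enough to converge, matches the value reported by SVD:

import numpy as np

rng = np.random.default_rng(0)
W = rng.normal(size=(64, 32))   # flattened kernel: rows x output channels
u = rng.normal(size=(1, 32))

for _ in range(50):
    v = u @ W.T
    v /= np.linalg.norm(v) + 1e-12
    u = v @ W
    u /= np.linalg.norm(u) + 1e-12

sigma = (v @ W @ u.T).item()
print(sigma, np.linalg.svd(W, compute_uv=False)[0])  # nearly equal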
Example No. 5
 def recurrent_conv_u(self, x, w):
     conv_out = K.conv2d(x,
                         w,
                         strides=(1, 1),
                         padding='same',
                         data_format='channels_last')
     return conv_out
Example No. 6
 def recurrent_conv(self, x, w):
     conv_out = K.conv2d(x,
                         w,
                         strides=(1, 1),
                         padding='same',
                         data_format=self.data_format)
     return conv_out
Example No. 7
    def call(self, inputs, **kwargs):
        if isinstance(inputs, list):
            features = inputs[0]
        else:
            features = inputs

        if self.weightnorm:
            norm = tf.sqrt(
                tf.reduce_sum(tf.square(self.kernel), (0, 1, 2)) + self.eps)
            kernel = self.kernel / norm * self.wn_g
        else:
            kernel = self.kernel

        features = K.conv2d(features,
                            kernel,
                            strides=self.strides,
                            padding=self.padding,
                            dilation_rate=self.dilation_rate)

        if self.use_bias:
            features = tf.add(features, self.bias)

        if self.activation is not None:
            features = self.activation(features)

        return features
Example No. 8
def soft_min_reg(cv, axis=None, min_disp=None, max_disp=None, labels=None):
    if axis == 1:
        cv = Lambda(lambda x: K.squeeze(x, axis=-1))(cv)
    disp_map = K.reshape(
        K.arange(min_disp,
                 max_disp - 0.000001, (max_disp - min_disp) / labels,
                 dtype="float32"), (1, 1, labels, 1))
    if axis == 1:
        output = K.conv2d(cv,
                          disp_map,
                          strides=(1, 1),
                          padding='valid',
                          data_format="channels_first")
        x = K.expand_dims(K.squeeze(output, axis=1), axis=-1)
    else:
        x = K.conv2d(cv, disp_map, strides=(1, 1), padding='valid')
    return x
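The 1x1 convolution with disp_map implements a soft argmin: it computes the expected disparity, the sum over the labels candidate disparities of disparity value times probability. For a single spatial location, in plain numpy:

import numpy as np

min_disp, max_disp, labels = 0.0, 8.0, 8
disp = np.arange(min_disp, max_disp - 1e-6, (max_disp - min_disp) / labels)
p = np.random.dirichlet(np.ones(labels))  # one softmaxed cost-volume slice
print(float(p @ disp))                    # expected (regressed) disparity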
Example No. 9
 def input_conv_u(self, x, w, b=None, padding='same'):
   conv_out = K.conv2d(x, w, strides=self.strides,
                       padding=padding,
                       data_format='channels_last',
                       dilation_rate=self.dilation_rate)
   if b is not None:
     conv_out = K.bias_add(conv_out, b,
                           data_format='channels_last')
   return conv_out
Example No. 10
 def _conv(self, x, w, b=None, padding='same'):
     conv_out = K.conv2d(x,
                         w,
                         strides=(1, 1),
                         padding=padding,
                         data_format='channels_last')
     if b is not None:
         conv_out = K.bias_add(conv_out, b, data_format='channels_last')
     return conv_out
Example No. 11
 def input_conv(self, x, w, b=None, padding='valid'):
   conv_out = backend.conv2d(x, w, strides=self.strides,
                             padding=padding,
                             data_format=self.data_format,
                             dilation_rate=self.dilation_rate)
   if b is not None:
     conv_out = backend.bias_add(conv_out, b,
                                 data_format=self.data_format)
   return conv_out
Example No. 12
 def input_conv(self, x, w, b=None, padding='valid'):
   conv_out = K.conv2d(x, w, strides=self.strides,
                       padding=padding,
                       data_format=self.data_format,
                       dilation_rate=self.dilation_rate)
   if b is not None:
     conv_out = K.bias_add(conv_out, b,
                           data_format=self.data_format)
   return conv_out
Example No. 13
    def call(self, inputs):
        scaled_kernel = self.kernel * self.runtime_coeff
        
        if self.rank == 1:
            kernel = Ke.pad(scaled_kernel
                            , [[1,1], [0,0], [0,0]])
            fused_kernel = Ke.add_n([kernel[1:]
                               , kernel[:-1]]) / 2.0
            outputs = K.conv1d(inputs
                , fused_kernel
                , strides=self.strides[0]
                , padding=self.padding
                , data_format=self.data_format
                , dilation_rate=self.dilation_rate[0])
        if self.rank == 2:
            kernel = Ke.pad(scaled_kernel
                            , [[1,1], [1,1], [0,0], [0,0]])
            fused_kernel = Ke.add_n([kernel[1:, 1:]
                               , kernel[:-1, 1:]
                               , kernel[1:, :-1]
                               , kernel[:-1, :-1]]) / 4.0
            outputs = K.conv2d(inputs
                , fused_kernel
                , strides=self.strides
                , padding=self.padding
                , data_format=self.data_format
                , dilation_rate=self.dilation_rate)
        if self.rank == 3:
            kernel = Ke.pad(scaled_kernel
                            , [[1,1], [1,1], [1,1], [0,0], [0,0]])
            fused_kernel = Ke.add_n([kernel[1:, 1:, 1:]
                               , kernel[1:, 1:, :-1]
                               , kernel[1:, :-1, 1:]
                               , kernel[1:, :-1, :-1]
                               , kernel[:-1, 1:, 1:]
                               , kernel[:-1, 1:, :-1]
                               , kernel[:-1, :-1, 1:]
                               , kernel[:-1, :-1, :-1]]) / 8.0
            outputs = K.conv3d(inputs
                , fused_kernel
                , strides=self.strides
                , padding=self.padding
                , data_format=self.data_format
                , dilation_rate=self.dilation_rate)

        if self.use_bias:
            outputs = K.bias_add(outputs
                , self.bias
                , data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
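The pad-and-average step builds a kernel smoothed over its one-tap shifts (2 copies in 1-D, 4 in 2-D, 8 in 3-D), which also grows each spatial dimension by one. A 1-D numpy illustration of the rank-1 case:

import numpy as np

kernel = np.array([1.0, 2.0, 3.0])
padded = np.pad(kernel, (1, 1))            # [0, 1, 2, 3, 0]
fused = (padded[1:] + padded[:-1]) / 2.0   # average the two shifted views
print(fused)                               # [0.5 1.5 2.5 1.5]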
Example No. 14
def do_2d_convolution(feature_matrix,
                      kernel_matrix,
                      pad_edges=False,
                      stride_length_px=1):
    """Convolves 2-D feature maps with 2-D kernel.

    m = number of rows in kernel
    n = number of columns in kernel
    c = number of output feature maps (channels)

    :param feature_matrix: Input feature maps (numpy array).  Dimensions must be
        M x N x C or 1 x M x N x C.
    :param kernel_matrix: Kernel as numpy array.  Dimensions must be
        m x n x C x c.
    :param pad_edges: Boolean flag.  If True, edges of input feature maps will
        be zero-padded during convolution, so spatial dimensions of the output
        feature maps will be the same (M x N).  If False, dimensions
        of the output maps will be (M - m + 1) x (N - n + 1).
    :param stride_length_px: Stride length (pixels).  The kernel will move by
        this many rows or columns at a time as it slides over each input feature
        map.
    :return: feature_matrix: Output feature maps (numpy array).  Dimensions will
        be 1 x M x N x c or 1 x (M - m + 1) x (N - n + 1) x c, depending on
        whether or not edges are padded.
    """

    error_checking.assert_is_numpy_array_without_nan(feature_matrix)
    error_checking.assert_is_numpy_array_without_nan(kernel_matrix)
    error_checking.assert_is_numpy_array(kernel_matrix, num_dimensions=4)
    error_checking.assert_is_boolean(pad_edges)
    error_checking.assert_is_integer(stride_length_px)
    error_checking.assert_is_geq(stride_length_px, 1)

    if len(feature_matrix.shape) == 3:
        feature_matrix = numpy.expand_dims(feature_matrix, axis=0)

    error_checking.assert_is_numpy_array(feature_matrix, num_dimensions=4)

    if pad_edges:
        padding_string = 'same'
    else:
        padding_string = 'valid'

    feature_tensor = K.conv2d(x=K.variable(feature_matrix),
                              kernel=K.variable(kernel_matrix),
                              strides=(stride_length_px, stride_length_px),
                              padding=padding_string,
                              data_format='channels_last')

    return feature_tensor.numpy()
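A usage sketch for the shape arithmetic in the docstring, assuming TF 2.x eager execution (the .numpy() call above requires it) and that the module's error_checking dependency is importable: a 5 x 5 map convolved with a 3 x 3 kernel under 'valid' padding gives 3 x 3.

import numpy

feature_matrix = numpy.random.rand(5, 5, 1).astype('float32')    # M x N x C
kernel_matrix = numpy.random.rand(3, 3, 1, 2).astype('float32')  # m x n x C x c
out = do_2d_convolution(feature_matrix, kernel_matrix, pad_edges=False)
print(out.shape)  # (1, 3, 3, 2) == 1 x (M-m+1) x (N-n+1) x c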
Example No. 15
 def apply_separate_filter_for_each_batch(inputs):
     kernel = inputs[1]
     x = K.expand_dims(inputs[0], axis=0)
     outputs = K.conv2d(
                 x,
                 kernel,
                 strides=self.strides,
                 padding=self.padding,
                 data_format=self.data_format,
                 dilation_rate=self.dilation_rate)
     if self.bias is not None:
         bias = inputs[2]
         outputs = K.bias_add(outputs, bias, data_format=self.data_format)
     return K.squeeze(outputs, axis=0)
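This helper is presumably mapped over the batch so that each sample is convolved with its own kernel; the tf.map_fn driver below is an illustrative sketch, not necessarily the original wrapper:

import tensorflow as tf

x = tf.random.normal((4, 8, 8, 3))           # batch of inputs
kernels = tf.random.normal((4, 3, 3, 3, 5))  # one 3x3x3x5 kernel per sample

out = tf.map_fn(
    lambda pair: tf.squeeze(
        tf.nn.conv2d(tf.expand_dims(pair[0], 0), pair[1],
                     strides=1, padding='SAME'), 0),
    (x, kernels), fn_output_signature=tf.float32)
print(out.shape)  # (4, 8, 8, 5)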
Example No. 16
 def context_gating(self, x, w, rx, rw, b=None, padding='valid'):
   input_shape = x.get_shape().as_list()
   if self.data_format == 'channels_first':
     x = K.pool2d(x, (input_shape[2], input_shape[3]), pool_mode='avg')
     rx = K.pool2d(rx, (input_shape[2], input_shape[3]), pool_mode='avg')
   elif self.data_format == 'channels_last':
     x = K.pool2d(x, (input_shape[1], input_shape[2]), pool_mode='avg')
     rx = K.pool2d(rx, (input_shape[1], input_shape[2]), pool_mode='avg')
   conv_out1 = K.conv2d(
       x,
       w,
       strides=self.strides,
       padding=padding,
       data_format=self.data_format)
   conv_out2 = K.conv2d(
       rx,
       rw,
       strides=self.strides,
       padding=padding,
       data_format=self.data_format)
   conv_out = conv_out1 + conv_out2
   if b is not None:
     conv_out = K.bias_add(conv_out, b, data_format=self.data_format)
   return conv_out
Example No. 17
    def call(self, inputs):
        outputs = K.conv2d(inputs,
                           self.W_bar(),
                           strides=self.strides,
                           padding=self.padding,
                           data_format=self.data_format,
                           dilation_rate=self.dilation_rate)

        if self.use_bias:
            outputs = K.bias_add(outputs,
                                 self.bias,
                                 data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
Example No. 18
 def correlation(self, displace, kernel):
   """ Do the actual convolution==correlation.
   """  
   # Given an input tensor of shape [batch, in_height, in_width, in_channels]
    displace = K.expand_dims(displace, 0)  # add a batch dimension at the front
   
   # a kernel tensor of shape [filter_height, filter_width, in_channels, out_channels]
    kernel = K.expand_dims(kernel, 3)  # add an out_channels dimension at the end
   
    # the kernel sweeps horizontally across the padded strip
   out = K.conv2d(displace, kernel, padding='valid', data_format='channels_last')
       
    out = K.squeeze(out, 0)  # strip the leading batch dimension
    # print(K.int_shape(out))  # (1, 360, 1)
   
   return out
Example No. 19
    def call(self, inputs):
        binary_kernel = binarize(self.kernel, H=self.H)
        outputs = K.conv2d(inputs,
                           binary_kernel,
                           strides=self.strides,
                           padding=self.padding,
                           data_format=self.data_format,
                           dilation_rate=self.dilation_rate)

        if self.use_bias:
            outputs = K.bias_add(outputs,
                                 self.bias,
                                 data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
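The binarize helper is not shown here and implementations vary; a plausible sketch, assuming deterministic sign binarization with a straight-through gradient estimator:

from tensorflow.keras import backend as K

def binarize(W, H=1.0):
    # forward: +/- H by sign; backward: identity (straight-through estimator)
    Wb = H * K.sign(W)
    return W + K.stop_gradient(Wb - W)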
Example No. 20
    def call(self, inputs, training=None):

        input_transposed = tf.transpose(inputs, [3, 0, 1, 2, 4])
        input_shape = tf.shape(input_transposed)
        input_tensor_reshaped = tf.reshape(input_transposed, [
            input_shape[0] * input_shape[1], self.input_height,
            self.input_width, self.input_num_atoms
        ])
        input_tensor_reshaped.set_shape(
            (None, self.input_height, self.input_width, self.input_num_atoms))

        conv = K.conv2d(input_tensor_reshaped,
                        self.W, (self.strides, self.strides),
                        padding=self.padding,
                        data_format='channels_last')

        votes_shape = tf.shape(conv)
        _, conv_height, conv_width, _ = conv.get_shape()

        votes = tf.reshape(conv, [
            input_shape[1], input_shape[0], votes_shape[1], votes_shape[2],
            self.num_capsule, self.num_atoms
        ])
        votes.set_shape((None, self.input_num_capsule, conv_height.value,
                         conv_width.value, self.num_capsule, self.num_atoms))

        logit_shape = tf.stack([
            input_shape[1], input_shape[0], votes_shape[1], votes_shape[2],
            self.num_capsule
        ])
        biases_replicated = tf.tile(
            self.b, [conv_height.value, conv_width.value, 1, 1])

        activations = update_routing(votes=votes,
                                     biases=biases_replicated,
                                     logit_shape=logit_shape,
                                     num_dims=6,
                                     input_dim=self.input_num_capsule,
                                     output_dim=self.num_capsule,
                                     num_routing=self.routings)

        return activations
Example No. 21
    def call(self, inputs, training=None):

        outputs = K.conv2d(
            inputs,
            self.compute_spectral_normal(training),
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)

        if self.bias is not None:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)

        return outputs
Example No. 22
    def call(self, inputs):
        # Mask kernel with connection matrix
        masked_kernel = self.kernel * self.connections

        # Apply convolution
        if self.rank == 1:
            outputs = K.conv1d(
                inputs,
                masked_kernel,
                strides=self.strides[0],
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate[0])
        if self.rank == 2:
            outputs = K.conv2d(
                inputs,
                masked_kernel,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)
        if self.rank == 3:
            outputs = K.conv3d(
                inputs,
                masked_kernel,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)

        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
Example No. 23
def max_singular_val_for_convolution(w,
                                     u,
                                     fully_differentiable=False,
                                     ip=1,
                                     padding='same',
                                     strides=(1, 1),
                                     data_format='channels_last'):
    assert ip >= 1
    if not fully_differentiable:
        w_ = K.stop_gradient(w)
    else:
        w_ = w

    u_bar = u
    for _ in range(ip):
        v_bar = K.conv2d(u_bar,
                         w_,
                         strides=strides,
                         data_format=data_format,
                         padding=padding)
        v_bar = K.l2_normalize(v_bar)

        u_bar_raw = K.conv2d_transpose(v_bar,
                                       w_,
                                       output_shape=K.int_shape(u),
                                       strides=strides,
                                       data_format=data_format,
                                       padding=padding)
        u_bar = K.l2_normalize(u_bar_raw)

    u_bar_raw_diff = K.conv2d_transpose(v_bar,
                                        w,
                                        output_shape=K.int_shape(u),
                                        strides=strides,
                                        data_format=data_format,
                                        padding=padding)
    sigma = K.sum(u_bar * u_bar_raw_diff)
    return sigma, u_bar
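A hypothetical usage sketch with illustrative shapes: conv2d and conv2d_transpose apply the layer's linear operator and its adjoint, so the loop is power iteration on that operator and sigma estimates its top singular value.

from tensorflow.keras import backend as K

u = K.random_normal((1, 8, 8, 3))    # probe with the layer-input shape
w = K.random_normal((3, 3, 3, 16))   # kh x kw x c_in x c_out kernel
sigma, u_new = max_singular_val_for_convolution(w, u, ip=3)
print(K.eval(sigma))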
Example No. 24
    def call(self, inputs, training=None):
        scaled_kernel = self.kernel * self.runtime_coeff
        
        if self.rank == 1:
            outputs = K.conv1d(
                inputs,
                scaled_kernel,
                strides=self.strides[0],
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate[0])
        if self.rank == 2:
            outputs = K.conv2d(
                inputs,
                scaled_kernel,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)
        if self.rank == 3:
            outputs = K.conv3d(
                inputs,
                scaled_kernel,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)

        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            outputs = self.activation(outputs)
        return outputs
Example No. 25
def offset_conv2d_eval(depth, padding, x):
    """Perform a conv2d on x with a given padding"""
    kernel = K.variable(value=np.array([[[[1]] + [[0]] * (depth - 1)]]),
                        dtype='float32')
    return K.conv2d(x, kernel, strides=(3, 3), padding=padding)
Example No. 26
    def call(self, features):
        ni = features.shape[-1]
        no = self.filters

        if self.group == 'C4':
            nt = 4
        elif self.group == 'D4':
            nt = 8

        nti = 1 if self.first else nt
        nto = nt

        k = self.kernel_size[0]
        t = np.reshape(np.arange(nti * k * k), (nti, k, k))
        trafos = [np.rot90(t, r, axes=(1, 2)) for r in range(4)]
        if nt == 8:
            trafos = trafos + [np.flip(t, 1) for t in trafos]
        self.trafos = trafos = np.array(trafos)

        # index magic happens here
        if nti == 1:
            indices = trafos
        elif nti == 4:
            indices = [[trafos[l, (m - l) % 4, :, :] for m in range(4)]
                       for l in range(4)]
        elif nti == 8:
            indices = [[
                trafos[l, (m - l) % 4 if ((m < 4) == (l < 4)) else
                       (m + l) % 4 + 4, :, :] for m in range(8)
            ] for l in range(8)]
        self.indices = indices = np.reshape(indices, (nto, nti, k, k))

        # transform the kernel
        kernel = self.kernel
        kernel = tf.reshape(kernel, (nti * k * k, ni, no))
        kernel = tf.gather(kernel, indices, axis=0)
        kernel = tf.reshape(kernel, (nto, nti, k, k, ni, no))
        kernel = tf.transpose(kernel, (2, 3, 1, 4, 0, 5))
        kernel = tf.reshape(kernel, (k, k, nti * ni, nto * no))
        self.transformed_kernel = kernel

        if self.first:
            x = features
        else:
            s = features.shape
            x = tf.reshape(features, (-1, s[1], s[2], s[3] * s[4]))

        x = K.conv2d(x,
                     kernel,
                     strides=self.strides,
                     padding=self.padding,
                     dilation_rate=self.dilation_rate)
        s = x.shape
        x = tf.reshape(x, (-1, s[1], s[2], nto, no))

        if self.use_bias:
            x = tf.add(x, self.bias)

        if self.activation is not None:
            x = self.activation(x)

        return x
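A tiny illustration of the index table built above for C4 (nt = 4): trafos[r] holds the indices of the base table rotated by r * 90 degrees, which is what lets tf.gather assemble all rotated copies of a single weight tensor.

import numpy as np

k = 3
t = np.arange(k * k).reshape(1, k, k)
trafos = np.array([np.rot90(t, r, axes=(1, 2)) for r in range(4)])
print(trafos[1, 0])  # the 3x3 index grid rotated by 90 degrees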
Example No. 27
def offset_conv2d_eval(depth, padding, x):
    """Perform a conv2d on x with a given padding"""
    kernel = K.variable(value=np.array([[[[1]] + [[0]] * (depth - 1)]]),
                        dtype='float32')
    return K.conv2d(x, kernel, strides=(3, 3), padding=padding)
Example No. 28
    def call(self, input_tensor, training=None):
        input_transposed = tf.transpose(input_tensor, [3, 0, 1, 2, 4])
        input_shape = K.shape(input_transposed)
        input_tensor_reshaped = K.reshape(input_transposed, [
            input_shape[1] * input_shape[0], self.input_height,
            self.input_width, self.input_num_atoms
        ])
        input_tensor_reshaped.set_shape(
            (None, self.input_height, self.input_width, self.input_num_atoms))

        if self.upsamp_type == 'resize':
            upsamp = K.resize_images(input_tensor_reshaped, self.scaling,
                                     self.scaling, 'channels_last')
            outputs = K.conv2d(upsamp,
                               kernel=self.W,
                               strides=(1, 1),
                               padding=self.padding,
                               data_format='channels_last')
        elif self.upsamp_type == 'subpix':
            conv = K.conv2d(input_tensor_reshaped,
                            kernel=self.W,
                            strides=(1, 1),
                            padding='same',
                            data_format='channels_last')
            outputs = tf.depth_to_space(conv, self.scaling)
        else:
            batch_size = input_shape[1] * input_shape[0]

            # Infer the dynamic output shape:
            out_height = deconv_output_length(input_length=self.input_height,
                                              stride=self.scaling,
                                              filter_size=self.kernel_size,
                                              padding=self.padding)
            out_width = deconv_output_length(input_length=self.input_width,
                                             stride=self.scaling,
                                             filter_size=self.kernel_size,
                                             padding=self.padding)
            output_shape = (batch_size, out_height, out_width,
                            self.num_capsule * self.num_atoms)

            outputs = K.conv2d_transpose(input_tensor_reshaped,
                                         self.W,
                                         output_shape,
                                         (self.scaling, self.scaling),
                                         padding=self.padding,
                                         data_format='channels_last')

        votes_shape = K.shape(outputs)
        _, conv_height, conv_width, _ = outputs.get_shape()

        votes = K.reshape(outputs, [
            input_shape[1], input_shape[0], votes_shape[1], votes_shape[2],
            self.num_capsule, self.num_atoms
        ])
        votes.set_shape((None, self.input_num_capsule, conv_height.value,
                         conv_width.value, self.num_capsule, self.num_atoms))

        logit_shape = K.stack([
            input_shape[1], input_shape[0], votes_shape[1], votes_shape[2],
            self.num_capsule
        ])
        biases_replicated = K.tile(self.b,
                                   [votes_shape[1], votes_shape[2], 1, 1])

        activations = update_routing(votes=votes,
                                     biases=biases_replicated,
                                     logit_shape=logit_shape,
                                     num_dims=6,
                                     input_dim=self.input_num_capsule,
                                     output_dim=self.num_capsule,
                                     num_routing=self.routings)

        return activations
Example No. 29
 def recurrent_conv(self, x, w):
   conv_out = K.conv2d(x, w, strides=(1, 1),
                       padding='same',
                       data_format=self.data_format)
   return conv_out
Example No. 30
def find_patch_matches(a, a_norm, b):
    '''For each patch in A, find the best matching patch in B'''
    # we want cross-correlation here so flip the kernels
    convs = K.conv2d(a, b[:, :, ::-1, ::-1], border_mode='valid')
    argmax = K.argmax(convs / a_norm, axis=1)
    return argmax
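The kernel flip works because this snippet targets a backend whose conv2d performs true convolution (the legacy border_mode argument and channels-first kernel slicing suggest Theano-era Keras), and convolving with a flipped kernel is the same as cross-correlating. A 1-D numpy check of that identity:

import numpy as np

a = np.array([1.0, 2.0, 3.0, 4.0])
k = np.array([1.0, 0.5])
conv_flipped = np.convolve(a, k[::-1], mode='valid')           # convolution with flipped kernel
xcorr = np.array([a[i:i + 2] @ k for i in range(len(a) - 1)])  # cross-correlation
print(np.allclose(conv_flipped, xcorr))                        # True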