Example #1
    def call(self, inputs):
        if self.tied_to is not None:
            outputs = K.conv1d(inputs,
                               self.tied_to.kernel,
                               strides=self.strides[0],
                               padding=self.padding,
                               data_format=self.data_format,
                               dilation_rate=self.dilation_rate[0])
        else:
            # this branch is typically entered when a previously trained model is being loaded again
            outputs = K.conv1d(inputs,
                               self.learnedKernel,
                               strides=self.strides[0],
                               padding=self.padding,
                               data_format=self.data_format,
                               dilation_rate=self.dilation_rate[0])

        if self.use_bias:
            outputs = K.bias_add(outputs,
                                 self.bias,
                                 data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
Example #2
    def call(self, inputs):
        outputs = K.conv1d(inputs,
                           self.kernel,
                           strides=self.strides[0],
                           padding='same',
                           data_format=self.data_format,
                           dilation_rate=self.dilation_rate[0])
        print(tf.shape(outputs))
        outputs = tf.reverse(outputs, axis=[1])
        outputs = K.conv1d(outputs,
                           self.kernel,
                           strides=self.strides[0],
                           padding=self.padding,
                           data_format=self.data_format,
                           dilation_rate=self.dilation_rate[0])
        print(tf.shape(outputs))
        outputs = tf.reverse(outputs, axis=[1])
        if self.use_bias:
            outputs = K.bias_add(outputs,
                                 self.bias,
                                 data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)

        return outputs
Example #3
    def call(self, inputs):
        h,x = inputs
        q = K.conv1d(h,
                     kernel=self.kernel_q,
                     strides=(1,), padding='same')
        q = K.bias_add(q, self.bias_q)
        k = K.conv1d(x,
                     kernel=self.kernel_k,
                     strides=(1,), padding='same')
        k = K.bias_add(k, self.bias_k)
        v = K.conv1d(x,
                     kernel=self.kernel_v,
                     strides=(1,), padding='same')
        v = K.bias_add(v, self.bias_v)
#         print('q.shape,k.shape,v.shape,',q.shape,k.shape,v.shape)
        s = tf.matmul(q, k, transpose_b=True)  # # [bs, N, N]
#         print('s.shape:',s.shape)
        beta = K.softmax(s, axis=-1)  # attention map
        self.beta_shape = tuple(beta.shape[1:].as_list())
#         print('beta.shape:',beta.shape.as_list())
        o = K.batch_dot(beta, v)  # [bs, N, C]
        o = K.conv1d(o,
                     kernel=self.kernel_o,
                     strides=(1,), padding='same')
        o = K.bias_add(o, self.bias_o)
#         print('o.shape:',o.shape)
#         o = K.reshape(o, shape=K.shape(x))  # [bs, h, w, C]
        x = self.gamma * o + x
    
    
        
#         print('x.shape:',x.shape)
        return [x, s, self.gamma]
Example #4
    def call(self, inputs, **kwargs):
        if self.normalize_signal:
            inputs = (inputs - K.mean(inputs, axis=(1, 2), keepdims=True)) / (
                K.std(inputs, axis=(1, 2), keepdims=True) + K.epsilon()
            )

        if self.length < self.nfft:
            inputs = ZeroPadding1D(padding=(0, self.nfft - self.length))(inputs)

        real_part = []
        imag_part = []
        for n in range(inputs.shape[-1]):
            real_part.append(
                K.conv1d(
                    K.expand_dims(inputs[:, :, n]),
                    kernel=self.real_kernel,
                    strides=self.shift,
                    padding="valid",
                )
            )
            imag_part.append(
                K.conv1d(
                    K.expand_dims(inputs[:, :, n]),
                    kernel=self.imag_kernel,
                    strides=self.shift,
                    padding="valid",
                )
            )

        real_part = K.stack(real_part, axis=-1)
        imag_part = K.stack(imag_part, axis=-1)

        # real_part = K.expand_dims(real_part)
        # imag_part = K.expand_dims(imag_part)
        if self.mode == "abs":
            fft = K.sqrt(K.square(real_part) + K.square(imag_part))
        if self.mode == "phase":
            fft = tf.atan(real_part / imag_part)
        elif self.mode == "real":
            fft = real_part
        elif self.mode == "imag":
            fft = imag_part
        elif self.mode == "complex":
            fft = K.concatenate((real_part, imag_part), axis=-1)
        elif self.mode == "log":
            fft = K.clip(
                K.sqrt(K.square(real_part) + K.square(imag_part)), K.epsilon(), None
            )
            fft = K.log(fft) / np.log(10)

        fft = K.permute_dimensions(fft, (0, 2, 1, 3))[:, : self.nfft // 2, :, :]
        if self.normalize_feature:
            if self.mode == "complex":
                warnings.warn(
                    'spectrum normalization will not be applied with mode == "complex"'
                )
            else:
                fft = (fft - K.mean(fft, axis=1, keepdims=True)) / (
                    K.std(fft, axis=1, keepdims=True) + K.epsilon()
                )

        return fft
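Example #4 builds a spectrogram by convolving each input channel with fixed real and imaginary DFT kernels (self.real_kernel and self.imag_kernel), which are created elsewhere in the layer. A minimal sketch of how such kernels can be constructed for K.conv1d, assuming a window of nfft samples and one input channel per call (the helper name and shapes are illustrative, not taken from the source):

import numpy as np

def make_dft_kernels(nfft):
    # K.conv1d expects kernels of shape (kernel_size, in_channels, filters);
    # filter k correlates each frame with a cosine / sine at frequency k.
    n = np.arange(nfft)
    k = np.arange(nfft)
    angles = 2.0 * np.pi * np.outer(n, k) / nfft            # (nfft, nfft)
    real_kernel = np.cos(angles)[:, np.newaxis, :]           # (nfft, 1, nfft)
    imag_kernel = -np.sin(angles)[:, np.newaxis, :]          # (nfft, 1, nfft)
    return real_kernel.astype("float32"), imag_kernel.astype("float32")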
Example #5
    def call(self, inputs):

        if self.kernel_size[0] % 2 == 0:
            flipped = tf.reverse(self.kernel, axis=[0])
        else:
            flipped = tf.reverse(self.kernel[1:, :, :], axis=[0])
        #         print (flipped)
        conv_kernel = tf.concat([flipped, self.kernel], axis=0)
        #         print (conv_kernel)

        outputs = K.conv1d(inputs,
                           conv_kernel,
                           strides=self.strides[0],
                           padding='same',
                           data_format=self.data_format,
                           dilation_rate=self.dilation_rate[0])
        #         print tf.shape(outputs)
        outputs = tf.reverse(outputs, axis=[1])
        outputs = K.conv1d(outputs,
                           conv_kernel,
                           strides=self.strides[0],
                           padding=self.padding,
                           data_format=self.data_format,
                           dilation_rate=self.dilation_rate[0])
        #         print tf.shape(outputs)
        outputs = tf.reverse(outputs, axis=[1])
        if self.use_bias:
            outputs = K.bias_add(outputs,
                                 self.bias,
                                 data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)

        return outputs
Example #6
    def call(self, inputs):
        if self.tied_to is not None:
            outputs = K.conv1d(
               inputs,
               self.tied_to.kernel,
               strides=self.strides[0],
               padding=self.padding,
               data_format=self.data_format,
               dilation_rate=self.dilation_rate[0])
        else:
            # this branch is typically entered when a previously trained model is being loaded again
            outputs = K.conv1d(
               inputs,
               self.learnedKernel,
               strides=self.strides[0],
               padding=self.padding,
               data_format=self.data_format,
               dilation_rate=self.dilation_rate[0])

        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
Example #7
    def backprop_conv(self, w, b, a, r):

        w_p = K.maximum(w, 0.)
        b_p = K.maximum(b, 0.)
        z_p = K.conv1d(a, kernel=w_p, strides=1,
                       padding='valid') + b_p + self.epsilon
        s_p = r / z_p
        c_p = K.tf.contrib.nn.conv1d_transpose(value=s_p,
                                               filter=w_p,
                                               output_shape=K.shape(a),
                                               stride=1,
                                               padding='SAME',
                                               name=None)
        w_n = K.minimum(w, 0.)
        b_n = K.minimum(b, 0.)
        z_n = K.conv1d(a, kernel=w_n, strides=1,
                       padding='valid') + b_n - self.epsilon
        s_n = r / z_n
        c_n = K.tf.contrib.nn.conv1d_transpose(value=s_n,
                                               filter=w_n,
                                               output_shape=K.shape(a),
                                               stride=1,
                                               padding='SAME',
                                               name=None)

        return a * (self.alpha * c_p + self.beta * c_n)
Example #8
def signal2noise_accuracy(y_true, y_pred):
    kernel_size = 15
    filters = K.constant(numpy.ones((kernel_size, 1, 1)))
    attention = K.clip(K.conv1d(y_true, filters, 1, 'same'), 0, 1)
    level = K.clip(K.conv1d(y_pred * y_true, filters, 1, 'same'), 0, 1)
    diff = y_true - y_pred

    return 1 - K.minimum(K.sum(K.abs(diff) * attention) / K.sum(level), 1)
Example #9
 def call(self, inputs):
     x, seq_len = inputs
     pos = tf.nn.relu(K.bias_add(K.conv1d(x, self.weight), self.bias))
     logits = tf.squeeze(K.conv1d(pos, self.v), axis=-1)
     mask = tf.sequence_mask(seq_len,
                             maxlen=x.shape.as_list()[1],
                             dtype=tf.float32)
     logits = logits + tf.float32.min * (1 - mask)
     return tf.nn.softmax(logits, axis=-1)
Example #10
 def call(self, x):
   shape = K.shape(x)
   if K.ndim(x) == 4:
     x = K.reshape(x, (-1, shape[-2], shape[-1]))
     x = K.conv1d(x, self.lda, data_format="channels_last") + self.bias
     return K.reshape(x, (shape[0], shape[1], shape[2] - self.kernel_size + 1, self.feat_dim * self.kernel_size))
   elif K.ndim(x) == 5:
     x = K.reshape(x, (-1, shape[-2], shape[-1]))
     x = K.conv1d(x, self.lda, data_format="channels_last") + self.bias
     return K.reshape(x, (shape[0], shape[1], shape[2], shape[3] - self.kernel_size + 1, self.feat_dim * self.kernel_size))
Example #11
    def call(self, inputs):
        if self.rank == 1:
            if self.Masked == False:
                outputs = K.conv1d(inputs,
                                   self.kernel,
                                   strides=self.strides[0],
                                   padding=self.padding,
                                   data_format=self.data_format,
                                   dilation_rate=self.dilation_rate[0])
            else:
                outputs = K.conv1d(inputs,
                                   self.kernel * self.kernel_mask,
                                   strides=self.strides[0],
                                   padding=self.padding,
                                   data_format=self.data_format,
                                   dilation_rate=self.dilation_rate[0])
        if self.rank == 2:
            if self.Masked == False:
                outputs = K.conv2d(inputs,
                                   self.kernel,
                                   strides=self.strides,
                                   padding=self.padding,
                                   data_format=self.data_format,
                                   dilation_rate=self.dilation_rate)
            else:
                outputs = K.conv2d(inputs,
                                   self.kernel * self.kernel_mask,
                                   strides=self.strides,
                                   padding=self.padding,
                                   data_format=self.data_format,
                                   dilation_rate=self.dilation_rate)
        if self.rank == 3:
            if self.Masked == False:
                outputs = K.conv3d(inputs,
                                   self.kernel,
                                   strides=self.strides,
                                   padding=self.padding,
                                   data_format=self.data_format,
                                   dilation_rate=self.dilation_rate)
            else:
                outputs = K.conv3d(inputs,
                                   self.kernel * self.kernel_mask,
                                   strides=self.strides,
                                   padding=self.padding,
                                   data_format=self.data_format,
                                   dilation_rate=self.dilation_rate)

        if self.use_bias:
            outputs = K.bias_add(outputs,
                                 self.bias,
                                 data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
Example #12
    def call(self, inputs):
        def hw_flatten(x):
            return kl.Reshape(target_shape=(int(x.shape[1]) * int(x.shape[2]),
                                            int(x.shape[3])))(x)
#             s = x.shape.as_list()
#             return K.reshape(x, shape=[-1,s[1]*s[2],s[3]])

        x1, x2, masks = inputs  #img, text, mask
        if masks is not None:
            self.masks = masks
#         self.text_input_shape = tuple(x1.shape[1:].as_list())
        q = K.conv2d(x1, kernel=self.kernel_q, strides=(1, 1), padding='same')
        q = K.bias_add(q, self.bias_q)
        #         q = kl.tanh(alpha=1.0)(q)
        k = K.conv1d(x2, kernel=self.kernel_k, strides=(1, ), padding='same')
        k = K.bias_add(k, self.bias_k)
        #         k = kl.tanh(alpha=1.0)(k)
        v = K.conv1d(x2, kernel=self.kernel_v, strides=(1, ), padding='same')
        v = K.bias_add(v, self.bias_v)
        #         v = kl.tanh(alpha=1.0)(v)
        #         print('q.shape,k.shape,v.shape,',q.shape,k.shape,v.shape)
        s = K.batch_dot(hw_flatten(q),
                        K.permute_dimensions(k, (0, 2, 1)))  # # [bs, N, M]
        #         print(s.shape)
        beta = K.softmax(s, axis=-1)  # attention map
        if self.masks is not None:
            beta = K.permute_dimensions(x=beta, pattern=(0, 2, 1))
            #             print(s.shape)
            beta = kl.Multiply()([beta, self.masks])
            beta = K.permute_dimensions(x=beta, pattern=(0, 2, 1))


#             print(s.shape)
#         print('s.shape:',s.shape)

        self.beta_shape = tuple(beta.shape[1:].as_list())
        #         print('hw_flatten(v).shape:',hw_flatten(v).shape)
        o = K.batch_dot(beta, v)  # [bs, N, C]
        #         print('o.shape:',o.shape)
        o = K.reshape(o, shape=K.shape(x1))  # [bs, h, w, C]
        #         print('o.shape:',o.shape)
        o = K.conv2d(o, kernel=self.kernel_o, strides=(1, 1), padding='same')
        o = K.bias_add(o, self.bias_o)
        #         o = kl.tanh(alpha=1.0)(o)
        #         print('o.shape:',o.shape)
        #         x_text = self.gamma1 * x1
        # #         print('x_text.shape:',x_text,x_text.shape)
        #         x_att = self.gamma2 * o
        # #         print('x_att.shape:',x_att,x_att.shape)
        #         x_out = K.concatenate([x_text,x_att],axis=-1) #kl.Concatenate()([x_text,x_att])
        #         print('x_out.shape:',x_out,x_out.shape)
        self.out_sh = tuple(o.shape.as_list())
        return [o, beta]
Example #13
    def call(self, inputs, training=None):
        q, k, v, seq_len = inputs

        q = self.split_heads(K.conv1d(q, self.W_Q), self.num_heads)
        k = self.split_heads(K.conv1d(k, self.W_K), self.num_heads)
        v = self.split_heads(K.conv1d(v, self.W_V), self.num_heads)

        scale = self.d**(1 / 2)
        q *= scale
        x = self.dot_product_attention(q, k, v, seq_len, self.dropout,
                                       training)
        x = self.combine_heads(x)
        return K.conv1d(x, self.W_O)
Example #14
 def call(self, x):
     """
     Main compute function. If bias is used, subtract before multiplying with
     kernel. Otherwise, just multiply by kernel.
     :param x: input tensor
     :return: either K*x or K*(x-b)
     """
     if self.tied_layer.use_bias is True:
         output = K.bias_add(x, -1 * self.bias)
         output = K.conv1d(output, self.kernel)
     else:
         output = K.conv1d(x, self.kernel)
     return output
Example #15
 def ista_iteration(z_old, ctr):
     """
     ISTA iteration
     :param z_old: sparse code from the previous iteration
     :param ctr: counter to monitor the iteration
     :return: z_new, ctr+1
     """
     # zero-pad
     paddings = tf.constant(
         [[0, 0], [self.kernel_size[0] - 1, self.kernel_size[0] - 1],
          [0, 0]])
     z_pad = tf.pad(z_old, paddings, "CONSTANT")
     # Hz
     H_z_old = K.conv1d(z_pad, self.kernel, padding="valid")
     # take residuals
     res = tf.add(self.y, -H_z_old)
     # convolve with HT
     HT_res = K.conv1d(res, self.tied_layer.kernel, padding="valid")
     # divide by L
     HT_res_L = tf.multiply(HT_res, 1 / self.L)
     # get new z before shrinkage
     pre_z_new = tf.add(z_old, HT_res_L)
     # soft-thresholding
     # multiply lambda / L to be the bias
     bias_with_L = self.lambda_value / self.L
     bias_with_L = tf.cast(bias_with_L, tf.float32)
     bias_vector = tf.add(
         bias_with_L[0],
         tf.zeros((self.output_dim1, 1), dtype=tf.float32))
     # apply a different bias for each convolution kernel
     for n in range(self.output_dim2 - 1):
         temp = tf.add(
             bias_with_L[n + 1],
             tf.zeros((self.output_dim1, 1), dtype=tf.float32),
         )
         bias_vector = tf.concat([bias_vector, temp], axis=1)
     # add bias
     output_pos = K.bias_add(pre_z_new, -1 * bias_vector)
     if self.twosided:
         output_neg = K.bias_add(pre_z_new, bias_vector)
     # shrinkage
     output_pos = self.activation(output_pos)
     if self.twosided:
         output_neg = -1 * self.activation(-1 * output_neg)
     if self.twosided:
         output = output_pos + output_neg
     else:
         output = output_pos
     z_new = output
     return z_new, ctr + 1
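The comments in ista_iteration spell out one ISTA step: synthesize H z from the current code, take the residual against y, correlate with Hᵀ, scale by 1/L, and soft-threshold with λ/L. A plain-NumPy sketch of the same update, with a dense dictionary matrix H standing in for the two K.conv1d calls (illustrative only):

import numpy as np

def soft_threshold(x, thr):
    # shrinkage operator that the layer builds from bias_add + activation
    return np.sign(x) * np.maximum(np.abs(x) - thr, 0.0)

def ista_step(z_old, y, H, L, lam):
    res = y - H @ z_old                # residual against the observation
    pre_z = z_old + (H.T @ res) / L    # gradient step scaled by 1/L
    return soft_threshold(pre_z, lam / L)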
Example #16
    def call(self, x):
        if self.algorithm == "GI":
            L = K.shape(x)[1]
            pad_right = (self.dilation_factor - L % self.dilation_factor
                         ) if L % self.dilation_factor != 0 else 0

            pad = [[0, pad_right]]
            # decomposition to smaller-sized feature maps
            #[N,L,C] -> [N*d, L/d, C]
            o = K.tf.space_to_batch_nd(x,
                                       paddings=pad,
                                       block_shape=[self.dilation_factor])

            s = 1
            o = K.conv1d(o, self.w, s, padding='same')

            l = K.tf.split(o, self.dilation_factor, axis=0)
            res = []
            for i in range(0, self.dilation_factor):
                res.append(self.fix_w[0, i] * l[i])
                for j in range(1, self.dilation_factor):
                    res[i] += self.fix_w[j, i] * l[j]
            o = K.tf.concat(res, axis=0)
            if self.biased:
                o = K.bias_add(o, self.b)
            o = K.tf.batch_to_space_nd(o,
                                       crops=pad,
                                       block_shape=[self.dilation_factor])

            return o
        elif self.algorithm == "SSC":
            mask = np.zeros([self.fix_w_size, self.fix_w_size, 1, 1],
                            dtype=np.float32)
            mask[self.dilation_factor - 1, self.dilation_factor - 1, 0, 0] = 1

            self.fix_w = K.tf.add(self.fix_w, K.constant(mask,
                                                         dtype=tf.float32))
            o = K.expand_dims(x, -1)
            # maybe we can also use K.separable_conv1d
            o = K.conv2d(o, self.fix_w, strides=[1, 1], padding='same')
            o = K.squeeze(o, -1)
            o = K.conv1d(o,
                         self.w,
                         dilation_rate=self.dilation_factor,
                         padding='same')

            if self.biased:
                o = K.bias_add(o, self.b)
            return o
Example #17
    def call(self, u_vecs):
        if self.share_weights:
            u_hat_vecs = K.conv1d(u_vecs, self.W)
        else:
            u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

        batch_size = K.shape(u_vecs)[0]
        input_num_capsule = K.shape(u_vecs)[1]
        u_hat_vecs = K.reshape(u_hat_vecs, (batch_size, input_num_capsule,
                                            self.num_capsule, self.dim_capsule))
        u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))
        # final u_hat_vecs.shape = [None, num_capsule, input_num_capsule, dim_capsule]

        b = K.zeros_like(u_hat_vecs[:, :, :, 0])  # shape = [None, num_capsule, input_num_capsule]
        for i in range(self.routings):
            c = softmax(b, 1)
            o = K.batch_dot(c, u_hat_vecs, [2, 2])
            if K.backend() == 'theano':
                o = K.sum(o, axis=1)
            if i < self.routings - 1:
                o = K.l2_normalize(o, -1)
                b = K.batch_dot(o, u_hat_vecs, [2, 3])
                if K.backend() == 'theano':
                    b = K.sum(b, axis=1)

        return self.activation(o)
Example #18
    def call(self, inputs):
        if self.rank == 1:
            outputs = K.conv1d(
                inputs,
                self.kernel,
                strides=self.strides[0],
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate[0])
        if self.rank == 2:
            outputs = K.conv2d(
                inputs,
                self.kernel,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)
        if self.rank == 3:
            outputs = K.conv3d(
                inputs,
                self.kernel,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)

        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
Example #19
File: models.py Project: rpp0/emma
    def call(self, inputs):
        if self.normalize_inputs:
            '''
            TODO: This will not result in a true ZN correlation because we cannot set a stride for the reduce_mean operator. Can this behavior be enforced using
            something like https://www.tensorflow.org/api_docs/python/tf/strided_slice?
            '''
            inputs_mean = tf.reduce_mean(inputs, axis=1, keep_dims=True)
            inputs_l2norm = tf.norm(inputs, ord=2, axis=1, keep_dims=True)
            inputs = tf.divide(tf.subtract(inputs, inputs_mean),
                               inputs_l2norm + self.epsilon)

        outputs = K.conv1d(inputs,
                           self.zn_kernel,
                           strides=self.strides[0],
                           padding=self.padding,
                           data_format=self.data_format,
                           dilation_rate=self.dilation_rate[0])

        if self.use_bias:
            outputs = K.bias_add(outputs,
                                 self.bias,
                                 data_format=self.data_format)

        outputs = K.max(outputs, axis=1, keepdims=False)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
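The TODO in Example #19 concerns zero-normalized (ZN) correlation: the layer normalizes each whole trace once, while a true ZN correlation would normalize every stride window separately. For reference, a plain-NumPy sketch of ZN correlation between a single window and a kernel (an illustrative helper, not part of the layer):

import numpy as np

def zn_correlation(window, kernel, epsilon=1e-8):
    # subtract the mean and divide by the L2 norm before correlating,
    # mirroring what the layer does once per trace
    w = window - window.mean()
    w = w / (np.linalg.norm(w) + epsilon)
    return float(np.dot(w, kernel))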
Example #20
    def call(self, inputs):
        if self.rank == 1:
            outputs = K.conv1d(inputs,
                               self.kernel * self.mask,
                               strides=self.strides[0],
                               padding=self.padding,
                               data_format=self.data_format,
                               dilation_rate=self.dilation_rate[0])
        """
        if self.rank == 1:
            outputs = K.conv1d(
                inputs,
                self.kernel*self.mask, ### add mask multiplication
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)
        """

        if self.use_bias:
            outputs = K.bias_add(outputs,
                                 self.bias,
                                 data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
Example #21
    def call(self, inputs, **kwargs):
        """Following the routing algorithm from Hinton's paper,
        but replace b = b + <u,v> with b = <u,v>.

        This change can improve the feature representation of the capsule.

        However, you can replace
            b = K.batch_dot(outputs, hat_inputs, [2, 3])
        with
            b += K.batch_dot(outputs, hat_inputs, [2, 3])
        to get standard routing.
        """

        if self.share_weights:
            hat_inputs = K.conv1d(inputs, self.kernel)
        else:
            hat_inputs = K.local_conv1d(inputs, self.kernel, [1], [1])

        batch_size = K.shape(inputs)[0]
        input_num_capsule = K.shape(inputs)[1]
        hat_inputs = K.reshape(hat_inputs,
                               (batch_size, input_num_capsule,
                                self.num_capsule, self.dim_capsule))
        hat_inputs = K.permute_dimensions(hat_inputs, (0, 2, 1, 3))

        b = K.zeros_like(hat_inputs[:, :, :, 0])
        print(self.routings)
        for i in range(self.routings):
            c = K.softmax(b, 1)
            o = self.activation(K.batch_dot(c, hat_inputs, [2, 2]))
            if i < self.routings - 1:
                b = K.batch_dot(o, hat_inputs, [2, 3])
                if K.backend() == 'theano':
                    o = K.sum(o, axis=1)
        return o
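self.activation in these Capsule examples is typically the squash non-linearity from the dynamic-routing paper; it is not defined in the snippet, so the following is a sketch of the Keras-backend version that commonly accompanies this Capsule code (an assumption, not taken from Example #21):

from keras import backend as K

def squash(x, axis=-1):
    # rescale each capsule vector to length in [0, 1) while keeping its direction
    s_squared_norm = K.sum(K.square(x), axis, keepdims=True) + K.epsilon()
    scale = K.sqrt(s_squared_norm) / (1.0 + s_squared_norm)
    return x * scale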
Example #22
    def call(self, inputs):
        c, q, c_len, q_len = inputs
        d = c.shape[-1]  # hidden_dim

        # similarity
        c_tile = tf.tile(tf.expand_dims(c, 2), [1, 1, self.ques_limit, 1])
        q_tile = tf.tile(tf.expand_dims(q, 1), [1, self.cont_limit, 1, 1])
        total_len = self.ques_limit * self.cont_limit
        c_mat = tf.reshape(c_tile, [-1, total_len, d])
        q_mat = tf.reshape(q_tile, [-1, total_len, d])
        c_q = c_mat * q_mat
        weight_in = tf.concat([c_mat, q_mat, c_q], 2)
        S = tf.reshape(K.conv1d(weight_in, self.W),
                       [-1, self.cont_limit, self.ques_limit])

        # mask
        # mask: (batch, 1, c_len)
        c_mask = tf.sequence_mask(c_len,
                                  maxlen=self.cont_limit,
                                  dtype=tf.float32)
        # mask: (batch, 1, q_len)
        q_mask = tf.sequence_mask(q_len,
                                  maxlen=self.ques_limit,
                                  dtype=tf.float32)
        # mask: (batch, c_len, q_len)
        mask = tf.matmul(c_mask, q_mask, transpose_a=True)

        # softmax
        S_q = self.masked_softmax(S, mask, axis=2)
        S_c = self.masked_softmax(S, mask, axis=1)
        a = tf.matmul(S_q, q)
        b = tf.matmul(tf.matmul(S_q, S_c, transpose_b=True), c)
        x = tf.concat([c, a, c * a, c * b], axis=2)
        return [x, S_q, S_c]
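Example #22 calls self.masked_softmax, which is not shown. A minimal sketch of the usual implementation, assuming the same large-negative-bias trick used in Example #9 (illustrative, not taken from the source):

import tensorflow as tf

def masked_softmax(logits, mask, axis=-1, mask_value=-1e30):
    # push masked positions towards -inf so softmax assigns them ~0 probability
    logits = logits + mask_value * (1.0 - tf.cast(mask, tf.float32))
    return tf.nn.softmax(logits, axis=axis)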
Example #23
    def call(self, inputs, mask=None):
        if mask is not None:
            if K.ndim(mask) == K.ndim(inputs) - 1:
                mask = K.expand_dims(mask)

            inputs *= K.cast(mask, K.floatx())

        output = K.conv1d(inputs,
                          self.kernel,
                          strides=self.strides[0],
                          padding=self.padding,
                          data_format=self.data_format,
                          dilation_rate=self.dilation_rate[0])

        if self.use_bias:
            m = K.not_equal(output, 0)
            output = K.bias_add(output,
                                self.bias,
                                data_format=self.data_format)
            output *= K.cast(m, K.floatx())

        # Apply activations on the image
        if self.activation is not None:
            output = self.activation(output)

        return output
Example #24
 def loss(y_true, y_pred):
     l1 = K.mean((y_pred - y_true), axis=-1)
     klayer1 = K.expand_dims(klayer, 2)
     kconv = conv1d(klayer1, kwindow2, padding='same')
     a = K.abs(kconv[:, :kconv.shape[1] - 1] - kconv[:, 1:])
     l2 = K.mean(a, axis=1)
     return (l1 + l2)
Example #25
    def call(self, inputs):
        if self.share_weights:
            u_hat_vectors = K.conv1d(inputs, self.W)
        else:
            u_hat_vectors = K.local_conv1d(inputs, self.W, [1], [1])

        # u_hat_vectors : The spatially transformed input vectors (with local_conv_1d)

        batch_size = K.shape(inputs)[0]
        input_num_capsule = K.shape(inputs)[1]
        u_hat_vectors = K.reshape(u_hat_vectors,
                                  (batch_size, input_num_capsule,
                                   self.num_capsule, self.dim_capsule))

        u_hat_vectors = K.permute_dimensions(u_hat_vectors, (0, 2, 1, 3))
        routing_weights = K.zeros_like(u_hat_vectors[:, :, :, 0])

        for i in range(self.routings):
            capsule_weights = K.softmax(routing_weights)
            outputs = K.batch_dot(capsule_weights, u_hat_vectors, [2, 2])
            if K.ndim(outputs) == 4:
                outputs = K.sum(outputs, axis=1)
            if i < self.routings - 1:
                outputs = K.l2_normalize(outputs, -1)
                routing_weights = K.batch_dot(outputs, u_hat_vectors, [2, 3])
                if K.ndim(routing_weights) == 4:
                    routing_weights = K.sum(routing_weights, axis=1)

        return self.activation(outputs)
Example #26
    def call(self, u_vecs):
        if self.share_weights:
            u_hat_vecs = K.conv1d(u_vecs, self.W)  # bsz,200,160
        else:
            u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

        batch_size = K.shape(u_vecs)[0]
        input_num_capsule = K.shape(u_vecs)[1]
        u_hat_vecs = K.reshape(
            u_hat_vecs, (batch_size, input_num_capsule, self.num_capsule,
                         self.dim_capsule))  # bsz,200,10,16
        u_hat_vecs = K.permute_dimensions(u_hat_vecs,
                                          (0, 2, 1, 3))  # bsz,10,200,16
        # final u_hat_vecs.shape = [None, num_capsule, input_num_capsule, dim_capsule]

        b = K.zeros_like(u_hat_vecs[:, :, :, 0])  # bsz,10,200
        for i in range(self.routings):
            b = K.permute_dimensions(b, (0, 2, 1))  # bsz,200,10
            c = K.softmax(b)  # bsz,200,10, softmax over the last axis (size 10)
            c = K.permute_dimensions(c, (0, 2, 1))  # bsz,10,200
            b = K.permute_dimensions(b, (0, 2, 1))  # bsz,200,10
            outputs = self.activation(K.batch_dot(c, u_hat_vecs,
                                                  [2, 2]))  # bsz,10,16
            if i < self.routings - 1:
                b = K.batch_dot(outputs, u_hat_vecs, [2, 3])  # bsz,10,200

        return outputs
Example #27
    def call(self, inputs):
        """Following the routing algorithm from Hinton's paper,
        but replace b = b + <u,v> with b = <u,v>.

        This change can improve the feature representation of Capsule.

        However, you can replace
            b = K.batch_dot(outputs, hat_inputs, [2, 3])
        with
            b += K.batch_dot(outputs, hat_inputs, [2, 3])
        to realize a standard routing.
        """

        if self.share_weights:
            hat_inputs = K.conv1d(inputs, self.kernel)
        else:
            hat_inputs = K.local_conv1d(inputs, self.kernel, [1], [1])

        batch_size = K.shape(inputs)[0]
        input_num_capsule = K.shape(inputs)[1]
        hat_inputs = K.reshape(hat_inputs,
                               (batch_size, input_num_capsule,
                                self.num_capsule, self.dim_capsule))
        hat_inputs = K.permute_dimensions(hat_inputs, (0, 2, 1, 3))

        b = K.zeros_like(hat_inputs[:, :, :, 0])
        for i in range(self.routings):
            c = softmax(b, 1)
            o = self.activation(K.batch_dot(c, hat_inputs, [2, 2]))
            if i < self.routings - 1:
                b += K.batch_dot(o, hat_inputs, [2, 3])
                if K.backend() == 'theano':
                    o = K.sum(o, axis=1)

        return o
Example #28
 def call(self, x):
     _ = K.conv1d(x, self.kernel, padding='same')
     _ = _[:, :, :self.output_dim] * K.sigmoid(_[:, :, self.output_dim:])
     if self.residual:
         return _ + x
     else:
         return _
Example #29
def KernelSeqDive(tmp_ker, seqs, Pos=True):
    """
    For each sequence, extract the fragment that responds most strongly to the kernel, together with the corresponding convolution value.
    Optionally also return the position of the mined fragment as [sequence number, start position, end position].
    :param tmp_ker:
    :param seqs:
    :return:
    """
    ker_len = tmp_ker.shape[0]
    inputs = K.placeholder(seqs.shape)
    ker = K.variable(tmp_ker.reshape(ker_len, 4, 1))
    conv_result = K.conv1d(inputs,
                           ker,
                           padding="valid",
                           strides=1,
                           data_format="channels_last")
    max_idxs = K.argmax(conv_result, axis=1)
    max_Value = K.max(conv_result, axis=1)
    # sort_idxs = tensorflow.nn.top_k(tensorflow.transpose(max_Value,[1,0]), 100, sorted=True).indices

    f = K.function(inputs=[inputs], outputs=[max_idxs, max_Value])
    ret_idxs, ret = f([seqs])

    if Pos:
        seqlist = []
        SeqInfo = []
        for seq_idx in range(ret.shape[0]):
            start_idx = ret_idxs[seq_idx]
            seqlist.append(seqs[seq_idx,
                                start_idx[0]:start_idx[0] + ker_len, :])
            SeqInfo.append([seq_idx, start_idx[0], start_idx[0] + ker_len])
        del f
        return seqlist, ret, np.asarray(SeqInfo)
    else:
        return ret
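KernelSeqDive reshapes the kernel to (ker_len, 4, 1), so seqs is expected to be one-hot encoded with 4 channels (e.g. DNA) and shape (num_seqs, seq_len, 4). A hedged usage sketch, assuming a TF1-style Keras backend where K.placeholder and K.function are available (the arrays below are made up for illustration):

import numpy as np

# 100 random one-hot "DNA" sequences of length 50 (channels: A, C, G, T)
rng = np.random.default_rng(0)
seqs = np.eye(4, dtype="float32")[rng.integers(0, 4, size=(100, 50))]

# a motif of length 8 over the 4 channels, e.g. a learned first-layer kernel
tmp_ker = rng.normal(size=(8, 4)).astype("float32")

fragments, scores, positions = KernelSeqDive(tmp_ker, seqs, Pos=True)
# fragments[i]: the length-8 slice of sequence i with the strongest response
# scores[i]:    the corresponding maximum convolution value
# positions[i]: [sequence index, start, end] of that slice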
Example #30
    def call(self, inputs, training=None):
        def _l2normalize(v, eps=1e-12):
            return v / (K.sum(v**2)**0.5 + eps)

        def power_iteration(W, u):
            _u = u
            _v = _l2normalize(K.dot(_u, K.transpose(W)))
            _u = _l2normalize(K.dot(_v, W))
            return _u, _v

        if self.spectral_normalization:
            W_shape = self.kernel.shape.as_list()
            #Flatten the Tensor
            W_reshaped = K.reshape(self.kernel, [-1, W_shape[-1]])
            _u, _v = power_iteration(W_reshaped, self.u)
            #Calculate Sigma
            sigma = K.dot(_v, W_reshaped)
            sigma = K.dot(sigma, K.transpose(_u))
            #normalize it
            W_bar = W_reshaped / sigma
            #reshape weight tensor
            if training in {0, False}:
                W_bar = K.reshape(W_bar, W_shape)
            else:
                with tf.control_dependencies([self.u.assign(_u)]):
                    W_bar = K.reshape(W_bar, W_shape)

            # update weight
            self.kernel = W_bar

        if self.rank == 1:
            outputs = K.conv1d(inputs,
                               self.kernel,
                               strides=self.strides[0],
                               padding=self.padding,
                               data_format=self.data_format,
                               dilation_rate=self.dilation_rate[0])
        if self.rank == 2:
            outputs = K.conv2d(inputs,
                               self.kernel,
                               strides=self.strides,
                               padding=self.padding,
                               data_format=self.data_format,
                               dilation_rate=self.dilation_rate)
        if self.rank == 3:
            outputs = K.conv3d(inputs,
                               self.kernel,
                               strides=self.strides,
                               padding=self.padding,
                               data_format=self.data_format,
                               dilation_rate=self.dilation_rate)

        if self.use_bias:
            outputs = K.bias_add(outputs,
                                 self.bias,
                                 data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
Example #31
 def call(self, inputs):
     if self.rank == 1:
         self.init_left()
         self.init_right()
         k_weights_left = K.sigmoid(self.k_weights_3d_left)
         k_weights_right = K.sigmoid(self.k_weights_3d_right)
         MaskFinal = k_weights_left + k_weights_right - 1
         mask = K.repeat_elements(MaskFinal, 4, axis=1)
         self.MaskFinal = K.sigmoid(self.k_weights_3d_left) + K.sigmoid(self.k_weights_3d_right) - 1
         kernel = self.kernel * mask
         outputs = K.conv1d(
             inputs,
             kernel,
             strides=self.strides[0],
             padding=self.padding,
             data_format=self.data_format,
             dilation_rate=self.dilation_rate[0])
     if self.use_bias:
         outputs = K.bias_add(
             outputs,
             self.bias,
             data_format=self.data_format)
     if self.activation is not None:
         return self.activation(outputs)
     return outputs
Example #32
    def call(self, u_vecs, mask=None):
        if self.share_weights:
            u_hat_vecs = K.conv1d(u_vecs, self.W)
        else:
            u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

        batch_size = K.shape(u_vecs)[0]
        input_num_capsule = K.shape(u_vecs)[1]
        u_hat_vecs = K.reshape(u_hat_vecs,
                               (batch_size, input_num_capsule,
                                self.num_capsule, self.dim_capsule))
        u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))
        # final u_hat_vecs.shape = [None, num_capsule, input_num_capsule, dim_capsule]

        b = K.zeros_like(
            u_hat_vecs[:, :, :,
                       0])  # shape = [None, num_capsule, input_num_capsule]
        for i in range(self.routings):
            b = K.permute_dimensions(
                b, (0, 2, 1))  # shape = [None, input_num_capsule, num_capsule]
            c = K.softmax(b)
            c = K.permute_dimensions(c, (0, 2, 1))
            b = K.permute_dimensions(b, (0, 2, 1))
            outputs = self.activation(K.batch_dot(c, u_hat_vecs, [2, 2]))
            if i < self.routings - 1:
                b = K.batch_dot(outputs, u_hat_vecs, [2, 3])

        return outputs
Example #33
    def call(self, inputs):
        input_shape = K.shape(inputs)
        outputs = []
        for i in range(len(self.ac_list)):
            # print('i',i,'input_shape',K.int_shape(inputs),'k_shape',K.int_shape(self.kernel))
            temp_outputs = K.conv1d(inputs,
                                    self.kernel,
                                    strides=1,
                                    padding='same',
                                    data_format='channels_last',
                                    dilation_rate=self.ac_list[i])
            if self.use_bias:
                temp_outputs = K.bias_add(temp_outputs,
                                          self.bias,
                                          data_format='channels_last')
            if self.activation is not None:
                temp_outputs = self.activation(temp_outputs)

            outputs.append(temp_outputs)
        out = K.concatenate(outputs)
        # print('output',K.int_shape(out))
        out = K.reshape(
            out,
            [input_shape[0], input_shape[1], self.filters * len(self.ac_list)])
        return out