    def build(self, input_shape):
        # Assumes the usual Keras-2-era imports: `import tensorflow as tf`,
        # `from keras import backend as K`, `from keras.layers import InputSpec`.
        # Input is (batch, n_slices, input_dim); ndim must be >= 3 for the
        # slice-axis indexing below to make sense.
        assert len(input_shape) >= 3
        input_dim = input_shape[-1]
        self.split = input_shape[-2]

        # One independent (input_dim, units) kernel per slice, fused into a
        # single (input_dim, units * n_slices) matrix; call() recovers the
        # pieces with tf.split. Unique names avoid weight-name collisions.
        kernels = []
        for i in range(self.split):
            kernel = self.add_weight(shape=(input_dim, self.units),
                                     initializer=self.kernel_initializer,
                                     name='kernel_%d' % i,
                                     regularizer=self.kernel_regularizer,
                                     constraint=self.kernel_constraint)
            kernels.append(kernel)
        self.kernel = K.concatenate(kernels, axis=-1)

        if self.use_bias:
            # Likewise one (units,) bias per slice, fused into (units * n_slices,).
            biases = []
            for i in range(self.split):
                bias = self.add_weight(shape=(self.units,),
                                       initializer=self.bias_initializer,
                                       name='bias_%d' % i,
                                       regularizer=self.bias_regularizer,
                                       constraint=self.bias_constraint)
                biases.append(bias)
            self.bias = K.concatenate(biases, axis=-1)
        else:
            self.bias = None

        self.input_spec = InputSpec(min_ndim=3, axes={-1: input_dim})
        self.built = True
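
A quick standalone check of the fused weight layout built above; TensorFlow 2.x is assumed and the sizes are illustrative only.

import tensorflow as tf

input_dim, units, n_slices = 8, 4, 3
kernels = [tf.random.normal((input_dim, units)) for _ in range(n_slices)]
fused = tf.concat(kernels, axis=-1)            # (8, 12), mirrors self.kernel
recovered = tf.split(fused, n_slices, axis=1)  # three (8, 4) pieces again
assert all(r.shape == (input_dim, units) for r in recovered)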
    def call(self, inputs):
        # Undo the fusion from build(): split the input into its n_slices
        # rows and the fused weights into the matching per-slice pieces.
        unstacked = tf.split(inputs, self.split, axis=1)
        W = tf.split(self.kernel, len(unstacked), axis=1)
        if self.use_bias:
            # Guard the split: self.bias is None when use_bias is False,
            # and tf.split(None, ...) would raise.
            b = tf.split(self.bias, len(unstacked), axis=0)

        outputs = []
        for i, j in enumerate(unstacked):
            output = K.dot(j, W[i])
            if self.use_bias:
                output = K.bias_add(output, b[i])
            if self.activation is not None:
                output = self.activation(output)
            outputs.append(output)

        # Re-join the per-slice results along the slice axis.
        return K.concatenate(outputs, axis=1)
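
The same forward pass written as a standalone sketch (TensorFlow 2.x assumed, dummy tensors standing in for the layer's variables): each slice along axis 1 is multiplied with its own kernel, then the results are re-joined.

import tensorflow as tf

batch, n_slices, input_dim, units = 2, 3, 8, 4
x = tf.random.normal((batch, n_slices, input_dim))
W = [tf.random.normal((input_dim, units)) for _ in range(n_slices)]
b = [tf.zeros((units,)) for _ in range(n_slices)]

slices = tf.split(x, n_slices, axis=1)      # n tensors of (batch, 1, input_dim)
outs = []
for i, s in enumerate(slices):
    s = tf.squeeze(s, axis=1)               # (batch, input_dim)
    outs.append(tf.matmul(s, W[i]) + b[i])  # (batch, units)
y = tf.stack(outs, axis=1)                  # (batch, n_slices, units)
print(y.shape)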
    def call(self, inputs):
        # Grouped 1-D convolution: the channel axis of the input and the
        # output axis of the fused kernel are both split into `filters`
        # groups, and each group is convolved independently.
        x = tf.split(inputs, self.filters, axis=2)
        W = tf.split(self.kernel, self.filters, axis=2)

        outputs = []
        for i in range(self.filters):
            output = K.conv1d(x[i],
                              W[i],
                              strides=self.strides[0],
                              padding=self.padding,
                              data_format=self.data_format,
                              dilation_rate=self.dilation_rate[0])
            outputs.append(output)

        outputs = K.concatenate(outputs, axis=-1)

        if self.use_bias:
            outputs = K.bias_add(outputs,
                                 self.bias,
                                 data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
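
For the split-and-convolve pattern above to type-check, each input group's channel count must match the in-channel dimension of its kernel slice; the depthwise case (one channel per group) is sketched below with plain TensorFlow 2.x ops and illustrative sizes.

import tensorflow as tf

batch, length, filters, kernel_size = 2, 16, 4, 3
x = tf.random.normal((batch, length, filters))
kernel = tf.random.normal((kernel_size, 1, filters))  # one 1-channel tap per group

xs = tf.split(x, filters, axis=2)        # `filters` inputs of (batch, length, 1)
Ws = tf.split(kernel, filters, axis=2)   # `filters` kernels of (kernel_size, 1, 1)

outs = [tf.nn.conv1d(xs[i], Ws[i], stride=1, padding='SAME')
        for i in range(filters)]
y = tf.concat(outs, axis=-1)             # (batch, length, filters)
print(y.shape)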
Example #4
    def call(self, inputs):
        # The channel axis carries two halves: the signal to be filtered and
        # the kernel taps, both produced upstream in the network.
        x = tf.split(inputs, 2, axis=2)
        y = tf.split(x[0], self.filters, axis=-1)  # per-filter signals
        w = tf.split(x[1], self.filters, axis=-1)  # per-filter kernel taps

        # Convolve each sample with its own dynamically generated kernel;
        # the Python loop fixes the batch size at graph-build time.
        outputs = []
        for j in range(self.batch):
            outputs_ = []
            for i in range(self.filters):
                X = y[i][j]
                X = tf.reshape(X, (1, X.shape[0], 1))  # NWC layout, one sample
                W = w[i][j]
                W = tf.reshape(W, (W.shape[0], 1, 1))  # (width, in_ch, out_ch)

                output = K.conv1d(X,
                                  W,
                                  strides=self.strides[0],
                                  padding=self.padding,
                                  data_format=self.data_format,
                                  dilation_rate=self.dilation_rate[0])
                outputs_.append(output)

            outputs_ = K.concatenate(outputs_, axis=-1)
            outputs.append(outputs_)

        # Stack the per-sample results back along the batch axis.
        outputs = K.concatenate(outputs, axis=0)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
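
Stripped of the layer plumbing, the core trick is convolving each sample with a kernel that the network itself produced; a minimal TensorFlow 2.x sketch with made-up sizes:

import tensorflow as tf

batch, length, k = 2, 16, 4
signal = tf.random.normal((batch, length))  # plays the role of y[i][j]
taps = tf.random.normal((batch, k))         # plays the role of w[i][j]

outs = []
for j in range(batch):
    X = tf.reshape(signal[j], (1, length, 1))  # NWC layout, one sample
    W = tf.reshape(taps[j], (k, 1, 1))         # (width, in_ch, out_ch)
    outs.append(tf.nn.conv1d(X, W, stride=1, padding='SAME'))
y = tf.concat(outs, axis=0)                    # (batch, length, 1)
print(y.shape)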
Example #5
    def __init__(self,
                 filters,
                 kernel_size,
                 batch,
                 strides=1,
                 padding='valid',
                 dilation_rate=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 input_dim=None,
                 input_length=None,
                 **kwargs):

        if padding not in {'valid', 'same', 'causal'}:
            raise ValueError('Invalid padding mode for Convolution1D: '
                             + str(padding))

        # `input_shape` has to be placed into kwargs *before* the base
        # constructor consumes them; assigning it at the end of __init__,
        # as the original did, has no effect.
        if input_dim:
            kwargs['input_shape'] = (input_length, input_dim)

        super(Conv1D_localTensor, self).__init__(**kwargs)

        self.rank = 1
        self.filters = filters
        self.batch = batch
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, self.rank,
                                                      'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, self.rank,
                                                  'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = K.normalize_data_format('channels_last')
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, self.rank, 'dilation_rate')
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(ndim=self.rank + 2)
        self.input_dim = input_dim
        self.input_length = input_length
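
Assuming the rest of the class (a matching build/call, as in the surrounding examples) is in scope, construction might look like the hypothetical sketch below; the argument values are illustrative only. Note that `batch` is baked into the graph, because call() iterates over it with a Python loop.

layer = Conv1D_localTensor(filters=4,
                           kernel_size=3,
                           batch=8,  # must equal the real batch size
                           padding='same',
                           activation='relu')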
Example #6
    def call(self, inputs):
        N = self.input_length
        ps = self.pulse_sec

        # First half of the channel axis holds pulse signs/amplitudes,
        # second half the (fractional) pulse positions.
        x = tf.split(inputs, 2, axis=2)
        sgn = tf.split(x[0], self.input_dim, axis=-1)
        idxs = tf.split(x[1], self.input_dim, axis=-1)
        t = self.t

        output = []
        for j in range(self.batch_size):
            output_ = []
            for i in range(self.input_dim):
                s = sgn[i][j][:, 0]
                s = tf.reshape(s, [-1])

                # Scale the normalized positions to sample offsets and
                # quantize them with the differentiable rounding helper.
                idx = idxs[i][j]
                idx = (N / ps - 1) * idx
                idx = castIntSoftMax(idx)
                idx = t + idx

                # SparseTensor indices need (row, col) pairs; the column is
                # always 0 because the target is an (N, 1) vector.
                zeros = tf.zeros((ps, 1), dtype=tf.int64)
                idx = tf.concat([idx, zeros], axis=-1)

                sparse_tensor = tf.SparseTensor(values=s,
                                                indices=idx,
                                                dense_shape=[N, 1])
                # Densify by adding onto a zero vector
                # (tf.sparse.add in TF 2.x).
                sparse_tensor = tf.sparse_add(tf.zeros([N, 1]), sparse_tensor)
                output_.append(sparse_tensor)

            output.append(K.concatenate(output_, axis=1))

        output = tf.stack(output)
        return output
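
The scatter at the heart of the loop, reduced to a standalone TensorFlow 2.x sketch with made-up positions: amplitudes are written into a length-N zero vector via a SparseTensor whose indices are (row, 0) pairs.

import tensorflow as tf

N, ps = 12, 3
values = tf.constant([1.0, -2.0, 0.5])               # pulse amplitudes (s above)
rows = tf.constant([[2], [5], [9]], dtype=tf.int64)  # integer pulse positions
idx = tf.concat([rows, tf.zeros((ps, 1), tf.int64)], axis=-1)

sp = tf.SparseTensor(values=values, indices=idx, dense_shape=[N, 1])
dense = tf.sparse.add(tf.zeros([N, 1]), sp)  # TF 2.x spelling of tf.sparse_add
print(tf.squeeze(dense, -1))                 # zero except at rows 2, 5 and 9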
Example #7
    def call(self, x, mask=None):
        x = K.expand_dims(x, -1)                  # add a dummy dimension on the right
        x = K.permute_dimensions(x, (0, 3, 1, 2))

        # Reuse the tied encoder kernel, transposed so the decoder maps
        # back from feature space to input space.
        W = self.tied_to.kernel
        W = K.expand_dims(W, -1)
        W = tf.transpose(W, (1, 0, 2, 3))

        output = K.conv2d(x, W,
                          strides=(self.strides, self.strides),
                          padding=self.padding,
                          data_format=self.data_format)
        # `if self.bias:` would try to evaluate a tensor as a Python bool,
        # which TensorFlow forbids; test against None instead.
        if self.bias is not None:
            output += K.reshape(self.bias, (1, self.filters, 1, 1))
        output = K.squeeze(output, 3)             # remove the dummy 3rd dimension
        output = K.permute_dimensions(output, (0, 2, 1))
        output = self.activation(output)
        return output
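
The conv2d detour above exists to fit older Keras; one way to express the underlying weight-tying idea directly with conv1d in TensorFlow 2.x (sizes illustrative) is just a transpose of the encoder kernel's channel axes:

import tensorflow as tf

batch, length, in_ch, out_ch, k = 2, 16, 4, 6, 3
enc_kernel = tf.random.normal((k, in_ch, out_ch))  # stands in for tied_to.kernel

# Swap in/out channel axes so the decoder maps features back to inputs.
dec_kernel = tf.transpose(enc_kernel, (0, 2, 1))   # (k, out_ch, in_ch)

h = tf.random.normal((batch, length, out_ch))      # encoder activations
recon = tf.nn.conv1d(h, dec_kernel, stride=1, padding='SAME')
print(recon.shape)                                 # (2, 16, 4)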