# The snippets below assume `import tensorflow as tf` and the Keras backend
# imported as `K` (e.g. `from keras import backend as K`).

def call(self, inputs):
    # Grouped dense forward pass: split the feature axis into groups and
    # apply one kernel/bias slice per group.
    # unstacked = K.tf.unstack(inputs, axis=1)
    unstacked = tf.split(inputs, self.split, axis=1)
    W = tf.split(self.kernel, len(unstacked), axis=1)
    b = tf.split(self.bias, len(unstacked), axis=0)
    # kernels = K.tf.unstack(self.kernels, axis=1)
    # biases = K.tf.unstack(self.biases, axis=0)
    outputs = []
    for i, j in enumerate(unstacked):
        output = K.dot(j, W[i])
        if self.use_bias:
            output = K.bias_add(output, b[i])
        if self.activation is not None:
            output = self.activation(output)
        outputs.append(output)
    outputs = K.concatenate(outputs, axis=1)
    # outputs = K.stack(outputs, axis=1)
    return outputs
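# Hedged usage sketch, not from the original source: the toy sizes below are
# assumptions chosen only to illustrate the grouped forward pass above. With
# `split` groups, a (batch, features) input is cut into `split` feature blocks
# and each block gets its own (features/split, units/split) weight slice,
# which is equivalent to a block-diagonal dense layer.
#
# import numpy as np
# import tensorflow as tf
#
# batch, features, units, split = 4, 8, 6, 2              # assumed toy sizes
# x = tf.constant(np.random.rand(batch, features).astype("float32"))
# kernel = tf.constant(np.random.rand(features // split, units).astype("float32"))
# bias = tf.constant(np.zeros(units, dtype="float32"))
#
# blocks = tf.split(x, split, axis=1)                      # split the feature axis
# W = tf.split(kernel, split, axis=1)                      # one weight slice per group
# b = tf.split(bias, split, axis=0)
# outs = [tf.matmul(blk, W[i]) + b[i] for i, blk in enumerate(blocks)]
# y = tf.concat(outs, axis=1)                              # (batch, units)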
def call(self, inputs):
    # Grouped Conv1D: one convolution per channel group, each using its own
    # slice of the kernel, then concatenate the group outputs.
    x = tf.split(inputs, self.filters, axis=2)
    W = tf.split(self.kernel, self.filters, axis=2)
    outputs = []
    for i in range(self.filters):
        output = K.conv1d(x[i], W[i],
                          strides=self.strides[0],
                          padding=self.padding,
                          data_format=self.data_format,
                          dilation_rate=self.dilation_rate[0])
        outputs.append(output)
    outputs = K.concatenate(outputs, axis=-1)
    if self.use_bias:
        outputs = K.bias_add(outputs, self.bias, data_format=self.data_format)
    if self.activation is not None:
        return self.activation(outputs)
    return outputs
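# Hedged sketch, assumed shapes (not from the source): the grouped Conv1D
# above splits the channel axis of a (batch, steps, channels) input into
# `filters` groups and convolves each group with its own kernel slice. The
# minimal version below uses plain tf.nn.conv1d and made-up sizes.
#
# import numpy as np
# import tensorflow as tf
#
# batch, steps, channels, filters, kernel_size = 2, 16, 8, 4, 3   # assumed
# x = tf.constant(np.random.rand(batch, steps, channels).astype("float32"))
# # kernel: (kernel_size, channels // filters, filters); axis=2 is split per group
# kernel = tf.constant(
#     np.random.rand(kernel_size, channels // filters, filters).astype("float32"))
#
# xs = tf.split(x, filters, axis=2)
# Ws = tf.split(kernel, filters, axis=2)
# outs = [tf.nn.conv1d(xs[i], Ws[i], stride=1, padding="SAME")
#         for i in range(filters)]
# y = tf.concat(outs, axis=-1)                   # (batch, steps, filters)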
def call(self, inputs):
    # Per-sample convolution: the first half of the channel axis carries the
    # signals, the second half carries each sample's own filter taps.
    x = tf.split(inputs, 2, axis=2)
    y = tf.split(x[0], self.filters, axis=-1)
    w = tf.split(x[1], self.filters, axis=-1)
    outputs = []
    for j in range(self.batch):
        outputs_ = []
        for i in range(self.filters):
            X = y[i][j]
            X = tf.reshape(X, (1, X.shape[0], 1))
            W = w[i][j][:]
            W = tf.reshape(W, (W.shape[0], 1, 1))
            output = K.conv1d(X, W,
                              strides=self.strides[0],
                              padding=self.padding,
                              data_format=self.data_format,
                              dilation_rate=self.dilation_rate[0])
            outputs_.append(output)
        outputs_ = K.concatenate(outputs_, axis=-1)
        outputs.append(outputs_)
    outputs = K.concatenate(outputs, axis=0)
    if self.activation is not None:
        return self.activation(outputs)
    return outputs
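# Hedged single-sample sketch (assumed length, not from the source): inside
# the loop above, each sample's signal is reshaped to (1, steps, 1) and its
# taps to (width, 1, 1) so that a standard 1-D convolution applies the
# sample-specific filter. Stripped-down illustration:
#
# import numpy as np
# import tensorflow as tf
#
# length = 32                                    # assumed signal length
# signal = tf.constant(np.random.rand(length).astype("float32"))
# taps = tf.constant(np.random.rand(length).astype("float32"))
#
# X = tf.reshape(signal, (1, length, 1))         # (batch=1, steps, channels=1)
# W = tf.reshape(taps, (length, 1, 1))           # (width, in_channels, out_channels)
# out = tf.nn.conv1d(X, W, stride=1, padding="SAME")   # (1, length, 1)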
def call(self, inputs):
    # Scatter signed pulse amplitudes onto a length-N time axis, one sparse
    # tensor per (sample, input dimension) pair, then densify.
    N = self.input_length
    ps = self.pulse_sec
    x = tf.split(inputs, 2, axis=2)
    sgn = tf.split(x[0], self.input_dim, axis=-1)
    idxs = tf.split(x[1], self.input_dim, axis=-1)
    t = self.t
    output = []
    for j in range(self.batch_size):
        output_ = []
        for i in range(self.input_dim):
            s = sgn[i][j][:, 0]
            s = tf.reshape(s, [-1])
            idx = idxs[i][j]
            idx = (N / ps - 1) * idx      # scale normalized offsets to sample indices
            # idx = castIntSoftMax(idx)   # SparseTensor indices must be int64
            idx = t + idx
            zeros = tf.zeros((ps, 1), dtype=tf.int64)
            idx = tf.concat([idx, zeros], axis=-1)
            sparse_tensor = tf.SparseTensor(values=s, indices=idx, dense_shape=[N, 1])
            sparse_tensor = tf.sparse_add(tf.zeros([N, 1]), sparse_tensor)
            output_.append(sparse_tensor)
        output.append(K.concatenate(output_, axis=1))
    output = tf.stack(output)
    return output
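# Hedged sketch with made-up sizes and indices (not from the source): the
# inner step above places `ps` signed amplitudes at chosen time indices of a
# length-N column via a SparseTensor and densifies it. The version below is
# written against the current API (tf.sparse.add replaces the older
# tf.sparse_add used above).
#
# import tensorflow as tf
#
# N, ps = 16, 4                                          # assumed N and pulse count
# values = tf.constant([1.0, -1.0, 1.0, -1.0])           # pulse amplitudes
# times = tf.constant([[1], [5], [9], [13]], dtype=tf.int64)
# zeros = tf.zeros((ps, 1), dtype=tf.int64)
# indices = tf.concat([times, zeros], axis=-1)           # (time, column) pairs
# sparse = tf.SparseTensor(values=values, indices=indices, dense_shape=[N, 1])
# dense = tf.sparse.add(tf.zeros([N, 1]), sparse)        # (N, 1) dense pulse train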