def call(self, x, mask=None):
    # 1D convolution implemented as a 2D convolution over a dummy dimension.
    extended_x = K.expand_dims(x, 2)  # add a dummy dimension

    # Transform path: H(x)
    transform = K.conv2d(extended_x, self.W, strides=self.subsample,
                         border_mode=self.border_mode, dim_ordering='tf',
                         filter_shape=self.W_shape)
    transform = K.squeeze(transform, 2)  # remove the dummy dimension
    if self.bias:
        transform += K.reshape(self.b, (1, 1, self.nb_filter))
    transform = self.activation(transform)

    # Transform gate: T(x)
    transform_gate = K.conv2d(extended_x, self.W_gate, strides=self.subsample,
                              border_mode=self.border_mode, dim_ordering='tf',
                              filter_shape=self.W_shape)
    transform_gate = K.squeeze(transform_gate, 2)  # remove the dummy dimension
    if self.bias:
        transform_gate += K.reshape(self.b_gate, (1, 1, self.nb_filter))
    transform_gate = K.sigmoid(transform_gate)

    # A 'valid' convolution shortens the time axis, so zero-pad the gated
    # paths on the right to match the input length before combining.
    padded = x._keras_shape[1] - K.int_shape(transform_gate)[1]

    # Carry path: x * (1 - T(x))
    carry_gate = 1.0 - transform_gate
    carry_gate = K.asymmetric_temporal_padding(carry_gate, left_pad=0, right_pad=padded)
    x_carried = x * carry_gate

    # Highway combination: H(x) * T(x) + x * (1 - T(x))
    transform = transform * transform_gate
    transform = K.asymmetric_temporal_padding(transform, left_pad=0, right_pad=padded)
    return transform + x_carried
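# The layer above implements the highway combination from Srivastava et al.'s
# highway networks: y = H(x) * T(x) + x * (1 - T(x)), where T is the sigmoid
# transform gate. A minimal NumPy sketch of just the combination step; all
# names here are illustrative, not part of the layer above:
import numpy as np

def highway_combine(h, t, x):
    # t is the transform gate; (1 - t) is the carry gate.
    return h * t + x * (1.0 - t)

x = np.array([1.0, 2.0, 3.0])
h = np.tanh(x)                    # transformed activations H(x)
t = 1.0 / (1.0 + np.exp(-x))      # sigmoid transform gate T(x)
print(highway_combine(h, t, x))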
def preprocess_input(self, x):
    if self.bias:
        weights = zip(self.trainable_weights[0:3], self.trainable_weights[3:])
    else:
        weights = self.trainable_weights

    if self.window_size > 1:
        x = K.asymmetric_temporal_padding(x, self.window_size - 1, 0)
    x = K.expand_dims(x, 2)  # add a dummy dimension

    # One convolution per gate: z, f, o
    outputs = []
    for param in weights:
        if self.bias:
            W, b = param
        else:
            W = param
        output = K.conv2d(x, W, strides=self.subsample,
                          border_mode='valid', dim_ordering='tf')
        output = K.squeeze(output, 2)  # remove the dummy dimension
        if self.bias:
            output += K.reshape(b, (1, 1, self.output_dim))
        outputs.append(output)

    if self.dropout is not None and 0. < self.dropout < 1.:
        # Zoneout on the forget gate: randomly force f towards 1 in training.
        f = K.sigmoid(outputs[1])
        outputs[1] = K.in_train_phase(1 - _dropout(1 - f, self.dropout), f)

    return K.concatenate(outputs, 2)
def call(self, x, mask=None):
    if self.causal:
        # Causal convolution: pad only on the left so that output t never
        # sees inputs later than t. A dilated filter of length k spans
        # atrous_rate * (k - 1) + 1 steps, hence this pad amount.
        x = K.asymmetric_temporal_padding(
            x, left_pad=self.atrous_rate * (self.filter_length - 1), right_pad=0)
    return super(CausalAtrousConvolution1D, self).call(x, mask)
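# The causal pad above, atrous_rate * (filter_length - 1), is exactly the
# receptive-field overhang of a dilated filter: padding that much on the left
# keeps the output as long as the input while output t only sees inputs at
# positions <= t. A pure-NumPy sketch of the same idea, assuming a simple
# cross-correlation (names here are illustrative):
import numpy as np

def causal_dilated_conv1d(x, w, dilation):
    pad = dilation * (len(w) - 1)
    xp = np.concatenate([np.zeros(pad, x.dtype), x])  # left-pad only
    return np.array([sum(w[k] * xp[t + pad - k * dilation] for k in range(len(w)))
                     for t in range(len(x))])

x = np.arange(8, dtype='float32')
print(causal_dilated_conv1d(x, np.array([1., 1.]), dilation=2))
# -> [0. 1. 2. 4. 6. 8. 10. 12.], i.e. y[t] = x[t] + x[t-2]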
def preprocess_input(self, x):
    if self.window_size > 1:
        x = K.asymmetric_temporal_padding(x, self.window_size - 1, 0)
    x = K.expand_dims(x, 2)  # add a dummy dimension

    # A single convolution produces all three gates stacked along channels.
    output = K.conv2d(x, self.W, strides=self.subsample,
                      border_mode='valid', dim_ordering='tf')
    output = K.squeeze(output, 2)  # remove the dummy dimension
    if self.bias:
        output += K.reshape(self.b, (1, 1, self.output_dim * 3))

    if self.dropout is not None and 0. < self.dropout < 1.:
        # Split into z, f, o and apply zoneout to the forget gate only.
        z = output[:, :, :self.output_dim]
        f = output[:, :, self.output_dim:2 * self.output_dim]
        o = output[:, :, 2 * self.output_dim:]
        f = K.in_train_phase(1 - _dropout(1 - f, self.dropout), f)
        return K.concatenate([z, f, o], -1)
    else:
        return output
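# Both preprocess_input snippets above call a _dropout helper that is not
# defined in this section. A sketch consistent with its usage might look like
# the following (an assumption; the original helper may differ). Rescaling the
# dropped tensor by (1 - level) undoes K.dropout's 1/(1 - level) inflation, so
# that 1 - _dropout(1 - f, p) acts as a zoneout-style gate forced to 1 for
# dropped units during training.
def _dropout(x, level, noise_shape=None, seed=None):
    x = K.dropout(x, level, noise_shape, seed)
    x *= (1. - level)  # undo the 1/(1 - level) scaling applied by K.dropout
    return x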
def call(self, x, mask=None):
    # Input shape: (nb_samples, time (padded with zeros), input_dim).
    # Note that the .build() method of subclasses MUST define
    # self.input_spec with a complete input shape.
    input_shape = self.input_spec[0].shape
    if self.window_size > 1:
        x = K.asymmetric_temporal_padding(x, self.window_size - 1, 0)
    x = K.expand_dims(x, 2)  # add a dummy dimension

    # One convolution produces both the candidate z and the gate g.
    output = K.conv2d(x, self.W, strides=self.subsample,
                      border_mode='valid', dim_ordering='tf')
    output = K.squeeze(output, 2)  # remove the dummy dimension
    if self.bias:
        output += K.reshape(self.b, (1, 1, self.output_dim * 2))

    # Gated linear unit: activation(z) * sigmoid(g)
    z = output[:, :, :self.output_dim]
    g = output[:, :, self.output_dim:]
    return self.activation(z) * K.sigmoid(g)
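# The call above is a gated linear unit (Dauphin et al.): one convolution
# emits 2 * output_dim channels that are split into a candidate z and a gate
# g, combined as activation(z) * sigmoid(g). A small NumPy illustration of the
# split-and-gate step, with illustrative names only:
import numpy as np

def glu(stacked, output_dim):
    # stacked: (batch, time, 2 * output_dim) -> (batch, time, output_dim)
    z, g = stacked[..., :output_dim], stacked[..., output_dim:]
    return np.tanh(z) * (1.0 / (1.0 + np.exp(-g)))  # tanh(z) * sigmoid(g)

out = glu(np.random.randn(2, 5, 8), output_dim=4)
print(out.shape)  # (2, 5, 4)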
import theano
import theano.tensor as T
import numpy as np
import keras.backend as K

# A test script to validate causal dilated convolutions.
dilation = 2

input = T.fvector()
filters = T.fvector()

# conv2d expects filters shaped
# (output channels, input channels, filter rows, filter columns).
input_reshaped = T.reshape(input, (1, -1, 1))
input_reshaped = K.asymmetric_temporal_padding(input_reshaped, left_pad=dilation, right_pad=0)
input_reshaped = T.reshape(input_reshaped, (1, 1, -1, 1))
filters_reshaped = T.reshape(filters, (1, 1, -1, 1))

# First causal dilated convolution.
out = T.nnet.conv2d(input_reshaped, filters_reshaped, border_mode='valid',
                    filter_dilation=(dilation, 1))

# Second, stacked causal dilated convolution: pad again, then convolve.
out = T.reshape(out, (1, -1, 1))
out = K.asymmetric_temporal_padding(out, left_pad=dilation, right_pad=0)
out = T.reshape(out, (1, 1, -1, 1))
out = T.nnet.conv2d(out, filters_reshaped, border_mode='valid',
                    filter_dilation=(dilation, 1))
out = T.flatten(out)

in_input = np.arange(8, dtype='float32')
in_filters = np.array([1, 1], dtype='float32')

# Compile and run. With filter [1, 1] the stacked output is
# y[t] = x[t] + 2*x[t-2] + x[t-4], i.e. [0, 1, 2, 5, 8, 12, 16, 20]:
# each output depends only on current and past inputs.
f = theano.function([input, filters], out)
print(f(in_input, in_filters))
def call(self, x, mask=None):
    # Left-pad by self.length before delegating to the parent layer's call.
    return super().call(K.asymmetric_temporal_padding(x, self.length, 0), mask)