def get_output(self, train=False):
    """Apply a 2D convolution across time: input is 5D
    (num_samples, num_timesteps, stack_size, rows, cols).

    The time axis is folded into the batch axis so one 4D convolution
    covers every timestep, then the result is unfolded back to 5D.
    """
    X = self.get_input(train)
    # Collapse num_samples and num_timesteps into a single leading axis:
    # (samples * timesteps, stack_size, rows, cols).
    newshape = (X.shape[0] * X.shape[1], X.shape[2], X.shape[3], X.shape[4])
    Y = theano.tensor.reshape(X, newshape)

    border_mode = self.border_mode
    if on_gpu() and dnn.dnn_available():
        if border_mode == 'same':
            # cuDNN implements 'same' directly through symmetric zero
            # padding; the pad arithmetic below only lines up for
            # unit strides.
            assert(self.subsample == (1, 1))
            pad_x = (self.nb_row - self.subsample[0]) // 2
            pad_y = (self.nb_col - self.subsample[1]) // 2
            conv_out = dnn.dnn_conv(img=Y, kerns=self.W,
                                    border_mode=(pad_x, pad_y))
        else:
            conv_out = dnn.dnn_conv(img=Y, kerns=self.W,
                                    border_mode=border_mode,
                                    subsample=self.subsample)
    else:
        if border_mode == 'same':
            # T.nnet.conv.conv2d has no 'same' mode: run a 'full'
            # convolution, then center-crop back to the input size.
            border_mode = 'full'
        conv_out = theano.tensor.nnet.conv.conv2d(Y, self.W,
                                                  border_mode=border_mode,
                                                  subsample=self.subsample)
        if self.border_mode == 'same':
            # Crop belongs to this CPU/'full' path ONLY — the cuDNN branch
            # above already produced a 'same'-sized output via padding, and
            # cropping it as well would shift the result.
            shift_x = (self.nb_row - 1) // 2
            shift_y = (self.nb_col - 1) // 2
            conv_out = conv_out[:, :,
                                shift_x:Y.shape[2] + shift_x,
                                shift_y:Y.shape[3] + shift_y]

    # Broadcast the per-filter bias over the batch and spatial axes.
    output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    # Unfold the leading axis back into (samples, timesteps, ...).
    newshape = (X.shape[0], X.shape[1],
                output.shape[1], output.shape[2], output.shape[3])
    return theano.tensor.reshape(output, newshape)
def get_output(self, train):
    """Return activation(conv2d(input, W)) with no bias term.

    Expects a 4D input (batch, stack_size, rows, cols). Supports 'valid',
    'full' and 'same' border modes; 'same' requires unit subsample on the
    cuDNN path and is emulated by full-conv + crop on the CPU path.
    """
    X = self.get_input(train)
    border_mode = self.border_mode
    if on_gpu() and dnn.dnn_available():
        if border_mode == 'same':
            # cuDNN handles 'same' via explicit symmetric padding; the
            # pad arithmetic below is only valid for stride (1, 1).
            assert (self.subsample == (1, 1))
            pad_x = (self.nb_row - self.subsample[0]) // 2
            pad_y = (self.nb_col - self.subsample[1]) // 2
            conv_out = dnn.dnn_conv(img=X, kerns=self.W,
                                    border_mode=(pad_x, pad_y))
        else:
            conv_out = dnn.dnn_conv(img=X, kerns=self.W,
                                    border_mode=border_mode,
                                    subsample=self.subsample)
    else:
        if border_mode == 'same':
            # No 'same' support in T.nnet.conv.conv2d: run a 'full'
            # convolution, then crop back to the input's spatial size.
            border_mode = 'full'
        conv_out = T.nnet.conv.conv2d(X, self.W,
                                      border_mode=border_mode,
                                      subsample=self.subsample)
        if self.border_mode == 'same':
            # Center crop applies only to this CPU/'full' branch; the
            # cuDNN branch already produced a 'same'-sized output via
            # padding, so cropping there would shift the result.
            shift_x = (self.nb_row - 1) // 2
            shift_y = (self.nb_col - 1) // 2
            conv_out = conv_out[:, :,
                                shift_x:X.shape[2] + shift_x,
                                shift_y:X.shape[3] + shift_y]
    # NoBias variant: activation is applied directly, no bias added.
    return self.activation(conv_out)
def get_output(self, train=False):
    """Time-distributed 2D convolution over a 5D input
    (num_samples, num_timesteps, stack_size, rows, cols).

    Folds timesteps into the batch axis, convolves once in 4D, then
    restores the 5D shape.
    """
    X = self.get_input(train)
    # collapse num_samples and num_timesteps
    newshape = (X.shape[0] * X.shape[1], X.shape[2], X.shape[3], X.shape[4])
    Y = theano.tensor.reshape(X, newshape)

    border_mode = self.border_mode
    if on_gpu() and dnn.dnn_available():
        if border_mode == 'same':
            # cuDNN does 'same' through symmetric zero padding; this pad
            # computation assumes stride (1, 1).
            assert (self.subsample == (1, 1))
            pad_x = (self.nb_row - self.subsample[0]) // 2
            pad_y = (self.nb_col - self.subsample[1]) // 2
            conv_out = dnn.dnn_conv(img=Y, kerns=self.W,
                                    border_mode=(pad_x, pad_y))
        else:
            conv_out = dnn.dnn_conv(img=Y, kerns=self.W,
                                    border_mode=border_mode,
                                    subsample=self.subsample)
    else:
        if border_mode == 'same':
            # CPU fallback: emulate 'same' by convolving in 'full' mode
            # and cropping the center afterwards.
            border_mode = 'full'
        conv_out = theano.tensor.nnet.conv.conv2d(Y, self.W,
                                                  border_mode=border_mode,
                                                  subsample=self.subsample)
        if self.border_mode == 'same':
            # This crop is scoped to the CPU/'full' branch only; the cuDNN
            # branch already yields a 'same'-sized output, and cropping it
            # too would translate the feature maps.
            shift_x = (self.nb_row - 1) // 2
            shift_y = (self.nb_col - 1) // 2
            conv_out = conv_out[:, :,
                                shift_x:Y.shape[2] + shift_x,
                                shift_y:Y.shape[3] + shift_y]

    # Add the per-filter bias (broadcast over batch and spatial dims),
    # then apply the activation.
    output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    # Unfold back to (samples, timesteps, filters, out_rows, out_cols).
    newshape = (X.shape[0], X.shape[1],
                output.shape[1], output.shape[2], output.shape[3])
    return theano.tensor.reshape(output, newshape)
def get_output(self, train):
    """Bias-free 2D convolution over a 4D input (batch, stack, rows, cols).

    'same' border mode uses cuDNN padding on GPU (stride must be (1, 1))
    and a 'full' convolution plus center crop on CPU.
    """
    X = self.get_input(train)
    border_mode = self.border_mode
    if on_gpu() and dnn.dnn_available():
        if border_mode == "same":
            # The symmetric-padding formula below only matches 'same'
            # output sizes when the stride is (1, 1).
            assert self.subsample == (1, 1)
            pad_x = (self.nb_row - self.subsample[0]) // 2
            pad_y = (self.nb_col - self.subsample[1]) // 2
            conv_out = dnn.dnn_conv(img=X, kerns=self.W,
                                    border_mode=(pad_x, pad_y))
        else:
            conv_out = dnn.dnn_conv(img=X, kerns=self.W,
                                    border_mode=border_mode,
                                    subsample=self.subsample)
    else:
        if border_mode == "same":
            # T.nnet.conv.conv2d only knows 'valid'/'full': convolve in
            # 'full' mode, then crop back down below.
            border_mode = "full"
        conv_out = T.nnet.conv.conv2d(X, self.W,
                                      border_mode=border_mode,
                                      subsample=self.subsample)
        if self.border_mode == "same":
            # Crop only on this CPU/'full' path — the cuDNN branch above
            # already produced a 'same'-sized output via padding.
            shift_x = (self.nb_row - 1) // 2
            shift_y = (self.nb_col - 1) // 2
            conv_out = conv_out[:, :,
                                shift_x : X.shape[2] + shift_x,
                                shift_y : X.shape[3] + shift_y]
    # No bias in this layer variant.
    return self.activation(conv_out)
# NOTE(review): this span has had its original line breaks and indentation
# collapsed onto a single physical line, and the body of
# Convolution2DNoBias.__init__ is truncated (the signature ends at `):` with
# nothing after it). It is left byte-identical rather than reformatted by
# guesswork. Logical contents:
#   * imports: keras Layer base class, theano.tensor as T, theano downsample,
#     keras activations/initializations/regularizers/constraints, theano
#     utility helpers (shared_zeros, floatX, on_gpu), make_tuple, and the
#     regularizer classes;
#   * conditional cuDNN import when running on a GPU device;
#   * class Convolution2DNoBias(Layer) with an __init__ taking nb_filter,
#     stack_size, nb_row, nb_col plus the usual Keras-0.x keyword options
#     (init, activation, weights, border_mode, subsample, W/b/activity
#     regularizers, W/b constraints) — body not visible in this chunk.
from keras.layers.core import Layer import theano.tensor as T from theano.tensor.signal import downsample from keras import activations, initializations, regularizers, constraints from keras.utils.theano_utils import shared_zeros, floatX, on_gpu from keras.utils.generic_utils import make_tuple from keras.regularizers import ActivityRegularizer, Regularizer if on_gpu(): from theano.sandbox.cuda import dnn class Convolution2DNoBias(Layer): def __init__( self, nb_filter, stack_size, nb_row, nb_col, init="glorot_uniform", activation="linear", weights=None, border_mode="valid", subsample=(1, 1), W_regularizer=None, b_regularizer=None, activity_regularizer=None, W_constraint=None, b_constraint=None, ):
# NOTE(review): this span begins mid-conditional — the `if` header that pairs
# with the `else:` below (new-Keras vs old-Keras detection, presumably a
# version check) lies outside the visible source, and the original line
# breaks/indentation have been collapsed, so the code is left byte-identical
# rather than reconstructed by guesswork. Logical contents:
#   * new-Keras branch: Python 2 `print` announcing detection, a
#     `shared_zeros` alias from keras.backend, and an `on_gpu()` helper that
#     checks whether theano.config.device starts with 'gpu';
#   * old-Keras branch (`else:`): fallback imports of shared_zeros/floatX/
#     on_gpu, make_tuple, and the regularizer classes;
#   * top-level imports of six.moves.zip and keras.layers.core.Layer, plus a
#     conditional cuDNN import when on GPU;
#   * conv_output_length(input_length, filter_size, border_mode, stride):
#     returns the convolution's spatial output length — None passes through;
#     'same' keeps input_length, 'full' gives input + filter - 1, 'valid'
#     gives input - filter + 1; the result is then ceil-divided by the
#     stride via (output_length + stride - 1) // stride.
print '[new keras detected]' from keras.backend import zeros as shared_zeros def on_gpu(): return theano.config.device[:3] == 'gpu' else: from keras.utils.theano_utils import shared_zeros, floatX, on_gpu from keras.utils.generic_utils import make_tuple from keras.regularizers import ActivityRegularizer, Regularizer from six.moves import zip from keras.layers.core import Layer if on_gpu(): from theano.sandbox.cuda import dnn def conv_output_length(input_length, filter_size, border_mode, stride): if input_length is None: return None assert border_mode in {'same', 'full', 'valid'} if border_mode == 'same': output_length = input_length elif border_mode == 'full': output_length = input_length + filter_size - 1 elif border_mode == 'valid': output_length = input_length - filter_size + 1 return (output_length + stride - 1) // stride