def drop_output(self, input, drop=0, rng=None, p=0.5, n_batch=None):
    """Unpool, convolve (cuDNN 'same' border), add bias, activate, and
    return either the clean output or its dropout-masked version.

    Parameters
    ----------
    input : symbolic 4D tensor -- presumably (batch, channels, h, w); confirm
    drop : symbolic/int flag; nonzero selects the dropped output
    rng : random state forwarded to nonlinearity.dropout
    p : dropout probability
    n_batch : optional batch-size override for image_shape. NOTE(fix): the
        original body read `n_batch` without it ever being defined, raising
        NameError on every call; it is now a parameter defaulting to None.
    """
    ###--- Unpool: repeat along both spatial axes and apply the stored
    # switch mask; identity when the pool size is 1x1.
    if self.poolsize[0] == 1 and self.poolsize[1] == 1:
        unpool_out = input
    else:
        unpool_out = Textra.repeat(
            Textra.repeat(input, self.poolsize[0], axis=2),
            self.poolsize[1], axis=3) * self.mask

    image_shape = list(self.image_shape)
    if n_batch is not None:
        image_shape[0] = n_batch

    if self.border_mode == 'same':
        conv_out = dnn.dnn_conv(
            img=unpool_out,
            kerns=self.W,
            subsample=(1, 1),
            border_mode=self.border,
            #conv_mode='cross'
        )
    else:
        raise Exception('Unknown conv type')

    # add the bias term. Since the bias is a vector (1D array), we first
    # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
    # thus be broadcasted across mini-batches and feature map width & height.
    lin_output = conv_out + self.b.dimshuffle('x', 0, 'x', 'x')
    output = (lin_output if self.activation is None
              else self.activation(lin_output))
    droppedOutput = nonlinearity.dropout(rng, output, p)
    return T.switch(T.neq(drop, 0), droppedOutput, output)
def drop_output(self, input, drop=0, rng=None, p=0.5, n_batch=None):
    """Unpool, convolve with cuDNN ('same' border only), add bias,
    activate, and switch between clean and dropout-masked output.

    Parameters
    ----------
    input : symbolic 4D tensor -- presumably (batch, channels, h, w); confirm
    drop : symbolic/int flag; nonzero selects the dropped output
    rng : random state forwarded to nonlinearity.dropout
    p : dropout probability
    n_batch : optional batch-size override for image_shape. NOTE(fix): the
        original body read an undefined name `n_batch` (NameError on every
        call); it is now a keyword parameter defaulting to None.
    """
    ###--- Unpool (identity for 1x1 pooling); otherwise repeat along the
    # spatial axes and apply the stored unpooling mask.
    if self.poolsize[0] == 1 and self.poolsize[1] == 1:
        unpool_out = input
    else:
        unpool_out = Textra.repeat(
            Textra.repeat(input, self.poolsize[0], axis=2),
            self.poolsize[1], axis=3) * self.mask

    image_shape = list(self.image_shape)
    if n_batch is not None:
        image_shape[0] = n_batch

    if self.border_mode == 'same':
        conv_out = dnn.dnn_conv(
            img=unpool_out,
            kerns=self.W,
            subsample=(1, 1),
            border_mode=self.border,
            #conv_mode='cross'
        )
    else:
        raise Exception('Unknown conv type')

    # add the bias term. Since the bias is a vector (1D array), we first
    # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
    # thus be broadcasted across mini-batches and feature map width & height.
    lin_output = conv_out + self.b.dimshuffle('x', 0, 'x', 'x')
    output = (lin_output if self.activation is None
              else self.activation(lin_output))
    droppedOutput = nonlinearity.dropout(rng, output, p)
    return T.switch(T.neq(drop, 0), droppedOutput, output)
def drop_output(self, input, drop=0, rng=None, p=0.5, n_batch=None):
    """Unpool, convolve (valid / same / full border), add bias, activate,
    and switch between clean and dropout-masked output.

    Parameters
    ----------
    input : symbolic 4D tensor -- presumably (batch, channels, h, w); confirm
    drop : symbolic/int flag; nonzero selects the dropped output
    rng : random state forwarded to nonlinearity.dropout
    p : dropout probability
    n_batch : optional batch-size override for image_shape. NOTE(fix): the
        original body read an undefined name `n_batch` (NameError on every
        call); it is now a keyword parameter defaulting to None.
    """
    ###--- Unpool: repeat along spatial axes and apply the stored mask;
    # identity for 1x1 pooling.
    if self.poolsize[0] == 1 and self.poolsize[1] == 1:
        unpool_out = input
    else:
        unpool_out = Textra.repeat(
            Textra.repeat(input, self.poolsize[0], axis=2),
            self.poolsize[1], axis=3) * self.mask

    image_shape = list(self.image_shape)
    if n_batch is not None:
        image_shape[0] = n_batch

    ###--- Unpool + conv
    # convolve unpooled feature maps with filters
    if self.border_mode == 'valid':
        conv_out = conv.conv2d(
            input=unpool_out,
            filters=self.W,
            filter_shape=self.filter_shape,
            image_shape=image_shape,
            border_mode='valid'
        )
    elif self.border_mode == 'same':
        # 'same' is emulated with a 'full' convolution followed by a
        # symmetric crop of (filter - 1) // 2 on each side.
        conv_out = conv.conv2d(
            input=unpool_out,
            filters=self.W,
            filter_shape=self.filter_shape,
            image_shape=image_shape,
            border_mode='full'
        )
        # NOTE(fix): use floor division (// stays int under Python 3) and
        # plain Python ints instead of theano.shared; also guard padding
        # == 0 -- the old [0:-0] slice produced an EMPTY tensor for 1x1
        # filter dimensions.
        padding_w = (self.filter_shape[2] - 1) // 2
        padding_h = (self.filter_shape[3] - 1) // 2
        if padding_w > 0:
            conv_out = conv_out[:, :, padding_w:-padding_w, :]
        if padding_h > 0:
            conv_out = conv_out[:, :, :, padding_h:-padding_h]
    elif self.border_mode == 'full':
        conv_out = conv.conv2d(
            input=unpool_out,
            filters=self.W,
            filter_shape=self.filter_shape,
            image_shape=image_shape,
            border_mode='full'
        )
    else:
        raise Exception('Unknown conv type')

    # add the bias term. Since the bias is a vector (1D array), we first
    # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
    # thus be broadcasted across mini-batches and feature map width & height.
    lin_output = conv_out + self.b.dimshuffle('x', 0, 'x', 'x')
    output = (lin_output if self.activation is None
              else self.activation(lin_output))
    droppedOutput = nonlinearity.dropout(rng, output, p)
    return T.switch(T.neq(drop, 0), droppedOutput, output)
def drop_output(self, input, drop=0, rng=None, p=0.5):
    """Affine-transform the input, apply the optional activation, and
    select between the clean output and its dropout-masked version
    according to the `drop` flag (nonzero -> dropped)."""
    pre_activation = T.dot(input, self.W) + self.b
    if self.activation is None:
        clean_output = pre_activation
    else:
        clean_output = self.activation(pre_activation)
    dropped_output = nonlinearity.dropout(rng, clean_output, p)
    return T.switch(T.neq(drop, 0), dropped_output, clean_output)
def drop_output(self, input, drop=0, rng=None, p=0.5):
    """Convolve (valid / same / full border), max-pool, add bias,
    activate, and switch between clean and dropout-masked output.

    Parameters
    ----------
    input : symbolic 4D tensor -- presumably (batch, channels, h, w); confirm
    drop : symbolic/int flag; nonzero selects the dropped output
    rng : random state forwarded to nonlinearity.dropout
    p : dropout probability
    """
    # convolve input feature maps with filters
    if self.border_mode == 'valid':
        conv_out = conv.conv2d(
            input=input,
            filters=self.W,
            filter_shape=self.filter_shape,
            image_shape=self.image_shape,
            border_mode='valid'
        )
    elif self.border_mode == 'same':
        # 'same' is emulated with a 'full' convolution followed by a
        # symmetric crop of (filter - 1) // 2 on each side.
        conv_out = conv.conv2d(
            input=input,
            filters=self.W,
            filter_shape=self.filter_shape,
            image_shape=self.image_shape,
            border_mode='full'
        )
        # NOTE(fix): floor division (// stays int under Python 3) and
        # plain ints instead of theano.shared; guard padding == 0 -- the
        # old [0:-0] slice produced an EMPTY tensor for 1x1 filter dims.
        padding_w = (self.filter_shape[2] - 1) // 2
        padding_h = (self.filter_shape[3] - 1) // 2
        if padding_w > 0:
            conv_out = conv_out[:, :, padding_w:-padding_w, :]
        if padding_h > 0:
            conv_out = conv_out[:, :, :, padding_h:-padding_h]
    elif self.border_mode == 'full':
        conv_out = conv.conv2d(
            input=input,
            filters=self.W,
            filter_shape=self.filter_shape,
            image_shape=self.image_shape,
            border_mode='full'
        )
    else:
        raise Exception('Unknown conv type')

    # downsample each feature map individually, using maxpooling
    if self.poolsize[0] == 1 and self.poolsize[1] == 1:
        pooled_out = conv_out
    else:
        pooled_out = downsample.max_pool_2d(
            input=conv_out,
            ds=self.poolsize,
            ignore_border=True
        )

    # add the bias term. Since the bias is a vector (1D array), we first
    # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
    # thus be broadcasted across mini-batches and feature map width & height.
    lin_output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
    output = (lin_output if self.activation is None
              else self.activation(lin_output))
    droppedOutput = nonlinearity.dropout(rng, output, p)
    return T.switch(T.neq(drop, 0), droppedOutput, output)
def drop_output(self, input, drop=0, rng=None, p=0.5):
    """cuDNN convolve ('same' border only), max-pool, optionally apply
    cross-channel contrast normalization, add bias, activate, and switch
    between clean and dropout-masked output.

    Parameters
    ----------
    input : symbolic 4D tensor -- presumably (batch, channels, h, w); confirm
    drop : symbolic/int flag; nonzero selects the dropped output
    rng : random state forwarded to nonlinearity.dropout
    p : dropout probability
    """
    # convolve input feature maps with filters
    ''' dnn '''
    if self.border_mode == 'same':
        conv_out = dnn.dnn_conv(
            img=input,
            kerns=self.W,
            subsample=(1, 1),
            border_mode=self.border,
            #conv_mode='cross'
        )
    else:
        raise Exception('Unknown conv type')

    # downsample each feature map individually, using maxpooling
    if self.poolsize[0] == 1 and self.poolsize[1] == 1:
        pooled_out = conv_out
    else:
        pooled_out = downsample.max_pool_2d(
            input=conv_out,
            ds=self.poolsize,
            ignore_border=True
        )

    if self.cnorm:
        # NOTE(fix): floor division keeps n an int under Python 3 (plain /
        # would pass a float); print as a single pre-formatted string so
        # the statement is valid and identical in Python 2 and 3.
        norm_size = self.filter_shape[0] // 8 + 1
        print('cnorm size %d' % norm_size)
        pooled_out = ContrastCrossChannels.ContrastCrossChannels(
            input=pooled_out, n=norm_size)

    # add the bias term. Since the bias is a vector (1D array), we first
    # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
    # thus be broadcasted across mini-batches and feature map width & height.
    lin_output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
    output = (lin_output if self.activation is None
              else self.activation(lin_output))
    droppedOutput = nonlinearity.dropout(rng, output, p)
    return T.switch(T.neq(drop, 0), droppedOutput, output)
def drop_output(self, input, drop=0, rng=None, p=0.5):
    """cuDNN convolve ('same' border only), cuDNN max-pool, add bias,
    apply the optional activation, and select between the clean output
    and its dropout-masked version via the `drop` flag."""
    # Only the 'same' border mode is supported on this cuDNN path.
    if self.border_mode != 'same':
        raise Exception('Unknown conv type')
    conv_result = dnn.dnn_conv(
        img=input,
        kerns=self.W,
        subsample=(1, 1),
        border_mode=self.border,
        #conv_mode='cross'
    )

    # Max-pool each feature map; 1x1 pooling is a no-op.
    pool_h, pool_w = self.poolsize[0], self.poolsize[1]
    if pool_h == 1 and pool_w == 1:
        pooled = conv_result
    else:
        pooled = dnn.dnn_pool(
            img=conv_result,
            ws=self.poolsize,
            stride=self.poolsize,
            mode='max',
            pad=(0, 0)
        )

    # Broadcast the 1D bias across batch and both spatial dimensions
    # by reshaping it to (1, n_filters, 1, 1).
    pre_activation = pooled + self.b.dimshuffle('x', 0, 'x', 'x')
    if self.activation is None:
        clean_output = pre_activation
    else:
        clean_output = self.activation(pre_activation)
    dropped_output = nonlinearity.dropout(rng, clean_output, p)
    return T.switch(T.neq(drop, 0), dropped_output, clean_output)
def drop_output(self, input, drop=0, rng=None, p=0.5):
    """Compute W.x + b, run it through the optional activation, and
    return the dropout-masked result when `drop` is nonzero, otherwise
    the clean result."""
    affine = T.dot(input, self.W) + self.b
    activated = affine if self.activation is None else self.activation(affine)
    masked = nonlinearity.dropout(rng, activated, p)
    return T.switch(T.neq(drop, 0), masked, activated)