Code Example #1
def apply(self, input_):
    stride = self.stride
    if not stride:
        stride = self.pooling_size
    return dnn_pool(input_,
                    ws=self.pooling_size,
                    stride=stride,
                    pad=self.pad)
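For reference, a minimal standalone sketch of the same kind of call, assuming dnn_pool is imported from theano.gpuarray.dnn (older code imports it from theano.sandbox.cuda.dnn) and that a cuDNN-enabled GPU device is configured:

import theano
import theano.tensor as T
from theano.gpuarray.dnn import dnn_pool

x = T.tensor4('x')  # (batch, channels, rows, cols)
# 2x2 max pooling with stride 2 and no padding
y = dnn_pool(x, ws=(2, 2), stride=(2, 2), pad=(0, 0), mode='max')
f = theano.function([x], y)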
Code Example #2
def apply(self, input_):
    stride = self.stride
    if not stride:
        stride = self.pooling_size
    return dnn_pool(input_,
                    ws=self.pooling_size,
                    stride=stride,
                    pad=self.pad,
                    mode='average_inc_pad')
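A note on the mode: 'average_inc_pad' includes the zero padding in the averaging denominator; dnn_pool also accepts 'average_exc_pad', which divides only by the number of valid (unpadded) positions, as well as 'max' used in the previous example.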
Code Example #3
def local_gpua_pool_dnn_alternative(fgraph, op, ctx_name, inputs, outputs):
    if not dnn_available(ctx_name):
        return
    if not op.ignore_border:
        return
    img, ws, stride, pad = inputs
    nd = op.ndim
    if nd not in (2, 3):
        return
    img = gpu_contiguous(as_gpuarray_variable(img, ctx_name))
    mode = op.mode
    # dnn_pool expects exactly 2 non-pooling dimensions
    if img.ndim == nd + 2:
        return dnn_pool(img, ws, stride=stride, pad=pad, mode=mode)
    else:
        # reshape to 4D or 5D with 2 non-pooling dimensions
        img_padded = pad_dims(img, 2, nd)
        ret_padded = dnn_pool(img_padded, ws, stride=stride, pad=pad, mode=mode)
        return unpad_dims(ret_padded, img, 2, nd)
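As the comment above notes, cuDNN pooling operates on tensors with exactly two leading non-pooling dimensions (4D for 2D pooling, 5D for 3D pooling), which is why the fallback branch reshapes with pad_dims before pooling and restores the shape with unpad_dims afterwards. A rough NumPy illustration of that collapse-and-restore idea (illustrative only, not Theano's actual pad_dims/unpad_dims implementation):

import numpy as np

def to_two_leading_dims(x, nd):
    # collapse the leading axes into two so the array becomes (nd + 2)-dimensional
    # (assumes x has at least two axes in front of the nd pooling axes)
    lead = x.shape[:-nd]
    merged = (int(np.prod(lead[:-1])), lead[-1]) + x.shape[-nd:]
    return x.reshape(merged), lead

def restore_leading_dims(y, lead, nd):
    # undo the collapse after pooling; the last nd axes now hold the pooled sizes
    return y.reshape(lead + y.shape[-nd:])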
Code Example #4
def apply(self, input_):
    stride = self.stride
    if not stride:
        stride = self.pooling_size
    # square, average-pool, then take the square root:
    # root-mean-square (L2) pooling over each window
    inter = input_ ** 2
    inter = dnn_pool(inter,
                     ws=self.pooling_size,
                     stride=stride,
                     pad=self.pad,
                     mode='average_inc_pad')
    return T.sqrt(inter)
Code Example #5
    def get_output_for(self, input, **kwargs):
        input_size = tuple(
            symb if fixed is None else fixed
            for fixed, symb in zip(self.input_shape[2:], input.shape[2:]))
        pool_list = []
        for pool_dim in self.pool_dims:
            win_size = tuple(
                (i + pool_dim - 1) // pool_dim for i in input_size)
            str_size = tuple(i // pool_dim for i in input_size)

            pool = dnn.dnn_pool(input, win_size, str_size, self.mode, (0, 0))
            pool = pool.flatten(3)
            pool_list.append(pool)

        return theano.tensor.concatenate(pool_list, axis=2)
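For a concrete sense of the window arithmetic in this spatial pyramid pooling layer: with a 13x13 input map and pool_dim = 3, win_size = (13 + 3 - 1) // 3 = 5 and str_size = 13 // 3 = 4, so the pooled map is 3x3 ((13 - 5) // 4 + 1 = 3). Flattening from the third dimension on and concatenating over pool_dims therefore yields a fixed-length output regardless of the input's spatial size.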
Code Example #6
File: patches.py Project: shoaibahmed/pl-cnn
def local_mypool_dnn_alternative(node):
    if not dnn_available():
        return
    if isinstance(node.op, MyPool):
        if not node.op.ignore_border:
            return
        img, = node.inputs
        ds = node.op.ds
        stride = node.op.st
        pad = node.op.padding
        mode = node.op.mode
        if (img.owner and isinstance(img.owner.op, HostFromGpu)):
            ret = dnn_pool(gpu_contiguous(img.owner.inputs[0]),
                           ds, stride=stride, pad=pad, mode=mode)
            return [host_from_gpu(ret)]
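This snippet is a local graph optimizer for the old theano.sandbox.cuda backend: when a MyPool node's input already lives on the GPU (its owner is a HostFromGpu), the node is replaced by dnn_pool applied to the contiguous GPU tensor, and the result is moved back with host_from_gpu. Returning nothing leaves the graph untouched when cuDNN is unavailable or when borders are not ignored.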
Code Example #7
File: layers2.py Project: nathinal/Theano-MPI
def __init__(self, input, poolsize, poolstride,
             poolpad, mode='max', printinfo=True,
             input_shape=None, output_shape=None):

    self.get_input_shape(input, input_shape)
    self.poolsize = poolsize
    self.poolstride = poolstride
    self.poolpad = poolpad

    if self.poolsize != 1:
        self.output = dnn.dnn_pool(self.input,
                                   ws=(poolsize, poolsize),
                                   stride=(poolstride, poolstride),
                                   mode=mode, pad=(poolpad, poolpad))
    else:
        self.output = input

    if output_shape:
        self.output_shape = output_shape
    else:
        self.output_shape = self.get_output_shape(self.input_shape)

    self.name = 'Pool\t'
    if printinfo:
        self.print_shape()
Code Example #8
File: layers.py Project: nathinal/Theano-MPI
    def __init__(self,
                 input,
                 image_shape,
                 filter_shape,
                 convstride,
                 padsize,
                 group,
                 poolsize,
                 poolstride,
                 bias_init,
                 lrn=False,
                 lib_conv='cudnn',
                 verbose=False):
        '''
        lib_conv can be cudnn (recommended)or cudaconvnet
        '''

        self.filter_size = filter_shape
        self.convstride = convstride
        self.padsize = padsize
        self.poolsize = poolsize
        self.poolstride = poolstride
        self.channel = image_shape[0]
        self.lrn = lrn
        self.lib_conv = lib_conv
        self.verbose = verbose
        assert group in [1, 2]

        self.filter_shape = np.asarray(filter_shape)
        self.image_shape = np.asarray(image_shape)

        if self.lrn:
            self.lrn_func = CrossChannelNormalization()

        if group == 1:
            self.W = Weight(self.filter_shape)
            self.b = Weight(self.filter_shape[3], bias_init, std=0)
        else:
            self.filter_shape[0] = self.filter_shape[0] / 2
            self.filter_shape[3] = self.filter_shape[3] / 2
            self.image_shape[0] = self.image_shape[0] / 2
            self.image_shape[3] = self.image_shape[3] / 2
            self.W0 = Weight(self.filter_shape)
            self.W1 = Weight(self.filter_shape)
            self.b0 = Weight(self.filter_shape[3], bias_init, std=0)
            self.b1 = Weight(self.filter_shape[3], bias_init, std=0)

        if lib_conv == 'cudnn':

            input_shuffled = input.dimshuffle(3, 0, 1, 2)  # c01b to bc01
            # in01out to outin01
            # print image_shape_shuffled
            # print filter_shape_shuffled
            if group == 1:
                W_shuffled = self.W.val.dimshuffle(3, 0, 1, 2)  # c01b to bc01
                conv_out = dnn.dnn_conv(
                    img=input_shuffled,
                    kerns=W_shuffled,
                    subsample=(convstride, convstride),
                    border_mode=padsize,
                )
                conv_out = conv_out + self.b.val.dimshuffle('x', 0, 'x', 'x')
            else:
                W0_shuffled = \
                    self.W0.val.dimshuffle(3, 0, 1, 2)  # c01b to bc01
                conv_out0 = \
                    dnn.dnn_conv(img=input_shuffled[:, :self.channel / 2,
                                                    :, :],
                                 kerns=W0_shuffled,
                                 subsample=(convstride, convstride),
                                 border_mode=padsize,
                                 )
                conv_out0 = conv_out0 + \
                    self.b0.val.dimshuffle('x', 0, 'x', 'x')
                W1_shuffled = \
                    self.W1.val.dimshuffle(3, 0, 1, 2)  # c01b to bc01
                conv_out1 = \
                    dnn.dnn_conv(img=input_shuffled[:, self.channel / 2:,
                                                    :, :],
                                 kerns=W1_shuffled,
                                 subsample=(convstride, convstride),
                                 border_mode=padsize,
                                 )
                conv_out1 = conv_out1 + \
                    self.b1.val.dimshuffle('x', 0, 'x', 'x')
                conv_out = T.concatenate([conv_out0, conv_out1], axis=1)

            # ReLu
            self.output = T.maximum(conv_out, 0)

            # Pooling
            if self.poolsize != 1:
                self.output = dnn.dnn_pool(self.output,
                                           ws=(poolsize, poolsize),
                                           stride=(poolstride, poolstride))

            self.output = self.output.dimshuffle(1, 2, 3, 0)  # bc01 to c01b

        # elif lib_conv == 'cudaconvnet':
        #     from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
        #
        #     self.conv_op = FilterActs(pad=self.padsize, stride=self.convstride,
        #                               partial_sum=1)
        #
        #     from theano.sandbox.cuda.basic_ops import gpu_contiguous
        #
        #     # Conv
        #     if group == 1:
        #         contiguous_input = gpu_contiguous(input)
        #         contiguous_filters = gpu_contiguous(self.W.val)
        #         conv_out = self.conv_op(contiguous_input, contiguous_filters)
        #         conv_out = conv_out + self.b.val.dimshuffle(0, 'x', 'x', 'x')
        #     else:
        #         contiguous_input0 = gpu_contiguous(
        #             input[:self.channel / 2, :, :, :])
        #         contiguous_filters0 = gpu_contiguous(self.W0.val)
        #         conv_out0 = self.conv_op(
        #             contiguous_input0, contiguous_filters0)
        #         conv_out0 = conv_out0 + \
        #             self.b0.val.dimshuffle(0, 'x', 'x', 'x')
        #
        #         contiguous_input1 = gpu_contiguous(
        #             input[self.channel / 2:, :, :, :])
        #         contiguous_filters1 = gpu_contiguous(self.W1.val)
        #         conv_out1 = self.conv_op(
        #             contiguous_input1, contiguous_filters1)
        #         conv_out1 = conv_out1 + \
        #             self.b1.val.dimshuffle(0, 'x', 'x', 'x')
        #         conv_out = T.concatenate([conv_out0, conv_out1], axis=0)
        #
        #     # ReLu
        #     conv_out = gpu_contiguous(conv_out)
        #     self.output = T.maximum(conv_out, 0)
        #
        #     # Pooling
        #     if self.poolsize != 1:
        #         from pylearn2.sandbox.cuda_convnet.pool import MaxPool
        #         self.pool_op = MaxPool(ds=poolsize, stride=poolstride)
        #         self.output = self.pool_op(self.output)
        #
        # elif lib_conv == 'corrmm':
        #
        #     from theano.sandbox.cuda.basic_ops import gpu_contiguous
        #     from theano.sandbox.cuda.blas import GpuCorrMM
        #
        #     border_mode = 'half' if padsize == (filter_shape[1]-1)/2 else (padsize, padsize)
        #     self.corr_mm_op = GpuCorrMM(subsample=(convstride,convstride),
        #                                         border_mode=border_mode)
        #     flip_filters=True
        #     input_shuffled = input.dimshuffle(3, 0, 1, 2)  # c01b to bc01
        #
        #
        #     if group==1:
        #
        #         filters = self.W.val.dimshuffle(3, 0, 1, 2)
        #
        #         if flip_filters:
        #             filters = filters[:, :, ::-1, ::-1]  # flip top-down, left-right
        #         contiguous_filters = gpu_contiguous(filters)
        #         contiguous_input = gpu_contiguous(input_shuffled)
        #
        #         conv_out = self.corr_mm_op(contiguous_input, contiguous_filters)
        #         conv_out = conv_out + self.b.val.dimshuffle('x', 0, 'x', 'x')
        #
        #     else:
        #
        #         W0_shuffled = \
        #             self.W0.val.dimshuffle(3, 0, 1, 2)  # c01b to bc01
        #         if flip_filters:
        #             W0_shuffled = W0_shuffled[:, :, ::-1, ::-1]
        #
        #         contiguous_filters0 = gpu_contiguous(W0_shuffled)
        #         contiguous_input0 = gpu_contiguous(input_shuffled[:, :self.channel / 2,:, :])
        #
        #         conv_out0 = self.corr_mm_op(contiguous_input0, contiguous_filters0)
        #         conv_out0 = conv_out0 + \
        #             self.b0.val.dimshuffle('x', 0, 'x', 'x')
        #
        #         W1_shuffled = \
        #             self.W1.val.dimshuffle(3, 0, 1, 2)  # c01b to bc01
        #         if flip_filters:
        #             W1_shuffled = W1_shuffled[:, :, ::-1, ::-1]
        #
        #         contiguous_filters1 = gpu_contiguous(W1_shuffled)
        #         contiguous_input1 = gpu_contiguous(input_shuffled[:, self.channel / 2:,:, :])
        #
        #         conv_out1 = self.corr_mm_op(contiguous_input1, contiguous_filters1)
        #         conv_out1 = conv_out1 + \
        #             self.b1.val.dimshuffle('x', 0, 'x', 'x')
        #         conv_out = T.concatenate([conv_out0, conv_out1], axis=1)
        #
        #     # ReLu
        #     self.output = T.maximum(conv_out, 0)
        #
        #     # Pooling
        #     if self.poolsize != 1:
        #         from theano.tensor.signal import downsample
        #         self.output = downsample.max_pool_2d(self.output,
        #                                     ds=(poolsize,poolsize),
        #                                     st=(poolstride,poolstride),
        #                                     ignore_border=False,
        #                                     padding=(0,0),
        #                                     mode='max',
        #                                                 )
        #
        #     self.output = self.output.dimshuffle(1, 2, 3, 0)  # bc01 to c01b

        else:
            NotImplementedError("lib_conv can only be cudnn for now")

        # LRN
        if self.lrn:
            # lrn_input = gpu_contiguous(self.output)
            self.output = self.lrn_func(self.output)

        if group == 1:
            self.params = [self.W.val, self.b.val]
            self.weight_type = ['W', 'b']
        else:
            self.params = [self.W0.val, self.b0.val, self.W1.val, self.b1.val]
            self.weight_type = ['W', 'b', 'W', 'b']

        if self.verbose:
            print "conv ({}) layer with shape_in: {}".format(
                lib_conv, str(image_shape))
Code Example #9
def get_output_for(self, input, **kwargs):
    return dnn.dnn_pool(input, self.pool_size, self.stride, self.mode,
                        self.pad)
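Examples 5, 9, 11 and 15 pass the arguments positionally; dnn_pool's positional order is (img, ws, stride, mode, pad), so self.pool_size, self.stride, self.mode and self.pad line up with the keyword form used in the other examples.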
Code Example #10
File: layers.py Project: uoguelph-mlrg/Theano-MPI
    def __init__(self, input, image_shape, filter_shape, convstride, padsize,
                 group, poolsize, poolstride, bias_init, lrn=False,
                 lib_conv='cudnn',
                 verbose=False
                 ):
        '''
        lib_conv can be cudnn (recommended)or cudaconvnet
        '''

        self.filter_size = filter_shape
        self.convstride = convstride
        self.padsize = padsize
        self.poolsize = poolsize
        self.poolstride = poolstride
        self.channel = image_shape[0]
        self.lrn = lrn
        self.lib_conv = lib_conv
        self.verbose = verbose
        assert group in [1, 2]

        self.filter_shape = np.asarray(filter_shape)
        self.image_shape = np.asarray(image_shape)

        if self.lrn:
            self.lrn_func = CrossChannelNormalization()

        if group == 1:
            self.W = Weight(self.filter_shape)
            self.b = Weight(self.filter_shape[3], bias_init, std=0)
        else:
            self.filter_shape[0] = self.filter_shape[0] / 2
            self.filter_shape[3] = self.filter_shape[3] / 2
            self.image_shape[0] = self.image_shape[0] / 2
            self.image_shape[3] = self.image_shape[3] / 2
            self.W0 = Weight(self.filter_shape)
            self.W1 = Weight(self.filter_shape)
            self.b0 = Weight(self.filter_shape[3], bias_init, std=0)
            self.b1 = Weight(self.filter_shape[3], bias_init, std=0)                              
                                                

        if lib_conv == 'cudnn':

            input_shuffled = input.dimshuffle(3, 0, 1, 2)  # c01b to bc01
            # in01out to outin01
            # print image_shape_shuffled
            # print filter_shape_shuffled
            if group == 1:
                W_shuffled = self.W.val.dimshuffle(3, 0, 1, 2)  # c01b to bc01
                conv_out = dnn.dnn_conv(img=input_shuffled,
                                        kerns=W_shuffled,
                                        subsample=(convstride, convstride),
                                        border_mode=padsize,
                                        )
                conv_out = conv_out + self.b.val.dimshuffle('x', 0, 'x', 'x')
            else:
                W0_shuffled = \
                    self.W0.val.dimshuffle(3, 0, 1, 2)  # c01b to bc01
                conv_out0 = \
                    dnn.dnn_conv(img=input_shuffled[:, :self.channel / 2,
                                                    :, :],
                                 kerns=W0_shuffled,
                                 subsample=(convstride, convstride),
                                 border_mode=padsize,
                                 )
                conv_out0 = conv_out0 + \
                    self.b0.val.dimshuffle('x', 0, 'x', 'x')
                W1_shuffled = \
                    self.W1.val.dimshuffle(3, 0, 1, 2)  # c01b to bc01
                conv_out1 = \
                    dnn.dnn_conv(img=input_shuffled[:, self.channel / 2:,
                                                    :, :],
                                 kerns=W1_shuffled,
                                 subsample=(convstride, convstride),
                                 border_mode=padsize,
                                 )
                conv_out1 = conv_out1 + \
                    self.b1.val.dimshuffle('x', 0, 'x', 'x')
                conv_out = T.concatenate([conv_out0, conv_out1], axis=1)

            # ReLu
            self.output = T.maximum(conv_out, 0)

            # Pooling
            if self.poolsize != 1:
                self.output = dnn.dnn_pool(self.output,
                                           ws=(poolsize, poolsize),
                                           stride=(poolstride, poolstride))

            self.output = self.output.dimshuffle(1, 2, 3, 0)  # bc01 to c01b
            
        # elif lib_conv == 'cudaconvnet':
        #     from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
        #
        #     self.conv_op = FilterActs(pad=self.padsize, stride=self.convstride,
        #                               partial_sum=1)
        #
        #     from theano.sandbox.cuda.basic_ops import gpu_contiguous
        #
        #     # Conv
        #     if group == 1:
        #         contiguous_input = gpu_contiguous(input)
        #         contiguous_filters = gpu_contiguous(self.W.val)
        #         conv_out = self.conv_op(contiguous_input, contiguous_filters)
        #         conv_out = conv_out + self.b.val.dimshuffle(0, 'x', 'x', 'x')
        #     else:
        #         contiguous_input0 = gpu_contiguous(
        #             input[:self.channel / 2, :, :, :])
        #         contiguous_filters0 = gpu_contiguous(self.W0.val)
        #         conv_out0 = self.conv_op(
        #             contiguous_input0, contiguous_filters0)
        #         conv_out0 = conv_out0 + \
        #             self.b0.val.dimshuffle(0, 'x', 'x', 'x')
        #
        #         contiguous_input1 = gpu_contiguous(
        #             input[self.channel / 2:, :, :, :])
        #         contiguous_filters1 = gpu_contiguous(self.W1.val)
        #         conv_out1 = self.conv_op(
        #             contiguous_input1, contiguous_filters1)
        #         conv_out1 = conv_out1 + \
        #             self.b1.val.dimshuffle(0, 'x', 'x', 'x')
        #         conv_out = T.concatenate([conv_out0, conv_out1], axis=0)
        #
        #     # ReLu
        #     conv_out = gpu_contiguous(conv_out)
        #     self.output = T.maximum(conv_out, 0)
        #
        #     # Pooling
        #     if self.poolsize != 1:
        #         from pylearn2.sandbox.cuda_convnet.pool import MaxPool
        #         self.pool_op = MaxPool(ds=poolsize, stride=poolstride)
        #         self.output = self.pool_op(self.output)
        #
        # elif lib_conv == 'corrmm':
        #
        #     from theano.sandbox.cuda.basic_ops import gpu_contiguous
        #     from theano.sandbox.cuda.blas import GpuCorrMM
        #
        #     border_mode = 'half' if padsize == (filter_shape[1]-1)/2 else (padsize, padsize)
        #     self.corr_mm_op = GpuCorrMM(subsample=(convstride,convstride),
        #                                         border_mode=border_mode)
        #     flip_filters=True
        #     input_shuffled = input.dimshuffle(3, 0, 1, 2)  # c01b to bc01
        #
        #
        #     if group==1:
        #
        #         filters = self.W.val.dimshuffle(3, 0, 1, 2)
        #
        #         if flip_filters:
        #             filters = filters[:, :, ::-1, ::-1]  # flip top-down, left-right
        #         contiguous_filters = gpu_contiguous(filters)
        #         contiguous_input = gpu_contiguous(input_shuffled)
        #
        #         conv_out = self.corr_mm_op(contiguous_input, contiguous_filters)
        #         conv_out = conv_out + self.b.val.dimshuffle('x', 0, 'x', 'x')
        #
        #     else:
        #
        #         W0_shuffled = \
        #             self.W0.val.dimshuffle(3, 0, 1, 2)  # c01b to bc01
        #         if flip_filters:
        #             W0_shuffled = W0_shuffled[:, :, ::-1, ::-1]
        #
        #         contiguous_filters0 = gpu_contiguous(W0_shuffled)
        #         contiguous_input0 = gpu_contiguous(input_shuffled[:, :self.channel / 2,:, :])
        #
        #         conv_out0 = self.corr_mm_op(contiguous_input0, contiguous_filters0)
        #         conv_out0 = conv_out0 + \
        #             self.b0.val.dimshuffle('x', 0, 'x', 'x')
        #
        #         W1_shuffled = \
        #             self.W1.val.dimshuffle(3, 0, 1, 2)  # c01b to bc01
        #         if flip_filters:
        #             W1_shuffled = W1_shuffled[:, :, ::-1, ::-1]
        #
        #         contiguous_filters1 = gpu_contiguous(W1_shuffled)
        #         contiguous_input1 = gpu_contiguous(input_shuffled[:, self.channel / 2:,:, :])
        #
        #         conv_out1 = self.corr_mm_op(contiguous_input1, contiguous_filters1)
        #         conv_out1 = conv_out1 + \
        #             self.b1.val.dimshuffle('x', 0, 'x', 'x')
        #         conv_out = T.concatenate([conv_out0, conv_out1], axis=1)
        #
        #     # ReLu
        #     self.output = T.maximum(conv_out, 0)
        #
        #     # Pooling
        #     if self.poolsize != 1:
        #         from theano.tensor.signal import downsample
        #         self.output = downsample.max_pool_2d(self.output,
        #                                     ds=(poolsize,poolsize),
        #                                     st=(poolstride,poolstride),
        #                                     ignore_border=False,
        #                                     padding=(0,0),
        #                                     mode='max',
        #                                                 )
        #
        #     self.output = self.output.dimshuffle(1, 2, 3, 0)  # bc01 to c01b

        else:
            NotImplementedError("lib_conv can only be cudnn for now")

        # LRN
        if self.lrn:
            # lrn_input = gpu_contiguous(self.output)
            self.output = self.lrn_func(self.output)

        if group == 1:
            self.params = [self.W.val, self.b.val]
            self.weight_type = ['W', 'b']
        else:
            self.params = [self.W0.val, self.b0.val, self.W1.val, self.b1.val]
            self.weight_type = ['W', 'b', 'W', 'b']

        if self.verbose: 
            print "conv ({}) layer with shape_in: {}".format(lib_conv,
                                                         str(image_shape))
Code Example #11
File: ops.py Project: IndicoDataSolutions/Foxhound
def op(self, state):
    X = self.l_in.op(state=state)
    return dnn_pool(X, self.shape, self.stride, self.mode, self.pad)
Code Example #12
def forward(self, input):
    self.output = dnn_pool(input,
                           self.pooling_size,
                           stride=self.step,
                           mode=self.mode)
    return self.output
Code Example #13
File: layers2.py Project: nathinal/Theano-MPI
    def __init__(self, input, convstride, padsize, poolsize, poolstride,
                 b, W = None, filter_shape = None, 
                 poolpad=0, mode = 'max', 
                 lrn=False, lib_conv='cudnn', printinfo=True,
                 input_shape=None, output_shape=None
                 ):
                 
        if W == None and filter_shape == None:
            raise AttributeError('need to specify at least one of W and filtershape')
        
        self.get_input_shape(input,input_shape)
         
        self.filter_shape = filter_shape
        self.convstride = convstride
        self.padsize = padsize
        self.lib_conv = lib_conv

        if W:
            self.W = W #Weight(self.filter_shape,)
        else:
            self.W = Normal(filter_shape, mean = 0.0, std=0.1)
            
        self.b = b #Weight(self.filter_shape[3])

        self.channel = self.input_shape[1]
        self.lrn = lrn

        conv_out = dnn.dnn_conv(img=self.input,
                                kerns=self.W.val,
                                subsample=(convstride, convstride),
                                border_mode=padsize,
                                )
        conv_out = conv_out + self.b.val.dimshuffle('x', 0, 'x', 'x')

        # ReLu
        self.output = T.maximum(conv_out, 0)

        # Pool
        self.poolsize = poolsize
        self.poolstride = poolstride
        self.poolpad = poolpad

        if self.poolsize != 1:
            self.output = dnn.dnn_pool(self.output,
                                       ws=(poolsize, poolsize),
                                       stride=(poolstride, poolstride),
                                       mode=mode, pad=(poolpad, poolpad))

        # LRN
        if self.lrn:
            self.lrn_func = CrossChannelNormalization()
            # lrn_input = gpu_contiguous(self.output)
            self.output = self.lrn_func(self.output)

        self.params = [self.W.val, self.b.val]
        self.weight_type = ['W', 'b']

        if output_shape:
            self.output_shape = output_shape 
        else:
            self.output_shape = self.get_output_shape(self.input_shape)
        
        self.name = 'ConvPoolLRN(%s)' % lib_conv
        if printinfo: self.print_shape()
Code Example #14
File: layers2.py Project: nathinal/Theano-MPI
    def __init__(self, input, convstride, padsize, poolsize, poolstride, group,
                 b, W = None, filter_shape = None, 
                 poolpad=0, mode = 'max', 
                 lrn=False, lib_conv='cudnn', printinfo=True,
                 input_shape=None, output_shape=None,
                 ):
                 
                
        '''
                 ConvPoolLRN layer
                 
        To be used in AlexNet
        lib_conv can be cudnn (recommended)or cudaconvnet
        
        '''
                 
        
        self.get_input_shape(input,input_shape)
        self.convstride = convstride
        self.padsize = padsize
        self.lib_conv = lib_conv
        self.poolsize = poolsize
        self.poolstride = poolstride
        self.poolpad = poolpad
        self.lrn = lrn
        if self.lrn:
            self.lrn_func = CrossChannelNormalization()
                 
        if W == None and filter_shape!=None:
            
            assert group in [1, 2]
            
            self.filter_shape = np.asarray(filter_shape)
            
            if group == 1:
                    
                self.W = Normal(self.filter_shape, mean=0, std=0.01)
                self.b = Constant(self.filter_shape[3], val=b)
                
            else:
            
                self.filter_shape[0] = self.filter_shape[0] // 2
                self.filter_shape[3] = self.filter_shape[3] // 2
                # self.input_shape[0] = self.input_shape[0] / 2
                # self.input_shape[3] = self.input_shape[3] / 2
                channel = self.input_shape[0]
                self.W0 = Normal(self.filter_shape, mean=0, std=0.01)
                self.W1 = Normal(self.filter_shape, mean=0, std=0.01)
                self.b0 = Constant(self.filter_shape[3], val=b)
                self.b1 = Constant(self.filter_shape[3], val=b)
            
            
        elif W!=None and filter_shape==None:
            assert group ==1
            self.filter_shape = W.val.shape.eval()
            self.W=W
            self.b = Constant(self.filter_shape[3], val=b)
            
        else:
            raise AttributeError('need to specify exactly one of W and filtershape')                 
                                                

        if lib_conv == 'cudnn':
            

            input_shuffled = self.input.dimshuffle(3, 0, 1, 2)  # c01b to bc01
            
            # in01out to outin01
            # print image_shape_shuffled
            # print filter_shape_shuffled
            if group == 1:
                W_shuffled = self.W.val.dimshuffle(3, 0, 1, 2)  # c01b to bc01
                conv_out = dnn.dnn_conv(img=input_shuffled,
                                        kerns=W_shuffled,
                                        subsample=(convstride, convstride),
                                        border_mode=padsize,
                                        )
                conv_out = conv_out + self.b.val.dimshuffle('x', 0, 'x', 'x')
            else:
                W0_shuffled = \
                    self.W0.val.dimshuffle(3, 0, 1, 2)  # c01b to bc01
                    
                # print W0_shuffled.shape.eval()# c01b to bc01  # 96, 5, 5, 256 -> 128, 48, 5, 5
                #
                # x_in = np.zeros((96, 27, 27, 128), dtype=np.float32) # c01b to bc01  # 96, 27, 27, 128 -> 128, 48, 27, 27
                # test = input_shuffled[:, :self.channel / 2,:, :]
                #
                # print test.shape
                    
                conv_out0 = \
                    dnn.dnn_conv(img=input_shuffled[:, :channel//2,
                                                    :, :],
                                 kerns=W0_shuffled,
                                 subsample=(convstride, convstride),
                                 border_mode=padsize,
                                 )
                conv_out0 = conv_out0 + \
                    self.b0.val.dimshuffle('x', 0, 'x', 'x')
                W1_shuffled = \
                    self.W1.val.dimshuffle(3, 0, 1, 2)  # c01b to bc01
                conv_out1 = \
                    dnn.dnn_conv(img=input_shuffled[:, channel//2:,
                                                    :, :],
                                 kerns=W1_shuffled,
                                 subsample=(convstride, convstride),
                                 border_mode=padsize,
                                 )
                conv_out1 = conv_out1 + \
                    self.b1.val.dimshuffle('x', 0, 'x', 'x')
                conv_out = T.concatenate([conv_out0, conv_out1], axis=1)

            # ReLu
            self.output = T.maximum(conv_out, 0)

            # Pooling
            if poolsize != 1:
                self.output = dnn.dnn_pool(self.output,
                                           ws=(poolsize, poolsize),
                                           stride=(poolstride, poolstride))

            self.output = self.output.dimshuffle(1, 2, 3, 0)  # bc01 to c01b
            
        # elif lib_conv == 'cudaconvnet':
#
#             from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
#
#             self.conv_op = FilterActs(pad=self.padsize, stride=self.convstride,
#                                       partial_sum=1)
#
#             from theano.gpuarray.basic_ops import gpu_contiguous
#
#             # Conv
#             if group == 1:
#                 contiguous_input = gpu_contiguous(self.input)
#                 contiguous_filters = gpu_contiguous(self.W.val)
#                 conv_out = self.conv_op(contiguous_input, contiguous_filters)
#                 conv_out = conv_out + self.b.val.dimshuffle(0, 'x', 'x', 'x')
#             else:
#                 contiguous_input0 = gpu_contiguous(
#                     self.input[:channel//2, :, :, :])
#                 contiguous_filters0 = gpu_contiguous(self.W0.val)
#                 conv_out0 = self.conv_op(
#                     contiguous_input0, contiguous_filters0)
#                 conv_out0 = conv_out0 + \
#                     self.b0.val.dimshuffle(0, 'x', 'x', 'x')
#
#                 contiguous_input1 = gpu_contiguous(
#                     self.input[channel//2:, :, :, :])
#                 contiguous_filters1 = gpu_contiguous(self.W1.val)
#                 conv_out1 = self.conv_op(
#                     contiguous_input1, contiguous_filters1)
#                 conv_out1 = conv_out1 + \
#                     self.b1.val.dimshuffle(0, 'x', 'x', 'x')
#                 conv_out = T.concatenate([conv_out0, conv_out1], axis=0)
#
#             # ReLu
#             conv_out = gpu_contiguous(conv_out)
#             self.output = T.maximum(conv_out, 0)
#
#             # Pooling
#             if poolsize != 1:
#                 from pylearn2.sandbox.cuda_convnet.pool import MaxPool
#                 self.pool_op = MaxPool(ds=poolsize, stride=poolstride)
#                 self.output = self.pool_op(self.output)

        elif lib_conv == 'corrmm':

            from theano.gpuarray.basic_ops import gpu_contiguous
            from theano.gpuarray.blas import GpuCorrMM

            border_mode = 'half' if padsize == (filter_shape[1]-1)//2 else (padsize, padsize)
            self.corr_mm_op = GpuCorrMM(subsample=(convstride,convstride),
                                                border_mode=border_mode)

            input_shuffled = self.input.dimshuffle(3, 0, 1, 2)  # c01b to bc01


            if group==1:

                filters = self.W.val.dimshuffle(3, 0, 1, 2)

                # flip top-down, left-right to compute convolution instead of correlation  
                contiguous_filters = gpu_contiguous(filters[:, :, ::-1, ::-1])
                contiguous_input = gpu_contiguous(input_shuffled)

                conv_out = self.corr_mm_op(contiguous_input, contiguous_filters)
                conv_out = conv_out + self.b.val.dimshuffle('x', 0, 'x', 'x')

            else:

                W0_shuffled = self.W0.val.dimshuffle(3, 0, 1, 2)  # c01b to bc01

                contiguous_filters0 = gpu_contiguous(W0_shuffled[:, :, ::-1, ::-1])
                contiguous_input0 = gpu_contiguous(input_shuffled[:, :channel // 2,:, :])

                conv_out0 = self.corr_mm_op(contiguous_input0, contiguous_filters0)
                conv_out0 = conv_out0 + self.b0.val.dimshuffle('x', 0, 'x', 'x')

                W1_shuffled = self.W1.val.dimshuffle(3, 0, 1, 2)  # c01b to bc01

                contiguous_filters1 = gpu_contiguous(W1_shuffled[:, :, ::-1, ::-1])
                contiguous_input1 = gpu_contiguous(input_shuffled[:, channel // 2:,:, :])

                conv_out1 = self.corr_mm_op(contiguous_input1, contiguous_filters1)
                conv_out1 = conv_out1 + self.b1.val.dimshuffle('x', 0, 'x', 'x')
                conv_out = T.concatenate([conv_out0, conv_out1], axis=1)

            # ReLu
            self.output = T.maximum(conv_out, 0)
   

            # Pooling
            if poolsize != 1:
                from theano.gpuarray.pool import GpuPool
                
                
                ds_op = GpuPool(ignore_border=False, mode='max', ndim=2)
                
                self.output = ds_op(inp=self.output, ws=(poolsize,poolsize),
                                    stride=(poolstride,poolstride), pad=(0,0))
                
            self.output = self.output.dimshuffle(1, 2, 3, 0)  # bc01 to c01b

        else:
            NotImplementedError("lib_conv can only be cudnn or cudaconvnet for now")

        # LRN
        if self.lrn:
            # lrn_input = gpu_contiguous(self.output)
            self.output = self.lrn_func(self.output)

        if group == 1:
            self.params = [self.W.val, self.b.val]
            self.weight_type = ['W', 'b']
        else:
            self.params = [self.W0.val, self.b0.val, self.W1.val, self.b1.val]
            self.weight_type = ['W', 'b', 'W', 'b']

        if output_shape:
            self.output_shape = output_shape 
        else:
            self.output_shape = self.get_output_shape(self.input_shape)
        
        self.name = 'ConvPoolLRN(%s)' % lib_conv
        if printinfo: self.print_shape()                           
Code Example #15
File: ops.py Project: IndicoDataSolutions/Foxhound
def op(self, state):
    X = self.l_in.op(state=state)
    return dnn_pool(X, self.shape, self.stride, self.mode, self.pad)