Example #1
def pool2d(x,
           pool_size,
           strides=(1, 1),
           border_mode='valid',
           dim_ordering='th',
           pool_mode='max'):
    if border_mode == 'same':
        # TODO: add implementation for border_mode="same"
        raise Exception('border_mode="same" not supported with Theano.')
    elif border_mode == 'valid':
        ignore_border = True
        padding = (0, 0)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))

    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))

    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 3, 1, 2))

    if pool_mode == 'max':
        if _on_gpu() and dnn.dnn_available():
            pool_out = dnn_pool(x, pool_size, stride=strides, mode='max')
        else:
            pool_out = downsample.max_pool_2d(x,
                                              ds=pool_size,
                                              st=strides,
                                              ignore_border=ignore_border,
                                              padding=padding,
                                              mode='max')
    elif pool_mode == 'avg':
        if _on_gpu() and dnn.dnn_available():
            pool_out = dnn_pool(x,
                                pool_size,
                                stride=strides,
                                mode='average_exc_pad')
        else:
            pool_out = downsample.max_pool_2d(x,
                                              ds=pool_size,
                                              st=strides,
                                              ignore_border=ignore_border,
                                              padding=padding,
                                              mode='average_exc_pad')

    else:
        raise Exception('Invalid pooling mode: ' + str(pool_mode))

    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
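
A minimal usage sketch for the pool2d helper above, assuming the enclosing module already provides the imports the snippet relies on (theano, downsample, dnn and the _on_gpu helper):

import theano
import theano.tensor as T

x = T.tensor4('x')  # (samples, channels, rows, cols) in 'th' ordering
y = pool2d(x, pool_size=(2, 2), strides=(2, 2),
           border_mode='valid', dim_ordering='th', pool_mode='max')
f = theano.function([x], y)  # 2x2 max pooling with stride 2
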
Example #2
def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
           dim_ordering='th', pool_mode='max'):
    if border_mode == 'same':
        # TODO: add implementation for border_mode="same"
        raise Exception('border_mode="same" not supported with Theano.')
    elif border_mode == 'valid':
        ignore_border = True
        padding = (0, 0)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))

    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))

    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 3, 1, 2))

    if pool_mode == 'max':
        if _on_gpu() and dnn.dnn_available():
            pool_out = dnn_pool(x,
                                pool_size,
                                stride=strides,
                                mode='max')
        else:
            pool_out = downsample.max_pool_2d(x,
                                              ds=pool_size,
                                              st=strides,
                                              ignore_border=ignore_border,
                                              padding=padding,
                                              mode='max')
    elif pool_mode == 'avg':
        if _on_gpu() and dnn.dnn_available():
            pool_out = dnn_pool(x,
                                pool_size,
                                stride=strides,
                                mode='average_exc_pad')
        else:
            pool_out = downsample.max_pool_2d(x,
                                              ds=pool_size,
                                              st=strides,
                                              ignore_border=ignore_border,
                                              padding=padding,
                                              mode='average_exc_pad')
        
    else:
        raise Exception('Invalid pooling mode: ' + str(pool_mode))
    
    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
Example #3
    def get_output(self, train=False):
        X = self.get_input(train)
        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert (self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y))
            else:
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                border_mode = 'full'
                assert (self.subsample == (1, 1))

            conv_out = T.nnet.conv.conv2d(X,
                                          self.W,
                                          border_mode=border_mode,
                                          subsample=self.subsample)
            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x,
                                    shift_y:X.shape[3] + shift_y]

        return self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
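
The CPU fallback above emulates border_mode='same' by running a 'full' convolution and cropping its centre; a quick arithmetic check of that trick (plain Python, values are illustrative):

H, nb_row = 28, 3
full_rows = H + nb_row - 1                    # a 'full' conv grows the output to 30 rows
shift_x = (nb_row - 1) // 2                   # crop offset used above: 1
rows_kept = len(range(shift_x, H + shift_x))  # the slice taken above
assert rows_kept == H                         # 'same' output size recovered
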
Example #4
    def test_gpucorrmm_conv(self):
        if not dnn_available():
            raise SkipTest(cuda.dnn.dnn_available.msg)

        mode = mode_with_gpu.excluding('cudnn')
        for (i, f), s, b, flip, provide_shape in itertools.product(
                zip(self.inputs_shapes, self.filters_shapes),
                self.subsamples,
                self.border_modes,
                self.filter_flip,
                [False, True]):

            o = self.get_output_shape(i, f, s, b)
            self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
                         verify_grad=True, mode=mode, device='gpu',
                         provide_shape=provide_shape, border_mode=b,
                         filter_flip=flip,
                         target_op=(GpuCorrMM,
                                    GpuCorrMM_gradWeights,
                                    GpuCorrMM_gradInputs))
            self.run_gradweight(inputs_shape=i, filters_shape=f,
                                output_shape=o, subsample=s,
                                verify_grad=True, mode=mode, device='gpu',
                                provide_shape=provide_shape, border_mode=b,
                                filter_flip=flip,
                                target_op=GpuCorrMM_gradWeights)
            self.run_gradinput(inputs_shape=i, filters_shape=f,
                               output_shape=o, subsample=s,
                               verify_grad=True, mode=mode, device='gpu',
                               provide_shape=provide_shape, border_mode=b,
                               filter_flip=flip,
                               target_op=GpuCorrMM_gradInputs)
Example #5
    def get_output(self, train=False):
        X = self.get_input(train)
        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert(self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y))
            else:
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                border_mode = 'full'
                assert(self.subsample == (1, 1))

            conv_out = T.nnet.conv.conv2d(X, self.W,
                                          border_mode=border_mode,
                                          subsample=self.subsample,
                                          image_shape=self.input_shape,
                                          filter_shape=self.W_shape)
            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, shift_y:X.shape[3] + shift_y]

        return self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
Example #6
    def test_conv_gradw(self):
        if not dnn.dnn_available():
            raise SkipTest(dnn.dnn_available.msg)
        img = T.ftensor4("img")
        kerns = T.ftensor4("kerns")
        out = T.ftensor4("out")
        img_val = numpy.asarray(numpy.random.rand(2, 5, 6, 8), dtype="float32")
        kern_vals = numpy.asarray(numpy.random.rand(2, 1, 5, 6), dtype="float32")
        out_vals = numpy.zeros((3, 3, 1, 1), dtype="float32")

        for params in product(["valid", "full"], [(1, 1)], ["conv", "cross"]):  # strides besides (1, 1)
            temp_img = img.dimshuffle(1, 0, 2, 3)
            temp_kerns = kerns
            if params[2] == "conv":
                temp_kerns = temp_kerns[:, :, ::-1, ::-1]
            temp_kerns = temp_kerns.dimshuffle(1, 0, 2, 3)
            shape = (
                kern_vals.shape[1],
                img_val.shape[1],
                img_val.shape[2] - kern_vals.shape[2] + 1,
                img_val.shape[3] - kern_vals.shape[3] + 1,
            )
            out_vals = numpy.zeros(shape, dtype="float32")
            desc = dnn.GpuDnnConvDesc(border_mode=params[0], subsample=params[1], conv_mode=params[2])(
                temp_img.shape, out.shape
            )
            conv_grad_w = dnn.GpuDnnConvGradW()(temp_img, temp_kerns, out, desc)
            self._compile_and_check(
                [temp_img, temp_kerns, out], [conv_grad_w], [img_val, kern_vals, out_vals], dnn.GpuDnnConvGradW
            )
Example #7
    def get_output(self, train):
        X = self.get_input(train)
        border_mode = self.border_mode
        if dnn.dnn_available() and theano.config.device[:3] == 'gpu':
            if border_mode == 'same':
                assert(self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y))
            else:
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                border_mode = 'full'

            conv_out = T.nnet.conv.conv2d(X, self.W,
                                          border_mode=border_mode,
                                          subsample=self.subsample)
            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, shift_y:X.shape[3] + shift_y]

        return self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
Example #8
    def test_conv_gradi(self):
        if not dnn.dnn_available():
            raise SkipTest(dnn.dnn_available.msg)
        img = T.ftensor4('img')
        kerns = T.ftensor4('kerns')
        out = T.ftensor4('out')
        img_val = numpy.asarray(numpy.random.rand(3, 4, 5, 6), dtype='float32')
        kern_vals = numpy.asarray(numpy.random.rand(3, 4, 5, 6),
                                  dtype='float32')

        for params in product(
            ['valid'],  # Should this work for 'full'?
            [(1, 1)],
            ['conv', 'cross']):
            temp_kerns = kerns.dimshuffle(1, 0, 2, 3)
            shape = (img_val.shape[0], kern_vals.shape[1],
                     img_val.shape[2] + kern_vals.shape[2] - 1,
                     img_val.shape[3] + kern_vals.shape[3] - 1)
            out_vals = numpy.zeros(shape, dtype='float32')
            desc = dnn.GpuDnnConvDesc(border_mode=params[0],
                                      subsample=params[1],
                                      conv_mode=params[2])(out.shape,
                                                           temp_kerns.shape)
            conv_grad_i = dnn.GpuDnnConvGradI()(
                temp_kerns,
                img,
                out,
                desc,
            )
            self._compile_and_check([temp_kerns, img, out], [conv_grad_i],
                                    [kern_vals, img_val, out_vals],
                                    dnn.GpuDnnConvGradI)
Example #9
    def test_conv_gradw(self):
        if not dnn.dnn_available():
            raise SkipTest(dnn.dnn_available.msg)
        img = T.ftensor4('img')
        kerns = T.ftensor4('kerns')
        img_val = numpy.asarray(numpy.random.rand(3, 4, 5, 6), dtype='float32')
        kern_vals = numpy.asarray(numpy.random.rand(3, 4, 5, 6),
                                  dtype='float32')

        for params in product(
            ['valid', 'full'],
            [(1, 1)],  # strides besides (1, 1)
            ['conv', 'cross']):
            temp_img = img.dimshuffle(1, 0, 2, 3)
            temp_kerns = kerns
            if params[2] == 'conv':
                temp_kerns = temp_kerns[:, :, ::-1, ::-1]
            temp_kerns = temp_kerns.dimshuffle(1, 0, 2, 3)
            shape = theano.tensor.stack(
                temp_kerns.shape[1], temp_img.shape[1],
                temp_img.shape[2] - temp_kerns.shape[2] + 1,
                temp_img.shape[3] - temp_kerns.shape[3] + 1)
            desc = dnn.GpuDnnConvDesc(border_mode=params[0],
                                      subsample=params[1],
                                      conv_mode=params[2])(temp_img.shape,
                                                           shape)
            conv_grad_w = dnn.GpuDnnConvGradW()(temp_img, temp_kerns, desc,
                                                shape[2], shape[3])
            self._compile_and_check([temp_img, temp_kerns], [conv_grad_w],
                                    [img_val, kern_vals], dnn.GpuDnnConvGradW)
Example #10
    def get_output(self, train=False):
        X = self.get_input(train)
        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert(self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y))
            else:
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                border_mode = 'full'
                assert(self.subsample == (1, 1))

            conv_out = T.nnet.conv.conv2d(X, self.W,
                                          border_mode=border_mode,
                                          subsample=self.subsample)
            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x,
                                    shift_y:X.shape[3] + shift_y]

        return self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    def get_config(self):
        return {"name": self.__class__.__name__,
                "nb_filter": self.nb_filter,
                "stack_size": self.stack_size,
                "nb_row": self.nb_row,
                "nb_col": self.nb_col,
                "init": self.init.__name__,
                "activation": self.activation.__name__,
                "border_mode": self.border_mode,
                "subsample": self.subsample,
                "W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None,
                "b_regularizer": self.b_regularizer.get_config() if self.b_regularizer else None,
                "activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None,
                "W_constraint": self.W_constraint.get_config() if self.W_constraint else None,
                "b_constraint": self.b_constraint.get_config() if self.b_constraint else None}
Example #11
 def test_import_without_gpu_or_cudnn_raises(self):
     from theano.sandbox.cuda import dnn
     if theano.config.device.startswith("gpu") and dnn.dnn_available():
         pytest.skip()
     else:
         with pytest.raises(ImportError):
             import lasagne.layers.dnn
Example #12
    def get_output(self, train=False):
        X = self.get_input(train)
        X = T.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1)).dimshuffle(0, 2, 1, 3)

        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert(self.subsample_length == 1)
                pad_x = (self.filter_length - self.subsample_length) // 2
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=(pad_x, 0))
            else:
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                assert(self.subsample_length == 1)
                border_mode = 'full'

            conv_out = T.nnet.conv.conv2d(X, self.W,
                                          border_mode=border_mode,
                                          subsample=self.subsample)
            if self.border_mode == 'same':
                shift_x = (self.filter_length - 1) // 2
                conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, :]

        output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = T.reshape(output, (output.shape[0], output.shape[1], output.shape[2])).dimshuffle(0, 2, 1)
        return output
Example #13
def create_NIPS_Sprag_init(inp_shape, output_num, stride=None, untie_biases=False, input_var=None):
    import theano.tensor.signal.conv
    from theano.sandbox.cuda import dnn
    # if no dnn support use default conv
    if not theano.config.device.startswith("gpu") or not dnn.dnn_available():  # code stolen from lasagne dnn.py
        import lasagne.layers.conv
        conv = lasagne.layers.conv.Conv2DLayer
    else:
        import lasagne.layers.dnn
        conv = lasagne.layers.dnn.Conv2DDNNLayer

    # setup network layout
    l_in = lasagne.layers.InputLayer(inp_shape, input_var=input_var)
    l_hid1 = conv(l_in, 16, (8, 8), stride=stride[0], untie_biases=untie_biases,
                        W=lasagne.init.Normal(.01),
                        b=lasagne.init.Constant(.1))

    l_hid2 = conv(l_hid1, 32, (4, 4), stride=stride[1], untie_biases=untie_biases,
                        W=lasagne.init.Normal(.01),
                        b=lasagne.init.Constant(.1))

    l_hid3 = lasagne.layers.DenseLayer(l_hid2, 256,
                        W=lasagne.init.Normal(.01),
                        b=lasagne.init.Constant(.1))

    l_out = lasagne.layers.DenseLayer(l_hid3, output_num, nonlinearity=lasagne.nonlinearities.linear,
                        W=lasagne.init.Normal(.01),
                        b=lasagne.init.Constant(.1))

    return {'l_in': l_in, 'l_hid1': l_hid1, 'l_hid2': l_hid2, 'l_hid3': l_hid3, 'l_out': l_out}
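
A hypothetical call to the builder above; the input shape, action count and strides are assumptions in the spirit of the Atari setup the name suggests, not values from the source:

import lasagne

net = create_NIPS_Sprag_init((None, 4, 84, 84), output_num=6, stride=[4, 2])
q_values = lasagne.layers.get_output(net['l_out'])  # symbolic Q-value expression
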
Example #14
    def test_softmax(self):
        if not dnn.dnn_available():
            raise SkipTest(dnn.dnn_available.msg)
        t = T.ftensor4('t')
        rand_tensor = numpy.asarray(
            numpy.random.rand(5, 4, 3, 2),
            dtype='float32'
        )
        self._compile_and_check(
            [t],
            [dnn.GpuDnnSoftmax('bc01', 'accurate', 'channel')(t)],
            [rand_tensor],
            dnn.GpuDnnSoftmax
        )

        self._compile_and_check(
            [t],
            [
                T.grad(
                    dnn.GpuDnnSoftmax(
                        'bc01',
                        'accurate',
                        'channel'
                    )(t).mean(),
                    t
                )
            ],
            [rand_tensor],
            dnn.GpuDnnSoftmaxGrad
        )
Example #15
    def test_conv(self):
        if not dnn.dnn_available():
            raise SkipTest(dnn.dnn_available.msg)
        img = T.ftensor4('img')
        kerns = T.ftensor4('kerns')
        out = T.ftensor4('out')
        img_val = numpy.asarray(numpy.random.rand(7, 2, 6, 4), dtype='float32')
        kern_vals = numpy.asarray(numpy.random.rand(8, 2, 4, 3),
                                  dtype='float32')

        for params in product(['valid', 'full'], [(1, 1), (2, 2)],
                              ['conv', 'cross']):
            out_vals = numpy.zeros(dnn.GpuDnnConv.get_out_shape(
                img_val.shape,
                kern_vals.shape,
                border_mode=params[0],
                subsample=params[1]),
                                   dtype='float32')
            desc = dnn.GpuDnnConvDesc(border_mode=params[0],
                                      subsample=params[1],
                                      conv_mode=params[2])(img.shape,
                                                           kerns.shape)
            conv = dnn.GpuDnnConv()(img, kerns, out, desc)
            self._compile_and_check([img, kerns, out], [conv],
                                    [img_val, kern_vals, out_vals],
                                    dnn.GpuDnnConv)
Example #17
def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
           dim_ordering='th', pool_mode='max'):
    # ====== dim ordering ====== #
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))
    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 3, 1, 2))
    # ====== border mode ====== #
    if border_mode == 'same':
        w_pad = pool_size[0] - 2 if pool_size[0] % 2 == 1 else pool_size[0] - 1
        h_pad = pool_size[1] - 2 if pool_size[1] % 2 == 1 else pool_size[1] - 1
        padding = (w_pad, h_pad)
    elif border_mode == 'valid':
        padding = (0, 0)
    elif isinstance(border_mode, (tuple, list)):
        padding = tuple(border_mode)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))

    # ====== pooling ====== #
    if _on_gpu() and dnn.dnn_available():
        pool_out = dnn.dnn_pool(x, pool_size,
                                stride=strides,
                                mode=pool_mode,
                                pad=padding)
    else:  # CPU version supported by Theano
        pool_out = pool.pool_2d(x, ds=pool_size, st=strides,
                                ignore_border=True,
                                padding=padding,
                                mode=pool_mode)

    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
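
A quick check of the 'same' padding rule used above: an odd pool side k pads by k - 2, an even side by k - 1 (plain Python, the printed pairs are just the formula evaluated):

for k in (2, 3, 4, 5):
    pad = k - 2 if k % 2 == 1 else k - 1
    print(k, '->', pad)   # 2 -> 1, 3 -> 1, 4 -> 3, 5 -> 3
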
Example #18
    def get_output(self, train=False):
        X = self.get_input(train)
        newshape = (X.shape[0]*X.shape[1], X.shape[2], X.shape[3], X.shape[4])
        Y = theano.tensor.reshape(X, newshape) #collapse num_samples and num_timesteps
        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert(self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=Y,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y))
            else:
                conv_out = dnn.dnn_conv(img=Y,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                border_mode = 'full'

            conv_out = theano.tensor.nnet.conv.conv2d(Y, self.W,
                border_mode=border_mode, subsample=self.subsample)

            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:Y.shape[2] + shift_x, shift_y:Y.shape[3] + shift_y]

        output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        newshape = (X.shape[0], X.shape[1], output.shape[1], output.shape[2], output.shape[3])
        return theano.tensor.reshape(output, newshape)
Example #19
    def test_conv_gradi(self):
        if not dnn.dnn_available():
            raise SkipTest(dnn.dnn_available.msg)
        img = T.ftensor4("img")
        kerns = T.ftensor4("kerns")
        out = T.ftensor4("out")
        img_val = numpy.asarray(numpy.random.rand(3, 4, 5, 6), dtype="float32")
        kern_vals = numpy.asarray(numpy.random.rand(3, 4, 5, 6), dtype="float32")

        for params in product(["valid"], [(1, 1)], ["conv", "cross"]):  # Should this work for 'full'?
            temp_kerns = kerns.dimshuffle(1, 0, 2, 3)
            shape = (
                img_val.shape[0],
                kern_vals.shape[1],
                img_val.shape[2] + kern_vals.shape[2] - 1,
                img_val.shape[3] + kern_vals.shape[3] - 1,
            )
            out_vals = numpy.zeros(shape, dtype="float32")
            desc = dnn.GpuDnnConvDesc(border_mode=params[0], subsample=params[1], conv_mode=params[2])(
                out.shape, temp_kerns.shape
            )
            conv_grad_i = dnn.GpuDnnConvGradI()(temp_kerns, img, out, desc)
            self._compile_and_check(
                [temp_kerns, img, out], [conv_grad_i], [kern_vals, img_val, out_vals], dnn.GpuDnnConvGradI
            )
Example #20
    def test_dnn_conv(self):
        if not dnn_available():
            raise SkipTest(cuda.dnn.dnn_available.msg)
        mode = mode_with_gpu
        # provide_shape is not used by the CuDNN implementation
        provide_shape = False

        for (i, f), s, b, flip in itertools.product(
                zip(self.inputs_shapes, self.filters_shapes),
                self.subsamples,
                self.border_modes,
                self.filter_flip):
            o = self.get_output_shape(i, f, s, b)
            self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
                         verify_grad=True, mode=mode, device='gpu',
                         provide_shape=provide_shape, border_mode=b,
                         filter_flip=flip)
            self.run_gradweight(inputs_shape=i, filters_shape=f,
                                output_shape=o, subsample=s,
                                verify_grad=True, mode=mode, device='gpu',
                                provide_shape=provide_shape, border_mode=b,
                                filter_flip=flip)
            self.run_gradinput(inputs_shape=i, filters_shape=f,
                               output_shape=o, subsample=s,
                               verify_grad=True, mode=mode, device='gpu',
                               provide_shape=provide_shape, border_mode=b,
                               filter_flip=flip)
Example #21
    def get_output(self, train=False):
        X = self.get_input(train)
        X = T.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1)).dimshuffle(0, 2, 1, 3)

        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert(self.subsample_length == 1)
                pad_x = (self.filter_length - self.subsample_length) // 2
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=(pad_x, 0))
            else:
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                assert(self.subsample_length == 1)
                border_mode = 'full'

            conv_out = T.nnet.conv.conv2d(X, self.W,
                                          border_mode=border_mode,
                                          subsample=self.subsample)
            if self.border_mode == 'same':
                shift_x = (self.filter_length - 1) // 2
                conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, :]

        output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = T.reshape(output, (output.shape[0], output.shape[1], output.shape[2])).dimshuffle(0, 2, 1)
        return output
Example #22
    def test_conv(self):
        if not dnn.dnn_available():
            raise SkipTest(dnn.dnn_available.msg)
        img = T.ftensor4('img')
        kerns = T.ftensor4('kerns')
        img_val = numpy.asarray(
            numpy.random.rand(3, 4, 5, 6),
            dtype='float32'
        )
        kern_vals = numpy.asarray(
            numpy.random.rand(3, 4, 5, 6),
            dtype='float32'
        )

        for params in product(
            ['valid', 'full'],
            [(1, 1), (2, 2)],
            ['conv', 'cross']
        ):
            desc = dnn.GpuDnnConvDesc(
                border_mode=params[0],
                subsample=params[1],
                conv_mode=params[2]
            )(img.shape, kerns.shape)
            # pass the symbolic tensors, not the numpy values
            conv = dnn.GpuDnnConv()(img, kerns, desc)
            self._compile_and_check(
                [img, kerns],
                [conv],
                [img_val, kern_vals],
                dnn.GpuDnnConv
            )
Example #23
 def test_dnn_conv(self):
     if not dnn_available():
         raise SkipTest(cuda.dnn.dnn_available.msg)
     mode = mode_with_gpu
     # provide_shape is not used by the CuDNN implementation
     provide_shape = False
     for (i, f), s, b, flip in itertools.product(
             zip(self.inputs_shapes, self.filters_shapes),
             self.subsamples,
             self.border_modes,
             self.filter_flip):
         o = self.get_output_shape(i, f, s, b)
         self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
                      verify_grad=True, mode=mode, device='gpu',
                      provide_shape=provide_shape, border_mode=b,
                      filter_flip=flip, target_op=GpuDnnConv)
         self.run_gradweight(inputs_shape=i, filters_shape=f,
                             output_shape=o, subsample=s,
                             verify_grad=True, mode=mode, device='gpu',
                             provide_shape=provide_shape, border_mode=b,
                             filter_flip=flip, target_op=GpuDnnConvGradW)
         self.run_gradinput(inputs_shape=i, filters_shape=f,
                            output_shape=o, subsample=s,
                            verify_grad=True, mode=mode, device='gpu',
                            provide_shape=provide_shape, border_mode=b,
                            filter_flip=flip, target_op=GpuDnnConvGradI)
Example #24
    def test_gpucormm_conv(self):
        if not dnn_available():
            raise SkipTest(cuda.dnn.dnn_available.msg)

        mode = mode_with_gpu.excluding('cudnn')
        for (i, f), s, b, flip, provide_shape in itertools.product(
                zip(self.inputs_shapes, self.filters_shapes),
                self.subsamples,
                self.border_modes,
                self.filter_flip,
                [False, True]):

            o = self.get_output_shape(i, f, s, b)
            self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
                         verify_grad=True, mode=mode, device='gpu',
                         provide_shape=provide_shape, border_mode=b,
                         filter_flip=flip)
            self.run_gradweight(inputs_shape=i, filters_shape=f,
                                output_shape=o, subsample=s,
                                verify_grad=True, mode=mode, device='gpu',
                                provide_shape=provide_shape, border_mode=b,
                                filter_flip=flip)
            self.run_gradinput(inputs_shape=i, filters_shape=f,
                               output_shape=o, subsample=s,
                               verify_grad=True, mode=mode, device='gpu',
                               provide_shape=provide_shape, border_mode=b,
                               filter_flip=flip)
Example #25
    def test_cormm_conv(self):
        if not dnn_available():
            raise SkipTest(cuda.dnn.dnn_available.msg)

        mode = mode_without_gpu
        for (i, f), s, b, flip, provide_shape in itertools.product(
                zip(self.inputs_shapes, self.filters_shapes),
                self.subsamples,
                self.border_modes,
                self.filter_flip,
                [False, True]):

            o = self.get_output_shape(i, f, s, b)
            self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
                         verify_grad=True, mode=mode, device='cpu',
                         provide_shape=provide_shape, border_mode=b,
                         filter_flip=flip)
            self.run_gradweight(inputs_shape=i, filters_shape=f,
                                output_shape=o, subsample=s,
                                verify_grad=True, mode=mode, device='cpu',
                                provide_shape=provide_shape, border_mode=b,
                                filter_flip=flip)
            self.run_gradinput(inputs_shape=i, filters_shape=f,
                               output_shape=o, subsample=s,
                               verify_grad=True, mode=mode, device='cpu',
                               provide_shape=provide_shape, border_mode=b,
                               filter_flip=flip)
Example #26
 def tcase(self, i, f, s, b, flip, provide_shape):
     if not dnn_available():
         raise SkipTest(cuda.dnn.dnn_available.msg)
     mode = mode_with_gpu
     o = self.get_output_shape(i, f, s, b)
     self.run_fwd(inputs_shape=i,
                  filters_shape=f,
                  subsample=s,
                  verify_grad=True,
                  mode=mode,
                  provide_shape=provide_shape,
                  border_mode=b,
                  filter_flip=flip,
                  target_op=GpuDnnConv)
     self.run_gradweight(inputs_shape=i,
                         filters_shape=f,
                         output_shape=o,
                         subsample=s,
                         verify_grad=True,
                         mode=mode,
                         provide_shape=provide_shape,
                         border_mode=b,
                         filter_flip=flip,
                         target_op=GpuDnnConvGradW)
     self.run_gradinput(inputs_shape=i,
                        filters_shape=f,
                        output_shape=o,
                        subsample=s,
                        verify_grad=True,
                        mode=mode,
                        provide_shape=provide_shape,
                        border_mode=b,
                        filter_flip=flip,
                        target_op=GpuDnnConvGradI)
Example #28
File: pool.py Project: zhjpqq/denet
    def __init__(self,
                 layers,
                 size=(2, 2),
                 stride=None,
                 pad=(0, 0),
                 mode="max",
                 ignore_border=True,
                 json_param={}):
        super().__init__(layer_index=len(layers))

        self.input = layers[-1].output
        self.input_shape = layers[-1].output_shape

        self.size = json_param.get("size", size)
        self.pad = json_param.get("pad", pad)
        self.ignore_border = json_param.get("ignoreBorder", ignore_border)
        self.mode = json_param.get("mode", mode)
        self.stride = json_param.get("stride", stride)
        if self.stride is None:
            self.stride = self.size

        #output dim
        if self.ignore_border:
            h = int(
                math.floor(
                    (self.input_shape[2] + 2 * self.pad[0] - self.size[0]) /
                    self.stride[0])) + 1
            w = int(
                math.floor(
                    (self.input_shape[3] + 2 * self.pad[1] - self.size[1]) /
                    self.stride[1])) + 1
        else:
            h = int(
                math.ceil(
                    (self.input_shape[2] + 2 * self.pad[0]) / self.stride[0]))
            w = int(
                math.ceil(
                    (self.input_shape[3] + 2 * self.pad[1]) / self.stride[1]))

        # the Theano optimizer sometimes fails to use cuDNN pooling!
        use_cudnn = (dnn.dnn_available() and dnn.version() >= (4000, 4000)
                     and self.ignore_border)
        if use_cudnn:
            self.output = dnn.dnn_pool(self.input,
                                       ws=self.size,
                                       pad=self.pad,
                                       stride=self.stride,
                                       mode=self.mode)
        else:
            self.output = tensor.signal.pool.pool_2d(
                self.input,
                ds=self.size,
                padding=self.pad,
                ignore_border=self.ignore_border,
                st=self.stride,
                mode=self.mode)

        self.output_shape = (self.input_shape[0], self.input_shape[1], h, w)
        logging.verbose("Adding", self)
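
A worked instance of the two output-size formulas above (numbers are illustrative only):

import math

h_in, pad, size, stride = 32, 0, 2, 2
h_ignore_border = int(math.floor((h_in + 2 * pad - size) / stride)) + 1  # 16
h_keep_border = int(math.ceil((h_in + 2 * pad) / stride))                # 16
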
Example #29
 def test_pool(self):
     if not dnn.dnn_available():
         raise SkipTest(dnn.dnn_available.msg)
     img = T.ftensor4("img")
     img_val = numpy.asarray(numpy.random.rand(2, 3, 4, 5), dtype="float32")
     for params in product([(1, 1), (2, 2), (3, 3)], [(1, 1), (2, 2), (3, 3)], ["max", "average"]):
         desc = dnn.GpuDnnPoolDesc(ws=params[0], stride=params[1], mode=params[2])()
         self._compile_and_check([img], [dnn.GpuDnnPool()(img, desc)], [img_val], dnn.GpuDnnPool)
Example #30
File: env.py Project: vitteemou/DLS
def check_cudnn():
    result = {}
    try:
        result['available'] = dnn.dnn_available()
        if len(dnn.version()) > 0:
            result['version'] = str(dnn.version()[0])
    except Exception:
        result['available'] = False
    return result
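
A sketch of how the probe above might be used at startup; the printed messages are illustrative:

info = check_cudnn()
if info['available']:
    print('cuDNN version:', info.get('version', 'unknown'))
else:
    print('cuDNN not available; convolutions will fall back to non-dnn paths')
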
Example #31
    def get_output(self):
        # RECURSE
        inp, time, updates = self.incoming.get_output()

        # CALCULATE SYNAPTIC SUMMED INPUT
        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert (self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=inp,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y))
            else:
                conv_out = dnn.dnn_conv(img=inp,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                border_mode = 'full'
            conv_out = T.nnet.conv.conv2d(inp,
                                          self.W,
                                          border_mode=border_mode,
                                          subsample=self.subsample)
            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:inp.shape[2] + shift_x,
                                    shift_y:inp.shape[3] + shift_y]

        # UPDATE NEURONS
        #   Get impulse
        impulse = conv_out
        #   Destroy impulse if in refrac
        masked_imp = T.set_subtensor(
            impulse[(self.refrac_until > time).nonzero()], 0.)
        #   Add impulse
        new_mem = self.mem + masked_imp
        #   Store spiking
        output_spikes = new_mem > self.threshold
        #   Reset neuron
        new_and_reset_mem = T.set_subtensor(new_mem[output_spikes.nonzero()],
                                            0.)
        #   Store refractory
        new_refractory = T.set_subtensor(
            self.refrac_until[output_spikes.nonzero()], time + self.refractory)

        # Store updates
        updates.append((self.refrac_until, new_refractory))
        updates.append((self.mem, new_and_reset_mem))

        # Finish
        return (T.cast(output_spikes, 'float32'), time, updates)
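
For intuition, a plain-NumPy rendering of the neuron update performed above (illustrative values; the real code operates on shared Theano variables):

import numpy as np

mem = np.array([0.4, 0.9, 0.2]); impulse = np.array([0.3, 0.3, 0.3])
refrac_until = np.array([0.0, 5.0, 0.0]); time, threshold = 1.0, 1.0
impulse[refrac_until > time] = 0.   # destroy impulse while refractory
new_mem = mem + impulse             # add impulse -> [0.7, 0.9, 0.5]
spikes = new_mem > threshold        # no neuron crosses threshold here
new_mem[spikes] = 0.                # spiking neurons would be reset to 0
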
Example #32
 def get_output(self, input, params, testing=False):
     if dnn.dnn_available():
         return dnn.dnn_conv(img=input,
                             kerns=params[0],
                             subsample=(self.row_stride, self.col_stride),
                             border_mode=self.conv_mode)
     else:
         return T.nnet.conv2d(input,
                              params[0],
                              subsample=(self.row_stride, self.col_stride),
                              border_mode=self.conv_mode)
Example #33
def get_lasagne_conv_layer():
    import theano.tensor.signal.conv
    from theano.sandbox.cuda import dnn
    # if no dnn support use default conv
    if not theano.config.device.startswith("gpu") or not dnn.dnn_available():  # code stolen from lasagne dnn.py
        import lasagne.layers.conv
        conv = lasagne.layers.conv.Conv2DLayer
    else:
        import lasagne.layers.dnn
        conv = lasagne.layers.dnn.Conv2DDNNLayer
    return conv
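
A sketch of using the returned layer class (the layer arguments here are assumptions, not from the source):

import lasagne

conv = get_lasagne_conv_layer()
l_in = lasagne.layers.InputLayer((None, 3, 32, 32))
l_conv = conv(l_in, num_filters=16, filter_size=(3, 3))
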
Example #34
def conv2d(x, kernel, strides=(1, 1), border_mode='valid', dim_ordering='th'):
    '''
    Run on cuDNN if available.
    border_mode: string, "same" or "valid".
    '''
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))

    if dim_ordering == 'tf':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, rows, cols)
        # TF input shape: (samples, rows, cols, input_depth)
        # TH kernel shape: (depth, input_depth, rows, cols)
        # TF kernel shape: (rows, cols, input_depth, depth)
        x = x.dimshuffle((0, 3, 1, 2))
        kernel = kernel.dimshuffle((3, 2, 0, 1))

    if _on_gpu() and dnn.dnn_available():
        if border_mode == 'same':
            assert(strides == (1, 1))
            np_kernel = kernel.eval()
            pad_x = (np_kernel.shape[2] - strides[0]) // 2
            pad_y = (np_kernel.shape[3] - strides[1]) // 2
            conv_out = dnn.dnn_conv(img=x,
                                    kerns=kernel,
                                    border_mode=(pad_x, pad_y))
        else:
            conv_out = dnn.dnn_conv(img=x,
                                    kerns=kernel,
                                    border_mode=border_mode,
                                    subsample=strides)
    else:
        if border_mode == 'same':
            th_border_mode = 'full'
            assert(strides == (1, 1))
        elif border_mode == 'valid':
            th_border_mode = 'valid'
        else:
            raise Exception('Border mode not supported: ' + str(border_mode))

        conv_out = T.nnet.conv.conv2d(x, kernel,
                                      border_mode=th_border_mode,
                                      subsample=strides)
        if border_mode == 'same':
            shift_x = (kernel.shape[2] - 1) // 2
            shift_y = (kernel.shape[3] - 1) // 2
            conv_out = conv_out[:, :,
                                shift_x:x.shape[2] + shift_x,
                                shift_y:x.shape[3] + shift_y]
    if dim_ordering == 'tf':
        conv_out = conv_out.dimshuffle((0, 2, 3, 1))
    return conv_out
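
A minimal sketch compiling the conv2d wrapper above, assuming the module's own imports (theano, T, _on_gpu, dnn). border_mode='valid' is used because the 'same' branch calls kernel.eval(), which requires a constant kernel:

import theano
import theano.tensor as T

x = T.tensor4('x')   # (samples, input_depth, rows, cols), 'th' ordering
w = T.tensor4('w')   # (depth, input_depth, rows, cols)
y = conv2d(x, w, strides=(1, 1), border_mode='valid', dim_ordering='th')
f = theano.function([x, w], y)
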
Example #35
 def test_pool(self):
     if not dnn.dnn_available():
         raise SkipTest(dnn.dnn_available.msg)
     img = T.ftensor4('img')
     img_val = numpy.asarray(numpy.random.rand(2, 3, 4, 5), dtype='float32')
     for params in product([(1, 1), (2, 2), (3, 3)],
                           [(1, 1), (2, 2), (3, 3)], ['max', 'average']):
         desc = dnn.GpuDnnPoolDesc(ws=params[0],
                                   stride=params[1],
                                   mode=params[2])()
         self._compile_and_check([img], [dnn.GpuDnnPool()(img, desc)],
                                 [img_val], dnn.GpuDnnPool)
Example #36
def conv2d(x, kernel, strides=(1, 1), border_mode='valid', dim_ordering='th'):
    '''
    Run on cuDNN if available.
    border_mode: string, "same" or "valid".
    '''
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))

    if dim_ordering == 'tf':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, rows, cols)
        # TF input shape: (samples, rows, cols, input_depth)
        # TH kernel shape: (depth, input_depth, rows, cols)
        # TF kernel shape: (rows, cols, input_depth, depth)
        x = x.dimshuffle((0, 3, 1, 2))
        kernel = kernel.dimshuffle((3, 2, 0, 1))

    if _on_gpu() and dnn.dnn_available():
        if border_mode == 'same':
            assert (strides == (1, 1))
            np_kernel = kernel.eval()
            pad_x = (np_kernel.shape[2] - strides[0]) // 2
            pad_y = (np_kernel.shape[3] - strides[1]) // 2
            conv_out = dnn.dnn_conv(img=x,
                                    kerns=kernel,
                                    border_mode=(pad_x, pad_y))
        else:
            conv_out = dnn.dnn_conv(img=x,
                                    kerns=kernel,
                                    border_mode=border_mode,
                                    subsample=strides)
    else:
        if border_mode == 'same':
            th_border_mode = 'full'
            assert (strides == (1, 1))
        elif border_mode == 'valid':
            th_border_mode = 'valid'
        else:
            raise Exception('Border mode not supported: ' + str(border_mode))

        conv_out = T.nnet.conv.conv2d(x,
                                      kernel,
                                      border_mode=th_border_mode,
                                      subsample=strides)
        if border_mode == 'same':
            shift_x = (kernel.shape[2] - 1) // 2
            shift_y = (kernel.shape[3] - 1) // 2
            conv_out = conv_out[:, :, shift_x:x.shape[2] + shift_x,
                                shift_y:x.shape[3] + shift_y]
    if dim_ordering == 'tf':
        conv_out = conv_out.dimshuffle((0, 2, 3, 1))
    return conv_out
Example #37
def get_lasagne_conv_layer():
    import theano.tensor.signal.conv
    from theano.sandbox.cuda import dnn
    # if no dnn support use default conv
    if not theano.config.device.startswith("gpu") or not dnn.dnn_available():  # code stolen from lasagne dnn.py
        import lasagne.layers.conv
        conv = lasagne.layers.conv.Conv2DLayer
    else:
        import lasagne.layers.dnn
        conv = lasagne.layers.dnn.Conv2DDNNLayer
    return conv
Example #38
def find_patch_matches(a, a_norm, b):
    '''For each patch in A, find the best matching patch in B'''
    convs = None
    if K.backend() == 'theano':
        # HACK: This was not being performed on the GPU for some reason.
        from theano.sandbox.cuda import dnn
        if dnn.dnn_available():
            convs = dnn.dnn_conv(
                img=a, kerns=b[:, :, ::-1, ::-1], border_mode='valid')
    if convs is None:
        convs = K.conv2d(a, b[:, :, ::-1, ::-1], border_mode='valid')
    argmax = K.argmax(convs / a_norm, axis=1)
    return argmax
Example #39
def create_NIPS_Sprag_init(inp_shape,
                           output_num,
                           stride=None,
                           untie_biases=False,
                           input_var=None):
    import theano.tensor.signal.conv
    from theano.sandbox.cuda import dnn
    # if no dnn support use default conv
    if not theano.config.device.startswith("gpu") or not dnn.dnn_available():  # code stolen from lasagne dnn.py
        import lasagne.layers.conv
        conv = lasagne.layers.conv.Conv2DLayer
    else:
        import lasagne.layers.dnn
        conv = lasagne.layers.dnn.Conv2DDNNLayer

    # setup network layout
    l_in = lasagne.layers.InputLayer(inp_shape, input_var=input_var)
    l_hid1 = conv(l_in,
                  16, (8, 8),
                  stride=stride[0],
                  untie_biases=untie_biases,
                  W=lasagne.init.Normal(.01),
                  b=lasagne.init.Constant(.1))

    l_hid2 = conv(l_hid1,
                  32, (4, 4),
                  stride=stride[1],
                  untie_biases=untie_biases,
                  W=lasagne.init.Normal(.01),
                  b=lasagne.init.Constant(.1))

    l_hid3 = lasagne.layers.DenseLayer(l_hid2,
                                       256,
                                       W=lasagne.init.Normal(.01),
                                       b=lasagne.init.Constant(.1))

    l_out = lasagne.layers.DenseLayer(
        l_hid3,
        output_num,
        nonlinearity=lasagne.nonlinearities.linear,
        W=lasagne.init.Normal(.01),
        b=lasagne.init.Constant(.1))

    return {
        'l_in': l_in,
        'l_hid1': l_hid1,
        'l_hid2': l_hid2,
        'l_hid3': l_hid3,
        'l_out': l_out
    }
Example #40
    def get_output(self):
        # RECURSE
        inp, time, updates = self.incoming.get_output()

        # CALCULATE SYNAPTIC SUMMED INPUT
        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert(self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=inp,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y))
            else:
                conv_out = dnn.dnn_conv(img=inp,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                border_mode = 'full'
            conv_out = T.nnet.conv.conv2d(inp, self.W,
                                          border_mode=border_mode,
                                          subsample=self.subsample)
            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:inp.shape[2] + shift_x, shift_y:inp.shape[3] + shift_y]

        # UPDATE NEURONS
        #   Get impulse
        impulse = conv_out
        #   Destroy impulse if in refrac
        masked_imp = T.set_subtensor(impulse[(self.refrac_until>time).nonzero()], 0.)
        #   Add impulse
        new_mem = self.mem + masked_imp
        #   Store spiking
        output_spikes = new_mem > self.threshold
        #   Reset neuron
        new_and_reset_mem = T.set_subtensor(new_mem[output_spikes.nonzero()], 0.)
        #   Store refractory
        new_refractory = T.set_subtensor(self.refrac_until[output_spikes.nonzero()], time + self.refractory)

        # Store updates
        updates.append( (self.refrac_until, new_refractory) )
        updates.append( (self.mem, new_and_reset_mem) )

        # Finish
        return (T.cast(output_spikes,'float32'), time, updates)
Example #41
def conv2d(x, kernel, strides=(1, 1), border_mode="valid", dim_ordering="th", image_shape=None, filter_shape=None):
    """
    Run on cuDNN if available.
    border_mode: string, "same" or "valid".
    """
    if dim_ordering not in {"th", "tf"}:
        raise Exception("Unknown dim_ordering " + str(dim_ordering))

    if dim_ordering == "tf":
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, rows, cols)
        # TF input shape: (samples, rows, cols, input_depth)
        # TH kernel shape: (depth, input_depth, rows, cols)
        # TF kernel shape: (rows, cols, input_depth, depth)
        x = x.dimshuffle((0, 3, 1, 2))
        kernel = kernel.dimshuffle((3, 2, 0, 1))
        if image_shape:
            image_shape = (image_shape[0], image_shape[3], image_shape[1], image_shape[2])
        if filter_shape:
            filter_shape = (filter_shape[3], filter_shape[2], filter_shape[0], filter_shape[1])

    if _on_gpu() and dnn.dnn_available():
        if border_mode == "same":
            assert strides == (1, 1)
            np_kernel = kernel.eval()
            pad_x = (np_kernel.shape[2] - strides[0]) // 2
            pad_y = (np_kernel.shape[3] - strides[1]) // 2
            conv_out = dnn.dnn_conv(img=x, kerns=kernel, border_mode=(pad_x, pad_y))
        else:
            conv_out = dnn.dnn_conv(img=x, kerns=kernel, border_mode=border_mode, subsample=strides)
    else:
        if border_mode == "same":
            th_border_mode = "full"
            assert strides == (1, 1)
        elif border_mode == "valid":
            th_border_mode = "valid"
        else:
            raise Exception("Border mode not supported: " + str(border_mode))

        conv_out = T.nnet.conv.conv2d(
            x, kernel, border_mode=th_border_mode, subsample=strides, image_shape=image_shape, filter_shape=filter_shape
        )
        if border_mode == "same":
            shift_x = (kernel.shape[2] - 1) // 2
            shift_y = (kernel.shape[3] - 1) // 2
            conv_out = conv_out[:, :, shift_x : x.shape[2] + shift_x, shift_y : x.shape[3] + shift_y]
    if dim_ordering == "tf":
        conv_out = conv_out.dimshuffle((0, 2, 3, 1))
    return conv_out
Example #42
def find_patch_matches(a, a_norm, b):
    '''For each patch in A, find the best matching patch in B'''
    convs = None
    if K.backend() == 'theano':
        # HACK: This was not being performed on the GPU for some reason.
        from theano.sandbox.cuda import dnn
        if dnn.dnn_available():
            convs = dnn.dnn_conv(img=a,
                                 kerns=b[:, :, ::-1, ::-1],
                                 border_mode='valid')
    if convs is None:
        convs = K.conv2d(a, b[:, :, ::-1, ::-1], border_mode='valid')
    argmax = K.argmax(convs / a_norm, axis=1)
    return argmax
Example #43
    def get_output(self):
        """Get output."""

        # Recurse
        inp, time, updates = get_input(self)
        if settings['payloads']:
            # Add payload from previous layer
            prev_layer = self.inbound_nodes[0].inbound_layers[0]
            inp = add_payloads(prev_layer, inp)

        if settings['online_normalization']:
            # Modify threshold if firing rate of layer too low
            updates.append((self.v_thresh, get_new_thresh(self, time)))

        # CALCULATE SYNAPTIC SUMMED INPUT
        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            conv_mode = 'conv' if self.filter_flip else 'cross'
            if border_mode == 'same':
                assert (self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=inp,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y),
                                        conv_mode=conv_mode)
            else:
                conv_out = dnn.dnn_conv(img=inp,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample,
                                        conv_mode=conv_mode)
        else:
            if border_mode == 'same':
                border_mode = 'full'
            conv_out = t.nnet.conv2d(inp,
                                     self.W,
                                     border_mode=border_mode,
                                     subsample=self.subsample,
                                     filter_flip=self.filter_flip)
            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:inp.shape[2] + shift_x,
                                    shift_y:inp.shape[3] + shift_y]
        self.impulse = conv_out + k.reshape(self.b, (1, self.nb_filter, 1, 1))
        output_spikes = update_neurons(self, time, updates)
        self.updates = updates
        return t.cast(output_spikes, 'float32')
Example #44
    def test_conv_gradw(self):
        if not dnn.dnn_available():
            raise SkipTest(dnn.dnn_available.msg)
        img = T.ftensor4('img')
        kerns = T.ftensor4('kerns')
        out = T.ftensor4('out')
        img_val = numpy.asarray(
            numpy.random.rand(2, 5, 6, 8),
            dtype='float32'
        )
        kern_vals = numpy.asarray(
            numpy.random.rand(2, 1, 5, 6),
            dtype='float32'
        )
        out_vals = numpy.zeros((3, 3, 1, 1), dtype='float32')

        for params in product(
            ['valid', 'full'],
            [(1, 1)],  # strides besides (1, 1)
            ['conv', 'cross']
        ):
            temp_img = img.dimshuffle(1, 0, 2, 3)
            temp_kerns = kerns
            if params[2] == 'conv':
                temp_kerns = temp_kerns[:, :, ::-1, ::-1]
            temp_kerns = temp_kerns.dimshuffle(1, 0, 2, 3)
            shape = (
                kern_vals.shape[1], img_val.shape[1],
                img_val.shape[2] - kern_vals.shape[2] + 1,
                img_val.shape[3] - kern_vals.shape[3] + 1
            )
            out_vals = numpy.zeros(shape, dtype='float32')
            desc = dnn.GpuDnnConvDesc(
                border_mode=params[0],
                subsample=params[1],
                conv_mode=params[2]
            )(temp_img.shape, out.shape)
            conv_grad_w = dnn.GpuDnnConvGradW()(
                temp_img,
                temp_kerns,
                out,
                desc,
            )
            self._compile_and_check(
                [temp_img, temp_kerns, out],
                [conv_grad_w],
                [img_val, kern_vals, out_vals],
                dnn.GpuDnnConvGradW
            )
Example #45
    def test_softmax(self):
        if not dnn.dnn_available():
            raise SkipTest(dnn.dnn_available.msg)
        t = T.ftensor4("t")
        rand_tensor = numpy.asarray(numpy.random.rand(5, 4, 3, 2), dtype="float32")
        self._compile_and_check(
            [t], [dnn.GpuDnnSoftmax("bc01", "accurate", "channel")(t)], [rand_tensor], dnn.GpuDnnSoftmax
        )

        self._compile_and_check(
            [t],
            [T.grad(dnn.GpuDnnSoftmax("bc01", "accurate", "channel")(t).mean(), t)],
            [rand_tensor],
            dnn.GpuDnnSoftmaxGrad,
        )
Example #46
    def test_pool_grad(self):
        if not dnn.dnn_available():
            raise SkipTest(dnn.dnn_available.msg)
        img = T.ftensor4("img")
        img_grad = T.ftensor4("img_grad")
        out = T.ftensor4("out")
        img_val = numpy.asarray(numpy.random.rand(2, 3, 4, 5), dtype="float32")
        img_grad_val = numpy.asarray(numpy.random.rand(2, 3, 4, 5), dtype="float32")
        out_val = numpy.asarray(numpy.random.rand(2, 3, 4, 5), dtype="float32")

        for params in product([(1, 1), (2, 2), (3, 3)], [(1, 1), (2, 2), (3, 3)], ["max", "average_inc_pad"]):
            desc = dnn.GpuDnnPoolDesc(ws=params[0], stride=params[1], mode=params[2])()
            pool_grad = dnn.GpuDnnPoolGrad()(img, out, img_grad, desc)
            self._compile_and_check(
                [img, img_grad, out], [pool_grad], [img_val, img_grad_val, out_val], dnn.GpuDnnPoolGrad
            )
Example #47
def pool3d(x, pool_size, strides=(1, 1, 1), border_mode='valid',
           dim_ordering='th', pool_mode='max'):
    # ====== dim ordering ====== #
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))
    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 4, 1, 2, 3))
    # ====== border mode ====== #
    if border_mode == 'same':
        w_pad = pool_size[0] - 2 if pool_size[0] % 2 == 1 else pool_size[0] - 1
        h_pad = pool_size[1] - 2 if pool_size[1] % 2 == 1 else pool_size[1] - 1
        d_pad = pool_size[2] - 2 if pool_size[2] % 2 == 1 else pool_size[2] - 1
        padding = (w_pad, h_pad, d_pad)
    elif border_mode == 'valid':
        padding = (0, 0, 0)
    elif isinstance(border_mode, (tuple, list)):
        padding = tuple(border_mode)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))
    # ====== pooling ====== #
    if _on_gpu() and dnn.dnn_available():
        pool_out = dnn.dnn_pool(x, pool_size,
                                stride=strides,
                                mode=pool_mode,
                                pad=padding)
    else:
        padding = padding[:2]
        # pooling over conv_dim2, conv_dim1 (the last two spatial dimensions)
        output = pool.pool_2d(input=x.dimshuffle(0, 1, 4, 3, 2),
                              ds=(pool_size[1], pool_size[0]),
                              st=(strides[1], strides[0]),
                              ignore_border=True,
                              padding=padding,
                              mode=pool_mode)
        # pooling over conv_dim3
        pool_out = pool.pool_2d(input=output.dimshuffle(0, 1, 4, 3, 2),
                                ds=(1, pool_size[2]),
                                st=(1, strides[2]),
                                ignore_border=True,
                                padding=padding,
                                mode=pool_mode)
    # ====== output ====== #
    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 4, 1))
    return pool_out
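A usage sketch under the same assumptions (Theano, 'th' ordering, so the input is (samples, channels, dim1, dim2, dim3)); the shapes are illustrative:

import numpy as np
import theano
import theano.tensor as T

x = T.TensorType('float32', (False,) * 5)('x')  # 5D input tensor
y = pool3d(x, pool_size=(2, 2, 2), strides=(2, 2, 2),
           border_mode='valid', dim_ordering='th', pool_mode='max')
f = theano.function([x], y)
print(f(np.random.rand(1, 4, 8, 8, 8).astype('float32')).shape)
# (1, 4, 4, 4, 4): each spatial dimension halved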
Example #48
    def test_conv_gradi(self):
        if not dnn.dnn_available():
            raise SkipTest(dnn.dnn_available.msg)
        img = T.ftensor4('img')
        kerns = T.ftensor4('kerns')
        out = T.ftensor4('out')
        img_val = numpy.asarray(
            numpy.random.rand(3, 4, 5, 6),
            dtype='float32'
        )
        kern_vals = numpy.asarray(
            numpy.random.rand(13, 14, 15, 16),
            dtype='float32'
        )

        for params in product(
            ['valid'],  # Should this work for 'full'?
            [(1, 1)],
            ['conv', 'cross']
        ):
            temp_kerns = kerns.dimshuffle(1, 0, 2, 3)
            shape = (
                img_val.shape[0], kern_vals.shape[1],
                img_val.shape[2] + kern_vals.shape[2] - 1,
                img_val.shape[3] + kern_vals.shape[3] - 1
            )
            out_vals = numpy.zeros(shape, dtype='float32')
            desc = dnn.GpuDnnConvDesc(
                border_mode=params[0],
                subsample=params[1],
                conv_mode=params[2]
            )(out.shape, temp_kerns.shape)
            conv_grad_i = dnn.GpuDnnConvGradI()(
                temp_kerns,
                img,
                out,
                desc,
            )
            self._compile_and_check(
                [temp_kerns, img, out],
                [conv_grad_i],
                [kern_vals, img_val, out_vals],
                dnn.GpuDnnConvGradI
            )
Example #49
    def test_conv_gradi(self):
        if not dnn.dnn_available():
            raise SkipTest(dnn.dnn_available.msg)
        img = T.ftensor4('img')
        kerns = T.ftensor4('kerns')
        img_val = numpy.asarray(
            numpy.random.rand(3, 4, 5, 6),
            dtype='float32'
        )
        kern_vals = numpy.asarray(
            numpy.random.rand(3, 4, 5, 6),
            dtype='float32'
        )

        for params in product(
            ['valid'],  # Should this work for 'full'?
            [(1, 1)],
            ['conv', 'cross']
        ):
            print(params)
            temp_kerns = kerns.dimshuffle(1, 0, 2, 3)
            shape = theano.tensor.stack(
                img.shape[0], temp_kerns.shape[1],
                img.shape[2] + temp_kerns.shape[2] - 1,
                img.shape[3] + temp_kerns.shape[3] - 1
            )
            desc = dnn.GpuDnnConvDesc(
                border_mode=params[0],
                subsample=params[1],
                conv_mode=params[2]
            )(shape, temp_kerns.shape)
            conv_grad_i = dnn.GpuDnnConvGradI()(
                temp_kerns,
                img,
                desc,
                shape[2],
                shape[3]
            )
            self._compile_and_check(
                [temp_kerns, img],
                [conv_grad_i],
                [kern_vals, img_val],
                dnn.GpuDnnConvGradI
            )
Example #50
    def setUp(self):
        """
        Set up a test image and filter to re-use.
        """
        skip_if_no_gpu()
        if not dnn_available():
            raise SkipTest('Skipping tests because cuDNN is not available')
        self.orig_floatX = theano.config.floatX
        theano.config.floatX = 'float32'
        self.image = np.random.rand(1, 1, 3, 3).astype(theano.config.floatX)
        self.image_tensor = tensor.tensor4()
        self.input_space = Conv2DSpace((3, 3), 1, axes=('b', 'c', 0, 1))
        self.filters_values = np.ones(
            (1, 1, 2, 2), dtype=theano.config.floatX
        )
        self.filters = sharedX(self.filters_values, name='filters')
        self.batch_size = 1

        self.cudnn2d = Cudnn2D(self.filters, self.batch_size, self.input_space)
Example #51
    def tcase(self, i, f, s, b, flip, provide_shape):
        if not dnn_available():
            raise SkipTest(cuda.dnn.dnn_available.msg)
        mode = mode_with_gpu
        o = self.get_output_shape(i, f, s, b)
        self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
                     verify_grad=True, mode=mode,
                     provide_shape=provide_shape, border_mode=b,
                     filter_flip=flip, target_op=GpuDnnConv)
        self.run_gradweight(inputs_shape=i, filters_shape=f,
                            output_shape=o, subsample=s,
                            verify_grad=True, mode=mode,
                            provide_shape=provide_shape, border_mode=b,
                            filter_flip=flip, target_op=GpuDnnConvGradW)
        self.run_gradinput(inputs_shape=i, filters_shape=f,
                           output_shape=o, subsample=s,
                           verify_grad=True, mode=mode,
                           provide_shape=provide_shape, border_mode=b,
                           filter_flip=flip, target_op=GpuDnnConvGradI)
Example #52
    def tcase_gi(self,
                 i,
                 f,
                 o,
                 s,
                 b,
                 flip,
                 provide_shape,
                 fd=(1, 1),
                 expect_error=False):
        if fd != (1, 1):
            raise SkipTest("No dilation implementation for cuDNN ConvOp.")
        if not dnn_available():
            raise SkipTest(cuda.dnn.dnn_available.msg)
        mode = mode_with_gpu

        if not expect_error:
            self.run_gradinput(inputs_shape=i,
                               filters_shape=f,
                               output_shape=o,
                               subsample=s,
                               verify_grad=True,
                               mode=mode,
                               provide_shape=provide_shape,
                               border_mode=b,
                               filter_flip=flip,
                               target_op=GpuDnnConvGradI,
                               filter_dilation=fd)
        else:
            assert_raises((RuntimeError, ValueError),
                          self.run_gradinput,
                          inputs_shape=i,
                          filters_shape=f,
                          output_shape=o,
                          subsample=s,
                          verify_grad=False,
                          mode=mode,
                          provide_shape=provide_shape,
                          border_mode=b,
                          filter_flip=flip,
                          target_op=GpuDnnConvGradI,
                          ref=None,
                          filter_dilation=fd)
Example #53
def create_A3C(inp_shape,
               output_num,
               stride=None,
               untie_biases=False,
               input_var=None):
    import theano
    from theano.sandbox.cuda import dnn

    # If cuDNN support is missing, fall back to the default convolution
    # (check adapted from lasagne's dnn.py).
    if (not theano.config.device.startswith("gpu")
            or not dnn.dnn_available()):
        import lasagne.layers.conv
        conv = lasagne.layers.conv.Conv2DLayer
    else:
        import lasagne.layers.dnn
        conv = lasagne.layers.dnn.Conv2DDNNLayer

    # Guard against the default stride=None, which would fail when indexed
    # below; (4, 2) is a hypothetical default matching the classic DQN/A3C
    # Atari network (8x8 conv at stride 4, 4x4 conv at stride 2).
    if stride is None:
        stride = (4, 2)

    # setup network layout
    l_in = lasagne.layers.InputLayer(inp_shape, input_var=input_var)
    l_hid1 = conv(l_in,
                  16, (8, 8),
                  stride=stride[0],
                  untie_biases=untie_biases)

    l_hid2 = conv(l_hid1,
                  32, (4, 4),
                  stride=stride[1],
                  untie_biases=untie_biases)

    l_hid3 = lasagne.layers.DenseLayer(l_hid2, 256)

    l_value = lasagne.layers.DenseLayer(
        l_hid3, 1, nonlinearity=lasagne.nonlinearities.linear)
    l_policy = lasagne.layers.DenseLayer(
        l_hid3, output_num, nonlinearity=lasagne.nonlinearities.softmax)

    return {
        'l_in': l_in,
        'l_hid1': l_hid1,
        'l_hid2': l_hid2,
        'l_hid3': l_hid3,
        'l_value': l_value,
        'l_policy': l_policy
    }
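A hedged usage sketch (the input shape, strides and action count are illustrative; `get_output` is the standard lasagne accessor):

import theano.tensor as T
import lasagne

states = T.ftensor4('states')  # e.g. a stack of 4 grayscale 84x84 frames
net = create_A3C((None, 4, 84, 84), output_num=6,
                 stride=(4, 2), input_var=states)

policy = lasagne.layers.get_output(net['l_policy'])  # softmax over actions
value = lasagne.layers.get_output(net['l_value'])    # scalar state value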
Example #54
    def test_pool_grad(self):
        if not dnn.dnn_available():
            raise SkipTest(dnn.dnn_available.msg)
        img = T.ftensor4('img')
        img_grad = T.ftensor4('img_grad')
        out = T.ftensor4('out')
        img_val = numpy.asarray(
            numpy.random.rand(2, 3, 4, 5),
            dtype='float32'
        )
        img_grad_val = numpy.asarray(
            numpy.random.rand(2, 3, 4, 5),
            dtype='float32'
        )
        out_val = numpy.asarray(
            numpy.random.rand(2, 3, 4, 5),
            dtype='float32'
        )

        for params in product(
            [(1, 1), (2, 2), (3, 3)],
            [(1, 1), (2, 2), (3, 3)],
            ['max', 'average_inc_pad']
        ):
            desc = dnn.GpuDnnPoolDesc(
                ws=params[0],
                stride=params[1],
                mode=params[2]
            )()
            pool_grad = dnn.GpuDnnPoolGrad()(
                img,
                out,
                img_grad,
                desc
            )
            self._compile_and_check(
                [img, img_grad, out],
                [pool_grad],
                [img_val, img_grad_val, out_val],
                dnn.GpuDnnPoolGrad
            )
Example #55
    def get_output(self, train=False):
        X = self.get_input(train)
        newshape = (X.shape[0] * X.shape[1], X.shape[2], X.shape[3],
                    X.shape[4])
        Y = theano.tensor.reshape(
            X, newshape)  # collapse num_samples and num_timesteps
        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert (self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=Y,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y))
            else:
                conv_out = dnn.dnn_conv(img=Y,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                border_mode = 'full'

            conv_out = theano.tensor.nnet.conv.conv2d(Y,
                                                      self.W,
                                                      border_mode=border_mode,
                                                      subsample=self.subsample)

            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:Y.shape[2] + shift_x,
                                    shift_y:Y.shape[3] + shift_y]

        output = self.activation(conv_out +
                                 self.b.dimshuffle('x', 0, 'x', 'x'))
        newshape = (X.shape[0], X.shape[1], output.shape[1], output.shape[2],
                    output.shape[3])
        return theano.tensor.reshape(output, newshape)
Example #56
    def tcase(self, i, f, s, b, flip, provide_shape, fd=(1, 1)):
        if fd != (1, 1):
            raise SkipTest("No dilation implementation for cuDNN ConvOp.")
        if not dnn_available():
            raise SkipTest(cuda.dnn.dnn_available.msg)
        mode = mode_with_gpu
        o = self.get_output_shape(i, f, s, b, fd)
        self.run_fwd(inputs_shape=i,
                     filters_shape=f,
                     subsample=s,
                     verify_grad=True,
                     mode=mode,
                     provide_shape=provide_shape,
                     border_mode=b,
                     filter_flip=flip,
                     target_op=GpuDnnConv,
                     filter_dilation=fd)
        self.run_gradweight(inputs_shape=i,
                            filters_shape=f,
                            output_shape=o,
                            subsample=s,
                            verify_grad=True,
                            mode=mode,
                            provide_shape=provide_shape,
                            border_mode=b,
                            filter_flip=flip,
                            target_op=GpuDnnConvGradW,
                            filter_dilation=fd)
        self.run_gradinput(inputs_shape=i,
                           filters_shape=f,
                           output_shape=o,
                           subsample=s,
                           verify_grad=True,
                           mode=mode,
                           provide_shape=provide_shape,
                           border_mode=b,
                           filter_flip=flip,
                           target_op=GpuDnnConvGradI,
                           filter_dilation=fd)
Example #57
def conv2d(x,
           kernel,
           strides=(1, 1),
           border_mode='valid',
           dim_ordering='th',
           image_shape=None,
           filter_shape=None):
    '''
    Run on cuDNN if available.
    border_mode: string, "same" or "valid".
    '''
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))

    if dim_ordering == 'tf':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, rows, cols)
        # TF input shape: (samples, rows, cols, input_depth)
        # TH kernel shape: (depth, input_depth, rows, cols)
        # TF kernel shape: (rows, cols, input_depth, depth)
        x = x.dimshuffle((0, 3, 1, 2))
        kernel = kernel.dimshuffle((3, 2, 0, 1))
        if image_shape:
            image_shape = (image_shape[0], image_shape[3], image_shape[1],
                           image_shape[2])
        if filter_shape:
            filter_shape = (filter_shape[3], filter_shape[2], filter_shape[0],
                            filter_shape[1])

    if _on_gpu() and dnn.dnn_available():
        if border_mode == 'same':
            np_kernel = kernel.eval()
            assert strides[0] <= np_kernel.shape[2], \
                'strides should be no greater than the convolution window.'
            assert strides[1] <= np_kernel.shape[3], \
                'strides should be no greater than the convolution window.'
            conv_out = dnn.dnn_conv(img=x, kerns=kernel, border_mode='full')
            shift_x = (np_kernel.shape[2] - strides[0]) // 2
            shift_y = (np_kernel.shape[3] - strides[1]) // 2
            expected_width = (x.shape[2] + strides[0] - 1) // strides[0]
            expected_height = (x.shape[3] + strides[1] - 1) // strides[1]

            conv_out = conv_out[:, :, shift_x:shift_x + expected_width,
                                shift_y:shift_y + expected_height]
        else:
            conv_out = dnn.dnn_conv(img=x,
                                    kerns=kernel,
                                    border_mode=border_mode,
                                    subsample=strides)
    else:
        if border_mode == 'same':
            th_border_mode = 'full'
            np_kernel = kernel.eval()
            assert strides[0] <= np_kernel.shape[2], \
                'strides should be no greater than the convolution window.'
            assert strides[1] <= np_kernel.shape[3], \
                'strides should be no greater than the convolution window.'
        elif border_mode == 'valid':
            th_border_mode = 'valid'
        else:
            raise Exception('Border mode not supported: ' + str(border_mode))

        conv_out = T.nnet.conv.conv2d(x,
                                      kernel,
                                      border_mode=th_border_mode,
                                      subsample=strides,
                                      image_shape=image_shape,
                                      filter_shape=filter_shape)
        if border_mode == 'same':
            shift_x = (np_kernel.shape[2] - strides[0]) // 2
            shift_y = (np_kernel.shape[3] - strides[1]) // 2
            expected_width = (x.shape[2] + strides[0] - 1) // strides[0]
            expected_height = (x.shape[3] + strides[1] - 1) // strides[1]

            conv_out = conv_out[:, :, shift_x:shift_x + expected_width,
                                shift_y:shift_y + expected_height]
    if dim_ordering == 'tf':
        conv_out = conv_out.dimshuffle((0, 2, 3, 1))
    return conv_out
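The 'same' emulation above runs a 'full' convolution (output width W + K - 1 for input width W and kernel width K) and then crops the centred window: it skips shift = (K - s) // 2 leading columns and keeps ceil(W / s) columns, which is exactly the shift_x/expected_width arithmetic in the code. A quick numeric check of that arithmetic:

W, K, s = 32, 5, 1           # input width, kernel width, stride
full = W + K - 1             # 'full' convolution width: 36
shift = (K - s) // 2         # leading columns to drop: 2
expected = (W + s - 1) // s  # 'same' output width, ceil(W / s): 32
assert shift + expected <= full  # the cropped window fits: 2 + 32 <= 36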
Example #58
import theano
from theano.sandbox.cuda import dnn

from .. import init
from .. import nonlinearities
from .base import Layer

from .conv import conv_output_length, BaseConvLayer
from .pool import pool_output_length
from ..utils import as_tuple

if not theano.sandbox.cuda.cuda_enabled:
    raise ImportError(
        "requires GPU support -- see http://lasagne.readthedocs.org/en/"
        "latest/user/installation.html#gpu-support")  # pragma: no cover
elif not dnn.dnn_available():
    raise ImportError(
        "cuDNN not available: %s\nSee http://lasagne.readthedocs.org/en/"
        "latest/user/installation.html#cudnn" %
        dnn.dnn_available.msg)  # pragma: no cover

__all__ = [
    "Pool2DDNNLayer",
    "MaxPool2DDNNLayer",
    "Pool3DDNNLayer",
    "MaxPool3DDNNLayer",
    "Conv2DDNNLayer",
    "Conv3DDNNLayer",
    "SpatialPyramidPoolingDNNLayer",
]
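Since this module raises ImportError at import time whenever the GPU or cuDNN is missing, downstream code usually guards the import and falls back to the portable layers; a minimal sketch of that common pattern (not from this file):

try:
    from lasagne.layers import dnn
    Conv2DLayer = dnn.Conv2DDNNLayer        # cuDNN-backed implementation
except ImportError:
    from lasagne.layers import Conv2DLayer  # portable default implementation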