Example #1
0
    def test_constant_input(self):
        # The AbstractConv Ops must accept a constant tensor in place of
        # any of their symbolic inputs, and the gradient wrt the remaining
        # symbolic variable must keep that variable's type.
        inp = self.input
        filt = self.filters
        tgrad = self.topgrad
        const = self.constant_tensor
        oshape = tensor.lvector()

        def check(grad, var):
            # Gradient type must match the variable it was taken wrt.
            assert grad.type == var.type, (grad, grad.type, var, var.type)

        # Forward Op, constant in either position.
        check(theano.grad(conv.conv2d(const, filt).sum(), wrt=filt), filt)
        check(theano.grad(conv.conv2d(inp, const).sum(), wrt=inp), inp)

        # Grad wrt weights, constant in either position.
        gw = conv.AbstractConv2d_gradWeights()(const, tgrad, oshape)
        check(theano.grad(gw.sum(), wrt=tgrad), tgrad)
        gw = conv.AbstractConv2d_gradWeights()(inp, const, oshape)
        check(theano.grad(gw.sum(), wrt=inp), inp)

        # Grad wrt inputs, constant in either position.
        gi = conv.AbstractConv2d_gradInputs()(const, tgrad, oshape)
        check(theano.grad(gi.sum(), wrt=tgrad), tgrad)
        gi = conv.AbstractConv2d_gradInputs()(filt, const, oshape)
        check(theano.grad(gi.sum(), wrt=filt), filt)
Example #2
0
    def run_gradweight(self,
                       inputs_shape,
                       filters_shape,
                       output_shape,
                       ref=dnn_gradweight,
                       subsample=(1, 1),
                       filter_flip=True,
                       verify_grad=True,
                       mode=mode_without_gpu,
                       border_mode='valid',
                       device='cpu',
                       provide_shape=False):
        # Compare AbstractConv2d_gradWeights against a reference
        # implementation (`ref`, dnn_gradweight by default) on random
        # float32 data, then optionally verify its gradient numerically.
        inputs_val = numpy.random.random(inputs_shape).astype('float32')
        output_val = numpy.random.random(output_shape).astype('float32')

        if device == 'gpu':
            inputs = gpu_shared(inputs_val)
            output = gpu_shared(output_val)
        else:
            inputs = theano.tensor.as_tensor_variable(cpu_shared(inputs_val))
            output = theano.tensor.as_tensor_variable(cpu_shared(output_val))

        # Only advertise the static shapes when the caller requests it.
        imshp = inputs_shape if provide_shape else None
        kshp = filters_shape if provide_shape else None
        # The reference takes 'conv' for flipped filters, 'cross' otherwise.
        conv_mode = 'conv' if filter_flip else 'cross'

        grad_w_op = conv.AbstractConv2d_gradWeights(border_mode=border_mode,
                                                    filter_flip=filter_flip,
                                                    subsample=subsample,
                                                    imshp=imshp,
                                                    kshp=kshp)
        out_expr = grad_w_op(inputs, output, filters_shape[-2:])
        ref_expr = ref(inputs,
                       output,
                       filters_shape,
                       border_mode=border_mode,
                       subsample=subsample,
                       conv_mode=conv_mode)
        f = theano.function([], out_expr, mode)
        f_ref = theano.function([], ref_expr, mode)
        res_ref = numpy.array(f_ref())
        res = numpy.array(f())
        utt.assert_allclose(res_ref, res)

        def abstract_conv2d_gradweight(inputs_val, output_val):
            # NOTE(review): filter_flip is not forwarded here, only
            # border_mode and subsample — presumably intentional for the
            # numeric gradient check; confirm against the reference suite.
            conv_op = conv.AbstractConv2d_gradWeights(border_mode=border_mode,
                                                      subsample=subsample)
            return conv_op(inputs_val, output_val, filters_shape[-2:])

        if verify_grad:
            utt.verify_grad(abstract_conv2d_gradweight,
                            [inputs_val, output_val],
                            mode=mode,
                            eps=1)
Example #3
0
    def run_gradweight(self, inputs_shape, filters_shape, output_shape,
                       ref=conv_corr_gw, subsample=(1, 1), filter_flip=True,
                       verify_grad=True, mode=None, border_mode='valid',
                       provide_shape=False, target_op=None, check_trace=False):
        # Compare AbstractConv2d_gradWeights with the reference `ref`
        # (conv_corr_gw by default); optionally assert that the compiled
        # graph uses `target_op` (and has a clean stack trace), then
        # verify the gradient numerically.
        inputs_val = numpy.random.random(inputs_shape).astype('float32')
        output_val = numpy.random.random(output_shape).astype('float32')

        inputs = self.shared(inputs_val)
        output = self.shared(output_val)

        # Static shapes are only provided on request.
        imshp = inputs_shape if provide_shape else None
        kshp = filters_shape if provide_shape else None
        # The reference takes 'conv' for flipped filters, 'cross' otherwise.
        conv_mode = 'conv' if filter_flip else 'cross'

        gw_op = conv.AbstractConv2d_gradWeights(border_mode=border_mode,
                                                filter_flip=filter_flip,
                                                subsample=subsample,
                                                imshp=imshp, kshp=kshp)
        out_expr = gw_op(inputs, output, filters_shape[-2:])
        ref_expr = ref(inputs, output,
                       filters_shape,
                       border_mode=border_mode,
                       subsample=subsample,
                       conv_mode=conv_mode)
        f = theano.function([], out_expr, mode=mode)
        f_ref = theano.function([], ref_expr, mode='FAST_RUN')

        if target_op is not None:
            # The optimized graph must actually contain the expected Op.
            assert any(isinstance(node.op, target_op)
                       for node in f.maker.fgraph.toposort())
            if check_trace:
                self.assertTrue(check_stack_trace(f, ops_to_check=target_op))

        res_ref = numpy.array(f_ref())
        res = numpy.array(f())
        utt.assert_allclose(res_ref, res)

        def abstract_conv2d_gradweight(inputs_val, output_val):
            # NOTE(review): filter_flip is not forwarded here — presumably
            # intentional for the numeric gradient check; confirm upstream.
            conv_op = conv.AbstractConv2d_gradWeights(border_mode=border_mode,
                                                      subsample=subsample)
            return conv_op(inputs_val, output_val, filters_shape[-2:])

        if verify_grad:
            utt.verify_grad(abstract_conv2d_gradweight,
                            [inputs_val, output_val],
                            mode=mode, eps=1)
    def test_grad_types(self):
        # Exercises the AbstractConv Ops' gradient graphs directly (no
        # optimizations): the gradient taken wrt each variable must come
        # back with that variable's own type, for every CPU/GPU mix.
        cpu_input = tensor.ftensor4()
        cpu_filters = tensor.ftensor4()
        cpu_topgrad = tensor.ftensor4()
        gpu_input = gpu_ftensor4()
        gpu_filters = gpu_ftensor4()
        gpu_topgrad = gpu_ftensor4()

        out_shape = tensor.lvector()

        def check(grad, var):
            # Gradient type must match the variable it was taken wrt.
            assert grad.type == var.type, (grad, grad.type, var, var.type)

        # Gradient of the forward conv2d.
        for inp, filt in itertools.product((cpu_input, gpu_input),
                                           (cpu_filters, gpu_filters)):
            out = conv.conv2d(inp, filt)
            g_inp, g_filt = theano.grad(out.sum(), wrt=(inp, filt))
            check(g_inp, inp)
            check(g_filt, filt)

        # Gradient of gradweight.
        for inp, tg in itertools.product((cpu_input, gpu_input),
                                         (cpu_topgrad, gpu_topgrad)):
            g_filt = conv.AbstractConv2d_gradWeights()(inp, tg, out_shape)
            g_inp, g_tg = theano.grad(g_filt.sum(), wrt=(inp, tg))
            check(g_inp, inp)
            check(g_tg, tg)

        # Gradient of gradinputs.
        for filt, tg in itertools.product((cpu_filters, gpu_filters),
                                          (cpu_topgrad, gpu_topgrad)):
            g_inp = conv.AbstractConv2d_gradInputs()(filt, tg, out_shape)
            g_filt, g_tg = theano.grad(g_inp.sum(), wrt=(filt, tg))
            check(g_filt, filt)
            check(g_tg, tg)
Example #5
0
    def test_grad_types(self):
        # Tests the behaviour of the AbstractConv Ops themselves (not
        # their optimizations): every gradient must carry the type of the
        # variable it was taken wrt.
        inp = self.input
        filt = self.filters
        tgrad = self.topgrad

        out_shape = tensor.lvector()

        def check(grad, var):
            # Gradient type must match the variable it was taken wrt.
            assert grad.type == var.type, (grad, grad.type, var, var.type)

        # Forward conv2d.
        out = conv.conv2d(inp, filt)
        g_inp, g_filt = theano.grad(out.sum(), wrt=(inp, filt))
        check(g_inp, inp)
        check(g_filt, filt)

        # Grad wrt weights.
        g_filt = conv.AbstractConv2d_gradWeights()(inp, tgrad, out_shape)
        g_inp, g_tgrad = theano.grad(g_filt.sum(), wrt=(inp, tgrad))
        check(g_inp, inp)
        check(g_tgrad, tgrad)

        # Grad wrt inputs.
        g_inp = conv.AbstractConv2d_gradInputs()(filt, tgrad, out_shape)
        g_filt, g_tgrad = theano.grad(g_inp.sum(), wrt=(filt, tgrad))
        check(g_filt, filt)
        check(g_tgrad, tgrad)
Example #6
0
 def abstract_conv2d_gradweight(inputs_val, output_val):
     # Build a gradWeights graph for the given inputs and output
     # gradient; border_mode, subsample and filters_shape come from the
     # enclosing scope (closure).
     op = conv.AbstractConv2d_gradWeights(border_mode=border_mode,
                                          subsample=subsample)
     return op(inputs_val, output_val, filters_shape[-2:])