Пример #1
0
    def deconvDown(self, input_down, conv_out_up, pooled_out):
        """Propagate activations downward through unpooling and deconvolution.

        Parameters
        ----------
        input_down : numpy array
            Activations coming from the layer above; wrapped in a shared
            variable and squashed with ``tanh`` before unpooling.
        conv_out_up : numpy array
            Convolution output recorded on the upward pass; wrapped in a
            shared variable.  Together with ``pooled_out`` it tells the
            pooling-gradient op which positions held the max.
        pooled_out : theano variable
            Pooled output from the upward pass.

        Returns
        -------
        The result of ``self.convDown`` applied to the unpooled activations.
        """
        input_down = theano.shared(input_down, borrow=True)
        conv_out_up = theano.shared(conv_out_up, borrow=True)
        input_down = T.tanh(input_down)

        # The max-pool gradient op routes each incoming value back to the
        # position that produced the max on the upward pass, i.e. unpooling.
        up_pooled = DownsampleFactorMaxGrad(
            self.poolsize, ignore_border=True, st=None)(
                conv_out_up.astype(theano.config.floatX),
                pooled_out,
                input_down.astype(theano.config.floatX))

        # .eval() forces the symbolic graph to concrete values before the
        # downward convolution consumes them.
        output = self.convDown(up_pooled.eval())

        return output
Пример #2
0
    def test_opt_max_to_average(self):
        """With the 'local_average_pool_grad' optimization enabled, the grad
        op must compile to AveragePoolGrad for every averaging mode, while
        mode='max' must still compile to MaxPoolGrad."""
        im = theano.tensor.tensor4()
        maxout = theano.tensor.tensor4()
        grad = theano.tensor.tensor4()

        compilation_mode = theano.compile.get_default_mode().including(
            'local_average_pool_grad')

        for mode in ['max', 'sum', 'average_inc_pad', 'average_exc_pad']:
            pool_grad = DownsampleFactorMaxGrad(
                ds=(3, 3), ignore_border=False, mode=mode)(im, maxout, grad)
            f = theano.function([im, maxout, grad], pool_grad,
                                mode=compilation_mode)

            # Collect the ops once, then check which grad variant survived
            # compilation.
            ops = [node.op for node in f.maker.fgraph.toposort()]
            has_max_grad = any(isinstance(op, MaxPoolGrad) for op in ops)
            has_avg_grad = any(isinstance(op, AveragePoolGrad) for op in ops)

            if mode == 'max':
                assert has_max_grad
                assert not has_avg_grad
            else:
                assert not has_max_grad
                assert has_avg_grad
Пример #3
0
    def test_DownsampleFactorMaxGrad(self):
        """Compiling DownsampleFactorMaxGrad with default optimizations must
        yield a MaxPoolGrad node only for mode='max' and an AveragePoolGrad
        node for every other mode."""
        im = theano.tensor.tensor4()
        maxout = theano.tensor.tensor4()
        grad = theano.tensor.tensor4()

        for mode in ['max', 'sum', 'average_inc_pad', 'average_exc_pad']:
            grad_output = DownsampleFactorMaxGrad(
                ds=(3, 3), ignore_border=False, mode=mode)(im, maxout, grad)
            f = theano.function([im, maxout, grad], grad_output,
                                on_unused_input='ignore')

            node_ops = [n.op for n in f.maker.fgraph.toposort()]
            if mode == 'max':
                assert any(isinstance(op, MaxPoolGrad) for op in node_ops)
                assert not any(isinstance(op, AveragePoolGrad)
                               for op in node_ops)
            else:
                assert not any(isinstance(op, MaxPoolGrad)
                               for op in node_ops)
                assert any(isinstance(op, AveragePoolGrad)
                           for op in node_ops)
Пример #4
0
    def test_infer_shape(self):
        """Shape inference for DownsampleFactorMax and its gradient must
        match the shapes the compiled ops actually produce."""
        image = tensor.dtensor4()
        maxout = tensor.dtensor4()
        gz = tensor.dtensor4()
        rng = numpy.random.RandomState(utt.fetch_seed())
        maxpoolshps = ((1, 1), (2, 2), (3, 3), (2, 3), (3, 2))

        image_val = rng.rand(4, 6, 7, 9)
        # For each pool shape: [shape with ignore_border=True,
        #                       shape with ignore_border=False]
        out_shapes = [
            [[4, 6, 7, 9], [4, 6, 7, 9]],
            [[4, 6, 3, 4], [4, 6, 4, 5]],
            [[4, 6, 2, 3], [4, 6, 3, 3]],
            [[4, 6, 3, 3], [4, 6, 4, 3]],
            [[4, 6, 2, 4], [4, 6, 3, 5]],
        ]

        for maxpoolshp, shape_pair in zip(maxpoolshps, out_shapes):
            for ignore_border, out_shape in zip((True, False), shape_pair):

                # Shapes inferred for the forward pooling op.
                forward = DownsampleFactorMax(
                    maxpoolshp, ignore_border=ignore_border)(image)
                self._compile_and_check(
                    [image], [forward], [image_val], DownsampleFactorMax)

                # Shapes inferred for the pooling-gradient op.
                maxout_val = rng.rand(*out_shape)
                gz_val = rng.rand(*out_shape)
                backward = DownsampleFactorMaxGrad(
                    maxpoolshp, ignore_border=ignore_border)(
                        image, maxout, gz)
                self._compile_and_check(
                    [image, maxout, gz], [backward],
                    [image_val, maxout_val, gz_val],
                    DownsampleFactorMaxGrad,
                    warn=False)
Пример #5
0
 def mp(input, grad):
     """Pool *input*, then push *grad* back through the matching
     pooling-gradient op (maxpoolshp, ignore_border and stride come from
     the enclosing scope)."""
     pooled = DownsampleFactorMax(maxpoolshp,
                                  ignore_border=ignore_border,
                                  st=stride)(input)
     return DownsampleFactorMaxGrad(maxpoolshp,
                                    ignore_border=ignore_border,
                                    st=stride)(input, pooled, grad)
Пример #6
0
    def __init__(self, bs):
        """2x2 max-pooling layer over the output of the previous layer *bs*.

        Parameters
        ----------
        bs : layer object exposing ``output`` (a theano variable) and
            ``output_shape`` (a 4-tuple of ints: batch, channels, h, w).

        Exposes ``self.upop``, which routes a pooled-size tensor back to
        the pre-pooling positions that held the max values (unpooling).
        """
        mp = max_pool_2d(bs.output, (2, 2))
        self.output = mp
        # Spatial dims are halved.  Floor division keeps the shape entries
        # integers (plain '/' would produce floats under Python 3, while
        # '//' is identical to '/' for Python 2 ints).
        # NOTE(review): for odd spatial dims this floor may disagree with
        # max_pool_2d's actual output size — confirm inputs are even-sized.
        self.output_shape = (bs.output_shape[0], bs.output_shape[1],
                             bs.output_shape[2] // 2, bs.output_shape[3] // 2)

        # The max-pool gradient op doubles as an unpooling operator: given
        # the pre-pool input and the pooled output it scatters values back
        # to the argmax positions.
        gradop = DownsampleFactorMaxGrad((2, 2),
                                         st=(2, 2),
                                         padding=(0, 0),
                                         ignore_border=False)
        self.upop = lambda v: gradop(bs.output, mp, v)
        self.origshape = bs.output_shape
Пример #7
0
 def mp(input, grad):
     """Apply the pooling op to *input*, then feed its output together with
     *grad* into the corresponding pooling-gradient op.  The pooling
     parameters (maxpoolsize, stridesize, paddingsize) are closure
     variables from the enclosing scope."""
     pool_op = DownsampleFactorMax(maxpoolsize,
                                   ignore_border=True,
                                   st=stridesize,
                                   padding=paddingsize)
     pooled = pool_op(input)
     unpool_op = DownsampleFactorMaxGrad(maxpoolsize,
                                         ignore_border=True,
                                         st=stridesize,
                                         padding=paddingsize)
     return unpool_op(input, pooled, grad)
    def deconvDown(self, input_down, conv_out_up, pooled_out):
        """Propagate activations downward: rectify, unpool, deconvolve.

        Parameters
        ----------
        input_down : numpy array
            Activations from the layer above; wrapped in a shared variable
            and rectified (ReLU) before unpooling.
        conv_out_up : numpy array
            Convolution output recorded on the upward pass; wrapped in a
            shared variable.  Together with ``pooled_out`` it determines
            which positions held the max during pooling.
        pooled_out : theano variable
            Pooled output from the upward pass.

        Returns
        -------
        The result of ``self.convDown`` applied to the unpooled activations.
        """
        input_down = theano.shared(input_down, borrow=True)
        conv_out_up = theano.shared(conv_out_up, borrow=True)

        # Rectify: keep only positive activations before unpooling.
        input_down = T.maximum(0.0, input_down)

        # The max-pool gradient op can serve as the unpooling step since it
        # only depends on which positions were selected on the upward pass.
        up_pooled = DownsampleFactorMaxGrad(
            self.poolsize, ignore_border=True, st=None)(
                conv_out_up.astype(theano.config.floatX),
                pooled_out,
                input_down.astype(theano.config.floatX))

        # .eval() forces the symbolic graph to concrete values before the
        # downward convolution consumes them.
        output = self.convDown(up_pooled.eval())

        return output
Пример #9
0
def max_pool_2d_same_size(input, patch_size):
    """Max-pool *input* over *patch_size* windows while keeping the output
    the same size as the input.

    The pooled values are scattered back to their argmax positions via the
    pooling-gradient op, so non-maximal positions come out as zeros.
    """
    pooled = DownsampleFactorMax(patch_size, True)(input)
    restored = DownsampleFactorMaxGrad(patch_size, True)(input, pooled, pooled)
    return restored