def deconvDown(self, input_down, conv_out_up, pooled_out):
        """Reverse one pooling stage using a tanh non-linearity, then deconvolve.

        Wraps the raw arrays as Theano shared variables, squashes the incoming
        signal with tanh, pushes it back through the max-pooling positions via
        DownsampleFactorMaxGrad, and feeds the unpooled map to self.convDown.

        NOTE(review): this definition is immediately shadowed by the second
        `deconvDown` below (relu variant), so this one is effectively dead
        code — confirm which non-linearity is actually intended.
        """
        down_var = theano.shared(input_down, borrow=True)
        up_var = theano.shared(conv_out_up, borrow=True)

        # Squash activations before routing them back down.
        activated = T.tanh(down_var)

        # Presumably pooled_out carries the max positions from the forward
        # pass, which decide where the signal lands during unpooling.
        unpool = DownsampleFactorMaxGrad(self.poolsize, ignore_border=True, st=None)
        up_pooled = unpool(
            up_var.astype(theano.config.floatX),
            pooled_out,
            activated.astype(theano.config.floatX),
        )

        return self.convDown(up_pooled.eval())
    def deconvDown(self, input_down, conv_out_up, pooled_out, activation='relu'):
        """Reverse one pooling stage and deconvolve the result.

        This is the live definition of `deconvDown` (it shadows the earlier
        tanh-only variant). The `activation` parameter generalizes it to
        cover both variants; the default preserves the original relu
        behaviour, so existing callers are unaffected.

        Parameters
        ----------
        input_down : array-like
            Signal arriving from the layer above, to be routed downward.
        conv_out_up : array-like
            Convolution output recorded on the upward (forward) pass.
        pooled_out : Theano variable
            Pooled output from the forward pass; it determines which
            positions receive the signal during unpooling.
        activation : str, optional
            Non-linearity applied before unpooling: 'relu' (default) or
            'tanh'.

        Returns
        -------
        Whatever self.convDown returns for the unpooled map.

        Raises
        ------
        ValueError
            If `activation` is not 'relu' or 'tanh'.
        """
        input_down = theano.shared(input_down, borrow=True)
        conv_out_up = theano.shared(conv_out_up, borrow=True)

        # Apply the chosen non-linearity before sending the signal back down.
        if activation == 'relu':
            input_down = T.maximum(0.0, input_down)
        elif activation == 'tanh':
            input_down = T.tanh(input_down)
        else:
            raise ValueError(
                "activation must be 'relu' or 'tanh', got %r" % (activation,))

        # Reuse the max-pooling gradient op for unpooling: it only depends on
        # which value won the max in the upward process.
        up_pooled = DownsampleFactorMaxGrad(self.poolsize, ignore_border=True, st=None)(
            conv_out_up.astype(theano.config.floatX),
            pooled_out,
            input_down.astype(theano.config.floatX),
        )

        return self.convDown(up_pooled.eval())