def build_theano_functions(self, bottom, top):
        """Compile Theano forward/backward functions for p-norm scaling:
        z = x * (sum(x**p, axis))**(-1/p).

        NOTE(review): the norm is built from x**p without abs(), so it is
        only a true p-norm for even p or non-negative inputs — confirm.
        """
        from caffe_helper.theano_util import init_theano
        init_theano()

        import theano as tn
        import theano.tensor as T

        # Keep the exponent in float32 so the whole graph stays single-precision.
        exponent = np.float32(self.p_)
        norm_axes = self.axis_
        if norm_axes is None:
            # Default: reduce over every non-batch axis.
            norm_axes = tuple(range(1, len(bottom[0].shape)))

        # Symbolic tensors shaped like the bottom blob.
        make_tensor = T.TensorType('float32', [False] * len(bottom[0].shape))
        s_x = make_tensor('x')  # bottom data
        s_dz = make_tensor('dz')  # top diff
        s_norm = (s_x**exponent).sum(norm_axes, keepdims=True)
        s_z = s_x * (s_norm**(np.float32(-1. / exponent)))
        # See http://goo.gl/wIVRsP for `tn.Out(x, borrow=True)`
        self.f_forward = tn.function([s_x], tn.Out(s_z, borrow=True))

        # Backward: differentiate the scalar surrogate sum(dz * z) wrt x.
        s_surrogate = (s_dz * s_z).sum()
        s_grad = tn.grad(s_surrogate, wrt=s_x)
        self.f_backward = tn.function([s_x, s_dz], tn.Out(s_grad, borrow=True))
# Example (Esempio) n. 2, 0 votes — separator left by the code-scraping tool; commented out so the file parses
    def setup(self, bottom, top):
        """Compile Theano forward/backward for a masked sigmoid
        cross-entropy loss.

        bottom[0] holds logits y in (-inf, inf); bottom[1] holds targets t
        in {-1, 0, 1}, where t == 0 marks elements excluded from the loss.
        """
        from caffe_helper.theano_util import init_theano
        init_theano()

        import theano as tn
        import theano.tensor as T
        assert len(bottom) == 2
        assert len(top) == 1

        s_y = T.matrix('y')  # y in [-inf, inf]
        s_t = T.matrix('t')  # t in {-1, 0, 1} where 0 is ignored
        s_dloss = T.scalar('dloss')

        s_mask = abs(s_t)  # 1 on labeled entries, 0 on ignored ones
        # Clamp the labeled count at 1 to avoid 0/0 when everything is ignored.
        s_count = T.maximum(T.sum(s_mask), 1)
        # Numerically stable rewrite of mean(|t| * log(1 + exp(-y*t)));
        # the naive form below overflows exp() for large |y|.
        # s_loss = T.mean(abs(s_t) * T.log1p(T.exp(-s_y * s_t)))  # unstable
        s_stable = s_y * ((s_t >= 0) - (s_y >= 0)) - T.log1p(T.exp(-abs(s_y)))
        s_loss = -T.sum(s_mask * s_stable) / s_count
        # Backward: dL/dy = dloss * |t| * (sigmoid(y) - [t >= 0]) / count.
        s_p = 1 / (1 + T.exp(-s_y))
        s_dy = s_dloss * s_mask * (s_p - (s_t >= 0)) / s_count

        def _borrowed(sym):
            # Borrowed output: skips the defensive copy on return.
            return tn.Out(sym, borrow=True)

        self.tn_forward = tn.function([s_y, s_t], s_loss)
        self.tn_backward = tn.function([s_y, s_t, s_dloss], _borrowed(s_dy))
    def build_theano_functions(self, bottom, top):
        """Compile Theano forward/backward functions for p-norm
        normalization: z = x * (sum(x**p, axis))**(-1/p).

        NOTE(review): the norm uses x**p without abs(); this is only a true
        p-norm for even p or non-negative inputs — confirm.
        """
        # building Theano functions
        from caffe_helper.theano_util import init_theano
        init_theano()

        import theano as tn
        import theano.tensor as T
        # Cast to float32 so the compiled graph stays single-precision.
        p = np.float32(self.p_)
        axis = self.axis_
        if axis is None:
            # Default: reduce over every non-batch axis.
            axis = tuple(range(1, len(bottom[0].shape)))

        # blob to CudaNdArray
        # Forward pass
        Tensor = T.TensorType('float32', [False] * len(bottom[0].shape))
        s_x = Tensor('x')  # bottom data
        s_dz = Tensor('dz')  # top diff
        # z = x * (sum(x**p))**(-1/p)
        s_z = s_x * (
            (s_x**p).sum(axis, keepdims=True)**(np.float32(-1./p)))
        # See http://goo.gl/wIVRsP for `tn.Out(x, borrow=True)`
        self.f_forward = tn.function([s_x], tn.Out(s_z, borrow=True))

        # Backward pass
        # Differentiate the scalar surrogate sum(dz * z) to get dL/dx.
        s_l = (s_dz * s_z).sum()
        s_grad = tn.grad(s_l, wrt=s_x)
        self.f_backward = tn.function([s_x, s_dz], tn.Out(s_grad, borrow=True))
# Example (Esempio) n. 4, 0 votes — separator left by the code-scraping tool; commented out so the file parses
    def setup(self, bottom, top):
        """Compile Theano forward/backward for negative log-likelihood:
        loss = -mean(log p[i, t_i]) over the batch.

        bottom[0]: predicted probabilities; bottom[1]: labels stored as
        float32 and cast to int32 for indexing.
        """
        self.reshape(bottom, top)
        from caffe_helper.theano_util import init_theano
        init_theano()

        import theano as tn
        import theano.tensor as T

        pred_shape = bottom[0].shape  # prediction
        label_shape = bottom[1].shape  # label
        s_p = T.TensorType('float32', [False] * len(pred_shape))('p')
        s_t = T.TensorType('float32', [False] * len(label_shape))('t')

        # Forward: clamp probabilities at float32 tiny so log() never sees 0,
        # then pick each row's probability at its label index.
        FLTMIN = np.finfo(np.float32).tiny
        s_logp = T.log(T.maximum(FLTMIN, s_p.flatten(2)))
        s_picked = s_logp[T.arange(s_t.shape[0]), T.cast(s_t, 'int32')]
        s_l = -T.mean(s_picked)
        self.f_forward = tn.function(
            [s_p, s_t], tn.Out(s_l, borrow=True))

        # Backward: gradient of (dz * loss) wrt the predictions, where dz is
        # the scalar top diff.
        s_dz = T.fscalar('dz')
        sg_p = tn.grad(s_dz * s_l, wrt=s_p)
        self.f_backward = tn.function(
            [s_p, s_t, s_dz], tn.Out(sg_p, borrow=True))
# Example (Esempio) n. 5, 0 votes — separator left by the code-scraping tool; commented out so the file parses
    def setup(self, bottom, top):
        """Compile Theano forward/backward for a masked sigmoid
        cross-entropy loss.

        bottom[0] holds logits y; bottom[1] holds targets t in {-1, 0, 1},
        where t == 0 marks elements excluded from the loss.
        """
        from caffe_helper.theano_util import init_theano
        init_theano()

        import theano as tn
        import theano.tensor as T
        assert len(bottom) == 2
        assert len(top) == 1
        s_y = T.matrix('y')  # y in [-inf, inf]
        s_t = T.matrix('t')  # t in {-1, 0, 1} where 0 is ignored
        s_dloss = T.scalar('dloss')  # upstream gradient wrt the loss
        # Forward
        # Numerically stable rewrite of mean(|t| * log(1 + exp(-y*t)));
        # the naive form below overflows exp() for large |y|. The
        # maximum(..., 1) divisor avoids 0/0 when every element is ignored.
        # s_loss = T.mean(abs(s_t) * T.log1p(T.exp(-s_y * s_t)))  # unstable
        s_loss = -T.sum(
            abs(s_t) * (
                s_y * ((s_t >= 0) - (s_y >= 0)) - T.log1p(T.exp(-abs(s_y)))))\
            / T.maximum(T.sum(abs(s_t)), 1)
        # Backward
        # dL/dy = dloss * |t| * (sigmoid(y) - [t >= 0]) / #labeled
        s_p = 1 / (1 + T.exp(-s_y))
        s_dy = s_dloss * abs(s_t) * (s_p - (s_t >= 0)) / \
            T.maximum(T.sum(abs(s_t)), 1)

        def _o(s):
            # Borrowed output: skips the defensive copy on return.
            return tn.Out(s, borrow=True)
        self.tn_forward = tn.function([s_y, s_t], s_loss)
        self.tn_backward = tn.function([s_y, s_t, s_dloss], _o(s_dy))
# Example (Esempio) n. 6, 0 votes — separator left by the code-scraping tool; commented out so the file parses
    def setup(self, bottom, top):
        """Compile a Theano function computing sign-agreement accuracy:
        the fraction of labeled elements (t != 0) whose prediction sign
        (y >= 0) matches the label sign (t >= 0).
        """
        from caffe_helper.theano_util import init_theano
        init_theano()

        import theano as tn
        import theano.tensor as T
        assert len(bottom) == 2
        assert len(top) == 1

        s_y = T.matrix('y')  # y in [-inf, inf]
        s_t = T.matrix('t')  # t in {-1, 0, 1} where 0 is ignored
        s_mask = abs(s_t)  # 1 on labeled entries, 0 on ignored ones
        s_agree = T.eq((s_y >= 0), (s_t >= 0))  # sign match indicator
        # maximum(..., 1) guards against 0/0 when every element is ignored.
        s_loss = T.sum(s_mask * s_agree) / T.maximum(T.sum(s_mask), 1)

        self.tn_forward = tn.function([s_y, s_t], tn.Out(s_loss, borrow=True))
# Example (Esempio) n. 7, 0 votes — separator left by the code-scraping tool; commented out so the file parses
    def setup(self, bottom, top):
        """Build the Theano forward function for sign-match accuracy over
        non-ignored labels (entries with t == 0 are excluded).
        """
        from caffe_helper.theano_util import init_theano
        init_theano()

        import theano as tn
        import theano.tensor as T
        assert len(bottom) == 2
        assert len(top) == 1

        s_y = T.matrix('y')  # y in [-inf, inf]
        s_t = T.matrix('t')  # t in {-1, 0, 1} where 0 is ignored
        # Weight each sign agreement by |t| so ignored entries contribute
        # nothing; normalize by the labeled count (clamped to 1 against 0/0).
        labeled = abs(s_t)
        agree = T.eq((s_y >= 0), (s_t >= 0))
        s_loss = T.sum(labeled * agree) / T.maximum(T.sum(labeled), 1)

        def _borrow(sym):
            # Borrowed output: skips the defensive copy on return.
            return tn.Out(sym, borrow=True)

        self.tn_forward = tn.function([s_y, s_t], _borrow(s_loss))
# Example (Esempio) n. 8, 0 votes — separator left by the code-scraping tool; commented out so the file parses
    def setup(self, bottom, top):
        """Compile Theano forward/backward for negative log-likelihood:
        loss = -mean(log p[i, t_i]) over the batch.

        bottom[0]: predicted probabilities; bottom[1]: labels stored as
        float32 and cast to int32 for indexing.
        """
        self.reshape(bottom, top)
        from caffe_helper.theano_util import init_theano
        init_theano()

        import theano as tn
        import theano.tensor as T
        shape1 = bottom[0].shape  # prediction
        shape2 = bottom[1].shape  # label
        s_p = T.TensorType('float32', [False] * len(shape1))('p')
        s_t = T.TensorType('float32', [False] * len(shape2))('t')

        # Forward pass
        # Clamp probabilities at float32 tiny so log() never sees an exact 0.
        FLTMIN = np.finfo(np.float32).tiny
        s_l = -T.mean(
            T.log(T.maximum(FLTMIN, s_p.flatten(2)))[T.arange(s_t.shape[0]),
                                                     T.cast(s_t, 'int32')])
        self.f_forward = tn.function([s_p, s_t], tn.Out(s_l, borrow=True))

        # Backward pass
        # Gradient of (dz * loss) wrt predictions; dz is the scalar top diff.
        s_dz = T.fscalar('dz')
        sg_p = tn.grad(s_dz * s_l, wrt=s_p)
        self.f_backward = tn.function([s_p, s_t, s_dz],
                                      tn.Out(sg_p, borrow=True))
    def setup(self, bottom, top):
        """Compile a Theano function computing DSSIM = (1 - SSIM) / 2 and
        its gradients wrt both input images.

        bottom: two ftensor3 image blobs, plus an optional third mask blob.
        top: one output blob.
        """
        from caffe_helper.theano_util import init_theano
        init_theano()

        import theano as tn
        import theano.tensor as T
        from theano.tensor.signal.conv import conv2d
        assert len(bottom) >= 2
        assert len(bottom) <= 3
        assert len(top) == 1
        # parameter
        # SSIM stabilization constants K and dynamic range L. L == 1.0
        # presumably means images are normalized to [0, 1] — confirm.
        self.K_ = [0.01, 0.03]
        self.L_ = 1.0
        # SECURITY: eval() on the layer parameter string executes arbitrary
        # code from the prototxt; prefer ast.literal_eval if the params are
        # always plain literals.
        param = eval(self.param_str_)
        self.hsize_ = param.get('hsize', 11)
        self.sigma_ = param.get('sigma', 1.5)
        assert self.hsize_ % 2 == 1  # Gaussian window needs a center pixel
        hsize = self.hsize_
        sigma = self.sigma_
        C1 = (self.K_[0] * self.L_) ** 2
        C2 = (self.K_[1] * self.L_) ** 2
        # Creating gaussian filter
        # Separable construction: 1-D Gaussian, outer product, normalize to 1.
        x = np.exp(-0.5 * ((np.arange(hsize) - int(hsize / 2)) ** 2) /
                   (sigma ** 2))
        filt = x.reshape(-1, 1) * x.reshape(1, -1)
        filt /= filt.sum()

        # Build a Theano function which computes SSIM and its gradients wrt two
        # images
        simg1_in = T.ftensor3()
        simg2_in = T.ftensor3()

        if len(bottom) > 2:
            # Masked variant: zero out unmasked pixels and rescale img1 by
            # sk, the least-squares gain aligning sk*img1 to img2.
            smask = T.ftensor3()
            sk = T.sum(simg1_in * simg2_in * smask) \
                / T.sum(simg1_in * simg1_in * smask)
            simg1 = sk * simg1_in * smask
            simg2 = simg2_in * smask
        else:
            # Unmasked variant: same least-squares gain over all pixels.
            sk = T.sum(simg1_in * simg2_in) \
                / T.sum(simg1_in * simg1_in)
            simg1 = sk * simg1_in
            simg2 = simg2_in
        sfilt = tn.shared(filt.astype(np.float32))
        # Local (Gaussian-windowed) means, variances and covariance.
        smu1 = conv2d(simg1, sfilt)
        smu2 = conv2d(simg2, sfilt)
        smu1_sq = smu1 * smu1
        smu2_sq = smu2 * smu2
        smu1_mu2 = smu1 * smu2
        ssig1_sq = conv2d(simg1 * simg1, sfilt) - smu1_sq
        ssig2_sq = conv2d(simg2 * simg2, sfilt) - smu2_sq
        ssig12 = conv2d(simg1 * simg2, sfilt) - smu1_mu2
        # Standard SSIM map, averaged over all windows.
        sssim = (
            ((2 * smu1_mu2 + C1) * (2 * ssig12 + C2))
            / ((smu1_sq + smu2_sq + C1) * (ssig1_sq + ssig2_sq + C2))
        ).mean()
        # DSSIM in [0, 1]: 0 when identical, 1 when anti-correlated.
        sdssim = (1 - sssim) / 2
        gimg1, gimg2 = tn.grad(sdssim, [simg1_in, simg2_in])
        if len(bottom) > 2:
            self.fdssim_with_grad = tn.function(
                [simg1_in, simg2_in, smask], [sdssim, gimg1, gimg2])
        else:
            self.fdssim_with_grad = tn.function(
                [simg1_in, simg2_in], [sdssim, gimg1, gimg2])
# Example (Esempio) n. 10, 0 votes — separator left by the code-scraping tool; commented out so the file parses
    def setup(self, bottom, top):
        """Compile a Theano function computing DSSIM = (1 - SSIM) / 2 and
        its gradients wrt both input images.

        bottom: two ftensor3 image blobs, plus an optional third mask blob.
        top: one output blob.
        """
        from caffe_helper.theano_util import init_theano
        init_theano()

        import theano as tn
        import theano.tensor as T
        from theano.tensor.signal.conv import conv2d
        assert len(bottom) >= 2
        assert len(bottom) <= 3
        assert len(top) == 1
        # parameter
        # SSIM stabilization constants K and dynamic range L. L == 1.0
        # presumably means images are normalized to [0, 1] — confirm.
        self.K_ = [0.01, 0.03]
        self.L_ = 1.0
        # SECURITY: eval() on the layer parameter string executes arbitrary
        # code from the prototxt; prefer ast.literal_eval if the params are
        # always plain literals.
        param = eval(self.param_str_)
        self.hsize_ = param.get('hsize', 11)
        self.sigma_ = param.get('sigma', 1.5)
        assert self.hsize_ % 2 == 1  # Gaussian window needs a center pixel
        hsize = self.hsize_
        sigma = self.sigma_
        C1 = (self.K_[0] * self.L_)**2
        C2 = (self.K_[1] * self.L_)**2
        # Creating gaussian filter
        # Separable construction: 1-D Gaussian, outer product, normalize to 1.
        x = np.exp(-0.5 * ((np.arange(hsize) - int(hsize / 2))**2) /
                   (sigma**2))
        filt = x.reshape(-1, 1) * x.reshape(1, -1)
        filt /= filt.sum()

        # Build a Theano function which computes SSIM and its gradients wrt two
        # images
        simg1_in = T.ftensor3()
        simg2_in = T.ftensor3()

        if len(bottom) > 2:
            # Masked variant: zero out unmasked pixels and rescale img1 by
            # sk, the least-squares gain aligning sk*img1 to img2.
            smask = T.ftensor3()
            sk = T.sum(simg1_in * simg2_in * smask) \
                / T.sum(simg1_in * simg1_in * smask)
            simg1 = sk * simg1_in * smask
            simg2 = simg2_in * smask
        else:
            # Unmasked variant: same least-squares gain over all pixels.
            sk = T.sum(simg1_in * simg2_in) \
                / T.sum(simg1_in * simg1_in)
            simg1 = sk * simg1_in
            simg2 = simg2_in
        sfilt = tn.shared(filt.astype(np.float32))
        # Local (Gaussian-windowed) means, variances and covariance.
        smu1 = conv2d(simg1, sfilt)
        smu2 = conv2d(simg2, sfilt)
        smu1_sq = smu1 * smu1
        smu2_sq = smu2 * smu2
        smu1_mu2 = smu1 * smu2
        ssig1_sq = conv2d(simg1 * simg1, sfilt) - smu1_sq
        ssig2_sq = conv2d(simg2 * simg2, sfilt) - smu2_sq
        ssig12 = conv2d(simg1 * simg2, sfilt) - smu1_mu2
        # Standard SSIM map, averaged over all windows.
        sssim = (((2 * smu1_mu2 + C1) * (2 * ssig12 + C2)) /
                 ((smu1_sq + smu2_sq + C1) *
                  (ssig1_sq + ssig2_sq + C2))).mean()
        # DSSIM in [0, 1]: 0 when identical, 1 when anti-correlated.
        sdssim = (1 - sssim) / 2
        gimg1, gimg2 = tn.grad(sdssim, [simg1_in, simg2_in])
        if len(bottom) > 2:
            self.fdssim_with_grad = tn.function([simg1_in, simg2_in, smask],
                                                [sdssim, gimg1, gimg2])
        else:
            self.fdssim_with_grad = tn.function([simg1_in, simg2_in],
                                                [sdssim, gimg1, gimg2])