Example no. 1
0
    def grad(self, V):
        """Compute the gradient of the sequence loss w.r.t. this model's
        parameters (VH, HH, b_init) for the visible sequence V.

        V is expected to be an array of shape (T, v), one visible vector per
        timestep; v must equal self.v.  Returns (G, info) where G has the same
        (VH, HH, b_init) structure as self and holds the accumulated
        gradients, and info is a dict carrying the total 'loss' summed over
        the T per-timestep RBM losses.
        """
        T, v = V.shape
        assert (v == self.v)
        h = self.h
        # The model unpacks into visible-hidden weights, hidden-hidden
        # (recurrent) weights, and the initial hidden bias.
        VH, HH, b_init = self

        # G mirrors self's structure; its three components alias the
        # accumulators below and are filled in place.
        G = 0 * self
        d_VH, d_HH, d_b_init = G

        # Forward pass: H holds hidden activations, B the dynamic
        # (recurrent) biases, one row per timestep.
        H = zeros((T, h))
        B = zeros((T, h))

        # At t == 0 the dynamic bias comes from b_init rather than HH.
        H[0] = sigmoid(VH * V[[0]] + b_init[newaxis, :])

        for t in range(1, T):
            B[[t]] = HH * H[[t - 1]]
            H[[t]] = sigmoid(VH * V[[t]] + B[[t]])

        # dB: gradient reaching B[t] through future timesteps (via F_t);
        # dBL: gradient reaching B[t] from timestep t's own RBM loss.
        dB = zeros((T, h))
        dBL = zeros((T, h))

        # F_t carries the back-propagated signal from timestep t + 1;
        # it is zero for the last timestep.
        F_t = zeros(h)

        loss = 0

        # Per-timestep RBM weights: a copy of VH whose row 2 (the hidden
        # bias slot — presumably; confirm against the rbm module) is
        # overwritten with the dynamic bias each iteration.
        VH_t = 1 * VH
        for t in reversed(range(T)):
            # sigmoid'(x) = s * (1 - s): gate the incoming signal F_t.
            dB[t] = H[t] * (1 - H[t]) * F_t

            VH_t[2] = B[t] + VH[2]

            # CD_n > 0 selects contrastive-divergence training; otherwise
            # the exact RBM gradient is used.
            if self.CD_n > 0:
                dVH_t, dict_loss = rbm.rbm_grad_cd(VH_t, V[[t]], self.CD_n,
                                                   self.vis_gauss)
            else:
                dVH_t, dict_loss = rbm.rbm_grad_exact(VH_t, V[[t]],
                                                      self.vis_gauss)
            loss += dict_loss['loss']

            # Row 2 of the per-step gradient is the bias-row gradient,
            # i.e. the gradient w.r.t. the dynamic bias B[t].
            dBL[t] = dVH_t[2]

            d_VH += dVH_t

            # Propagate the total bias gradient one step back through HH.
            HH.direction = up
            F_t[:] = HH.T() * (dB[[t]] + dBL[[t]])
            HH.direction = None

        # At t == 0 the dynamic bias is b_init, so its gradient is dB[0].
        d_b_init += dB[0]

        # Accumulate the weight gradients from the stored activations.
        HH.direction = up
        VH.direction = up
        for t in range(1, T):
            d_HH += HH.outp_up(H[[t - 1]], dB[[t]] + dBL[[t]])
            d_VH += VH.outp_up(V[[t]], dB[[t]])
        d_VH += VH.outp_up(V[[0]], dB[[0]])

        HH.direction = None
        VH.direction = None

        return G, dict(loss=loss)
Example no. 2
0
    def grad(self, V):
        """Compute the gradient of the sequence loss w.r.t. this model's
        parameters (VH, HH, b_init) for the visible sequence V.

        V is expected to be an array of shape (T, v), one visible vector per
        timestep; v must equal self.v.  Returns (G, info) where G has the same
        (VH, HH, b_init) structure as self and holds the accumulated
        gradients, and info is a dict carrying the total 'loss' summed over
        the T per-timestep RBM losses.

        Note: unlike the fully back-propagated variant, this version does not
        propagate gradients through the hidden-state recurrence; only the
        per-timestep RBM gradients are accumulated.
        """
        T, v = V.shape
        assert (v == self.v)
        h = self.h
        # The model unpacks into visible-hidden weights, hidden-hidden
        # (recurrent) weights, and the initial hidden bias.
        VH, HH, b_init = self

        # G mirrors self's structure; its three components alias the
        # accumulators below and are filled in place.
        G = 0 * self
        d_VH, d_HH, d_b_init = G

        # Forward pass: H holds hidden activations, B the dynamic
        # (recurrent) biases, one row per timestep.
        H = zeros((T, h))
        B = zeros((T, h))

        # At t == 0 the dynamic bias comes from b_init rather than HH.
        H[0] = sigmoid(VH * V[[0]] + b_init[newaxis, :])

        for t in range(1, T):
            B[[t]] = HH * H[[t - 1]]
            H[[t]] = sigmoid(VH * V[[t]] + B[[t]])

        loss = 0

        # Per-timestep RBM weights: a copy of VH whose row 2 (the hidden
        # bias slot — presumably; confirm against the rbm module) is
        # overwritten with the dynamic bias each iteration.
        VH_t = 1 * VH

        for t in reversed(range(T)):
            VH_t[2] = B[t] + VH[2]

            # CD_n > 0 selects contrastive-divergence training; otherwise
            # the exact RBM gradient is used.
            if self.CD_n > 0:
                dVH_t, dict_loss = rbm.rbm_grad_cd(VH_t, V[[t]], self.CD_n,
                                                   self.vis_gauss)
            else:
                dVH_t, dict_loss = rbm.rbm_grad_exact(VH_t, V[[t]],
                                                      self.vis_gauss)
            loss += dict_loss['loss']

            d_VH += dVH_t
            if t > 0:
                # The dynamic-bias gradient (row 2 of dVH_t) flows into HH
                # via the previous hidden state...
                HH.direction = up
                d_HH += HH.outp(H[[t - 1]], dVH_t[2][newaxis, :])
                HH.direction = None
            else:
                # ...except at t == 0, where the dynamic bias is b_init.
                d_b_init += dVH_t[2]

        return G, dict(loss=loss)
Example no. 3
0
 def loss(W, x):
     """Return only the loss entry of the CD-n RBM gradient/loss pair."""
     grad_and_loss = rbm.rbm_grad_cd(W, x, CD_n)
     return grad_and_loss[1]
Example no. 4
0
 def grad(W, x):
     """Run CD-n on weights W and data x; return the (gradient, loss) result."""
     result = rbm.rbm_grad_cd(W, x, CD_n)
     return result
Example no. 5
0
 def loss(W, x):
     """Evaluate the CD-n objective on (W, x), discarding the gradient part."""
     cd_output = rbm.rbm_grad_cd(W, x, CD_n)
     loss_part = cd_output[1]
     return loss_part
Example no. 6
0
 def grad(W, x):
     """Thin wrapper: CD-n gradient computation for weights W on data x."""
     cd_output = rbm.rbm_grad_cd(W, x, CD_n)
     return cd_output
Example no. 7
0
def loss(W, x):
    """Return only the loss entry of the CD gradient/loss pair (n_cd steps)."""
    grad_and_loss = rbm.rbm_grad_cd(W, x, n_cd)
    return grad_and_loss[1]
Example no. 8
0
def grad(W, x):
    """Run n_cd-step contrastive divergence; return its (gradient, loss) result."""
    result = rbm.rbm_grad_cd(W, x, n_cd)
    return result
Example no. 9
0
def loss(W, x):
    """Evaluate the CD objective (n_cd steps) on (W, x); drop the gradient."""
    cd_output = rbm.rbm_grad_cd(W, x, n_cd)
    loss_part = cd_output[1]
    return loss_part
Example no. 10
0
def grad(W, x):
    """Thin wrapper: n_cd-step CD gradient for weights W on data x."""
    cd_output = rbm.rbm_grad_cd(W, x, n_cd)
    return cd_output