Example #1
def doStep(w):

    # Evaluate the holdout objective and its gradient
    f_holdout, gw_holdout = func_holdout(w)
    gw_holdout_norm = 0
    gw_holdout_effective = ndict.clone(gw_holdout)
    for i in w:
        # Update first and second moments of the holdout gradient
        m1_holdout[i] += lambd * (gw_holdout[i] - m1_holdout[i])
        m2_holdout[i] += lambd * (gw_holdout[i]**2 - m2_holdout[i])
        # Scale by the running second moment (RMSProp-style preconditioning)
        gw_holdout_effective[i] /= np.sqrt(m2_holdout[i] + 1e-8)
        gw_holdout_norm += (gw_holdout_effective[i]**2).sum()
    gw_holdout_norm = np.sqrt(gw_holdout_norm)

    f_tot = 0
    gw_tot = ndict.cloneZeros(w)
    alphas = []
    for j in range(len(funcs)):
        f, gw = funcs[j](w)
        f_tot += f

        gw_norm = 0
        gw_effective = ndict.clone(gw)
        for i in w:
            # Update first and second moments
            m1[j][i] += lambd * (gw[i] - m1[j][i])
            m2[j][i] += lambd * (gw[i]**2 - m2[j][i])
            gw_effective[i] /= np.sqrt(m2[j][i] + 1e-8)
            gw_norm += (gw_effective[i]**2).sum()
        gw_norm = np.sqrt(gw_norm)

        # Compute cosine similarity with the holdout gradient
        alpha = 0
        for i in w:
            alpha += (gw_effective[i] * gw_holdout_effective[i]).sum()
        alpha /= gw_holdout_norm * gw_norm

        alphas.append(alpha)

        # alpha = (alpha > 0) * 1.0

        for i in w:
            # Accumulate gradient of subobjective, weighted by its similarity
            gw_tot[i] += alpha * gw[i] / np.sqrt(m2[j][i] + 1e-8)

    # print('alphas:', alphas)

    # Only update the parameters once the warmup period has passed
    if batchi[0] > warmup:
        for i in w:
            w[i] += stepsize * gw_tot[i]

    batchi[0] += 1

    return f_tot
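
The doStep function above reads its optimizer state from an enclosing scope: the subobjective list funcs, the holdout objective func_holdout, the moment dictionaries m1, m2, m1_holdout, m2_holdout, and the hyperparameters lambd, stepsize, warmup, plus the batchi counter. A minimal sketch of a hypothetical enclosing setup, using toy quadratic objectives so the snippet runs on its own (all names and values below are illustrative assumptions, not from the original project; doStep is assumed to be defined at module level so these names resolve as globals):

import numpy as np
from anglepy import ndict  # assumed import path for the ndict helpers used above

# Hypothetical toy subobjective: value and gradient of -0.5 * ||w - target||^2
def make_quadratic(target):
    def f(w):
        val = sum((-0.5 * (w[i] - target[i])**2).sum() for i in w)
        g = {i: target[i] - w[i] for i in w}  # gradient for ascent toward target
        return val, g
    return f

w = {'w0': np.zeros((2, 1))}
funcs = [make_quadratic({'w0': np.ones((2, 1))}),
         make_quadratic({'w0': -np.ones((2, 1))})]
func_holdout = make_quadratic({'w0': 0.5 * np.ones((2, 1))})

# Running moment estimates and hyperparameters that doStep closes over
m1 = [ndict.cloneZeros(w) for _ in funcs]
m2 = [ndict.cloneZeros(w) for _ in funcs]
m1_holdout = ndict.cloneZeros(w)
m2_holdout = ndict.cloneZeros(w)
lambd, stepsize, warmup = 0.1, 1e-2, 10
batchi = [0]

for _ in range(100):
    f_tot = doStep(w)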
Example #2
def sample_standard_auto(z, fgrad, n_burnin, n_samples):
    hmc_dostep = hmc_step_autotune(n_steps=10, init_stepsize=1e-2, target=0.9)

    def dostep(_z):
        logpxz, _, _ = hmc_dostep(fgrad, _z)
        return logpxz

    # Burn-in phase
    for _ in range(n_burnin):
        dostep(z)

    # Sample
    z_list = []
    logpxz_list = []
    for _ in range(n_samples):
        logpxz = dostep(z)
        # print('logpxz:', logpxz)
        logpxz_list.append(logpxz.copy())
        z_list.append(ndict.clone(z))

    z, logpxz = combine_samples(z_list, logpxz_list)

    return z, logpxz
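
A hypothetical usage sketch for sample_standard_auto, with a standard-normal target whose log-density and gradient follow the (logpxz, gradient-dict) contract that fgrad must satisfy (array shapes and sample counts below are illustrative; hmc_step_autotune, combine_samples, and ndict are assumed to come from the same module as the function above):

import numpy as np

# Log-density and gradient of a standard normal, one value per batch column
def fgrad(z):
    logpxz = 0
    g = {}
    for i in z:
        logpxz += -0.5 * (z[i]**2).sum(axis=0, keepdims=True)
        g[i] = -z[i]
    return logpxz, g

z = {'z': np.random.normal(size=(2, 100))}  # 2 dims, 100 parallel chains
z_samples, logpxz = sample_standard_auto(z, fgrad, n_burnin=50, n_samples=200)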
Example #3
File: hmc.py Project: Beronx86/anglepy
def sample_standard_auto(z, fgrad, n_burnin, n_samples):
    hmc_dostep = hmc_step_autotune(n_steps=10, init_stepsize=1e-2, target=0.9)

    def dostep(_z):
        logpxz, _, _ = hmc_dostep(fgrad, _z)
        return logpxz

    # Burn-in phase
    for _ in range(n_burnin):
        dostep(z)

    # Sample
    z_list = []
    logpxz_list = []
    for _ in range(n_samples):
        logpxz = dostep(z)
        # print('logpxz:', logpxz)
        logpxz_list.append(logpxz.copy())
        z_list.append(ndict.clone(z))

    z, logpxz = combine_samples(z_list, logpxz_list)

    return z, logpxz
Example #4
def hmc_step(fgrad, x0, _stepsize=1e-2, n_steps=20):

    # === INITIALIZE
    n_batch = next(iter(x0.values())).shape[1]
    # Random +/- integration direction per chain, scaled by _stepsize
    stepsize = (np.random.uniform(size=(1, n_batch)) < 0.5).astype(float) * 2 - 1
    stepsize *= _stepsize

    if np.random.uniform() < 0.5:
        stepsize *= -1

    # Sample velocity
    vnew = {}
    for i in x0:
        vnew[i] = np.random.normal(size=x0[i].shape)

    # copy initial state
    xnew = ndict.clone(x0)
    v0 = ndict.clone(vnew)

    # === LEAPFROG STEPS

    # Compute velocity at time (t + stepsize/2)
    # Compute position at time (t + stepsize)
    logpxz0, g = fgrad(xnew)
    for i in xnew:
        vnew[i] += 0.5 * stepsize * g[i]
        xnew[i] += stepsize * vnew[i]

    # Perform leapfrog steps
    for step in range(n_steps):
        # print('hmc_step:', step)
        logpxz, g = fgrad(xnew)
        for i in xnew:
            vnew[i] += stepsize * g[i]
            xnew[i] += stepsize * vnew[i]

    # Perform final half-step for velocity
    logpxz1, g = fgrad(xnew)
    for i in xnew:
        vnew[i] += 0.5 * stepsize * g[i]

    # === METROPOLIS-HASTINGS ACCEPT/REJECT

    # Compute old and new Hamiltonians

    k0 = 0
    k1 = 0
    for i in vnew:
        k0 += (v0[i]**2).sum(axis=0, keepdims=True)
        k1 += (vnew[i]**2).sum(axis=0, keepdims=True)

    h0 = -logpxz0 + 0.5 * k0
    h1 = -logpxz1 + 0.5 * k1

    # print(logpxz0, k0, logpxz1, k1)
    # print(h0 - h1)

    # Perform Metropolis-Hastings step
    accept = np.exp(h0 - h1) >= np.random.uniform(size=h1.shape)
    accept = accept.astype(float)

    for i in xnew:
        accept2 = np.dot(np.ones((xnew[i].shape[0], 1)), accept)
        x0[i] = (1 - accept2) * x0[i] + accept2 * xnew[i]

    # result: updated 'x0'
    logpxz = (1 - accept) * logpxz0 + accept * logpxz1

    return logpxz, accept
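
hmc_step can also be exercised directly. A minimal sketch against a diagonal Gaussian target (the scales, shapes, and step size are illustrative assumptions; the ndict helpers used inside hmc_step are assumed to be importable from anglepy):

import numpy as np

sigma = np.array([[0.5], [1.0], [2.0]])  # per-dimension standard deviations

# Log-density and gradient of a diagonal Gaussian, one value per batch column
def fgrad(x):
    logpxz = 0
    g = {}
    for i in x:
        logpxz += -0.5 * ((x[i] / sigma)**2).sum(axis=0, keepdims=True)
        g[i] = -x[i] / sigma**2
    return logpxz, g

x0 = {'x': np.random.normal(size=(3, 50))}  # 3 dims, 50 parallel chains
for _ in range(100):
    logpxz, accept = hmc_step(fgrad, x0, _stepsize=1e-1, n_steps=20)
print('mean acceptance rate:', accept.mean())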
Example #5
File: hmc.py Project: Beronx86/anglepy
def hmc_step(fgrad, x0, _stepsize=1e-2, n_steps=20):

    # === INITIALIZE
    n_batch = next(iter(x0.values())).shape[1]
    stepsize = (np.random.uniform(size=(1, n_batch)) < 0.5).astype(float) * 2 - 1
    stepsize *= _stepsize

    if np.random.uniform() < 0.5:
        stepsize *= -1

    # Sample velocity
    vnew = {}
    for i in x0:
        vnew[i] = np.random.normal(size=x0[i].shape)

    # copy initial state
    xnew = ndict.clone(x0)
    v0 = ndict.clone(vnew)

    # === LEAPFROG STEPS

    # Compute velocity at time (t + stepsize/2)
    # Compute position at time (t + stepsize)
    logpxz0, g = fgrad(xnew)
    for i in xnew:
        vnew[i] += 0.5 * stepsize * g[i]
        xnew[i] += stepsize * vnew[i]

    # Perform leapfrog steps
    for step in range(n_steps):
        # print('hmc_step:', step)
        logpxz, g = fgrad(xnew)
        for i in xnew:
            vnew[i] += stepsize * g[i]
            xnew[i] += stepsize * vnew[i]

    # Perform final half-step for velocity
    logpxz1, g = fgrad(xnew)
    for i in xnew:
        vnew[i] += 0.5 * stepsize * g[i]

    # === METROPOLIS-HASTINGS ACCEPT/REJECT

    # Compute old and new Hamiltonians

    k0 = 0
    k1 = 0
    for i in vnew:
        k0 += (v0[i] ** 2).sum(axis=0, keepdims=True)
        k1 += (vnew[i] ** 2).sum(axis=0, keepdims=True)

    h0 = -logpxz0 + 0.5 * k0
    h1 = -logpxz1 + 0.5 * k1

    # print(logpxz0, k0, logpxz1, k1)
    # print(h0 - h1)

    # Perform Metropolis-Hastings step
    accept = np.exp(h0 - h1) >= np.random.uniform(size=h1.shape)
    accept = accept.astype(float)

    for i in xnew:
        accept2 = np.dot(np.ones((xnew[i].shape[0], 1)), accept)
        x0[i] = (1 - accept2) * x0[i] + accept2 * xnew[i]

    # result: updated 'x0'
    logpxz = (1 - accept) * logpxz0 + accept * logpxz1

    return logpxz, accept