Example 1
def main():
    # Generate synthetic data
    x = 2 * npr.rand(N,D) - 1  # data features, an (N,D) array
    x[:, 0] = 1
    th_true = 10.0 * np.array([0, 1, 1])
    y = np.dot(x, th_true[:, None])[:, 0]
    t = npr.rand(N) > (1 / (1 + np.exp(y)))  # data targets, an (N) array of 0s and 1s

    # Obtain joint distributions over z and th
    model = ff.LogisticModel(x, t, th0=th0, y0=y0)

    # Set up step functions
    th = np.random.randn(D) * th0
    z = ff.BrightnessVars(N)
    th_stepper = ff.ThetaStepMH(model.log_p_joint, stepsize)
    z__stepper = ff.zStepMH(model.log_pseudo_lik, q)

    plt.ion()
    ax = plt.figure(figsize=(8, 6)).add_subplot(111)
    while True:
        th = th_stepper.step(th, z)  # Markov transition step for theta
        z  = z__stepper.step(th, z)  # Markov transition step for z
        update_fig(ax, x, y, z, th, t)
        plt.draw()
        plt.pause(0.05)
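
This example (and the near-identical Example 2 below) assumes a handful of module-level imports and constants that the snippet does not show. A minimal sketch of that preamble, assuming the FlyMC package imports as flymc; the constant values here are placeholders, not values taken from the source, and update_fig is assumed to be defined elsewhere in the script:

import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
import flymc as ff  # FlyMC package, assumed to import under this name

# Illustrative module-level constants; the original script defines its own values.
N = 1000         # number of data points
D = 3            # number of features (th_true above has three components)
th0 = 1.0        # prior scale on theta
y0 = 1.0         # lower-bound parameter used by the pseudo-likelihood
stepsize = 0.05  # MH proposal width
q = 0.1          # per-point probability of resampling z_n

# update_fig(ax, x, y, z, th, t) is assumed to be defined elsewhere in the script.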
Example 2
def main():
    # Generate synthetic data
    x = 2 * npr.rand(N, D) - 1  # data features, an (N,D) array
    x[:, 0] = 1
    th_true = 10.0 * np.array([0, 1, 1])
    y = np.dot(x, th_true[:, None])[:, 0]
    t = npr.rand(N) > (1 / (1 + np.exp(y)))  # data targets, an (N) array of 0s and 1s

    # Obtain joint distributions over z and th
    model = ff.LogisticModel(x, t, th0=th0, y0=y0)

    # Set up step functions
    th = np.random.randn(D) * th0
    z = ff.BrightnessVars(N)
    th_stepper = ff.ThetaStepMH(model.log_p_joint, stepsize)
    z__stepper = ff.zStepMH(model.log_pseudo_lik, q)

    plt.ion()
    ax = plt.figure(figsize=(8, 6)).add_subplot(111)
    while True:
        th = th_stepper.step(th, z)  # Markov transition step for theta
        z = z__stepper.step(th, z)  # Markov transition step for z
        update_fig(ax, x, y, z, th, t)
        plt.draw()
        plt.pause(0.05)
Example 3
def run_model(model, q=0.1, fly=False):
    th = np.random.randn(K, D) * th0
    if fly:
        z = ff.BrightnessVars(N)
    else:
        z = ff.BrightnessVars(N, range(N))
    # Langevin (gradient-based) updates for theta; needs the joint gradient.
    th_stepper = ff.ThetaStepLangevin(model.log_p_joint,
                                      model.D_log_p_joint, stepsize)
    if fly:
        z__stepper = ff.zStepMH(model.log_pseudo_lik, q)
    ths = []
    for _ in range(N_steps):
        num_lik_prev = model.num_lik_evals
        if _ % N_ess == 0 and _ > 0:
            #print pypmc.tools.convergence.ess(ths) # TODO: is this correct?
            #print ess(ths)
            # Flatten each (K, D) sample to one row so np.savetxt gets a 2-D array.
            np.savetxt('softmax-trace-untuned-{0}.csv'.format(_),
                       np.array(ths).reshape(len(ths), -1))
            ths = []
        th = th_stepper.step(th, z)  # Markov transition step for theta
        if fly:
            z = z__stepper.step(th, z)  # Markov transition step for z
        ths.append(th)
        print("Likelihood evals in iter {0}: {1}".format(
            _, model.num_lik_evals - num_lik_prev))
        print("Number bright points: {0}".format(len(z.bright)))
    return th
def run_model(model, th_init=np.random.randn(D) * th0, q=0.1, fly=False):
    # NOTE: the default th_init is drawn once, when the function is defined.
    th = th_init
    if fly:
        z = ff.BrightnessVars(N)
    else:
        z = ff.BrightnessVars(N, range(N))
    # Random-walk MH updates for theta (cf. the Langevin variant above).
    th_stepper = ff.ThetaStepMH(model.log_p_joint, stepsize)
    if fly:
        z__stepper = ff.zStepMH(model.log_pseudo_lik, q)
    ths = []
    num_rejects = 0
    for i in range(N_steps):
        num_lik_prev = model.num_lik_evals
        if i % N_ess == 0 and i > 0:
            #print pypmc.tools.convergence.ess(ths) # TODO: is this correct?
            #print ess(ths)
            np.savetxt('trace-untuned-{0}.csv'.format(i), np.array(ths))
            ths = []
        th = th_stepper.step(th, z)  # Markov transition step for theta
        num_rejects += th_stepper.num_rejects
        if fly:
            z = z__stepper.step(th, z)  # Markov transition step for z
        ths.append(th)
        print("\t\t".join(
            map(lambda x: "{0:.5f}".format(x), [
                i,
                len(z.bright), model.num_lik_evals - num_lik_prev,
                1.0 - num_rejects / float(i + 1),
                -1.0 * model.log_p_marg(th, increment_ctr=False)
            ])))
    return th
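
A hedged sketch of how the run_model variant above might be driven end to end, reusing the synthetic-data recipe from Example 1 (with D = 3 so that th_true matches); it is assumed that the module-level constants N, D, th0, y0, stepsize, N_steps and N_ess are defined, and that ff.LogisticModel exposes the log_p_joint, log_pseudo_lik, log_p_marg and num_lik_evals members the function uses:

# Synthetic logistic-regression data, as in Example 1.
x = 2 * npr.rand(N, D) - 1
x[:, 0] = 1
th_true = 10.0 * np.array([0, 1, 1])
t = npr.rand(N) > (1 / (1 + np.exp(np.dot(x, th_true))))

model = ff.LogisticModel(x, t, th0=th0, y0=y0)
th_full = run_model(model, fly=False)        # plain MCMC: all points stay bright
th_fly = run_model(model, q=0.1, fly=True)   # FlyMC: only bright points are evaluated each step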
def logistic_regression_chain(x, t, N_iter=100, stepsize=1, th0=1, q=0.1, y0=1, seed=None):

    # Set seed
    npr.seed(seed)

    # Obtain joint distributions over z and th and set step functions
    model = ff.LogisticModel(x, t, th0=th0, y0=y0)
    z__stepper = ff.zStepMH(model.log_pseudo_lik, q)
    th_stepper = ff.ThetaStepMH(model.log_p_joint, stepsize)

    # Initialize
    N, D = x.shape
    th = np.random.randn(D) * th0
    z = ff.BrightnessVars(N)

    # run chain
    th_chain = np.zeros((N_iter,) + th.shape)
    z_chain  = np.zeros((N_iter, N), dtype=bool)

    for i in range(N_iter):
        th = th_stepper.step(th, z)
        z  = z__stepper.step(th, z)
        # Record the intermediate results
        th_chain[i, :] = th.copy()
        z_chain[i, z.bright] = 1

    print("th0 =", th0, "frac accepted is", th_stepper.frac_accepted, "bright frac is:", np.mean(z_chain))
    return th_chain, z_chain
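
A short usage sketch for logistic_regression_chain, again following the data-generation recipe from Example 1; the hyperparameter values here are illustrative only:

npr.seed(0)
N, D = 500, 3
x = 2 * npr.rand(N, D) - 1
x[:, 0] = 1
th_true = 10.0 * np.array([0, 1, 1])
t = npr.rand(N) > (1 / (1 + np.exp(np.dot(x, th_true))))

th_chain, z_chain = logistic_regression_chain(x, t, N_iter=200, stepsize=0.1,
                                              th0=1.0, q=0.1, y0=1.0, seed=0)
print("posterior mean of theta:", th_chain.mean(axis=0))
print("average bright fraction:", z_chain.mean())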
def check_flymc_converges(model, correct_moments, th_stepper):
    tol = 0.2
    q = 0.5
    z_stepper = ff.zStepMH(model.log_pseudo_lik, q)
    th_init = model.draw_from_prior()
    z_init = ff.BrightnessVars(model.N)
    chain_gen = chain_generator_th_z(th_stepper, z_stepper, th_init, z_init)
    est_moments = mcmc_estimator(chain_gen, tol)
    assert (np.all(np.abs(est_moments - correct_moments) < tol))
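
chain_generator_th_z and mcmc_estimator are not shown in these examples. One purely illustrative reading of how they could be written (the names appear in the source, but the bodies, the burn-in, and the stopping rule below are assumptions) is a generator that alternates the theta and z transitions and an estimator that averages samples until its running estimate stabilises well within the tolerance:

import itertools

def chain_generator_th_z(th_stepper, z_stepper, th, z):
    # Alternate Markov transitions for theta and z, yielding theta samples.
    while True:
        th = th_stepper.step(th, z)
        z = z_stepper.step(th, z)
        yield th

def mcmc_estimator(chain_gen, tol, burn_in=500, max_samples=50000):
    # Crude running-mean estimator: discard a burn-in, then average samples
    # until the estimate moves by much less than tol (or the budget runs out).
    samples = []
    prev_est = None
    for th in itertools.islice(chain_gen, burn_in, burn_in + max_samples):
        samples.append(th)
        if len(samples) % 1000 == 0:
            est = np.mean(samples, axis=0)
            if prev_est is not None and np.all(np.abs(est - prev_est) < 0.1 * tol):
                return est
            prev_est = est
    return np.mean(samples, axis=0)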
def check_flymc_converges(model, correct_moments, th_stepper):
    tol = 0.2
    q = 0.5
    z_stepper = ff.zStepMH(model.log_pseudo_lik, q)
    th_init = model.draw_from_prior()
    z_init = ff.BrightnessVars(model.N)
    chain_gen = chain_generator_th_z(th_stepper, z_stepper,
                                     th_init, z_init)
    est_moments = mcmc_estimator(chain_gen, tol)
    assert(np.all(np.abs(est_moments - correct_moments) < tol))
    def run_model(model, q=0.1, fly=False):
        '''
        Run one MCMC chain for `model`, optionally using FlyMC (fly=True).
        Returns the final theta, per-iteration likelihood-eval counts, and
        the most recent block of theta samples.
        '''
        th = np.random.randn(D) * th0
        if fly:
            z = ff.BrightnessVars(N, range(int(q * N)))
        else:
            z = ff.BrightnessVars(N, range(N))
        th_stepper = ff.ThetaStepMH(model.log_p_joint, stepsize)
        if fly:
            z__stepper = ff.zStepMH(model.log_pseudo_lik, q)
        th_lists = []  # trace of th
        num_rejects_list = []  # th_stepper.num_rejects recorded at each iteration
        num_iter_list = []  # number of num_lik_evals for each iteration
        for _ in range(N_steps):
            num_lik_prev = model.num_lik_evals
            if _ % N_ess == 0 and _ > 0:
                #print pypmc.tools.convergence.ess(th_lists) # TODO: is this correct?
                #print ess(th_lists)
                np.savetxt('trace-untuned-{0}.csv'.format(_),
                           np.array(th_lists))
                th_lists = []
            th = th_stepper.step(th, z)  # Markov transition step for theta

            if fly:
                z = z__stepper.step(th, z)  # Markov transition step for z
            th_lists.append(th)
            num_rejects_list.append(th_stepper.num_rejects)
            num_iter_list.append(model.num_lik_evals - num_lik_prev)
            print "Accept rate: {0}".format(1.0 - sum(num_rejects_list) /
                                            float(_ + 1))
            print "Likelihood evals in iter {0}: {1}".format(
                _, model.num_lik_evals - num_lik_prev)
            print "Neg log posterior: {0}".format(
                -1.0 * model.log_p_marg(th, increment_ctr=False))
            print "Number bright points: {0}".format(len(z.bright))

        return th, num_iter_list, th_lists
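
The trace segments written by np.savetxt above can be reloaded later for diagnostics. A minimal sketch, assuming the 'trace-untuned-{i}.csv' naming used in the code and a one-dimensional theta per sample:

import glob

# Stack every saved trace segment and summarise the posterior over theta.
segments = [np.loadtxt(f) for f in sorted(glob.glob('trace-untuned-*.csv'))]
trace = np.vstack(segments)
print("samples:", trace.shape[0])
print("posterior mean:", trace.mean(axis=0))
print("posterior std: ", trace.std(axis=0))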
Example 9
def logistic_regression_chain(x,
                              t,
                              N_iter=100,
                              stepsize=1,
                              th0=1,
                              q=0.1,
                              y0=1,
                              seed=None):

    # Set seed
    npr.seed(seed)

    # Obtain joint distributions over z and th and set step functions
    model = ff.LogisticModel(x, t, th0=th0, y0=y0)
    z__stepper = ff.zStepMH(model.log_pseudo_lik, q)
    th_stepper = ff.ThetaStepMH(model.log_p_joint, stepsize)

    # Initialize
    N, D = x.shape
    th = np.random.randn(D) * th0
    z = ff.BrightnessVars(N)

    # run chain
    th_chain = np.zeros((N_iter, ) + th.shape)
    z_chain = np.zeros((N_iter, N), dtype=bool)

    for i in range(N_iter):
        th = th_stepper.step(th, z)
        z = z__stepper.step(th, z)
        # Record the intermediate results
        th_chain[i, :] = th.copy()
        z_chain[i, z.bright] = 1

    print "th0 = ", th0, "frac accepted is", th_stepper.frac_accepted, "bright frac is:", np.mean(
        z_chain)
    return th_chain, z_chain
Example 10
def run_model(model, q=0.1, fly=False, verbose=False):
    '''
    Run one MCMC chain for `model`: N_steps tuning iterations followed by
    N_ess sampling iterations, optionally using FlyMC (fly=True).
    Returns a dict of performance traces.
    '''
    th = np.random.randn(D) * th0

    # Init steppers
    if fly:
        z = ff.BrightnessVars(N, range(int(q * N)))
    else:
        z = ff.BrightnessVars(N, range(N))
    th_stepper = ff.ThetaStepMH(model.log_p_joint, stepsize)
    if fly:
        z_stepper = ff.zStepMH(model.log_pseudo_lik, q)

    # Trace list
    num_rejects_list = []
    acceptance_list = []
    num_lik_evals_list = []
    neg_log_list = []
    performance_dict = {}
    # Trace - Sampling
    sample_num_lik_evals_list = []
    sample_th_list = []

    # Run chain
    for _ in range(N_steps + N_ess):
        num_lik_prev = model.num_lik_evals
        # Markov transition
        th = th_stepper.step(th, z)  # Markov transition step for theta
        if fly:
            z = z_stepper.step(th, z)  # Markov transition step for z

        # Record performance
        num_rejects = th_stepper.num_rejects
        num_lik_evals = model.num_lik_evals - num_lik_prev
        acceptance = 1.0 - sum(num_rejects_list) / float(_ + 1)
        neg_log = -1.0 * model.log_p_marg(th, increment_ctr=False)

        if _ < N_steps:
            num_rejects_list.append(num_rejects)
            num_lik_evals_list.append(num_lik_evals)
            acceptance_list.append(acceptance)
            neg_log_list.append(neg_log)
        else:
            # _ >= N_steps
            sample_num_lik_evals_list.append(num_lik_evals)
            sample_th_list.append(th)

        # Print info
        if verbose or (_ % 50 == 0):
            print "Accept rate: {0}".format(acceptance)
            print "Likelihood evals in iter {0}: {1}".format(_, num_lik_evals)
            print "Neg log posterior: {0}".format(neg_log)
            print "Number bright points: {0}".format(len(z.bright))

    sample_evals = sum(sample_num_lik_evals_list)
    np.savetxt('trace-{0}-{1}.csv'.format(model.name, N_ess),
               np.array(sample_th_list))

    performance_dict[KEY_NAME] = model.name
    performance_dict[KEY_NUM_REJECTS] = num_rejects_list
    performance_dict[KEY_LIK_EVALS] = num_lik_evals_list
    performance_dict[KEY_ACCEPTANCE] = acceptance_list
    performance_dict[KEY_NEG_LOG] = neg_log_list
    performance_dict[KEY_SAMPLE_EVALS] = sample_evals

    return performance_dict
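
A hedged sketch of how this run_model might be used to compare a full-data chain against a FlyMC chain, with x and t generated as in Example 1 and the module-level KEY_* constants and globals (N, D, th0, y0, stepsize, N_steps, N_ess) assumed to be defined as above:

model = ff.LogisticModel(x, t, th0=th0, y0=y0)
model.name = 'logistic'  # run_model uses model.name when naming the trace file

perf_full = run_model(model, fly=False)        # every point bright on every step
perf_fly = run_model(model, q=0.1, fly=True)   # FlyMC with resampling probability q

# Compare likelihood evaluations spent during the N_ess sampling iterations.
print("full MCMC sample evals:", perf_full[KEY_SAMPLE_EVALS])
print("FlyMC sample evals:   ", perf_fly[KEY_SAMPLE_EVALS])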