Esempio n. 1
0
def jump_plot(distribution, nsamples=100, **kwargs):
    """Scatter-plot MJHMC samples against cumulative dwelling time.

    Instantiates ``distribution`` with a single particle in one dimension,
    draws ``nsamples`` samples, plots dwelling time on the x-axis and the
    sample value on the y-axis, and returns the trajectory as a DataFrame.
    1D only.

    :param distribution: distribution class, called with ndims=1, nbatch=1
    :param nsamples: number of sampling iterations to run
    :returns: DataFrame with columns x, t, transitions
    """
    distr = distribution(ndims=1, nbatch=1, **kwargs)
    sampler = MarkovJumpHMC(distr.Xinit, distr.E, distr.dEdX,
                            epsilon=0.3, beta=0.2, num_leapfrog_steps=5)
    samples = []
    dwell = []
    moves = []
    # transition counts already attributed, keyed by operator label
    prev = {"L": 0, "F": 0, "R": 0}
    for _ in xrange(nsamples):
        sampler.sampling_iteration()
        samples.append(sampler.state.X[0, 0])
        dwell.append(sampler.dwelling_times[0])
        # attribute this iteration to the first operator whose count advanced
        for label, count in (("L", sampler.L_count),
                             ("F", sampler.F_count),
                             ("R", sampler.R_count)):
            if count - prev[label] == 1:
                moves.append(label)
                prev[label] += 1
                break
    times = np.cumsum(dwell)
    plt.scatter(times, samples)
    t_col = np.array(times).reshape(len(times), 1)
    x_col = np.array(samples).reshape(len(samples), 1)
    move_col = np.array(moves).reshape(len(moves), 1)
    data = np.concatenate((x_col, t_col, move_col), axis=1)
    return pd.DataFrame(data, columns=["x", "t", "transitions"])
Esempio n. 2
0
def generate_initialization(distribution):
    """Burn in samplers on ``distribution`` to produce fair initial states.

    Runs MarkovJumpHMC for BURN_IN_STEPS - VAR_STEPS iterations, computes an
    online variance estimate, then repeats the burn-in with a ControlHMC
    sampler to obtain the reference variance estimate.

    :param distribution: Distribution object. Must have nbatch == MAX_N_PARTICLES
    :returns: a set of fair initial states and an estimate of the variance for emc and true both
    :rtype: tuple: (array of shape (distribution.ndims, MAX_N_PARTICLES), float, float)
    """
    print('Generating fair initialization for {} by burning in {} steps'.format(
        type(distribution).__name__, BURN_IN_STEPS))
    assert BURN_IN_STEPS > VAR_STEPS
    assert distribution.nbatch == MAX_N_PARTICLES

    burn_steps = BURN_IN_STEPS - VAR_STEPS
    mjhmc = MarkovJumpHMC(distribution=distribution, resample=False)
    for _ in xrange(burn_steps):
        mjhmc.sampling_iteration()
    assert mjhmc.resample == False

    emc_var_estimate, mjhmc = online_variance(mjhmc, distribution)
    # discard v: under the joint p(x,v) = p(x)p(v), x alone is a fair sample
    fair_x = mjhmc.state.copy().X

    # disable mjhmc mode first, otherwise the control run recurses back here
    distribution.mjhmc = False
    control = ControlHMC(distribution=distribution.reset())
    for _ in xrange(burn_steps):
        control.sampling_iteration()
    true_var_estimate, control = online_variance(control, distribution)

    return (fair_x, emc_var_estimate, true_var_estimate)
Esempio n. 3
0
def generate_initialization(distribution):
    """Burn in MJHMC and control samplers, returning fair states and variances.

    Runs MarkovJumpHMC for BURN_IN_STEPS - VAR_STEPS iterations with an online
    variance estimate, then does the same with a ControlHMC sampler for the
    reference ("true") variance, returning both samplers' endpoint states.

    :param distribution: Distribution object. Must have nbatch == MAX_N_PARTICLES
    :returns: a set of fair initial states and an estimate of the variance for emc and true both
    :rtype: tuple: (array of shape (distribution.ndims, MAX_N_PARTICLES), float, float)
    """
    print(
        'Generating fair initialization for {} by burning in {} steps'.format(
            type(distribution).__name__, BURN_IN_STEPS))
    assert BURN_IN_STEPS > VAR_STEPS
    # tensorflow graphs are sized to nbatch, so rebuild at MAX_N_PARTICLES
    if distribution.backend == 'tensorflow':
        distribution.build_graph()

    burn_steps = BURN_IN_STEPS - VAR_STEPS
    mjhmc = MarkovJumpHMC(distribution=distribution, resample=False)
    for _ in xrange(burn_steps):
        mjhmc.sampling_iteration()
    assert mjhmc.resample == False

    emc_var_estimate, mjhmc = online_variance(mjhmc, distribution)
    # discard v: under the joint p(x,v) = p(x)p(v), x alone is fair
    mjhmc_endpt = mjhmc.state.copy().X

    # disable mjhmc mode first, otherwise the control run recurses back here
    distribution.mjhmc = False
    try:
        distribution.gen_init_X()
    except NotImplementedError:
        print("No explicit init method found, using mjhmc endpoint")

    # reset energy/gradient call counters before the control burn-in
    distribution.E_count = 0
    distribution.dEdX_count = 0

    control = ControlHMC(distribution=distribution)
    for _ in xrange(burn_steps):
        control.sampling_iteration()
    true_var_estimate, control = online_variance(control, distribution)
    control_endpt = control.state.copy().X

    return mjhmc_endpt, emc_var_estimate, true_var_estimate, control_endpt
Esempio n. 4
0
def generate_initialization(distribution):
    """Produce fair initial states by burning in MJHMC and a control sampler.

    Both samplers run for BURN_IN_STEPS - VAR_STEPS iterations followed by an
    online variance pass; the endpoints of both chains are returned along with
    the two variance estimates.

    :param distribution: Distribution object. Must have nbatch == MAX_N_PARTICLES
    :returns: a set of fair initial states and an estimate of the variance for emc and true both
    :rtype: tuple: (array of shape (distribution.ndims, MAX_N_PARTICLES), float, float)
    """
    print('Generating fair initialization for {} by burning in {} steps'.format(
        type(distribution).__name__, BURN_IN_STEPS))
    assert BURN_IN_STEPS > VAR_STEPS
    # must rebuild graph to nbatch=MAX_N_PARTICLES
    if distribution.backend == 'tensorflow':
        distribution.build_graph()

    def burn(sampler):
        # advance the sampler through the pre-variance burn-in window
        for _ in xrange(BURN_IN_STEPS - VAR_STEPS):
            sampler.sampling_iteration()

    mjhmc = MarkovJumpHMC(distribution=distribution, resample=False)
    burn(mjhmc)
    assert mjhmc.resample == False

    emc_var_estimate, mjhmc = online_variance(mjhmc, distribution)
    # v is independent of x under p(x,v) = p(x)p(v), so only x is kept
    mjhmc_endpt = mjhmc.state.copy().X

    distribution.mjhmc = False  # otherwise will go into recursive loop
    try:
        distribution.gen_init_X()
    except NotImplementedError:
        print("No explicit init method found, using mjhmc endpoint")

    # zero the call counters so the control run is measured from scratch
    distribution.E_count = 0
    distribution.dEdX_count = 0

    control = ControlHMC(distribution=distribution)
    burn(control)
    true_var_estimate, control = online_variance(control, distribution)
    control_endpt = control.state.copy().X

    return mjhmc_endpt, emc_var_estimate, true_var_estimate, control_endpt
Esempio n. 5
0
def jump_plot(distribution, nsamples=100, **kwargs):
    """Plot a 1D MJHMC trajectory against cumulative dwelling time.

    Builds a single-particle, one-dimensional instance of ``distribution``,
    draws ``nsamples`` samples, scatter-plots sample value against accumulated
    dwelling time, and returns the trajectory as a DataFrame. 1D only.

    :param distribution: distribution class, called with ndims=1, nbatch=1
    :param nsamples: number of sampling iterations to run
    :returns: DataFrame with columns x, t, transitions
    """
    target = distribution(ndims=1, nbatch=1, **kwargs)
    sampler = MarkovJumpHMC(target.Xinit, target.E, target.dEdX,
                            epsilon=.3, beta=.2, num_leapfrog_steps=5)
    values, dwells, labels = [], [], []
    seen = [0, 0, 0]  # transitions already counted for L, F, R respectively
    for _ in xrange(nsamples):
        sampler.sampling_iteration()
        values.append(sampler.state.X[0, 0])
        dwells.append(sampler.dwelling_times[0])
        if sampler.L_count - seen[0] == 1:
            labels.append("L")
            seen[0] += 1
        elif sampler.F_count - seen[1] == 1:
            labels.append("F")
            seen[1] += 1
        elif sampler.R_count - seen[2] == 1:
            labels.append("R")
            seen[2] += 1
    times = np.cumsum(dwells)
    plt.scatter(times, values)
    # stack the three series as column vectors: x first, then t, then labels
    cols = [np.array(series).reshape(len(series), 1)
            for series in (values, times, labels)]
    data = np.concatenate((cols[0], cols[1], cols[2]), axis=1)
    return pd.DataFrame(data, columns=['x', 't', 'transitions'])