Code example #1
0
def log_likelihood_per_sample(input_parameters):
    """Return log prior + particle-filter log evidence for one sample.

    The unconstrained sample ``input_parameters`` is mapped to natural
    scales (sigmoid for probabilities, exp for positive quantities), the
    log prior is evaluated there (including the log-Jacobian of each
    transformation), and a bootstrap regime-switching particle filter
    estimates the data log evidence.

    NOTE(review): relies on module-level names -- ``param`` (known noise
    variances at indices 6 and 7), ``num_stages``, ``x``, ``y``,
    ``penguins``, ``pf``, ``sp`` (scipy.stats) and
    ``log_jacobian_sigmoid`` -- confirm they are defined by the caller.
    """
    # Re-seed from OS entropy so parallel workers get independent streams.
    np.random.seed()
    # Number of particles for the filter.
    num_particles = 2000
    # Transform the sample to constrained scales.
    z = np.copy(input_parameters)
    z[0] = 1 / (1 + np.exp(-z[0]))  # juvenile survival probability
    z[1] = np.exp(z[1])             # positive offset for the second regime
    # Second-regime juvenile survival: sigmoid of the raw parameter plus
    # the positive offset, so it always exceeds the first regime's value.
    psi_juv_big = 1 / (1 + np.exp(-(input_parameters[0] + z[1])))
    z[2] = 1 / (1 + np.exp(-z[2]))  # adult survival probability
    z[4] = np.exp(z[4])             # positive reproduction parameter
    z[5] = 1 / (1 + np.exp(-z[5]))  # regime-0 probability
    # Log prior on the constrained scale; each transformed coordinate also
    # contributes its log-Jacobian (the raw value for exp transforms).
    log_prior = sp.beta.logpdf(z[0], 3, 3) + log_jacobian_sigmoid(
        input_parameters[0])
    # BUG FIX: scipy's gamma signature is (x, a, loc=0, scale=1), so the
    # original positional call sp.gamma.logpdf(z[1], 0.001, 0.001) set
    # loc=0.001 (shifting the support to (0.001, inf)) instead of encoding
    # the intended vague Gamma(shape=0.001, rate=0.001) prior.  Use the
    # shape/scale parameterization explicitly (scale = 1 / rate).
    log_prior += sp.gamma.logpdf(z[1], a=0.001,
                                 scale=1.0 / 0.001) + input_parameters[1]
    log_prior += sp.beta.logpdf(z[2], 3, 3) + log_jacobian_sigmoid(
        input_parameters[2])
    log_prior += sp.norm.logpdf(z[3], 0, 1)  # z[3] stays unconstrained
    log_prior += sp.norm.logpdf(z[4], 0, 0.1) + input_parameters[4]
    log_prior += sp.beta.logpdf(z[5], 1, 9) + log_jacobian_sigmoid(
        input_parameters[5])
    # Two candidate regimes differing only in juvenile survival; the noise
    # variances param[6]/param[7] are treated as known.
    regimes = [
        penguins.AgeStructuredModel(psi_juv=z[0],
                                    psi_adu=z[2],
                                    alpha_r=z[3],
                                    beta_r=z[4],
                                    var_s=param[6],
                                    var_c=param[7],
                                    nstage=num_stages),
        penguins.AgeStructuredModel(psi_juv=psi_juv_big,
                                    psi_adu=z[2],
                                    alpha_r=z[3],
                                    beta_r=z[4],
                                    var_s=param[6],
                                    var_c=param[7],
                                    nstage=num_stages)
    ]
    # Regime dynamics: i.i.d. draws, regime 0 with probability z[5].
    draw_regimes = lambda model_idx, num_samp: np.random.choice(
        np.arange(start=0, stop=2),
        num_samp,
        replace=True,
        p=np.array([z[5], 1 - z[5]]))
    regimes_log_pdf = lambda model_idx: model_idx * np.log(1 - z[5]) + (
        1 - model_idx) * np.log(z[5])
    # Create the regime-switching system.
    model = pf.MultiRegimeSSM(regimes, draw_regimes, regimes_log_pdf)
    # Initial particles: observed initial state plus integer jitter.
    # NOTE(review): np.random.randint's high is exclusive, so the jitter
    # range is [-10, 9] -- confirm the asymmetry is intended.
    x_init = np.array([x[0]]).T + np.random.randint(
        low=-10, high=10, size=(2 * num_stages - 2, num_particles))
    # Run the particle filter and return the log joint.
    output = pf.brspf(y, model, x_init)
    return output.log_evidence + log_prior
Code example #2
0
def log_likelihood_per_sample(input_parameters):
    """Evaluate the log joint (prior + data evidence) for one sample.

    Maps the unconstrained sample onto its natural scales, evaluates the
    prior there (with the log-Jacobian of each transformation), then
    accumulates the particle-filter log evidence over every site.
    """
    # Re-seed from OS entropy so concurrent evaluations are independent.
    np.random.seed()
    num_particles = 1500

    # Constrained-scale copy of the sample: sigmoid for probabilities,
    # exp for strictly positive quantities.
    theta = np.copy(input_parameters)
    theta[0] = 1 / (1 + np.exp(-theta[0]))
    theta[1] = 1 / (1 + np.exp(-theta[1]))
    theta[3] = np.exp(theta[3])
    theta[4] = np.exp(theta[4])
    theta[5] = 1 / (1 + np.exp(-theta[5]))

    # Start the log joint at the log prior; each transformed coordinate
    # also contributes its log-Jacobian (the raw value for exp transforms).
    log_joint = log_jacobian_sigmoid(input_parameters[0]) + sp.beta.logpdf(
        theta[0], alpha_juv0, beta_juv0)
    log_joint += log_jacobian_sigmoid(input_parameters[1]) + sp.beta.logpdf(
        theta[1], alpha_adu0, beta_adu0)
    log_joint += sp.norm.logpdf(theta[2], mu_int_pb0, np.sqrt(var_int_pb0))
    log_joint += input_parameters[3] + sp.invgamma.logpdf(
        theta[3], a=alpha_diff0, scale=beta_diff0)
    log_joint += input_parameters[4] + sp.norm.logpdf(
        theta[4], mu_slope_pb0, np.sqrt(var_slope_pb0))
    log_joint += log_jacobian_sigmoid(input_parameters[5]) + sp.beta.logpdf(
        theta[5], alpha_gamma0, beta_gamma0)

    # Two regimes differing only in the reproduction intercept; the noise
    # variances are treated as known.
    shared = dict(psi_juv=theta[0],
                  psi_adu=theta[1],
                  beta_r=theta[4],
                  var_s=param[6],
                  var_c=param[7],
                  nstage=num_stages)
    regimes = [
        penguins.AgeStructuredModel(alpha_r=theta[2], **shared),
        penguins.AgeStructuredModel(alpha_r=theta[2] + theta[3], **shared),
    ]

    def draw_regimes(model_idx, num_samp):
        # i.i.d. regime indicators: regime 0 with probability theta[5].
        return np.random.choice(np.arange(start=0, stop=2),
                                num_samp,
                                replace=True,
                                p=np.array([theta[5], 1 - theta[5]]))

    def regimes_log_pdf(model_idx):
        return (model_idx * np.log(1 - theta[5]) +
                (1 - model_idx) * np.log(theta[5]))

    # Create the regime-switching system.
    model = pf.MultiRegimeSSM(regimes, draw_regimes, regimes_log_pdf)

    # Accumulate the log evidence of every site's time series.
    for site in range(num_sites):
        # Initial particles: site's first observation plus integer jitter.
        init_particles = np.array([x[site, 0]]).T + np.random.randint(
            low=-1, high=1, size=(2 * num_stages - 2, num_particles))
        result = pf.brspf(y[site], model, init_particles)
        log_joint += result.log_evidence
    return log_joint
Code example #3
0
                                var_s=param[6],
                                var_c=param[7],
                                nstage=num_stages)
]

# Regime dynamics: i.i.d. regime indicators, with regime 0 drawn
# using probability param[5].
def regime_dynamics_rand(model_idx, num_samp):
    return np.random.choice(np.arange(start=0, stop=2),
                            num_samp,
                            replace=True,
                            p=np.array([param[5], 1 - param[5]]))


def regime_dynamics_log_pdf(model_idx):
    return (model_idx * np.log(1 - param[5]) +
            (1 - model_idx) * np.log(param[5]))


# Build the multi-regime state-space model used to generate synthetic data.
model = pf.MultiRegimeSSM(candidate_models, regime_dynamics_rand,
                          regime_dynamics_log_pdf)

# Random initial abundances for every site and stage.
x_init = np.random.randint(low=500,
                           high=2000,
                           size=(num_sites, 2 * num_stages - 2))

# Total length of the generated series.
time_generate = 60

# Discard the first points as burn-in so the retained series starts
# from a stabilized state.
cut_off = 20
time_length = time_generate - cut_off

# Fraction of adult observations to mark as missing.
missing_percent_adults = 0.3
Code example #4
0
def log_likelihood_per_sample(input_parameters):
    """Return the log joint (log prior + per-site log evidence) for one
    unconstrained sample of the single-regime immigration model.

    Cleanup: removed the large blocks of commented-out dead code (the
    stale two-regime variant and the disabled initial-state heuristics)
    that obscured the live logic; runtime behavior is unchanged.

    NOTE(review): depends on module-level names -- ``y``, ``start_year``,
    ``end_year``, ``num_sites``, ``num_stages``, ``var_err``, the prior
    hyperparameters, ``penguins``, ``pf``, ``sp`` (scipy.stats) and
    ``log_jacobian_sigmoid`` -- confirm they are defined by the caller.
    """
    # Re-seed from OS entropy so parallel workers get independent streams.
    np.random.seed()
    # Number of particles for the filter.
    num_particles = 1000
    # Transform to constrained scales: sigmoid for survival probabilities,
    # exp for the positive slope and immigration rate.
    z = np.copy(input_parameters)
    z[0] = 1 / (1 + np.exp(-z[0]))  # juvenile survival probability
    z[1] = 1 / (1 + np.exp(-z[1]))  # adult survival probability
    z[3] = np.exp(z[3])             # positive reproduction slope
    z[4] = np.exp(z[4])             # positive immigration rate
    # Log prior with transformation Jacobians (exp contributes the raw
    # parameter; sigmoid contributes log_jacobian_sigmoid).
    log_prior = log_jacobian_sigmoid(input_parameters[0]) + sp.beta.logpdf(
        z[0], alpha_juv0, beta_juv0)
    log_prior += log_jacobian_sigmoid(input_parameters[1]) + sp.beta.logpdf(
        z[1], alpha_adu0, beta_adu0)
    log_prior += sp.norm.logpdf(z[2], mu_int_pb0, np.sqrt(var_int_pb0))
    log_prior += input_parameters[3] + sp.norm.logpdf(
        z[3], mu_slope_pb0, np.sqrt(var_slope_pb0))
    log_prior += input_parameters[4] + sp.norm.logpdf(
        z[4], mu_im_rate, np.sqrt(var_im_rate))
    # Start the log joint at the log prior, then add each site's evidence.
    log_joint = log_prior
    # Single regime with immigration; noise variances are treated as known.
    regimes = [penguins.AgeStructuredModel(psi_juv=z[0], psi_adu=z[1],
                                           alpha_r=z[2], beta_r=z[3],
                                           var_s=var_err, var_c=var_err,
                                           nstage=num_stages, phi=z[4],
                                           immigration=True)]
    # Degenerate regime dynamics: only one model, drawn with probability 1
    # (the log pdf is identically zero).
    draw_regimes = lambda model_idx, num_samp: np.random.choice(
        np.arange(start=0, stop=1), num_samp, replace=True, p=np.array([1]))
    regimes_log_pdf = lambda model_idx: (1 - model_idx) * np.log(1)
    # Create the (single-)regime switching system.
    model = pf.MultiRegimeSSM(regimes, draw_regimes, regimes_log_pdf)
    for k in range(num_sites):
        # Restrict each site's series to its observed year range.
        y_current = y[k, start_year[k]:end_year[k] + 1, :]
        # Flat initial guess for the latent stage abundances.
        initial_states = 10 * np.ones(2 * num_stages - 2)
        # Integer jitter applied around the initial guess.
        # NOTE(review): randint's high is exclusive, so the jitter range
        # is [-err, err - 1] -- confirm the asymmetry is intended.
        err = 5
        init_particles = np.array([initial_states]).T + np.random.randint(
            low=-err, high=err, size=(2 * num_stages - 2, num_particles))
        # Run the particle filter and accumulate the site's log evidence.
        output = pf.brspf(y_current, model, init_particles)
        log_joint += output.log_evidence
    return log_joint