Example #1
add_network_stats(s_results)
s_results.new('Subnetwork kappa', 'm', lambda d, f: d.base_model.kappa)


def f_c(c):
    return ((lambda d, f: d.base_model.beta[c]),
            (lambda d, f: f.base_model.beta[c]))


for c in covariates:
    # Need to do this hackily to avoid for-loop/lambda-binding weirdness.
    f_true, f_estimated = f_c(c)
    s_results.new('True beta_{%s}' % c, 'm', f_true)
    s_results.new('Estimated beta_{%s}' % c, 'm', f_estimated)
s_results.new('Class mismatch', 'n',
              lambda n: minimum_disagreement(n.node_covariates['z_true'][:],
                                             n.node_covariates['z'][:]))


def rel_mse_p_ij(n, d, f):
    P = d.edge_probabilities(n)
    return rel_mse(f.edge_probabilities(n), f.baseline(n), P)


s_results.new('Rel. MSE(P)', 'nm', rel_mse_p_ij)


def rel_mse_logit_p_ij(n, d, f):
    logit_P = logit(d.edge_probabilities(n))
    logit_Q = f.baseline_logit(n)
    return rel_mse(logit(f.edge_probabilities(n)), logit_Q, logit_P)


s_results.new('Rel. MSE(logit_P)', 'nm', rel_mse_logit_p_ij)
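
A note on the "for-loop/lambda-binding weirdness" that f_c works around: Python closures capture the loop variable by reference, so lambdas created directly inside the loop would all read the final value of c once the loop has finished. A minimal, self-contained sketch of the pitfall and the factory-function fix (plain Python, independent of this codebase):

# Buggy: all lambdas close over the same variable c and see its final value.
getters_buggy = []
for c in ['a', 'b']:
    getters_buggy.append(lambda: c)
print([g() for g in getters_buggy])    # prints ['b', 'b']

# Fixed: a factory function (the role f_c plays above) gives each lambda its
# own binding of c.
def make_getter(c):
    return lambda: c

getters_fixed = [make_getter(c) for c in ['a', 'b']]
print([g() for g in getters_fixed])    # prints ['a', 'b']
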
Example #2
net.new_node_covariate_int('z')

# Set up recording of results from experiment
s_results = Results(params['sub_sizes'], params['num_reps'], 'Stationary fit')
add_network_stats(s_results)
s_results.new('Subnetwork kappa', 'm', lambda d, f: d.base_model.kappa)


def f_c(c):
    return ((lambda d, f: d.base_model.beta[c]),
            (lambda d, f: f.base_model.beta[c]))


for c in covariates:
    # Need to do this hackily to avoid for-loop/lambda-binding weirdness.
    f_true, f_estimated = f_c(c)
    s_results.new('True beta_{%s}' % c, 'm', f_true)
    s_results.new('Estimated beta_{%s}' % c, 'm', f_estimated)

s_results.new('Class mismatch', 'n',
              lambda n: minimum_disagreement(n.node_covariates['z_true'][:],
                                             n.node_covariates['z'][:]))


def rel_mse_p_ij(n, d, f):
    P = d.edge_probabilities(n)
    return rel_mse(f.edge_probabilities(n), f.baseline(n), P)


s_results.new('Rel. MSE(P)', 'nm', rel_mse_p_ij)


def rel_mse_logit_p_ij(n, d, f):
    logit_P = logit(d.edge_probabilities(n))
    logit_Q = f.baseline_logit(n)
    return rel_mse(logit(f.edge_probabilities(n)), logit_Q, logit_P)


s_results.new('Rel. MSE(logit_P)', 'nm', rel_mse_logit_p_ij)
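
rel_mse is defined elsewhere in this codebase and its implementation is not shown in this excerpt. Assuming it is a baseline-relative mean squared error (the MSE of the fitted edge probabilities divided by the MSE of the baseline predictor, both measured against the true probabilities), a minimal sketch of such a metric would look like:

import numpy as np

def rel_mse_sketch(estimate, baseline, truth):
    # Hypothetical stand-in for rel_mse, under the assumption stated above;
    # the actual function in this codebase may differ.
    mse_estimate = np.mean((np.asarray(estimate) - np.asarray(truth)) ** 2)
    mse_baseline = np.mean((np.asarray(baseline) - np.asarray(truth)) ** 2)
    return mse_estimate / mse_baseline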

all_results = { 's': s_results }
if params['fit_nonstationary']:
    n_results = s_results.copy()
    n_results.title = 'Nonstationary fit'
    all_results['n'] = n_results


def class_mismatch(n):
    truth = n.node_covariates['truth'][:]
    estimated = n.node_covariates['z'][:]
    return minimum_disagreement(truth, estimated, normalized=False)

# Calculate NLL at initialized block assignments
fit_model.fit_sem(net, cycles=1, sweeps=0, use_best=False, store_all=True)
baseline_nll = fit_model.sem_trace[0][0]

nll_trace = []
z_trace = np.empty((steps, N))
disagreement_trace = []
theta_trace = []

for step in range(steps):
    print(step)
    fit_model.fit_sem(net, 1, 2, store_all=True)
    #fit_model.fit_kl(net, 1)
    nll_trace.append(fit_model.nll(net))
    z_trace[step, :] = net.node_covariates['z'][:]
    disagreement = minimum_disagreement(net.node_covariates['value'][:],
                                        net.node_covariates['z'][:])
    disagreement_trace.append(disagreement)
    theta_trace.append(fit_model.base_model.beta['x'])

# Eliminate symmetry of 'z'
for step in range(steps):
    if np.mean(z_trace[step, :]) < 0.5:
        z_trace[step, :] = 1 - z_trace[step, :]
z_trace += np.random.normal(0, 0.01, (steps, N))

nll_trace = np.array(nll_trace)
nll_trace -= baseline_nll
disagreement_trace = np.array(disagreement_trace)

plt.figure()
plt.plot(np.arange(steps), theta_trace)
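
The excerpt plots only theta_trace; the other traces collected in the loop can be inspected the same way. A minimal sketch using the same matplotlib calls (the axis labels are additions, not from the original script):

plt.figure()
plt.plot(np.arange(steps), nll_trace)
plt.xlabel('SEM step')
plt.ylabel('NLL relative to initial assignment')

plt.figure()
plt.plot(np.arange(steps), disagreement_trace)
plt.xlabel('SEM step')
plt.ylabel('Disagreement with true classes')

plt.show()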