Example #1
def good_sigma_evaluator_2(nodes=20, observations=10, numObservations=10, trials=10000, init=100, size=10):
    results = numpy.zeros(size)
    for i in range(0, size):
        G, sig, D = mcmc.generate_graph_and_paths(nodes, observations, numObservations)
        sigmas, sig_ind_prob, samples = mcmc.sig_mcmc_2(G, D, trials, init)
        # Estimated marginal probability of state 1 for each switch.
        bar1 = sig_ind_prob / samples
        # Error of the estimated marginals against the true sigma.
        results[i] = mcmc.real_distribution_error(bar1, sig)
    return results, numpy.mean(results), numpy.var(results)
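A minimal usage sketch, assuming the project's mcmc module and numpy are importable in the enclosing script:

errors, mean_error, error_variance = good_sigma_evaluator_2(trials=10000, size=10)
print('Mean error: {}, variance: {}'.format(mean_error, error_variance))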
Example #2
def run_convergence_experiment(nodes=20, observations=10, numObservations=10, trials=10000, init=100, chainSize=3):
    G, sig, D = mcmc.generate_graph_and_paths(nodes, observations, numObservations)
    chain_results_dict_array = []
    sig_ind_prob_array = numpy.zeros((chainSize, nodes))
    # Run chainSize independent chains on the same graph and observations.
    for i in range(0, chainSize):
        sigmas, sig_ind_prob, samples = mcmc.sig_mcmc_2(G, D, trials, init)
        chain_results_dict_array.append(sigmas)
        sig_ind_prob_array[i, :] = sig_ind_prob
        print('Chain {} processed...'.format(i + 1))
    print('Calculating R...')
    result = mcmc.simple_convergence_checker(nodes, chain_results_dict_array, sig_ind_prob_array, samples)
    return result
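A minimal usage sketch, assuming the project's mcmc module and numpy are importable in the enclosing script; the interpretation of the returned value depends on mcmc.simple_convergence_checker, which is not shown here:

R = run_convergence_experiment(nodes=20, trials=10000, chainSize=3)
print('Convergence statistic: {}'.format(R))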
Example #3
def good_sigma_evaluator_2(nodes=20,
                           observations=10,
                           numObservations=10,
                           trials=10000,
                           init=100,
                           size=10):
    results = numpy.zeros(size)
    for i in range(0, size):
        G, sig, D = mcmc.generate_graph_and_paths(nodes, observations,
                                                  numObservations)
        sigmas, sig_ind_prob, samples = mcmc.sig_mcmc_2(G, D, trials, init)
        bar1 = sig_ind_prob / samples
        results[i] = mcmc.real_distribution_error(bar1, sig)
    return results, numpy.mean(results), numpy.var(results)
Example #4
def evaluate_sigma_mcmc(nodes=20, observations=10, numObservations=10, trials=10000, init=100, size=10):
    results = numpy.zeros(size)
    for i in range(0, size):
        G, sig, D = mcmc.generate_graph_and_paths(nodes, observations, numObservations)
        sigmas, sig_ind_prob, samples = mcmc.sig_mcmc_2(G, D, trials, init)
        bar1 = sig_ind_prob / samples
        bar2 = numpy.ones(G.shape[0]) - bar1
        # Probability of the true sigma under the per-switch (independence) approximation.
        true_sig_prob = 1
        for j in range(0, nodes):
            if sig[j] == 1:
                true_sig_prob *= bar1[j]
            else:
                true_sig_prob *= bar2[j]
        results[i] = true_sig_prob
    # Return the per-run probabilities, their mean, and the sum of squared deviations from the mean.
    return results, numpy.mean(results), sum(numpy.power(results - numpy.mean(results), 2))
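The inner loop that scores the true sigma can also be written as a single vectorized expression; a minimal sketch, assuming sig can be viewed as a numpy array of 0/1 entries:

# Pick bar1[j] where sig[j] == 1 and bar2[j] otherwise, then multiply the selections.
true_sig_prob = numpy.prod(numpy.where(numpy.asarray(sig) == 1, bar1, bar2))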
Example #5
def run_sigma_experiment_2(nodes=20, observations=10, numObservations=10, trials=10000, init=100, fileIndex=0):
    G, sig, D = mcmc.generate_graph_and_paths(nodes, observations, numObservations)
    sigmas, sig_ind_prob, samples = mcmc.sig_mcmc_2(G, D, trials, init)

    print(sig)
    # bar1: estimated probability that each switch is in state 1 (plotted as 'L');
    # bar2: the complementary probability (plotted as 'R').
    bar1 = sig_ind_prob / samples
    bar2 = numpy.ones(G.shape[0]) - bar1

    # Stacked bar chart of the per-switch marginals.
    ind = numpy.arange(G.shape[0])
    p1 = plot.bar(ind, bar1, 0.35, color='r')
    p2 = plot.bar(ind, bar2, 0.35, color='b', bottom=bar1)

    plot.ylabel('L/R')
    plot.title('Switch index')
    plot.legend((p1[0], p2[0]), ('L', 'R'))
    plot.show()

    # Empirical joint distribution over the sigma configurations visited by the chain.
    bar = numpy.zeros(len(sigmas))
    label = numpy.zeros(len(sigmas))
    ind = numpy.arange(len(sigmas))
    i = 0
    for sigma, n in sigmas.items():
        bar[i] = n / float(samples)
        label[i] = mcmc.sigma_hash(sigma)
        i += 1
    p1 = plot.bar(ind, bar, 0.1, color='black')
    #plot.xticks(ind+0.1/2., tuple(label), rotation=90);
    plot.ylabel('Probability of sigma')
    plot.xlabel('Sigma index')
    plot.title('Joint probability of sigma')
    plot.axis([0, len(sigmas), 0, max(bar)])
    plot.show()

    # Probability of the true sigma under the per-switch (independence) approximation.
    true_sig_prob = 1
    for i in range(0, nodes):
        if sig[i] == 1:
            true_sig_prob *= bar1[i]
        else:
            true_sig_prob *= bar2[i]

    numpy.savetxt('test_results/sigma_experiment_2_ind_prob_L_{}_{}_{}_{}_{}.{}.txt'.format(nodes, observations, numObservations, trials, init, fileIndex), bar1)
    numpy.savetxt('test_results/sigma_experiment_2_search_space_{}_{}_{}_{}_{}.{}.txt'.format(nodes, observations, numObservations, trials, init, fileIndex), bar)
    numpy.savetxt('test_results/sigma_experiment_2_search_space_index_{}_{}_{}_{}_{}.{}.txt'.format(nodes, observations, numObservations, trials, init, fileIndex), label)

    print('Distribution error: {}'.format(mcmc.real_distribution_error(bar1, sig)))
    print('The probability of guessing the true sigma is {}'.format(true_sig_prob))
    print('posterior/prior = {}'.format(true_sig_prob / pow(0.5, nodes)))
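The last print compares true_sig_prob against pow(0.5, nodes), i.e. the probability of any single switch configuration under independent fair switches; for the default nodes=20 that baseline is 2**-20:

# Prior probability of a single sigma configuration for nodes=20.
print(pow(0.5, 20))  # 9.5367431640625e-07

Note that true_sig_prob multiplies per-switch marginals, so it equals the joint posterior probability of the true sigma only insofar as the switches are approximately independent under the posterior.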
Example #6
def calculate_final_distribution(G, O):
    sigmas, sig_ind_prob, samples = mcmc.sig_mcmc_2(G, [O], 10000, 100)

    bar1 = sig_ind_prob / samples
    bar2 = numpy.ones(G.shape[0]) - bar1

    ind = numpy.arange(G.shape[0])
    p1 = plot.bar(ind, bar1, 0.35, color='r')
    p2 = plot.bar(ind, bar2, 0.35, color='b', bottom=bar1)

    plot.ylabel('L/R')
    plot.title('Switch index')
    plot.legend((p1[0], p2[0]), ('L', 'R'))
    plot.show()

    bar = numpy.zeros(len(sigmas))
    label = numpy.zeros(len(sigmas))
    ind = numpy.arange(len(sigmas))
    i = 0
    for sigma, n in sigmas.items():
        bar[i] = n / float(samples)
        # Hash each visited sigma individually to use as its index label.
        label[i] = mcmc.sigma_hash(sigma)
        i += 1
    p1 = plot.bar(ind, bar, 0.1, color='black')
    #plot.xticks(ind+0.1/2., tuple(label));
    plot.ylabel('Probability of sigma')
    plot.xlabel('Sigma index')
    plot.title('Joint probability of sigma')
    plot.show()

    s = numpy.zeros(G.shape)
    for sigma, n in sigmas.items():
        sigma = numpy.array(sigma)
        # s2 is already normalized, i.e. it is
        # p(s, O | G, sigma)/p(O | G, sigma) = p(s | G, O, sigma)
        s2, _ = state_probabilities.state_probabilities(G, sigma, O)
        # multiply s2 by p(sigma | G, O)
        s2 *= n / samples
        # sum over all sigmas
        s += s2

    return s
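In the notation of the comments inside the loop, the final block of calculate_final_distribution performs a Monte Carlo marginalization over the sampled switch settings:

    p(s | G, O) = sum over sigma of p(s | G, O, sigma) * p(sigma | G, O)
               ~= sum over visited sigma of p(s | G, O, sigma) * n_sigma / samples

where n_sigma is the number of chain samples that landed on that particular sigma.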
Example #7
def run_convergence_experiment(nodes=20,
                               observations=10,
                               numObservations=10,
                               trials=10000,
                               init=100,
                               chainSize=3):
    G, sig, D = mcmc.generate_graph_and_paths(nodes, observations,
                                              numObservations)
    chain_results_dict_array = []
    sig_ind_prob_array = numpy.zeros((chainSize, nodes))
    for i in range(0, chainSize):
        sigmas, sig_ind_prob, samples = mcmc.sig_mcmc_2(G, D, trials, init)
        chain_results_dict_array.append(sigmas)
        sig_ind_prob_array[i, :] = sig_ind_prob
        print('Chain {} processed...'.format(i + 1))
    print('Calculating R...')
    result = mcmc.simple_convergence_checker(nodes, chain_results_dict_array,
                                             sig_ind_prob_array, samples)
    return result
Example #8
def calculate_final_distribution(G, O):
    sigmas, sig_ind_prob, samples = mcmc.sig_mcmc_2(G, [O], 10000, 100)

    bar1 = sig_ind_prob / samples
    bar2 = numpy.ones(G.shape[0]) - bar1

    ind = numpy.arange(G.shape[0])
    p1 = plot.bar(ind, bar1, 0.35, color='r')
    p2 = plot.bar(ind, bar2, 0.35, color='b', bottom=bar1)

    plot.ylabel('L/R')
    plot.title('Switch index')
    plot.legend((p1[0], p2[0]), ('L', 'R'))
    plot.show()

    bar = numpy.zeros(len(sigmas))
    label = numpy.zeros(len(sigmas))
    ind = numpy.arange(len(sigmas))
    i = 0
    for sigma, n in sigmas.items():
        bar[i] = n / float(samples)
        label[i] = mcmc.sigma_hash(sigma)
        i += 1
    p1 = plot.bar(ind, bar, 0.1, color='black')
    #plot.xticks(ind+0.1/2., tuple(label));
    plot.ylabel('Probability of sigma')
    plot.xlabel('Sigma index')
    plot.title('Joint probability of sigma')
    plot.show()

    s = numpy.zeros(G.shape)
    for sigma, n in sigmas.items():
        sigma = numpy.array(sigma)
        # s2 is already normalized, i.e. it is
        # p(s, O | G, sigma)/p(O | G, sigma) = p(s | G, O, sigma)
        s2, _ = state_probabilities.state_probabilities(G, sigma, O)
        # multiply s2 by p(sigma | G, O)
        s2 *= n / samples
        # sum over all sigmas
        s += s2

    return s
Example #9
def evaluate_sigma_mcmc(nodes=20,
                        observations=10,
                        numObservations=10,
                        trials=10000,
                        init=100,
                        size=10):
    results = numpy.zeros(size)
    for i in range(0, size):
        G, sig, D = mcmc.generate_graph_and_paths(nodes, observations,
                                                  numObservations)
        sigmas, sig_ind_prob, samples = mcmc.sig_mcmc_2(G, D, trials, init)
        bar1 = sig_ind_prob / samples
        bar2 = numpy.ones(G.shape[0]) - bar1
        true_sig_prob = 1
        for j in range(0, nodes):
            if (sig[j] == 1):
                true_sig_prob *= bar1[j]
            else:
                true_sig_prob *= bar2[j]
        results[i] = true_sig_prob
    return results, numpy.mean(results), sum(
        numpy.power(results - numpy.mean(results), 2))
Example #10
def run_sigma_experiment_2(nodes=20,
                           observations=10,
                           numObservations=10,
                           trials=10000,
                           init=100,
                           fileIndex=0):
    G, sig, D = mcmc.generate_graph_and_paths(nodes, observations,
                                              numObservations)
    sigmas, sig_ind_prob, samples = mcmc.sig_mcmc_2(G, D, trials, init)

    print(sig)
    bar1 = sig_ind_prob / samples
    bar2 = numpy.ones(G.shape[0]) - bar1

    ind = numpy.arange(G.shape[0])
    p1 = plot.bar(ind, bar1, 0.35, color='r')
    p2 = plot.bar(ind, bar2, 0.35, color='b', bottom=bar1)

    plot.ylabel('L/R')
    plot.title('Switch index')
    plot.legend((p1[0], p2[0]), ('L', 'R'))
    plot.show()

    bar = numpy.zeros(len(sigmas))
    label = numpy.zeros(len(sigmas))
    ind = numpy.arange(len(sigmas))
    i = 0
    for sigma, n in sigmas.items():
        bar[i] = n / float(samples)
        label[i] = mcmc.sigma_hash(sigma)
        i += 1
    p1 = plot.bar(ind, bar, 0.1, color='black')
    #plot.xticks(ind+0.1/2., tuple(label), rotation=90);
    plot.ylabel('Probability of sigma')
    plot.xlabel('Sigma index')
    plot.title('Joint probability of sigma')
    plot.axis([0, len(sigmas), 0, max(bar)])
    plot.show()

    true_sig_prob = 1
    for i in range(0, nodes):
        if (sig[i] == 1):
            true_sig_prob *= bar1[i]
        else:
            true_sig_prob *= bar2[i]

    numpy.savetxt(
        'test_results/sigma_experiment_2_ind_prob_L_{}_{}_{}_{}_{}.{}.txt'.
        format(nodes, observations, numObservations, trials, init,
               fileIndex), bar1)
    numpy.savetxt(
        'test_results/sigma_experiment_2_search_space_{}_{}_{}_{}_{}.{}.txt'.
        format(nodes, observations, numObservations, trials, init,
               fileIndex), bar)
    numpy.savetxt(
        'test_results/sigma_experiment_2_search_space_index_{}_{}_{}_{}_{}.{}.txt'
        .format(nodes, observations, numObservations, trials, init,
                fileIndex), label)

    print('Distribution error: {}'.format(
        mcmc.real_distribution_error(bar1, sig)))
    print('The probability of guessing the true sigma is {}'.format(
        true_sig_prob))
    print('posterior/prior = {}'.format(true_sig_prob / pow(0.5, nodes)))