# Example 1
def mcmc_chain(G, D, sig_prob=None, sampler=sample_sigma_uniformly):
    """Generate an endless Metropolis-Hastings chain over switch settings.

    Args:
        G: graph matrix; G.shape[0] is the number of switches/nodes.
        D: iterable of observation sequences; the target log-likelihood of
            a sigma is the sum of state_probabilities(G, sigma, O)[1]
            over every O in D.
        sig_prob: optional cache mapping tuple(sigma) -> log-likelihood.
            Mutated in place, so it can be shared across calls.
        sampler: proposal function, called as sampler(n=...) for the
            initial draw and sampler(current_sample) for proposals.

    Yields:
        The current sigma sample after each Metropolis-Hastings step.
    """
    if sig_prob is None:
        sig_prob = {}

    def log_likelihood(sigma):
        # Log p(D | G, sigma); observation sequences contribute additively.
        return sum(state_probabilities(G, sigma, O)[1] for O in D)

    sample = sampler(n=G.shape[0])
    prob = log_likelihood(sample)
    sig_prob[tuple(sample)] = prob

    while True:
        new_sample = sampler(sample)
        key = tuple(new_sample)
        try:
            new_prob = sig_prob[key]
        except KeyError:
            new_prob = sig_prob[key] = log_likelihood(new_sample)

        # Acceptance ratio for a symmetric proposal, computed from the
        # log-domain likelihood difference.
        alpha = math.exp(new_prob - prob)

        if random.random() < min(1, alpha):
            sample = new_sample
            prob = new_prob

        yield sample
# Example 2
def calculate_sigma(G, O):
    """Exhaustively compute the posterior p(sigma | G, O) over all
    2^n switch settings, returned as a defaultdict keyed by sigma tuple."""
    print("Calculating sigma...")
    num_switches = G.shape[0]

    # First pass: joint log-probability of each assignment of {1, 2}.
    log_probs = collections.defaultdict(int)
    normalizer = -math.inf
    for assignment in itertools.product((1, 2), repeat=num_switches):
        log_p = state_probabilities.state_probabilities(
            G, numpy.array(assignment), O)[1]
        log_probs[assignment] = log_p
        # Running log-sum-exp accumulation of the normalizing constant.
        normalizer = numpy.logaddexp(normalizer, log_p)

    # Second pass: leave log space and normalize in place.
    for assignment in log_probs:
        log_probs[assignment] = math.exp(log_probs[assignment] - normalizer)

    print("Calculated sigma.")
    return log_probs
# Example 3
def calculate_sigma(G, O):
    """Brute-force posterior over switch settings.

    Enumerates every sigma in {1, 2}^n, scores it with
    state_probabilities, and normalizes via log-sum-exp. Returns a
    defaultdict mapping each sigma tuple to p(sigma | G, O).
    """
    log_joint = collections.defaultdict(int)
    log_norm = -math.inf

    print("Calculating sigma...")
    for setting in itertools.product((1, 2), repeat=G.shape[0]):
        as_array = numpy.array(setting)
        _, lp = state_probabilities.state_probabilities(G, as_array, O)
        log_joint[setting] = lp
        # Accumulate log(sum(exp(lp))) without leaving log space.
        log_norm = numpy.logaddexp(log_norm, lp)

    # Convert each entry to a normalized linear-space probability.
    for setting, lp in log_joint.items():
        log_joint[setting] = math.exp(lp - log_norm)

    print("Calculated sigma.")
    return log_joint
# Example 4
def calculate_final_distribution(G, O):
    """Estimate p(s | G, O) by averaging over MCMC-sampled switch settings.

    Runs mcmc.sig_mcmc_2 to draw sigma samples, shows two bar charts
    (per-switch L/R marginals and the empirical joint sigma distribution),
    then returns the state distribution marginalized over sampled sigmas.
    """
    sigmas, sig_ind_prob, samples = mcmc.sig_mcmc_2(G, [O], 10000, 100)

    # Per-switch marginal probabilities of the two labels.
    bar1 = sig_ind_prob / samples
    bar2 = numpy.ones(G.shape[0]) - bar1

    ind = numpy.arange(G.shape[0])
    p1 = plot.bar(ind, bar1, 0.35, color='r')
    p2 = plot.bar(ind, bar2, 0.35, color='b', bottom=bar1)

    plot.ylabel('L/R')
    plot.title('Switch index')
    plot.legend((p1[0], p2[0]), ('L', 'R'))
    plot.show()

    # Empirical joint probability of each sampled sigma.
    bar = numpy.zeros(len(sigmas))
    label = numpy.zeros(len(sigmas))
    ind = numpy.arange(len(sigmas))
    for i, (sigma, n) in enumerate(sigmas.items()):
        bar[i] = n / float(samples)
        # NOTE(review): this hashes the whole `sigmas` dict, so every label
        # is identical — `mcmc.sigma_hash(sigma)` was probably intended.
        # Kept as-is to preserve behavior; confirm against mcmc.sigma_hash.
        label[i] = mcmc.sigma_hash(sigmas)
    p1 = plot.bar(ind, bar, 0.1, color='black')
    #plot.xticks(ind+0.1/2., tuple(label));
    plot.ylabel('Probability of sigma')
    plot.xlabel('Sigma index')
    plot.title('Joint probability of sigma')
    plot.show()

    s = numpy.zeros(G.shape)
    for sigma, n in sigmas.items():
        sigma = numpy.array(sigma)
        # s2 is already normalized, i.e. it is
        # p(s, O | G, sigma)/p(O | G, sigma) = p(s | G, O, sigma)
        s2, _ = state_probabilities.state_probabilities(G, sigma, O)
        # multiply s2 by p(sigma | G, O)
        s2 *= n / samples
        # sum over all sigmas
        s += s2

    return s
# Example 5
def calculate_final_distribution(G, O):
    """Estimate p(s | G, O) by averaging over MCMC-sampled switch settings.

    Draws sigma samples with mcmc.sig_mcmc_2, plots per-switch L/R
    marginals and the empirical joint sigma distribution, and returns the
    state distribution marginalized over the sampled sigmas.
    """
    sigmas, sig_ind_prob, samples = mcmc.sig_mcmc_2(G, [O], 10000, 100)

    # Per-switch marginal probability of each of the two labels.
    bar1 = sig_ind_prob / samples
    bar2 = numpy.ones(G.shape[0]) - bar1

    ind = numpy.arange(G.shape[0])
    p1 = plot.bar(ind, bar1, 0.35, color='r')
    p2 = plot.bar(ind, bar2, 0.35, color='b', bottom=bar1)

    plot.ylabel('L/R')
    plot.title('Switch index')
    plot.legend((p1[0], p2[0]), ('L', 'R'))
    plot.show()

    # Empirical joint probability of each sampled sigma.
    bar = numpy.zeros(len(sigmas))
    label = numpy.zeros(len(sigmas))
    ind = numpy.arange(len(sigmas))
    for i, (sigma, n) in enumerate(sigmas.items()):
        bar[i] = n / float(samples)
        # NOTE(review): hashes the whole `sigmas` dict, making all labels
        # identical — `mcmc.sigma_hash(sigma)` was probably intended.
        # Kept as-is to preserve behavior; confirm against mcmc.sigma_hash.
        label[i] = mcmc.sigma_hash(sigmas)
    p1 = plot.bar(ind, bar, 0.1, color='black')
    #plot.xticks(ind+0.1/2., tuple(label));
    plot.ylabel('Probability of sigma')
    plot.xlabel('Sigma index')
    plot.title('Joint probability of sigma')
    plot.show()

    s = numpy.zeros(G.shape)
    for sigma, n in sigmas.items():
        sigma = numpy.array(sigma)
        # s2 is already normalized, i.e. it is
        # p(s, O | G, sigma)/p(O | G, sigma) = p(s | G, O, sigma)
        s2, _ = state_probabilities.state_probabilities(G, sigma, O)
        # multiply s2 by p(sigma | G, O)
        s2 *= n / samples
        # sum over all sigmas
        s += s2

    return s
# Example 6
def observations_needed(max_nodes=100, runs=50):
    """Calculates the number of observations needed
    to get a good estimate of the stop position."""

    failure_probabilities = []
    with open("observations_needed.dat", 'w') as obs_file:
        # Sweep even graph sizes; each size is averaged over `runs`
        # successful simulations.
        for nodes in range(6, max_nodes + 1, 2):
            total_observations_needed = 0
            failed_runs = 0

            i = 0
            while True:
                # Fresh random graph/settings and a 200-step simulated
                # observation sequence per attempt.
                G, sigma = generate_g.generate_graph_and_settings(nodes)
                O, actual_path = generate_g.simulate_train(G, sigma, 200)

                # Grow the observation prefix until the most likely state
                # exceeds 90% posterior probability.
                for observations in range(1, len(O)):
                    s, _ = state_probabilities.state_probabilities(
                        G, sigma, O[:observations])
                    if s.max() > 0.90:
                        break
                else:
                    # for-else: no prefix reached the threshold. Count the
                    # attempt as failed and retry without incrementing i.
                    # NOTE(review): `s` is unbound here when len(O) < 2.
                    print(s.max())
                    failed_runs += 1
                    continue

                total_observations_needed += observations
                i += 1
                if i >= runs:
                    break

            obs_file.write("{}\t{}\n".format(nodes,
                                             total_observations_needed / runs))
            print(nodes, total_observations_needed / runs)
            # Failed attempts are extra trials beyond the `runs` successes.
            failure_probability = failed_runs / (runs + failed_runs)
            print("Failure probability: {}".format(failure_probability))
            failure_probabilities.append(failure_probability)

    print("Average failure probability: {}".format(
        sum(failure_probabilities) / len(failure_probabilities)))
# Example 7
def observations_needed(max_nodes=100, runs=50):
    """Calculates the number of observations needed
    to get a good estimate of the stop position."""

    failure_rates = []
    with open("observations_needed.dat", 'w') as out_file:
        for node_count in range(6, max_nodes + 1, 2):
            obs_total = 0
            failures = 0

            completed = 0
            while True:
                # New random graph plus a 200-step simulated run.
                G, sigma = generate_g.generate_graph_and_settings(node_count)
                O, actual_path = generate_g.simulate_train(G, sigma, 200)

                # Find the shortest observation prefix whose most likely
                # state exceeds 90% posterior probability.
                converged = False
                for used in range(1, len(O)):
                    s, _ = state_probabilities.state_probabilities(
                        G, sigma, O[:used])
                    if s.max() > 0.90:
                        converged = True
                        break
                if not converged:
                    # No prefix sufficed: record the failure and retry
                    # without counting this attempt as completed.
                    print(s.max())
                    failures += 1
                    continue

                obs_total += used
                completed += 1
                if completed >= runs:
                    break

            out_file.write("{}\t{}\n".format(node_count, obs_total / runs))
            print(node_count, obs_total / runs)
            rate = failures / (runs + failures)
            print("Failure probability: {}".format(rate))
            failure_rates.append(rate)

    print("Average failure probability: {}".format(sum(failure_rates) / len(failure_rates)))
# Example 8
def mcmc_chain_2(G, D, init=100, sig_prob=None, sig_ind_prob=None):
    """Metropolis-Hastings chain with an adaptive, posterior-based proposal.

    Args:
        G: graph matrix; G.shape[0] is the number of switches.
        D: iterable of observation sequences; target log-likelihood of a
            sigma is the sum of state_probabilities(G, sigma, O)[1] over D.
        init: pseudo-count strength seeding the adaptive proposal counts.
        sig_prob: optional cache tuple(sigma) -> log-likelihood (mutated).
        sig_ind_prob: optional per-switch count array (mutated); defaults
            to ones, then scaled to init/2 pseudo-counts per switch.

    Yields:
        The current sigma sample after each step.
    """
    if sig_prob is None:
        sig_prob = {}
    if sig_ind_prob is None:
        sig_ind_prob = np.ones(G.shape[0])
    # Seed the adaptive counts with `init` pseudo-observations, split
    # evenly between the two labels (applies to caller-supplied arrays too).
    sig_ind_prob *= init / 2
    t = init

    def log_likelihood(sigma):
        # Log p(D | G, sigma); observation sequences contribute additively.
        return sum(state_probabilities(G, sigma, O)[1] for O in D)

    sample = sample_sigma_uniformly(n=G.shape[0])
    prob = log_likelihood(sample)
    sig_prob[tuple(sample)] = prob

    while True:
        new_sample = sample_posterior_sigma(sig_ind_prob, t)
        key = tuple(new_sample)
        try:
            new_prob = sig_prob[key]
        except KeyError:
            new_prob = sig_prob[key] = log_likelihood(new_sample)

        # Metropolis-Hastings ratio for the asymmetric adaptive proposal:
        # likelihood ratio times the proposal-density correction.
        alpha = math.exp(new_prob - prob)
        alpha *= switch_probability_posterior(new_sample, sig_ind_prob, t)
        alpha /= switch_probability_posterior(sample, sig_ind_prob, t)

        if random.random() < min(1, alpha):
            sample = new_sample
            prob = new_prob

        # Update the adaptive counts with the retained sample; sigma
        # entries are presumably in {1, 2} (cf. the exhaustive enumeration
        # elsewhere), so this adds 0 or 1 per switch — confirm.
        sig_ind_prob += sample - np.ones(G.shape[0])
        t += 1

        yield sample
# Example 9
        if prob < 5:
            print('Some wrong observation made')
            wrongObs = obs
            while wrongObs == obs:
                wrongObs = random.randrange(1, 5)
            obs = wrongObs
        O.append(obs)

        s_old = s
        s = G[s, ext]
        for a in range(0, 3):
            if G[s, a] == s_old:
                orientation = a
                break
        actual_path.append(s)
    return O, actual_path


if __name__ == '__main__':
    # Demo: simulate a short run and report the most probable stop state.
    G, sigma = generate_graph_and_settings(6)
    O, path = simulate_train(G, sigma, 20)
    print("Actual path:", path)
    print("Observations:", O)

    import state_probabilities
    s, O_prob = state_probabilities.state_probabilities(G, sigma, O)
    # argmax over the flattened distribution, mapped back to (node, label).
    node, label = np.unravel_index(s.argmax(), s.shape)
    print("Last observation was generated after exiting "
          "node {} at label {} with probability {}".format(
              node, "0LR"[label], s.max()))
# Example 10
        if prob < 5:
            print('Some wrong observation made')
            wrongObs = obs
            while wrongObs == obs:
                wrongObs = random.randrange(1, 5)
            obs = wrongObs
        O.append(obs)

        s_old = s
        s = G[s, ext]
        for a in range(0, 3):
            if G[s, a] == s_old:
                orientation = a
                break
        actual_path.append(s)
    return O, actual_path

if __name__ == '__main__':
    # Demo run: generate a graph, simulate a train, then locate the most
    # probable final state from the observation sequence.
    graph, settings = generate_graph_and_settings(6)
    observations, true_path = simulate_train(graph, settings, 20)
    print("Actual path:", true_path)
    print("Observations:", observations)

    import state_probabilities
    dist, _ = state_probabilities.state_probabilities(
        graph, settings, observations)
    best_node, best_label = np.unravel_index(dist.argmax(), dist.shape)
    print("Last observation was generated after exiting "
          "node {} at label {} with probability {}".format(
              best_node, "0LR"[best_label], dist.max()))