Example #1
def run_experiment(n, M=None):
    """Parse n distinct phrases from the Markov source M.

    Each phrase is grown from a fresh realization of the chain until it
    has not been seen before; the symbol following each phrase is
    appended to tail_symbols.
    """
    exp = dict()

    if M is None:
        M = markov_chain(2)

    exp["M"] = M
    exp["n"] = n
    exp["n_exp"] = n_exp

    parse = set("")

    tail_symbols = ""

    for k in range(1, n + 1):
        seq = ""
        m_iter = markov_iter(M)

        while seq in parse:
            seq += next(m_iter)

        parse.add(seq)
        tail_symbols += next(m_iter)

        # print("\tThis tree was {} characters long,\n\twith tail symbol {}".format(len(seq), tail_symbols[-1]))

        exp[str(k)] = seq

    exp["tail_symbols"] = tail_symbols

    return exp
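A minimal usage sketch for the function above, assuming markov_chain and markov_iter come from the same module as run_experiment; the call and its arguments below are illustrative, not part of the original:

M = markov_chain(2)                                # random 2-state chain, as in the snippet
exp = run_experiment(50, M)                        # parse 50 distinct phrases
print([len(exp[str(k)]) for k in range(1, 51)])    # phrase lengths
print(len(exp["tail_symbols"]))                    # one tail symbol per phrase -> 50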
Example #2
def run_simulation(n_exp, n, c, M=None):
    """Run n_exp independent experiments, each parsing n phrases from M."""

    exps = []

    if M is None:
        M = markov_chain(2)

    print("\nGenerating trees with {} nodes.".format(n))

    for _ in range(n_exp):

        exp = run_experiment(n, M)
        exps.append(exp)

    return exps
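A hedged driver for run_simulation; note that c is never used inside the functions shown here, so the value passed below is only a placeholder:

M = markov_chain(2)                       # share one chain across all experiments
exps = run_simulation(n_exp=20, n=100, c=1.0, M=M)
print(len(exps), "experiments of", exps[0]["n"], "phrases each")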
Example #3
    def load_markov(self, path):
        with open(path, 'r') as f_in:
            for line in f_in:
                data = line.split(' ')
                name = base64.b64decode(data[0]).decode('utf-16')
                name = name.replace(' ', '_')
                content = base64.b64decode(
                    data[1]).decode('utf-16').split('\n')

                for l in content:
                    if name not in self.chains:
                        self.chains[name] = markov_chain(self.two)
                    self.chains[name].train(l)
        for k in self.chains.keys():
            self.chains[k].compute()
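For reference, a hedged sketch of writing a file in the format load_markov expects: one line per chain, a base64-encoded UTF-16 name, a space, then a base64-encoded UTF-16 body whose newlines separate training sentences. The helper name and file name below are made up for illustration:

import base64

def dump_markov_line(name, sentences):
    # Encode the two fields exactly the way load_markov decodes them.
    def enc(s):
        return base64.b64encode(s.encode('utf-16')).decode('ascii')
    return enc(name) + ' ' + enc('\n'.join(sentences))

with open('chains.txt', 'w') as f_out:
    f_out.write(dump_markov_line('greetings', ['hello there', 'good morning']) + '\n')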
Example #4
def run_range_simulation(n_exp, ns, c):
    """ Run simulation over a range of values of n """

    sims = []
    M = markov_chain(2)

    from progress.bar import Bar

    bar = Bar("Progress:", max=len(ns))

    for n in ns:

        sims.append(get_summary(run_simulation(n_exp, n, c, M)))
        bar.next()

    bar.finish()

    return sims
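A hedged call to run_range_simulation; the sweep below is arbitrary and, as noted above, c is only passed through:

ns = list(range(1000, 11000, 1000))       # word counts to sweep
summaries = run_range_simulation(n_exp=100, ns=ns, c=1.0)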
Example #5
def test_h2():
    """Testing the h2 function."""

    print("On first order Markov chains:")

    for _ in range(10):

        M = markov_chain(2)
        print(M)
        print("Has entropy %f" % entropy(M))
        print("Has stationary distribution:")
        p = stationary_distribution(M)
        print(p)
        print("The stationary distribution entropy is:")
        print(sum([-x * log(x, 2) for x in p]))
        print("Its h2 is:")
        input(h_2(M))
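For reference, a minimal NumPy sketch of the quantities printed above, assuming M is a row-stochastic 2-D array; this is not the project's entropy/stationary_distribution implementation, just the textbook formulas:

import numpy as np

def entropy_rate_sketch(M):
    # Stationary distribution: left eigenvector of M for eigenvalue 1.
    vals, vecs = np.linalg.eig(M.T)
    pi = np.real(vecs[:, np.argmax(np.real(vals))])
    pi = pi / pi.sum()
    # Entropy rate: -sum_i pi_i sum_j M_ij log2 M_ij (0 log 0 treated as 0).
    logs = np.where(M > 0, np.log2(np.where(M > 0, M, 1.0)), 0.0)
    return pi, -np.sum(pi[:, None] * M * logs)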
Example #6
def markov_command(bot, msg):
    bot.say(msg.channel, markov.markov_chain().upper() + '!!!')
Example #7
def simulation(random_markov=True,
               filesave="experiment_data.npy",
               length_values=None,
               n_exp=None):
    """Generates words from a Markov source.
    """

    # Taking care of the different interactive modes
    if length_values is None:
        i = 1000
        length_values = [10 * i, 50 * i, 100 * i]

        style_mode = False

        fast_mode = input(
            "Do you want fast mode activated? Y/n/s (default = true, s = style_mode) "
        )

        if fast_mode == "n":
            fast_mode = False

        elif fast_mode == "s":
            style_mode = True
            length_values = [500, 1000, 2000]
            n_exp = 100

        else:
            fast_mode = True
            print("\nChoose words lengths:\n")
            length_values = [int(input(str(i) + ": ")) for i in range(3)]
            n_exp = int(input("Number of experiments"))

    else:
        if n_exp is None:
            n_exp = int(input("Specifiy number of experiments"))

        fast_mode = True
        style_mode = False

    # In case the Markov chain isn't randomized, this is the chain we'll use
    if not random_markov:
        p_a = 0.9
        M = np.matrix([[p_a, 1 - p_a], [1 - p_a, p_a]])
        h = -p_a * log(p_a, 2) - (1 - p_a) * log(1 - p_a, 2)
        f = [0, 1]

    else:
        N = 2
        M = markov_chain(N)
        h = entropy(M)
        f = [0, 1]
        print("Random markov has values", M)

    # Some output for slow mode
    # if not fast_mode:
    # print("\nUsing a random Markov chain of size " + str(N))
    # input(M)
    # print("\nIts state function f is:")
    # input(f)
    # print("\nIts entropy is:")
    # input(h)
    # print("\nIts h2 is:")
    # input(h_2(M))
    # print("\nh2-h^2 is:")
    # input(h_2(M) - h ** 2)

    # Initializing the dictionaries used to store the experiments
    # Many keys are being used, such as "h" for entropy, "M" for
    # the chain, "n_exp", "n_word", etc.
    # The experiments are stored in this format in npy files.
    exps = [dict() for _ in range(len(length_values))]

    for i, n in enumerate(length_values):
        exp = exps[i]

        exp["h"] = h
        exp["M"] = M

        if not (fast_mode or style_mode):
            try:
                n = int(
                    input("\nChoose size of words to test (default %d) " % n))
            except ValueError:
                pass

            try:
                n_exp = int(
                    input("\nHow many experiments do I run? (default %d) " %
                          200))
            except ValueError:
                n_exp = 200

        print("\nNow simulating words of size %d, doing %d experiments " %
              (n, n_exp))
        exp["n_exp"] = n_exp
        exp["n_word"] = n

        # Runs LZ78 over n_exp samples of words of length n from
        # the Markov chain M
        # word_gen = word_generator(M, f, n)
        # l =[word_gen() for _ in range(n_exp)]
        # l = fast_word_generator(M,f,n,n_exp)
        # c = [compress2(w) for w in l]
        # m = [len(x) for x in c]
        bar = Bar("Processing", max=n_exp)
        m = []
        for _ in range(n_exp):
            m.append(compress2(markov_source2(M, n)))
            bar.next()

        exp["data"] = m

    exps[0]["ns"] = length_values
    print("\nNow savings experiments to " + filesave)
    np.save(filesave, exps)

    return exps, fast_mode
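Because simulation() stores a Python list of dicts with np.save, reading the results back needs allow_pickle=True; a short sketch using the default file name above:

import numpy as np

exps = np.load("experiment_data.npy", allow_pickle=True)
for exp in exps:
    print(exp["n_word"], exp["n_exp"], len(exp["data"]))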
Example #8
        )
        gc.collect()

    bar.finish()

    return sims


if __name__ == "__main__":

    from timeit import default_timer as timer
    from progress.bar import Bar
    import sys

    start = timer()
    M = markov_chain(2)

    # Defaults; overridden when the script is invoked with "-range".
    ns = [100, 1000, 2000]
    n_exp = 300

    if len(sys.argv) > 1 and sys.argv[1] == "-range":
        ns = list(
            range(int(input("a = ")), int(input("b = ")), int(input("step = ")))
        )
        n_exp = int(input("n_exp = "))

    resu = parallel_simu(M, ns, n_exp)

    end = timer()
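The excerpt stops right after the timer; an assumed (hedged) continuation is simply to report the elapsed wall-clock time:

    # Assumed continuation of the excerpt above: report how long the run took.
    print("parallel_simu took %.2f s (%d experiments per size)" % (end - start, n_exp))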
Example #9
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 16 21:50:30 2020

@author: zicong huang
"""

from markov import markov_chain
import numpy as np
import matplotlib.pyplot as plt

transition = np.array(
             [[0.1, 0.5, 0.3, 0.1],
              [0.4, 0.2, 0.1, 0.3],
              [0.2, 0.3, 0.2, 0.3],
              [0.5, 0.2, 0.1, 0.2]])
n = 100
s0 = 2
V = [-1, 0, 1, 2]
chain, state = markov_chain(transition, n, s0, V)

fig, ax = plt.subplots(figsize=(10, 6), dpi=300)
ax.plot(chain)
ax.set_title('sim: markov chain')
ax.set_xlabel('time')
ax.set_ylabel('value')
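Two small hedged additions to the script above: a row-stochasticity check (which naturally belongs before markov_chain is called) and writing the figure to disk under an arbitrary file name:

# Each row of the transition matrix must be a probability distribution.
assert np.allclose(transition.sum(axis=1), 1.0), "rows must sum to 1"
fig.savefig('markov_chain_sim.png', bbox_inches='tight')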
Example #10
        n (int): Size of the Markov chain.

    Returns:
        (int array): The Psi vector
    """

    return np.ones(n)


if __name__ == "__main__":
    import pandas as pd

    os, hs, unhs, vns, h2s, pqps, bos, lns = [], [], [], [], [], [], [], []

    for _ in range(10):
        m_chain = markov_chain(2)
        n = 500

        (v, (o, h3, unh, bo, pqp, h2, lg)) = variances(m_chain, n)
        # print("For M =", m_chain)
        # print("n =", n)
        # print("var =", var(m_chain, n))

        os.append(o)
        hs.append(h3)
        unhs.append(unh)
        vns.append(v)
        h2s.append(h2)
        bos.append(bo)
        pqps.append(pqp)
        lns.append(lg)
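pandas is imported in the excerpt above but never used before it cuts off; a hedged guess at a follow-up tabulation step, indented to continue the __main__ block (the column names are invented):

    # Hypothetical follow-up: tabulate the collected lists for a quick summary.
    df = pd.DataFrame({"o": os, "h": hs, "unh": unhs, "vn": vns,
                       "h2": h2s, "bo": bos, "pqp": pqps, "ln": lns})
    print(df.describe())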
Example #11
def authorized():
    subreddits = request.form['subreddits']
    subreddit_list = subreddits.split(",")
    title, post = markov.markov_chain(subreddit_list, 10, 50)
    return render_template('submission.html', title=title, post=post)
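A hedged sketch of how the handler above might be wired up; only the form field 'subreddits' and the 'submission.html' template come from the snippet, while the route path, app object, and module layout are assumptions:

from flask import Flask, request, render_template   # names used inside authorized()
import markov                                        # project-local module assumed above

app = Flask(__name__)
app.add_url_rule('/authorized', view_func=authorized, methods=['POST'])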