def main():
    total_work = multiprocessing.cpu_count()
    burnin = 30000
    significance_samples = 100000
    # Use integer division so each worker draws a whole number of samples.
    per_process_samples = significance_samples // multiprocessing.cpu_count()
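    # Slow/fast decay-rate pair; find_optimal_decay presumably derives the
    # fast rate that complements the chosen slow rate (assumption from its
    # name, not confirmed in this excerpt).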
    alpha_count_slow = 0.001
    alpha_count_fast = find_optimal_decay(alpha_count_slow)
    alpha_mu_slow = 0.01
    alpha_mu_fast = 0.01
    buckets_slow = 50
    buckets_fast = 50
    pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
    runs = pool.map(
            ergodic_chain,
            [[burnin, per_process_samples,
              alpha_count_slow, alpha_count_fast,
              alpha_mu_slow, alpha_mu_fast,
              buckets_slow, buckets_fast] for _ in range(total_work)])
    # Merge the per-process results into one sample list per function.
    aggregator = [[] for _ in range(len(FUNC_LIST))]
    for run in runs:
        for i, data_list in enumerate(run):
            aggregator[i] += data_list
    for label, data in zip(FUNC_LABELS, aggregator):
        # matplotlib removed the 'normed' kwarg; 'density' is its replacement.
        _, _, patches = pylab.hist(
                data, 250, label=label,
                density=True, histtype='stepfilled')
        pylab.setp(patches, 'alpha', 0.4)
    pylab.legend()
    pylab.show()
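
# A minimal illustrative sketch (not the library's code) of the exponential
# decay that the alpha_count parameters control: each new observation scales
# every bucket count by (1 - alpha) and credits the observed bucket, so
# larger alphas forget history faster. The exact update rule inside
# ergodic_chain / DecayingHistogram is an assumption here.
def _decay_update_sketch(counts, bucket_idx, alpha):
    counts = [(1.0 - alpha) * c for c in counts]
    counts[bucket_idx] += alpha
    return counts
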
def main():
    burnin = 10000
    significance_samples = 10000
    # As above, split the samples evenly (integer division) across workers.
    per_process_samples = significance_samples // multiprocessing.cpu_count()
    alpha_count_slow = 0.005
    alpha_count_fast = decay_equations.find_optimal_decay(alpha_count_slow)
    alpha_mu_slow = 0.01
    alpha_mu_fast = 0.01
    buckets_slow = 100
    buckets_fast = 100
    total_work = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
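    # Phase 1: sample the statistic's steady-state distribution in parallel;
    # the significance threshold below is calibrated from these samples.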
    runs = pool.map(
            ergodic_chain,
            [[burnin, per_process_samples,
              alpha_count_slow, alpha_count_fast,
              alpha_mu_slow, alpha_mu_fast,
              buckets_slow, buckets_fast] for _ in range(total_work)])
    data = []
    for run in runs:
        data.extend(run)
    data.sort()
    # The detection threshold is the 99.99th percentile of the sampled values.
    threshold = data[int(0.9999 * len(data))]

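    # Phase 2: with the threshold fixed, measure detection windows; judging by
    # the function name, these are presumably the number of samples needed
    # before the statistic first crosses the threshold.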
    total_work = 100
    upper_bound = 10000
    windows = pool.map(
            get_detection_windows,
            [[burnin, upper_bound,
              alpha_count_slow, alpha_count_fast,
              alpha_mu_slow, alpha_mu_fast,
              buckets_slow, buckets_fast,
              threshold]
             for _ in range(total_work)])
    window_data = []
    for window in windows:
        window_data.extend(window)

    for _ in range(total_work):
        window_data.append(0)

    n, bins, patches = pylab.hist(
            window_data, upper_bound, histtype='stepfilled',
            range=(0, upper_bound))
    pylab.setp(patches, 'facecolor', 'g', 'alpha', 0.75)
    pylab.show()
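
# Hypothetical entry point; assumes this script is meant to be run directly.
if __name__ == '__main__':
    main()
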
import os
import sys
import scipy.stats as stats

my_dir = os.path.dirname(os.path.abspath(__file__))

sys.path.append(os.path.join(my_dir, "../lib/python"))
from decaying_histogram import DecayingHistogram as c_DHist
from roll import create_bucket_class, create_rolling_histogram_class
from decay_equations import find_optimal_decay

if __name__ == '__main__':
    target_num_buckets = 50
    alpha_slow = 0.0001
    alpha_fast = find_optimal_decay(alpha_slow)
    c_slow = c_DHist(target_num_buckets, alpha_slow)
    c_fast = c_DHist(target_num_buckets, alpha_fast)

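    # Pure-Python counterparts built with the same parameters, so both
    # implementations can be driven with an identical input stream.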
    py_slow = create_rolling_histogram_class(
            Bucket=create_bucket_class(alpha_count=alpha_slow),
            target_buckets=target_num_buckets)()
    py_fast = create_rolling_histogram_class(
            Bucket=create_bucket_class(alpha_count=alpha_fast),
            target_buckets=target_num_buckets)()

    dist = stats.norm(0, 1)
    num_iterations = 1000000
    for idx in range(num_iterations):
        # Progress meter: rewrite one stderr line every 1% of iterations.
        if idx % (num_iterations // 100) == 0:
            print("{0} / {1}".format(idx, num_iterations),
                  end="\r", file=sys.stderr)
            sys.stderr.flush()