Example #1
def heartRateDecoder():
    print('reading spike data...')
    allSpikeData = getData()
    nSI = allSpikeData.shape[0]
    if nSI % 600 != 0:
        print('warning: the spike data does not cover a whole number of minutes!')
        return
    nHI = int(nSI / 600)

    print('getting actual heart rate...')
    labelFileNames = ['caijing.f.txt']
    startTimes = [0]
    stopTimes = [60000]
    actualHeartRates = read_actual_heart_rate.getActualHeartRate(
        labelFileNames, startTimes, stopTimes)
    # actualHeartRates = [72,72,72,72,72,72,72,72,72,72]
    print('actual heart rate:', actualHeartRates)

    print('calculating heart rate...')
    heartRates = np.zeros([nHI])
    for HI_i in range(nHI):
        print('calculating minute %d...' % HI_i)
        spikeData = allSpikeData[HI_i * 600:(HI_i + 1) * 600]

        probability = MC.MonteCarlo(spikeData).p
        # probability = PSO.ParticleSwarmOptimization(spikeData, c1 = 2, c2 = 1, m = 2).p

        heartRate = PHR.PredictHeartRate(probability).heartRate
        heartRate = min(600 - heartRate, heartRate)
        print('HI_i:', HI_i, '\theartRate:', heartRate)
        heartRates[HI_i] = heartRate

    MAPE = accuracy_estimation.meanAveragePercentError(heartRates,
                                                       actualHeartRates)
    print('Mean Average Percent Error (MAPE):', MAPE)
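
The score printed above comes from accuracy_estimation.meanAveragePercentError, which is not shown in this example. Below is a minimal sketch of what such a metric typically computes, assuming it is the usual mean absolute percentage error over the per-minute estimates; the function name and behaviour here are illustrative, not taken from the accuracy_estimation module.

import numpy as np

def mean_average_percent_error(predicted, actual):
    # hypothetical stand-in: mean of |predicted - actual| / actual, as a percentage
    predicted = np.asarray(predicted, dtype=float)
    actual = np.asarray(actual, dtype=float)
    return float(np.mean(np.abs(predicted - actual) / actual) * 100.0)

# e.g. mean_average_percent_error([70, 74], [72, 72]) -> ~2.78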
Example #2
def train_monte_carlo(target_matrix, current_matrix, gates_list, **kwargs):
    move_time = kwargs.get('time', 5)
    max_games = kwargs.get('games', 100)
    n_qubits = int(np.log2(np.size(current_matrix, 0)))
    # NOTE: this tree is built but immediately replaced by the MonteCarlo
    # instance constructed from `circuit` below
    monte = monte_carlo.Monte_Carlo_Tree(N=n_qubits,
                                         target=target_matrix,
                                         list=gates_list,
                                         time=move_time)
    circuit = monte_carlo.Circuit(N=n_qubits,
                                  target=target_matrix,
                                  list=gates_list)
    monte = monte_carlo.MonteCarlo(circuit, time=move_time)
    monte.update(monte_carlo.untuple_array(circuit.start()))
    for _ in range(max_games):
        monte.run_simulation()
    return monte
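
A hedged usage sketch for train_monte_carlo: the 2x2 target unitary, the identity starting matrix, and the gate names below are illustrative assumptions; the real gate-list format depends on the monte_carlo module.

import numpy as np

target = np.array([[0, 1],
                   [1, 0]], dtype=complex)   # assumed single-qubit target (Pauli-X)
current = np.eye(2, dtype=complex)           # assumed starting matrix (identity)
gates = ['H', 'T', 'S']                      # placeholder gate list

trained = train_monte_carlo(target, current, gates, time=2, games=50)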
Example #3
    def __init__(self, radius=40, size=(5, 5), color=(128, 128, 128), players_color=((255, 0, 0), (0, 0, 255)), mode=1, waiting_time=1, load=True):

        self.radius = radius
        self.size = size
        self.rows, self.cols = size  # unpack the (rows, cols) tuple
        self.default_color = color
        self.players = players_color
        self.player_turn = 0
        self.board = self.init_board()
        self.history = []
        self.modes = {0: "REAL PLAYER", 1: "AI PLAYER"}
        self.show_states = {0: "OFF", 1: "ON"}
        self.current_mode = mode
        self.current_show = 0
        self.waiting_time = waiting_time
        self.win_path = []
        self.load = load

        self.monte_carlo = monte_carlo.MonteCarlo(self)
        self.load_Monte_Carlo_Obj()
        # print(self.monte_carlo.wins)

        self.setup()
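
Only the constructor is shown above. Assuming it belongs to a board class (called Board here purely for illustration), construction could look like this; only the keyword arguments come from the __init__ signature.

# hypothetical class name; argument values are arbitrary
board = Board(radius=30, size=(7, 7), mode=1, waiting_time=0.5, load=False)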
Example #4
def main():
    options = parse_args()

    # parameter set 2
    assert (options.number_of_topics > 0)
    number_of_topics = options.number_of_topics
    assert (options.training_iterations > 0)
    training_iterations = options.training_iterations
    assert (options.snapshot_interval > 0)
    snapshot_interval = options.snapshot_interval

    # parameter set 4
    #disable_alpha_theta_update = options.disable_alpha_theta_update;
    inference_mode = options.inference_mode

    # parameter set 1
    #assert(options.corpus_name!=None);
    assert (options.input_directory is not None)
    assert (options.output_directory is not None)

    input_directory = options.input_directory
    input_directory = input_directory.rstrip("/")
    corpus_name = os.path.basename(input_directory)

    output_directory = options.output_directory
    if not os.path.exists(output_directory):
        os.mkdir(output_directory)
    output_directory = os.path.join(output_directory, corpus_name)
    if not os.path.exists(output_directory):
        os.mkdir(output_directory)

    # Document
    train_docs_path = os.path.join(input_directory, 'train.dat')
    input_doc_stream = open(train_docs_path, 'r')
    train_docs = []
    for line in input_doc_stream:
        train_docs.append(line.strip().lower())
    print "successfully load all training docs from %s..." % (
        os.path.abspath(train_docs_path))

    # Vocabulary
    vocabulary_path = os.path.join(input_directory, 'voc.dat')
    input_voc_stream = open(vocabulary_path, 'r')
    vocab = []
    for line in input_voc_stream:
        vocab.append(line.strip().lower().split()[0])
    vocab = list(set(vocab))
    print "successfully load all the words from %s..." % (
        os.path.abspath(vocabulary_path))

    # parameter set 3
    alpha_alpha = 1.0 / number_of_topics
    if options.alpha_alpha > 0:
        alpha_alpha = options.alpha_alpha
    alpha_beta = options.alpha_beta
    if alpha_beta <= 0:
        alpha_beta = 1.0 / len(vocab)

    # create output directory
    now = datetime.datetime.now()
    suffix = now.strftime("%y%m%d-%H%M%S")
    suffix += "-%s" % ("lda")
    suffix += "-I%d" % (training_iterations)
    suffix += "-S%d" % (snapshot_interval)
    suffix += "-K%d" % (number_of_topics)
    suffix += "-aa%f" % (alpha_alpha)
    suffix += "-ab%f" % (alpha_beta)
    suffix += "-im%d" % (inference_mode)
    # suffix += "-%s" % (resample_topics);
    # suffix += "-%s" % (hash_oov_words);
    suffix += "/"

    output_directory = os.path.join(output_directory, suffix)
    os.mkdir(os.path.abspath(output_directory))

    #dict_file = options.dictionary;
    #if dict_file != None:
    #dict_file = dict_file.strip();

    # store all the options to a file
    options_output_file = open(output_directory + "option.txt", 'w')
    # parameter set 1
    options_output_file.write("input_directory=" + input_directory + "\n")
    options_output_file.write("corpus_name=" + corpus_name + "\n")
    #options_output_file.write("vocabulary_path=" + str(dict_file) + "\n");
    # parameter set 2
    options_output_file.write("training_iterations=%d\n" %
                              (training_iterations))
    options_output_file.write("snapshot_interval=" + str(snapshot_interval) +
                              "\n")
    options_output_file.write("number_of_topics=" + str(number_of_topics) +
                              "\n")
    # parameter set 3
    options_output_file.write("alpha_alpha=" + str(alpha_alpha) + "\n")
    options_output_file.write("alpha_beta=" + str(alpha_beta) + "\n")
    # parameter set 4
    options_output_file.write("inference_mode=%d\n" % (inference_mode))
    options_output_file.close()

    print "========== ========== ========== ========== =========="
    # parameter set 1
    print "output_directory=" + output_directory
    print "input_directory=" + input_directory
    print "corpus_name=" + corpus_name
    #print "dictionary file=" + str(dict_file)
    # parameter set 2
    print "training_iterations=%d" % (training_iterations)
    print "snapshot_interval=" + str(snapshot_interval)
    print "number_of_topics=" + str(number_of_topics)
    # parameter set 3
    print "alpha_alpha=" + str(alpha_alpha)
    print "alpha_beta=" + str(alpha_beta)
    # parameter set 4
    print "inference_mode=%d" % (inference_mode)
    print "========== ========== ========== ========== =========="

    if inference_mode == 0:
        import hybrid
        lda_inferencer = hybrid.Hybrid()
    elif inference_mode == 1:
        import monte_carlo
        lda_inferencer = monte_carlo.MonteCarlo()
    elif inference_mode == 2:
        import variational_bayes
        lda_inferencer = variational_bayes.VariationalBayes()
    else:
        sys.stderr.write("error: unrecognized inference mode %d...\n" %
                         (inference_mode))
        return

    lda_inferencer._initialize(train_docs, vocab, number_of_topics,
                               alpha_alpha, alpha_beta)

    for iteration in xrange(training_iterations):
        lda_inferencer.learning()

        if (lda_inferencer._counter % snapshot_interval == 0):
            lda_inferencer.export_beta(output_directory + 'exp_beta-' +
                                       str(lda_inferencer._counter))

    model_snapshot_path = os.path.join(output_directory,
                                       'model-' + str(lda_inferencer._counter))
    cPickle.dump(lda_inferencer, open(model_snapshot_path, 'wb'))
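
parse_args() itself is not part of this example. Below is a minimal argparse-based sketch that would supply the attributes the code reads; the flag names mirror those attributes, but the parser, defaults, and required flags are assumptions.

import argparse

def parse_args():
    # hypothetical parser; only the attribute names are taken from the code above
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_directory', required=True)
    parser.add_argument('--output_directory', required=True)
    parser.add_argument('--number_of_topics', type=int, default=10)
    parser.add_argument('--training_iterations', type=int, default=100)
    parser.add_argument('--snapshot_interval', type=int, default=10)
    parser.add_argument('--inference_mode', type=int, default=0)
    parser.add_argument('--alpha_alpha', type=float, default=-1.0)
    parser.add_argument('--alpha_beta', type=float, default=-1.0)
    return parser.parse_args()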
Example #5
import sys
import os

import ttt
import monte_carlo as mc
import tensorflow as tf

import numpy as np
import random
import re

path = os.path.join(os.getcwd(), "saved_models/gomoku")

if len(sys.argv) == 2:
    tt = ttt.ttt(w=19, h=19, to_win=5)
    m = mc.MonteCarlo(100, .4, path)

    print("go")
    while True:
        # the AI branch is disabled by the `False and` guard; remove the guard
        # to let the MonteCarlo player move as 'O'
        if False and tt.player() == 'O':
            move = m.next_move(tt, True)
        else:
            i = str(input())
            if i == "exit" or i == "quit" or i == "q":
                break
            regex = re.compile(r'(\d+), ?(\d+)')
            rf = regex.findall(i)
            if not rf:
                print("indecipherable")
                continue
            x, y = rf[0]