Example #1
    def test_S_folder(self, output_folder, caplog):
        caplog.set_level(logging.INFO)
        output_path = str(output_folder).replace('\\', '/')
        simulation.run(output_path=output_path)

        captured = caplog.records

        for level in [k.levelname for k in captured]:
            assert level == 'INFO'

        captured_loggers = [k.name for k in captured]
        correct_loggers = ['Setup', 'Setup', 'Framework', 'Simulation', 'Setup']
        for capt, corr in itertools.zip_longest(captured_loggers, correct_loggers):
            assert capt == corr

        standard_captured = [k.message.replace('\n', '') for k in captured]
        correct = ['{}'.format(outputting.date()),
                   'Log initialised',
                   'Beginning task labelled: Untitled',
                   "Simulation 0 contains the task Basic: Trials = 100.The model used is QLearn: number_actions = 2, number_cues = 1, number_critics = 2, prior = array([0.5, 0.5]), non_action = 'None', actionCode = {0: 0, 1: 1}, stimulus_shaper = 'model.modelTemplate.Stimulus with ', reward_shaper = 'model.modelTemplate.Rewards with ', decision_function = 'discrete.weightProb with task_responses : 0, 1', alpha = 0.3, beta = 4, expectation = array([[0.5],       [0.5]]).",
                   'Shutting down program']

        for correct_line, standard_captured_line in itertools.zip_longest(correct, standard_captured):
            assert standard_captured_line == correct_line

        assert os.listdir(output_path) == []
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--s3_application_url",
                        type=str,
                        required=True,
                        help="S3 URL for application code")
    parser.add_argument(
        "--parameters",
        type=str,
        required=True,
        help="File containing simulation distribution paramters")
    parser.add_argument("--result_folder",
                        type=str,
                        required=True,
                        help="Folder to put results in")
    args = parser.parse_args()
    with open(args.parameters) as params_file:
        params = json.load(params_file)
    s3 = boto3.resource("s3")

    for bucket in ["maccoss-ec2", "maccoss-emr"]:
        s3.Bucket(bucket).objects.filter(Prefix="0/").delete()

    if not os.path.isdir(args.result_folder):
        os.mkdir(args.result_folder)
    if not os.path.isdir(args.result_folder + "/tasks"):
        os.mkdir(args.result_folder + "/tasks")
    if not os.path.isdir(args.result_folder + "/nodes"):
        os.mkdir(args.result_folder + "/nodes")
    shutil.copyfile(args.parameters,
                    args.result_folder + "/" + args.parameters.split("/")[-1])
    m = master.Master(args.s3_application_url, args.result_folder, params)
    m.setup()
    m.start(asynch=True)
    simulation.run(params, m, False)
    m.shutdown()
Example #3
def fit(args):
    repetition_i, p = args

    neuron = get_default("neuron")
    neuron["phi"]['alpha'] = p["alpha"]
    neuron["phi"]['beta'] = p["beta"]
    neuron["phi"]['r_max'] = p["r_max"]

    learn = get_default("learn")
    learn["eta"] = 4e-7

    my_s = {
        'start': 0.0,
        'end': 1000.0,
        'dt': 0.05,
        'pre_spikes': [np.arange(50.0,1000.0,250.0)],
        'I_ext': lambda t:0.0
        }

    seed = int(int(time.time()*1e8)%1e9)
    accs = [PeriodicAccumulator(['weights'], interval=10)]
    if p["h1"]:
        accums = run(my_s, lambda **kwargs:False, get_dendr_spike_det(p["thresh"]), accs, seed=seed, neuron=neuron, learn=learn, voltage_clamp=True, U_clamp=p['Uclamp'], h=1.0)
    else:
        accums = run(my_s, lambda **kwargs:False, get_dendr_spike_det(p["thresh"]), accs, seed=seed, neuron=neuron, learn=learn, voltage_clamp=True, U_clamp=p['Uclamp'])

    dump(accums,'artola/'+p['ident'])
Example #4
def _test_simulation():
    pop_size = 10000
    vacc_percentage = .90
    virus_name = "Ebola"
    mortality_rate = .70
    basic_repro_num = .25
    initial_infected = 10
    simulation = Simulation(pop_size, vacc_percentage, virus_name,
                            mortality_rate, basic_repro_num, initial_infected)
    simulation.run()
Example #5
def vary(args):
    repetition_i, p = args

    n_vary = 5

    values = {True: {"alpha":-55.0,
                     "beta":0.4,
                     "r_max":0.3},
              False: {"alpha":-59.0,
                      "beta":0.5,
                      "r_max":0.17}}

    vary = {"alpha":(-2.0,2.0),
              "beta":(-0.1,0.2),
              "r_max":(-0.05,0.15)}

    down = vary[p["vary"]][0]
    up = vary[p["vary"]][1]
    middle = values[p["h1"]][p["vary"]]
    vary_val = np.linspace(middle+down, middle+up, n_vary)[p["i"]]
    values[p["h1"]][p["vary"]] = vary_val
    values = values[p["h1"]]

    neuron = get_default("neuron")
    neuron["phi"]['r_max'] = values["r_max"]
    neuron["phi"]['alpha'] = values["alpha"]
    neuron["phi"]['beta'] = values["beta"]

    learn = get_default("learn")
    learn["eps"] = learn["eps"]*p["l_f"]
    learn["eta"] = learn["eta"]*p["l_f"]
    if not p["h1"]:
        learn["eta"] = learn["eta"]*2.5
    else:
        learn["eta"] = learn["eta"]*1.3


    spikes = np.array([61.0])

    my_s = {
        'start': 0.0,
        'end': 150.0,
        'dt': 0.05,
        'pre_spikes': [spikes + p["delta"]],
        'I_ext': lambda t: 0.0
        }


    seed = 1
    accs = [PeriodicAccumulator(['y','weights'], interval=10), BooleanAccumulator(['spike', 'dendr_spike', 'pre_spikes'])]
    if p["h1"]:
        accums = run(my_s, get_fixed_spiker(spikes), get_dendr_spike_det(-50.0), accs, seed=seed, neuron=neuron, learn=learn, h=1.0)
    else:
        accums = run(my_s, get_fixed_spiker(spikes), get_dendr_spike_det(-50.0), accs, seed=seed, neuron=neuron, learn=learn)

    dump((accums, values),'bi_poo/'+p['ident'])
Example #6
def fit(args):
    repetition_i, p = args

    learn = get_default("learn")
    if p["h1"]:
        learn['eta'] *= 0.125 / 8.0
    else:
        learn["eta"] *= 0.1

    neuron = get_default("neuron")
    neuron["phi"]['r_max'] = 0.2
    neuron["phi"]['alpha'] = -54.0
    neuron["phi"]['beta'] = 0.25

    p_backprop = 0.75

    freq = p["freq"]
    delta = p["delta"]

    n_spikes_in_burst = 10
    burst_pause = 200.0
    bursts = 50 // n_spikes_in_burst
    burst_dur = 1000.0 * n_spikes_in_burst / freq

    first_spike = 1000.0 / (2 * freq)
    isi = 1000.0 / freq
    t_end = bursts * (burst_dur + burst_pause)

    spikes_in_burst = np.arange(first_spike, burst_dur, isi)
    spikes = np.array([])
    for i in range(bursts):
        spikes = np.concatenate((spikes, spikes_in_burst + i * (burst_dur + burst_pause)))

    pre_spikes = spikes + delta

    my_s = {
        'start': 0.0,
        'end': t_end,
        'dt': 0.05,
        'pre_spikes': [pre_spikes],
        'I_ext': lambda t: 0.0
    }

    seed = int(int(time.time() * 1e8) % 1e9)
    accs = [PeriodicAccumulator(['weights'], interval=100)]
    if p["h1"]:
        accums = run(my_s, get_fixed_spiker(spikes), get_dendr_spike_det(-50.0), accs,
                     seed=seed, learn=learn, neuron=neuron, h=1.0, p_backprop=p_backprop)
    else:
        accums = run(my_s, get_fixed_spiker(spikes), get_dendr_spike_det(-50.0), accs,
                     seed=seed, learn=learn, neuron=neuron, p_backprop=p_backprop)

    dump(accums, 'sjostrom/' + p['ident'])
Example #7
def task(args):
    repetition_i, p = args

    eps_stim = eps_stim_low+(eps_stim_high-eps_stim_low)*np.random.rand()

    learn = get_default("learn")
    learn["eta"] = [1e-7, 0.0]
    learn["eps"] = [p["eps_learn"],10**eps_stim]

    neuron = get_default("neuron")
    neuron["phi"]["alpha"] = -52.0
    neuron["phi"]["beta"] = 0.25
    neuron["phi"]["r_max"] = 0.35


    post_spikes = np.arange(40.0,2000.0,20.0)
    pre_spikes_learn = post_spikes - 10.0
    pre_spikes_stim = post_spikes - p["stim_delta"]

    my_s = {
        'start': 0.0,
        'end': 2000.0,
        'dt': 0.05,
        'pre_spikes': [pre_spikes_learn, pre_spikes_stim],
        'I_ext': lambda t: 0.0
        }

    prob = p["prob"]


    seed = int(int(time.time()*1e8)%1e9)
    accs = [PeriodicAccumulator(['weights'], interval=100)]
    accums = run(my_s, get_fixed_spiker(post_spikes), get_dendr_spike_det(p["thresh"]), accs, neuron=neuron, seed=seed, learn=learn, p_backprop=prob)


    dump((eps_stim,accums),p['ident'])
Example #8
def overfit(args):
    repetition_i, p = args

    neuron = get_default("neuron")
    neuron["phi"]['alpha'] = p["alpha"]
    neuron["phi"]['beta'] = p["beta"]
    neuron["phi"]['r_max'] = p["r_max"]

    learn = get_default("learn")
    learn["eta"] = 1e-7

    my_s = {
        'start': 0.0,
        'end': 4000.0,
        'dt': 0.05,
        'pre_spikes': [np.arange(50.0, 4000.0, 250.0)],
        'I_ext': lambda t: 0.0
    }

    seed = int(int(time.time() * 1e8) % 1e9)
    accs = [PeriodicAccumulator(['weights'], interval=10)]
    accums = run(my_s,
                 lambda **kwargs: False,
                 get_dendr_spike_det_dyn_ref(p["thresh"], p["tau_ref_0"],
                                             p["theta_0"]),
                 accs,
                 seed=seed,
                 neuron=neuron,
                 voltage_clamp=True,
                 U_clamp=p['Uclamp'])

    dump(accums, 'artola/' + p['ident'])
Example #9
            def work(self) -> dataset.experimental_data.ExperimentalData:
                self.set_work_size(options.subject_count)
                ds = dataset.experimental_data.ExperimentalData(
                    options.dataset_name, [])
                ds.alternatives = options.alternatives
                ds.observ_count = 0

                with Core() as core:
                    self.interrupt = lambda: core.shutdown()

                    for subj_nr in range(1, options.subject_count + 1):
                        response = simulation.run(
                            core,
                            simulation.Request(
                                name='random%d' % subj_nr,
                                alternatives=options.alternatives,
                                gen_menus=options.gen_menus,
                                gen_choices=options.gen_choices,
                            ))

                        ds.subjects.append(response.subject_packed)
                        ds.observ_count += response.observation_count

                        self.set_progress(subj_nr)

                return ds
Example #10
def runSimulations(): 
    seed(183)
    for i in range(202):
        p = random()
        
        print "Probability: " + str(p)
        moduleSize = 100
        moduleNumber = 8
        N0 = moduleSize * moduleNumber
        N1 = 200
        
        generator = NetworkGenerator(moduleSize, moduleNumber, N1, 4, 1000)
        generator.initialize()
        
        
        generator.setCurrentConnectivityMatrix(p = p)
        generator.genNetwork()
        net = generator.net
    
        print "\tNetwork generated"
        
        tTotal = 60000
        dt = 1
        
        firings0, firings1, u0, u1, v0, v1 = run(net, tTotal, dt, N0, N1)
    
        print "\tNetwork ran for 60 sec"    
        a = visualiseMeanFirings(tTotal, firings0, 50, 20)
        np.save(join('SimulationData', 'mean_' + str(p)), a)
        plt.close("all")s
Example #11
def _simulation_gen():
    f_in = io.BytesIO()
    f_out = io.BytesIO()
    with Core(f_tee=(f_in, f_out)) as core:
        response = simulation.run(
            core,
            simulation.Request(
                name='random',
                alternatives=['A', 'B', 'C', 'D', 'E'],
                gen_menus=simulation.GenMenus(
                    generator=simulation.Exhaustive(),
                    defaults=False,
                ),
                gen_choices=simulation.Uniform(
                    forced_choice=True,
                    multiple_choice=False,
                ),
                preserve_deferrals=False,
            ))

    with open('in.bin', 'wb') as f:
        f.write(f_in.getbuffer())

    with open('out.bin', 'wb') as f:
        f.write(f_out.getbuffer())
Example #12
def task(args):
    repetition_i, p = args

    n_syn = p["n_syn"]

    learn = get_default("learn")
    learn["eps"] = 1e-1 / n_syn
    learn["eta"] = 1e-3 / n_syn

    neuron = get_default("neuron")
    neuron["phi"]["alpha"] = p["alpha"]
    neuron["phi"]["beta"] = p["beta"]
    neuron["phi"]["r_max"] = p["r_max"]
    neuron["g_S"] = p["g_S"]

    epochs = 4
    l_c = 6
    eval_c = 2
    cycles = epochs * l_c + (epochs + 1) * eval_c
    cycle_dur = p["cycle_dur"]
    t_end = cycles * cycle_dur

    def exc_soma_cond(t):
        if t % (cycle_dur * (l_c + eval_c)) < cycle_dur * eval_c:
            return 0.0
        else:
            return ((1 + np.sin(np.pi / 2 + t / t_end * cycles * 2 * np.pi)) * 2e-3 * 1 + 8e-3) * p["g_factor"]

    def inh_soma_cond(t):
        if t % (cycle_dur * (l_c + eval_c)) < cycle_dur * eval_c:
            return 0.0
        else:
            return 8e-2 * p["g_factor"]

    dt = 0.05
    f_r = 0.01  # 10Hz
    t_pts = np.arange(0, t_end / cycles, dt)

    poisson_spikes = [t_pts[np.random.rand(t_pts.shape[0]) < f_r * dt] for _ in range(n_syn)]
    poisson_spikes = [[] if spikes.shape[0] == 0 else np.concatenate(
        [np.arange(spike, t_end, cycle_dur) for spike in spikes]) for spikes in poisson_spikes]
    for train in poisson_spikes:
        train.sort()

    my_s = {
        'start': 0.0,
        'end': t_end,
        'dt': dt,
        'pre_spikes': poisson_spikes,
        'syn_cond_soma': {'E': exc_soma_cond, 'I': inh_soma_cond},
        'I_ext': lambda t: 0.0
    }

    seed = int(int(time.time() * 1e8) % 1e9)
    accs = [PeriodicAccumulator(get_all_save_keys(), interval=40)]
    accums = run(my_s, get_fixed_spiker(np.array([])), get_phi_U_learner(neuron, dt),
                 accs, neuron=neuron, seed=seed, learn=learn)

    dump((seed, accums), 'sine_task/' + p['ident'])
Example #13
    def test_S_label(self, output_folder, caplog):
        caplog.set_level(logging.INFO)
        output_path = str(output_folder).replace('\\', '/')
        date = outputting.date()

        simulation.run(label='test', output_path=output_path)

        captured = caplog.records

        for level in [k.levelname for k in captured]:
            assert level == 'INFO'

        captured_loggers = [k.name for k in captured]
        correct_loggers = ['Setup', 'Setup', 'Setup', 'Framework', 'Simulation', 'Setup']
        for capt, corr in itertools.zip_longest(captured_loggers, correct_loggers):
            assert capt == corr

        standard_captured = [k.message.replace('\n', '') for k in captured]
        correct = ['{}'.format(date),
                   'Log initialised',
                   'The log you are reading was written to {}/Outputs/test_{}/log.txt'.format(output_path, date),
                   'Beginning task labelled: test',
                   "Simulation 0 contains the task Basic: Trials = 100.The model used is QLearn: number_actions = 2, number_cues = 1, number_critics = 2, prior = array([0.5, 0.5]), non_action = 'None', actionCode = {0: 0, 1: 1}, stimulus_shaper = 'model.modelTemplate.Stimulus with ', reward_shaper = 'model.modelTemplate.Rewards with ', decision_function = 'discrete.weightProb with task_responses : 0, 1', alpha = 0.3, beta = 4, expectation = array([[0.5],       [0.5]]).",
                   'Shutting down program']

        for correct_line, standard_captured_line in itertools.zip_longest(correct, standard_captured):
            assert standard_captured_line == correct_line

        assert os.path.exists(output_path)
        assert os.path.exists(output_path + '/Outputs')
        folder_path = output_path + '/Outputs/test_{}/'.format(date)
        assert os.path.exists(folder_path)
        assert os.path.exists(folder_path + 'data')
        assert not os.path.exists(folder_path + 'Pickle')

        with open(folder_path + 'log.txt') as log:
            cleaned_log = [l.split('    ')[-1].strip() for l in log.readlines()]
            correct[-2] = correct[-2][:-15]
            final_correct = correct[-1]
            correct[-1] = '[0.5]]).'
            correct.append(final_correct)
        for correct_line, standard_captured_line in itertools.zip_longest(correct, cleaned_log):
            assert standard_captured_line == correct_line
Example #14
def vary(args):
    repetition_i, p = args
    
    etas = {True: 6e-8,
            False: 30e-8}
            
    varies = {"alpha": np.linspace(-52.0,-56.0,3),
              "beta": np.linspace(0.15,0.25,3),
              "r_max": np.linspace(0.1,0.3,3)}

    learn = get_default("learn")
    learn["eta"] = etas[p["h1"]]

    neuron = get_default("neuron")
    neuron["phi"]["alpha"] = -54.0
    neuron["phi"]["beta"] = 0.25
    neuron["phi"]["r_max"] = 0.2
    neuron["phi"][p["vary"]] = varies[p["vary"]][p["ivary"]]


    spikes = np.arange(20.0,301.0,20.0)

    my_s = {
        'start': 0.0,
        'end': 350.0,
        'dt': 0.05,
        'pre_spikes': [spikes-10.0],
        'I_ext': lambda t: 0.0
        }

    # 0.2 <= p <= 1.0
    prob = 0.2 + 0.8*np.random.rand()

    seed = int(int(time.time()*1e8)%1e9)
    accs = [PeriodicAccumulator(['weights'], interval=10)]
    if p["h1"]:
        accums = run(my_s, get_fixed_spiker(spikes), get_dendr_spike_det(-50.0), accs, neuron=neuron, seed=seed, learn=learn, p_backprop=prob, h=1.0)
    else:
        accums = run(my_s, get_fixed_spiker(spikes), get_dendr_spike_det(-50.0), accs, neuron=neuron, seed=seed, learn=learn, p_backprop=prob)


    dump((prob,accums),'sjostrom_switch/'+p['ident'])
Example #15
def fit(args):
    repetition_i, p = args

    values = {True: {"alpha":-55.0,
                     "beta":0.4,
                     "r_max":0.3},
              False: {"alpha":-59.0,
                      "beta":0.5,
                      "r_max":0.17}}

    neuron = get_default("neuron")
    neuron["phi"]['r_max'] = values[p["h1"]]["r_max"]
    neuron["phi"]['alpha'] = values[p["h1"]]["alpha"]
    neuron["phi"]['beta'] = values[p["h1"]]["beta"]

    learn = get_default("learn")
    if not p["h1"]:
        learn["eta"] = learn["eta"]*2.5
    else:
        learn["eta"] = learn["eta"]*1.3


    spikes = np.array([101.0])

    my_s = {
        'start': 0.0,
        'end': 300.0,
        'dt': 0.05,
        'pre_spikes': [spikes + p["delta"]],
        'I_ext': lambda t: 0.0
        }


    seed = 1
    accs = [PeriodicAccumulator(['y','weights'], interval=10), BooleanAccumulator(['spike', 'dendr_spike', 'pre_spikes'])]
    if p["h1"]:
        accums = run(my_s, get_fixed_spiker(spikes), get_dendr_spike_det(-50.0), accs, seed=seed, neuron=neuron, learn=learn, h=1.0)
    else:
        accums = run(my_s, get_fixed_spiker(spikes), get_dendr_spike_det(-50.0), accs, seed=seed, neuron=neuron, learn=learn)

    dump((accums, values),'bi_poo/'+p['ident'])
Example #16
def interpret_command(cmd):
    if cmd == '1':  # path planning
        simulation.run()

    elif cmd == '2':
        print(" Sorry, this section is not functional at this time")

    elif cmd == '3':
        print(" Sorry, this section is not functional at this time")

    elif cmd == '4':
        print(" Sorry, this section is not functional at this time")

    else:
        print(' ERROR: unexpected command...')

    print()
    run_again = input(' Would you like to run another exercise?[y/n]: ')

    if run_again != 'y':
        return False

    return True
Example #17
def loop(screen, clock, level):
    section = config['genetic-algorithm']
    population_size = section.getint('population-size')
    chromosome_size = section.getint('chromosome-size')
    bounds = section.gettuple('mutation-bounds')
    information = utils.Information()
    generations = section.getint('generations')
    generation = 0

    population = gentools.initialize(population_size, chromosome_size, bounds)
    population, running = simulation.run(screen, clock, level, population,
                                         information)
    while running and generation < generations:
        generation += 1
        information.generation = generation
        selected = gentools.tournament_selection(
            population, section.getint('tournament-size'))
        offspring = gentools.single_point_crossover(
            selected, section.getfloat('crossover-rate'))
        offspring = gentools.uniform_mutation(offspring, bounds)
        offspring, running = simulation.run(screen, clock, level, offspring,
                                            information)
        population = gentools.elite_succession(population, offspring,
                                               section.getint('elite-size'))
Example #18
def test_simulation(nsubjects=256, f_mock=None):
    with Core(f_mock=f_mock) as core:
        response = simulation.run(
            core,
            simulation.Request(
                name='random',
                alternatives=('A', 'B', 'C', 'D', 'E'),
                gen_menus=simulation.GenMenus(
                    generator=simulation.Exhaustive(),
                    defaults=False,
                ),
                gen_choices=simulation.Uniform(
                    forced_choice=True,
                    multiple_choice=False,
                ),
            ))

    assert len(response.subject_packed) == 223
Example #19
def task(args):
    repetition_i, p = args

    eps_stim = eps_stim_low + (eps_stim_high - eps_stim_low) * np.random.rand()

    learn = get_default("learn")
    learn["eta"] = [1e-7, 0.0]
    learn["eps"] = [p["eps_learn"], 10**eps_stim]

    neuron = get_default("neuron")
    neuron["phi"]["alpha"] = -52.0
    neuron["phi"]["beta"] = 0.25
    neuron["phi"]["r_max"] = 0.35

    post_spikes = np.arange(40.0, 2000.0, 20.0)
    pre_spikes_learn = post_spikes - 10.0
    pre_spikes_stim = post_spikes - p["stim_delta"]

    my_s = {
        'start': 0.0,
        'end': 2000.0,
        'dt': 0.05,
        'pre_spikes': [pre_spikes_learn, pre_spikes_stim],
        'I_ext': lambda t: 0.0
    }

    prob = p["prob"]

    seed = int(int(time.time() * 1e8) % 1e9)
    accs = [PeriodicAccumulator(['weights'], interval=100)]
    accums = run(my_s,
                 get_fixed_spiker(post_spikes),
                 get_dendr_spike_det(p["thresh"]),
                 accs,
                 neuron=neuron,
                 seed=seed,
                 learn=learn,
                 p_backprop=prob)

    dump((eps_stim, accums), p['ident'])
Example #20
    def analysis_simulation(
            self, worker: Worker,
            options: 'gui.copycat_simulation.Options') -> 'ExperimentalData':
        subjects: List[PackedSubject] = []

        with Core() as core:
            worker.interrupt = lambda: core.shutdown()  # register interrupt hook

            worker.set_work_size(len(self.subjects) * options.multiplicity)
            position = 0
            for subject_packed in self.subjects:
                for j in range(options.multiplicity):
                    response = simulation.run(
                        core,
                        simulation.Request(
                            name='random%d' % (j + 1),
                            alternatives=self.alternatives,  # we don't use subject.alternatives here
                            gen_menus=simulation.GenMenus(
                                generator=simulation.Copycat(subject_packed),
                                defaults=False,  # this will be ignored, anyway
                            ),
                            gen_choices=options.gen_choices,
                            preserve_deferrals=options.preserve_deferrals,
                        ))

                    subjects.append(response.subject_packed)

                    position += 1
                    if position % 1024 == 0:
                        worker.set_progress(position)

        ds = ExperimentalData(name=options.name,
                              alternatives=self.alternatives)
        ds.subjects = subjects
        ds.observ_count = options.multiplicity * self.observ_count
        return ds
Example #21
            end = line.find(" ", start + 1)
            errors.append(float(line[start+1:end]))
    plotting.plot(errors, None, "Error over iterations", "iteration count", "Error value", name + "_iteration_error.png")

if __name__ == "__main__":
    jump = 2
    #graph_recode()
    #quit()


    if jump == 1:
        for i in range(0,50):
            exc_rate = 0.08
            inh_rate = 0.02

            store = simulation.run(False, (exc_rate,inh_rate))

            tau_e = store['vars'][0]
            tau_i = store['vars'][1]
            E_exc = store['vars'][2]
            E_inh = store['vars'][3]
            r_m   = store['vars'][4]
            tau_m = store['vars'][5]
            c_m   = store['vars'][6]
            g_l   = store['vars'][7]
            N_e   = store['vars'][8]
            N_i   = store['vars'][9]
            g_l   = store['vars'][10]  # note: overwrites the g_l read from index 7 above
            E_res = store['vars'][11]
            sampling = store['vars'][12]
Example #22
def main(args):
    country = args.country
    region = args.region
    subregion = args.subregion
    skip_hospitalizations = args.skip_hospitalizations
    quarantine_perc = args.quarantine_perc
    quarantine_effectiveness = args.quarantine_effectiveness
    verbose = args.verbose

    if country != 'US' and not region:
        region = 'ALL'

    best_params_type = args.best_params_type
    assert best_params_type in ['mean', 'median', 'top',
                                'top10'], best_params_type

    if args.best_params_dir:
        # Load parameters from file
        best_params = load_best_params_from_file(args.best_params_dir, country,
                                                 region, subregion)
        simulation_start_date = str_to_date(best_params['first_date'])
        simulation_create_date = str_to_date(best_params['date'])
        simulation_end_date = str_to_date(best_params['projection_end_date'])

        region_params = {'population': best_params['population']}
        # mean_params, median_params, top_params, or top10_params
        params_type_name = f'{best_params_type}_params'
        if verbose:
            print('best params type:', best_params_type)
        params_dict = convert_mean_params_to_params_dict(
            best_params[params_type_name])
    else:
        """
        You can hard code your own parameters if you do not want to use the preset parameters.

        This can be especially useful for regions/countries where we do not have projections.

        Then simply run `python run_simulation.py -v` to use these parameters.
        """

        simulation_start_date = datetime.date(2020, 2, 1)
        simulation_create_date = datetime.date.today()  # not used, so it can also be None
        simulation_end_date = datetime.date(2020, 10, 1)

        region_params = {'population': 332000000}
        params_dict = {
            'INITIAL_R_0': 2.24,
            'LOCKDOWN_R_0': 0.9,
            'INFLECTION_DAY': datetime.date(2020, 3, 18),
            'RATE_OF_INFLECTION': 0.25,
            'LOCKDOWN_FATIGUE': 1.,
            'DAILY_IMPORTS': 500,
            'MORTALITY_RATE': 0.01,
            'REOPEN_DATE': datetime.date(2020, 5, 20),
            'REOPEN_SHIFT_DAYS': 0,
            'REOPEN_R': 1.2,
            'REOPEN_INFLECTION': 0.3,
            'POST_REOPEN_EQUILIBRIUM_R': 1.,
            'FALL_R_MULTIPLIER': 1.001,
        }

    if args.simulation_start_date:
        simulation_start_date = str_to_date(args.simulation_start_date)
    if args.simulation_end_date:
        simulation_end_date = str_to_date(args.simulation_end_date)

    if args.set_param:
        print('---------------------------------------')
        print('Overwriting params from command line...')
        for param_name, param_value in args.set_param:
            assert param_name in params_dict, f'Unrecognized param: {param_name}'
            old_value = params_dict[param_name]
            new_value = convert_str_value_to_correct_type(
                param_value, old_value)
            print(f'Setting {param_name} to: {new_value}')
            params_dict[param_name] = new_value

    if args.change_param:
        print('---------------------------------------')
        print('Changing params from command line...')
        for param_name, value_change in args.change_param:
            assert param_name in params_dict, f'Unrecognized param: {param_name}'
            old_value = params_dict[param_name]
            new_value = old_value + convert_str_value_to_correct_type(
                value_change, old_value, use_timedelta=True)
            print(f'Changing {param_name} from {old_value} to {new_value}')
            params_dict[param_name] = new_value

    region_model = RegionModel(
        country,
        region,
        subregion,
        simulation_start_date,
        simulation_create_date,
        simulation_end_date,
        region_params,
        compute_hospitalizations=(not skip_hospitalizations))

    if quarantine_perc > 0:
        print(f'Quarantine percentage: {quarantine_perc:.0%}')
        print(f'Quarantine effectiveness: {quarantine_effectiveness:.0%}')
        assert quarantine_effectiveness in [0.025, 0.1, 0.25, 0.5], \
            ('must specify --quarantine_effectiveness percentage.'
                ' Possible values: [0.025, 0.1, 0.25, 0.5]')
        quarantine_effectiveness_to_reduction_idx = {
            0.025: 0,
            0.1: 1,
            0.25: 2,
            0.5: 3
        }
        region_model.quarantine_fraction = quarantine_perc
        region_model.reduction_idx = \
            quarantine_effectiveness_to_reduction_idx[quarantine_effectiveness]

    if verbose:
        print('================================')
        print(region_model)
        print('================================')
        print('Parameters:')
        for param_name, param_value in params_dict.items():
            print(f'{param_name:<25s} : {param_value}')

    # Add params to region_model
    params_tups = tuple(params_dict.items())
    region_model.init_params(params_tups)

    if verbose:
        print('--------------------------')
        print('Running simulation...')
        print('--------------------------')

    # Run simulation
    dates, infections, hospitalizations, deaths = run(region_model)
    """
    The following are lists with length N, where N is the number of days from
        simulation_start_date to simulation_end_date.

    dates            : datetime.date objects representing day i
    infections       : number of new infections on day i
    hospitalizations : occupied hospital beds on day i
    deaths           : number of new deaths on day i
    """
    assert len(dates) == len(infections) == len(hospitalizations) == len(
        deaths)
    assert dates[0] == simulation_start_date
    assert dates[-1] == simulation_end_date

    if verbose:
        infections_total = infections.cumsum()
        deaths_total = deaths.cumsum()
        for i in range(len(dates)):
            hospitalization_str = ''
            if not skip_hospitalizations:
                hospitalization_str = f'Hospital beds in use: {hospitalizations[i]:,.0f} - '
            daily_str = (
                f'{dates[i]} - '
                f'New / total infections: {infections[i]:,.0f} / {infections_total[i]:,.0f} - '
                f'{hospitalization_str}'
                f'New / total deaths: {deaths[i]:,.2f} / {deaths_total[i]:,.1f} - '
                f'Mean R: {region_model.effective_r_arr[i]:.3f} - '
                f'IFR: {region_model.ifr_arr[i]:.2%}')
            print(daily_str)  # comment out to spare console buffer
    print('-------------------------------------')
    print(f'End of simulation       : {region_model.projection_end_date}')
    print(f'Total infections        : {infections.sum():,.0f}')
    if not skip_hospitalizations:
        print(f'Peak hospital beds used : {hospitalizations.max():,.0f}')
    print(f'Total deaths            : {deaths.sum():,.0f}')

    if args.save_csv_fname:
        dates_str = np.array(list(map(str, dates)))
        combined_arr = np.vstack((dates_str, infections, hospitalizations,
                                  deaths, region_model.effective_r_arr)).T
        headers = 'dates,infections,hospitalizations,deaths,mean_r_t'
        np.savetxt(args.save_csv_fname,
                   combined_arr,
                   '%s',
                   delimiter=',',
                   header=headers)
        print('----------\nSaved file to:', args.save_csv_fname)
Example #23
def main():
    board, clock = initialize()
    simulation.run(board, clock)
Example #24
assert len(a_rates) == C, "Check dimension arrival rates"
assert len(b_rates) == C, "Check dimension back-off rates"


# Print activity factors
xi = get_xi_iterative(G, a_rates, b_rates, t_rates)
print("Activity factors", xi)

# Solve differential equation
print('\n**** Solve Differential Equation ****\n')
occupancy_eq = run_diff_eq(t_rates=t_rates, a_rates=a_rates, b_rates=b_rates,
                           T=T, G=G, max_store=max_store, num_steps=num_steps)

# Run the simulation
print('\n**** Run simulation ****\n')
occupancy = run(t_rates=t_rates, a_rates=a_rates, b_rates=b_rates, N=N, T=T, G=G, max_store=max_store)

# Create directory for saving the results and save input
os.makedirs(path_output, exist_ok=True)
text_file = open("Input.txt", "w")
text_file.write("G =\n{}\nt_rates= {}\na_rates= {}\nb_rates= {}\nN={}\nnum_steps={}".format(G, t_rates, a_rates,
                                                                                            b_rates, N, num_steps))
text_file.close()

# Plot and save the results
plot_interference_graph(G, path_output=path_output)
print('\n**** Plot ****\n')
for c in range(len(t_rates)):
    plot_occupancy_compare(sol_truth=occupancy_eq, sol_sim=occupancy,
                           c=c, max_store=max_store, T=T, path_output=path_output)
Example #25
def main():
    simulation.run()
Example #26
def task(args):
    repetition_i, p = args

    n_syn = p["n_syn"]

    learn = get_default("learn")
    learn["eps"] = 1e-1 / (1.0 * n_syn)
    learn["eta"] = learn["eps"] * p["eps_factor"]

    neuron = get_default("neuron")
    neuron["phi"]["alpha"] = p["alpha"]
    neuron["phi"]["beta"] = p["beta"]
    neuron["phi"]["r_max"] = 0.1
    neuron["g_S"] = p["g_S"]

    learn_epochs = 20
    test_epochs = 20
    epochs = learn_epochs + test_epochs
    l_c = 8
    eval_c = 2
    cycles = epochs * l_c + (epochs + 1) * eval_c
    cycle_dur = 100.0
    epoch_dur = (l_c + eval_c) * cycle_dur
    t_end = cycles * cycle_dur

    exc_level = p["exc_level"]
    g_factor = 50

    def exc_soma_cond(t):
        if t % (cycle_dur * (l_c + eval_c)
                ) < cycle_dur * eval_c or t > learn_epochs * epoch_dur:
            return 0.0
        else:
            return ((1 + np.sin(-np.pi / 2 + t / t_end * cycles * 2 * np.pi)) *
                    exc_level + exc_level) * g_factor

    def inh_soma_cond(t):
        if t % (cycle_dur * (l_c + eval_c)
                ) < cycle_dur * eval_c or t > learn_epochs * epoch_dur:
            return 0.0
        else:
            return 4e-2 * g_factor

    dt = 0.05
    f_r = 0.01  # 10Hz
    t_pts = np.arange(0, t_end / cycles, dt)

    seed = int(int(time.time() * 1e8) % 1e9)
    poisson_spikes = [
        t_pts[np.random.rand(t_pts.shape[0]) < f_r * dt] for _ in range(n_syn)
    ]
    poisson_spikes = [[] if spikes.shape[0] == 0 else np.concatenate(
        [np.arange(spike, t_end, cycle_dur) for spike in spikes])
                      for spikes in poisson_spikes]
    for train in poisson_spikes:
        train.sort()

    my_s = {
        'start': 0.0,
        'end': t_end,
        'dt': dt,
        'pre_spikes': poisson_spikes,
        'syn_cond_soma': {
            'E': exc_soma_cond,
            'I': inh_soma_cond
        },
        'I_ext': lambda t: 0.0
    }

    phi_spiker = get_phi_spiker(neuron)

    # deprecated
    def my_spiker(curr, dt, **kwargs):
        # we want no spikes in eval cycles
        if curr['t'] % (cycle_dur * (l_c + eval_c)) < cycle_dur * eval_c:
            return False
        else:
            return phi_spiker(curr, dt, **kwargs)

    accs = [
        PeriodicAccumulator(get_all_save_keys(), interval=20, y_keep=3),
        BooleanAccumulator(['spike'])
    ]
    accums = run(my_s,
                 phi_spiker,
                 get_inst_backprop(),
                 accs,
                 neuron=neuron,
                 seed=seed,
                 learn=learn)

    dump((seed, accums), 'sine_task_backprop/' + p['ident'])
Example #27
def fit(args):
    repetition_i, p = args

    learn = get_default("learn")
    if p["h1"]:
        learn['eta'] *= 0.125 / 8.0
    else:
        learn["eta"] *= 0.1

    neuron = get_default("neuron")
    neuron["phi"]['r_max'] = 0.2
    neuron["phi"]['alpha'] = -54.0
    neuron["phi"]['beta'] = 0.25

    p_backprop = 0.75

    freq = p["freq"]
    delta = p["delta"]

    n_spikes_in_burst = 10
    burst_pause = 200.0
    bursts = 50 // n_spikes_in_burst
    burst_dur = 1000.0 * n_spikes_in_burst / freq

    first_spike = 1000.0 / (2 * freq)
    isi = 1000.0 / freq
    t_end = bursts * (burst_dur + burst_pause)

    spikes_in_burst = np.arange(first_spike, burst_dur, isi)
    spikes = np.array([])
    for i in range(bursts):
        spikes = np.concatenate(
            (spikes, spikes_in_burst + i * (burst_dur + burst_pause)))

    pre_spikes = spikes + delta

    my_s = {
        'start': 0.0,
        'end': t_end,
        'dt': 0.05,
        'pre_spikes': [pre_spikes],
        'I_ext': lambda t: 0.0
    }

    seed = int(int(time.time() * 1e8) % 1e9)
    accs = [PeriodicAccumulator(['weights'], interval=100)]
    if p["h1"]:
        accums = run(my_s,
                     get_fixed_spiker(spikes),
                     get_dendr_spike_det(-50.0),
                     accs,
                     seed=seed,
                     learn=learn,
                     neuron=neuron,
                     h=1.0,
                     p_backprop=p_backprop)
    else:
        accums = run(my_s,
                     get_fixed_spiker(spikes),
                     get_dendr_spike_det(-50.0),
                     accs,
                     seed=seed,
                     learn=learn,
                     neuron=neuron,
                     p_backprop=p_backprop)

    dump(accums, 'sjostrom/' + p['ident'])
Example #28
strat = Strategy('derive + no loss')
strat.buy = strategy.derivative
strat.stop_limit = strategy.no_loss
todo.append(strat)

strat = Strategy('velocity reversal + no loss')
strat.buy = strategy.velocity_reversal
strat.stop_limit = strategy.no_loss
todo.append(strat)

ls = []
for interval, values in candles.items():
    for test in todo:
        print('testing...', end=' ', flush=True)
        data = simulation.run(values, intervals, funds, fees, test, False)
        data.insert(0, interval)
        data.insert(0, test)
        ls.append(data)

ls.sort(key=itemgetter(2), reverse=True)

top = set()
for algo in ls[:]:
    name = algo[0].name
    if name in top:
        ls.remove(algo)
    else:
        top.add(name)

print('----------------------------------------')
Example #29
import DataGenerator
from decision_ql import QLearningDecisionPolicy
import simulation
import tensorflow as tf
from sklearn.preprocessing import StandardScaler
tf.compat.v1.reset_default_graph()

if __name__ == '__main__':
    start, end = '2018-01-01', '2020-05-19'
    company_list = ['hmotor', 'naver', 'lgchem', 'kakao', 'lghnh', 'samsung2', 'sdi']
    actions = company_list + ['not_buying']
    #########################################
    open_prices, close_prices, features = DataGenerator.make_features(company_list, '2018-01-01', '2020-05-19', is_training=True)
    scaler = StandardScaler()
    scaler.fit(features)
    ###########################################
    open_prices, close_prices, features = DataGenerator.make_features(company_list, start, end, is_training=False)
    features = scaler.transform(features)

    budget = 10. ** 8
    num_stocks = [0] * len(company_list)
    input_dim = len(features[0])

    # TODO: fix checkpoint directory name
    policy = QLearningDecisionPolicy(epsilon=0, gamma=0, decay=0, lr=0, actions=actions, input_dim=input_dim,
                                     model_dir="LFD_project4_team09")
    final_portfolio = simulation.run(policy, budget, num_stocks, open_prices, close_prices, features)

    print("Final portfolio: %.2f won" % final_portfolio)

Example #30
from simulation import run

if __name__ == '__main__':
    mu = 2
    lambd = 1

    simulation_time = 999999

    run(mu=mu, lambd=lambd, simulation_time=simulation_time)
Example #31
def task(args):
    repetition_i, p = args

    n_syn = p["n_syn"]

    learn = get_default("learn")
    learn["eps"] = 1e-1 / (1.0 * n_syn)
    learn["eta"] = learn["eps"]*p["eps_factor"]

    neuron = get_default("neuron")
    neuron["phi"]["alpha"] = p["alpha"]
    neuron["phi"]["beta"] = p["beta"]
    neuron["phi"]["r_max"] = 0.1
    neuron["g_S"] = p["g_S"]

    learn_epochs = 20
    test_epochs = 20
    epochs = learn_epochs + test_epochs
    l_c = 8
    eval_c = 2
    cycles = epochs * l_c + (epochs + 1) * eval_c
    cycle_dur = 100.0
    epoch_dur = (l_c + eval_c) * cycle_dur
    t_end = cycles * cycle_dur

    exc_level = p["exc_level"]
    g_factor = 50

    def exc_soma_cond(t):
        if t % (cycle_dur * (l_c + eval_c)) < cycle_dur * eval_c or t > learn_epochs*epoch_dur:
            return 0.0
        else:
            return ((1 + np.sin(-np.pi/2 + t / t_end * cycles * 2 * np.pi)) * exc_level + exc_level) * g_factor

    def inh_soma_cond(t):
        if t % (cycle_dur * (l_c + eval_c)) < cycle_dur * eval_c or t > learn_epochs*epoch_dur:
            return 0.0
        else:
            return 4e-2 * g_factor

    dt = 0.05
    f_r = 0.01 # 10Hz
    t_pts = np.arange(0, t_end / cycles, dt)

    seed = int(int(time.time() * 1e8) % 1e9)
    poisson_spikes = [t_pts[np.random.rand(t_pts.shape[0]) < f_r * dt] for _ in range(n_syn)]
    poisson_spikes = [[] if spikes.shape[0] == 0 else np.concatenate(
        [np.arange(spike, t_end, cycle_dur) for spike in spikes]) for spikes in poisson_spikes]
    for train in poisson_spikes:
        train.sort()

    my_s = {
        'start': 0.0,
        'end': t_end,
        'dt': dt,
        'pre_spikes': poisson_spikes,
        'syn_cond_soma': {'E': exc_soma_cond, 'I': inh_soma_cond},
        'I_ext': lambda t: 0.0
    }

    phi_spiker = get_phi_spiker(neuron)

    # deprecated
    def my_spiker(curr, dt, **kwargs):
        # we want no spikes in eval cycles
        if curr['t'] % (cycle_dur * (l_c + eval_c)) < cycle_dur * eval_c:
            return False
        else:
            return phi_spiker(curr, dt, **kwargs)

    accs = [PeriodicAccumulator(get_all_save_keys(), interval=20, y_keep = 3), BooleanAccumulator(['spike'])]
    accums = run(my_s, phi_spiker, get_inst_backprop(),
                 accs, neuron=neuron, seed=seed, learn=learn)

    dump((seed, accums), 'sine_task_backprop/' + p['ident'])
Example #32
def run_config(script_file, trusted_file=False):
    """
    Takes a .yaml configuration file and runs a simulation or data fitting as described.

    Parameters
    ----------
    script_file : string
        The file name and path of a ``.yaml`` configuration file.
    trusted_file : bool, optional
        If the config file contains executable code this will only be executed if trusted_file is set to ``True``.
        Default is ``False``
    """

    if trusted_file:
        loader = yaml.UnsafeLoader
    else:
        loader = yaml.FullLoader

    with open(script_file) as file_stream:
        script = yaml.load(file_stream, Loader=loader)

    script_sections = list(script.keys())

    if 'model' not in script_sections:
        raise MissingScriptSection(
            'A ``model`` should be described in the script')

    run_properties = {'config_file': script_file}

    for label, location in SCRIPT_PARAMETERS.items():
        try:
            value = key_find(script, location)
            if label == 'data_extra_processing':
                if value[:4] == 'def ':
                    compiled_value = compile(value, '<string>', 'exec')
                    eval(compiled_value)
                    function_name = compiled_value.co_names[0]
                    function = [
                        v for k, v in copy.copy(locals()).items()
                        if k == function_name
                    ][0]
                    args = utils.get_function_args(function)
                    if len(args) != 1:
                        raise ArgumentError(
                            'The data extra_processing function must have only one argument. Found {}'
                            .format(args))
                    function.func_code_string = value
                    value = function
                else:
                    raise TypeError(
                        'data extra_processing must provide a function')
            run_properties[label] = value
        except MissingKeyError:
            continue

    for label, location in SCRIPT_PARAMETER_GROUPS.items():
        try:
            value = key_find(script, location)
            run_properties[label] = value
        except MissingKeyError:
            continue

    if 'simulation' in script_sections:
        if 'task' not in script_sections:
            raise MissingScriptSection(
                'A ``task`` should be described in the script for a simulation'
            )

        simulation.run(**run_properties)

    elif 'fitting' in script_sections:
        if 'data' not in script_sections:
            raise MissingScriptSection(
                'A ``data`` section should be described in the script')
        dataFitting.run(**run_properties)
    else:
        raise MissingScriptSection(
            'A ``simulation`` or ``fitting`` section is necessary for this script to be understood'
        )
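A minimal usage sketch for run_config, assuming a local YAML file; the file name simulation_script.yaml is a hypothetical placeholder, and trusted_file is left at its safe default so any code embedded in the config is not executed:

# Hypothetical invocation of run_config defined above.
# 'simulation_script.yaml' is a placeholder path; pass trusted_file=True only
# for config files you wrote yourself, since that allows embedded code to run.
run_config('simulation_script.yaml')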
Example #33
def task(args):
    repetition_i, p = args

    n_syn = p["n_syn"]

    learn = get_default("learn")
    learn["eps"] = 1e-1 / (1.0 * n_syn)
    learn["eta"] = learn["eps"] * p["eps_factor"]

    neuron = get_default("neuron")
    neuron["phi"]["alpha"] = p["alpha"]
    neuron["phi"]["beta"] = p["beta"]
    neuron["phi"]["r_max"] = 0.1
    neuron["g_S"] = p["g_S"]

    learn_epochs = 2
    test_epochs = 1
    epochs = learn_epochs + test_epochs
    l_c = 4
    eval_c = 2
    cycles = epochs * l_c + (epochs + 1) * eval_c
    cycle_dur = p["cycle_dur"]
    epoch_dur = (l_c + eval_c) * cycle_dur
    t_end = cycles * cycle_dur

    g_factor = p["g_factor"]

    def exc_soma_cond(t):
        if t % (cycle_dur * (l_c + eval_c)) < cycle_dur * eval_c or t > learn_epochs * epoch_dur:
            return 0.0
        return p["exc_level"] * p["g_factor"]

    def inh_soma_cond(t):
        if t % (cycle_dur * (l_c + eval_c)) < cycle_dur * eval_c or t > learn_epochs * epoch_dur:
            return 0.0
        return 1e-1 * p["g_factor"]

    dt = 0.05
    f_r = 1.0 / cycle_dur
    t_pts = np.arange(0, t_end / cycles, dt)

    seed = int(int(time.time() * 1e8) % 1e9)

    reg_spikes = [np.arange(i+1,t_end+1,10) for i in range(n_syn)]

    poisson_spikes = [t_pts[np.random.rand(t_pts.shape[0]) < f_r * dt] for _ in range(n_syn)]
    poisson_spikes = [[] if spikes.shape[0] == 0 else np.concatenate(
        [np.arange(spike, t_end, cycle_dur) for spike in spikes]) for spikes in poisson_spikes]
    for train in poisson_spikes:
        train.sort()

    my_s = {
        'start': 0.0,
        'end': t_end,
        'dt': dt,
        'pre_spikes': reg_spikes,
        'syn_cond_soma': {'E': exc_soma_cond, 'I': inh_soma_cond},
        'I_ext': lambda t: 0.0
    }

    phi_spiker = get_phi_spiker(neuron)

    # deprecated
    def my_spiker(curr, dt, **kwargs):
        # we want no spikes in eval cycles
        if curr['t'] % (cycle_dur * (l_c + eval_c)) < cycle_dur * eval_c:
            return False
        else:
            return phi_spiker(curr, dt, **kwargs)

    if p["wiggle"] is None:
        dendr_predictor = phi
    else:
        us = np.linspace(-100,20,1000)
        ampl = neuron["phi"]["r_max"]
        phis = phi(us, neuron)
        alphas = []
        for i in range(p["wiggle"]):
            alphas.append(us[phis > (i+0.5)*ampl/p["wiggle"]][0])
        r_m = neuron['phi']['r_max']/p["wiggle"]
        def dendr_predictor(V, neuron):
            return np.sum([phi(V,{'phi':{'alpha': al, 'beta':p["beta_wiggle"], 'r_max': r_m}}) for al in alphas])


    accs = [PeriodicAccumulator(get_all_save_keys(), interval=20,
                                y_keep=3), BooleanAccumulator(['spike'])]
    accums = run(my_s, get_fixed_spiker(np.array([])), get_phi_U_learner(neuron, dt, p["exc_decrease"]),
                 accs, neuron=neuron, seed=seed, learn=learn, dendr_predictor=dendr_predictor)

    dump((seed, accums), 'wiggle_test/' + p['ident'])
Example #34
def export_station_log_csv(scheme, simulation, file_name):
    output = [''] * (len(simulation.station_log[0]) + 1)
    print(len(simulation.station_log[0]))
    for stations in simulation.station_log:
        for station in stations:
            if scheme is simulator.Scheme.CRB:  # TODO save scheme in simulation object
                output[station.num] += '{},{},'.format(
                    station.backoff,
                    station.synchronized is True)
            else:
                output[station.num] += '{},'.format(station.backoff)

    with open(file_name, 'a') as file:
        for s in output:
            file.write(s + '\n')


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG,
                        stream=sys.stdout,
                        format='%(message)s')

    scheme = simulator.Scheme.CRB
    # simulation = simulator.run(scheme, num_stations=7, cw_start=3, num_iterations=5000)
    # simulation = simulator.run(scheme, num_stations=25, cw_start=63, cw_end=63, num_iterations=5000)
    # simulation = simulator.run(scheme, num_stations=20, num_iterations=300000)
    simulation = simulator.run(scheme, num_stations=8, num_iterations=15000)

    # path = 'output/{}/{}_station_log.csv'.format(datetime.now().strftime('%Y-%m-%d_%H-%M-%S'), scheme)
    # export_station_log_csv(scheme, simulation, ensure_dir(path))
Example #35
def task(inputs):
    repetition_i = inputs[0]
    p = inputs[1]

    n_syn = p["n_syn"]

    learn = get_default("learn")
    learn["eps"] = 1e-1 / n_syn
    learn["eta"] = 1e-3 / n_syn

    neuron = get_default("neuron")

    neuron["phi"]["alpha"] = p["alpha"]
    neuron["phi"]["beta"] = p["beta"]
    neuron["phi"]["r_max"] = p["r_max"]
    neuron["g_S"] = p["g_S"]

    epochs = 4
    l_c = 6
    eval_c = 2
    cycles = epochs * l_c + (epochs + 1) * eval_c
    cycle_dur = p["cycle_dur"]
    t_end = cycles * cycle_dur

    def exc_soma_cond(t):
        if t % (cycle_dur * (l_c + eval_c)) < cycle_dur * eval_c:
            return 0.0
        else:
            return ((1 + np.sin(np.pi / 2 + t / t_end * cycles * 2 * np.pi)) \
                    * 2e-3 * 1 + 8e-3) * p["g_factor"]

    def inh_soma_cond(t):
        if t % (cycle_dur * (l_c + eval_c)) < cycle_dur * eval_c:
            return 0.0
        else:
            return 8e-2 * p["g_factor"]

    dt = 0.05
    f_r = 0.01  # 10Hz
    t_pts = np.arange(0, t_end / cycles, dt)

    poisson_spikes = [
        t_pts[np.random.rand(t_pts.shape[0]) < f_r * dt] for _ in range(n_syn)
    ]
    poisson_spikes = [[] if spikes.shape[0] == 0 else np.concatenate(
        [np.arange(spike, t_end, cycle_dur) for spike in spikes])
                      for spikes in poisson_spikes]

    for train in poisson_spikes:
        train.sort()

    my_s = {
        'start': 0.0,
        'end': t_end,
        'dt': dt,
        'pre_spikes': poisson_spikes,
        'syn_cond_soma': {
            'E': exc_soma_cond,
            'I': inh_soma_cond
        },
        'I_ext': lambda t: 0.0
    }

    seed = int(int(time.time() * 1e8) % 1e9)
    accs = [PeriodicAccumulator(get_all_save_keys(), interval=40)]

    accums = run(my_s,
                 get_fixed_spiker(np.array([])),
                 get_phi_U_learner(neuron, dt),
                 accs,
                 neuron=neuron,
                 seed=seed,
                 learn=learn)
    # TODO: create directory if it does not exist
    dump((seed, accums), "results/" + p['ident'])
Example #36
# Calculate the running mean; x is the data, N is the window size
def runningMean(x, N):
    return np.convolve(x, np.ones((N,)) / N, mode='valid')
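A quick sanity check of runningMean, assuming numpy is imported as np as the code above implies:

# runningMean(np.array([1, 2, 3, 4, 5]), 2) -> array([1.5, 2.5, 3.5, 4.5])
# 'valid' mode trims the edges, so the output has len(x) - N + 1 entries.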

# Process raw data
# 1. Add up the volumes of the two series of data
# 2. Make dateTable to convert dates to indices and vice versa
# 3. Divide volumes by their running mean
# 4. Synthesize all the data into `data`
# Not included: 1. standardization of price; 2. dividing price and volume by their STD
#   (those are done in each match below)
price = rawdata0['close']
volume = rawdata0['volume'] + rawdata1['volume']

date = rawdata0.index.values
dateTable = pd.Series(range(numofentry), date)
mean = runningMean(volume, RunningMeanSize)
for a in range(RunningMeanSize - 1):
    mean = np.insert(mean, 0, 0)
pdmean = pd.Series(mean, index=date)
volume = volume / pdmean

data = DataFrame({'price': price, 'volume': volume})
data.to_csv('ProcessedData.csv')

startingDate = '8/22/16'
endDate = '9/22/16'
initialAsset = 100000
sim.run(data, startingDate, endDate, initialAsset, RunningMeanSize, NumOfBestFitWant, date, dateTable, L)
Example #37
generation = 1
populationCount = 50
population = [AiPlayer(brain=nn.Brain([0,0,0], [0,0,0])) for _ in range(0,populationCount)]
mode = "m"
while mode == "m" or mode == "s":
    try:
        results = []
        print(f"Generation: {generation}")
        if mode == "m":
            pool = Pool(4)
            results = pool.map(simulation.run, population)
            pool.close()
            pool.join()
        else:
            for player in population:
                results.append(simulation.run(player))
        results = sorted(results, key=lambda k: k['score'], reverse=True)
        best = results[0]
        maxScore = best['score']
        print(f"Best Score: {best['score']}")
        newPopulation = [copy.deepcopy(best['player']) for player in results]
        for player in newPopulation:
            player.mutate(random.randrange(0,3))
        newPopulation[0] = best['player']
        population = newPopulation
        if maxScore <= 2000:
            mode = "m"
        elif maxScore >= 2100:
            mode = "s"
        if maxScore >= 5000:
            mode = input("Mode (s/M/q): ") or "m"
Example #38
"""

from networkGenerator import NetworkGenerator
from visualisation import visualiseVoltage, visualiseFirings, visualiseMeanFirings
from simulation import run
import matplotlib.pyplot as plt

#%%
moduleSize = 100
moduleNumber = 8
N0 = moduleSize * moduleNumber
N1 = 200

generator = NetworkGenerator(moduleSize, moduleNumber, N1, 4, 1000)
generator.initialize()

for p in [x/10.0 for x in range(6)]:
    generator.setCurrentConnectivityMatrix(p = p)
    generator.genNetwork()
    net = generator.net
    
    tTotal = 1000
    dt = 1
    
    firings0, firings1, u0, u1, v0, v1 = run(net, tTotal, dt, N0, N1)
    visualiseVoltage(tTotal, u0, u1, v0, v1)
    
    generator.visualize(p, folder = 'Results-Q1')
    visualiseFirings(tTotal, firings0, firings1, N0, N1, p = p, folder = 'Results-Q1')
    a = visualiseMeanFirings(tTotal, firings0, 50, 20, p = p, folder = 'Results-Q1', size = 100, moduleNumber = 8)
    plt.close("all")