예제 #1
0
async def runsim(openers, fights, stattable, abilities, party, pbuffs, buffs):
    """Run three groups of two 600s 'Late Trick' simulations concurrently.

    The original body triplicated the task-creation code verbatim; this
    version deals the same tasks into three groups in the same creation
    order, awaits each group in sequence, and returns the concatenated
    results of all groups.

    Parameters mirror the sim() constructor: opener/fight tables, stat
    table, ability/party/buff data. Returns a flat list of sim results.
    """
    task_groups = [[], [], []]
    for x in range(2):
        # Same configuration for every task; x (0 or 1) is forwarded as the
        # sim id exactly as the original loop counter was.
        for tasks in task_groups:
            tasks.append(
                asyncio.create_task(
                    sim(x, 600, openers['Late Trick'], fights['Default'],
                        stattable, abilities, party, pbuffs, buffs, True,
                        False, False).sim()))
    results = []
    # Await group by group, preserving the original a + b + c ordering.
    for tasks in task_groups:
        results += await asyncio.gather(*tasks)
    return results
예제 #2
0
def run(param, env, controllers, initial_state = None, args = None):
	"""Dispatch to imitation-learning training or a plain simulation run.

	Parses command-line arguments when none are supplied, selects CUDA
	when available and not explicitly disabled, then either trains via
	train_il or runs the simulator.
	"""
	args = parse_args() if args is None else args

	# Prefer the GPU unless the user opted out or none is present.
	use_cuda = torch.cuda.is_available() and not args.disable_cuda
	device = torch.device('cuda' if use_cuda else 'cpu')

	if args.il:
		train_il(param, env, device)
	else:
		sim(param, env, controllers, initial_state, args.animate)
예제 #3
0
def main():
    """ This program's entrypoint. """
    # Command-line interface.
    psr = argparse.ArgumentParser(description="Generates training data.")
    psr.add_argument(
        "--log-dst", default=EMAIL_DST,
        help="The email address to which updates will be sent.", type=str)
    psr, psr_verify = cl_args.add_out(psr)
    args = psr_verify(psr.parse_args())

    # Use the current time as a unique experiment ID and build the output
    # tree <out_dir>/<eid>/sim; makedirs creates both levels at once.
    eid = str(round(time.time()))
    out_dir = path.join(args.out_dir, eid)
    sim_dir = path.join(out_dir, "sim")
    os.makedirs(sim_dir)

    # Configure logging, validating the module-level level name first.
    numeric_level = getattr(logging, LOG_LVL.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError(f"Invalid log level: {LOG_LVL}")
    logging.basicConfig(level=numeric_level)
    log = logging.getLogger(LOGGER)

    # One configuration per (bandwidth, delay, queue multiple, flow count)
    # combination from the module-level sweep constants.
    cnfs = []
    for bw_Mbps, dly_us, que_p, flws in itertools.product(
            BWS_Mbps, DELAYS_us, QUEUE_p, OTHER_FLOWS):
        # Queue capacity is que_p times the BDP, flooring the BDP at one
        # packet when it would be smaller than a single packet.
        bdp_p = bdp_bps(bw_Mbps, dly_us * 6) / float(PACKET_SIZE_B)
        cnfs.append({
            "bottleneck_bandwidth_Mbps": bw_Mbps,
            "bottleneck_delay_us": dly_us,
            "bottleneck_queue_p": int(round(que_p * max(1, bdp_p))),
            "unfair_flows": UNFAIR_FLOWS,
            "other_flows": flws,
            "other_proto": OTHER_PROTO,
            "unfair_edge_delays_us": f"[{dly_us}]",
            "other_edge_delays_us": f"[{dly_us}]",
            "payload_B": PACKET_SIZE_B,
            "enable_mitigation": "false",
            "duration_s": DUR_s,
            "pcap": "true" if PCAP else "false",
            "out_dir": sim_dir})
    sim.sim(eid, cnfs, out_dir, log_par=LOGGER, log_dst=args.log_dst,
            dry_run=DRY_RUN, sync=defaults.SYNC)

    log.info("Results in: %s", out_dir)
    log.critical("Finished.")
예제 #4
0
def compute_model(offset, maxframes, nskip, uth, ucl):
    """Build a Model whose states come from the sim stream after nskip.

    The sim generator is driven by listgen(uth, ucl); the first nskip
    states are discarded via islice and the remainder is materialized.
    """
    state_stream = sim(listgen(uth, ucl), offset)
    return Model(
        offset=offset,
        maxframes=maxframes,
        nskip=nskip,
        uth=uth,
        ucl=ucl,
        states=list(islice(state_stream, nskip, None)),
    )
예제 #5
0
def run(param, env, controllers, initial_state=None):
    """Parse mode flags from the command line and dispatch accordingly.

    The flags are checked in priority order rl > il > rrt > scp; when
    none is set the simulator runs, animating if --animate was given.
    """
    parser = argparse.ArgumentParser()
    # All modes are simple boolean switches.
    for flag in ("--rl", "--il", "--rrt", "--scp", "--animate"):
        parser.add_argument(flag, action='store_true')
    args = parser.parse_args()

    if args.rl:
        train_rl(param, env)
    elif args.il:
        train_il(param, env)
    elif args.rrt:
        rrt(param, env)
    elif args.scp:
        scp(param, env)
    else:
        sim(param, env, controllers, initial_state, args.animate)
예제 #6
0
def main():
    """Train a DDPG agent on the greenhouse environment, then simulate it,
    generate plots, and (optionally) build and email a report.

    If a command-line argument is given it is treated as the directory of
    a previously trained model to load; otherwise a fresh output path is
    created.
    """
    env = GreenhouseEnv()
    agent = DDPGagent(env)
    noise = OUNoise(env.action_space)
    # NOTE(review): action_dim/state_dim are computed but unused below —
    # possibly left over from an earlier version.
    action_dim = env.action_space.shape[0]
    state_dim = env.observation_space.shape[0]
    #writer_reward = SummaryWriter()
    #writer_abs = SummaryWriter()
    #writer_penalty = SummaryWriter()
    t1 = time.time()
    mpl.style.use('seaborn')
    if len(sys.argv) != 1:
        # Load trained model from the last command-line argument.
        PATH = sys.argv[1:].pop()
        print('Se cargo el modelo')  # Spanish: "the model was loaded"
        agent.load(PATH + '/nets')
    else:
        PATH = create_path()

    # Record run constants and the network sizes actually used.
    Constants(PATH)
    all_params['PARAMS_DDPG']['hidden_sizes'] = agent.hidden_sizes

    save_params(all_params, PATH + '/output')
    #agent.actor.eval()
    #agent.critic.eval()

    # Training loop; checkpoints every SAVE_FREQ episodes.
    rewards, avg_rewards, penalties, abs_rewards = train_agent(
        agent, noise, PATH, save_freq=SAVE_FREQ)

    figure_reward(rewards, avg_rewards, penalties, abs_rewards, PATH)
    save_rewards(rewards, avg_rewards, penalties, abs_rewards, PATH)
    # Roll out the trained agent once to collect climate/production traces.
    S_climate, S_data, S_prod, A, df_inputs, start, Qdic = sim(agent,
                                                               ind=INDICE)
    save_Q(Qdic, PATH)
    # Overwrites the `start` returned by sim() with the first input date.
    start = df_inputs['Date'].iloc[0]
    final_indexes = compute_indexes(
        start, STEPS, env.frec
    )  # New indexes are needed for the plots; they depend on STEPS
    figure_cost_gain(Qdic, final_indexes, PATH)
    figure_state(S_climate, final_indexes, PATH)
    figure_rh_par(S_data, final_indexes, PATH)
    figure_prod(S_prod, final_indexes, PATH)
    figure_actions(A, final_indexes, PATH)
    figure_inputs(df_inputs, PATH)

    t2 = time.time()
    season1_nn(agent, PATH, '')
    violin_reward(PATH, 'nn')  # may be 'nn' or 'expert'
    violin_actions(PATH, 'nn')

    # When not showing interactively, build the PDF report and email it.
    if not (SHOW):
        create_report(PATH, t2 - t1)
        send_correo(PATH + '/reports/Reporte.pdf')
        pass
예제 #7
0
async def runsim(runjobs, runtime, currentid, open, fight, stattable,
                 abilities, party, pbuffs, buffs, potion, ast, log):
    """Run `runjobs` sim instances concurrently, dealt into three groups.

    The original triplicated the task-creation code and tracked per-group
    'one/two/three' flags; this version deals tasks round-robin (task k
    goes to group k % 3 — the same distribution and creation order as the
    original) and awaits the groups one after another, returning the
    concatenated results.

    NOTE(review): `currentid` is accepted for interface compatibility but
    the original passed a constant id of 1 to every sim; that behavior is
    preserved.
    """
    groups = [[], [], []]
    for x in range(runjobs):
        groups[x % 3].append(
            asyncio.create_task(
                sim(1, runtime, open, fight, stattable, abilities, party,
                    pbuffs, buffs, potion, ast, log).sim()))
    results = []
    # gather() on an empty group would return [] anyway, matching the
    # original's unset-flag behavior; skip it for clarity.
    for tasks in groups:
        if tasks:
            results += await asyncio.gather(*tasks)
    return results
예제 #8
0
    def inner_objective(sim_params, expr_str):
        """Mean negative student-vs-teacher error over all trials.

        Runs one simulation per trial, subtracts the error between the
        student and teacher membrane-potential traces (skipping the first
        10% of t_sim as initial transients), and normalizes by the trial
        count.
        """
        n_trials = sim_params["n_trials_per_individual"]
        total = 0
        for trial in range(n_trials):
            history_u_student, history_u_teacher, *_ = sim(
                sim_params, expr_str=expr_str, trial=trial
            )
            total -= error(
                history_u_student["V_m"],
                history_u_teacher["V_m"],
                0.1 * sim_params["t_sim"],
            )
        return total * 1.0 / n_trials
예제 #9
0
def sim_test():
    """Dev driver: load pickled price data, run the simulator, plot NAV.

    Loads the DataFrame from U.pkl, restricts it to dates from 2005-01-01
    onward, runs sim.sim on it, plots the resulting NAV series, and
    returns the B result frame.
    """
    f = 'U.pkl'
    # Open in binary mode and close deterministically; the original used
    # pickle.load(open(f)), which leaks the handle and (on Python 3) opens
    # in text mode, which pickle cannot read.
    with open(f, 'rb') as fh:
        P = pickle.load(fh)
    log.info('loaded <%s>', f)
    P.describe()
    U = P[P.index >= '2005-01-01']
    U.describe()
    import sim
    _, B = sim.sim(U)
    # Plot NAV
    B.NAV.plot(title='Equal Weight Everyone')
    return B
def doSim(model, reps, repe, dir):
    """Run sim repetitions [reps, repe) for `model` and compress outputs.

    For each repetition, drives a sim from <dir>/<model>.conf producing
    <dir>/<model><rep>.sim and .gen, removes any stale .bz2 archives,
    gzip-compresses the fresh outputs, and deletes the uncompressed files.

    NOTE(review): the original body did not parse (string operands with no
    '+' or ',' between them on the print/cfg/prefOut lines) and
    concatenated the int `rep` directly into paths; this reconstruction
    uses str(rep) and takes cfg = the .conf path and the output prefix =
    <dir>/<model><rep>. Confirm against the real sim() signature, which is
    not visible here.
    """
    for rep in range(reps, repe):
        base = dir + "/" + model + str(rep)
        cfg = dir + "/" + model + ".conf"
        # Progress line mirroring the command the sim run corresponds to.
        print("python sim.py " + cfg + " > " + base + ".sim > " + base + ".gen")
        # Bind the result to a fresh name; the original rebound `sim`,
        # shadowing the sim() callable.
        run = sim(cfg, base)

        # Drop stale archives, compress the new outputs, remove originals.
        os.remove(base + ".sim.bz2")
        os.remove(base + ".gen.bz2")
        gzip_file(base + ".sim")
        gzip_file(base + ".gen")
        os.remove(base + ".sim")
        os.remove(base + ".gen")
예제 #11
0
파일: hab.py 프로젝트: chrskly/hab
    def __init__(self):
        """Wire up the hardware/service interfaces and blank the telemetry
        state for the high-altitude-balloon controller."""
        # interface to magnetometer; declination passed as (-2, 5) —
        # presumably (degrees, minutes), TODO confirm against the driver
        self.magnetometer = magnetometer(gauss=4.7, declination=(-2, 5))
        # interface to barometer
        self.barometer = barometer()
        # interface to persistent store
        self.store = store()
        # interface to GPS/GPRS module
        self.sim = sim()

        # current heading (None until the first magnetometer read)
        self.current_heading = None
        # current GPS data (json returned from sim.get_gps())
        self.current_gps = None
        # current barometer data (empty until the first read)
        self.current_barom = {}
예제 #12
0
파일: runsim.py 프로젝트: csheehy/cpmdeproj
def dosim(ba, bb, r, theta, dk, inputmap, rlz, sn, i, Ttt, QUtt):
    """Build one sim and run/save the four signal types.

    Runs the 'TnoP', 'sig', 'noi' and 'EnoB' signal types with the given
    T/QU template types, saving each result under index i.

    NOTE(review): fnn is assigned by both the 'noi' and the 'EnoB' saves,
    so the 'noi' filename is lost, and none of fnt/fns/fnn are returned —
    this looks unintentional; confirm whether callers need the filenames.
    """

    s = sim.sim(ba,
                bb,
                r=r,
                theta=theta,
                dk=dk,
                inputmap=inputmap,
                rlz=rlz,
                sn=sn)

    # One runsim + save per signal type, all sharing the template types.
    s.runsim(sigtype='TnoP', Ttemptype=Ttt, QUtemptype=QUtt)
    fnt = s.save(i)
    s.runsim(sigtype='sig', Ttemptype=Ttt, QUtemptype=QUtt)
    fns = s.save(i)
    s.runsim(sigtype='noi', Ttemptype=Ttt, QUtemptype=QUtt)
    fnn = s.save(i)
    s.runsim(sigtype='EnoB', Ttemptype=Ttt, QUtemptype=QUtt)
    fnn = s.save(i)
예제 #13
0
def main(players=1):
    filename = 'not_alone_card_value_data_' + unicode(players)  + '.csv'
    with open(filename, 'w') as csvfile:
        fieldnames = [
            'players',
            'hunted', 
            'artemia', 
            'survival card value',
            'hunt card value',
            'rounds',
        ]

        writer = csv.DictWriter(csvfile, fieldnames = fieldnames)
        writer.writeheader()
    hunt_val = 0
    data = []
    while hunt_val <= 1.0:

        survival_val = 0
        while survival_val <= 1.0:
            data.append(sim.sim(players, TRIALS, hunt_val, survival_val, False))
            print 'players ' + unicode(players)
            print 'hunt ' + unicode(hunt_val)
            print 'suvive' + unicode(survival_val)
            survival_val = round(survival_val + 0.05, 2)
        hunt_val = round(hunt_val + 0.05, 2)

    with open(filename, 'a') as csvfile:
        fieldnames = [
            'players',
            'hunted', 
            'artemia', 
            'survival card value',
            'hunt card value',
            'rounds',
        ]

        writer = csv.DictWriter(csvfile, fieldnames = fieldnames)
        for x in data:
            writer.writerow(x)
예제 #14
0
def dosim(ba, bb, r, theta, dk, inputmap, rlz, sn, i, Ttt, QUtt, tempNside):
    """Construct a sim and run the full battery of signal types on it.

    Every signal type is run with the same T/QU template types; the sim
    object itself is returned for further inspection.
    """
    s = sim.sim(ba,
                bb,
                r=r,
                theta=theta,
                dk=dk,
                inputmap=inputmap,
                rlz=rlz,
                sn=sn,
                i=i,
                tempNside=tempNside)

    # Identical call per signal type, preserving the original order.
    for sigtype in ('TnoP', 'sig', 'noi', 'EnoB',
                    'TnoPnosl', 'signosl', 'EnoBnosl'):
        s.runsim(sigtype=sigtype, Ttemptype=Ttt, QUtemptype=QUtt)

    return s
예제 #15
0
파일: spacey.py 프로젝트: j2le/spacey
# Create the main drawing surface at the configured window size.
screen = pygame.display.set_mode(game_constants.size)

#Loop until the user clicks the close button.
# NOTE: each constructor call below rebinds the class name to its instance
# (e.g. `sim = sim()`), so the classes are unreachable afterwards.
game = game(pygame, scenario)

# initialize earth
earth = earth()

# initialize vehicle state
vehicle = vehicle(scenario)

# initialize display
display = display(scenario, game)

# initialize truth (simulation) state
sim = sim()

# initialize flight computer
flight_computer = flight_computer()

# begin the loop for the game; game.update_events is expected to set
# game.done when the user quits
while game.done == False:

    # http://www.pygame.org/docs/ref/key.html
    # User did something
    for event in pygame.event.get():

        # update the game based on keystrokes
        game.update_events(event, vehicle, display, pygame, sim)

    # for loop around propagation
예제 #16
0
    # Load the shared simulation parameters for this data directory.
    with open(f"{data_directory}/sim_params.json", "r") as f:
        sim_params = json.load(f)

    # use a different seed than evo runs to generate different spike
    # train realization and class labels
    sim_params["seed"] = 817818273

    # Evaluate each evolved champion expression on fresh test trials and
    # persist its per-trial errors.
    n_runs = 6
    for i in range(n_runs):
        with open(os.path.join(data_directory, f"res_{i}.pkl"), "rb") as f:
            res = pickle.load(f)

        print("sim for", res["expr_champion"][-1])

        errors = np.empty(sim_params["n_trials_per_individual"])
        for trial in range(sim_params["n_trials_per_individual"]):
            print(
                f'  trial {trial + 1}/{sim_params["n_trials_per_individual"]}')
            data_u, data_u_target, *_ = sim(sim_params,
                                            expr_str=res["expr_champion"][-1],
                                            trial=trial)

            errors[trial] = -error(
                data_u["V_m"], data_u_target["V_m"], 0.1 *
                sim_params["t_sim"])  # skip first 10% (initial transients)

        # One pickle of per-trial errors per champion run.
        with open(os.path.join(data_directory, f"res_test_{i}.pkl"),
                  "wb") as f:
            pickle.dump(errors, f)
예제 #17
0
파일: sls.py 프로젝트: sourtin/SLS
#Space Launch Simulator
from sim import sim

# Pitch program as [time, pitch] pairs — presumably seconds vs. degrees;
# TODO confirm units against sim.prog_pitch.
pp = [[31, 0], [60, 26], [100, 46.85], [160, 68.8], [240, 60.8],
		[390, 72.436], [540, 79.744], [620, 81.3], [700, 91.4]]

# Build a Saturn V-like vehicle: named stages (payload, launch escape
# system) plus numbered stages; the j-suffixed entries (1j, 2j) appear to
# be interstage/jettisoned parts — confirm against sim.stage's signature,
# which is not visible here.
satv = sim()
satv.stage('payload',1939+45693,0,425)
satv.stage('LES',4042)
satv.stage(1,2239795,113,0,0,163)
satv.stage(1j,5206,0,0,0,193)
satv.stage(2,479964,79,0,0,550)
satv.stage(2j,3663,0,0,0,561)
satv.stage(3,119119,34,0,0)
satv.prog_pitch(0,pp)
# Burn schedule per stage; argument meanings (start time, duration,
# isp/thrust, propellant mass, ...) are opaque here — see sim.burn.
satv.burn(1,.3,[13231,13348],304,49,1793760)
satv.burn(1,135.20,10638,304,39,281151)
satv.burn(1j,163,176.3,237,0,617,angle=10)
satv.burn(2,164,1224,424,3.*5,363053)
satv.burn(2,460.62,979,424,3.*4,38560)
satv.burn(2,500,728,434,3.*4,35111)
satv.burn(2j,549,13.38,231,0,53.524,angle=10)
satv.burn(3,552.2,213.416,431,3.,31400)
satv.burn(3,9856.2,204.91,445,3.,71068)

satv.run(712) # run 712s of simulation
dat=satv.analyse() # print energies


#Use this to generate a data file for gnuplot
예제 #18
0
 def test_x(self):
     """Smoke test: print the sim result for one sample string.

     Alternate inputs are kept commented out for quick manual switching.
     """
     #        s = 'aaaaaa'
     #        s = 'abbababb'
     s = 'ababaa'
     #        s = 'aabbabab'
     # Python 2 print of the sim output for the chosen input.
     print sim.sim(s)
예제 #19
0
def main(parsed_gtfs_prefix,
         osm_data,
         depots_filename,
         output_dir,
         battery_cap_kwh,
         nondepot_charger_rate,
         parameter_override=None):
    """Runs Dispatch simulator with the given scenarios, in series.

  Parameters
  ----------
  parsed_gtfs_prefix :  str
    location and prefix of the parsed files generated by `parse_gtfs.py`
  osm_data : str
    location of the OSM pbf file for the GTFS feed being simulated
  depots_filename : str
    location of a csv file of depots in the following format:
      lat : float
        latitude of depot
      lng : float
        longitude of depot
      name : str
        name of the depot
      max_buses : int
        bus capacity
      url : str
        hyperlink to source of depot details
  output_dir : str
    location
  battery_cap_kwh : list
    list of ints,start (inclusive), end (exclusive), and step size values for 
    generating battery capacity scenarios
  nondepot_charger_rate : list
    list of ints, start (inclusive), end (exclusive), and step size values for
    generating non-depot EVSE charger scenarios
  parameter_override : dict
    dict that overrides any Dispatch parameter defaults. See
    `sim.generateParams()` for details on possible inputs.

  Returns
  -------
  scenarios_results: dataframe
    a df of of summary results for each scenario run
  """
    scenarios = genScenarios(battery_cap_kwh, nondepot_charger_rate)
    b_caps, cpowers = list(zip(*scenarios.values()))
    #TODO, probably should move these options into params of main(), but is that
    #getting too long? Advise on best practice here.
    an_costs = genAnnualizedCosts(b_caps,
                                  cpowers,
                                  rate=0.07,
                                  years=14,
                                  bus_base_price=500_000,
                                  bprice_kwh=100)
    #TODO Note that this will overwrite params passed to main()
    # Copy the override so the per-scenario updates below do not mutate the
    # caller's dict (the original aliased it and wrote into it).
    params = {} if parameter_override is None else dict(parameter_override)
    scen_costs = {}
    for key, val in scenarios.items():
        bat_cap, cpower = val
        prefix = f'{bat_cap}kwh_{cpower}_kw'
        # Set up scenario parameters and their annualized costs.
        params['battery_cap_kwh'] = bat_cap
        params['nondepot_charger_rate'] = cpower
        params['bus_cost'] = an_costs['annualized_base_bus']
        params['battery_cost_per_kwh'] = an_costs['annualized_bat'][bat_cap]
        params['nondepot_charger_cost'] = an_costs['annualized_charger'][
            cpower]

        results = sim(parsed_gtfs_prefix,
                      osm_data,
                      depots_filename,
                      parameters=params)
        # Parse results into one summary row per scenario.
        scen_costs[prefix] = {
            'battery_cap_kwh': bat_cap,
            'nondepot_charger_rate': cpower,
            'optimized_buses': results['opti_buses'],
            'optimized_chargers': results['opti_chargers'],
            'optimized_cost': results['opti_cost'],
            'nc_buses': results['nc_buses'],
            'nc_chargers': results['nc_chargers'],
            'nc_cost': results['nc_cost'],
            'ac_buses': results['ac_buses'],
            'ac_chargers': results['ac_chargers'],
            'ac_costs': results['ac_cost']
        }
        # Create the output dir if needed (race-safe; replaces the original
        # exists()+mkdir pair) and write trip-level results.
        os.makedirs(output_dir, exist_ok=True)
        results['opti_trips'].to_csv(f'{output_dir}/{prefix}_trips.csv')

        depot_res_name = f'{output_dir}/{prefix}_depot_counts.csv'
        with open(depot_res_name, 'w', newline='') as csvfile:
            fieldnames = ['depot', 'bus_count']
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            #TODO include nc and ac depots as well
            # Renamed from key/val to stop shadowing the outer loop vars.
            for depot, bus_count in results['opti_depot_counts'].items():
                writer.writerow({'depot': depot, 'bus_count': bus_count})
    # Create DF of summary results and write to file.
    scen_costs = pd.DataFrame(scen_costs).T
    scen_costs.to_csv(f'{output_dir}/scenarios_results.csv')
    # The docstring documents this return; the original fell off the end
    # and returned None.
    return scen_costs
예제 #20
0
from sim import sim

# Make the shared utils module importable from its sibling directory.
sys.path.insert(0, "../includes/")
import utils  # noqa: E402


if __name__ == "__main__":

    # NEST module providing the custom synapse model used by sim().
    nest.Install("us_sympy_synapse_module")

    data_directory = utils.get_data_directory_from_cmd_args()

    with open(f"{data_directory}/sim_params.json", "r") as f:
        sim_params = json.load(f)

    # Double the simulation time relative to the stored parameters so the
    # recorded traces cover a longer window.
    sim_params["t_sim"] *= 2.0

    # Run a single trial with a fixed plasticity-rule expression.
    (history_u_student, history_u_teacher, history_weights_student, weights_teacher) = sim(
        sim_params, expr_str="(x_0 - x_1) * x_2", trial=0
    )

    # Persist membrane-potential and weight traces for later plotting.
    res = {
        "data_u": history_u_student,
        "data_u_teacher": history_u_teacher,
        "data_syn": history_weights_student,
        "data_syn_teacher": weights_teacher,
    }
    with open(os.path.join(data_directory, "traces.pkl"), "wb") as f:
        pickle.dump(res, f)
예제 #21
0
def setup_and_sim(worm, time_to_run):
    """Simulate the given worm for time_to_run and record its Vmem gradient.

    Builds sim parameters for the worm, runs the bioelectric simulation,
    and stores abs(Vm[0] - Vm[num_cells-1]) on worm.grad_stren as the
    gradient strength. Most dump/plot code below is deliberately left
    commented out for manual debugging.
    """
    import sys
    """
    # If no command-line arguments are given, prompt for them.
    if (len (sys.argv) <= 1):
        args = 'main.py ' + input('Arguments: ')
        sys.argv = re.split (' ', args)
    regress_mode = (len(sys.argv) == 4) and (sys.argv[3]=="--regress")
    if (regress_mode):
        sys.argv = sys.argv[0:3]
    if (len(sys.argv) != 3):
       raise SyntaxError('Usage: python3 main.py test-name-to-run sim-end-time')
    """
    end_time = time_to_run

    # Run whatever test got chosen on the command line.
    GP = sim.Params()
    setup_lab_worm(GP, worm)
    #eval ('setup_'+sys.argv[1]+'(GP)')

    #if (regress_mode): # Works even if setup_...() overrules these params!
    #    GP.adaptive_timestep = False   # Force regression
    #    GP.sim_dump_interval = 1
    #    GP.sim_long_dump_interval = 10

    # Initialize Vmem -- typically 0, or close to that.
    Vm = sim.compute_Vm (sim.cc_cells, GP)
    assert (Vm<.5).all()    # I.e.,roughly charge neutral
    np.set_printoptions (formatter={'float': '{: 6.3g}'.format}, linewidth=90)
    #print ('Initial Vm   ={}mV\n'.format(1000*Vm))

    #print ('Starting main simulation loop')
    t_shots, cc_shots = sim.sim (end_time, GP)
    #print ('Simulation is finished.')

    # Now, the simulation is over. Do any end-of-sim analysis.
    #edb.analyze_equiv_network (GP)

    # If your network contains cells that are interconnected by GJs, then
    # pretty_plot() can make a nice picture of the interconnections and each
    # cell's Vm.
    Vm = sim.compute_Vm (sim.cc_cells, GP)
    # print("Worm Fitness = " + str(worm.fitness))
    # print("Km = " + str(worm.Km))
    # print("N = " + str(worm.N))
    # print("Gj_scale = " + str(worm.Gj_scale))
    # print("num_cells = " + str(worm.num_cells))
    # print("G_k = " + str(worm.G_k))
    # print("G_k = " + str(worm.G_na))
    # print("G_cl = " + str(worm.G_cl))
    # print("Gj_diff_m = " + str(worm.Gj_diff_m))

    # Gradient strength: |Vm difference| between the first and last cell.
    #print("Vmem?: {}".format(Vm))
    Vgrad = abs(Vm[0] - Vm[worm.num_cells -1])
    worm.grad_stren = Vgrad
    #eplt.pretty_plot (Vm*1e3)

    # We often want a printed dump of the final simulation results.
    np.set_printoptions (formatter={'float': '{:.6g}'.format}, linewidth=90)
    #edb.dump (end_time, sim.cc_cells, edb.Units.mV_per_s, True)
    #edb.dump (end_time, sim.cc_cells, edb.Units.mol_per_m2s, True)

    # We often want graphs of various quantities vs. time.
    # If so, then comment out the quit() here, and then modify the code below
    # as desired.
    # quit()
    # NOTE(review): Na/K/Cl, Vm_shots and n_cells below feed only the
    # commented-out plotting calls; they are kept for quick re-enabling.
    Na = sim.ion_i['Na']; K = sim.ion_i['K']; Cl=sim.ion_i['Cl']
    # P=sim.ion_i['P']; M=sim.ion_i['M']
    Vm_shots = [sim.compute_Vm (c,GP)*1000 for c in cc_shots]
    n_cells = sim.cc_cells.shape[1]
    # eplt.plot_Vmem_graph(t_shots,Vm_shots, np.arange(n_cells),'Vmem(mV)')
    # eplt.plot_Vmem_graph(t_shots,[s[Na] for s in cc_shots],np.arange(n_cells),'[Na] (mol/m3')
    # eplt.plot_Vmem_graph(t_shots,[s[K]  for s in cc_shots],np.arange(n_cells),'[K] (mol/m3')
    # eplt.plot_Vmem_graph(t_shots,[s[Cl]  for s in cc_shots],np.arange(n_cells),'[Cl] (mol/m3')
    #eplt.plot_Vmem_graph(t_shots,[s[M] for s in cc_shots],np.arange(n_cells),'[M] (mol/m3')
예제 #22
0
파일: tmp.py 프로젝트: mynkpl1998/a2c
    def _thunk():
        """Factory closure: build a fresh sim environment from the
        captured sim_config each time it is invoked."""
        return sim(sim_config)
예제 #23
0
    # Generate fake data for whichever pipelines are enabled; prun/ndirun/
    # jdrun are flags defined in the (not visible) enclosing scope.
    if prun:
        f_P.make_fake_data()
    if ndirun:
        f_NDI.make_fake_data()
    if jdrun:
        f_DI.make_fake_data()


    #####################
    #######Save Fake Data

    # if not os.path.exists(work_dir+ 'data/fake_data'):
    #     os.mkdir(work_dir+ 'data/fake_data')

    # Persist each enabled pipeline's fake data plus its key file, tagged
    # with fake_tag, into that pipeline's plots directory.
    if prun:
        f_P.save_fake_data(P_plots_dir + 'FD'+fake_tag+'.txt.gz')
        f_P.save_fake_data_key(P_plots_dir + 'FD_key'+fake_tag+'.txt.gz')
    if ndirun:
        f_NDI.save_fake_data(NDI_plots_dir + 'FD'+fake_tag+'.txt.gz')
        f_NDI.save_fake_data_key(NDI_plots_dir + 'FD_key'+fake_tag+'.txt.gz')
    if jdrun:
        f_DI.save_fake_data(DI_plots_dir + 'FD'+fake_tag+'.txt.gz')
        f_DI.save_fake_data_key(DI_plots_dir + 'FD_key'+fake_tag+'.txt.gz')

# Top-level: optionally run the full simulation after fake-data generation.
if do_sim:
    sim()



예제 #24
0
async def run1sim(runjobs, runtime, currentid, open, fight, stattable,
                  abilities, party, pbuffs, buffs, potion, ast, log):
    """Run `runjobs` sims in three round-robin groups, collecting results,
    timetables and aggregated ability potency.

    Returns a 3-tuple: (concatenated gather results of all groups,
    list of per-group timetables, dict of total potency per ability).

    NOTE(review): `currentid` is unused — every sim is created with id 1.
    sim1/sim2/sim3 (and hence the timetable/abilities read after each
    gather) refer to the LAST sim created in each group, which appears
    deliberate; confirm before restructuring.
    """
    tasks = []
    tasks2 = []
    tasks3 = []
    a = []
    b = []
    c = []
    atime = []
    btime = []
    ctime = []
    abilitydict = {}
    returntable = []
    # Flags recording which groups received at least one task.
    one = False
    two = False
    three = False
    x = 0
    # Deal tasks into the three groups: x % 3 == 0 -> group 1, etc.
    while x < runjobs:
        one = True
        sim1 = sim(1, runtime, open, fight, stattable, abilities, party,
                   pbuffs, buffs, potion, ast, log)
        tasks.append(asyncio.create_task(sim1.sim()))
        x = x + 1
        if x < runjobs:
            two = True
            sim2 = sim(1, runtime, open, fight, stattable, abilities, party,
                       pbuffs, buffs, potion, ast, log)
            tasks2.append(asyncio.create_task(sim2.sim()))
        x = x + 1
        if x < runjobs:
            three = True
            sim3 = sim(1, runtime, open, fight, stattable, abilities, party,
                       pbuffs, buffs, potion, ast, log)
            tasks3.append(asyncio.create_task(sim3.sim()))
        x = x + 1
    # Await each non-empty group in turn, then fold that group's ability
    # potencies into the shared abilitydict and record its timetable.
    if one:
        a = await asyncio.gather(*tasks)
        atime = sim1.timetable
        ability = sim1.abilities
        returntable.append(atime)
        for i in ability.keys():
            if i in abilitydict.keys():
                abilitydict[i] = abilitydict[i] + ability[i].totalpotency
            else:
                abilitydict[i] = ability[i].totalpotency
    if two:
        b = await asyncio.gather(*tasks2)
        btime = sim2.timetable
        ability = sim2.abilities
        returntable.append(btime)
        for i in ability.keys():
            if i in abilitydict.keys():
                abilitydict[i] = abilitydict[i] + ability[i].totalpotency
            else:
                abilitydict[i] = ability[i].totalpotency
    if three:
        c = await asyncio.gather(*tasks3)
        ability = sim3.abilities
        for i in ability.keys():
            if i in abilitydict.keys():
                abilitydict[i] = abilitydict[i] + ability[i].totalpotency
            else:
                abilitydict[i] = ability[i].totalpotency
        ctime = sim3.timetable
        returntable.append(ctime)

    return a + b + c, returntable, abilitydict
예제 #25
0
def diff_dirs(
    diff,
    dir1,
    dir2,
    usecache=True,
    cache_dir_base=None,
    include=None,
    ignore1=None,
    ignore2=None,
    load_fact=False,
    fact_dir=None,
    fact_versions=None,
    fact_proj='',
    fact_proj_roots=None,
    ignore_unmodified=False,
    restrict_fact=False,
    fact_for_changes=False,
    fact_for_mapping=False,
    fact_for_ast=False,
    fact_into_virtuoso='',
    fact_into_directory='',
    fact_size_thresh=diffts.DEFAULT_FACT_SIZE_THRESH,
    fact_for_cfg=False,
    fact_encoding=Enc.FDLCO,
    fact_hash_algo=HashAlgo.MD5,
    line_sim=False,
    dumpccs=False,
    check=False,
    keep_filtered_temp=False,
    local_cache_name=None,
    dump_delta=False,
    fact_for_delta=False,
    keep_going=False,
    use_sim=False,
    sim_thresh=0.7,
    quiet=False,
):
    """Diff two directory trees file-by-file and aggregate tree-diff metrics.

    Classifies the files of *dir1* vs *dir2* via ``get_info`` (modified,
    unmodified, renamed, moved, added, copied, removed, glued), optionally
    re-pairs removed/added files whose content similarity exceeds
    *sim_thresh* (when *use_sim*), then runs *diff* on every modified pair
    and sums node counts, mapping counts and edit costs over the whole
    comparison.

    Args:
        diff: per-file-pair diff callable; must return a dict containing at
            least the keys 'cost', 'nmappings' and 'nrelabels'.
        dir1, dir2: the two directory trees to compare.
        include: path prefixes restricting the comparison (None/empty = all).
        ignore1, ignore2: paths dropped from the dir1/dir2 sides.
        ignore_unmodified: skip node counting for unmodified/moved/renamed
            files (their counts are taken as 0 / pair counts).
        line_sim: additionally average line-level similarity over the
            modified pairs (result under the 'line_sim' key).
        use_sim, sim_thresh: pair up removed/added files by content
            similarity so a delete+add of nearly the same file is treated
            as a modification.
        keep_going: forwarded to ``get_info`` and *diff* to continue past
            per-file errors.
        (The remaining cache/fact_* /dump flags are forwarded unchanged to
        ``get_info``, ``count_nodes`` and *diff*.)

    Returns:
        dict with keys 'cost', 'ncomparisons', 'nmappings', 'nnodes1',
        'nnodes2', 'nnodes', 'nrelabels' and, when *line_sim* collected any
        data, 'line_sim' (the average line similarity).
    """
    # Normalize former mutable-default parameters: a shared mutable default
    # is the classic Python default-argument pitfall, so None now stands for
    # "empty list" while remaining backward compatible for all callers.
    include = [] if include is None else include
    ignore1 = [] if ignore1 is None else ignore1
    ignore2 = [] if ignore2 is None else ignore2
    fact_versions = [] if fact_versions is None else fact_versions
    fact_proj_roots = [] if fact_proj_roots is None else fact_proj_roots

    # Path filter: keep everything unless include-prefixes were given.
    if include:
        def filt(x):
            return any(x.startswith(p) for p in include)
    else:
        def filt(x):
            return True

    logger.info('"{}" - "{}" cache_dir_base="{}"'.format(
        dir1, dir2, cache_dir_base))

    # Aggregated metrics over the whole directory comparison.
    cost = 0
    nmappings = 0
    nnodes = 0
    nnodes1 = 0
    nnodes2 = 0
    nrelabels = 0

    line_sim_sum = 0.0
    line_sim_count = 0

    info = get_info(dir1,
                    dir2,
                    usecache=usecache,
                    cache_dir_base=cache_dir_base,
                    load_fact=load_fact,
                    fact_dir=fact_dir,
                    fact_versions=fact_versions,
                    fact_proj=fact_proj,
                    fact_proj_roots=fact_proj_roots,
                    fact_for_changes=fact_for_changes,
                    fact_for_cfg=fact_for_cfg,
                    fact_for_mapping=fact_for_mapping,
                    fact_into_virtuoso=fact_into_virtuoso,
                    fact_into_directory=fact_into_directory,
                    fact_size_thresh=fact_size_thresh,
                    fact_encoding=fact_encoding,
                    fact_hash_algo=fact_hash_algo,
                    local_cache_name=local_cache_name,
                    fact_for_delta=fact_for_delta,
                    keep_going=keep_going)

    logger.info('"{}" - "{}" get_info finished'.format(dir1, dir2))

    # Relativizers strip the project-root prefix from absolute paths when
    # exactly two roots were supplied; otherwise they are identity.
    def get_rel1(x):
        return x

    def get_rel2(x):
        return x

    if len(fact_proj_roots) == 2:
        (d1, d2) = fact_proj_roots
        pat1 = re.compile(r'^{}{}'.format(d1.rstrip(os.path.sep), os.path.sep))
        pat2 = re.compile(r'^{}{}'.format(d2.rstrip(os.path.sep), os.path.sep))

        def get_rel1(x):
            return pat1.sub('', x)

        def get_rel2(x):
            return pat2.sub('', x)

    modified = filter_pairs(info['modified'], ignore1, ignore2, get_rel1,
                            get_rel2, filt)
    unmodified = filter_pairs(info['unmodified'], ignore1, ignore2, get_rel1,
                              get_rel2, filt)
    renamed = filter_pairs(info['renamed'], ignore1, ignore2, get_rel1,
                           get_rel2, filt)
    moved = filter_pairs(info['moved'], ignore1, ignore2, get_rel1, get_rel2,
                         filt)

    added = filter_sources(info['added'], ignore2, get_rel2, filt)
    copied = filter_sources(info['copied'], ignore2, get_rel2, filt)
    removed = filter_sources(info['removed'], ignore1, get_rel1, filt)
    glued = filter_sources(info['glued'], ignore1, get_rel1, filt)

    extra_pairs = []
    if use_sim:
        # Re-pair removed/added files by content similarity.  A pair is
        # accepted only if the match is mutual (agreed from both sides).
        logger.debug('matching removed and added files...')
        removed_cands = []
        for x in removed:
            logger.debug('{}'.format(x))
            cs_ = []
            for x_ in added:
                s = sim.sim(x, x_)
                if s > sim_thresh:
                    logger.debug('  {} ({})'.format(x_, s))
                    cs_.append((x_, s))
            if cs_:
                removed_cands.append((x, cs_))
        pairs = set()
        pairs0 = set()
        for (x, cs_) in removed_cands:
            if len(cs_) == 1:
                pairs.add((x, cs_[0][0]))
            else:
                # Ambiguous: keep only the best-scoring candidate.
                pairs0.add((x, max(cs_, key=lambda c: c[1])[0]))

        added_cands = []
        for x_ in added:
            logger.debug('{}'.format(x_))
            cands = []
            for x in removed:
                s = sim.sim(x, x_)
                if s > sim_thresh:
                    logger.debug('  {} ({})'.format(x, s))
                    cands.append((x, s))
            if cands:
                added_cands.append((cands, x_))
        pairs_ = set()
        pairs0_ = set()
        for (cs, x_) in added_cands:
            if len(cs) == 1:
                pairs_.add((cs[0][0], x_))
            else:
                pairs0_.add((max(cs, key=lambda c: c[1])[0], x_))

        # Unique matches must agree both ways; ambiguous ones must pick the
        # same best partner from both directions.
        extra_pairs = list((pairs & pairs_) | (pairs0 & pairs0_))

        logger.info('extra pairs (sim_thresh={}):'.format(sim_thresh))
        for p in extra_pairs:
            logger.info('  {} - {}'.format(*p))

    if extra_pairs:
        # Promote matched removed/added files to "modified" pairs.
        for p in extra_pairs:
            (x, x_) = p
            removed.remove(x)
            added.remove(x_)
            modified.append(p)

    modified0 = [p[0] for p in modified]
    unmodified0 = [p[0] for p in unmodified]
    moved0 = [p[0] for p in moved]
    renamed0 = [p[0] for p in renamed]

    # for multi-processing
    random.shuffle(unmodified0)
    random.shuffle(moved0)
    random.shuffle(renamed0)
    random.shuffle(added)
    random.shuffle(copied)
    random.shuffle(removed)
    random.shuffle(glued)

    count_opts = {
        'cache_dir_base': cache_dir_base,
        'load_fact': load_fact,
        'fact_dir': fact_dir,
        'fact_versions': fact_versions,
        'fact_proj': fact_proj,
        'fact_proj_roots': fact_proj_roots,
        'fact_for_ast': fact_for_ast,
        'fact_into_virtuoso': fact_into_virtuoso,
        'fact_into_directory': fact_into_directory,
        'fact_size_thresh': fact_size_thresh,
        'restrict_fact': restrict_fact,
        'fact_encoding': fact_encoding,
        'fact_hash_algo': fact_hash_algo,
        'local_cache_name': local_cache_name,
    }

    modified0set = set(modified0)
    moved0set = set(moved0)
    renamed0set = set(renamed0)
    if ignore_unmodified:
        # Count only how many files fall in each category, not their nodes.
        nunmodified0 = 0
        nmoved0 = len(moved0set - modified0set)
        nrenamed0 = len(renamed0set - modified0set)
    else:
        nunmodified0 = count_nodes(unmodified0, **count_opts)
        nmoved0 = count_nodes(moved0set - modified0set, **count_opts)
        nrenamed0 = count_nodes(renamed0set - modified0set, **count_opts)

    # Split the two-element version/root lists into per-side singletons so
    # count_nodes sees only the relevant side.
    fvs0 = []
    fvs1 = []
    if len(fact_versions) == 2 and load_fact:
        fvs0 = [fact_versions[0]]
        fvs1 = [fact_versions[1]]

    fpr0 = []
    fpr1 = []
    if len(fact_proj_roots) == 2 and load_fact:
        fpr0 = [fact_proj_roots[0]]
        fpr1 = [fact_proj_roots[1]]

    count_opts['fact_versions'] = fvs1
    count_opts['fact_proj_roots'] = fpr1

    nadded = count_nodes(added, **count_opts)
    ncopied = count_nodes(copied, **count_opts)

    count_opts['fact_versions'] = fvs0
    count_opts['fact_proj_roots'] = fpr0

    nremoved = count_nodes(removed, **count_opts)
    nglued = count_nodes(glued, **count_opts)

    d_nnodes1 = nunmodified0 + nmoved0 + nrenamed0 + nremoved + nglued
    d_nnodes2 = nunmodified0 + nmoved0 + nrenamed0 + nadded + ncopied

    nnodes1 += d_nnodes1
    nnodes2 += d_nnodes2
    nnodes += d_nnodes1 + d_nnodes2

    # Identical files map one-to-one; pure additions/removals are pure cost.
    nmappings += nunmodified0 + nmoved0 + nrenamed0
    cost += nadded + ncopied + nremoved + nglued

    logger.info('nnodes={}, nmappings={}, cost={}'.format(
        nnodes, nmappings, cost))

    st_time = time.time()

    try:
        modified_all = modified

        logger.info('{} modified files'.format(len(modified_all)))

        random.shuffle(modified_all)  # for multi-processing

        n_modified_all = len(modified_all)

        count = 0

        for (file1, file2) in modified_all:

            if is_auxfile(file1):
                logger.info('pre-source "{}" is ignored'.format(file1))
                continue

            if is_auxfile(file2):
                logger.info('pre-source "{}" is ignored'.format(file2))
                continue

            count += 1

            logger.info('*** processing modified files ({}/{})'.format(
                count, n_modified_all))

            if line_sim:
                line_sim_sum += sim.line_sim(file1, file2)
                line_sim_count += 1

            r = diff(
                file1,
                file2,
                cache_dir_base=cache_dir_base,
                load_fact=load_fact,
                fact_dir=fact_dir,
                fact_versions=fact_versions,
                fact_proj_roots=fact_proj_roots,
                restrict_fact=restrict_fact,
                fact_for_changes=fact_for_changes,
                fact_for_mapping=fact_for_mapping,
                fact_for_ast=fact_for_ast,
                fact_into_virtuoso=fact_into_virtuoso,
                fact_into_directory=fact_into_directory,
                fact_size_thresh=fact_size_thresh,
                fact_encoding=fact_encoding,
                fact_hash_algo=fact_hash_algo,
                dumpccs=dumpccs,
                check=check,
                keep_filtered_temp=keep_filtered_temp,
                local_cache_name=local_cache_name,
                dump_delta=dump_delta,
                fact_for_delta=fact_for_delta,
                keep_going=keep_going,
                quiet=quiet,
            )

            c = r['cost']
            m = r['nmappings']

            logger.info('"{}" - "{}": CMR=({}/{})'.format(file1, file2, c, m))

            fvs0 = []
            fvs1 = []
            if len(fact_versions) == 2 and load_fact:
                fvs0 = [fact_versions[0]]
                fvs1 = [fact_versions[1]]

            fpr0 = []
            fpr1 = []
            if len(fact_proj_roots) == 2 and load_fact:
                fpr0 = [fact_proj_roots[0]]
                fpr1 = [fact_proj_roots[1]]

            count_opts['fact_versions'] = fvs0
            count_opts['fact_proj_roots'] = fpr0

            d_nnodes1 = count_nodes([file1], **count_opts)

            count_opts['fact_versions'] = fvs1
            count_opts['fact_proj_roots'] = fpr1

            d_nnodes2 = count_nodes([file2], **count_opts)

            nnodes1 += d_nnodes1
            nnodes2 += d_nnodes2
            nnodes += d_nnodes1 + d_nnodes2

            cost += c
            nmappings += m
            nrelabels += r['nrelabels']

    except Exception as e:
        # Best-effort aggregation: log and fall through to the summary with
        # whatever was accumulated so far (in the spirit of keep_going).
        logger.warning('{}'.format(str(e)))

    elapsed = time.time() - st_time

    minutes = elapsed / 60.0

    ncomp = len(modified)

    logger.info('"{}" - "{}" --> {} comparisons ({} min.)'.format(
        dir1, dir2, ncomp, minutes))

    res = {
        'cost': cost,
        'ncomparisons': ncomp,
        'nmappings': nmappings,
        'nnodes1': nnodes1,
        'nnodes2': nnodes2,
        'nnodes': nnodes,
        'nrelabels': nrelabels,
    }

    if line_sim and line_sim_count > 0:
        res['line_sim'] = line_sim_sum / line_sim_count

    return res
예제 #26
0
# Build randomized P/Q reference set-points for N simulation runs.
P_WEIGHTS = (1.0, 0.5, 0.5, 0.25)
Q_WEIGHTS = (0.0, 0.0, 0.2, 0.2)
for _ in range(N):
    # Uniform random scaling factor in [1-d, 1+d).
    scale = (1.0 - d) + np.random.rand() * (2 * d)
    Pref.append(tuple(32 * w * scale for w in P_WEIGHTS))
    Qref.append(tuple(32 * w * scale for w in Q_WEIGHTS))

# First run seeds the accumulators; the remaining runs are summed on top.
t, states, P, Q = sim(Pref[0], Qref[0])

for run in range(1, N):
    tn, statesn, Pn, Qn = sim(Pref[run], Qref[run])
    P = P + Pn
    Q = Q + Qn
    # Accumulate every state trace except index 0 (the plotted grid angle).
    for idx in range(1, len(states)):
        states[idx] = states[idx] + statesn[idx]

# plot results
plt.figure(1)
plt.plot(t, states[0], color='b')
plt.xlabel('Time (sec)')
plt.ylabel('Angle mod 2*pi (rad)')
plt.title('Grid Angle')
예제 #27
0
def doSim(model, reps, repe, dir):
    """Run simulations for replicate indices [reps, repe) and write results.

    Parameters
    ----------
    model : str
        Base name of the model; the config is read from ``<dir>/<model>.conf``.
    reps, repe : int
        First (inclusive) and last (exclusive) replicate index.
    dir : str
        Directory holding the config file and receiving the outputs.
        (NOTE: shadows the ``dir`` builtin; name kept for keyword-call
        compatibility with existing callers.)
    """
    for rep in range(reps, repe):
        sims = sim(dir + "/" + model + ".conf")
        # str(rep): the original concatenated an int into the path, which
        # always raised TypeError.
        write(dir + "/" + model + str(rep) + ".sim", sims)
        # The same payload is also written with a .gen suffix; the original
        # carried a "#???" marker here -- confirm this duplication is wanted.
        write(dir + "/" + model + str(rep) + ".gen", sims)
예제 #28
0
#coding=utf8

import sim
import baidu

# Module-level similarity engine, initialized once from the phrase file.
f_sim = sim.sim()
f_sim.sim_init('mkb3new.txt')


def writeonly(txt):
    """Translate *txt* through the baidu module and return the similarity
    score for the translated string (second element of ``calc_sim``)."""
    baidu.q = txt.encode("utf-8")
    response = baidu.GetTrans()
    # Pull the translated text out of the raw response: the substring
    # between 6 characters past the first 'dst' and the next double quote.
    start = response.find('dst') + 6
    end = response.find('"', start)
    translated = response[start:end]
    return f_sim.calc_sim(translated)[1]
예제 #29
0
    if prun:
        f_P.make_fake_data()
    if ndirun:
        f_NDI.make_fake_data()
    if jdrun:
        f_DI.make_fake_data()


    #####################
    #######Save Fake Data

    # if not os.path.exists(work_dir+ 'data/fake_data'):
    #     os.mkdir(work_dir+ 'data/fake_data')

    if prun:
        f_P.save_fake_data(P_plots_dir + 'FD'+fake_tag+'.txt.gz')
        f_P.save_fake_data_key(P_plots_dir + 'FD_key'+fake_tag+'.txt.gz')
    if ndirun:
        f_NDI.save_fake_data(NDI_plots_dir + 'FD'+fake_tag+'.txt.gz')
        f_NDI.save_fake_data_key(NDI_plots_dir + 'FD_key'+fake_tag+'.txt.gz')
    if jdrun:
        f_DI.save_fake_data(DI_plots_dir + 'FD'+fake_tag+'.txt.gz')
        f_DI.save_fake_data_key(DI_plots_dir + 'FD_key'+fake_tag+'.txt.gz')

# Run the simulation only when requested via the do_sim flag.
if do_sim:
    sim()



예제 #30
0
파일: tmp.py 프로젝트: mynkpl1998/a2c
	return _thunk
'''
# Select Device: prefer the GPU when CUDA is available, else fall back to CPU.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")

# Vector Envs: build num_envs env factories and run them in subprocesses.
envs = [make_env(i) for i in range(num_envs)]
envs = SubprocVecEnv(envs)

# Test Env
# NOTE(review): yaml.load without an explicit Loader is deprecated and unsafe
# on untrusted input -- prefer yaml.safe_load (confirm the config does not
# rely on Python-specific YAML tags before switching).
with open("sim-config.yaml", "r") as handle:
    sim_config = yaml.load(handle)

env = sim(sim_config)

#env = gym.make("CartPole-v0")


# Network
class ActorCritic(nn.Module):
    def __init__(self, num_inputs, num_outputs, hidden_size):
        super(ActorCritic, self).__init__()

        self.critic = nn.Sequential(nn.Linear(num_inputs, hidden_size),
                                    nn.ReLU(), nn.Linear(hidden_size, 1))

        self.actor = nn.Sequential(nn.Linear(num_inputs, hidden_size),
                                   nn.ReLU(),
                                   nn.Linear(hidden_size, num_outputs),
예제 #31
0
import numpy as np
import sim
from math import pi
import graph as g

# Crude finite-difference descent on sim.betas, minimizing the squared value
# of log column 1 at the final time step.
k = 1.0  # step-size gain

# Initial guess: ten angles of 10 degrees, in radians.
# NOTE(review): the meaning of the 10 entries depends on the sim module.
betas1 = np.array([10.0 * pi / 180] * 10)
sim.betas = betas1
print(sim.betas)

log = sim.sim()
# print(log)
s = log.shape

# Cost J1: squared value of column 1 at the last logged row.
J1 = log[s[0] - 1, 1]**2
djdb = 5.0 * pi / 180  # initial perturbation step (5 degrees in radians)

while True:  # NOTE(review): no termination condition -- runs until interrupted
    # Step all betas against the current slope estimate.
    sim.betas = sim.betas - k * djdb
    print(sim.betas)

    log = sim.sim()
    # print(log)
    s = log.shape

    J2 = log[s[0] - 1, 1]**2
    # print((sim.betas-betas1))

    # NOTE(review): J1 and betas1 are never updated inside the loop, so this
    # is the secant slope against the *starting* point, not between
    # successive iterates -- confirm that is intended.
    djdb = (J2 - J1) / ((sim.betas - betas1))
    print(J2, djdb)
예제 #32
0
def ypp_sim_strategy(home, away, drives=1000):
    """Predict the winner between *home* and *away* by drive simulation.

    Simulates ``drives`` drives for each team from its entry in the
    module-level ``ypp`` table (the team's ``'dist'`` distribution) and
    returns the team key with the higher simulated score.

    Parameters
    ----------
    home, away : hashable
        Keys into the ``ypp`` table.
    drives : int, optional
        Number of drives to simulate per team (default 1000; previously a
        hard-coded constant -- now a backward-compatible parameter).

    Returns
    -------
    The ``away`` key if its simulated score is strictly higher; otherwise
    ``home`` (i.e. ties go to the home team).
    """
    home_score = sim.sim(ypp[home]['dist'], drives)
    away_score = sim.sim(ypp[away]['dist'], drives)
    return away if away_score > home_score else home
예제 #33
0
#        ctg_idx = pool.run_one_iter(thresh)
#        new_ctg = ctg1 if ctg_idx == 0 else ctg2
#        new_err = err_fn(ctg1, ctg2)
#
#        pbar.set_postfix_str(f"err: {new_err}")
#        if np.abs((new_err - err) / err) < .00001 and err < err_tol * 10:
#            break
#        err = new_err
#        if err < err_tol:
#            break
#
#        if (1+i) % 100 == 0:
#            state_space.data[0] = new_ctg
#            p = sim(f"outputs/videos/output_epoch_{1+i}", (0,0,0,0), env, state_space, action_space, thresh)
#            np.save(f"outputs/precompute/new_new_ctg_{resolution}", new_ctg)
#
#    pool.close()
#
#    state_space.data[0] = new_ctg
#    np.save(f"outputs/precompute/new_new_ctg_{resolution}", state_space.data[0])

# simulate: launch one simulation process per starting state, then wait for
# all of them to finish.
procs = []
for idx, start_state in enumerate([(0, 0, 0, 0), (pi, 0, 0, 0),
                                   (pi / 2., 0, 0, 0), (1, 0, 0, 0)]):
    #p = sim(f"outputs/videos/output_{idx}", start_state, env, state_space, action_space)
    #procs.append(p)
    p = sim(f"outputs/videos/natural_{idx}", start_state, env)
    procs.append(p)
# Join with a plain loop: the original used a list comprehension purely for
# its side effects, building a throwaway list of None values.
for p in procs:
    p.join()