def main():
    """Entry point: run the simulation when --sim is given, else the game.

    ``--sim`` takes an optional integer argument; giving the flag with no
    value runs the simulation with the const default of 10.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--sim', nargs='?', type=int, const=10)
    args = parser.parse_args()
    # Compare against None rather than truthiness so an explicit "--sim 0"
    # still selects the simulation path (the original sent 0 to run_game()).
    if args.sim is not None:
        run_sim(args.sim)
    else:
        run_game()
def BL_Industry_Simulation(task_queue, result_queue, error_queue):
    """Worker loop: run the BL-industry backtest for every queued customer.

    Pulls customer ids from ``task_queue`` until it is empty, runs one
    simulation per customer, and reports success on ``result_queue``.  Any
    failure is reported on both ``error_queue`` and ``result_queue``.

    Relies on module-level ``universe``, ``data_proxy``, ``BLStrategy`` and
    ``run_sim``.
    """
    # Pre-bind so the except-clause cannot raise NameError when the failure
    # happens before the first successful get() (bug in the original).
    customer_id = None
    try:
        while True:
            if task_queue.empty():
                break
            # block=False returns immediately; the timeout the original
            # passed alongside it was ignored, so it has been dropped.
            customer_id = task_queue.get(block=False)
            strategy = BLStrategy(customer_id, 'bl_industry', universe, data_proxy)
            run_sim(strategy, '20180101', data_proxy, rebalance_freq=30)
            result_queue.put('BL Industry Succeded For Customer: %s' % customer_id)
    except Exception as e:
        # Report on both queues so either consumer sees the failure.
        error_queue.put('BL Industry Failed For Customer:%s \n Reasons:%s' % (customer_id, e))
        result_queue.put('BL Industry Failed For Customer:%s \n Reasons:%s' % (customer_id, e))
def run_batch(param, env, instance, controllers):
    """Run every controller on one instance and save per-step results.

    For each controller, writes an array of rows
    ``[t, x0, y0, ax0, ay0, x1, y1, ax1, ay1, ...]`` (time plus four
    interleaved columns per agent) to
    ``../results/singleintegrator/<controller>/<instance>.npy`` as float32.
    """
    torch.set_num_threads(1)  # keep each batch worker single-threaded
    s0 = load_instance(param, env, instance)
    for name, controller in controllers.items():
        print("Running simulation with " + name)
        states, observations, actions, step = run_sim(
            param, env, controller, s0, name=instance)
        # Interleave the first two state dims and first two action dims of
        # each agent into 4 columns per agent.
        states_and_actions = np.zeros((step, 4 * param.n_agents), dtype=np.float32)
        states_and_actions[:, 0::4] = states[:step, 0::env.state_dim_per_agent]
        states_and_actions[:, 1::4] = states[:step, 1::env.state_dim_per_agent]
        states_and_actions[:, 2::4] = actions[:step, 0::env.action_dim_per_agent]
        states_and_actions[:, 3::4] = actions[:step, 1::env.action_dim_per_agent]
        result = np.hstack(
            (param.sim_times[0:step].reshape(-1, 1), states_and_actions))
        basename = os.path.splitext(os.path.basename(instance))[0]
        folder_name = "../results/singleintegrator/{}".format(name)
        # makedirs+exist_ok avoids the TOCTOU race of the original
        # exists()/mkdir() pair and also creates missing parent directories.
        os.makedirs(folder_name, exist_ok=True)
        output_file = "{}/{}.npy".format(folder_name, basename)
        with open(output_file, "wb") as f:
            np.save(f, result.astype(np.float32), allow_pickle=False)
def MV_Simulation(task_queue, result_queue, error_queue):
    """Worker loop: run the mean-variance backtest for every queued task.

    Each task is a ``(strategy_id, risk_level)`` pair.  Success is reported
    on ``result_queue``; failures go to both ``error_queue`` and
    ``result_queue``.

    Relies on module-level ``MVStrategy``, ``data_proxy`` and ``run_sim``.
    """
    # Pre-bind so the except-clause cannot raise NameError when the failure
    # happens before the first task is fetched (bug in the original).
    strategy_id = None
    try:
        universe = ['881001.WI', '513500.SH', '159920.SZ', '518880.SH', 'H11025.CSI']
        while True:
            if task_queue.empty():
                break
            # block=False returns immediately; the timeout the original
            # passed alongside it was ignored, so it has been dropped.
            task = task_queue.get(block=False)
            strategy_id, risk_level = task
            strategy = MVStrategy(strategy_id, universe, risk_level, data_proxy)
            run_sim(strategy, '20180101', data_proxy, rebalance_freq=30)
            result_queue.put('MV Succeded For %s' % strategy_id)
    except Exception as e:
        error_queue.put('MV Failed For %s \n Reasons:%s' % (strategy_id, e))
        result_queue.put('MV Failed For %s \n Reasons:%s' % (strategy_id, e))
def BL_Style_Simulation(task_queue, result_queue, error_queue):
    """Worker loop: run the BL-style backtest for every queued customer.

    Builds its own data proxy from the style data source, then processes
    customer ids from ``task_queue`` until it is empty.

    Relies on module-level ``universe``, ``BLDataSourceStyle``,
    ``CommonSaveSource``, ``DataProxy``, ``BLStrategy`` and ``run_sim``.
    """
    # Pre-bind so the except-clause cannot raise NameError when the failure
    # happens before the first successful get() (bug in the original).
    customer_id = None
    try:
        data_source = BLDataSourceStyle()
        save_source = CommonSaveSource()
        data_proxy = DataProxy(data_source, save_source)
        while True:
            if task_queue.empty():
                break
            # block=False returns immediately; the timeout the original
            # passed alongside it was ignored, so it has been dropped.
            customer_id = task_queue.get(block=False)
            strategy = BLStrategy(customer_id, 'bl_style', universe, data_proxy)
            run_sim(strategy, '20180101', data_proxy, rebalance_freq=30)
            result_queue.put('BL Style Succeded For Customer: %s' % customer_id)
    except Exception as e:
        # Fixed copy-paste bug: the failure messages previously said
        # "BL Industry" inside this BL *Style* worker.
        error_queue.put('BL Style Failed For Customer:%s \n Reasons:%s' % (customer_id, e))
        result_queue.put('BL Style Failed For Customer:%s \n Reasons:%s' % (customer_id, e))
def run(pl):
    """Run one simulation for the given parameter object.

    Returns the result DataFrame rounded to 2 decimals; on any failure the
    error is printed and None is returned (best-effort behavior).
    """
    try:
        print("\n Running: ", pl.ID)
        # With no_img=False a graph is generated per round.
        return run_sim(pl.ID, pl, no_img=True).round(2)
    except Exception as err:
        print(err)
def run_batch(param, env, instance, controllers):
    """Run every controller on one instance and save per-step results.

    For each controller, writes rows of ``[t | state vector]`` to
    ``../results/doubleintegrator/<controller>/<instance>.npy`` as float32.
    """
    torch.set_num_threads(1)  # keep each batch worker single-threaded
    s0 = load_instance(param, env, instance)
    for name, controller in controllers.items():
        print("Running simulation with " + name)
        states, observations, actions, step = run_sim(
            param, env, controller, s0, name=instance)
        # One row per executed step: simulation time followed by the state.
        result = np.hstack(
            (param.sim_times[0:step].reshape(-1, 1), states[0:step]))
        # Store in binary .npy format, one file per (controller, instance).
        basename = os.path.splitext(os.path.basename(instance))[0]
        folder_name = "../results/doubleintegrator/{}".format(name)
        # makedirs+exist_ok avoids the TOCTOU race of the original
        # exists()/mkdir() pair and also creates missing parent directories.
        os.makedirs(folder_name, exist_ok=True)
        output_file = "{}/{}.npy".format(folder_name, basename)
        with open(output_file, "wb") as f:
            np.save(f, result.astype(np.float32), allow_pickle=False)
def main():
    """Build a fixed simulation configuration, run it, and display results."""
    sim_params = sim.SimParams(4, 1000, 10, 10000, "trainsmall.txt")
    outcome = sim.run_sim(sim_params)
    outcome.display()
'801750.SI', '801760.SI', '801770.SI', '801780.SI', '801790.SI', '801880.SI', '801890.SI'] from data_objs import LocalBLDataSource, LocalMVSaveSource data_source = LocalBLDataSource() save_source = LocalMVSaveSource('D:\\Temp') data_proxy = DataProxy(data_source, save_source) bl_strategy = ['sample_customer'] for strategy_id in bl_strategy: bl_strategy = BLStrategy(strategy_id, bl_industry_universe, data_proxy, 'industry') rebalance_df,weight_df,net_value_df = \ run_sim(strategy_id,bl_strategy,'20150105',data_proxy,30) # 风格配置 #bl_style_universe = ['399372.SZ', '399373.SZ', '399374.SZ', '399375.SZ', '399376.SZ', '399377.SZ'] # # #bl_strategy = ['sample_customer'] #for strategy_id in bl_strategy: # bl_strategy = BLStrategy(strategy_id,bl_style_universe,data_proxy) # rebalance_df,weight_df,net_value_df = \ # run_sim(bl_strategy,'20150105',data_proxy,30) #--------------------------------------------
try: print("\n Running: ", pl.ID) sim_df = run_sim(pl.ID, pl, no_img=True).round( 2) # Mit no_img = False wird ein Graph pro Runde generiert return sim_df except Exception as e: print(e) if __name__ == '__main__': BATCH = True # Wenn True werden die Parameter oben genutzt, Sonst werden die Parameter aus batch_parameter.csv eingelesen und überschreiben oben if not BATCH: parameters = Parameters() print(run_sim(0, parameters)) else: params = [] import random seeds = [] for i in range(0, 5): n = random.randint(1, 999999) seeds.append(n) df_results = pd.DataFrame() df_batch = pd.read_csv("batch_parameters.csv", sep=";") batch_test = df_batch.copy().drop(columns=['ID']) param_list_dirty = [] # dirty means the parameters including Nan values
'881001.WI', '513500.SH', '159920.SZ', '518880.SH', 'H11025.CSI' ] mv_strategy_ids = { 'medium': 0.4, 'medium_high': 0.6, 'high': 1, 'medium_low': 0.2, 'low': 0.1 } net_value_comb = pd.DataFrame() statics = pd.DataFrame() for strategy_id, risk_level in mv_strategy_ids.items(): mv_strategy = MVStrategy(strategy_id, mv_universe, risk_level) rebalance_df, weight_df, net_value_df, static_indies = run_sim( mv_strategy, '20150105', 30, save_path=r'D:\Work\tmp') net_value_df.rename(inplace=True, columns={'net_value': strategy_id}) net_value_comb = pd.concat([net_value_comb, net_value_df], axis=1) statics = pd.concat([statics, static_indies], axis=1) net_value_comb.to_excel('D:\\Work\\tmp\\net_value_comb.xlsx') statics.to_excel('D:\\Work\\tmp\\statics.xlsx') data = pd.read_excel('data.xlsx') wind_a = data[['881001.WI']] wind_a.index = wind_a.index.strftime('%Y%m%d') net_value_comb1 = net_value_comb.join(wind_a, how='left') net_value_comb1['881001.WI'] = net_value_comb1['881001.WI'] / net_value_comb1[ '881001.WI'].iloc[0] net_value_comb1.to_excel('D:\\Work\\tmp\\net_value_comb.xlsx')
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Sweep a drivetrain simulation and plot actual vs. target velocity."""
import sys
sys.path.append('./build/sim')

import math

import matplotlib.pyplot as plt  # qualified import instead of "import *"

import sim

sweep = sim.run_sim(
    duration=10.0,
    loop_duration=18.6e-3,
    sim_steps_per_loop=1000,
    mass=20 / 2.2,                # presumably lbs -> kg; confirm with sim docs
    stall_torque=0.7344,
    free_spin=100. * 2. * math.pi / 60.,  # rpm -> rad/s
    num_motors=4,
    wheel_rad=2.0 * 2.54 / 100.,  # inches -> metres
    friction=5.0,  # motors on order of 20N
    ticks_per_rev=90 * 4,
)

# Blue: velocity, green: target velocity, red: user channel 1.
plt.plot(
    sweep.t, sweep.v, 'b',
    sweep.t, sweep.vt, 'g',
    sweep.t, sweep.user1, 'r')
plt.show()
import numpy as np import pandas as pd import networkx as nx import sim import analytic import timeit from sim import run_sim #Fitness range and n range Must be saved in a list or tuple even if only one condition is being run sim_output = sim.run_sim(numsim=10, fitness_range=[1.1], n_range=[100], init_mut=1, max_time=5000) math_output = analytic.analytic_solution(n=50) analytic_data = pd.DataFrame(math_output) analytic_data.columns = [ "n", "initial_mut", "relative_fitness", "fixation_prob" ] analytic_data.to_csv( "C:/Users/dtw43/Documents/Eco_Evo_Research/Stochastic_Practice/data/math_data.csv" ) pop_data = pd.DataFrame(sim_output) pop_data.columns = [ "n", "relative_fitness", "sim_num", "time_step", "pop_a", "pop_b" ] pop_data.to_csv( "C:/Users/dtw43/Documents/Eco_Evo_Research/Stochastic_Practice/data/sim_data.csv"
'801200.SI', '801210.SI', '801230.SI', '801710.SI', '801720.SI', '801730.SI', '801740.SI', '801750.SI', '801760.SI', '801770.SI', '801780.SI', '801790.SI', '801880.SI', '801890.SI'] cust_ids = [1,2,3] data_source = CommonDataSource(universe = universe,data_type = 'bl_industry', custs_list = cust_ids) save_source = CommonSaveSource() data_proxy = DataProxy(data_source,save_source) data_proxy.pre_load(5) #mv_strategy_ids = [('medium',0.4),('medium_high',0.7),('high',1),('medium_low',0.2),('low',0.1)] for cust_id in cust_ids: strategy = BLStrategy(cust_id,'bl_industry',universe,data_proxy) run_sim(cust_id,strategy,'20140601',data_proxy,rebalance_freq = 30) # ----------------------------------------- BL行业模型 -----------------------------------------
def execute_simulation(directory_name, results_db):
    """Run one full influence-simulation experiment.

    Reads ``<directory_name>/settings.ini``, loads the dataset into a fresh
    sqlite DB, assigns thresholds and lambdas, incentivizes a target set,
    runs the simulation, records results to ``results_db`` and writes
    ``<directory_name>/results.ini``.  The working DB file is deleted on
    successful completion.
    """
    start_time = time.time()
    print("START")
    results_config = configparser.ConfigParser()
    settings_config = configparser.ConfigParser()
    settings_config.read(directory_name + '/settings.ini')
    conn = sqlite3.connect(settings_config['FILES']['DB'])
    conn.row_factory = sqlite3.Row
    try:
        create_db(conn)
        # Two supported input formats.
        if settings_config['FILES']['dataset'] == 'flights':
            parse_airline_data(settings_config, conn)
        else:
            parse_stanford_data(settings_config, conn)
        # thresh_prop == '0' selects random threshold assignment.
        if settings_config['PARAMS']['thresh_prop'] == '0':
            thresholds_random(settings_config, conn)
        else:
            thresholds_proportion(settings_config, conn)
        # lambda_val == '0' selects degree-derived lambda values.
        if settings_config['PARAMS']['lambda_val'] == '0':
            lambda_value_degree(settings_config, conn)
        # Reuse a cached per-threshold node DB when one already exists.
        files_in_parent_dir = os.listdir(
            settings_config['FILES']['parent_directory'])
        # NOTE(review): replace('.', '') also strips the dot from ".db" and
        # from a fractional thresh_prop — looks intentional for matching
        # listdir entries, but confirm.
        current_threshold_db = 'nodes-thresh-{}-{}.db'.format(
            settings_config['PARAMS']['thresh_prop'],
            settings_config['FILES']['dataset']).replace('.', '')
        if current_threshold_db not in files_in_parent_dir:
            thresholds_all_nodes(settings_config, conn, current_threshold_db)
        results_config['RESULTS'] = dict()
        incentivize(settings_config, results_config, conn)
        thresholds_target_set(settings_config, conn, current_threshold_db)
        run_sim(settings_config, conn)
        thresholds_influenced_nodes(settings_config, conn)
        thresholds_not_influenced_nodes(settings_config, conn)
        record_results(settings_config, results_config, conn, results_db)
        with open(directory_name + '/results.ini', 'w') as configfile:
            results_config.write(configfile)
        print('END {}'.format(str(round(time.time() - start_time, 2))))
    finally:
        # Always release the connection — the original leaked it whenever
        # any step above raised.
        conn.close()
    # Delete the working DB only on success, matching original behavior.
    os.remove(settings_config['FILES']['DB'])