def load_algorithm_options():
    """Override entries of the global ``options`` dict from CLI keyword args.

    Every recognized ``key=value`` pair parsed from ``sys.argv`` replaces the
    corresponding entry of ``options`` (logged at INFO level); unrecognized
    long-named parameters produce a warning. If ``lines_freq`` was not passed
    explicitly, it defaults to one tenth of the configured ``max_iter``.
    """
    _, kwargs = parse_args(sys.argv[1:])
    for name in kwargs:
        if name not in options:
            # NOTE(review): warnings are suppressed for names of <= 5 chars —
            # presumably short aliases handled elsewhere; confirm intent.
            if len(name) > 5:
                logger.warning(
                    'Parameter %s is not in the algorithm options. Check algorithm parameters wiki.' % (name))
            continue
        options[name] = kwargs[name]
        logger.info('Loading %s' % (name))
    if 'lines_freq' not in kwargs:
        # Default progress-print frequency: report roughly 10 times per run.
        options['lines_freq'] = int(options['max_iter'] / 10)
def load_hydro_data(approach, dus_type):
    """Load the hydro-valley instance and build training/out-of-sample data.

    Reads command-line kwargs (R, T, lag, dro_r, N, DW_extended, DW_sampling),
    loads a pickled reservoir-chain instance, splits its noise samples into a
    9000-sample out-of-sample set and an N-sample training set, and — when
    ``dus_type == 'DW'`` and ``DW_extended > 1`` — enlarges the training set
    either by taking more historical samples or by synthesizing extra points
    via ``generate_extra_data``.

    Parameters
    ----------
    approach : str
        Solution-approach label; ``"SP"`` selects the plain %-style instance
        name, any other value the DRO-style f-string name.
    dus_type : str
        Distributional-uncertainty-set type; ``'DW'`` enables the extended
        data-generation branch.

    Returns
    -------
    tuple
        ``(T, model_builder_n_tran, rnd_builder_n_train, rnd_container_data,
        rnd_container_oos, dro_radius, instance_name, instance_name_gen)``

    Side effects: rebinds the module globals listed below, resets the
    experiment-design RNG stream, prints prices/indices, and attaches a
    per-instance FileHandler to ``sddp_log``.
    """
    global T
    global nr
    global lag
    global dro_radius
    global Rmatrix
    global RHSnoise
    global initial_inflow
    global prices
    argv = sys.argv
    DW_extended = 1   # multiplier: N*DW_extended is the Wasserstein sample count
    DW_sampling = None  # generation method for extra points; None => reuse data
    positional_args, kwargs = parse_args(argv[1:])
    # Command-line overrides for the module-level experiment parameters.
    if 'R' in kwargs:
        nr = kwargs['R']
    if 'T' in kwargs:
        T = kwargs['T']
    if 'lag' in kwargs:
        lag = kwargs['lag']
    if 'dro_r' in kwargs:
        dro_radius = kwargs['dro_r']
    if 'N' in kwargs:
        N = kwargs['N']
    # NOTE(review): N is a *local* name (not in the global list); if the 'N'
    # kwarg is absent, the later `N_data = N` raises UnboundLocalError even
    # when a module-level N exists — confirm that 'N' is always supplied.
    if 'DW_extended' in kwargs:
        # N*DW_extended would be the number of oracles
        DW_extended = kwargs['DW_extended']
    if 'DW_sampling' in kwargs:
        DW_sampling = kwargs['DW_sampling']
    from InstanceGen.ReservoirChainGen import read_instance
    # Sinusoidal stage prices; one price per stage 0..T-1.
    prices = [18 + round(5 * np.sin(0.5 * (x - 2)), 2) for x in range(0, T)]
    print(prices)
    hydro_instance = read_instance(
        'hydro_rnd_instance_R30_UD0_T48_LAG1_OUT10K_AR1.pkl', lag=lag)
    Rmatrix = hydro_instance.ar_matrices
    # Restrict to the first nr reservoirs and first T stages.
    RHSnoise_density = hydro_instance.RHS_noise[0:nr, :, 0:T]  # Total of 10,000 samples
    initial_inflow = np.array(hydro_instance.inital_inflows)[:, 0:nr]
    #===========================================================================
    # import codecs, json
    # json_file_obj = {}
    # json_file_obj['ar_matrix'] = hydro_instance.ar_matrices
    # json_file_obj['RHS_noise'] = hydro_instance.RHS_noise[0:nr, [8192, 4098] , 0:T].tolist()
    # json_file_obj['initial_inflow'] = initial_inflow.tolist()
    #
    # file_path = "./HydroModelInput.json" ## your path variable
    # json.dump(json_file_obj, codecs.open(file_path, 'w', encoding='utf-8'), separators=(',', ':'), sort_keys=True, indent=4) ### this saves the array in .json format
    #===========================================================================
    valley_turbines = Turbine([50, 60, 70], [55, 65, 70])
    N_data = N
    # Reset experiment design stream
    reset_experiment_desing_gen()
    # For out of sample performance measure
    test_indeces = set(
        experiment_desing_gen.choice(range(len(RHSnoise_density[0])),
                                     size=9000,
                                     replace=False))
    l_test = list(test_indeces)
    l_test.sort()
    RHSnoise_oos = RHSnoise_density[:, l_test]
    valley_chain_oos = [
        Reservoir(MIN_LEVEL, MAX_LEVEL, INI_LEVEL, valley_turbines,
                  Water_Penalty, Spillage_Penalty, x) for x in RHSnoise_oos
    ]
    # Train indices for Wasserstein distance
    available_indices = set(range(len(RHSnoise_density[0]))) - test_indeces
    available_indices = np.array(list(available_indices))
    # NOTE(review): set->array ordering is insertion/hash order, so the
    # deterministic slice below depends on it being reproducible across runs.
    #data_indeces=set(experiment_desing_gen.choice(list(available_indices), size=N_data, replace=False))
    data_indeces = available_indices[0:N_data]
    RHSnoise_data = RHSnoise_density[:, data_indeces]
    valley_chain_data = [
        Reservoir(MIN_LEVEL, MAX_LEVEL, INI_LEVEL, valley_turbines,
                  Water_Penalty, Spillage_Penalty, x) for x in RHSnoise_data
    ]
    print(data_indeces)
    if DW_extended > 1 and dus_type == 'DW':
        # Generate additional data points from the data
        if DW_sampling is None or DW_sampling == 'none' or DW_sampling == 'None':
            # No synthetic sampling: just take more historical samples, which
            # by construction include the original data_indeces as a prefix.
            #available_indices = set(available_indices) - set(data_indeces)
            N_wasserstein = N_data * DW_extended
            #train_indeces = set(experiment_desing_gen.choice(list(available_indices), size=N_wasserstein, replace=False))
            train_indeces = available_indices[0:N_wasserstein]
            assert set(data_indeces).issubset(train_indeces)
            N_training = len(train_indeces)
            l_train = list(train_indeces)
            l_train.sort()
            RHSnoise = RHSnoise_density[:, l_train]
        else:
            # Synthesize N*(DW_extended-1) extra points from the data using
            # the requested method, drawing candidate realizations from the
            # unused (non-data, non-test) historical samples.
            N_wasserstein = N_data * DW_extended - N_data
            available_indices = available_indices[N_data:]
            avail_realizations = RHSnoise_density[:, available_indices]
            gen_data = generate_extra_data(
                RHSnoise_data.transpose(),
                N_wasserstein,
                method=DW_sampling,
                realizations=avail_realizations.transpose())
            RHSnoise = np.hstack((RHSnoise_data, gen_data.transpose()))
            N_training = len(RHSnoise[0])
    else:
        # No extension: train on exactly the N_data historical samples.
        train_indeces = data_indeces
        N_training = N_data
        RHSnoise = np.copy(RHSnoise_data)
    cut_type = 'MC' if options['multicut'] else 'SC'
    sampling_type = 'DS' if options['dynamic_sampling'] else 'ES'

    def instance_name_gen(n_dro_radius):
        # Build the run-identifier string; parameterized on the DRO radius so
        # callers can re-derive names for other radii.
        if approach == "SP":
            instance_name = "Hydro_R%i_AR%i_T%i_N%i_%i_I%i_Time%i_%s_%s_%s" % (
                nr, lag, T, N_data, N_training, options['max_iter'],
                options['max_time'], approach, cut_type, sampling_type)
        else:
            alg_iters = options['max_iter']
            alg_cpu_time = options['max_time']
            beta = options['dynamic_sampling_beta']
            instance_name = f"Hydro_R{nr}_AR{lag}_T{T}_N_{N_data}_{N_training}_I_{alg_iters}_T{alg_cpu_time}" \
                f"_{dus_type}_{approach}_{cut_type}_{sampling_type}_{n_dro_radius:.7f}_{DW_sampling}_{beta}"
        return instance_name

    instance_name = instance_name_gen(dro_radius)
    sddp_log.addHandler(
        logging.FileHandler(hydro_path + "/Output/log/%s.log" % (instance_name),
                            mode='w'))
    valley_chain = [
        Reservoir(MIN_LEVEL, MAX_LEVEL, INI_LEVEL, valley_turbines,
                  Water_Penalty, Spillage_Penalty, x) for x in RHSnoise
    ]

    def rnd_builder_n_train():
        # Randomness container over the (possibly extended) training chain.
        return random_builder(valley_chain)

    def model_builder_n_tran(stage):
        # Stage-model factory bound to the training valley chain.
        return model_builder(stage, valley_chain)

    rnd_container_oos = random_builder(valley_chain_oos)
    rnd_container_data = random_builder(valley_chain_data)
    return T, model_builder_n_tran, rnd_builder_n_train, rnd_container_data, rnd_container_oos, dro_radius, instance_name, instance_name_gen
'dispatchCtr[%i]' % (i)) objfun = -prices[stage] * generation + quicksum( 0 * r.spill_cost * spill[i] for (i, r) in enumerate(valley_chain)) + quicksum( r.spill_cost * pour[i] for (i, r) in enumerate(valley_chain)) m.setObjective(objfun, GRB.MINIMIZE) m.update() return m, in_state, out_state, rhs_vars if __name__ == '__main__': sddp_log.addHandler(logging.FileHandler("HydroAR1_CS.log", mode='w')) argv = sys.argv positional_args, kwargs = parse_args(argv[1:]) if 'R' in kwargs: nr = kwargs['R'] if 'T' in kwargs: T = kwargs['T'] if 'max_iter' in kwargs: SDDP.options['max_iter'] = kwargs['max_iter'] SDDP.options['lines_freq'] = int(SDDP.options['max_iter'] / 10) if 'sim_iter' in kwargs: SDDP.options['sim_iter'] = kwargs['sim_iter'] for nr in [5, 10, 50, 100, 500, 1000]: instance_name = "Hydro_R%i_AR1_T%i_I%i_CS" % (nr, T, SDDP.options['max_iter']) Rmatrix = hydro_instance.ar_matrices RHSnoise = hydro_instance.RHS_noise[0:nr]