Ejemplo n.º 1
0
'''
import csv
import SDDP
import logging
import numpy as np
from SDDP.RandomnessHandler import RandomContainer, StageRandomVector, AR1_depedency
from SDDP.SDPP_Alg import SDDP
from SDDP import logger as sddp_log
from HydroExamples import Turbine, Reservoir
from Utils.argv_parser import sys, parse_args
from gurobipy import Model, GRB, quicksum
from InstanceGen.ReservoirChainGen import read_instance, HydroRndInstance
'''
Global variables to store instance data
'''
# Instance loaded at import time with the generator's default arguments.
hydro_instance = read_instance()

# Placeholders for instance data; populated later (e.g. by load_hydro_data).
# T: horizon, nr: number of reservoirs, Rmatrix: AR coefficient matrices,
# RHSnoise: inflow noise realizations, prices: per-stage energy prices.
T = nr = Rmatrix = RHSnoise = None
initial_inflow = valley_chain = prices = None


def random_builder():
    '''
    Random builder function.

    Starts assembling the stagewise randomness for the SDDP model: a
    RandomContainer plus a list of per-stage random vectors.

    NOTE(review): this definition is truncated by the example delimiter
    that follows -- `rc` and `rndVectors` are created but never filled or
    returned here. Confirm the full body against the original source.
    '''
    rc = RandomContainer()
    rndVectors = []
Ejemplo n.º 2
0
def load_hydro_data(approach, dus_type):
    '''
    Load the hydro reservoir-chain instance and build the data structures
    needed to run the SDDP/DRO algorithm.

    Command-line arguments (parsed via Utils.argv_parser.parse_args) override
    module-level settings: R (number of reservoirs), T (horizon), lag (AR lag),
    dro_r (DRO radius), N (number of data samples), DW_extended and DW_sampling
    (extra-data generation for the Wasserstein uncertainty set).

    Parameters
    ----------
    approach : str
        Solution-approach label; "SP" selects the risk-neutral instance-name
        format, anything else the DRO format.
    dus_type : str
        Distributional-uncertainty-set type; 'DW' (together with
        DW_extended > 1) enables the extended Wasserstein data branch.

    Returns
    -------
    tuple
        (T, model_builder_n_tran, rnd_builder_n_train, rnd_container_data,
         rnd_container_oos, dro_radius, instance_name, instance_name_gen)

    NOTE(review): this function relies on several names expected at module
    scope that are not defined in this fragment (lag, dro_radius, options,
    hydro_path, experiment_desing_gen, reset_experiment_desing_gen,
    generate_extra_data, model_builder, MIN_LEVEL, MAX_LEVEL, INI_LEVEL,
    Water_Penalty, Spillage_Penalty) -- confirm against the full source.
    It raises NameError if 'N' is not passed on the command line, and
    TypeError in the price comprehension if 'T' is never set (T stays None).
    '''
    global T
    global nr
    # NOTE(review): `lag` and `dro_radius` are declared global but are not
    # initialized at module level in this fragment; if the corresponding
    # kwargs are absent they remain undefined when used below.
    global lag
    global dro_radius
    global Rmatrix
    global RHSnoise
    global initial_inflow
    global prices
    argv = sys.argv
    DW_extended = 1
    DW_sampling = None
    positional_args, kwargs = parse_args(argv[1:])
    if 'R' in kwargs:
        nr = kwargs['R']
    if 'T' in kwargs:
        T = kwargs['T']
    if 'lag' in kwargs:
        lag = kwargs['lag']
    if 'dro_r' in kwargs:
        dro_radius = kwargs['dro_r']
    if 'N' in kwargs:
        N = kwargs['N']
    if 'DW_extended' in kwargs:
        #N*DW_extended would be the number of oracles
        DW_extended = kwargs['DW_extended']
    if 'DW_sampling' in kwargs:
        DW_sampling = kwargs['DW_sampling']

    from InstanceGen.ReservoirChainGen import read_instance
    # Sinusoidal per-stage energy prices around a base of 18.
    prices = [18 + round(5 * np.sin(0.5 * (x - 2)), 2) for x in range(0, T)]
    print(prices)
    # NOTE(review): this creates a *local* hydro_instance that shadows the
    # module-level one loaded at import time.
    hydro_instance = read_instance(
        'hydro_rnd_instance_R30_UD0_T48_LAG1_OUT10K_AR1.pkl', lag=lag)
    Rmatrix = hydro_instance.ar_matrices
    # Keep only the first nr reservoirs and first T stages of the noise data.
    RHSnoise_density = hydro_instance.RHS_noise[0:nr, :,
                                                0:T]  #Total of 10,000 samples
    initial_inflow = np.array(hydro_instance.inital_inflows)[:, 0:nr]

    #===========================================================================
    # import codecs, json
    # json_file_obj = {}
    # json_file_obj['ar_matrix'] = hydro_instance.ar_matrices
    # json_file_obj['RHS_noise'] = hydro_instance.RHS_noise[0:nr, [8192, 4098] , 0:T].tolist()
    # json_file_obj['initial_inflow'] = initial_inflow.tolist()
    #
    # file_path = "./HydroModelInput.json" ## your path variable
    # json.dump(json_file_obj, codecs.open(file_path, 'w', encoding='utf-8'), separators=(',', ':'), sort_keys=True, indent=4) ### this saves the array in .json format
    #===========================================================================

    # Same turbine configuration (flow knots, power knots) for every reservoir.
    valley_turbines = Turbine([50, 60, 70], [55, 65, 70])

    N_data = N
    #Reset experiment design stream
    reset_experiment_desing_gen()

    #For out of sample performance measure
    # Draw 9000 distinct sample indices for the out-of-sample test set.
    test_indeces = set(
        experiment_desing_gen.choice(range(len(RHSnoise_density[0])),
                                     size=9000,
                                     replace=False))
    l_test = list(test_indeces)
    l_test.sort()
    RHSnoise_oos = RHSnoise_density[:, l_test]
    # One Reservoir per row of the out-of-sample noise matrix.
    valley_chain_oos = [
        Reservoir(MIN_LEVEL, MAX_LEVEL, INI_LEVEL, valley_turbines,
                  Water_Penalty, Spillage_Penalty, x) for x in RHSnoise_oos
    ]

    #Train indices for Wasserstein distance
    available_indices = set(range(len(RHSnoise_density[0]))) - test_indeces
    available_indices = np.array(list(available_indices))
    #data_indeces=set(experiment_desing_gen.choice(list(available_indices), size=N_data, replace=False))
    # Deterministically take the first N_data non-test indices as training data.
    data_indeces = available_indices[0:N_data]
    RHSnoise_data = RHSnoise_density[:, data_indeces]
    valley_chain_data = [
        Reservoir(MIN_LEVEL, MAX_LEVEL, INI_LEVEL, valley_turbines,
                  Water_Penalty, Spillage_Penalty, x) for x in RHSnoise_data
    ]
    print(data_indeces)
    if DW_extended > 1 and dus_type == 'DW':
        #Generate additional data points from the data
        if DW_sampling is None or DW_sampling == 'none' or DW_sampling == 'None':
            # No synthetic sampling: just widen the training window so the
            # first N_data points remain a prefix of the training set.
            #available_indices = set(available_indices) - set(data_indeces)
            N_wasserstein = N_data * DW_extended
            #train_indeces = set(experiment_desing_gen.choice(list(available_indices), size=N_wasserstein, replace=False))
            train_indeces = available_indices[0:N_wasserstein]
            assert set(data_indeces).issubset(train_indeces)
            N_training = len(train_indeces)
            l_train = list(train_indeces)
            l_train.sort()
            RHSnoise = RHSnoise_density[:, l_train]
        else:
            # Synthetic sampling: generate (DW_extended - 1) * N_data extra
            # points from the data via the chosen DW_sampling method.
            N_wasserstein = N_data * DW_extended - N_data
            available_indices = available_indices[N_data:]
            avail_realizations = RHSnoise_density[:, available_indices]
            gen_data = generate_extra_data(
                RHSnoise_data.transpose(),
                N_wasserstein,
                method=DW_sampling,
                realizations=avail_realizations.transpose())
            RHSnoise = np.hstack((RHSnoise_data, gen_data.transpose()))
            N_training = len(RHSnoise[0])
    else:
        # Plain case: training set equals the data set.
        train_indeces = data_indeces
        N_training = N_data
        RHSnoise = np.copy(RHSnoise_data)

    cut_type = 'MC' if options['multicut'] else 'SC'
    sampling_type = 'DS' if options['dynamic_sampling'] else 'ES'

    def instance_name_gen(n_dro_radius):
        # Builds the experiment/instance identifier used for logging and output
        # files; the format depends on whether the approach is risk-neutral.
        if approach == "SP":
            instance_name = "Hydro_R%i_AR%i_T%i_N%i_%i_I%i_Time%i_%s_%s_%s" % (
                nr, lag, T, N_data, N_training, options['max_iter'],
                options['max_time'], approach, cut_type, sampling_type)
        else:
            alg_iters = options['max_iter']
            alg_cpu_time = options['max_time']
            beta = options['dynamic_sampling_beta']
            instance_name = f"Hydro_R{nr}_AR{lag}_T{T}_N_{N_data}_{N_training}_I_{alg_iters}_T{alg_cpu_time}" \
                f"_{dus_type}_{approach}_{cut_type}_{sampling_type}_{n_dro_radius:.7f}_{DW_sampling}_{beta}"

        return instance_name

    instance_name = instance_name_gen(dro_radius)
    # Log this run to its own file named after the instance.
    sddp_log.addHandler(
        logging.FileHandler(hydro_path + "/Output/log/%s.log" %
                            (instance_name),
                            mode='w'))

    # Training valley chain: one Reservoir per row of the training noise.
    valley_chain = [
        Reservoir(MIN_LEVEL, MAX_LEVEL, INI_LEVEL, valley_turbines,
                  Water_Penalty, Spillage_Penalty, x) for x in RHSnoise
    ]

    def rnd_builder_n_train():
        # Closure capturing the training valley chain.
        # NOTE(review): the random_builder defined earlier in this fragment
        # takes no arguments; presumably the full source defines one that
        # accepts a valley chain -- confirm.
        return random_builder(valley_chain)

    def model_builder_n_tran(stage):
        # Closure capturing the training valley chain for the stage model.
        return model_builder(stage, valley_chain)

    rnd_container_oos = random_builder(valley_chain_oos)
    rnd_container_data = random_builder(valley_chain_data)

    return T, model_builder_n_tran, rnd_builder_n_train, rnd_container_data, rnd_container_oos, dro_radius, instance_name, instance_name_gen
Ejemplo n.º 3
0
 # NOTE(review): this is the interior of a larger setup routine (the enclosing
 # definition and the creation of `kwargs`, `lag`, `nr`, `N` are outside this
 # fragment). It parses command-line overrides, loads the AR hydro instance,
 # and splits the noise samples into training and test sets.
 if 'T' in kwargs:
     T = kwargs['T']
 if 'max_iter' in kwargs:
     # NOTE(review): the parsed kwarg is ignored -- max_iter is hard-coded to
     # 100 and lines_freq to 1; confirm whether this is a leftover debug edit.
     SDDP.options['max_iter'] = 100#kwargs['max_iter']
     SDDP.options['lines_freq'] = 1#int(SDDP.options['max_iter']/10)
 if 'sim_iter' in kwargs:
     SDDP.options['sim_iter'] = kwargs['sim_iter']
 if 'lag' in kwargs:
     lag = kwargs['lag']
 if 'dro_radius' in kwargs:
     dro_radius = kwargs['dro_radius']
 if 'N' in kwargs:
     N = kwargs['N']
     
 # Per-run log file named after the AR lag.
 sddp_log.addHandler(logging.FileHandler("HydroAR%i_ESS.log" %(lag), mode='w'))
 hydro_instance = read_instance('hydro_rnd_instance_R10_UD1_T120_LAG1_OUT10K_AR.pkl' , lag = lag)
 
 
 instance_name = "Hydro_R%i_AR%i_T%i_I%i_ESS" % (nr, lag, T, SDDP.options['max_iter'])
 Rmatrix = hydro_instance.ar_matrices
 # Keep only the first nr reservoirs of the noise data.
 RHSnoise_density = hydro_instance.RHS_noise[0:nr]
 N_training = N
 #Reset experiment design stream 
 reset_experiment_desing_gen()
 # Sample N_training distinct indices for training; everything else is test.
 train_indeces = set(experiment_desing_gen.choice(range(len(RHSnoise_density[0])),size=N_training, replace = False))
 test_indeces = set(range(len(RHSnoise_density[0]))) - train_indeces
 assert len(train_indeces.intersection(test_indeces))==0,  'Not disjoint'
 
 l_train = list(train_indeces)
 l_train.sort()
 RHSnoise = RHSnoise_density[:,l_train]