Example #1
def _load_raw_data():
    """import data from spreadsheets"""
    datadir = user_config.directory

    if not os.path.isdir(datadir):
        raise OSError('data directory "{d}" does not exist'.format(d=datadir))
    file_gens, file_loads, file_lines, file_init = [
        joindir(datadir, filename) for filename in (
            user_config.file_gens,
            user_config.file_loads,
            user_config.file_lines,
            user_config.file_init)]

    generators_data = nice_names(read_csv(file_gens))
    loads_data = nice_names(read_csv(file_loads))

    # transmission lines and initial conditions are optional inputs;
    # fall back to empty tables if their files are missing or unreadable
    try:
        lines_data = nice_names(read_csv(file_lines))
    except Exception:
        lines_data = DataFrame()

    try:
        init_data = nice_names(read_csv(file_init))
    except Exception:
        init_data = DataFrame()

    return generators_data, loads_data, lines_data, init_data
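
A minimal usage sketch (the module path and case directory are hypothetical; read_csv is pandas', nice_names and joindir are minpower helpers):

from minpower.config import user_config  # assumed location of user_config

user_config.directory = 'my-uc-case'  # hypothetical case directory
gens, loads, lines, init = _load_raw_data()
# lines and init come back as empty DataFrames when the optional
# line and initial-conditions files are absent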
Example #2
def get_dir_config(directory):
    '''
    pull the config from a specific directory.
    this allows post-load updating of the user_config.
    '''
    dirparser = SafeConfigParser()
    dirparser.read([
        # the minpower default set, from the minpower/configuration directory
        # need this to set the defaults
        joindir(os.path.split(__file__)[0], 'configuration/minpower.cfg'),
        # need the home directory overrides too
        os.path.expanduser('~/minpower.cfg'),
        os.path.expanduser('~/.minpowerrc'),
        # the directory's defaults
        joindir(directory, 'minpower.cfg'),
    ])

    new_user_config, new_scheduler_config = parse_config(dirparser)
    # return (filter_non_defaults(new_user_config, user_config),
    #    filter_non_defaults(new_scheduler_config, scheduler_config))
    return new_user_config, new_scheduler_config
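
SafeConfigParser.read applies the files in list order, so later files override earlier ones: the packaged defaults first, then the home-directory overrides, then the case directory's own minpower.cfg. A sketch with a hypothetical case directory:

# an option set in ./my-uc-case/minpower.cfg wins over ~/.minpowerrc,
# which in turn wins over the packaged configuration/minpower.cfg defaults
new_user_config, new_scheduler_config = get_dir_config('./my-uc-case')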
Example #3
def _setup_logging(pid=None):
    ''' set up the logging to report on the status'''
    kwds = dict(
        level=(logging.DEBUG if user_config.debugger
               else int(user_config.logging_level)),
        datefmt='%Y-%m-%d %H:%M:%S',
        format='%(asctime)s %(levelname)s: %(message)s')
    # log to file if pid is set, unless in debugging mode
    if (user_config.output_prefix or user_config.pid) \
            and not user_config.debugger:
        kwds['filename'] = joindir(user_config.directory, '{}.log'.format(pid))
    if (user_config.logging_level > 10) and ('filename' not in kwds):
        # don't log the time if debugging isn't turned on
        kwds['format'] = '%(levelname)s: %(message)s'
    logging.basicConfig(**kwds)
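
A sketch of the resulting behavior, assuming the stdlib numeric levels (DEBUG=10, INFO=20) and a hypothetical pid:

_setup_logging(pid=1234)  # hypothetical pid
logging.info('solving stage 1')
# with logging_level > 10 and no log file configured, this prints
# 'INFO: solving stage 1'; with user_config.pid set it would instead be
# written, with timestamps, to <directory>/1234.log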
Example #4
def build_class_list(data, model, times=None, timeseries=None):
    """
    Create list of class instances from the row of a DataFrame.
    """
    datadir = user_config.directory
    is_generator = (model == Generator)

    all_models = []

    if 'schedulename' in data.columns:
        data['schedule'] = None

    for i, row in data.iterrows():
        row_model = model
        row = row.dropna()

        power = row.get('power')
        schedulename = row.get('schedulename')

        if is_generator:
            # get all those extra things which need to be parsed
            observed_name = row.get('observedname')
            forecast_name = row.get('forecastname')

            scenariosfilename = row.get('scenariosfilename')
            scenariosdirectory = row.get('scenariosdirectory')

            if scenariosdirectory and user_config.scenarios_directory:
                # override the scenarios directory with the one
                # specified in the commandline options
                scenariosdirectory = user_config.scenarios_directory
                data.ix[i, 'scenariosdirectory'] = \
                    user_config.scenarios_directory

            bid_points_filename = row.get('costcurvepointsfilename')

            if schedulename or power or \
                    (forecast_name and user_config.deterministic_solve) or \
                    (observed_name and user_config.perfect_solve):
                row_model = Generator_nonControllable
            elif scenariosdirectory or scenariosfilename:
                row_model = Generator_Stochastic

        # warn about fields not in model
        valid_fields = pd.Index(fields[model.__name__] + ['schedulename'])
        if is_generator:
            valid_fields = valid_fields.union(pd.Index(gen_extra_fields))
        invalid_fields = row.index.difference(valid_fields)
        if len(invalid_fields) > 0:
            raise ValueError('invalid fields in model: {}'.format(
                invalid_fields.tolist()))
            # logging.warning

        kwds = row[row.index.isin(fields[model.__name__])].to_dict()

        # add in any schedules
        if schedulename:
            kwds['schedule'] = timeseries[schedulename]
        elif pd.notnull(power):
            # a constant power schedule
            kwds['schedule'] = make_constant_schedule(times, power)
            kwds.pop('power')

        if is_generator:
            if observed_name:
                kwds['observed_values'] = timeseries[observed_name]

            if user_config.perfect_solve and observed_name:
                # for a perfect information solve forecast = observed
                kwds['schedule'] = kwds['observed_values'] \
                    = timeseries[observed_name]
            elif forecast_name:
                kwds['schedule'] = timeseries[forecast_name]

            if scenariosdirectory:
                try:
                    kwds['observed_values'] = timeseries[observed_name]
                except Exception:
                    raise IOError('you must provide an observed filename '
                                  'for a rolling stochastic UC')

            # add a custom bid points file with {power, cost} columns
            if bid_points_filename:
                kwds['bid_points'] = read_bid_points(
                    joindir(datadir, bid_points_filename))
                kwds['costcurveequation'] = None

        try:
            obj = row_model(index=i, **kwds)
        except TypeError:
            print '{} model got unexpected parameter'.format(model)
            raise

        all_models.append(obj)

    return all_models
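
The dispatch above works row by row: a row with a fixed or forecast schedule becomes a Generator_nonControllable, a row pointing at scenario data becomes a Generator_Stochastic, and anything else stays a plain Generator. A sketch with hypothetical rows (column names like pmax are assumptions about the schema, not confirmed by this snippet):

gens = pd.DataFrame([
    dict(name='coal1', pmax=100),  # stays a plain Generator
    # becomes Generator_nonControllable when deterministic_solve is set
    dict(name='wind1', forecastname='g1_forecast'),
])
generators = build_class_list(gens, Generator, times, timeseries)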
Example #5
"""
Provide the defaults and configuration for other modules.
`user_config` is treated as a global in minpower.
"""
import os
import sys
from ConfigParser import SafeConfigParser
from commonscripts import DotDict, joindir

parser = SafeConfigParser()
parser.read([
    # the minpower default set, from the minpower/configuration directory
    joindir(os.path.split(__file__)[0], 'configuration/minpower.cfg'),
    # the user's global overrides, from the home directory
    os.path.expanduser('~/minpower.cfg'),
    os.path.expanduser('~/.minpowerrc'),
])

option_types = dict(
    duals=bool,
    breakpoints=int,
    hours_commitment=int,
    hours_overlap=int,

    cost_load_shedding=float,
    cost_wind_shedding=float,
    economic_wind_shed=bool,
    dispatch_decommit_allowed=bool,
    solver=str,
    mipgap=float,
    solver_time_limit=float,
    reserve_fixed=float,
Example #6
def full_filename(filename):
    return joindir(user_config.directory, filename)
Example #7
def _set_store_filename(pid=None):
    fnm = 'stage-store.hd5'
    if user_config.output_prefix or user_config.pid:
        fnm = '{}-{}'.format(pid, fnm)

    user_config.store_filename = joindir(user_config.directory, fnm)
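
A sketch with hypothetical values; the pid prefix is only added when output_prefix or pid is set:

user_config.pid = 1234  # hypothetical
_set_store_filename(pid=user_config.pid)
# user_config.store_filename -> '<directory>/1234-stage-store.hd5'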
Example #8
def setup_scenarios(gen_data, generators, times):

    col = "scenariosdirectory"
    scenario_values = pd.Panel()
    if user_config.deterministic_solve or user_config.perfect_solve or \
            (col not in gen_data.columns):
        # a deterministic problem
        return scenario_values

    gen_params = gen_data[gen_data[col].notnull()]
    if len(gen_params) > 1:
        raise NotImplementedError("more than one generator with scenarios.")

    gen = generators[gen_params.index[0]]
    gen.has_scenarios = True

    # directory of scenario values where each file is one day
    scenarios_directory = gen_params["scenariosdirectory"].values[0]

    searchstr = "*.csv"

    filenames = sorted(glob(joindir(user_config.directory, joindir(scenarios_directory, searchstr))))
    if not filenames:
        raise IOError('no scenario files in "{}"'.format(scenarios_directory))

    alldata = OrderedDict()
    for f in filenames:
        data = _parse_scenario_day(f)
        # label scenarios for the day with the date
        date = Timestamp(data.columns.drop("probability")[0]).date()
        alldata[date] = data

    # TODO - assumes one hour intervals!!
    hrs = user_config.hours_commitment + user_config.hours_overlap

    # make scenarios into a pd.Panel with axes: day, scenario, {prob, [hours]}
    scenario_values = pd.Panel(
        items=alldata.keys(),
        major_axis=range(max([len(dat) for dat in alldata.values()])),
        minor_axis=["probability"] + range(hrs),
    )

    for day, scenarios in alldata.iteritems():
        if "probability" == scenarios.columns[-1]:
            # reorder so that probability is the first column
            scenarios = scenarios[scenarios.columns[:-1].insert(0, "probability")]
        # rename the times into just hour offsets
        scenarios = scenarios.rename(
            columns=dict(zip(scenarios.columns, ["probability"] + range(len(scenarios.columns) - 1)))
        )

        # and take the number of hours needed
        scenarios = scenarios[scenarios.columns[: 1 + hrs]]

        scenario_values[day] = scenarios

    if user_config.wind_multiplier != 1.0:
        scenario_values *= user_config.wind_multiplier
        svt = scenario_values.transpose(2, 1, 0)
        svt["probability"] *= 1 / user_config.wind_multiplier
        scenario_values = svt.transpose(2, 1, 0)

    gen.scenario_values = scenario_values
    # defer scenario tree construction until actual time stage starts
    return scenario_values
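
Note the wind_multiplier handling: the whole panel, probabilities included, is scaled, and the probability column is then divided back out so each day's probabilities keep their original values. A hypothetical one-day scenario file and call:

# scenarios/2012-01-01.csv (hypothetical), one row per scenario:
#   probability, 2012-01-01 00:00, 2012-01-01 01:00, ...
#   0.6,         19.2,             21.0,             ...
#   0.4,         12.5,             14.1,             ...
scenario_values = setup_scenarios(gen_data, generators, times)
# scenario_values[day]: rows are scenarios, columns are probability + hour offsets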
Example #9
def setup_times(generators_data, loads_data):
    """
    Create a :class:`~schedule.TimeIndex` object
    from the schedule files.

    Also create a unified DataFrame of all the schedules, `timeseries`.

    If there are no schedule files (as in ED, OPF),
    create an index with just a single time.
    """
    fcol = "schedulefilename"
    ncol = "schedulename"

    loads_data[ncol] = None
    generators_data[ncol] = None

    if fcol not in loads_data.columns:
        loads_data[fcol] = None
    if fcol not in generators_data.columns:
        generators_data[fcol] = None

    datadir = user_config.directory

    timeseries = {}

    def filter_notnull(df, col):
        return df[df[col].notnull()]

    for i, load in filter_notnull(loads_data, fcol).iterrows():
        name = "d{}".format(i)
        loads_data.ix[i, ncol] = name
        timeseries[name] = (
            get_schedule(joindir(datadir, load[fcol])) * user_config.load_multiplier + user_config.load_adder
        )

    for i, gen in filter_notnull(generators_data, fcol).iterrows():
        name = "g{}".format(i)
        generators_data.ix[i, ncol] = name
        timeseries[name] = get_schedule(joindir(datadir, gen[fcol]))

    # handle observed and forecast power
    fobscol = "observedfilename"
    obscol = "observedname"
    ffcstcol = "forecastfilename"
    fcstcol = "forecastname"

    obs_name = None
    if fobscol in generators_data:
        generators_data[obscol] = None
        for i, gen in filter_notnull(generators_data, fobscol).iterrows():
            obs_name = "g{}_observations".format(i)
            generators_data.ix[i, obscol] = obs_name
            timeseries[obs_name] = get_schedule(joindir(datadir, gen[fobscol]))
            if user_config.wind_multiplier != 1.0:
                timeseries[obs_name] *= user_config.wind_multiplier

        generators_data = generators_data.drop(fobscol, axis=1)

    fcst_name = None
    if ffcstcol in generators_data:
        generators_data[fcstcol] = None
        for i, gen in filter_notnull(generators_data, ffcstcol).iterrows():
            fcst_name = "g{}_forecast".format(i)
            generators_data.ix[i, fcstcol] = fcst_name
            timeseries[fcst_name] = (
                get_schedule(joindir(datadir, gen[ffcstcol])) * user_config.wind_multiplier
                + user_config.wind_forecast_adder
            )

            if user_config.wind_error_multiplier != 1.0:
                logging.debug("scaling wind forecast error")
                obs_name = "g{}_observations".format(i)
                error = timeseries[fcst_name] - timeseries[obs_name]
                timeseries[fcst_name] = timeseries[obs_name] + error * user_config.wind_error_multiplier

            if (timeseries[fcst_name] < 0).any():
                print timeseries[fcst_name].describe()
                logging.warning("Wind forecast must always be at least zero.")
                timeseries[fcst_name][timeseries[fcst_name] < 0] = 0

        generators_data = generators_data.drop(ffcstcol, axis=1)

    generators_data = generators_data.drop(fcol, axis=1)
    loads_data = loads_data.drop(fcol, axis=1)

    if len(timeseries) == 0:
        # this is an ED or OPF problem - only one time
        return DataFrame(), just_one_time(), generators_data, loads_data

    timeseries = DataFrame(timeseries)
    times = TimeIndex(timeseries.index)
    timeseries.index = times.strings.values

    if user_config.wind_capacity_factor != 0:
        if len(filter_notnull(generators_data, obscol)) != 1:
            raise NotImplementedError("wind capacity factor only works with one wind generator")

        all_loads = timeseries[filter(lambda col: col.startswith("d"), timeseries.columns)]

        capf_current = timeseries[obs_name].sum() / all_loads.sum(axis=1).sum()

        wind_mult = user_config.wind_capacity_factor / capf_current
        user_config.wind_multiplier = wind_mult

        logging.info(
            "scaling wind from a c.f. of {} to a c.f. of {}".format(capf_current, user_config.wind_capacity_factor)
        )
        timeseries[obs_name] *= wind_mult
        if fcst_name:
            timeseries[fcst_name] *= wind_mult

    return timeseries, times, generators_data, loads_data
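
The wind_capacity_factor block rescales wind so its energy share of total load hits the requested value. A worked example with hypothetical numbers:

# observed wind sums to 500 MWh against 10,000 MWh of load:
#   capf_current = 500.0 / 10000 = 0.05
# requesting wind_capacity_factor = 0.10 then gives
#   wind_mult = 0.10 / 0.05 = 2.0
# so the observed (and any forecast) series are doubled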
Example #10
def full_filename(filename):
    if user_config.standalone:
        filename = '{}-{}'.format(user_config.pid, filename)
    return joindir(user_config.directory, filename)
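
In standalone mode every output file gets a per-process prefix, so parallel runs sharing a directory don't collide. A sketch with hypothetical values:

user_config.standalone = True
user_config.pid = 1234  # hypothetical
full_filename('commitment.csv')  # hypothetical output name
# -> '<directory>/1234-commitment.csv'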