Example #1
def create_um(input_data, timesteps):
    """
    Creates an urbs model for the given input data and time steps.

    Args:
        input_data: input data
        timesteps: simulation timesteps

    Returns:
        (model, creation_time): the created model instance and the time in
        seconds spent on model creation
    """
    # create model
    print('CREATING urbs MODEL')
    start = time.perf_counter()
    model = urbs.create_model(input_data, 1, timesteps)
    end = time.perf_counter()

    # solve model and read results
    optim = SolverFactory('glpk')
    result = optim.solve(model, logfile='urbs_log.txt', tee=False)

    # write LP file
    filename = os.path.join(os.path.dirname(__file__), 'mimo_urbs.lp')
    model.write(filename, io_options={'symbolic_solver_labels': True})

    return model, end - start
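
A minimal usage sketch for create_um, assuming an urbs-readable spreadsheet is at hand; the filename and time-step range below are illustrative, not taken from the original script:

import urbs

input_data = urbs.read_excel('mimo-example.xlsx')   # assumed input spreadsheet
timesteps = range(1, 25)                            # first 24 hourly steps, illustrative

model, creation_time = create_um(input_data, timesteps)
print('model creation took {:.2f} s'.format(creation_time))
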
Example #2
def run_scenario(input_file,
                 timesteps,
                 scenario,
                 result_dir,
                 plot_tuples=None,
                 plot_periods=None,
                 report_tuples=None):
    """ run an urbs model for given input, time steps and scenario

    Args:
        input_file: filename to an Excel spreadsheet for urbs.read_excel
        timesteps: a list of timesteps, e.g. range(0,8761)
        scenario: a scenario function that modifies the input data dict
        result_dir: directory name for result spreadsheet and plots
        plot_tuples: (optional) list of plot tuples (c.f. urbs.result_figures)
        plot_periods: (optional) dict of plot periods (c.f. urbs.result_figures)
        report_tuples: (optional) list of (sit, com) tuples (c.f. urbs.report)

    Returns:
        the urbs model instance
    """

    # scenario name, read and modify data for scenario
    sce = scenario.__name__
    data = urbs.read_excel(input_file)
    data = scenario(data)

    # create model
    prob = urbs.create_model(data, timesteps)

    # refresh time stamp string and create filename for logfile
    now = prob.created
    log_filename = os.path.join(result_dir, '{}.log').format(sce)

    # solve model and read results
    optim = SolverFactory('glpk')  # cplex, glpk, gurobi, ...
    optim = setup_solver(optim, logfile=log_filename)
    result = optim.solve(prob, tee=True)

    # copy input file to result directory
    shutil.copyfile(input_file, os.path.join(result_dir, input_file))

    # save problem solution (and input data) to HDF5 file
    urbs.save(prob, os.path.join(result_dir, '{}.h5'.format(sce)))

    # write report to spreadsheet
    urbs.report(prob,
                os.path.join(result_dir, '{}.xlsx').format(sce),
                report_tuples=report_tuples)

    # result plots
    urbs.result_figures(prob,
                        os.path.join(result_dir, '{}'.format(sce)),
                        plot_title_prefix=sce.replace('_', ' '),
                        plot_tuples=plot_tuples,
                        periods=plot_periods,
                        figure_size=(24, 9))
    return prob
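
The scenario argument above is any function that takes the input data dict and returns it, possibly modified. A minimal sketch of a pass-through scenario and a call of run_scenario; the spreadsheet name, horizon and result directory are illustrative assumptions:

def scenario_base(data):
    # leave the input data unchanged
    return data

# illustrative call; 'mimo-example.xlsx' and 'result' are assumptions
prob = run_scenario('mimo-example.xlsx', range(1, 8761), scenario_base, 'result')
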
Example #3
def run_scenario(scenario, result_dir):
    # scenario name
    sce = scenario.__name__
    sce_nice_name = sce.replace('_', ' ').title()

    # prepare input data
    data = rivus.read_excel(data_spreadsheet)
    vertex = pdshp.read_shp(vertex_shapefile)
    edge = prepare_edge(edge_shapefile, building_shapefile)

    # apply scenario function to input data
    data, vertex, edge = scenario(data, vertex, edge)

    log_filename = os.path.join(result_dir, sce+'.log')

    # create & solve model
    prob = rivus.create_model(
        data, vertex, edge,
        peak_multiplier=lambda x:scale_peak_demand(x, peak_demand_prefactor))
    
    # scale peak demand according to pickled urbs findings
    #reduced_peak = scale_peak_demand(model, peak_demand_prefactor)
    #model.peak = reduced_peak
    
    if PYOMO3:
        prob = prob.create()
    optim = SolverFactory('glpk')
    optim = setup_solver(optim, logfile=log_filename)
    result = optim.solve(prob, tee=True)
    if PYOMO3:
        prob.load(result)

    # report
    rivus.save(prob, os.path.join(result_dir, sce+'.pgz'))
    rivus.report(prob, os.path.join(result_dir, sce+'.xlsx'))
    
    # plot without buildings
    rivus.result_figures(prob, os.path.join(result_dir, sce))
    
    # plot with buildings and to_edge lines
    more_shapefiles = [{'name': 'to_edge',
                        'color': rivus.to_rgb(192, 192, 192),
                        'shapefile': to_edge_shapefile,
                        'zorder': 1,
                        'linewidth': 0.1}]
    rivus.result_figures(prob, os.path.join(result_dir, sce+'_bld'), 
                         buildings=(building_shapefile, False),
                         shapefiles=more_shapefiles)
    return prob
Example #4
def _check_available(name):
    from pyomo.opt.base import (UnknownSolver, SolverFactory)
    try:
        opt = SolverFactory(name)
    except:
        return False
    if opt is None or isinstance(opt, UnknownSolver):
        return False
    elif (name == "gurobi") and \
       (not GUROBISHELL.license_is_valid()):
        return False
    elif (name == "baron") and \
       (not BARONSHELL.license_is_valid()):
        return False
    else:
        return (opt.available(exception_flag=False)) and \
            ((not hasattr(opt,'executable')) or \
             (opt.executable() is not None))
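
A sketch of how such an availability check is typically used, here to skip a test when the solver is missing; the solver name and test body are illustrative, and pytest is assumed to be the test runner:

import pytest
from pyomo.environ import ConcreteModel, Var, Objective, NonNegativeReals
from pyomo.opt import SolverFactory

@pytest.mark.skipif(not _check_available('glpk'),
                    reason='glpk is not available or not licensed')
def test_small_lp_solves():
    # a one-variable LP that any LP solver should handle
    m = ConcreteModel()
    m.x = Var(within=NonNegativeReals)
    m.obj = Objective(expr=m.x + 1)
    results = SolverFactory('glpk').solve(m)
    assert str(results.solver.termination_condition) == 'optimal'
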
Example #5
def run_path(data):
    #call solver
    opt = SolverFactory('cplex')
    solver_manager = SolverManagerFactory('pyro')
    #report_timing = True to see time taken for each model component
    inst = pmod.model.create_instance(data, report_timing=True)
    #fix variable I to be 0
    for (h, v, t) in inst.aux:
        inst.I[h, v, t].fix(0)
    #tee=True to trace the cplex process
    result = opt.solve(inst, tee=True, warmstart=True)
    inst.solutions.load_from(result)
    '''
    for (s,e) in inst.OD:
         for p in inst.P:
             for j in inst.N:
                for t in inst.TS:
                    if inst.q[s,e,p,j,t]() > 0:
                        print 'queue', s,e,p,j,t,inst.q[s,e,p,j,t]()
    '''
    for (s, e) in inst.OD:
        for p in inst.P:
            for j in inst.N:
                for t in inst.TS:
                    if inst.f[s, e, p, j, t]() > 0:
                        print('outflow', s, e, p, j, t,
                              inst.f[s, e, p, j, t](), file=f)
    '''
    for (s,e) in inst.OD:
        for p in inst.P:
            for j in inst.N:
                for t in inst.TS:
                    for v in inst.VS:
                        if inst.b[s,e,p,v,t,j]() > 0:
                            print 'board', s,e,p,v,t,j,inst.b[s,e,p,v,t,j]()
    '''
    print('objective value', inst.obj(), file=f)
    #result.write()
    inst.display()
    exec_time = time.time() - start_time
    print('execution time', exec_time, 's')
Example #6
def run_scenario(input_files,
                 Solver,
                 timesteps,
                 scenario,
                 result_dir,
                 dt,
                 objective,
                 plot_tuples=None,
                 plot_sites_name=None,
                 plot_periods=None,
                 report_tuples=None,
                 report_sites_name=None):
    """ run an urbs model for given input, time steps and scenario

    Args:
        - input_files: filenames of input Excel spreadsheets
        - Solver: the user specified solver
        - timesteps: a list of timesteps, e.g. range(0,8761)
        - scenario: a scenario function that modifies the input data dict
        - result_dir: directory name for result spreadsheet and plots
        - dt: length of each time step (unit: hours)
        - objective: objective function chosen (either "cost" or "CO2")
        - plot_tuples: (optional) list of plot tuples (c.f. urbs.result_figures)
        - plot_sites_name: (optional) dict of names for sites in plot_tuples
        - plot_periods: (optional) dict of plot periods
          (c.f. urbs.result_figures)
        - report_tuples: (optional) list of (sit, com) tuples
          (c.f. urbs.report)
        - report_sites_name: (optional) dict of names for sites in
          report_tuples

    Returns:
        the urbs model instance
    """

    # sets a modeled year for non-intertemporal problems
    # (necessary for consistency)
    year = date.today().year

    # scenario name, read and modify data for scenario
    sce = scenario.__name__
    data = read_input(input_files, year)
    data = scenario(data)
    validate_input(data)
    validate_dc_objective(data, objective)

    # create model
    prob = create_model(data, dt, timesteps, objective)
    # prob_filename = os.path.join(result_dir, 'model.lp')
    # prob.write(prob_filename, io_options={'symbolic_solver_labels':True})

    # refresh time stamp string and create filename for logfile
    log_filename = os.path.join(result_dir, '{}.log').format(sce)

    # solve model and read results
    optim = SolverFactory(Solver)  # cplex, glpk, gurobi, ...
    optim = setup_solver(optim, logfile=log_filename)
    result = optim.solve(prob, tee=True)
    assert str(result.solver.termination_condition) == 'optimal'

    # save problem solution (and input data) to HDF5 file
    save(prob, os.path.join(result_dir, '{}.h5'.format(sce)))

    # write report to spreadsheet
    report(prob,
           os.path.join(result_dir, '{}.xlsx').format(sce),
           report_tuples=report_tuples,
           report_sites_name=report_sites_name)

    # result plots
    result_figures(prob,
                   os.path.join(result_dir, '{}'.format(sce)),
                   timesteps,
                   plot_title_prefix=sce.replace('_', ' '),
                   plot_tuples=plot_tuples,
                   plot_sites_name=plot_sites_name,
                   periods=plot_periods,
                   figure_size=(24, 9))

    return prob
Example #7
    def _get_task_data(self, ah, *args, **kwds):

        opt = kwds.pop('solver', kwds.pop('opt', None))
        if opt is None:
            raise ActionManagerError(
                "No solver passed to %s, use keyword option 'solver'" %
                (type(self).__name__))
        if isinstance(opt, str):
            opt = SolverFactory(opt, solver_io=kwds.pop('solver_io', None))

        #
        # The following block of code is taken from the OptSolver.solve()
        # method, which we do not directly invoke with this interface
        #

        #
        # If the inputs are models, then validate that they have been
        # constructed! Collect suffix names to try and import from solution.
        #
        for arg in args:
            if isinstance(arg, (Block, IBlock)):
                if isinstance(arg, Block):
                    if not arg.is_constructed():
                        raise RuntimeError(
                            "Attempting to solve model=%s with unconstructed "
                            "component(s)" % (arg.name))
                # import suffixes must be on the top-level model
                if isinstance(arg, Block):
                    model_suffixes = list(name for (name,comp) \
                                          in pyomo.core.base.suffix.\
                                          active_import_suffix_generator(arg))
                else:
                    assert isinstance(arg, IBlock)
                    model_suffixes = list(comp.storage_key for comp \
                                          in pyomo.core.base.suffix.\
                                          import_suffix_generator(arg,
                                                                  active=True,
                                                                  descend_into=False))
                if len(model_suffixes) > 0:
                    kwds_suffixes = kwds.setdefault('suffixes', [])
                    for name in model_suffixes:
                        if name not in kwds_suffixes:
                            kwds_suffixes.append(name)

        #
        # Handle ephemeral solver options here. These
        # will override whatever is currently in the options
        # dictionary, but we will reset these options to
        # their original value at the end of this method.
        #
        ephemeral_solver_options = {}
        ephemeral_solver_options.update(kwds.pop('options', {}))
        ephemeral_solver_options.update(
            OptSolver._options_string_to_dict(kwds.pop('options_string', '')))

        #
        # Force pyomo.opt to ignore tests for availability, at least locally.
        #
        del_available = bool('available' not in kwds)
        kwds['available'] = True
        opt._presolve(*args, **kwds)
        problem_file_string = None
        with open(opt._problem_files[0], 'r') as f:
            problem_file_string = f.read()

        #
        # Delete this option, to ensure that the remote worker does the check for
        # availability.
        #
        if del_available:
            del kwds['available']

        #
        # We can't pickle the options object itself - so extract a simple
        # dictionary of solver options and re-construct it on the other end.
        #
        solver_options = {}
        for key in opt.options:
            solver_options[key] = opt.options[key]
        solver_options.update(ephemeral_solver_options)

        #
        # NOTE: let the distributed node deal with the warm-start
        # pick up the warm-start file, if available.
        #
        warm_start_file_string = None
        warm_start_file_name = None
        if hasattr(opt, "_warm_start_solve"):
            if opt._warm_start_solve  and \
               (opt._warm_start_file_name is not None):
                warm_start_file_name = opt._warm_start_file_name
                with open(warm_start_file_name, 'r') as f:
                    warm_start_file_string = f.read()

        data = Bunch(opt=opt.type, \
                                   file=problem_file_string, \
                                   filename=opt._problem_files[0], \
                                   warmstart_file=warm_start_file_string, \
                                   warmstart_filename=warm_start_file_name, \
                                   kwds=kwds, \
                                   solver_options=solver_options, \
                                   suffixes=opt._suffixes)

        self._args[ah.id] = args
        self._opt_data[ah.id] = (opt._smap_id, opt._load_solutions,
                                 opt._select_index,
                                 opt._default_variable_value)

        return data
Example #8
def run_bunch(use_email=False):
    """Run a bunch of optimizations and analysis automated. """
    # Files Access | INITs
    proj_name = 'runbunch'
    base_directory = os.path.join('data', proj_name)
    data_spreadsheet = os.path.join(base_directory, 'data.xlsx')
    profile_log = Series(name='{}-profiler'.format(proj_name))

    # Email connection
    email_setup = {
        'sender': config['email']['s_user'],
        'send_pass': config['email']['s_pass'],
        'recipient': config['email']['r_user'],
        'smtp_addr': config['email']['smtp_addr'],
        'smtp_port': config['email']['smtp_port']
    }

    # DB connection
    _user = config['db']['user']
    _pass = config['db']['pass']
    _host = config['db']['host']
    _base = config['db']['base']
    engine_string = ('postgresql://{}:{}@{}/{}'
                     .format(_user, _pass, _host, _base))
    engine = create_engine(engine_string)

    # Input Data
    # ----------
    # Spatial
    street_lengths = arange(50, 300, 100)
    num_edge_xs = [5, ]
    # Non-spatial
    data = read_excel(data_spreadsheet)
    original_data = deepcopy(data)
    interesting_parameters = [
        {'df_name': 'commodity',
         'args': {'index': 'Heat',
                  'column': 'cost-inv-fix',
                  'lim_lo': 0.5, 'lim_up': 1.6, 'step': 0.5}},
        {'df_name': 'commodity',
         'args': {'index': 'Heat',
                  'column': 'cost-fix',
                  'lim_lo': 0.5, 'lim_up': 1.6, 'step': 0.5}}
        # {'df_name': 'commodity',
        #  'args': {'index': 'Elec',
        #           'column': 'cost-var',
        #           'step': 0.1}}
    ]
    # Model Creation
    solver = SolverFactory(config['solver'])
    solver = setup_solver(solver, log_to_console=False, guro_time_lim=14400)
    # Solve | Analyse | Store | Change | Repeat
    for dx in street_lengths:
        for len_x, len_y in [(dx, dx), (dx, dx / 2)]:
            run_summary = 'Run with x:{}, y:{}'.format(len_x, len_y)
            for num_edge_x in num_edge_xs:
                vdf, edf = create_square_grid(num_edge_x=num_edge_x, dx=len_x,
                                              dy=len_y)
                extend_edge_data(edf)
                dim_x = num_edge_x + 1
                dim_y = dim_x
                for _vdf in _source_variations(vdf, dim_x, dim_y):
                    for param in interesting_parameters:
                        para_name = param['args']['column']
                        print('{0}\n{3}x{3} grid\t'
                              'dx:{1}, dy:{2}, #e:{3}, src:-, par:{4}\n'
                              .format('=' * 10, len_x, len_y, num_edge_x, para_name))
                        counter = 1
                        for variant in parameter_range(data[param['df_name']],
                                                       **param['args']):
                            changed = (variant.loc[param['args']['index']]
                                       [param['args']['column']])
                            print('variant <{0}>:{1}'.format(counter, changed))
                            counter = counter + 1
                            # Use temporal local versions.
                            # As create_model is destructive. See Issue #31.
                            __vdf = deepcopy(_vdf)
                            __edf = deepcopy(edf)
                            __data = data.copy()
                            __data[param['df_name']] = variant
                            print('\tcreating model')
                            _p_model = timenow()
                            prob = create_model(__data, __vdf, __edf)
                            profile_log['model_creation'] = (
                                timenow() - _p_model)
                            _p_solve = timenow()
                            print('\tsolving...')
                            try:
                                results = solver.solve(prob, tee=True)
                            except Exception as solve_error:
                                print(solve_error)
                                if use_email:
                                    sub = run_summary + '[rivus][solve-error]'
                                    email_me(solve_error, subject=sub,
                                             **email_setup)
                                continue  # solver failed; skip this variant
                            if (results.solver.status != SolverStatus.ok):
                                status = 'error'
                                outcome = 'error'
                            else:
                                status = 'run'
                                if (results.solver.termination_condition !=
                                        TerminationCondition.optimal):
                                    outcome = 'optimum_not_reached'
                                else:
                                    outcome = 'optimum'
                            profile_log['solve'] = (timenow() - _p_solve)
                            # Plot
                            _p_plot = timenow()
                            plotcomms = ['Gas', 'Heat', 'Elec']
                            fig = None  # fallback if plotting fails below
                            try:
                                fig = fig3d(prob, plotcomms, linescale=8,
                                            use_hubs=True)
                            except Exception as plot_error:
                                print(plot_error)
                                if use_email:
                                    sub = run_summary + '[rivus][plot-error]'
                                    email_me(plot_error, subject=sub,
                                             **email_setup)
                            profile_log['3d_plot_prep'] = (timenow() - _p_plot)
                            # Graph
                            _p_graph = timenow()
                            graph_results = None  # fallback if graph analysis fails
                            try:
                                _, pmax, _, _ = get_constants(prob)
                                graphs = to_nx(_vdf, edf, pmax)
                                graph_results = minimal_graph_anal(graphs)
                            except Exception as graph_error:
                                print(graph_error)
                                if use_email:
                                    sub = run_summary + '[rivus][graph-error]'
                                    email_me(graph_error, subject=sub,
                                             **email_setup)
                            profile_log['all_graph_related'] = (
                                timenow() - _p_graph)
                            # Store
                            this_run = {
                                'comment': config['run_comment'],
                                'status': status,
                                'outcome': outcome,
                                'runner': 'lnksz',
                                'plot_dict': fig,
                                'profiler': profile_log}
                            try:
                                rdb.store(engine, prob, run_data=this_run,
                                          graph_results=graph_results)
                            except Exception as db_error:
                                print(db_error)
                                if use_email:
                                    sub = run_summary + '[rivus][db-error]'
                                    email_me(db_error, subject=sub,
                                             **email_setup)
                            del __vdf
                            del __edf
                            del __data
                            print('\tRun ended with: <{}>\n'.format(outcome))

                        data = original_data
                if use_email:
                    status_txt = ('Finished iteration with edge number {}\n'
                                  'did: [source-var, param-seek]\n'
                                  'from [street-length, dim-shift, source-var,'
                                  ' param-seek]'
                                  'dx:{}, dy:{}'
                                  .format(num_edge_x, len_x, len_y))
                    sub = run_summary + '[rivus][finish-a-src]'
                    email_me(status_txt, subject=sub, **email_setup)
        if use_email:
            status_txt = ('Finished iteration with street lengths {}-{}\n'
                          'did: [dim-shift, source-var, param-seek]\n'
                          'from [street-length, dim-shift, source-var,'
                          ' param-seek]'
                          .format(len_x, len_y))
            sub = run_summary + '[rivus][finish-a-len-combo]'
            email_me(status_txt, subject=sub, **email_setup)
    if use_email:
        status_txt = ('Finished run-bunch at {}\n'
                      'did: [street-length, dim-shift, source-var, param-seek]'
                      .format(datetime.now().strftime('%y%m%dT%H%M')))
        sub = run_summary + '[rivus][finish-run]'
        email_me(status_txt, subject=sub, **email_setup)
    print('End of runbunch.')
Example #9
def run_scenario(input_files, Solver, timesteps, scenario, result_dir, dt,
                 objective, plot_tuples=None,  plot_sites_name=None,
                 plot_periods=None, report_tuples=None,
                 report_sites_name=None):
    """ run an urbs model for given input, time steps and scenario

    Args:
        - input_files: filenames of input Excel spreadsheets
        - Solver: the user specified solver
        - timesteps: a list of timesteps, e.g. range(0,8761)
        - scenario: a scenario function that modifies the input data dict
        - result_dir: directory name for result spreadsheet and plots
        - dt: length of each time step (unit: hours)
        - objective: objective function chosen (either "cost" or "CO2")
        - plot_tuples: (optional) list of plot tuples (c.f. urbs.result_figures)
        - plot_sites_name: (optional) dict of names for sites in plot_tuples
        - plot_periods: (optional) dict of plot periods
          (c.f. urbs.result_figures)
        - report_tuples: (optional) list of (sit, com) tuples
          (c.f. urbs.report)
        - report_sites_name: (optional) dict of names for sites in
          report_tuples

    Returns:
        the urbs model instance
    """

    # sets a modeled year for non-intertemporal problems
    # (necessary for consistency)
    year = date.today().year

    # scenario name, read and modify data for scenario
    sce = scenario.__name__
    data = read_input(input_files, year)
    data = scenario(data)
    validate_input(data)

    # create model
    prob = create_model(data, dt, timesteps, objective)
    # prob.write('model.lp', io_options={'symbolic_solver_labels':True})

    # refresh time stamp string and create filename for logfile
    log_filename = os.path.join(result_dir, '{}.log').format(sce)

    # solve model and read results
    optim = SolverFactory(Solver)  # cplex, glpk, gurobi, ...
    optim = setup_solver(optim, logfile=log_filename)
    # Belerofontech: manually redirect cbc output (stdout only) to logfile
    from os import dup, dup2, close
    f = open(log_filename, 'w')
    orig_std_out = dup(1)
    dup2(f.fileno(), 1)
    # Belerofontech: manually redirect cbc output (stdout only) to logfile (end)
    result = optim.solve(prob, tee=True)
    # Belerofontech: restore stdout behaviour
    dup2(orig_std_out, 1)
    close(orig_std_out)
    f.close()
    # Belerofontech: restore stdout behaviour (end)
    assert str(result.solver.termination_condition) == 'optimal'

    # save problem solution (and input data) to HDF5 file
    save(prob, os.path.join(result_dir, '{}.h5'.format(sce)))

    # write report to spreadsheet
    report(
        prob,
        os.path.join(result_dir, '{}.xlsx').format(sce),
        report_tuples=report_tuples,
        report_sites_name=report_sites_name)

    # Belerofontech: avoid generating PDF and PNG results when not possible...
    if (platform.system() == 'Linux' and os.environ.get('DISPLAY', '') == ''
            and os.environ.get('MPLBACKEND', '') == ''):
        return prob
    # Belerofontech: avoid generating PDF and PNG results when not possible... (end)

    # result plots
    result_figures(
        prob,
        os.path.join(result_dir, '{}'.format(sce)),
        timesteps,
        plot_title_prefix=sce.replace('_', ' '),
        plot_tuples=plot_tuples,
        plot_sites_name=plot_sites_name,
        periods=plot_periods,
        figure_size=(24, 9))

    return prob
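
Several examples above pass the solver through a setup_solver helper that is not shown here. A hedged sketch of what such a helper might look like; the option strings follow common glpk/gurobi conventions and are assumptions, not the authors' actual implementation:

def setup_solver(optim, logfile='solver.log'):
    # route the solver's own log output into the given file
    if optim.name == 'gurobi':
        optim.set_options("logfile={}".format(logfile))
    elif optim.name == 'glpk':
        optim.set_options("log={}".format(logfile))
    # other solvers: rely on solve(..., tee=True) and Pyomo's own log handling
    return optim
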
Example #10
def prosumer_model_milp(price_photovoltaic, price_battery, inputs, filespath, volume_fee_n, capacity_fee_n, fix_fee_n,
                        volume_share, nameinstance):
    """
    Optimises the planning and operation of a photovoltaic installation with or without batteries, for one user.

    :param price_photovoltaic: price of the photovoltaic modules, in €/kWp.
    :param price_battery: price of batteries, in €/kWh.
    :param inputs: dictionary containing the inputs.
    :param filespath: path to the files containing all of the time series of hourly demand and hourly availability factor.
    :param volume_fee_n: distribution tariff at billing period n, in €/kWh.
    :param capacity_fee_n: capacity fee at billing period n, in €/kWp.
    :param fix_fee_n: fixed fee at billing period n, in €.
    :param volume_share: distribution-tariff structure flag (1: volumetric fee
        only, 0: capacity fee only, any other value: both fees apply).
    :param nameinstance: file containing the particular user characterised by hourly demand and hourly availability factor.
    :return: sizing and economic dictionaries.
    """

    price_pv = price_photovoltaic
    price_bat = price_battery

    years = int(inputs['years'])
    periods = int(inputs['periods'])
    battery_charge_efficiency = inputs['bat_eff_charge']
    battery_discharge_efficiency = inputs['bat_eff_discharge']
    bat_rate_discharge = inputs['bat_rate_discharge']
    bat_rate_charge = inputs['bat_rate_charge']
    discount_rate = inputs['discount_factor']
    operation_maintenance_pv = inputs['pv_om']
    operation_maintenance_bat = inputs['bat_om']
    subsidy_pv = inputs['pv_su']
    subsidy_bat = inputs['bat_su']
    energy_price = inputs['energy_price']
    selling_price = inputs['sell_price']

    battery_lifetime = 8
    battery_replacement = years / battery_lifetime

    if inputs['meters'] == 1:
        NM = True
    else:
        NM = False

    # TYPE OF MODEL
    model = AbstractModel()

    # SETS
    model.years = RangeSet(years)
    model.total_years = Param(initialize=years)
    model.periods = RangeSet(0, periods - 1)

    # PARAMETERS - USERS
    model.demand = Param(model.periods)
    model.potential_generation = Param(model.periods)
    # PARAMETERS - PV
    model.pv_price = Param(initialize=price_pv)
    model.pv_opman = Param(initialize=operation_maintenance_pv)
    model.pv_sub = Param(initialize=subsidy_pv)
    # PARAMETERS - BATTERY
    model.bat_eff_charge = Param(initialize=battery_charge_efficiency)
    model.bat_eff_discharge = Param(initialize=battery_discharge_efficiency)
    model.bat_price = Param(initialize=price_bat)
    model.bat_opman = Param(initialize=operation_maintenance_bat)
    model.bat_rate_discharge = Param(initialize=bat_rate_discharge)
    model.bat_rate_charge = Param(initialize=bat_rate_charge)
    model.bat_sub = Param(initialize=subsidy_bat)
    model.bat_replacement = Param(initialize=battery_replacement)
    model.bat_max_capacity = Param(initialize=30)
    # PARAMETERS - TARIFF
    model.tariff_en = Param(initialize=energy_price)
    if volume_share == 1:
        model.tariff_cap = Param(initialize=float(1e-5))
        model.tariff_vol = Param(initialize=volume_fee_n)
    elif volume_share == 0:
        model.tariff_cap = Param(initialize=capacity_fee_n)
        model.tariff_vol = Param(initialize=float(0))
    else:
        model.tariff_cap = Param(initialize=capacity_fee_n)
        model.tariff_vol = Param(initialize=volume_fee_n)
    model.tariff_fix = Param(initialize=fix_fee_n)
    model.tariff_out = Param(initialize=selling_price)
    # PARAMETERS ECONOMICS
    model.discount_factor = Param(initialize=discount_rate)

    # VARIABLES - PV
    model.pv_capacity = Var(within=NonNegativeReals, bounds=(0, 10))
    model.pv_output = Var(model.periods, within=NonNegativeReals)
    # VARIABLES - BATTERY
    model.bat_soc = Var(model.periods, within=NonNegativeReals)
    model.bat_outflow = Var(model.periods, within=NonNegativeReals)
    model.bat_inflow = Var(model.periods, within=NonNegativeReals)
    model.bat_binary = Var(model.periods, within=Binary)
    model.bat_capacity = Var(within=NonNegativeReals, bounds=(0, model.bat_max_capacity))
    model.bat_power_capacity_in = Var(within=NonNegativeReals)
    model.bat_power_capacity_out = Var(within=NonNegativeReals)
    # VARIABLES BALANCE
    model.electricity_imports = Var(model.periods, within=NonNegativeReals)
    model.electricity_exports = Var(model.periods, within=NonNegativeReals)
    # VARIABLES ECONOMICS
    if NM:
        model.apparent_consumption = Var(within=NonNegativeReals)
    model.dso_revenues = Var(within=NonNegativeReals)
    model.dso_revenues_original = Var(within=NonNegativeReals)
    model.user_costs = Var(within=NonNegativeReals)
    model.user_costs_original = Var(within=NonNegativeReals)
    model.user_costs_original_per_kWh = Var(within=NonNegativeReals)
    model.user_peak_demand = Var(within=NonNegativeReals)
    model.user_peak_demand_original = Var(within=NonNegativeReals)
    model.user_revenues = Var(within=NonNegativeReals)
    model.user_investment_costs = Var(within=NonNegativeReals)
    model.dre_electricity_costs = Var(within=NonNegativeReals)
    model.dre_om_costs = Var(within=NonNegativeReals)
    model.dre_selfconsumption = Var(within=NonNegativeReals)
    # VARIABLES DISTRIBUTION NETWORK ANALYSIS
    model.demand_one_year = Var(within=NonNegativeReals)
    model.imports_one_year = Var(within=NonNegativeReals)
    model.exports_one_year = Var(within=NonNegativeReals)
    model.lcoe = Var(within=NonNegativeReals)

    # EQUATIONS OF THE SYSTEM

    # OBJECTIVE
    def objective_function(model):
        """
        Minimisation of the LVOE (levelized value of electricity).

        :param model: pyomo model.
        :return: LVOE.
        """
        return (
                (model.user_costs - model.user_revenues)  # + model.user_peak_demand_original)
                /
                (sum(sum(model.demand[t] for t in model.periods) / (1 + model.discount_factor) ** y for y in
                     model.years))
        )

    # CONSTRAINTS
    def photovoltaic_production(model, t):
        """
        Computes the power output as a function of the pv capacity installed, which is also optimised.

        :param model: pyomo model.
        :param t: step of the optimisation (h)
        :return: computed hourly power output.
        """
        return model.pv_output[t] == model.pv_capacity * model.potential_generation[t]

    def battery_state_of_charge(model, t):
        """
        Establish the state of charge of the battery at every time step.

        :param model: pyomo model.
        :param t: step of the optimisation (h)
        :return: state of charge of the battery.
        """
        if t == 0:
            # start with an empty battery; alternative initialisation:
            # model.bat_capacity * 0.5
            # - (model.bat_outflow[t] / model.bat_eff_discharge)
            # + (model.bat_inflow[t] * model.bat_eff_charge)
            return model.bat_soc[t] == 0
        else:
            return model.bat_soc[t] == (
                    model.bat_soc[t - 1] -
                    (model.bat_outflow[t] / model.bat_eff_discharge) +
                    (model.bat_inflow[t] * model.bat_eff_charge)
            )

    def battery_max_charge(model, t):
        """The state of charge can never exceed the installed battery capacity."""
        return model.bat_soc[t] <= model.bat_capacity

    def battery_max_flow_in(model):
        """The charging power capacity follows from the battery capacity and the charge rate."""
        return model.bat_power_capacity_in == (model.bat_capacity / model.bat_rate_charge)

    def battery_max_flow_out(model):
        """The discharging power capacity follows from the battery capacity and the discharge rate."""
        return model.bat_power_capacity_out == (model.bat_capacity / model.bat_rate_discharge)

    def battery_max_inflow_capacity(model, t):
        """Hourly charging is limited by the charging power capacity."""
        return model.bat_inflow[t] <= model.bat_power_capacity_in

    def battery_max_outflow_capacity(model, t):
        """Hourly discharging is limited by the discharging power capacity."""
        return model.bat_outflow[t] <= model.bat_power_capacity_out

    def battery_outflow_control(model, t):
        """Allow discharging only when the charge/discharge binary is 0."""
        return model.bat_outflow[t] <= model.bat_max_capacity * (1 - model.bat_binary[t])

    def battery_inflow_control(model, t):
        """Allow charging only when the charge/discharge binary is 1, preventing simultaneous charging and discharging."""
        return model.bat_inflow[t] <= model.bat_max_capacity * model.bat_binary[t]

    def energy_balance(model, t):
        """Hourly demand is covered by imports, PV output and battery discharge, net of battery charging and exports."""
        return model.demand[t] == (
                model.electricity_imports[t] +
                model.pv_output[t] +
                model.bat_outflow[t] -
                model.bat_inflow[t] -
                model.electricity_exports[t]
        )

    def peak_demand_1(model, t):
        """The user's peak demand is at least as large as every hourly import."""
        return model.user_peak_demand >= model.electricity_imports[t]

    def peak_demand_2(model, t):
        """Bound the user's peak demand from above by the hourly demand."""
        return model.user_peak_demand <= model.demand[t]

    def peak_demand_original(model, t):
        """The original peak demand (without any installation) is at least as large as every hourly demand."""
        # Perform cheap fix!
        return model.user_peak_demand_original >= model.demand[t]

    def user_investment(model):
        """Investment costs: PV modules plus battery (including replacements), both net of subsidies."""
        return model.user_investment_costs == (
                (model.pv_capacity * model.pv_price * (1 - model.pv_sub)) +
                (model.bat_replacement * (model.bat_capacity * model.bat_price * (1 - model.bat_sub)))
        )

    def dso_revenue_computation(model):
        """
        Computes the revenues of the DSO for any billing period, coming from a particular user.

        :param model: model.
        :return: the revenues for a billing period.
        """
        if NM:
            return model.dso_revenues == (
                    (model.user_peak_demand * model.tariff_cap) +
                    (model.apparent_consumption * model.tariff_vol) +
                    model.tariff_fix
            )
        else:
            return model.dso_revenues == (
                    (model.user_peak_demand * model.tariff_cap) +
                    (sum(model.electricity_imports[t] for t in model.periods) * model.tariff_vol) +
                    model.tariff_fix
            )

    def dso_revenue_original_computation(model):
        """
        Computes the revenues of the DSO for any billing period, coming from a particular user, if no installation was
        deployed.

        :param model: model.
        :return: the revenues for a billing period.
        """
        return model.dso_revenues_original == (
                (model.user_peak_demand_original * model.tariff_cap) +
                (sum(model.demand[t] for t in model.periods) * model.tariff_vol) +
                model.tariff_fix
        )

    def user_costs_computation(model):
        """
        Computes the total (after the lifetime of the project) costs for a particular user.

        :param model: model.
        :return: the total costs for a particular user.
        """
        if NM:
            return model.user_costs == (
                    model.user_investment_costs +
                    sum(
                        (
                                (model.dso_revenues) +
                                (model.apparent_consumption * model.tariff_en) +
                                (model.dre_om_costs)
                        ) /
                        (1 + model.discount_factor) ** y for y in model.years)
            )
        else:
            return model.user_costs == (
                    model.user_investment_costs +
                    sum(
                        (
                                (model.dso_revenues) +
                                (sum(model.electricity_imports[t] for t in model.periods) * model.tariff_en) +
                                (model.dre_om_costs)
                        ) /
                        (1 + model.discount_factor) ** y for y in model.years)
            )

    def apparent_consumption_computation(model):
        """Apparent (net-metered) consumption is at least the annual imports minus exports."""
        return model.apparent_consumption >= \
               sum((model.electricity_imports[t] - model.electricity_exports[t]) for t in model.periods)

    def user_costs_original_computation(model):
        """Discounted total costs for the user if no installation was deployed."""
        return model.user_costs_original == (
            sum(
                (
                        (model.dso_revenues_original) +
                        (sum(model.demand[t] for t in model.periods) * model.tariff_en)
                ) /
                (1 + model.discount_factor) ** y for y in model.years)
        )

    def user_costs_original_per_kWh_computation(model):
        """Original user costs per discounted kWh of demand."""
        return model.user_costs_original_per_kWh == (
                model.user_costs_original
                /
                (sum(sum(model.demand[t] for t in model.periods) / (1 + model.discount_factor) ** y for y in
                     model.years))
        )

    def user_electricity_revenues(model):

        """
        Computes the total revenues for a particular user.

        :param model: model.
        :return: the total revenues for a particular user.
        """
        if NM:
            return model.user_revenues == float(0)
        else:
            return model.user_revenues == (
                sum(
                    (sum(model.electricity_exports[t] for t in model.periods) * model.tariff_out) /
                    (1 + model.discount_factor) ** y for y in model.years)
            )

    def operation_maintenance_costs(model):
        """Yearly operation and maintenance costs, proportional to the installed PV and battery capacities."""
        return model.dre_om_costs == (
                (model.pv_capacity * model.pv_opman) +
                (model.bat_capacity * model.bat_opman)
        )

    def lcoe_computation(model):
        """Levelised cost of electricity: total user costs per discounted kWh of demand."""
        return model.lcoe == (
                (model.user_costs)
                /
                sum(sum(model.demand[t] for t in model.periods) / (1 + model.discount_factor) ** y for y in
                    model.years)
        )

    def demand_one_year(model):
        """Total demand over one year."""
        return model.demand_one_year == sum(model.demand[t] for t in model.periods)

    def imports_one_year(model):
        """Total grid imports over one year."""
        return model.imports_one_year == sum(model.electricity_imports[t] for t in model.periods)

    def exports_one_year(model):
        """Total grid exports over one year."""
        return model.exports_one_year == sum(model.electricity_exports[t] for t in model.periods)

    # EQUATION CALLING
    model.eqn_objective_function = Objective(rule=objective_function, sense=1)
    model.eqn_photovoltaic_production = Constraint(model.periods, rule=photovoltaic_production)
    model.eqn_battery_state_of_charge = Constraint(model.periods, rule=battery_state_of_charge)
    model.eqn_battery_max_charge = Constraint(model.periods, rule=battery_max_charge)
    model.eqn_battery_max_inflow_capacity = Constraint(model.periods, rule=battery_max_inflow_capacity)
    model.eqn_battery_max_outflow_capacity = Constraint(model.periods, rule=battery_max_outflow_capacity)
    model.eqn_battery_max_flow_in = Constraint(rule=battery_max_flow_in)
    model.eqn_battery_max_flow_out = Constraint(rule=battery_max_flow_out)
    model.eqn_battery_outflow_control = Constraint(model.periods, rule=battery_outflow_control)
    model.eqn_battery_inflow_control = Constraint(model.periods, rule=battery_inflow_control)
    model.eqn_energy_balance = Constraint(model.periods, rule=energy_balance)
    model.eqn_peak_demand_1 = Constraint(model.periods, rule=peak_demand_1)
    model.eqn_peak_demand_original = Constraint(model.periods, rule=peak_demand_original)
    model.eqn_user_investment = Constraint(rule=user_investment)
    model.eqn_dso_revenue_computation = Constraint(rule=dso_revenue_computation)
    model.eqn_dso_revenue_original_computation = Constraint(rule=dso_revenue_original_computation)
    model.eqn_user_costs_computation = Constraint(rule=user_costs_computation)
    model.eqn_user_costs_original_computation = Constraint(rule=user_costs_original_computation)
    model.eqn_user_costs_original_per_kWh_computation = Constraint(rule=user_costs_original_per_kWh_computation)
    model.eqn_electricity_revenues = Constraint(rule=user_electricity_revenues)
    model.eqn_operation_maintenance_costs = Constraint(rule=operation_maintenance_costs)
    model.eqn_lcoe_computation = Constraint(rule=lcoe_computation)
    model.eqn_demand_one_year = Constraint(rule=demand_one_year)
    model.eqn_imports_one_year = Constraint(rule=imports_one_year)
    model.eqn_exports_one_year = Constraint(rule=exports_one_year)
    if NM:
        model.eqn_apparent_consumption_computation = Constraint(rule=apparent_consumption_computation)

    instance = model.create_instance(os.path.join(filespath, nameinstance))  # path_files + nameinstance)

    #opt = SolverFactory('glpk')
    opt = SolverFactory('cplex')

    time_before = time.time()
    
    opt.options['mipgap'] = 0.001
    opt.options['threads'] = 1
    
    #opt.solve(instance, tee=True)
    opt.solve(instance)
    
    print('Optimisation of prosumer {} took {} seconds.'.format(nameinstance, time.time() - time_before))

    # Sizing
    sizing = dict()
    sizing['pv'] = instance.pv_capacity.get_values()[None]
    sizing['battery'] = instance.bat_capacity.get_values()[None]

    # Economic analysis
    economic = dict()
    economic['user_costs'] = instance.user_costs.get_values()[None]
    economic['user_costs_original'] = instance.user_costs_original.get_values()[None]
    economic['user_costs_original_kWh'] = instance.user_costs_original_per_kWh.get_values()[None]
    economic['user_revenues'] = instance.user_revenues.get_values()[None]
    economic['dso_revenues'] = instance.dso_revenues.get_values()[None]
    economic['dso_revenues_original'] = instance.dso_revenues_original.get_values()[None]
    economic['lvoe'] = instance.eqn_objective_function.expr()
    economic['lcoe'] = instance.lcoe.get_values()[None]
    economic['dist_volume'] = volume_fee_n
    economic['dist_capacity'] = capacity_fee_n
    economic['demand_1year'] = instance.demand_one_year.get_values()[None]
    economic['imports_1year'] = instance.imports_one_year.get_values()[None]
    economic['exports_1year'] = instance.exports_one_year.get_values()[None]
    economic['peak_demand_original'] = instance.user_peak_demand_original.get_values()[None]
    economic['peak_demand'] = instance.user_peak_demand.get_values()[None]
    economic['scenario'] = nameinstance

    # Demand and production profiles
    # profiles = dict()
    # profiles['prosumer'] = nameinstance
    # profiles['demand'] = instance.demand.extract_values()
    # profiles['solar'] = instance.potential_generation.extract_values()
    #
    # import pickle
    # os.makedirs('../../prosumers', exist_ok=True)
    # with open('.prosumers/_{}.p'.format(nameinstance), 'wb') as f:
    #     pickle.dump(profiles, f)
   
    return sizing, economic
Example #11
import pyomo.environ
import urbs
from pyomo.core import Constraint
from pyomo.opt.base import SolverFactory

data = urbs.read_excel('mimo-example.xlsx')
prob = urbs.create_model(data, timesteps=range(1, 8), dual=True)

optim = SolverFactory('glpk')
result = optim.solve(prob, tee=True)

res_vertex_duals = urbs.get_entity(prob, 'res_vertex')
marg_costs = res_vertex_duals.xs(('Elec', 'Demand'), level=('com', 'com_type'))
print(marg_costs)
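
This example retrieves duals through urbs helpers; on a plain Pyomo LP the same information can be collected with an import Suffix. A minimal sketch with an illustrative one-variable model:

from pyomo.environ import (ConcreteModel, Var, Constraint, Objective,
                           Suffix, NonNegativeReals, minimize)
from pyomo.opt import SolverFactory

m = ConcreteModel()
m.x = Var(within=NonNegativeReals)
m.demand = Constraint(expr=m.x >= 3)        # illustrative demand constraint
m.cost = Objective(expr=2 * m.x, sense=minimize)
m.dual = Suffix(direction=Suffix.IMPORT)    # ask the solver to return duals

SolverFactory('glpk').solve(m)
print(m.dual[m.demand])                     # marginal cost of one more unit of demand
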
Example #12
    def _perform_queue(self, ah, *args, **kwds):
        """
        Perform the queue operation.  This method returns the ActionHandle,
        and the ActionHandle status indicates whether the queue was successful.
        """
        solver = kwds.pop('solver', kwds.pop('opt', None))
        if solver is None:
            raise ActionManagerError(
                "No solver passed to %s, use keyword option 'solver'" %
                (type(self).__name__))
        if not isinstance(solver, six.string_types):
            solver_name = solver.name
            if solver_name == 'asl':
                solver_name = \
                    os.path.basename(solver.executable())
        else:
            solver_name = solver
            solver = None

        #
        # Handle ephemeral solver options here. These
        # will override whatever is currently in the options
        # dictionary, but we will reset these options to
        # their original value at the end of this method.
        #
        user_solver_options = {}
        # make sure to transfer the options dict on the
        # solver plugin if the user does not use a string
        # to identify the neos solver. The ephemeral
        # options must also go after these.
        if solver is not None:
            user_solver_options.update(solver.options)
        user_solver_options.update(kwds.pop('options', {}))
        user_solver_options.update(
            OptSolver._options_string_to_dict(kwds.pop('options_string', '')))

        opt = SolverFactory('_neos')
        opt._presolve(*args, **kwds)
        #
        # Map NEOS name, using lowercase convention in Pyomo
        #
        if len(self._solvers) == 0:
            for name in self.kestrel.solvers():
                if name.endswith('AMPL'):
                    self._solvers[name[:-5].lower()] = name[:-5]
        if solver_name not in self._solvers:
            raise ActionManagerError(
                "Solver '%s' is not recognized by NEOS. "
                "Solver names recognized:\n%s" %
                (solver_name, str(sorted(self._solvers.keys()))))
        #
        # Apply kestrel
        #
        os.environ[
            'kestrel_options'] = 'solver=%s' % self._solvers[solver_name]
        solver_options = {}
        for key in opt.options:
            solver_options[key] = opt.options[key]
        solver_options.update(user_solver_options)

        options = opt._get_options_string(solver_options)
        # GH: Should we really be modifying the environment
        #     for this manager (knowing that we are not
        #     executing locally)
        if not options == "":
            os.environ[self._solvers[solver_name].lower()+'_options'] = \
                opt._get_options_string()
        xml = self.kestrel.formXML(opt._problem_files[0])
        (jobNumber, password) = self.kestrel.submit(xml)
        ah.job = jobNumber
        ah.password = password
        #
        # Store action handle, and return
        #
        self._ah[jobNumber] = ah
        self._neos_log[jobNumber] = (0, "")
        self._opt_data[jobNumber] = (opt, opt._smap_id, opt._load_solutions,
                                     opt._select_index,
                                     opt._default_variable_value)
        self._args[jobNumber] = args
        return ah
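
Outside of this manager class, the same NEOS submission can be requested through the public SolverManagerFactory interface. A minimal sketch; the model and solver name are illustrative, and recent Pyomo versions require a NEOS_EMAIL environment variable plus network access:

import os
from pyomo.environ import ConcreteModel, Var, Objective, NonNegativeReals
from pyomo.opt import SolverManagerFactory

os.environ.setdefault('NEOS_EMAIL', 'user@example.com')  # contact address required by NEOS

m = ConcreteModel()
m.x = Var(within=NonNegativeReals)
m.obj = Objective(expr=(m.x - 1) ** 2)

with SolverManagerFactory('neos') as manager:
    results = manager.solve(m, opt='ipopt')   # solved remotely on the NEOS servers
print(results.solver.termination_condition)
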
Example #13
def run_scenario(input_file, solver, timesteps, scenario, result_dir, dt,
                 objective,
                 plot_tuples=None,  plot_sites_name=None, plot_periods=None,
                 report_tuples=None, report_sites_name=None):
    """ run an urbs model for given input, time steps and scenario

    Args:
        input_file: filename to an Excel spreadsheet for urbs.read_excel
        solver: the user specified solver
        timesteps: a list of timesteps, e.g. range(0,8761)
        scenario: a scenario function that modifies the input data dict
        result_dir: directory name for result spreadsheet and plots
        dt: length of each time step (unit: hours)
        objective: objective function chosen (either "cost" or "CO2")
        plot_tuples: (optional) list of plot tuples (c.f. urbs.result_figures)
        plot_sites_name: (optional) dict of names for sites in plot_tuples
        plot_periods: (optional) dict of plot periods (c.f. urbs.result_figures)
        report_tuples: (optional) list of (sit, com) tuples (c.f. urbs.report)
        report_sites_name: (optional) dict of names for sites in report_tuples

    Returns:
        the urbs model instance
    """

    # start time measurement
    t_start = time.time()

    # scenario name, read and modify data for scenario
    sce = scenario.__name__
    data = read_excel(input_file)
    data = scenario(data)
    validate_input(data)

    # measure time to read file
    t_read = time.time() - t_start
    print("Time to read file: %.2f sec" % t_read)

    t = time.time()
    # create model
    prob = create_model(data, dt, timesteps, objective)
    # prob.write('model.lp', io_options={'symbolic_solver_labels':True})

    # measure time to create model
    t_model = time.time() - t
    print("Time to create model: %.2f sec" % t_model)

    # refresh time stamp string and create filename for logfile
    # now = prob.created
    log_filename = os.path.join(result_dir, '{}.log').format(sce)

    t = time.time()

    # solve model and read results
    optim = SolverFactory(solver)  # cplex, glpk, gurobi, ...
    optim = setup_solver(optim, logfile=log_filename)
    result = optim.solve(prob, tee=True)
    assert str(result.solver.termination_condition) == 'optimal'

    # measure time to solve 
    t_solve = time.time() - t
    print("Time to solve model: %.2f sec" % t_solve)

    t = time.time()

    # save problem solution (and input data) to HDF5 file
    save(prob, os.path.join(result_dir, '{}.h5'.format(sce)))

    # # measure time to save solution
    # save_time = time.time() - t
    # print("Time to save solution in HDF5 file: %.2f sec" % save_time)

    # t = time.time()

    # write report to spreadsheet
    report(
        prob,
        os.path.join(result_dir, '{}.xlsx').format(sce),
        report_tuples=report_tuples,
        report_sites_name=report_sites_name)

    # result plots
    result_figures(
        prob,
        os.path.join(result_dir, '{}'.format(sce)),
        timesteps,
        plot_title_prefix=sce.replace('_', ' '),
        plot_tuples=plot_tuples,
        plot_sites_name=plot_sites_name,
        periods=plot_periods,
        figure_size=(24, 9))

    t_repplot = time.time() - t
    print("Time to report and plot: %.2f sec" % t_repplot)

    # measure time to run scenario
    t_sce = time.time() - t_start
    print("Time to run scenario: %.2f sec" % t_sce)

    # write time measurements into file "timelog.txt" in result directory
    with open(os.path.join(result_dir, "timelog.txt"), "a") as timelog:
        timelog.write("%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%s\n"
                      % (t_sce, t_read, t_model, t_solve, t_repplot, sce))

    return prob
Example #14
def apply_optimizer(data, instance=None):
    """
    Perform optimization with a concrete instance.

    Required:
        data:       Configuration object holding solver and runtime options.
        instance:   Problem instance.

    Returned:
        results:    Optimization results.
        opt:        Name of the solver that was applied.
        local:      Local data taken from the configuration object.
    """
    #
    if not data.options.runtime.logging == 'quiet':
        sys.stdout.write('[%8.2f] Applying solver\n' %
                         (time.time() - start_time))
        sys.stdout.flush()
    #
    #
    # Create Solver and Perform Optimization
    #
    solver = data.options.solvers[0].solver_name
    if solver is None:
        raise ValueError("Problem constructing solver:  no solver specified")

    if len(data.options.solvers[0].suffixes) > 0:
        for suffix_name in data.options.solvers[0].suffixes:
            if suffix_name[0] in ['"', "'"]:
                suffix_name = suffix_name[1:-1]
            # Don't redeclare the suffix if it already exists
            suffix = getattr(instance, suffix_name, None)
            if suffix is None:
                setattr(instance, suffix_name, Suffix(direction=Suffix.IMPORT))
            else:
                raise ValueError("Problem declaring solver suffix %s. A component "\
                                  "with that name already exists on model %s."
                                 % (suffix_name, instance.name))

    if getattr(data.options.solvers[0].options, 'timelimit', 0) == 0:
        data.options.solvers[0].options.timelimit = None
    #
    # Default results
    #
    results = None
    #
    # Figure out the type of solver manager
    #
    solver_mngr_name = None
    if data.options.solvers[0].manager is None:
        solver_mngr_name = 'serial'
    elif data.options.solvers[0].manager not in SolverManagerFactory:
        raise ValueError("Unknown solver manager %s" %
                         data.options.solvers[0].manager)
    else:
        solver_mngr_name = data.options.solvers[0].manager
    #
    # Create the solver manager
    #
    solver_mngr_kwds = {}
    with SolverManagerFactory(solver_mngr_name,
                              **solver_mngr_kwds) as solver_mngr:
        if solver_mngr is None:
            msg = "Problem constructing solver manager '%s'"
            raise ValueError(msg % str(data.options.solvers[0].manager))
        #
        # Setup keywords for the solve
        #
        keywords = {}
        if (data.options.runtime.keep_files or \
            data.options.postsolve.print_logfile):
            keywords['keepfiles'] = True
        if data.options.model.symbolic_solver_labels:
            keywords['symbolic_solver_labels'] = True
        if data.options.model.file_determinism != 1:
            keywords['file_determinism'] = data.options.model.file_determinism
        keywords['tee'] = data.options.runtime.stream_output
        keywords['timelimit'] = getattr(data.options.solvers[0].options,
                                        'timelimit', 0)
        keywords['report_timing'] = data.options.runtime.report_timing

        # FIXME: solver_io and executable are not being used
        #        in the case of a non-serial solver manager

        #
        # Call the solver
        #
        if solver_mngr_name == 'serial':
            #
            # If we're running locally, then we create the
            # optimizer and pass it into the solver manager.
            #
            sf_kwds = {}
            sf_kwds['solver_io'] = data.options.solvers[0].io_format
            if data.options.solvers[0].solver_executable is not None:
                sf_kwds['executable'] = data.options.solvers[
                    0].solver_executable
            with SolverFactory(solver, **sf_kwds) as opt:
                if opt is None:
                    raise ValueError("Problem constructing solver `%s`" %
                                     str(solver))

                for name in registered_callback:
                    opt.set_callback(name, registered_callback[name])

                if len(data.options.solvers[0].options) > 0:
                    opt.set_options(data.options.solvers[0].options)
                    #opt.set_options(" ".join("%s=%s" % (key, value)
                    #                         for key, value in data.options.solvers[0].options.iteritems()
                    #                         if not key == 'timelimit'))
                if data.options.solvers[0].options_string is not None:
                    opt.set_options(data.options.solvers[0].options_string)
                #
                # Use the solver manager to call the optimizer
                #
                results = solver_mngr.solve(instance, opt=opt, **keywords)
        else:
            #
            # Get the solver option arguments
            #
            if (len(data.options.solvers[0].options) > 0 and
                    data.options.solvers[0].options_string is not None):
                # If both 'options' and 'options_string' were specified, then create a
                # single options string that is passed to the solver.
                ostring = " ".join("%s=%s" % (key, value) for key, value in
                                   data.options.solvers[0].options.items()
                                   if value is not None)
                keywords['options'] = (ostring + ' ' +
                                       data.options.solvers[0].options_string)
            elif len(data.options.solvers[0].options) > 0:
                keywords['options'] = data.options.solvers[0].options
            else:
                keywords['options'] = data.options.solvers[0].options_string
            #
            # If we're running remotely, then we pass the optimizer name to the solver
            # manager.
            #
            results = solver_mngr.solve(instance, opt=solver, **keywords)

    if data.options.runtime.profile_memory >= 1 and pympler_available:
        global memory_data
        mem_used = pympler.muppy.get_size(pympler.muppy.get_objects())
        if mem_used > data.local.max_memory:
            data.local.max_memory = mem_used
        print("   Total memory = %d bytes following optimization" % mem_used)

    return Bunch(results=results, opt=solver, local=data.local)
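For reference, the serial branch above boils down to the plain SolverFactory / SolverManagerFactory pattern. A minimal, self-contained sketch under the assumption that glpk is installed, using a toy model in place of the scripting data object:

from pyomo.environ import (ConcreteModel, Constraint, NonNegativeReals,
                           Objective, SolverFactory, SolverManagerFactory, Var)

model = ConcreteModel()
model.x = Var(within=NonNegativeReals)
model.con = Constraint(expr=model.x >= 3)
model.obj = Objective(expr=model.x)  # minimization is the default sense

# serial path: build the optimizer locally and hand it to the solver manager
with SolverManagerFactory('serial') as manager, SolverFactory('glpk') as opt:
    results = manager.solve(model, opt=opt, tee=False)

print(results.solver.termination_condition)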
Example No. 15
    def process(self, data):
        self._worker_task_return_queue = self._current_task_client
        data = pyutilib.misc.Bunch(**data)

        if hasattr(data, 'action') and \
           data.action == 'Pyomo_pyro_mip_server_shutdown':
            print("Received shutdown request")
            self._worker_shutdown = True
            return

        time_start = time.time()
        with pyutilib.services.TempfileManager.push():
            #
            # Construct the solver on this end, based on the input
            # type stored in "data.opt".  This is slightly more
            # complicated for asl-based solvers, whose real executable
            # name is stored in data.solver_options["solver"].
            #
            with SolverFactory(data.opt) as opt:

                if opt is None:
                    self._worker_error = True
                    return TaskProcessingError(
                        "Problem constructing solver `" + data.opt + "'")

                # here is where we should set any options required by
                # the solver, available as specific attributes of the
                # input data object.
                solver_options = data.solver_options
                del data.solver_options
                for key, value in solver_options.items():
                    setattr(opt.options, key, value)

                problem_filename_suffix = os.path.split(data.filename)[1]
                temp_problem_filename = \
                    pyutilib.services.TempfileManager.\
                    create_tempfile(suffix="."+problem_filename_suffix)

                with open(temp_problem_filename, 'w') as f:
                    f.write(data.file)

                if data.warmstart_filename is not None:
                    warmstart_filename_suffix = \
                        os.path.split(data.warmstart_filename)[1]
                    temp_warmstart_filename = \
                        pyutilib.services.TempfileManager.\
                        create_tempfile(suffix="."+warmstart_filename_suffix)
                    with open(temp_warmstart_filename, 'w') as f:
                        f.write(data.warmstart_file)
                    assert opt.warm_start_capable()
                    assert (('warmstart' in data.kwds) and \
                            data.kwds['warmstart'])
                    data.kwds['warmstart_file'] = temp_warmstart_filename

                now = datetime.datetime.now()
                if self._verbose:
                    print(
                        str(now) + ": Applying solver=" + data.opt +
                        " to solve problem=" + temp_problem_filename)
                    sys.stdout.flush()
                results = opt.solve(temp_problem_filename, **data.kwds)
                assert results._smap_id is None
                # NOTE: This results object contains solutions,
                # because no model is provided (just a model file).
                # Also, the results._smap_id value is None.

        results.pyomo_solve_time = time.time() - time_start

        now = datetime.datetime.now()
        if self._verbose:
            print(
                str(now) + ": Solve completed - number of solutions=" +
                str(len(results.solution)))
            sys.stdout.flush()

        # PYTHON3 / PYRO4 Fix
        # The default serializer in Pyro4 is not pickle and does not
        # support user defined types (e.g., the results object).
        # Therefore, we pickle the results object before sending it
        # over the wire so the user does not need to change the Pyro
        # serializer.
        results = pickle.dumps(results, protocol=pickle.HIGHEST_PROTOCOL)

        if using_pyro4:
            #
            # The standard bytes object returned by pickle.dumps must be
            # converted to base64 to avoid errors sending over the
            # wire with Pyro4. Also, the base64 bytes must be wrapped
            # in a str object to avoid a different set of Pyro4 errors
            # related to its default serializer (Serpent)
            if six.PY3:
                results = str(base64.encodebytes(results))
            else:
                results = base64.encodestring(results)

        return results
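On the client side, the base64/pickle wrapping shown above has to be undone before the results object can be used. A standalone sketch of that round-trip (Python 3, without Pyro, using a plain dict as a stand-in for the results object):

import ast
import base64
import pickle

payload = {'status': 'ok'}  # stand-in for the solver results object

# server side, as above: pickle, base64-encode, wrap in str() for Serpent
wire = str(base64.encodebytes(pickle.dumps(payload,
                                           protocol=pickle.HIGHEST_PROTOCOL)))

# client side: recover the bytes from their repr, then decode and unpickle
raw = ast.literal_eval(wire)
restored = pickle.loads(base64.decodebytes(raw))
assert restored == payload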
Example No. 16
    def test_df_insert_query(self):
        """Are the stored dataframes and the retrieved ones identical?

        - Comparison form of frames is *after* create_model. (index is set)
        - Comparison form expects that input dataframes only have meaningful
          columns. (See pull request #23)
        - Only implemented dataframes are tested.

        Note
        ----
        Requires a ``config.json`` file in the root of rivus-repo with the
        database credentials. For Example:
        ::

            {
                "db" : {
                    "user" : "postgres",
                    "pass" : "postgres",
                    "host" : "localhost",
                    "base" : "rivus"
                }
            }
        """
        conf_path = os.path.join(pdir(pdir(pdir(__file__))), 'config.json')
        config = {}
        with open(conf_path) as conf:
            config = json.load(conf)
        # DB connection
        _user = config['db']['user']
        _pass = config['db']['pass']
        _host = config['db']['host']
        _base = config['db']['base']
        engine_string = ('postgresql://{}:{}@{}/{}'.format(
            _user, _pass, _host, _base))
        engine = create_engine(engine_string)

        proj_name = 'mnl'
        base_directory = os.path.join('data', proj_name)
        data_spreadsheet = os.path.join(base_directory, 'data.xlsx')
        data = read_excel(data_spreadsheet)
        # data_bup = data.copy()
        vertex, edge = square_grid()
        vert_init_commodities(vertex, ['Elec', 'Gas'], [('Elec', 0, 100000)])
        extend_edge_data(edge)
        prob = create_model(data, vertex, edge)
        solver = SolverFactory(config['solver'])
        solver = setup_solver(solver, log_to_console=False)
        solver.solve(prob, tee=True)

        test_id = rdb.init_run(engine, runner='Unittest')
        rdb.store(engine, prob, run_id=test_id)

        this_df = None
        dfs = data.keys()
        for df in dfs:
            if df == 'hub':
                continue  # is not implemented yet
            this_df = data[df]
            print(df)
            re_df = rdb.df_from_table(engine, df, test_id)
            self.assertTrue(all(
                this_df.fillna(0) == re_df.reindex(this_df.index).fillna(0)),
                            msg=('{}: Original and retrieved frames'
                                 ' are not identical'.format(df)))
        # Add implemented result dataframes
        cost, pmax, kappa_hub, kappa_process = get_constants(prob)
        source, _, _, _, _ = get_timeseries(prob)
        results = dict(source=source,
                       cost=cost,
                       pmax=pmax,
                       kappa_hub=kappa_hub,
                       kappa_process=kappa_process)
        dfs = ['source', 'cost', 'pmax', 'kappa_hub', 'kappa_process']
        for df in dfs:
            this_df = results[df]
            print(df)
            re_df = rdb.df_from_table(engine, df, test_id)
            self.assertTrue(all(
                this_df.fillna(0) == re_df.reindex(this_df.index).fillna(0)),
                            msg=('{}: Original and retrieved frames'
                                 ' are not identical'.format(df)))
Example No. 17
# Equation 10
m.C2Constraint = Constraint(m.Tm, rule=C2_constraint_rule)
# Equation 11
#m.dsmup2Constraint = Constraint(m.tm, rule=dsmup2_constraint_rule)

# Power
m.power1Constraint = Constraint(m.tm, rule=power1_constraint_rule)
m.power2Constraint = Constraint(m.tm, rule=power2_constraint_rule)

# Objective

m.obj = Objective(rule=obj_expression_cost, sense=minimize)

###############################################################################
#                                    SOLVE
# solve model and read results

optim = SolverFactory('cbc')
result = optim.solve(m, tee=False)

# Check obj or var example
print('Objective:', m.obj())

output(m)

filename = os.path.join(os.path.dirname(__file__),
                        './Comparisson/dsm_pyomo.lp')
m.write(filename, io_options={'symbolic_solver_labels': True})

#import pdb;    pdb.set_trace()
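Before trusting m.obj(), it is usually worth checking the solver outcome explicitly. A short sketch using Pyomo's standard status fields, applied to the result object returned by the solve above:

from pyomo.opt import SolverStatus, TerminationCondition

if (result.solver.status == SolverStatus.ok and
        result.solver.termination_condition == TerminationCondition.optimal):
    print('Optimal objective:', m.obj())
else:
    print('Solve ended with status {} / {}'.format(
        result.solver.status, result.solver.termination_condition))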
Example No. 18
    vert_init_commodities(vertex, ('Elec', 'Gas', 'Heat'),
                          [('Elec', 0, 100000), ('Gas', 0, 5000)])
    profile_log['grid_data'] = timenow() - extendgrid

    # Non spatial input
    data_spreadsheet = os.path.join(base_directory, 'data.xlsx')
    excelread = timenow()
    data = read_excel(data_spreadsheet)
    profile_log['excel_read'] = timenow() - excelread

    # Create and solve model
    rivusmain = timenow()
    prob = create_model(data, vertex, edge)
    profile_log['rivus_main'] = timenow() - rivusmain

    solver = SolverFactory(config['solver'])
    solver = setup_solver(solver)

    startsolver = timenow()
    result = solver.solve(prob, tee=True)
    profile_log['solver'] = timenow() - startsolver

    # Handling results
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    if SAVE_PICKLE:
        print('Saving pickle...')
        rivuspickle = timenow()
        save(prob, os.path.join(result_dir, 'prob.pgz'))
        profile_log['save_data'] = timenow() - rivuspickle
Example No. 19
def EFAlgorithmBuilder(options, scenario_tree):

    solution_writer_plugins = ExtensionPoint(ISolutionWriterExtension)
    for plugin in solution_writer_plugins:
        plugin.disable()

    solution_plugins = []
    if len(options.solution_writer) > 0:
        for this_extension in options.solution_writer:
            if this_extension in sys.modules:
                print("User-defined EF solution writer module="
                      +this_extension+" already imported - skipping")
            else:
                print("Trying to import user-defined EF "
                      "solution writer module="+this_extension)
                # make sure "." is in the PATH.
                original_path = list(sys.path)
                sys.path.insert(0,'.')
                pyutilib.misc.import_file(this_extension)
                print("Module successfully loaded")
                sys.path[:] = original_path # restore to what it was

            # now that we're sure the module is loaded, re-enable this
            # specific plugin.  recall that all plugins are disabled
            # by default in phinit.py, for various reasons. if we want
            # them to be picked up, we need to enable them explicitly.
            import inspect
            module_to_find = this_extension
            if module_to_find.endswith(".py"):
                # drop the ".py" suffix; rstrip(".py") would also strip trailing 'p'/'y' characters
                module_to_find = module_to_find[:-3]
            if module_to_find.find("/") != -1:
                module_to_find = module_to_find.split("/")[-1]

            for name, obj in inspect.getmembers(sys.modules[module_to_find], inspect.isclass):
                import pyomo.util
                # the second condition gets around goofyness related to issubclass returning
                # True when the obj is the same as the test class.
                if issubclass(obj, pyomo.util.plugin.SingletonPlugin) and name != "SingletonPlugin":
                    for plugin in solution_writer_plugins(all=True):
                        if isinstance(plugin, obj):
                            plugin.enable()
                            solution_plugins.append(plugin)

    ef_solver = SolverFactory(options.solver_type,
                              solver_io=options.solver_io)
    if isinstance(ef_solver, UnknownSolver):
        raise ValueError("Failed to create solver of type="+
                         options.solver_type+
                         " for use in extensive form solve")
    if len(options.solver_options) > 0:
        print("Initializing ef solver with options="
              +str(options.solver_options))
        ef_solver.set_options("".join(options.solver_options))
    if options.mipgap is not None:
        if (options.mipgap < 0.0) or (options.mipgap > 1.0):
            raise ValueError("Value of the mipgap parameter for the EF "
                             "solve must be on the unit interval; "
                             "value specified="+str(options.mipgap))
        ef_solver.options.mipgap = float(options.mipgap)

    ef_solver_manager = SolverManagerFactory(options.solver_manager_type,
                                             host=options.pyro_host,
                                             port=options.pyro_port)
    if ef_solver_manager is None:
        raise ValueError("Failed to create solver manager of type="
                         +options.solver_type+
                         " for use in extensive form solve")

    binding_instance = CreateExtensiveFormInstance(options, scenario_tree)

    ef = ExtensiveFormAlgorithm(options,
                                binding_instance,
                                scenario_tree,
                                ef_solver_manager,
                                ef_solver,
                                solution_plugins=solution_plugins)

    return ef
Example No. 20
    def _perform_queue(self, ah, *args, **kwds):
        """
        Perform the queue operation.  This method returns the ActionHandle,
        and the ActionHandle status indicates whether the queue was successful.
        """
        solver = kwds.pop('solver', kwds.pop('opt', None))
        if solver is None:
            raise ActionManagerError(
                "No solver passed to %s, use keyword option 'solver'" %
                (type(self).__name__))
        if not isinstance(solver, six.string_types):
            solver_name = solver.name
            if solver_name == 'asl':
                solver_name = \
                    os.path.basename(solver.executable())
        else:
            solver_name = solver
            solver = None

        #
        # Handle ephemeral solvers options here. These
        # will override whatever is currently in the options
        # dictionary, but we will reset these options to
        # their original value at the end of this method.
        #
        user_solver_options = {}
        # make sure to transfer the options dict on the
        # solver plugin if the user does not use a string
        # to identify the neos solver. The ephemeral
        # options must also go after these.
        if solver is not None:
            user_solver_options.update(solver.options)
        _options = kwds.pop('options', {})
        if isinstance(_options, six.string_types):
            _options = OptSolver._options_string_to_dict(_options)
        user_solver_options.update(_options)
        user_solver_options.update(
            OptSolver._options_string_to_dict(kwds.pop('options_string', '')))

        # JDS: [5/13/17] The following is a HACK.  This timeout flag is
        # set by pyomo/scripting/util.py:apply_optimizer.  If we do not
        # remove it, it will get passed to the NEOS solver.  For solvers
        # like CPLEX 12.7.0, this will cause a fatal error as it is not
        # a known option.
        if user_solver_options.get('timelimit', 0) is None:
            del user_solver_options['timelimit']

        opt = SolverFactory('_neos')
        opt._presolve(*args, **kwds)
        #
        # Map NEOS name, using lowercase convention in Pyomo
        #
        if len(self._solvers) == 0:
            for name in self.kestrel.solvers():
                if name.endswith('AMPL'):
                    self._solvers[name[:-5].lower()] = name[:-5]
        if solver_name not in self._solvers:
            raise ActionManagerError(
                "Solver '%s' is not recognized by NEOS. "
                "Solver names recognized:\n%s" %
                (solver_name, str(sorted(self._solvers.keys()))))
        #
        # Apply kestrel
        #
        # Set the kestrel_options environment
        #
        neos_sname = self._solvers[solver_name].lower()
        os.environ[
            'kestrel_options'] = 'solver=%s' % self._solvers[solver_name]
        #
        # Set the <solver>_options environment
        #
        solver_options = {}
        for key in opt.options:
            solver_options[key] = opt.options[key]
        solver_options.update(user_solver_options)
        options = opt._get_options_string(solver_options)
        if not options == "":
            os.environ[neos_sname + '_options'] = options
        #
        # Generate an XML string using these two environment variables
        #
        xml = self.kestrel.formXML(opt._problem_files[0])
        (jobNumber, password) = self.kestrel.submit(xml)
        ah.job = jobNumber
        ah.password = password
        #
        # Cleanup
        #
        del os.environ['kestrel_options']
        try:
            del os.environ[neos_sname + "_options"]
        except KeyError:
            pass
        #
        # Store action handle, and return
        #
        self._ah[jobNumber] = ah
        self._neos_log[jobNumber] = (0, "")
        self._opt_data[jobNumber] = (opt, opt._smap_id, opt._load_solutions,
                                     opt._select_index,
                                     opt._default_variable_value)
        self._args[jobNumber] = args
        return ah
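This queue method sits behind Pyomo's public NEOS interface. A minimal usage sketch of that interface; the email address is a placeholder (current NEOS policy requires a valid one), and model stands for any constructed Pyomo instance:

import os
from pyomo.environ import SolverManagerFactory

os.environ['NEOS_EMAIL'] = 'user@example.com'  # placeholder; NEOS requires a real address

# submit the model to NEOS; 'cbc' is one of the AMPL-capable solvers mapped above
with SolverManagerFactory('neos') as manager:
    results = manager.solve(model, opt='cbc')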
Example No. 21
# read vertices and edges from shapefiles...
vertex = geopandas.read_file(vertex_file)
edge = geopandas.read_file(edge_file)
# ... and set indices to agree with Excel format
vertex.set_index(['Vertex'], inplace=True)
edge.set_index(['Edge', 'Vertex1', 'Vertex2'], inplace=True)

# at this point, rundh.py and rundhshp.py work identically!
# dhmin.create_model must not rely on vertex/edge DataFrames to contain any
# geometry information

# get model
# create instance
# solver interface (GLPK)
prob = dhmin.create_model(vertex, edge, params, timesteps)
solver = SolverFactory('glpk')
result = solver.solve(prob, timelimit=30, tee=True)
prob.solutions.load_from(result)

# use special-purpose function to plot power flows (works unchanged!)
dhmintools.plot_flows_min(prob)

# read time-independent variable values to DataFrame
# (list all variables using dhmin.list_entities(prob, 'variables'))
caps = dhmin.get_entities(prob, ['Pmax', 'x'])
costs = dhmin.get_entity(prob, 'costs')

# remove Edge from index, so that edge and caps are both indexed on
# (vertex, vertex) tuples, i.e. their indices match for identical edges
edge.reset_index('Edge', inplace=True)
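With the 'Edge' level removed, edge and caps share a (Vertex1, Vertex2) index, so the capacity results can be attached to the edge table by an index-aligned join. A small sketch; the column names 'Pmax' and 'x' are assumed to match the entities requested from dhmin.get_entities above:

# annotate edges with optimised capacities and build decisions
edge_caps = edge.join(caps)
built_edges = edge_caps[edge_caps['x'] > 0]  # 'x' assumed to be the build decision
print(built_edges[['Pmax', 'x']])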
Example No. 22
def run_scenario(scenario):
    # scenario name
    sce = scenario.__name__
    sce_nice_name = sce.replace('_', ' ').title()

    # prepare input data
    data = rivus.read_excel(data_spreadsheet)
    vertex = pdshp.read_shp(vertex_shapefile)
    edge = prepare_edge(edge_shapefile, building_shapefile)

    # apply scenario function to input data
    data, vertex, edge = scenario(data, vertex, edge)

    # create & solve model
    prob = rivus.create_model(data, vertex, edge)
    if PYOMO3:
        prob = prob.create()  # no longer needed in Pyomo 4+
    optim = SolverFactory('gurobi')
    optim = setup_solver(optim)
    result = optim.solve(prob, tee=True)
    if PYOMO3:
        prob.load(result)  # no longer needed in Pyomo 4+

    # create result directory if not existent
    result_dir = os.path.join('result', os.path.basename(base_directory))
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    # report
    rivus.report(prob, os.path.join(result_dir, 'report.xlsx'))

    # plots
    for com, plot_type in [('Elec', 'caps'), ('Heat', 'caps'), ('Gas', 'caps'),
                           ('Elec', 'peak'), ('Heat', 'peak')]:

        # two plot variants
        for plot_annotations in [False, True]:
            # create plot
            fig = rivus.plot(prob,
                             com,
                             mapscale=False,
                             tick_labels=False,
                             plot_demand=(plot_type == 'peak'),
                             annotations=plot_annotations)
            plt.title('')

            # save to file
            for ext, transp in [('png', True), ('png', False), ('pdf', True)]:
                transp_str = ('-transp' if transp and ext != 'pdf' else '')
                annote_str = ('-annote' if plot_annotations else '')

                # determine figure filename from scenario name, plot type,
                # commodity, transparency, annotations and extension
                fig_filename = '{}-{}-{}{}{}.{}'.format(
                    sce, plot_type, com, transp_str, annote_str, ext)
                fig_filename = os.path.join(result_dir, fig_filename)
                fig.savefig(fig_filename,
                            dpi=300,
                            bbox_inches='tight',
                            transparent=transp)

    return prob
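A typical driver script then simply iterates run_scenario over a list of scenario functions. A minimal sketch with hypothetical scenario names; each function is expected to take and return (data, vertex, edge) as above:

# hypothetical scenario functions defined elsewhere in the run script
scenarios = [
    scenario_base,
    scenario_renovation,
]

for scenario in scenarios:
    prob = run_scenario(scenario)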
Example No. 23
    def _perform_queue(self, ah, *args, **kwds):
        """
        Perform the queue operation.  This method returns the ActionHandle,
        and the ActionHandle status indicates whether the queue was successful.
        """
        solver = kwds.pop('solver', kwds.pop('opt', None))
        if solver is None:
            raise ActionManagerError(
                "No solver passed to %s, use keyword option 'solver'" %
                (type(self).__name__))
        if not isinstance(solver, six.string_types):
            solver = solver.name

        #
        # Handle ephemeral solvers options here. These
        # will override whatever is currently in the options
        # dictionary, but we will reset these options to
        # their original value at the end of this method.
        #
        ephemeral_solver_options = {}
        ephemeral_solver_options.update(kwds.pop('options', {}))
        ephemeral_solver_options.update(
            OptSolver._options_string_to_dict(kwds.pop('options_string', '')))

        opt = SolverFactory('_neos')
        opt._presolve(*args, **kwds)
        #
        # Map NEOS name, using lowercase convention in Pyomo
        #
        if len(self._solvers) == 0:
            for name in self.kestrel.solvers():
                if name.endswith('AMPL'):
                    self._solvers[name[:-5].lower()] = name[:-5]
        if solver not in self._solvers:
            raise ActionManagerError("Solver '%s' is not recognized by NEOS" %
                                     solver)
        #
        # Apply kestrel
        #
        os.environ['kestrel_options'] = 'solver=%s' % self._solvers[solver]
        solver_options = {}
        for key in opt.options:
            solver_options[key] = opt.options[key]
        solver_options.update(ephemeral_solver_options)

        options = opt._get_options_string(solver_options)
        if not options == "":
            # use the merged options string computed above
            os.environ[self._solvers[solver].lower() + '_options'] = options
        xml = self.kestrel.formXML(opt._problem_files[0])
        (jobNumber, password) = self.kestrel.submit(xml)
        ah.job = jobNumber
        ah.password = password
        #
        # Store action handle, and return
        #
        self._ah[jobNumber] = ah
        self._neos_log[jobNumber] = (0, "")
        self._opt_data[jobNumber] = (opt, opt._smap_id, opt._load_solutions,
                                     opt._select_index,
                                     opt._default_variable_value)
        self._args[jobNumber] = args
        return ah
Example No. 24
def run_scenario(input_file, timesteps, scenario, result_dir):
    """ run an urbs model for given input, time steps and scenario
    
    Args:
        input_file: filename to an Excel spreadsheet for urbs.read_excel
        timesteps: a list of timesteps, e.g. range(0,8761)
        scenario: a scenario function that modifies the input data dict
        result_dir: directory name for result spreadsheet and plots
        
    Returns:
        the urbs model instance
    """
    
    # scenario name, read and modify data for scenario
    sce = scenario.__name__
    data = urbs.read_excel(input_file)
    data = scenario(data)

    # create model, solve it, read results
    prob = urbs.create_model(data, timesteps)
    optim = SolverFactory('glpk')  # cplex, glpk, gurobi, ...
    result = optim.solve(prob, tee=True)
    prob.solutions.load_from(result)
    
    # refresh time stamp string
    now = prob.created
    
    # write report to spreadsheet
    urbs.report(
        prob,
        os.path.join(result_dir, '{}-{}.xlsx').format(sce, now),
        prob.com_demand, prob.sit)

    # store optimisation problem for later re-analysis
    urbs.save(
        prob,
        os.path.join(result_dir, '{}-{}.pgz').format(sce, now))

    # add or change plot colors
    my_colors = {
        'Vled Haven': (230, 200, 200),
        'Stryworf Key': (200, 230, 200),
        'Qlyph Archipelago': (200, 200, 230),
        'Jepid Island': (215,215,215)}
    for country, color in my_colors.items():
        urbs.COLORS[country] = color
    
    # create timeseries plot for each demand (site, commodity) timeseries
    for sit, com in prob.demand.columns:
        # create figure
        fig = urbs.plot(prob, com, sit)
        
        # change the figure title
        ax0 = fig.get_axes()[0]
        nice_sce_name = sce.replace('_', ' ').title()
        new_figure_title = ax0.get_title().replace(
            'Energy balance of ', '{}: '.format(nice_sce_name))
        ax0.set_title(new_figure_title)
        
        # save plot to files 
        for ext in ['png', 'pdf']:
            fig_filename = os.path.join(
                result_dir, '{}-{}-{}-{}.{}').format(sce, com, sit, now, ext)
            fig.savefig(fig_filename, bbox_inches='tight')
    
    return prob