Code example #1
# Imports assumed from the surrounding script: the function relies on
# os, glob, pandas and the rivus module itself.
import glob
import os

import pandas as pd

import rivus


def reanalyse(directory):
    """Return constants for all pickled rivus results in directory

    Args:
        directory: a directory with 1 or multiple pickled rivus instances

    Returns:
        tuple (demand, cost, Pmax, Kappa_hub, Kappa_process) of concatenated
        DataFrames
    """
    glob_pattern = os.path.join(directory, '*.pgz')
    pickle_filenames = glob.glob(glob_pattern)

    demand = {}
    cost = {}
    Pmax = {}
    Kappa_hub = {}
    Kappa_process = {}

    for pf in pickle_filenames:
        # load original problem object including solution
        prob = rivus.load(pf)

        # truncate directory name and extension from pickle filename
        # remove 'scenario_' prefix, if present
        scenario_name = os.path.splitext(os.path.basename(pf))[0]
        scenario_name = scenario_name.replace('scenario_', '')

        # retrieve costs and capacities from result
        constants = rivus.get_constants(prob)

        # assign dict values per scenario
        cost[scenario_name] = constants[0]
        Pmax[scenario_name] = constants[1]
        Kappa_hub[scenario_name] = constants[2]
        Kappa_process[scenario_name] = constants[3]
        demand[scenario_name] = prob.peak

    # merge into single dataframe
    demand = pd.concat(demand, axis=1)
    cost = pd.concat(cost, axis=1)
    Pmax = pd.concat(Pmax, axis=1)
    Kappa_hub = pd.concat(Kappa_hub, axis=1)
    Kappa_process = pd.concat(Kappa_process, axis=1)

    return demand, cost, Pmax, Kappa_hub, Kappa_process
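
A minimal usage sketch, assuming a hypothetical result directory that holds one pickled scenario per *.pgz file (as written by rivus.save in example #3 below):

# Hypothetical path; any directory of pickled rivus results works.
demand, cost, Pmax, Kappa_hub, Kappa_process = reanalyse('result/moosh')
# Each return value concatenates the per-scenario frames column-wise,
# so the total cost per scenario is simply:
print(cost.sum())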
Code example #2
File: runbunch.py Project: vishwacolours/rivus
def run_bunch(use_email=False):
    """Run a bunch of optimizations and analysis automated. """
    # Files Access | INITs
    proj_name = 'runbunch'
    base_directory = os.path.join('data', proj_name)
    data_spreadsheet = os.path.join(base_directory, 'data.xlsx')
    profile_log = Series(name='{}-profiler'.format(proj_name))

    # Email connection
    email_setup = {
        'sender': config['email']['s_user'],
        'send_pass': config['email']['s_pass'],
        'recipient': config['email']['r_user'],
        'smtp_addr': config['email']['smtp_addr'],
        'smtp_port': config['email']['smtp_port']
    }

    # DB connection
    _user = config['db']['user']
    _pass = config['db']['pass']
    _host = config['db']['host']
    _base = config['db']['base']
    engine_string = ('postgresql://{}:{}@{}/{}'
                     .format(_user, _pass, _host, _base))
    engine = create_engine(engine_string)

    # Input Data
    # ----------
    # Spatial
    street_lengths = arange(50, 300, 100)
    num_edge_xs = [5, ]
    # Non-spatial
    data = read_excel(data_spreadsheet)
    original_data = deepcopy(data)
    interesting_parameters = [
        {'df_name': 'commodity',
         'args': {'index': 'Heat',
                  'column': 'cost-inv-fix',
                  'lim_lo': 0.5, 'lim_up': 1.6, 'step': 0.5}},
        {'df_name': 'commodity',
         'args': {'index': 'Heat',
                  'column': 'cost-fix',
                  'lim_lo': 0.5, 'lim_up': 1.6, 'step': 0.5}}
        # {'df_name': 'commodity',
        #  'args': {'index': 'Elec',
        #           'column': 'cost-var',
        #           'step': 0.1}}
    ]
    # Model Creation
    solver = SolverFactory(config['solver'])
    solver = setup_solver(solver, log_to_console=False, guro_time_lim=14400)
    # Solve | Analyse | Store | Change | Repeat
    for dx in street_lengths:
        for len_x, len_y in [(dx, dx), (dx, dx / 2)]:
            run_summary = 'Run with x:{}, y:{}'.format(len_x, len_y)
            for num_edge_x in num_edge_xs:
                vdf, edf = create_square_grid(num_edge_x=num_edge_x, dx=len_x,
                                              dy=len_y)
                extend_edge_data(edf)
                dim_x = num_edge_x + 1
                dim_y = dim_x
                for _vdf in _source_variations(vdf, dim_x, dim_y):
                    for param in interesting_parameters:
                        para_name = param['args']['column']
                        print('{0}\n{3}x{3} grid\t'
                              'dx:{1}, dy:{2}, #e:{3}, src:-, par:{4}\n'
                              .format('=' * 10, len_x, len_y, num_edge_x, para_name))
                        counter = 1
                        for variant in parameter_range(data[param['df_name']],
                                                       **param['args']):
                            changed = (variant.loc[param['args']['index']]
                                       [param['args']['column']])
                            print('variant <{0}>:{1}'.format(counter, changed))
                            counter = counter + 1
                            # Use temporary local copies, as create_model
                            # is destructive. See Issue #31.
                            __vdf = deepcopy(_vdf)
                            __edf = deepcopy(edf)
                            __data = data.copy()
                            __data[param['df_name']] = variant
                            print('\tcreating model')
                            _p_model = timenow()
                            prob = create_model(__data, __vdf, __edf)
                            profile_log['model_creation'] = (
                                timenow() - _p_model)
                            _p_solve = timenow()
                            print('\tsolving...')
                            try:
                                results = solver.solve(prob, tee=True)
                            except Exception as solve_error:
                                print(solve_error)
                                if use_email:
                                    sub = run_summary + '[rivus][solve-error]'
                                    email_me(solve_error, subject=sub,
                                             **email_setup)
                                # 'results' is unbound after a solver crash;
                                # skip the rest of this variant
                                continue
                            if (results.solver.status != SolverStatus.ok):
                                status = 'error'
                                outcome = 'error'
                            else:
                                status = 'run'
                                if (results.solver.termination_condition !=
                                        TerminationCondition.optimal):
                                    outcome = 'optimum_not_reached'
                                else:
                                    outcome = 'optimum'
                            profile_log['solve'] = (timenow() - _p_solve)
                            # Plot
                            _p_plot = timenow()
                            plotcomms = ['Gas', 'Heat', 'Elec']
                            try:
                                fig = fig3d(prob, plotcomms, linescale=8,
                                            use_hubs=True)
                            except Exception as plot_error:
                                print(plot_error)
                                # keep 'fig' defined for the store step below
                                fig = None
                                if use_email:
                                    sub = run_summary + '[rivus][plot-error]'
                                    email_me(plot_error, subject=sub,
                                             **email_setup)
                            profile_log['3d_plot_prep'] = (timenow() - _p_plot)
                            # Graph
                            _p_graph = timenow()
                            try:
                                _, pmax, _, _ = get_constants(prob)
                                graphs = to_nx(_vdf, edf, pmax)
                                graph_results = minimal_graph_anal(graphs)
                            except Exception as graph_error:
                                print(graph_error)
                                # keep 'graph_results' defined for the store
                                # step below
                                graph_results = None
                                if use_email:
                                    sub = run_summary + '[rivus][graph-error]'
                                    email_me(graph_error, subject=sub,
                                             **email_setup)
                            profile_log['all_graph_related'] = (
                                timenow() - _p_graph)
                            # Store
                            this_run = {
                                'comment': config['run_comment'],
                                'status': status,
                                'outcome': outcome,
                                'runner': 'lnksz',
                                'plot_dict': fig,
                                'profiler': profile_log}
                            try:
                                rdb.store(engine, prob, run_data=this_run,
                                          graph_results=graph_results)
                            except Exception as db_error:
                                print(db_error)
                                if use_email:
                                    sub = run_summary + '[rivus][db-error]'
                                    email_me(db_error, subject=sub,
                                             **email_setup)
                            del __vdf
                            del __edf
                            del __data
                            print('\tRun ended with: <{}>\n'.format(outcome))

                        # restore pristine input data; a deep copy keeps
                        # original_data itself untouched by later runs
                        data = deepcopy(original_data)
                if use_email:
                    status_txt = ('Finished iteration with edge number {}\n'
                                  'did: [source-var, param-seek]\n'
                                  'from [street-length, dim-shift, source-var,'
                                  ' param-seek]\n'
                                  'dx:{}, dy:{}'
                                  .format(num_edge_x, len_x, len_y))
                    sub = run_summary + '[rivus][finish-a-src]'
                    email_me(status_txt, subject=sub, **email_setup)
        if use_email:
            status_txt = ('Finished iteration with street lengths {}-{}\n'
                          'did: [dim-shift, source-var, param-seek]\n'
                          'from [street-length, dim-shift, source-var,'
                          ' param-seek]'
                          .format(len_x, len_y))
            sub = run_summary + '[rivus][finish-a-len-combo]'
            email_me(status_txt, subject=sub, **email_setup)
    if use_email:
        status_txt = ('Finished run-bunch at {}\n'
                      'did: [street-length, dim-shift, source-var, param-seek]'
                      .format(datetime.now().strftime('%y%m%dT%H%M')))
        sub = run_summary + '[rivus][finish-run]'
        email_me(status_txt, subject=sub, **email_setup)
    print('End of runbunch.')
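
run_bunch pulls its settings from a config mapping; a hypothetical sketch covering just the keys the function accesses above (all values are placeholders, not real credentials):

# Hypothetical config with the keys used by run_bunch; placeholder values.
config = {
    'run_comment': 'bunch run on square grids',
    'solver': 'glpk',
    'db': {'user': 'postgres', 'pass': 'postgres',
           'host': 'localhost', 'base': 'rivus'},
    'email': {'s_user': 'sender@example.com', 's_pass': 'secret',
              'r_user': 'recipient@example.com',
              'smtp_addr': 'smtp.example.com', 'smtp_port': 587},
}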
Code example #3
File: runmoosh.py Project: vishwacolours/rivus
# load nodes
vertex = pdshp.read_shp(vertex_shapefile)

# load spreadsheet data
data = rivus.read_excel(data_spreadsheet)

# create & solve model
prob = rivus.create_model(data, vertex, edge)
if PYOMO3:
    prob = prob.create()  # no longer needed in Pyomo 4
optim = SolverFactory('glpk')
optim = setup_solver(optim)
result = optim.solve(prob, tee=True)
if PYOMO3:
    prob.load(result)  # no longer needed in Pyomo 4

# load results
costs, Pmax, Kappa_hub, Kappa_process = rivus.get_constants(prob)
source, flows, hub_io, proc_io, proc_tau = rivus.get_timeseries(prob)

result_dir = os.path.join('result', os.path.basename(base_directory))

# create result directory if not existing already
if not os.path.exists(result_dir):
    os.makedirs(result_dir)

rivus.save(prob, os.path.join(result_dir, 'prob.pgz'))
rivus.report(prob, os.path.join(result_dir, 'prob.xlsx'))
rivus.result_figures(prob, os.path.join(result_dir, 'plot'))
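
The pickled prob.pgz written above can be reloaded later for post-processing; a minimal sketch, mirroring what reanalyse() in example #1 does for a whole directory:

# Reload the solved problem and recompute its constants.
prob = rivus.load(os.path.join(result_dir, 'prob.pgz'))
costs, Pmax, Kappa_hub, Kappa_process = rivus.get_constants(prob)
print(costs)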
Code example #4
File: test_db.py Project: vishwacolours/rivus
    def test_df_insert_query(self):
        """Are the stored dataframes and the retrieved ones identical?

        - Comparison form of frames is *after* create_model. (index is set)
        - Comparison form expects that input dataframes only have meaningful
          columns. (See pull request #23)
        - Only implemented dataframes are tested.

        Note
        ----
        Requires a ``config.json`` file in the root of rivus-repo with the
        database credentials. For Example:
        ::

            {
                "db" : {
                    "user" : "postgres",
                    "pass" : "postgres",
                    "host" : "localhost",
                    "base" : "rivus"
                }
            }
        """
        conf_path = os.path.join(pdir(pdir(pdir(__file__))), 'config.json')
        with open(conf_path) as conf:
            config = json.load(conf)
        # DB connection
        _user = config['db']['user']
        _pass = config['db']['pass']
        _host = config['db']['host']
        _base = config['db']['base']
        engine_string = ('postgresql://{}:{}@{}/{}'.format(
            _user, _pass, _host, _base))
        engine = create_engine(engine_string)

        proj_name = 'mnl'
        base_directory = os.path.join('data', proj_name)
        data_spreadsheet = os.path.join(base_directory, 'data.xlsx')
        data = read_excel(data_spreadsheet)
        # data_bup = data.copy()
        vertex, edge = square_grid()
        vert_init_commodities(vertex, ['Elec', 'Gas'], [('Elec', 0, 100000)])
        extend_edge_data(edge)
        prob = create_model(data, vertex, edge)
        solver = SolverFactory(config['solver'])
        solver = setup_solver(solver, log_to_console=False)
        solver.solve(prob, tee=True)

        test_id = rdb.init_run(engine, runner='Unittest')
        rdb.store(engine, prob, run_id=test_id)

        this_df = None
        dfs = data.keys()
        for df in dfs:
            if df == 'hub':
                continue  # is not implemented yet
            this_df = data[df]
            print(df)
            re_df = rdb.df_from_table(engine, df, test_id)
            self.assertTrue(
                (this_df.fillna(0) ==
                 re_df.reindex(this_df.index).fillna(0)).all().all(),
                msg=('{}: Original and retrieved frames'
                     ' are not identical'.format(df)))
        # Add implemented result dataframes
        cost, pmax, kappa_hub, kappa_process = get_constants(prob)
        source, _, _, _, _ = get_timeseries(prob)
        results = dict(source=source,
                       cost=cost,
                       pmax=pmax,
                       kappa_hub=kappa_hub,
                       kappa_process=kappa_process)
        dfs = ['source', 'cost', 'pmax', 'kappa_hub', 'kappa_process']
        for df in dfs:
            this_df = results[df]
            print(df)
            re_df = rdb.df_from_table(engine, df, test_id)
            self.assertTrue(
                (this_df.fillna(0) ==
                 re_df.reindex(this_df.index).fillna(0)).all().all(),
                msg=('{}: Original and retrieved frames'
                     ' are not identical'.format(df)))
Code example #5
File: runchess.py Project: vishwacolours/rivus
    plotcomms = ['Gas', 'Heat', 'Elec']
    fig = fig3d(prob,
                linescale=8,
                comms=plotcomms,
                use_hubs=True,
                dz=(.25 * DX))
    if SOLVER:
        plot3d(fig, filename=os.path.join(result_dir, 'rivus_result.html'))
    else:
        plot3d(fig, filename=os.path.join(arch_dir, 'rivus_result.html'))
    profile_log['plotting'] = timenow() - myprintstart

if GRAPHS:
    print('Graph handling.')
    graph_prep = timenow()
    _, pmax, _, _ = get_constants(prob)
    graphs = to_nx(prob.params['vertex'], prob.params['edge'], pmax)
    profile_log['graph_prep'] = timenow() - graph_prep

    graph_anal_sum = timenow()
    graph_data = []
    for G in graphs:
        print('Analyzing <{}> graph'.format(G.graph['Commodity']))
        g_data = {
            'commodity': G.graph['Commodity'],
            'is_connected': nx.is_connected(G),
            'connected_components': nx.number_connected_components(G)
        }
        if SPANNER:
            spanner = nx.minimum_spanning_tree(G)
            g_data['is_minimal'] = nx.is_isomorphic(G, spanner)
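
The SPANNER branch tests whether the built network is loop-free by comparing it with its minimum spanning tree. A self-contained toy illustration of that check, using hypothetical graphs rather than rivus data:

import networkx as nx

# A tree is isomorphic to its own minimum spanning tree, so the check
# flags loop-free (minimal) networks.
G = nx.path_graph(4)                    # 0-1-2-3, no loops
spanner = nx.minimum_spanning_tree(G)
print(nx.is_isomorphic(G, spanner))     # True -> network is minimal

G.add_edge(3, 0)                        # close a loop
spanner = nx.minimum_spanning_tree(G)
print(nx.is_isomorphic(G, spanner))     # False -> a redundant edge exists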
Code example #6
File: plot.py Project: vishwacolours/rivus
def fig3d(prob, comms=None, linescale=1.0, use_hubs=False, hub_opac=0.55, dz=5,
          layout=None, verbose=False):
    """Generate 3D representation of the rivus results using plotly

    Parameters
    ----------
    prob : rivus_archive
        A rivus model (later extract of it)
    comms : list, optional
        list/ndarray of commodity names to plot,
        Order: ['C1', 'C2', 'C3'] -> Bottom: C1, Top: C3
    linescale : float, optional
        A multiplier to get proportionally thicker lines.
    use_hubs : bool, optional
        Switch to depict hub processes.
    hub_opac : float, optional
        0-1 opacity param.
    dz : number, optional
        Distance between layers along the 'z' axis.
    layout : dict, optional
        A plotly layout dict to overwrite default.
    verbose : bool, optional
        To print out progress and the time it took.

    Example
    -------
    ::

        import plotly.offline as po
        fig = fig3d(prob, ['Gas', 'Heat', 'Elec'], hub_opac=0.55, linescale=7)
        # for static image
        # po.plot(fig, filename='plotly-game.html', image='png')
        po.plot(fig, filename='plotly-game.html')

    Returns
    -------
    A plotly-compatible figure *dict* (in plotly, a figure is essentially a dict).

    Note
    -----
        Greatly inspired by
        `Example1 <https://plot.ly/python/lines-on-maps/>`_ and
        `Example2 <https://plot.ly/python/3d-network-graph/>`_.
    """
    if verbose:
        import time
        plotprep = time.time()

    # Map projection
    bbox, cent_para, cent_meri = _getbb(prob)
    bm = Basemap(
        projection='tmerc', resolution=None,
        llcrnrlat=bbox[0], llcrnrlon=bbox[1],
        urcrnrlat=bbox[2], urcrnrlon=bbox[3],
        lat_0=cent_para, lon_0=cent_meri)

    # Get result values for plotting
    _, pmax, kappa_hub, kappa_process = get_constants(prob)
    source = get_timeseries(prob)[0]

    # Use all involved commodities if none is given
    if comms is None:
        comm_order = dict(Demand=0, Gas=5, CO2=10, Heat=15, Elec=20, Cool=25)
        # Drop all-zero columns in pmax; boolean indexing avoids deleting
        # columns from the frame while iterating over it
        pmax = pmax.loc[:, (pmax != 0).any()]
        comms = pmax.columns.values
        # Figure out commodities involved through processes
        proc_used = kappa_process.columns.values
        if len(proc_used):
            in_comms = (prob.r_in.sort_index(
                level=['Process', 'Commodity'], ascending=[1, 0])
                .loc[proc_used].index
                .get_level_values(level='Commodity')
                .unique())
            ot_comms = (prob.r_out.sort_index(
                level=['Process', 'Commodity'], ascending=[1, 0])
                .loc[proc_used].index
                .get_level_values(level='Commodity')
                .unique())
            proc_comms = in_comms.union(ot_comms)
            comms = union1d(comms, proc_comms.values)
        # Figure out commodities involved through hubs
        hubs_used = kappa_hub.columns.values
        if len(hubs_used):
            in_comms = (prob.r_in.sort_index(
                level=['Process', 'Commodity'], ascending=[1, 0])
                .loc[hubs_used].index
                .get_level_values(level='Commodity')
                .unique())
            ot_comms = (prob.r_out.sort_index(
                level=['Process', 'Commodity'], ascending=[1, 0])
                .loc[hubs_used].index
                .get_level_values(level='Commodity')
                .unique())
            hub_comms = in_comms.union(ot_comms)
            comms = union1d(comms, hub_comms.values)
        # unknown commodities sort last instead of raising a KeyError
        comms = sorted(comms, key=lambda c: comm_order.get(c, len(comm_order)))

    comm_zs = [dz * k for k, c in enumerate(comms)]
    comm_zs = dict(zip(comms, comm_zs))
    # geoPmax = pmax.join(prob.params['edge'].geometry, how='inner')
    if verbose:
        print("plot prep took: {:.4f}".format(time.time() - plotprep))
        layersstart = time.time()

    # Adding capacity lines: capacities and hubs
    edge_kwargs = dict(pmax=pmax, hubs=kappa_hub, proc=kappa_process,
                       source=source, dz=dz, use_hubs=use_hubs,
                       hub_opac=hub_opac, linescale=linescale)
    cap_layers, hub_layer = _add_edges(prob, bm, comms, comm_zs, **edge_kwargs)
    # Adding markers
    markers = _add_points(prob, bm, comm_zs, source, kappa_process)

    if verbose:
        print("layers took: {:.4f}".format(time.time() - layersstart))

    layout_default = {
        # 'autosize': False,
        # 'width' : 500,
        # 'height' : 500,
        # paper_bgcolor='#7f7f7f', plot_bgcolor='#c7c7c7'
        'margin': {
            'l': 0, 'r': 0,
            'b': 10, 't': 0,
            'pad': 4
        },
        'legend': {
            'traceorder': 'reversed',
            # 'y': 2,
            # 'yanchor' : 'center'
        },
        'scene': {
            'xaxis': {
                'visible': False
            },
            'yaxis': {
                'visible': False
            },
            'zaxis': {
                'visible': False,
                # 'range' : [0, comm_zs[-1] + dz]
            },
            'aspectmode': 'data',
            # 'aspectratio': {
            #     'x': 1, 'y': 1, 'z': .6
            # }
            'camera': {
                'eye': dict(x=2, y=-2, z=2)
            }
        }
        # 'width' : 700
    }
    layout = layout_default if layout is None else layout

    # Uniting the elements which make up a plotly figure
    data = cap_layers + hub_layer + markers
    fig = dict(data=data, layout=layout)
    return fig
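
Since a non-None layout replaces the default wholesale, callers who only want to tweak one setting should start from a complete layout dict. A hypothetical usage sketch, assuming a solved prob as in the examples above:

# Hypothetical override: supply a complete plotly layout dict.
custom_layout = {
    'margin': {'l': 0, 'r': 0, 'b': 10, 't': 0, 'pad': 4},
    'scene': {'aspectmode': 'data',
              'camera': {'eye': dict(x=2, y=-2, z=2)}},
}
fig = fig3d(prob, comms=['Gas', 'Heat', 'Elec'], linescale=8,
            use_hubs=True, layout=custom_layout)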