Example #1
0
def aggregate_zone_benefits(aggregate_zone_summary):
    """Run the aggregate_zone benefit expressions over the zone summary table.

    Loads the aggregate_zone model settings, reads the assignment spec
    named there (default 'aggregate_zone.csv'), and hands the summary
    dataframe to add_aggregate_results.
    """
    trace_label = 'aggregate_zone_benefits'

    summary_df = aggregate_zone_summary.to_frame()

    settings = config.read_model_settings('aggregate_zone.yaml')
    spec_name = settings.get('spec_file_name', 'aggregate_zone.csv')
    spec = bca.read_assignment_spec(spec_name)

    add_aggregate_results(summary_df, spec, source=trace_label)
Example #2
0
def aggregate_zone_processor(zones, trace_od):
    """
    zones: orca table

    zone data for base and build scenario dat files combined into a single dataframe
    with columns names prefixed with base_ or build_ indexed by ZONE
    """
    trace_label = 'aggregate_zone'
    model_settings = config.read_model_settings('aggregate_zone.yaml')
    spec_name = model_settings.get('spec_file_name', 'aggregate_zone.csv')
    spec = bca.read_assignment_spec(spec_name)

    zones_df = zones.to_frame()

    logger.info("Running aggregate_zone_processor with %d zones",
                len(zones_df.index))

    # limit tracing to the requested origin/destination zones, if any
    trace_rows = None
    if trace_od:
        orig_zone, dest_zone = trace_od
        trace_rows = ((zones_df.index == orig_zone) |
                      (zones_df.index == dest_zone))

    # expression locals: model constants plus the global settings,
    # visible to the spec expressions when evaluated per chooser row
    locals_dict = config.get_model_constants(model_settings)
    locals_dict.update(config.setting('globals'))

    # evaluate every expression in the spec against each row of zones_df
    results, trace_results, trace_assigned_locals = assign.assign_variables(
        spec,
        zones_df,
        locals_dict,
        df_alias='zones',
        trace_rows=trace_rows)

    pipeline.replace_table('aggregate_zone_summary', results)

    if trace_results is not None:
        tracing.write_csv(trace_results,
                          file_name="aggregate_zone",
                          index_label='zone',
                          column_labels=['label', 'zone'])

        if trace_assigned_locals:
            tracing.write_csv(trace_assigned_locals,
                              file_name="aggregate_zone_locals")
def aggregate_demographics_spec():
    """Load the assignment spec for aggregate demographics processing."""
    spec_file = 'aggregate_demographics.csv'
    return bca.read_assignment_spec(spec_file)
Example #4
0
def link_daily_spec():
    """Load the assignment spec for daily link processing."""
    spec_file = 'link_daily.csv'
    return bca.read_assignment_spec(spec_file)
Example #5
0
def link_spec():
    """Load the assignment spec for link processing."""
    spec_file = 'link.csv'
    return bca.read_assignment_spec(spec_file)
Example #6
0
def demographics_spec():
    """Load the assignment spec for demographics processing."""
    spec_file = 'demographics.csv'
    return bca.read_assignment_spec(spec_file)
Example #7
0
def auto_ownership_spec():
    """Load the assignment spec for auto ownership processing."""
    spec_file = 'auto_ownership.csv'
    return bca.read_assignment_spec(spec_file)
Example #8
0
def physical_activity_person_spec():
    """Load the assignment spec for person-level physical activity processing."""
    spec_file = 'physical_activity_person.csv'
    return bca.read_assignment_spec(spec_file)
Example #9
0
def physical_activity_trip_spec():
    """Load the assignment spec for trip-level physical activity processing."""
    spec_file = 'physical_activity_trip.csv'
    return bca.read_assignment_spec(spec_file)
Example #10
0
def person_trips_spec():
    """Load the assignment spec for person trips processing."""
    spec_file = 'person_trips.csv'
    return bca.read_assignment_spec(spec_file)
Example #11
0
def aggregate_od_processor(zone_districts, zones, data_dir, trace_od):
    """Evaluate the aggregate_od spec over the full origin-destination matrix.

    Builds a full cartesian OD dataframe (ordered to match ODSkims), evaluates
    the aggregate_od assignment spec against it with skims and zone matrices
    available as locals, then writes two summary tables to the pipeline:
    'aggregate_od_district_summary' (grouped by orig/dest district) and
    'aggregate_od_zone_summary' (grouped by origin zone).

    Parameters
    ----------
    zone_districts : orca table mapping each zone to a district
        (must share its index with zones)
    zones : orca table of zone attributes, indexed by ZONE
    data_dir : directory containing the skim omx files
    trace_od : optional (orig, dest) pair selecting a single OD row to trace

    Fix: the skim omx files opened by create_skim_locals_dict are now closed
    in a finally block, so they no longer leak if assign_variables raises.
    """

    trace_label = 'aggregate_od'

    logger.info("Running %s" % (trace_label, ))

    model_settings = config.read_model_settings('aggregate_od.yaml')

    spec_file_name = model_settings.get('spec_file_name', 'aggregate_od.csv')
    aggregate_od_spec = bca.read_assignment_spec(spec_file_name)

    zones = zones.to_frame()
    zone_districts = zone_districts.to_frame()
    zone_count = zone_districts.shape[0]

    assert zones.index.equals(zone_districts.index)

    # create OD dataframe in order compatible with ODSkims
    # (orig varies slowest, dest varies fastest)
    od_df = pd.DataFrame(
        data={
            'orig': np.repeat(np.asanyarray(zones.index), zone_count),
            'dest': np.tile(np.asanyarray(zones.index), zone_count),
        })

    # locals whose values will be accessible to the execution context
    # when the expressions in spec are applied to choosers
    locals_dict = config.get_model_constants(model_settings)
    locals_dict.update(config.setting('globals'))
    locals_dict['logger'] = logger

    logger.debug('%s mem before create_skim_locals_dict, %s' % (
        trace_label,
        memory_info(),
    ))

    # - add ODSkims to locals (note: we use local_skims list later to close omx files)
    cache_skims = model_settings.get('cache_skims', False)
    local_skims = create_skim_locals_dict(model_settings, data_dir, zones,
                                          cache_skims)
    locals_dict.update(local_skims)

    # - create_zone_matrices dicts
    locals_dict.update(create_zone_matrices(model_settings, zones))

    if trace_od:
        trace_orig, trace_dest = trace_od
        trace_od_rows = (od_df.orig == trace_orig) & (od_df.dest == trace_dest)
    else:
        trace_od_rows = None

    logger.debug("%s assigning variables" % (trace_label, ))
    try:
        results, trace_results, trace_assigned_locals = \
            assign.assign_variables(aggregate_od_spec,
                                    od_df,
                                    locals_dict=locals_dict,
                                    df_alias='od',
                                    trace_rows=trace_od_rows)
    finally:
        # close the skim omx files even if expression evaluation fails,
        # so we don't leak open file handles
        for local_name, od_skims in local_skims.items():
            logger.debug("closing %s" % local_name)
            od_skims.log_skim_usage()
            od_skims.close()

    logger.debug('%s mem after assign_variables, %s' % (
        trace_label,
        memory_info(),
    ))

    # summarize aggregate_od_benefits by orig and dest districts
    logger.debug("%s district summary" % (trace_label, ))
    results['orig'] = np.repeat(np.asanyarray(zone_districts.district),
                                zone_count)
    results['dest'] = np.tile(np.asanyarray(zone_districts.district),
                              zone_count)
    district_summary = results.groupby(['orig', 'dest']).sum()
    pipeline.replace_table('aggregate_od_district_summary', district_summary)

    # attribute aggregate_results benefits to origin zone
    logger.debug("%s zone summary" % (trace_label, ))
    results['orig'] = od_df['orig']
    del results['dest']
    zone_summary = results.groupby(['orig']).sum()
    pipeline.replace_table('aggregate_od_zone_summary', zone_summary)

    add_aggregate_results(zone_summary, aggregate_od_spec, source=trace_label)

    if trace_results is not None:
        tracing.write_csv(trace_results,
                          file_name=trace_label,
                          index_label='index',
                          column_labels=['label', 'od'])

        if trace_assigned_locals:
            tracing.write_csv(trace_assigned_locals,
                              file_name="%s_locals" % trace_label,
                              index_label='variable',
                              columns='value')
Example #12
0
def aggregate_trips_spec():
    """Load the assignment spec for aggregate trips processing."""
    spec_file = 'aggregate_trips.csv'
    return bca.read_assignment_spec(spec_file)