def main():

    # get origin information
    parcel_df = pd.read_csv(os.path.join(model_path, parcel_file_name),
                            sep=' ')
    print 'here0'
    parcel_df['HH_P_test'] = parcel_df['HH_P']
    geo_df = pd.read_csv(os.path.join(geo_path, geo_file_name),
                         sep=',',
                         index_col=None)
    geo_df = pd.merge(parcel_df,
                      geo_df,
                      left_on='PARCELID',
                      right_on='parcel_id')
    city_dict = geo_df.set_index(['TAZ_P']).to_dict()['city_id']
    county_dict = geo_df.set_index(['TAZ_P']).to_dict()['county_id']
    city_name_dict = geo_df.set_index(['TAZ_P']).to_dict()['city_name']
    print 'here1'
    # organize origin information
    origin_df = pd.DataFrame(geo_df.groupby(['TAZ_P'])['HH_P'].sum())
    origin_df.reset_index(inplace=True)
    origin_df['city_id'] = origin_df['TAZ_P'].map(city_dict)
    origin_df['county_id'] = origin_df['TAZ_P'].map(county_dict)
    origin_df['city_name'] = origin_df['TAZ_P'].map(city_name_dict)
    origin_df['region_id'] = 1
    # organize destination information
    dest_df = pd.DataFrame(
        geo_df.groupby(['TAZ_P'])[parcel_attributes_list].sum())
    dest_df.reset_index(inplace=True)
    # process transit time
    bank_am = _eb.Emmebank(
        os.path.join(model_path, 'Banks', BANK_TOD_AM, 'emmebank'))
    bank_pm = _eb.Emmebank(
        os.path.join(model_path, 'Banks', BANK_TOD_PM, 'emmebank'))
    if mode_meansurement == 'auto':
        print mode_meansurement
        transit_time_df = get_auto_information(
            bank_am, bank_pm)  # will have to fix this code later
    else:
        print mode_meansurement
        # 'bank' was previously undefined on this branch; open the
        # single-TOD bank used for transit skims
        bank = _eb.Emmebank(
            os.path.join(model_path, 'Banks', bank_tod, 'emmebank'))
        transit_time_df = get_transit_information(bank)
    print transit_time_df.head()
    transit_df = process_transit_attribute(transit_time_df, transit_time_max,
                                           'TAZ_P', 'from',
                                           parcel_attributes_list, origin_df,
                                           dest_df)
    print transit_df.head()
    # calculate jobs on transit time
    average_jobs_df = get_average_jobs(transit_df, geo_boundry[geo],
                                       parcel_attributes_list)
    print average_jobs_df
    average_jobs_df.to_csv(os.path.join(output_path, output_file_name),
                           index=False)
    print 'output file name is: ', output_file_name
    #return average_jobs_df
    print year, scenario, transit_time_max
    print 'done'
def main():

    # get origin information
    parcel_df = pd.read_csv(os.path.join(output_path, parcel_file_name), sep=' ')
    geo_df = pd.read_csv(os.path.join(output_path, geo_file_name), sep=',', index_col=None)
    geo_df = pd.merge(parcel_df, geo_df, left_on='PARCELID', right_on='parcel_id')
    city_dict = geo_df.set_index(['TAZ_P']).to_dict()['city_id']
    county_dict = geo_df.set_index(['TAZ_P']).to_dict()['county_id']
    city_name_dict = geo_df.set_index(['TAZ_P']).to_dict()['city_name']
    # organize origin information
    origin_df = pd.DataFrame(geo_df.groupby(['TAZ_P'])['HH_P'].sum())
    origin_df.reset_index(inplace=True)
    origin_df['city_id'] = origin_df['TAZ_P'].map(city_dict)
    origin_df['county_id'] = origin_df['TAZ_P'].map(county_dict)
    origin_df['city_name'] = origin_df['TAZ_P'].map(city_name_dict)
    origin_df['region_id'] = 1
    # organize destination information
    dest_df = pd.DataFrame(geo_df.groupby(['TAZ_P'])[parcel_attributes_list].sum())
    dest_df.reset_index(inplace=True)
    # process transit time
    bank = _eb.Emmebank(os.path.join(model_path, 'Banks', bank_tod, 'emmebank'))
    transit_time_df = get_transit_information(bank)
    transit_df = process_transit_attribute(transit_time_df, transit_time_max, 'TAZ_P', 'from', parcel_attributes_list, origin_df, dest_df)
    # calculate jobs on transit time
    average_jobs_df = get_average_jobs(transit_df, geo_boundry[geo], parcel_attributes_list) 
    output_file_name = geo + '_transit_' + str(year) + '_' + str(transit_time_max) + '_' + 'min.csv'
    average_jobs_df.to_csv(os.path.join(output_path, output_file_name), index=False)
    print 'output file name is: ', output_file_name
    print 'done'
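
# The two main() variants above rely on a get_transit_information(bank) helper
# that is not shown. A minimal sketch, assuming the standard walk-access
# transit skims (auxwa/twtwa/ivtwa for bus, auxwr/twtwr/ivtwr for rail) and
# 3700 internal zones, following the same skim-melting pattern as Example #12:
def get_transit_information(bank):
    # total transit time = walk access + initial wait + in-vehicle time;
    # take the faster of bus and rail for each zone pair
    bus_time = (bank.matrix('auxwa').get_numpy_data() +
                bank.matrix('twtwa').get_numpy_data() +
                bank.matrix('ivtwa').get_numpy_data())
    rail_time = (bank.matrix('auxwr').get_numpy_data() +
                 bank.matrix('twtwr').get_numpy_data() +
                 bank.matrix('ivtwr').get_numpy_data())
    transit_time = np.minimum(bus_time, rail_time)[0:3700, 0:3700]
    transit_time_df = pd.DataFrame(transit_time)
    transit_time_df['from'] = transit_time_df.index
    transit_time_df = pd.melt(transit_time_df, id_vars='from',
                              value_vars=list(transit_time_df.columns[0:3700]),
                              var_name='to', value_name='transit_time')
    # matrix indices are 0-based; zone ids are 1-based
    transit_time_df['to'] = transit_time_df['to'] + 1
    transit_time_df['from'] = transit_time_df['from'] + 1
    return transit_time_df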
Example #3
    def copy_results(self, database_path, scenarios, matrices):
        with _eb.Emmebank(_join(database_path, "emmebank")) as remote_emmebank:
            for dst_scen in scenarios:
                remote_scen = remote_emmebank.scenario(dst_scen.id)
                # Create extra attributes and network fields which do not exist
                for attr in sorted(remote_scen.extra_attributes(),
                                   key=lambda x: x._id):
                    if not dst_scen.extra_attribute(attr.name):
                        dst_attr = dst_scen.create_extra_attribute(
                            attr.type, attr.name, attr.default_value)
                        dst_attr.description = attr.description
                for field in remote_scen.network_fields():
                    if not dst_scen.network_field(field.type, field.name):
                        dst_scen.create_network_field(field.type, field.name,
                                                      field.atype,
                                                      field.description)
                dst_scen.has_traffic_results = remote_scen.has_traffic_results
                dst_scen.has_transit_results = remote_scen.has_transit_results

                dst_scen.publish_network(remote_scen.get_network())

            dst_emmebank = dst_scen.emmebank
            scen_id = dst_scen.id
            for matrix_id in matrices:
                src_matrix = remote_emmebank.matrix(matrix_id)
                dst_matrix = dst_emmebank.matrix(matrix_id)
                dst_matrix.set_data(src_matrix.get_data(scen_id), scen_id)
Example #4
    def _RetrieveFunctionIds(self):
        functionList = []
        with _emmebank.Emmebank(self.DatabasePath) as emmebank:
            for funcs in emmebank.functions():
                functionList.append(funcs.id)

        return functionList         
Example #5
def get_total_sov_trips(tod_list):
    trip_table = np.zeros((len(zones), len(zones)))
    for tod in tod_list:
        # open each TOD bank once, then sum the three SOV trip tables
        my_bank = _eb.Emmebank('Banks/' + tod + '/emmebank')
        for trip_table_name in ['svtl1', 'svtl2', 'svtl3']:
            skim = my_bank.matrix(trip_table_name).get_numpy_data()
            trip_table = trip_table + skim
    return trip_table
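
# Example usage (hypothetical second TOD name, matching the
# 'Banks/<tod>/emmebank' layout used throughout these examples):
# am_sov_trips = get_total_sov_trips(['6to7', '7to8'])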
Example #6
def main():

    # get geo location information
    parcel_df = pd.read_csv(os.path.join(model_path, parcel_file_name),
                            sep=' ')
    parcel_df['HH_P_test'] = parcel_df['HH_P']
    minority_df = pd.read_csv(os.path.join(geo_path, minority_file_name),
                              sep=',',
                              index_col=None)
    geo_df = pd.read_csv(os.path.join(geo_path, geo_file_name),
                         sep=',',
                         index_col=None)
    geo_df = pd.merge(parcel_df,
                      geo_df,
                      left_on='PARCELID',
                      right_on='parcel_id')
    tract_dict = geo_df.set_index(['parcel_id']).to_dict()['census_tract']
    taz_dict = geo_df.set_index(['parcel_id']).to_dict()['TAZ_P']

    # organize origin information
    origin_df = pd.DataFrame(geo_df.groupby(['parcel_id'])['HH_P'].sum())
    origin_df.reset_index(inplace=True)
    origin_df['taz_id'] = origin_df['parcel_id'].map(
        taz_dict)  # need TAZ to join with the transit time table
    # organize destination information
    dest_df = pd.DataFrame(
        geo_df.groupby(['TAZ_P'])[parcel_attributes_list].sum())
    dest_df.reset_index(inplace=True)

    # process transit time
    bank = _eb.Emmebank(os.path.join(model_path, 'Banks', bank_tod,
                                     'emmebank'))
    transit_time_df = get_transit_information(bank)
    transit_hh_emp = process_transit_attribute(transit_time_df,
                                               transit_time_max,
                                               parcel_attributes_list,
                                               origin_df, dest_df, tract_dict,
                                               taz_dict)
    # flag the minority tracts
    transit_hh_emp = transit_hh_emp.merge(minority_df,
                                          left_on='census_tract',
                                          right_on='GEOID10',
                                          how='left')
    print transit_hh_emp.head()
    # calculate jobs on transit time
    for geo in geo_list:
        print geo
        average_jobs_df = get_average_jobs(transit_hh_emp, geo_boundry[geo],
                                           parcel_attributes_list)
        output_file_name = str(
            year) + '_' + scenario + '_' + geo + '_transit_' + str(
                transit_time_max) + '_' + 'min.csv'
        print output_file_name
        average_jobs_df.to_csv(os.path.join(output_path, output_file_name),
                               index=False)
    print scenario, 'done'
Example #7
def daily_counts(writer):
    """Export daily network volumes and compare to observed."""

    # Load observed data
    count_id_df = pd.read_csv(r'inputs/observed/observed_daily_counts.csv')

    # add daily bank to project if it exists
    if os.path.isfile(r'Banks/Daily/emmebank'):
        bank = _eb.Emmebank(r'Banks/Daily/emmebank')
        scenario = bank.scenario(1002)

        # Add/refresh screenline ID link attribute
        if scenario.extra_attribute('@scrn'):
            scenario.delete_extra_attribute('@scrn')
        attr = scenario.create_extra_attribute('LINK', '@scrn')

        # Add/refresh screenline count value from assignment results
        if scenario.extra_attribute('@count'):
            scenario.delete_extra_attribute('@count')
        attr_count = scenario.create_extra_attribute('LINK', '@count')

        network = scenario.get_network()

        inode_list = []
        jnode_list = []
        scrn_id = []
        facility_list = []
        observed_volume = []
        model_volume = []

        for _, row in count_id_df.iterrows():
            inode = int(row.NewINode)
            jnode = int(row.NewJNode)
            if network.link(inode, jnode):
                link = network.link(inode, jnode)
                link['@scrn'] = row['ScreenLineID']
                link['@count'] = row['Year_2014']

                inode_list.append(inode)
                jnode_list.append(jnode)
                facility_list.append(link['data3'])
                scrn_id.append(link['@scrn'])
                observed_volume.append(link['@count'])
                model_volume.append(link['@tveh'])

        scenario.publish_network(network)

        df = pd.DataFrame([inode_list, jnode_list, facility_list, model_volume,
                           scrn_id, observed_volume]).T
        df.columns = ['i', 'j', 'ul3', '@tveh', '@scrn', 'count']

        df.to_excel(excel_writer=writer, sheet_name='Daily Counts')

    else:
        raise Exception('no daily bank found')
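
# Example usage (hypothetical output path; daily_counts expects an open
# pandas ExcelWriter):
# writer = pd.ExcelWriter('outputs/validation/counts.xlsx')
# daily_counts(writer)
# writer.save()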
Example #8
def convert_auto_tripTables():
    auto_paths_dict = {
        'ni': 'ni/emmebank',
        'am': 'am/emmebank',
        'md': 'md/emmebank',
        'pm': 'pm/emmebank',
        'ev': 'ev/emmebank'
    }
    #auto
    my_store = open_h5_file(auto_h5_file)

    tod_dict = {"am": 0, "md": 1, "pm": 2, "ev": 3, "ni": 4}

    # keep a list of the time matrices to calculate costs from

    for key_time, value in auto_paths_dict.iteritems():
        #my_store=h5py.File(hdf5_filename, "r+")
        e = key_time in my_store
        if e:
            del my_store[key_time]
            create_h5_group(my_store, key_time)
            print "Group Skims Exists. Group deleted then created"
            #If not there, create the group
        else:
            create_h5_group(my_store, key_time)
            print "Group Skims Created"

        #time_matrices = {}
        emmebank_4kauto = _eb.Emmebank(auto4k_path + value)
        list_of_matrices = emmebank_4kauto.matrices()

        for emme_matrix in list_of_matrices:
            print emme_matrix
            #matrix_name= 'ivtwa' + item

            matrix_id = emmebank_4kauto.matrix(emme_matrix).id
            matrix = emmebank_4kauto.matrix(matrix_id)
            if matrix.type == 'FULL':
                matrix_value = np.matrix(matrix.raw_data)
                #make sure max value is set to uint16 max
                #matrix_value = np.where(matrix_value > np.iinfo('uint16').max, np.iinfo('uint16').max, matrix_value)
                my_store[key_time].create_dataset(
                    matrix.name,
                    data=matrix_value.astype('float32'),
                    compression='gzip')
                print matrix.name + ' was transferred to the HDF5 container.'
    my_store.close()
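
# open_h5_file() and create_h5_group() are helpers not shown here. Minimal
# sketches using h5py, assumed to match how they are called above:
import h5py

def open_h5_file(h5_name):
    # open read/write, creating the file if it does not exist
    return h5py.File(h5_name, 'a')

def create_h5_group(h5_file, group_name):
    if group_name not in h5_file:
        h5_file.create_group(group_name)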
Example #9
def export_network_shape(tod):
    """
    Loop through network components and export shape points
    """

    if os.path.isfile(r'Banks/' + tod + '/emmebank'):
        bank = _eb.Emmebank(r'Banks/' + tod + '/emmebank')
        scenario = bank.scenario(1002)
        network = scenario.get_network()

        inode_list = []
        jnode_list = []
        shape_x = []
        shape_y = []
        shape_loc = []

        for link in network.links():
            for local_index, point in enumerate(link.shape):
                inode_list.append(link.i_node)
                jnode_list.append(link.j_node)
                shape_x.append(point[0])
                shape_y.append(point[1])
                shape_loc.append(local_index)

        df = pd.DataFrame(
            [inode_list, jnode_list, shape_loc, shape_x, shape_y]).T
        df.columns = ['i', 'j', 'shape_local_index', 'x', 'y']

        df['ij'] = df['i'].astype('str') + '-' + df['j'].astype('str')

        # convert to lat-lon
        df['lat_lon'] = df[['x', 'y']].apply(
            lambda row: project_to_wgs84(row['x'], row['y']), axis=1)
        df['lon'] = df['lat_lon'].apply(lambda row: row[0])
        df['lat'] = df['lat_lon'].apply(lambda row: row[-1])

        df.to_csv('outputs/network/network_shape.csv', index=False)
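
# project_to_wgs84() is a helper not shown above. A minimal sketch using the
# legacy pyproj API, assuming the network is in Washington State Plane North
# (EPSG:2285, US feet) -- substitute the model's actual coordinate system:
import pyproj

def project_to_wgs84(x, y):
    # returns (lon, lat), which the caller indexes as row[0] / row[-1]
    state_plane = pyproj.Proj(init='epsg:2285', preserve_units=True)
    wgs84 = pyproj.Proj(init='epsg:4326')
    lon, lat = pyproj.transform(state_plane, wgs84, x, y)
    return lon, lat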
Example #10
    def _insert_tsegs(self):
        ''' Populate the TransitSegs table from Emme transit assignments. '''
        emmebank = _eb.Emmebank(self._emmebank_path)
        for tod in xrange(1, 9):
            scenario_id = '{0}{1}'.format(self._trn_scen_id_prefix, tod)
            scenario = emmebank.scenario(scenario_id)
            network = scenario.get_network()
            for tseg in network.transit_segments():
                # Get values
                inode = tseg.i_node
                jnode = tseg.j_node
                if inode and jnode:
                    link = tseg.link
                    tline = tseg.line
                    tline_desc = tline.description  # Should this be trimmed? Combined with mode (tline[0])?
                    is_rail = tline.mode.id.upper() in ('C', 'M')
                    boardings = tseg.transit_boardings
                    allow_brd = tseg.allow_boardings
                    passengers = tseg.transit_volume
                    pass_hrs = passengers * tseg.transit_time / 60.0
                    pass_mi = passengers * link.length

                    # Insert into table (if valid link)
                    db_row = (tseg.id, tline.id, tline_desc, tline.headway,
                              tseg.number, inode.number, jnode.number, tod,
                              tline.mode.id, is_rail, boardings, allow_brd,
                              passengers, pass_hrs, pass_mi)
                    insert_sql = 'INSERT INTO TransitSegs VALUES ({0})'.format(
                        ','.join(['?'] * len(db_row)))
                    self._con.execute(insert_sql, db_row)

            self._con.commit()

        emmebank.dispose()  # Close Emmebank, remove lock
        return None
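
    # The TransitSegs table itself is created elsewhere. A plausible schema
    # (hypothetical column names; the order matches the 15-tuple inserted above):
    #
    # CREATE TABLE TransitSegs (
    #     tseg_id TEXT, tline_id TEXT, tline_desc TEXT, headway REAL,
    #     seg_number INTEGER, inode INTEGER, jnode INTEGER, tod INTEGER,
    #     mode TEXT, is_rail INTEGER, boardings REAL, allow_brd INTEGER,
    #     passengers REAL, pass_hrs REAL, pass_mi REAL
    # )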
Example #11
def vmt_statistics(abm, csv_path=False):
    ''' Export a CSV file containing VMT, stratified by zone group and
        facility type. '''
    # Use default output location if none specified
    if not csv_path:
        csv_path = os.path.join(abm._output_dir, 'results_vmt_statistics.csv')

    # Initialize VMT dict
    vmt_subset = {g: {t: 0 for t in abm.facility_types} for g in abm.zone_groups}

    # Populate dict from Emme data
    emmebank = _eb.Emmebank(abm._emmebank_path)

    for tod in xrange(1, 9):

        # Auto VMT from links in TOD's highway network
        scenario_id_hwy = '{0}'.format(tod)
        scenario_hwy = emmebank.scenario(scenario_id_hwy)
        network_hwy = scenario_hwy.get_network()

        for link in network_hwy.links():
            # Stratify by zone group
            zn = link.i_node['@zone']
            for g, z in abm.zone_groups.iteritems():
                if zn in z:
                    zone_group = g
                    break
            # Stratify by facility type
            for t, v in abm.facility_types.iteritems():
                if link.volume_delay_func in v:
                    facility_type = t
                    break

            # Calculate VMT
            vol = (  # Convert vehicle-equivalents to vehicles
                link['@vso1ntlo']/1 + link['@vso1nthi']/1 + link['@vso1tllo']/1 + link['@vso1tlhi']/1 +
                link['@vho2ntlo']/1 + link['@vho2nthi']/1 + link['@vho2tllo']/1 + link['@vho2tlhi']/1 +
                link['@vho3ntlo']/1 + link['@vho3nthi']/1 + link['@vho3tllo']/1 + link['@vho3tlhi']/1 +
                link['@vltrnt']/1 + link['@vltrtl']/1 +
                link['@vmtrnt']/2 + link['@vmtrtl']/2 +
                link['@vhtrnt']/3 + link['@vhtrtl']/3
            )
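            # (light vehicles are 1 vehicle-equivalent each; medium trucks
            # count as 2 and heavy trucks as 3, hence the divisors)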
            vmt = vol * link.length

            # Add VMT to appropriate group
            vmt_subset[zone_group][facility_type] += vmt

        # Bus VMT from transit segments in TOD's transit network
        scenario_id_trn = '{0}{1}'.format(abm._trn_scen_id_prefix, tod)
        scenario_trn = emmebank.scenario(scenario_id_trn)
        network_trn = scenario_trn.get_network()

        for link in network_trn.links():
            # Stratify by zone group
            zn = link.i_node['@zone']
            for g, z in abm.zone_groups.iteritems():
                if zn in z:
                    zone_group = g
                    break
            # Stratify by facility type
            for t, v in abm.facility_types.iteritems():
                if link.volume_delay_func in v:
                    facility_type = t
                    break

            # Calculate headway- and TTF-adjusted VMT for each bus segment
            for tseg in link.segments():
                if tseg.line.mode in ('B', 'E', 'L', 'P', 'Q'):

                    # Calculate line-specific volume from headway: must be at least 1 (ignore headways of 99 mins)
                    vol = max(abm.tod_minutes(tod) / tseg['@hdway'], 1) if tseg['@hdway'] != 99 else 1
                    vmt = vol * link.length

                    vmt_subset[zone_group][facility_type] += vmt

    emmebank.dispose()  # Close Emmebank, remove lock

    # Write results to CSV
    zngrp_order = ['Chicago', 'Cook Balance', 'DuPage', 'Kane', 'Kendall', 'Lake', 'McHenry', 'Will', 'IL Balance', 'Indiana', 'Wisconsin']
    factype_order = ['Expressway', 'Arterial', 'Ramp/Toll', 'Centroid']
    with open(csv_path, 'wb') as w:
        row = '{0},{1},{2}\n'
        w.write(row.format('DISTRICT', 'FACILITY_TYPE', 'VMT'))

        # Iterate through zone groups
        for zone_group in zngrp_order:
            for facility_type in factype_order:
                vmt = vmt_subset[zone_group][facility_type]
                w.write(row.format(zone_group, facility_type, vmt))
            vmt_subtotal = sum(v for v in vmt_subset[zone_group].itervalues())
            w.write(row.format(zone_group, 'Subtotal', vmt_subtotal))

        # Summarize entire network by facility type
        network_totals = {t: sum(vmt_subset[g][t] for g in abm.zone_groups) for t in abm.facility_types}
        for facility_type in factype_order:
            vmt = network_totals[facility_type]
            w.write(row.format('Entire Network', facility_type, vmt))
        grand_total = sum(v for v in network_totals.itervalues())
        w.write(row.format('Entire Network', 'Grand Total', grand_total))

        # Summarize CMAP 7-county region by facility type
        cmap_groups = ['Chicago', 'Cook Balance', 'McHenry', 'Lake', 'Kane', 'DuPage', 'Will', 'Kendall']
        cmap_totals = {t: sum(vmt_subset[g][t] for g in cmap_groups) for t in abm.facility_types}
        for facility_type in factype_order:
            vmt = cmap_totals[facility_type]
            w.write(row.format('CMAP Region', facility_type, vmt))
        cmap_total = sum(v for v in cmap_totals.itervalues())
        w.write(row.format('CMAP Region', 'Region Total', cmap_total))

    return csv_path
Example #12
import pandas as pd
import numpy as np
import os
# Emme Modeller API (assumed import; the snippet uses _eb below)
import inro.emme.database.emmebank as _eb

transit_time_max = 60
#drive_time_max = 30
year = 2014
model_path = r'N:\T2040\soundcast_2014'
bank_tod = '7to8'
output_dir = r'S:\angela\job_housing'

parcel_df = pd.read_csv(os.path.join(model_path, r'inputs\accessibility\parcels_urbansim.txt'), sep=' ')
parcel_df = parcel_df[['EMPTOT_P', 'TAZ_P', 'HH_P']]
parcel_df = pd.DataFrame(parcel_df.groupby('TAZ_P').sum())
parcel_df.reset_index(inplace=True)

bank = _eb.Emmebank(os.path.join(model_path, 'Banks', bank_tod, 'emmebank'))

bus_time = bank.matrix('auxwa').get_numpy_data() + bank.matrix('twtwa').get_numpy_data() + bank.matrix('ivtwa').get_numpy_data() 
rail_time = bank.matrix('auxwr').get_numpy_data() + bank.matrix('twtwr').get_numpy_data() + bank.matrix('ivtwr').get_numpy_data() 
transit_time = np.minimum(bus_time, rail_time)
transit_time = transit_time[0:3700, 0:3700]
transit_time_df = pd.DataFrame(transit_time)
transit_time_df['from'] = transit_time_df.index
transit_time_df = pd.melt(transit_time_df, id_vars='from', value_vars=list(transit_time_df.columns[0:3700]), var_name='to', value_name='transit_time')
# add 1 into zone id before join with parcel data
transit_time_df['to'] = transit_time_df['to'] + 1 
transit_time_df['from'] = transit_time_df['from'] + 1
transit_time_df = transit_time_df[transit_time_df.transit_time <= transit_time_max]
transit_time_df = transit_time_df.merge(parcel_df, how='left', left_on='to', right_on='TAZ_P')
# get the destination employment & household info
transit_time_max_emp = pd.DataFrame(transit_time_df.groupby('from')[['HH_P', 'EMPTOT_P']].sum())
Example #13
def convert_fullmats(srcbankpath,
                     h5filepath,
                     h5grouppath=None,
                     mats=None,
                     dryrun=False):
    """
    Converts the full matrices in an Emmebank and stores them in an
    HDF5 file. Optionally specify the destination group and a list of
    matrices to convert. By default, creates a destination group with
    the name of the emmebank and converts all matrices.
    """

    global num_to_convert, converted, start

    # Context managers work with Emme 4.0.8 or newer, otherwise the
    # 'with' keyword fails on Emmebank()
    # Sadly, opening multiple files in a single un-nested with block
    # was added in Python 2.7, and so this is a little nesty.
    # See: http://goo.gl/ucEsR for details.
    with ebank.Emmebank(srcbankpath) as srcbank:
        h5grouppath = h5grouppath or "/" + re.sub('[^0-9a-zA-Z]+', '',
                                                  srcbank.title)

        # Hacky work-around for Emmebanks that were created with an
        # error-producing scenario 9999. According to Chris J.,
        # sometimes scenario 9999 is bogus and should be removed, but
        # sometimes is not. This is a crude heuristic, and it would be
        # better to do some clean-up in the model.
        sn = [x.number for x in srcbank.scenarios()]
        if len(sn) > 1 and 9999 in sn:
            try:
                srcbank.delete_scenario(9999)
            except Exception:
                pass

        with tables.File(h5filepath, mode="a", filters=filters) as h5file:
            for mat in srcbank.matrices():
                full_mat_name = h5grouppath + '/' + mat.name
                # convert all matrices when mats is None (the documented default)
                if mats is None or full_mat_name in mats:
                    if dryrun:
                        print "%s/%s" % (h5grouppath, mat.name),
                    else:
                        print("Converting: %s/%s\n         -> %s/%s" %
                              (srcbankpath, mat.name, h5grouppath, mat.name))

                    group = get_or_create_group(h5file, h5grouppath)

                    if not dryrun:
                        a = h5file.create_array(h5grouppath, mat.name,
                                                emmemat2np(mat))
                        a.attrs.description = mat.description

                    # Housekeeping
                    converted += 1
                    if mats is not None:
                        mats.remove(full_mat_name)

                    now = datetime.now()
                    fraction = 1.0 * converted / num_to_convert
                    time_so_far = now - start

                    seconds_so_far = time_so_far.seconds
                    total_estimate = 1.0 * seconds_so_far / fraction
                    seconds_left = total_estimate - seconds_so_far
                    time_left = timedelta(seconds=seconds_left)

                    if not dryrun:
                        print "            Finished %d of %d. Estimated time remaining: %s\n" % (
                            converted, num_to_convert, time_left)
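
# Example invocation (hypothetical paths and matrix names; the module-level
# counters num_to_convert, converted and start must be initialized first):
# num_to_convert, converted, start = 1, 0, datetime.now()
# convert_fullmats('Banks/7to8/emmebank', 'skims.h5',
#                  h5grouppath='/am', mats=['/am/ivtwa'])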
Example #14
    def _get_vmt_by_speed(self):
        ''' Sum daily VMT by vehicle speed within the CMAP region, using 5 mph
            bins. For each TOD, process highway network first, followed by
            buses in corresponding transit network. '''
        # 15 five-mph bins, keyed by minimum speed (0, 5, ..., 70)
        vmt_by_speed = {i * 5: 0 for i in xrange(15)}
        link_speeds = self._get_link_speeds()
        emmebank = _eb.Emmebank(self._emmebank_path)

        for tod in xrange(1, 9):

            # Auto VMT from links in TOD's highway network
            scenario_id_hwy = '{0}'.format(tod)
            scenario_hwy = emmebank.scenario(scenario_id_hwy)
            network_hwy = scenario_hwy.get_network()

            for link in network_hwy.links():
                if 1 <= link.i_node['@zone'] <= 1711:

                    # Calculate VMT
                    vol = (  # Convert vehicle-equivalents to vehicles
                        link['@vso1ntlo'] / 1 + link['@vso1nthi'] / 1 +
                        link['@vso1tllo'] / 1 + link['@vso1tlhi'] / 1 +
                        link['@vho2ntlo'] / 1 + link['@vho2nthi'] / 1 +
                        link['@vho2tllo'] / 1 + link['@vho2tlhi'] / 1 +
                        link['@vho3ntlo'] / 1 + link['@vho3nthi'] / 1 +
                        link['@vho3tllo'] / 1 + link['@vho3tlhi'] / 1 +
                        link['@vltrnt'] / 1 + link['@vltrtl'] / 1 +
                        link['@vmtrnt'] / 2 + link['@vmtrtl'] / 2 +
                        link['@vhtrnt'] / 3 + link['@vhtrtl'] / 3)
                    vmt = vol * link.length

                    # Get link travel times (minutes) and free-flow/modeled speeds (mph)
                    fmph, mph = link_speeds[tod][link.id]

                    # Add VMT to appropriate speed bin
                    mph_bin = 5 * min(int(math.floor(mph / 5)), 14)
                    vmt_by_speed[mph_bin] += vmt

            # Bus VMT from transit segments in TOD's transit network
            scenario_id_trn = '{0}{1}'.format(self._trn_scen_id_prefix, tod)
            scenario_trn = emmebank.scenario(scenario_id_trn)
            network_trn = scenario_trn.get_network()

            for link in network_trn.links():
                if 1 <= link.i_node['@zone'] <= 1711:

                    # Calculate headway- and TTF-adjusted VMT for each bus segment
                    for tseg in link.segments():
                        if tseg.line.mode in ('B', 'E', 'L', 'P', 'Q'):

                            # Calculate line-specific volume from headway: must be at least 1 (ignore headways of 99 mins)
                            vol = max(
                                self.tod_minutes(tod) / tseg['@hdway'],
                                1) if tseg['@hdway'] != 99 else 1
                            vmt = vol * link.length

                            # Get link travel times (minutes) and free-flow/modeled speeds (mph)
                            fmph, mph = link_speeds[tod][link.id]

                            # Add VMT to appropriate speed bin
                            if tseg.transit_time_func == 2:
                                mph_bin = 5 * min(int(math.floor(fmph / 5)),
                                                  14)
                            else:
                                mph_bin = 5 * min(int(math.floor(mph / 5)), 14)
                            vmt_by_speed[mph_bin] += vmt

        emmebank.dispose()  # Close Emmebank, remove lock
        return vmt_by_speed
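
    # Binning example: a modeled speed of 37.4 mph falls in the 35-mph bin
    # (5 * min(floor(37.4 / 5), 14) = 35); anything at or above 70 mph lands
    # in the top bin (5 * 14 = 70).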
Example #15
    def _get_link_speeds(self):
        ''' Assign highway links a free-flow and congested speed (mph), based
            on the in-link in the case of toll links (vdf=7). Return a nested
            dict keyed by TOD and link id, with values being tuples of
            (free-speed, congested-speed). '''
        link_speeds = {}
        toll_link_speeds = {}
        emmebank = _eb.Emmebank(self._emmebank_path)
        for tod in xrange(1, 9):
            link_speeds[tod] = {}
            toll_link_speeds[tod] = {}
            scenario_id_hwy = '{0}'.format(tod)
            scenario_hwy = emmebank.scenario(scenario_id_hwy)
            network_hwy = scenario_hwy.get_network()

            # Get inode for toll links
            for link in network_hwy.links():
                if link.volume_delay_func == 7:
                    toll_link_speeds[tod][link.id] = None

            toll_link_inodes = {
                link_id.split('-')[0]: link_id
                for link_id in toll_link_speeds[tod].iterkeys()
            }

            # Calc speed for non-toll links, also assigning to toll links as appropriate
            for link in network_hwy.links():
                if link.volume_delay_func != 7:

                    # Calculate volumes
                    vol = (  # Convert vehicle-equivalents to vehicles
                        link['@vso1ntlo'] / 1 + link['@vso1nthi'] / 1 +
                        link['@vso1tllo'] / 1 + link['@vso1tlhi'] / 1 +
                        link['@vho2ntlo'] / 1 + link['@vho2nthi'] / 1 +
                        link['@vho2tllo'] / 1 + link['@vho2tlhi'] / 1 +
                        link['@vho3ntlo'] / 1 + link['@vho3nthi'] / 1 +
                        link['@vho3tllo'] / 1 + link['@vho3tlhi'] / 1 +
                        link['@vltrnt'] / 1 + link['@vltrtl'] / 1 +
                        link['@vmtrnt'] / 2 + link['@vmtrtl'] / 2 +
                        link['@vhtrnt'] / 3 + link['@vhtrtl'] / 3)

                    # Get link travel times (minutes) and free-flow/modeled speeds (mph)
                    fmph = link.length / (link['@ftime'] /
                                          60) if link['@ftime'] else 0
                    mph = link.length / (link.auto_time /
                                         60) if link.auto_time else 0

                    # Adjust arterial speeds
                    if link.volume_delay_func == 1:
                        cap = link.data2  # Capacity is batched in to ul2 during network building
                        mph = fmph / ((math.log(fmph) * 0.249) + 0.153 *
                                      (vol / (cap * 0.75))**3.98)
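                        # e.g. fmph = 30, vol = 900, cap = 1000:
                        # 30 / (ln(30)*0.249 + 0.153*(900/750.0)**3.98) ~ 25.8 mph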

                    # Write speeds to appropriate dicts
                    link_speeds[tod][link.id] = (fmph, mph)
                    if link.j_node.id in toll_link_inodes:
                        toll_link_id = toll_link_inodes[link.j_node.id]
                        toll_link_speeds[tod][toll_link_id] = link_speeds[tod][
                            link.id]

            # Merge toll TOD dict into non-toll TOD dict
            link_speeds[tod].update(toll_link_speeds[tod])

        emmebank.dispose()  # Close Emmebank, remove lock
        return link_speeds
Example #16
def main():
    print 'creating daily bank'
    #Use a copy of an existing bank for the daily bank
    copy_emmebank('Banks/7to8', 'Banks/Daily')

    daily_emmebank = _emmebank.Emmebank(r'Banks/Daily/emmebank')
    # Set the emmebank title
    daily_emmebank.title = 'daily'
    daily_scenario = daily_emmebank.scenario(1002)
    daily_network = daily_scenario.get_network()

    matrix_dict = text_to_dictionary('demand_matrix_dictionary')
    uniqueMatrices = set(matrix_dict.values())

    ################## delete all matrices #################

    for matrix in daily_emmebank.matrices():
        daily_emmebank.delete_matrix(matrix.id)

    ################ create new matrices in daily emmebank for trip tables only ##############

    for unique_name in uniqueMatrices:
        daily_matrix = daily_emmebank.create_matrix(
            daily_emmebank.available_matrix_identifier(
                'FULL'))  # 'FULL' means a full (zone-by-zone) trip table
        daily_matrix.name = unique_name

    daily_matrix_dict = {}
    for matrix in daily_emmebank.matrices():
        daily_arr = matrix.get_numpy_data()
        daily_matrix_dict[matrix.name] = daily_arr

    time_period_list = []

    for tod, time_period in sound_cast_net_dict.iteritems():
        path = os.path.join('Banks', tod, 'emmebank')
        bank = _emmebank.Emmebank(path)
        scenario = bank.scenario(1002)
        network = scenario.get_network()
        # Trip  table stuff:
        for matrix in bank.matrices():
            if matrix.name in daily_matrix_dict:
                hourly_arr = matrix.get_numpy_data()
                daily_matrix_dict[
                    matrix.name] = daily_matrix_dict[matrix.name] + hourly_arr

        # Network stuff:
        if len(time_period_list) == 0:
            daily_network = network
            time_period_list.append(time_period)
        elif time_period not in time_period_list:
            time_period_list.append(time_period)
            daily_network = merge_networks(daily_network, network)
    daily_scenario.publish_network(daily_network, resolve_attributes=True)

    # Write daily trip tables:
    for matrix in daily_emmebank.matrices():
        matrix.set_numpy_data(daily_matrix_dict[matrix.name])

    for extra_attribute in daily_scenario.extra_attributes():
        if extra_attribute not in keep_atts:
            daily_scenario.delete_extra_attribute(extra_attribute)
    daily_volume_attr = daily_scenario.create_extra_attribute('LINK', '@tveh')
    daily_network = daily_scenario.get_network()

    for tod, time_period in sound_cast_net_dict.iteritems():
        path = os.path.join('Banks', tod, 'emmebank')
        bank = _emmebank.Emmebank(path)
        scenario = bank.scenario(1002)
        network = scenario.get_network()
        if daily_scenario.extra_attribute('@v' + tod[:4]):
            daily_scenario.delete_extra_attribute('@v' + tod[:4])
        attr = daily_scenario.create_extra_attribute('LINK', '@v' + tod[:4])
        values = scenario.get_attribute_values('LINK', ['@tveh'])
        daily_scenario.set_attribute_values('LINK', [attr], values)
        #daily_scenario.publish_network(daily_network)
        #daily_network = daily_scenario.get_network()

    daily_network = daily_scenario.get_network()
    attr_list = ['@tv' + x for x in tods]

    for link in daily_network.links():
        for item in tods:
            link['@tveh'] = link['@tveh'] + link['@v' + item[:4]]
    daily_scenario.publish_network(daily_network, resolve_attributes=True)

    ######################## Validate results ##########################

    zone1 = 100
    zone2 = 100

    for matrix1 in daily_emmebank.matrices():
        NAME = matrix1.name
        a = 0
        for tod, time_period in sound_cast_net_dict.iteritems():
            path = os.path.join('Banks', tod, 'emmebank')
            bank = _emmebank.Emmebank(path)
            for matrix2 in bank.matrices():
                if matrix2.name == NAME:
                    my_arr = matrix2.get_numpy_data()
                    a += my_arr[zone1][zone2]

    print 'daily bank created'

    # Write daily link-level results
    my_project = EmmeProject(network_summary_project)
    export_link_values(my_project)
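
# copy_emmebank() is defined elsewhere in the model system. A minimal sketch,
# assuming it simply clones the bank directory on disk (the source bank must
# not be locked by an open Emme session):
import shutil

def copy_emmebank(src_dir, dst_dir):
    if os.path.exists(dst_dir):
        shutil.rmtree(dst_dir)
    shutil.copytree(src_dir, dst_dir)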
Example #17
    def setup_remote_database(self, src_scenarios, periods, remote_num,
                              msa_iteration):
        with _m.logbook_trace("Set up remote database #%s for %s" %
                              (remote_num, ", ".join(periods))):
            init_matrices = _m.Modeller().tool(
                "sandag.initialize.initialize_matrices")
            create_function = _m.Modeller().tool(
                "inro.emme.data.function.create_function")
            src_emmebank = src_scenarios[0].emmebank
            remote_db_dir = _join(self._path, "emme_project",
                                  "Database_remote" + str(remote_num))
            if msa_iteration == 1:
                # Create and initialize database at first iteration, overwrite existing
                if os.path.exists(remote_db_dir):
                    _shutil.rmtree(remote_db_dir)
                    _time.sleep(1)
                os.mkdir(remote_db_dir)
                dimensions = src_emmebank.dimensions
                dimensions["scenarios"] = len(src_scenarios)
                remote_emmebank = _eb.create(_join(remote_db_dir, "emmebank"),
                                             dimensions)
                try:
                    remote_emmebank.title = src_emmebank.title
                    remote_emmebank.coord_unit_length = src_emmebank.coord_unit_length
                    remote_emmebank.unit_of_length = src_emmebank.unit_of_length
                    remote_emmebank.unit_of_cost = src_emmebank.unit_of_cost
                    remote_emmebank.unit_of_energy = src_emmebank.unit_of_energy
                    remote_emmebank.use_engineering_notation = src_emmebank.use_engineering_notation
                    remote_emmebank.node_number_digits = src_emmebank.node_number_digits

                    for src_scen in src_scenarios:
                        remote_scen = remote_emmebank.create_scenario(
                            src_scen.id)
                        remote_scen.title = src_scen.title
                        for attr in sorted(src_scen.extra_attributes(),
                                           key=lambda x: x._id):
                            dst_attr = remote_scen.create_extra_attribute(
                                attr.type, attr.name, attr.default_value)
                            dst_attr.description = attr.description
                        for field in src_scen.network_fields():
                            remote_scen.create_network_field(
                                field.type, field.name, field.atype,
                                field.description)
                        remote_scen.has_traffic_results = src_scen.has_traffic_results
                        remote_scen.has_transit_results = src_scen.has_transit_results
                        remote_scen.publish_network(src_scen.get_network())
                    for function in src_emmebank.functions():
                        create_function(function.id, function.expression,
                                        remote_emmebank)
                    init_matrices(["traffic_skims", "traffic_demand"], periods,
                                  remote_scen)
                finally:
                    remote_emmebank.dispose()

            src_scen = src_scenarios[0]
            with _m.logbook_trace("Copy demand matrices to remote database"):
                with _eb.Emmebank(_join(remote_db_dir,
                                        "emmebank")) as remote_emmebank:
                    demand_matrices = init_matrices.get_matrix_names(
                        "traffic_demand", periods, src_scen)
                    for matrix_name in demand_matrices:
                        matrix = remote_emmebank.matrix(matrix_name)
                        src_matrix = src_emmebank.matrix(matrix_name)
                        if matrix.type == "SCALAR":
                            matrix.data = src_matrix.data
                        else:
                            matrix.set_data(src_matrix.get_data(src_scen.id),
                                            src_scen.id)
            skim_matrices = init_matrices.get_matrix_names(
                "traffic_skims", periods, src_scen)
            return remote_db_dir, skim_matrices
Example #18
    def __call__(self,
                 main_directory,
                 scenario_id,
                 scenario_title,
                 emmebank_title,
                 num_processors,
                 select_link=None,
                 periods=["EA", "AM", "MD", "PM", "EV"],
                 username=None,
                 password=None):
        attributes = {
            "main_directory": main_directory,
            "scenario_id": scenario_id,
            "scenario_title": scenario_title,
            "emmebank_title": emmebank_title,
            "num_processors": num_processors,
            "select_link": select_link,
            "periods": periods,
            "username": username,
        }
        gen_utils.log_snapshot("Master run model", str(self), attributes)

        modeller = _m.Modeller()
        copy_scenario = modeller.tool("inro.emme.data.scenario.copy_scenario")
        import_network = modeller.tool("sandag.import.import_network")
        init_transit_db = modeller.tool(
            "sandag.initialize.initialize_transit_database")
        init_matrices = modeller.tool("sandag.initialize.initialize_matrices")
        import_demand = modeller.tool("sandag.import.import_seed_demand")
        build_transit_scen = modeller.tool(
            "sandag.assignment.build_transit_scenario")
        transit_assign = modeller.tool("sandag.assignment.transit_assignment")
        run_truck = modeller.tool("sandag.model.truck.run_truck_model")
        external_internal = modeller.tool("sandag.model.external_internal")
        external_external = modeller.tool("sandag.model.external_external")
        import_auto_demand = modeller.tool("sandag.import.import_auto_demand")
        import_transit_demand = modeller.tool(
            "sandag.import.import_transit_demand")
        export_transit_skims = modeller.tool(
            "sandag.export.export_transit_skims")
        export_network_data = modeller.tool(
            "sandag.export.export_data_loader_network")
        export_matrix_data = modeller.tool(
            "sandag.export.export_data_loader_matrices")
        export_tap_adjacent_lines = modeller.tool(
            "sandag.export.export_tap_adjacent_lines")
        export_for_commercial_vehicle = modeller.tool(
            "sandag.export.export_for_commercial_vehicle")

        utils = modeller.module('sandag.utilities.demand')
        load_properties = modeller.tool('sandag.utilities.properties')

        self.username = username
        self.password = password
        self._path = main_directory
        drive, path_no_drive = os.path.splitdrive(main_directory)
        path_forward_slash = path_no_drive.replace("\\", "/")
        input_dir = _join(main_directory, "input")
        input_truck_dir = _join(main_directory, "input_truck")
        output_dir = _join(main_directory, "output")
        main_emmebank = _eb.Emmebank(
            _join(main_directory, "emme_project", "Database", "emmebank"))
        if emmebank_title:
            main_emmebank.title = emmebank_title
        external_zones = "1-12"

        props = load_properties(
            _join(main_directory, "conf", "sandag_abm.properties"))
        props.set_year_specific_properties(
            _join(main_directory, "input", "parametersByYears.csv"))
        props.save()
        # Log current state of props file for debugging of UI / file sync issues
        attributes = dict((name, props["RunModel." + name])
                          for name in self._run_model_names)
        _m.logbook_write("SANDAG properties file", attributes=attributes)
        if self._properties:  # Tool has been called via the UI
            # Compare UI values and file values to make sure they are the same
            error_text = (
                "Different value found in sandag_abm.properties than specified in UI for '%s'. "
                "Close sandag_abm.properties if open in any text editor, check UI and re-run."
            )
            for name in self._run_model_names:
                if getattr(self, name) != props["RunModel." + name]:
                    raise Exception(error_text % name)

        scenarioYear = str(props["scenarioYear"])
        startFromIteration = props["RunModel.startFromIteration"]
        precision = props["RunModel.MatrixPrecision"]
        minSpaceOnC = props["RunModel.minSpaceOnC"]
        sample_rate = props["sample_rates"]
        end_iteration = len(sample_rate)
        scale_factor = props["cvm.scale_factor"]

        period_ids = list(enumerate(periods, start=int(scenario_id) + 1))

        skipInitialization = props["RunModel.skipInitialization"]
        deleteAllMatrices = props["RunModel.deleteAllMatrices"]
        skipCopyWarmupTripTables = props["RunModel.skipCopyWarmupTripTables"]
        skipCopyBikeLogsum = props["RunModel.skipCopyBikeLogsum"]
        skipCopyWalkImpedance = props["RunModel.skipCopyWalkImpedance"]
        skipWalkLogsums = props["RunModel.skipWalkLogsums"]
        skipBikeLogsums = props["RunModel.skipBikeLogsums"]
        skipBuildNetwork = props["RunModel.skipBuildNetwork"]
        skipHighwayAssignment = props["RunModel.skipHighwayAssignment"]
        skipTransitSkimming = props["RunModel.skipTransitSkimming"]
        skipCoreABM = props["RunModel.skipCoreABM"]
        skipOtherSimulateModel = props["RunModel.skipOtherSimulateModel"]
        skipCTM = props["RunModel.skipCTM"]
        skipEI = props["RunModel.skipEI"]
        skipExternal = props["RunModel.skipExternalExternal"]
        skipTruck = props["RunModel.skipTruck"]
        skipTripTableCreation = props["RunModel.skipTripTableCreation"]
        skipFinalHighwayAssignment = props[
            "RunModel.skipFinalHighwayAssignment"]
        skipFinalTransitAssignment = props[
            "RunModel.skipFinalTransitAssignment"]
        skipDataExport = props["RunModel.skipDataExport"]
        skipDataLoadRequest = props["RunModel.skipDataLoadRequest"]
        skipDeleteIntermediateFiles = props[
            "RunModel.skipDeleteIntermediateFiles"]
        skipTransitShed = props["RunModel.skipTransitShed"]
        transitShedThreshold = props["transitShed.threshold"]
        transitShedTOD = props["transitShed.TOD"]

        travel_modes = ["auto", "tran", "nmot", "othr"]
        core_abm_files = ["Trips*.omx", "InternalExternalTrips*.omx"]
        core_abm_files = [
            mode + name for name in core_abm_files for mode in travel_modes
        ]
        smm_abm_files = [
            "AirportTrips*.omx", "CrossBorderTrips*.omx", "VisitorTrips*.omx"
        ]
        smm_abm_files = [
            mode + name for name in smm_abm_files for mode in travel_modes
        ]

        relative_gap = props["convergence"]
        max_assign_iterations = 1000
        mgra_lu_input_file = props["mgra.socec.file"]

        with _m.logbook_trace("Setup and initialization"):
            self.set_global_logbook_level(props)

            # Swap Server Configurations
            self.run_proc("serverswap.bat",
                          [drive, path_no_drive, path_forward_slash],
                          "Run ServerSwap")
            self.check_for_fatal(
                _join(main_directory, "logFiles", "serverswap.log"),
                "ServerSwap failed! Open logFiles/serverswap.log for details.")
            self.check_free_space(minSpaceOnC)
            self.run_proc(
                "checkAtTransitNetworkConsistency.cmd",
                [drive, path_forward_slash],
                "Checking if AT and Transit Networks are consistent")
            self.check_for_fatal(
                _join(main_directory, "logFiles", "AtTransitCheck_event.log"),
                "AT and Transit network consistency checking failed! Open AtTransitCheck_event.log for details."
            )

            if startFromIteration == 1:  # only run the setup / init steps if starting from iteration 1
                if not skipWalkLogsums:
                    self.run_proc("runSandagWalkLogsums.cmd",
                                  [drive, path_forward_slash],
                                  "Walk - create AT logsums and impedances")
                if not skipCopyWalkImpedance:
                    self.copy_files([
                        "walkMgraEquivMinutes.csv",
                        "walkMgraTapEquivMinutes.csv"
                    ], input_dir, output_dir)
                if not skipBikeLogsums:
                    self.run_proc("runSandagBikeLogsums.cmd",
                                  [drive, path_forward_slash],
                                  "Bike - create AT logsums and impedances")
                if not skipCopyBikeLogsum:
                    self.copy_files(
                        ["bikeMgraLogsum.csv", "bikeTazLogsum.csv"], input_dir,
                        output_dir)

                if not skipBuildNetwork:
                    base_scenario = import_network(
                        source=input_dir,
                        merged_scenario_id=scenario_id,
                        title=scenario_title,
                        data_table_name=scenarioYear,
                        overwrite=True,
                        emmebank=main_emmebank)
                    export_tap_adjacent_lines(
                        _join(output_dir, "tapLines.csv"), base_scenario)
                    # initialize per time-period scenarios
                    for number, period in period_ids:
                        title = "%s - %s assign" % (base_scenario.title,
                                                    period)
                        copy_scenario(base_scenario,
                                      number,
                                      title,
                                      overwrite=True)
                else:
                    base_scenario = main_emmebank.scenario(scenario_id)

                if not skipInitialization:
                    # initialize traffic demand, skims, truck, CV, EI, EE matrices
                    traffic_components = [
                        "traffic_skims", "truck_model",
                        "commercial_vehicle_model", "external_internal_model",
                        "external_external_model"
                    ]
                    if not skipCopyWarmupTripTables:
                        traffic_components.append("traffic_demand")
                    init_matrices(traffic_components, periods, base_scenario,
                                  deleteAllMatrices)

                    transit_scenario = init_transit_db(base_scenario)
                    transit_emmebank = transit_scenario.emmebank
                    transit_components = ["transit_skims"]
                    if not skipCopyWarmupTripTables:
                        transit_components.append("transit_demand")
                    init_matrices(transit_components, periods,
                                  transit_scenario, deleteAllMatrices)
                else:
                    transit_emmebank = _eb.Emmebank(
                        _join(main_directory, "emme_project",
                              "Database_transit", "emmebank"))
                    transit_scenario = transit_emmebank.scenario(
                        base_scenario.number)

                if not skipCopyWarmupTripTables:
                    # import seed auto demand and seed truck demand
                    for period in periods:
                        omx_file = _join(input_dir, "trip_%s.omx" % period)
                        import_demand(omx_file, "AUTO", period, base_scenario)
                        import_demand(omx_file, "TRUCK", period, base_scenario)
            else:
                base_scenario = main_emmebank.scenario(scenario_id)
                transit_emmebank = _eb.Emmebank(
                    _join(main_directory, "emme_project", "Database_transit",
                          "emmebank"))
                transit_scenario = transit_emmebank.scenario(
                    base_scenario.number)

        # Note: iteration indexes from 0, msa_iteration indexes from 1
        for iteration in range(startFromIteration - 1, end_iteration):
            msa_iteration = iteration + 1
            with _m.logbook_trace("Iteration %s" % msa_iteration):
                if not skipCoreABM[iteration] or not skipOtherSimulateModel[
                        iteration]:
                    self.run_proc("runMtxMgr.cmd",
                                  [drive, drive + path_no_drive],
                                  "Start matrix manager")
                    self.run_proc("runDriver.cmd",
                                  [drive, drive + path_no_drive],
                                  "Start JPPF Driver")
                    self.run_proc("StartHHAndNodes.cmd",
                                  [drive, path_no_drive],
                                  "Start HH Manager, JPPF Driver, and nodes")

                if not skipHighwayAssignment[iteration]:
                    # run traffic assignment
                    # export traffic skims
                    with _m.logbook_trace("Traffic assignment and skims"):
                        self.run_traffic_assignments(base_scenario, period_ids,
                                                     msa_iteration,
                                                     relative_gap,
                                                     max_assign_iterations,
                                                     num_processors)
                    self.run_proc("CreateD2TAccessFile.bat",
                                  [drive, path_forward_slash],
                                  "Create drive to transit access file",
                                  capture_output=True)

                if not skipTransitSkimming[iteration]:
                    # run transit assignment
                    # export transit skims
                    with _m.logbook_trace("Transit assignments and skims"):
                        for number, period in period_ids:
                            src_period_scenario = main_emmebank.scenario(
                                number)
                            transit_assign_scen = build_transit_scen(
                                period=period,
                                base_scenario=src_period_scenario,
                                transit_emmebank=transit_emmebank,
                                scenario_id=src_period_scenario.id,
                                scenario_title="%s %s transit assign" %
                                (base_scenario.title, period),
                                data_table_name=scenarioYear,
                                overwrite=True)
                            transit_assign(period,
                                           transit_assign_scen,
                                           data_table_name=scenarioYear,
                                           skims_only=True,
                                           num_processors=num_processors)

                        omx_file = _join(output_dir, "transit_skims.omx")
                        export_transit_skims(omx_file, periods,
                                             transit_scenario)

                # For each step, move trip matrices so the run will stop if the
                # CT-RAMP model doesn't produce csv/omx files for assignment;
                # also needed as CT-RAMP does not overwrite existing files
                if not skipCoreABM[iteration]:
                    self.remove_prev_iter_files(core_abm_files, output_dir,
                                                iteration)
                    self.run_proc("runSandagAbm_SDRM.cmd", [
                        drive, drive + path_forward_slash,
                        sample_rate[iteration], msa_iteration
                    ],
                                  "Java-Run CT-RAMP",
                                  capture_output=True)
                if not skipOtherSimulateModel[iteration]:
                    self.remove_prev_iter_files(smm_abm_files, output_dir,
                                                iteration)
                    self.run_proc(
                        "runSandagAbm_SMM.cmd", [
                            drive, drive + path_forward_slash,
                            sample_rate[iteration], msa_iteration
                        ],
                        "Java-Run airport model, visitor model, cross-border model",
                        capture_output=True)
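
                # remove_prev_iter_files is defined elsewhere in this class; a
                # minimal sketch consistent with its use above would delete each
                # listed output so a stale file from the previous iteration
                # cannot be mistaken for fresh CT-RAMP output:
                #
                #     def remove_prev_iter_files(self, file_names, output_dir, iteration):
                #         if iteration > 0:
                #             for name in file_names:
                #                 path = _join(output_dir, name)
                #                 if os.path.exists(path):
                #                     os.remove(path)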

                if not skipCTM[iteration]:
                    export_for_commercial_vehicle(output_dir, base_scenario)
                    self.run_proc("cvm.bat", [
                        drive, path_no_drive, path_forward_slash, scale_factor,
                        mgra_lu_input_file, "tazcentroids_cvm.csv"
                    ],
                                  "Commercial vehicle model",
                                  capture_output=True)
                if msa_iteration == startFromIteration:
                    external_zones = "1-12"
                    if not skipTruck[iteration]:
                        # run truck model (generate truck trips)
                        run_truck(True, input_dir, input_truck_dir,
                                  num_processors, base_scenario)
                    # run EI model "US to SD External Trip Model"
                    if not skipEI[iteration]:
                        external_internal(input_dir, base_scenario)
                    # run EE model
                    if not skipExternal[iteration]:
                        external_external(input_dir, external_zones,
                                          base_scenario)

                # import demand from all sub-market models from CT-RAMP and
                #       add CV trips to auto demand
                #       add EE and EI trips to auto demand
                if not skipTripTableCreation[iteration]:
                    import_auto_demand(output_dir, external_zones,
                                       num_processors, base_scenario)

        if not skipFinalHighwayAssignment:
            with _m.logbook_trace("Final traffic assignments"):
                final_iteration = 4
                self.run_traffic_assignments(base_scenario, period_ids,
                                             final_iteration, relative_gap,
                                             max_assign_iterations,
                                             num_processors, select_link)
                # Final iteration is assignment only, no skims

        if not skipFinalTransitAssignment:
            import_transit_demand(output_dir, transit_scenario)
            with _m.logbook_trace("Final transit assignments"):
                for number, period in period_ids:
                    src_period_scenario = main_emmebank.scenario(number)
                    transit_assign_scen = build_transit_scen(
                        period=period,
                        base_scenario=src_period_scenario,
                        transit_emmebank=transit_emmebank,
                        scenario_id=src_period_scenario.id,
                        scenario_title="%s - %s transit assign" %
                        (base_scenario.title, period),
                        data_table_name=scenarioYear,
                        overwrite=True)
                    transit_assign(period,
                                   transit_assign_scen,
                                   data_table_name=scenarioYear,
                                   num_processors=num_processors)
                omx_file = _join(output_dir, "transit_skims.omx")
                export_transit_skims(omx_file,
                                     periods,
                                     transit_scenario,
                                     big_to_zero=True)
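
                # big_to_zero is assumed to replace the very large "no transit
                # path" placeholder values with zero before the OMX export, so
                # downstream tools do not read them as real skim values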

        if not skipTransitShed:
            # write walk and drive transit sheds
            self.run_proc("runtransitreporter.cmd", [
                drive, path_forward_slash, transitShedThreshold, transitShedTOD
            ],
                          "Create walk and drive transit sheds",
                          capture_output=True)

        if not skipDataExport:
            export_network_data(main_directory, scenario_id, main_emmebank,
                                transit_emmebank, num_processors)
            export_matrix_data(output_dir, base_scenario, transit_scenario)
            # export core ABM data
            self.run_proc("DataExporter.bat", [drive, path_no_drive],
                          "Export core ABM data",
                          capture_output=True)
        if not skipDataLoadRequest:
            self.run_proc("DataLoadRequest.bat", [
                drive + path_no_drive, end_iteration, scenarioYear,
                sample_rate[end_iteration - 1]
            ], "Data load request")

        # delete trip table files in the iteration subfolders if the model finishes without crashing
        if not skipDeleteIntermediateFiles:
            for msa_iteration in range(startFromIteration, end_iteration + 1):
                self.delete_files([
                    "auto*Trips*.omx", "tran*Trips*.omx", "nmot*.omx",
                    "othr*.omx", "trip*.omx"
                ], _join(output_dir, "iter%s" % (msa_iteration)))
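
        # delete_files is defined elsewhere in this class; a minimal sketch,
        # assuming it expands each glob pattern within the given folder and
        # removes the matches:
        #
        #     import glob
        #     def delete_files(self, patterns, folder):
        #         for pattern in patterns:
        #             for path in glob.glob(_join(folder, pattern)):
        #                 os.remove(path)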

        # terminate all java processes
        _subprocess.call("taskkill /F /IM java.exe")
Example #19
stops_list = []
trips_list = []
trips_ft_list = []
shapes_list = []
routes_list = []
routes_ft_list = []
fare_rules = FareRules()

# Generates a unique ID
id_generator = generate_unique_id(range(1, 999999))
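
# generate_unique_id is assumed to be a plain generator over the supplied
# range, handing back the next unused integer ID on each next() call, e.g.:
#
#     def generate_unique_id(sequence):
#         for unique_id in sequence:
#             yield unique_id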

# Load all the networks into the network_dict
# e.g. highway_assignment_tod = {6: '6to7'}

for tod in highway_assignment_tod.itervalues():
    with _eb.Emmebank(banks_path + tod + '/emmebank') as emmebank:
        current_scenario = emmebank.scenario(1002)
        network = current_scenario.get_network()
        network_dict[tod] = network
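
# network_dict now maps each time-of-day label (e.g. '6to7') to its loaded
# Emme network object for the per-period processing below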

for tod, my_dict in transit_network_tod.iteritems():
    # A dictionary to hold an instance of GTFS_Utilities for each feed
    gtfs_dict = {}
    # Populate gtfs_dict
    for feed in schedule_tuples:
        gtfs_utils = GTFS_Utilities(inputs_path + 'published_gtfs/' + feed[0],
                                    my_dict['start_time'], my_dict['end_time'],
                                    feed[1])
        gtfs_dict[feed[0]] = gtfs_utils

    # Get the am or md transit network:
Example #20
def daily_counts(writer, my_project):
    """Export daily network volumes and compare to observed."""

    # Load observed data
    count_id_df = pd.read_csv(r'inputs/base_year/screenline_count_ids.txt',
                              sep=' ',
                              header=None,
                              names=['NewINode', 'NewJNode', 'ScreenLineID'])
    observed_count_df = pd.read_csv(
        r'inputs/base_year/observed_daily_counts.csv')
    count_id_df = count_id_df.merge(observed_count_df,
                                    how='left',
                                    on='ScreenLineID')
    # open the daily bank if it exists
    if os.path.isfile(r'Banks/Daily/emmebank'):
        bank = _eb.Emmebank(r'Banks/Daily/emmebank')
        scenario = bank.scenario(1002)

        # Add/refresh screenline ID link attribute
        if scenario.extra_attribute('@scrn'):
            scenario.delete_extra_attribute('@scrn')
        scenario.create_extra_attribute('LINK', '@scrn')

        # Add/refresh observed daily count link attribute
        if scenario.extra_attribute('@count'):
            scenario.delete_extra_attribute('@count')
        scenario.create_extra_attribute('LINK', '@count')

        network = scenario.get_network()

        inode_list = []
        jnode_list = []
        scrn_id = []
        facility_list = []
        observed_volume = []
        model_volume = []

        for _, row in count_id_df.iterrows():
            inode = int(row['NewINode'])
            jnode = int(row['NewJNode'])
            link = network.link(inode, jnode)
            if link is not None:
                link['@scrn'] = row['ScreenLineID']
                link['@count'] = row['Year_2014']

                inode_list.append(inode)
                jnode_list.append(jnode)
                facility_list.append(link['data3'])
                scrn_id.append(link['@scrn'])
                observed_volume.append(link['@count'])
                model_volume.append(link['@tveh'])
        scenario.publish_network(network)

        df = pd.DataFrame([
            inode_list, jnode_list, facility_list, model_volume, scrn_id,
            observed_volume
        ]).T
        df.columns = ['i', 'j', 'ul3', '@tveh', '@scrn', 'count']

        df.to_excel(excel_writer=writer, sheet_name='Daily Counts')

        # Export truck trip tables
        # for matrix_name in ['mfmetrk','mfhvtrk']:
        #     matrix_id = bank.matrix(matrix_name).id
        #     emme_matrix = bank.matrix(matrix_id)
        #     matrix_data = emme_matrix.get_data()
        #     np_matrix = np.matrix(matrix_data.raw_data)
        #     df = pd.DataFrame(np_matrix)
        #     # Attach zone numbers
        #     # Look up zone ID from index location
        #     zones = my_project.current_scenario.zone_numbers
        #     dictZoneLookup = dict((index,value) for index,value in enumerate(zones))
        #     df.columns = [dictZoneLookup[i] for i in df.columns]
        #     df.index = [dictZoneLookup[i] for i in df.index.values]

        #     df.to_csv('outputs/'+matrix_name+'.csv')
    else:
        raise Exception('no daily bank found')
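
# Typical usage: daily_counts expects an open pandas ExcelWriter; the output
# path below is only illustrative.
#
#     writer = pd.ExcelWriter('outputs/daily_counts.xlsx')
#     daily_counts(writer, my_project)
#     writer.save()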
Example #21
def convert_transit_skims():
    my_store = open_h5_file(transit_h5_file)
    transit_paths_dict = {
        'am': 'am/all_mode/emmebank',
        'md': 'md/all_mode/emmebank'
    }

    # Transit skim name fragments mapped to their descriptions
    transit_skim_matrix_names = {
        'ivtwa': 'in vehicle time',
        'auxwa': 'walk time',
        'twtwa': 'total wait time',
        'farwa': 'fare',
        'nbdwa': "average boardings"
    }
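
    # The loops below repeatedly apply the same idiom: scale minutes to
    # hundredths, cap at the uint16 maximum (65535), and store as uint16 to
    # keep the HDF5 file small. A hypothetical helper capturing the pattern:
    #
    #     def to_uint16(raw_data, scale=100):
    #         scaled = np.matrix(raw_data) * scale
    #         capped = np.where(scaled > np.iinfo('uint16').max,
    #                           np.iinfo('uint16').max, scaled)
    #         return capped.astype('uint16')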

    for key, value in transit_paths_dict.iteritems():
        # If the skims group already exists, delete it and recreate it;
        # otherwise create it fresh
        if key in my_store:
            del my_store[key]
            create_h5_group(my_store, key)
            print "Group %s existed; deleted and recreated" % key
        else:
            create_h5_group(my_store, key)
            print "Group %s created" % key

        emmebank_4ktransit = _eb.Emmebank(transit4k_path + value)
        list_of_matrices = emmebank_4ktransit.matrices()
        # First extract the initial wait (iwtwa) matrix; it is needed later to
        # derive the transfer wait time
        for emme_matrix in list_of_matrices:
            if 'iwtwa' in emme_matrix.name:
                initial_wait_value = np.matrix(emme_matrix.raw_data) * 100
                initial_wait_value = np.where(
                    initial_wait_value > np.iinfo('uint16').max,
                    np.iinfo('uint16').max, initial_wait_value)
                # keep track of the initial wait matrix to use for later calcs
                my_store[key].create_dataset(
                    emme_matrix.name,
                    data=initial_wait_value.astype('uint16'),
                    compression='gzip')
                print emme_matrix.name

        list_of_matrices2 = emmebank_4ktransit.matrices()
        for emme_matrix2 in list_of_matrices2:
            for key1 in transit_skim_matrix_names:
                if key1 in emme_matrix2.name:
                    print key1
                    print emme_matrix2.name
                    if key1 != 'farwa':
                        matrix_value = np.matrix(emme_matrix2.raw_data) * 100
                    else:
                        # fare is already in cents, so no scaling
                        matrix_value = np.matrix(emme_matrix2.raw_data)

                    matrix_value = np.where(
                        matrix_value > np.iinfo('uint16').max,
                        np.iinfo('uint16').max, matrix_value)
                    print matrix_value[0, 0]
                    print np.amin(matrix_value)
                    print np.amax(matrix_value)
                    print np.mean(matrix_value)
                    # the transfer wait time matrix comes from subtracting the
                    # initial wait from the total wait
                    if key1 == 'twtwa':
                        matrix_value2 = np.subtract(matrix_value,
                                                    initial_wait_value)
                        print matrix_value2[0, 0]
                        print np.amin(matrix_value2)
                        print np.amax(matrix_value2)
                        print np.mean(matrix_value2)
                        my_store[key].create_dataset(
                            emme_matrix2.name,
                            data=matrix_value2.astype('uint16'),
                            compression='gzip')
                    else:
                        my_store[key].create_dataset(
                            emme_matrix2.name,
                            data=matrix_value.astype('uint16'),
                            compression='gzip')

    my_store.close()
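
# Typical usage, assuming transit_h5_file and transit4k_path are configured at
# module level:
#
#     convert_transit_skims()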