Example #1
 def get_travel_data_set(self, zone_set, matrix_attribute_name_map, bank_path, out_storage=None, matrices_created=False):
     """
     Returns a new travel data set containing the given set of emme/2 matrices
     populated from the emme/2 data bank. The columns in the travel data set are
     those given as attribute names in the map.
     If matrices_created is True, the matrix files are assumed to already exist in bank_path.
     """
     # Compute the from and to zone sets
     nzones = zone_set.size()
     comb_index = indices((nzones,nzones))
                                    
     table_name = 'storage'
     in_storage = StorageFactory().get_storage('dict_storage')
     in_storage.write_table(
             table_name=table_name,
             table_data={
                 'from_zone_id':zone_set.get_id_attribute()[comb_index[0].ravel()],
                 'to_zone_id':zone_set.get_id_attribute()[comb_index[1].ravel()],
                 }
         )
                                    
     travel_data_set = TravelDataDataset(in_storage=in_storage, 
         in_table_name=table_name, out_storage=out_storage)
     travel_data_set.load_dataset_if_not_loaded()
     max_zone_id = zone_set.get_id_attribute().max()
     for matrix_name in matrix_attribute_name_map.keys():
         self._put_one_matrix_into_travel_data_set(travel_data_set, max_zone_id, matrix_name, 
                                                  matrix_attribute_name_map[matrix_name], bank_path, matrices_created)
     return travel_data_set
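
A minimal usage sketch for the method above (the zone set, matrix map, and bank path are hypothetical placeholders, not values taken from the source):

    # Hypothetical call: map emme/2 matrix names to travel_data attribute names.
    matrix_map = {'au1tim': 'single_vehicle_to_work_travel_time'}
    tm_output = TravelModelOutput()
    travel_data = tm_output.get_travel_data_set(zone_set, matrix_map,
                                                bank_path='/path/to/emme2/bank')
    # travel_data holds one row per (from_zone_id, to_zone_id) pair and one
    # column per attribute name in matrix_map.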
Example #2
    def test_run(self):
        print "Entering test run"

        logger.log_status("Loading travel data: %s" % self.travel_data_source)
        # get travel data as an attribute matrix
        in_storage = csv_storage(storage_location=self.travel_data_source_dir)
        table_name = "travel_data"
        travel_data_attribute = "single_vehicle_to_work_travel_cost"
        travel_data_set = TravelDataDataset(in_storage=in_storage, in_table_name=table_name)
        travel_data_attribute_mat = travel_data_set.get_attribute_as_matrix(travel_data_attribute, fill=0)

        # determine location to store and read attribute matrix
        location1 = os.path.join(self.tempDir, "attrib_matrix1.txt")
        location2 = os.path.join(self.tempDir, "attrib_matrix2.txt")

        # store attribute matrix
        savetxt(location1, travel_data_attribute_mat, fmt="%i")
        savetxt(location2, travel_data_attribute_mat, fmt="%i")

        # read attribute matrix
        matrix1 = genfromtxt(location1)
        matrix2 = genfromtxt(location2)

        # compare both matrices
        result = matrix1 == matrix2

        self.assertTrue(result.all())

        print "Leaving test run"
Example #3
    def compare_travel_data_sets(self):

        # get copied travel data csv
        copied_travel_data_location = os.path.join(self.destination, "opus_matsim", "tmp")
        if not os.path.exists(copied_travel_data_location):
            raise StandardError("Travel data not found: %s" % copied_travel_data_location)
        logger.log_status("Get copied travel data: %s" % copied_travel_data_location)
        # convert travel data csv into travel data set matrix
        in_storage = csv_storage(storage_location=copied_travel_data_location)
        table_name = "travel_data"
        travel_data_attribute = "single_vehicle_to_work_travel_cost"
        travel_data_set = TravelDataDataset(in_storage=in_storage, in_table_name=table_name)
        travel_data_attribute_mat = travel_data_set.get_attribute_as_matrix(travel_data_attribute, fill=999)
        # get the existing travel data set and convert it into a travel data set matrix
        year = self.run_config["base_year"] + 2
        attribute_cache = AttributeCache(cache_directory=self.run_config["cache_directory"])
        cache_storage = attribute_cache.get_flt_storage_for_year(year)
        existing_travel_data_set = TravelDataDataset(in_storage=cache_storage, in_table_name=table_name)
        existing_travel_data_attribute_mat = existing_travel_data_set.get_attribute_as_matrix(
            travel_data_attribute, fill=999
        )

        from numpy import savetxt  # for debugging

        savetxt(os.path.join(self.destination, "origin_travel_data.txt"), travel_data_attribute_mat, fmt="%f")
        savetxt(os.path.join(self.destination, "existing_travel_data"), existing_travel_data_attribute_mat, fmt="%f")

        # compare both data set matrices
        compare = travel_data_attribute_mat == existing_travel_data_attribute_mat
        # return result
        return compare.all()
Example #4
    def get_travel_data_set(self, zone_set, matrix_attribute_name_map, 
                            out_storage=None, **kwargs):
        """
        Returns a new travel data set containing the given set of emme matrices 
        populated from an ascii file. The columns in the travel data set are
        those given as attribute names in the map.
        """
        # Compute the from and to zone sets
        nzones = zone_set.size()
        comb_index = indices((nzones,nzones))
                                       
        table_name = 'storage'
        in_storage = StorageFactory().get_storage('dict_storage')
        in_storage.write_table(
                table_name=table_name,
                table_data={
                    'from_zone_id':zone_set.get_id_attribute()[comb_index[0].ravel()].astype('int32'),
                    'to_zone_id':zone_set.get_id_attribute()[comb_index[1].ravel()].astype('int32'),
                    }
            )
                                       
        travel_data_set = TravelDataDataset(in_storage=in_storage, 
            in_table_name=table_name, out_storage=out_storage)
        travel_data_set.load_dataset_if_not_loaded()
        max_zone_id = zone_set.get_id_attribute().max()

        for matrix_name in matrix_attribute_name_map.keys():
            self._put_one_matrix_into_travel_data_set(travel_data_set, max_zone_id, matrix_name, 
                                                     matrix_attribute_name_map[matrix_name], **kwargs)
        return travel_data_set
Example #6
    def get_travel_data_set(self, zone_set, matrix_attribute_name_map, 
                            out_storage=None, **kwargs):
        """Create and return a travel data set containing the given set of emme matrices 
        populated from given storage. The columns in the travel data set are
        those given as attribute names in the map.
        
        Arguments:
        zone_set -- dataset of zones
        matrix_attribute_name_map -- dictionary of skim names and corresponding attribute names
        out_storage -- output storage for the resulting travel_data set
        kwargs -- must contain an in_storage argument holding the emme matrices
        """
        # Compute the from and to zone sets
        nzones = zone_set.size()
        comb_index = indices((nzones,nzones))
                                       
        table_name = 'storage'
        in_storage = StorageFactory().get_storage('dict_storage')
        in_storage.write_table(
                table_name=table_name,
                table_data={
                    'from_zone_id':zone_set.get_id_attribute()[comb_index[0].ravel()].astype('int32'),
                    'to_zone_id':zone_set.get_id_attribute()[comb_index[1].ravel()].astype('int32'),
                    }
            )
                                       
        travel_data_set = TravelDataDataset(in_storage=in_storage, 
            in_table_name=table_name, out_storage=out_storage)
        travel_data_set.load_dataset_if_not_loaded()
        max_zone_id = zone_set.get_id_attribute().max()

        for matrix_name in matrix_attribute_name_map.keys():
            self._put_one_matrix_into_travel_data_set(travel_data_set, max_zone_id, matrix_name, 
                                                     matrix_attribute_name_map[matrix_name], **kwargs)
        return travel_data_set
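
For reference, the test class in Example #29 below exercises this storage-based variant by passing the emme matrices through in_storage:

    # Call taken from the test in Example #29 (hdf5 bank storage assumed set up):
    matrix_attribute_map = {'au1tim': 'single_vehicle_to_work_travel_time',
                            'biketm': 'bike_to_work_travel_time'}
    tm_output = TravelModelOutput()
    travel_data_set = tm_output.get_travel_data_set(zone_set, matrix_attribute_map,
                                                    in_storage=bank_storage,
                                                    table_name='Bank1')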
Example #7
 def get_travel_data_from_travel_model(self, config, year, zone_set):
     """ Reads the output from the travel model and imports the fresh computed travel data attributes into the cache. 
     """
     logger.log_status('Starting GetMatsimDataIntoCache.get_travel_data...')
     
     #try: # tnicolai :for debugging
     #    import pydevd
     #    pydevd.settrace()
     #except: pass
     
     self.init(year, config)
     
     # tnicolai: experimental -> import workplace accessibility from matsim
     #self.get_workplace_accessibility_into_cache(year)
     
     # import travel data from matsim
     travel_data_set = TravelDataDataset( in_storage=self.in_storage, in_table_name=self.travel_data_table_name )
     
     # load actual travel data set from cache
     existing_travel_data_set = TravelDataDataset( in_storage=self.cache_storage, in_table_name=self.travel_data_table_name )
     
     # join remaining data set with imported travel data
     existing_travel_data_set.join(travel_data_set, travel_data_set.get_non_id_primary_attribute_names(),metadata=AttributeType.PRIMARY)
     
     # return new travel data set
     return existing_travel_data_set
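
The join above overwrites the matching travel_data columns of the cached set with the freshly imported values, matched on the (from_zone_id, to_zone_id) pair. A rough sketch of that behavior in plain Python (illustrative only, not the actual Dataset implementation):

    import numpy
    cached = {'from_zone_id': numpy.array([1, 1, 2]),
              'to_zone_id':   numpy.array([1, 2, 1]),
              'travel_time':  numpy.array([9.0, 9.0, 9.0])}
    imported = {(1, 2): 4.0, (2, 1): 5.0}  # fresh values keyed by OD pair
    for i in range(len(cached['travel_time'])):
        od = (cached['from_zone_id'][i], cached['to_zone_id'][i])
        if od in imported:
            cached['travel_time'][i] = imported[od]
    # cached['travel_time'] is now [9.0, 4.0, 5.0]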
Example #8
    def get_travel_data_from_travel_model(self, config, 
                                          year, zone_set, 
                                          tm_output_file="tm_output.txt",
                                          ):
        """
        
        Extracts a new travel data set from a given set of transcad matrices 
        by calling a pre-specified transcad macro.  
        The columns in the travel data set are those given in matrix_variable_map in travel_model_configuration.
        """
        tm_config = config['travel_model_configuration']

        tm_data_dir = os.path.join(tm_config['travel_model_base_directory'], tm_config[year]["data_exchange_dir"])
        tm_output_full_name = os.path.join(tm_data_dir, tm_output_file)
        matrix_attribute_name_map = tm_config['tm_to_urbansim_variable_mapping']
        
        transcad_file_location = run_get_file_location_macro(tm_config)
        
        matrices = []
        row_index_name, col_index_name = "ZoneID", "ZoneID"  #default values
        if matrix_attribute_name_map.has_key('row_index_name'):
            row_index_name = matrix_attribute_name_map['row_index_name']
        if matrix_attribute_name_map.has_key('col_index_name'):
            col_index_name = matrix_attribute_name_map['col_index_name']
            
        for key, val in matrix_attribute_name_map.iteritems():
            if (key != 'row_index_name') and (key != 'col_index_name'):
                if val.has_key('row_index_name'):
                    row_index_name = val['row_index_name']
                if val.has_key('col_index_name'):
                    col_index_name = val['col_index_name']
                matrix_file_name = transcad_file_location[key]  #replace internal matrix name with absolute file name
                matrices.append([matrix_file_name, row_index_name, col_index_name, val.items()])
                  
        macro_args =[ ("ExportTo", tm_output_full_name) ]
        macro_args.append(("Matrix", matrices))
        #for macroname, ui_db_file in tm_config['macro']['get_transcad_data_into_cache'].iteritems():
            #ui_db_file = os.path.join(tm_config['directory'], ui_db_file)
        macroname, ui_db_file = tm_config['macro']['get_transcad_data_into_cache'], tm_config['ui_file']
        run_transcad_macro(macroname, ui_db_file, macro_args)

        table_name = "travel_data"
        data_dict = self._read_macro_output_file(tm_output_full_name)
        #data_dict = self._seq_taz_to_zone_conversion(zone_set, data_dict)

        storage = StorageFactory().get_storage('dict_storage')
        storage.write_table(
            table_name=table_name,
            table_data=data_dict
            )
        travel_data_set = TravelDataDataset(in_storage=storage, 
                                            in_table_name=table_name)
        travel_data_set.size()
        return travel_data_set
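
Judging from the loop above, tm_to_urbansim_variable_mapping is expected to hold optional row/col index names plus one sub-dict per transcad matrix; a hypothetical reconstruction (all names illustrative, not from the source):

    tm_to_urbansim_variable_mapping = {
        'row_index_name': 'ZoneID',   # optional default for all matrices
        'col_index_name': 'ZoneID',
        'hwyam_sov': {                # internal transcad matrix name
            'sov time': 'am_single_vehicle_to_work_travel_time',
        },
    }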
Example #9
    def get_travel_data_from_travel_model(self, config, year, zone_set):
        """ Integrates modified travel times and pre-computed travel costs
            into the UrbanSim cache.
        """
        
        logger.log_status('Starting GetTestTravelDataIntoCache.get_travel_data...')
        
        # get the sensitivity test path as an anchor to determine the location of the MATSim travel_data file (see below).
        test_dir_path = test_dir.__path__[0]
        
        # for debugging
        try: #tnicolai
            import pydevd
            pydevd.settrace()
        except: pass

        # get the existing travel data from the current year
        logger.log_status('Loading travel data from UrbanSim cache (year:%i)' % year)
        table_name = "travel_data"
        cache_storage = AttributeCache().get_flt_storage_for_year(year)
        existing_travel_data_set = TravelDataDataset( in_storage=cache_storage, in_table_name=table_name )


        ###### modified travel time travel data
        logger.log_status('Integrating modified travel times of year %i for the next simulation year.' % year)
        input_directory = os.path.join( os.environ['OPUS_HOME'], "opus_matsim", "tmp" )
        logger.log_status("input_directory: " + input_directory )
        # location of the modified travel time travel_data
        in_storage = csv_storage(storage_location = input_directory)
        # create travel data set (travel times)
        travel_data_set = TravelDataDataset( in_storage=in_storage, in_table_name=table_name )

        # join the modified travel times with the travel data set of the current year
        existing_travel_data_set.join(travel_data_set, travel_data_set.get_non_id_primary_attribute_names(),metadata=AttributeType.PRIMARY)


        ##### pre-calculated MATSim travel data (travel costs)
#        logger.log_status('Integrating pre-calculated travel costs (MATSim) in year %i for next simulation year.')
#        input_directory = os.path.join( test_dir_path, 'data', 'travel_cost')
#        logger.log_status("input_directory: " + input_directory )
#        # check source file
#        if not os.path.exists( input_directory ):
#            print 'File not found! %s' % input_directory
#            sys.exit()
        # location of pre-calculated MATSim travel costs
#        in_storage = csv_storage(storage_location = input_directory)
        # create travel data set (travel costs)
#        travel_data_set = TravelDataDataset( in_storage=in_storage, in_table_name=table_name )

        # join travel data set from pre-calculated MATSim results
#        existing_travel_data_set.join(travel_data_set, travel_data_set.get_non_id_primary_attribute_names(),metadata=AttributeType.PRIMARY)

        
        return existing_travel_data_set
Example #10
    def get_travel_data_from_travel_model(self, config, year, zone_set):
        """
        Returns a new travel data set populated by a travel model
        """
        logger.log_status("Running GetTravelModelDataIntoCache with year %d" %
                          (year))
        logger.log_status(zone_set)
        tm_config = config['travel_model_configuration']

        base_dir = tm_config['travel_model_base_directory']
        run_dir = os.path.join(base_dir, tm_config[year]['year_dir'])
        in_storage = StorageFactory().get_storage('dict_storage')
        max_zone_id = zone_set.get_id_attribute().max()
        in_storage.write_table(table_name=self.TABLE_NAME,
                               table_data=self._read_skims(
                                   run_dir, max_zone_id))

        travel_data_set = TravelDataDataset(in_storage=in_storage,
                                            in_table_name=self.TABLE_NAME)
        # This isn't necessary
        # remove_index = where(logical_or(travel_data_set.get_attribute("from_zone_id")>max_zone_id,
        #                                travel_data_set.get_attribute("to_zone_id")>max_zone_id))[0]
        # travel_data_set.size()
        # travel_data_set.remove_elements(remove_index)
        return travel_data_set
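
The _read_skims helper is not shown here; from the write_table call above it must return a dict mapping column names to flat per-OD-pair arrays, roughly like this (hypothetical values):

    from numpy import array
    # Hypothetical _read_skims return value for a 2-zone skim:
    skims = {'from_zone_id': array([1, 1, 2, 2], dtype='int32'),
             'to_zone_id':   array([1, 2, 1, 2], dtype='int32'),
             'am_sov_travel_time': array([0.0, 12.5, 13.1, 0.0])}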
Example #12
 def get_travel_data_from_travel_model(self, config, year, zone_set):
     """ Reads the output from the travel model and imports the fresh computed travel data into the cache. 
     """
     logger.log_status('Starting GetMatsimDataIntoCache.get_travel_data...')
     # print >> sys.stderr, "MATSim replaces only _some_ of the columns of travel_data.  Yet, Urbansim does not truly merge them"
     # print >> sys.stderr, " but simply overwrites the columns, without looking for a different sequence of from_zone_id, to_zone_id"
     # solved 3dec08 by hana
     
     #try: # tnicolai :for debugging
     #    import pydevd
     #    pydevd.settrace()
     #except: pass
     
     self.init(year, config)
     
     # import parcel-based accessibilities from MATSim into parcel table
     if( self.__get_value_as_boolean('cell_based_accessibility', self.matsim_controler) ):
         self.get_parcel_based_accessibility_into_cache(year)
     # import zone-based accessibilities from MATSim into zones table
     if( self.__get_value_as_boolean('zone_based_accessibility', self.matsim_controler) ):
         self.get_zone_based_accessibility_into_cache(year)
     # import agent performances from MATSim into persons table
     if( self.__get_value_as_boolean( 'agent_performance', self.matsim_controler ) ):
         self.get_agent_performance_into_cache(year)
     
     # delete travel data attributes in the UrbanSim data store (see list above: "self.delete_travel_data_columns") -> (may cause errors in some models)
     # self.clear_cache_travel_data(year) 
     # load current travel data set from cache
     existing_travel_data_set = TravelDataDataset( in_storage=self.cache_storage, in_table_name=self.travel_data_table_name )
     # import zone2zone impedances from MATSim into travel_data table
     if(self.__get_value_as_boolean('zone2zone_impedance', self.matsim_controler)):
         logger.log_status('Importing zone to zone impedances from MATSim ...')
         travel_data_set = TravelDataDataset( in_storage=self.in_storage, in_table_name=self.travel_data_table_name )
         # tnicolai : Replace travel data table 
         # Case 1) Delete all 'columns' but the 'columns' passing to urbansim first. then no join operation is needed
         # Case 2) Join the data sets and delete unneeded 'columns' here
         
         ##TODO:This may not work or may be wrong after the id_names of travel_data 
         ##changed from ['from_zone_id', 'to_zone_id'] to _hidden_id (lmwang)
         # join current data set with imported matsim travel data
         existing_travel_data_set.join(travel_data_set, travel_data_set.get_non_id_primary_attribute_names(),metadata=AttributeType.PRIMARY)
         logger.log_status('Finished join operation for zone to zone impedances (travel_data_set).') 
         
     # return new travel data set
     return existing_travel_data_set
Example #13
    def get_travel_data_from_travel_model(self, config, year, zone_set):
        """
        """
        logger.log_status('Starting GetMatsimDataIntoCache.get_travel_data...')
        #        print >> sys.stderr, "MATSim replaces only _some_ of the columns of travel_data.  Yet, Urbansim does not truly merge them"
        #        print >> sys.stderr, " but simply overwrites the columns, without looking for a different sequence of from_zone_id, to_zone_id"
        # solved 3dec08 by hana

        input_directory = os.path.join(os.environ['OPUS_HOME'], "opus_matsim",
                                       "tmp")
        logger.log_status("input_directory: " + input_directory)

        in_storage = csv_storage(storage_location=input_directory)

        table_name = "travel_data"
        travel_data_set = TravelDataDataset(in_storage=in_storage,
                                            in_table_name=table_name)

        cache_storage = AttributeCache().get_flt_storage_for_year(year)
        existing_travel_data_set = TravelDataDataset(in_storage=cache_storage,
                                                     in_table_name=table_name)
        ##TODO:This may not work or may be wrong after the id_names of travel_data
        ##changed from ['from_zone_id', 'to_zone_id'] to _hidden_id (lmwang)
        existing_travel_data_set.join(
            travel_data_set,
            travel_data_set.get_non_id_primary_attribute_names(),
            metadata=AttributeType.PRIMARY)

        return existing_travel_data_set
Example #14
    def get_travel_data_from_travel_model(self, config, year, zone_set):
        """
        """
        logger.log_status('Starting GetMatsimDataIntoCache.get_travel_data...')

        input_directory = os.path.join(config['root'], 'opus_matsim', 'tmp')
        logger.log_status("input_directory: " + input_directory)

        in_storage = csv_storage(storage_location=input_directory)

        table_name = "travel_data"
        travel_data_set = TravelDataDataset(in_storage=in_storage,
                                            in_table_name=table_name)

        cache_storage = AttributeCache().get_flt_storage_for_year(year)
        existing_travel_data_set = TravelDataDataset(in_storage=cache_storage,
                                                     in_table_name=table_name)
        ##TODO:This may not work or may be wrong after the id_names of travel_data
        ##changed from ['from_zone_id', 'to_zone_id'] to _hidden_id (lmwang)
        existing_travel_data_set.join(
            travel_data_set,
            travel_data_set.get_non_id_primary_attribute_names(),
            metadata=AttributeType.PRIMARY)

        return existing_travel_data_set
Example #15
    def get_travel_data_from_travel_model(
        self,
        config,
        year,
        zone_set,
        tm_output_file="tm_output.txt",
    ):
        """
        
        Returns a new travel data set from a given set of transcad matrices 
        populated by a specified transcad macro.  
        The columns in the travel data set are those given in matrix_variable_map in travel_model_configuration.
        """
        tm_config = config['travel_model_configuration']

        tm_data_dir = os.path.join(tm_config['directory'], tm_config[year])
        tm_output_full_name = os.path.join(tm_data_dir, tm_output_file)
        matrix_attribute_name_map = tm_config[
            'tm_to_urbansim_variable_mapping']

        transcad_file_location = run_get_file_location_macro(tm_config)
        for matrix in matrix_attribute_name_map:
            # replace internal matrix name with absolute file name
            matrix[0] = transcad_file_location[matrix[0]]

        macro_args = [("ExportTo", tm_output_full_name)]
        macro_args.append(("Matrix", matrix_attribute_name_map))
        #for macroname, ui_db_file in tm_config['macro']['get_transcad_data_into_cache'].iteritems():
        #ui_db_file = os.path.join(tm_config['directory'], ui_db_file)
        macroname, ui_db_file = tm_config['macro'][
            'get_transcad_data_into_cache']
        run_transcad_macro(macroname, ui_db_file, macro_args)

        table_name = "travel_data"
        data_dict = self._read_macro_output_file(tm_output_full_name)
        data_dict = self._seq_taz_to_zone_conversion(zone_set, data_dict)

        storage = StorageFactory().get_storage('dict_storage')
        storage.write_table(table_name=table_name, table_data=data_dict)
        travel_data_set = TravelDataDataset(in_storage=storage,
                                            in_table_name=table_name)
        travel_data_set.size()
        return travel_data_set
Example #16
    def get_travel_data_from_travel_model(self, config, year, zone_set):
        """
        """        
        logger.log_status('Starting GetMatsimDataIntoCache.get_travel_data...')

        input_directory = os.path.join( config['root'], 'opus_matsim', 'tmp' )
        logger.log_status("input_directory: " + input_directory )

        in_storage = csv_storage(storage_location = input_directory)

        table_name = "travel_data"
        travel_data_set = TravelDataDataset( in_storage=in_storage, in_table_name=table_name )

        cache_storage = AttributeCache().get_flt_storage_for_year(year)
        existing_travel_data_set = TravelDataDataset( in_storage=cache_storage, in_table_name=table_name )
        ##TODO:This may not work or may be wrong after the id_names of travel_data 
        ##changed from ['from_zone_id', 'to_zone_id'] to _hidden_id (lmwang)
        existing_travel_data_set.join(travel_data_set, travel_data_set.get_non_id_primary_attribute_names(),metadata=AttributeType.PRIMARY)
        
        return existing_travel_data_set
Example #17
    def test_getting_emme2_data_into_travel_data_set(self):
        if self._has_travel_model:
            zone_storage = StorageFactory().get_storage('dict_storage')
            zone_table_name = 'zone'
            zone_storage.write_table(
                table_name=zone_table_name,
                table_data={
                    'zone_id': array([1, 2, 3]),
                    'travel_time_to_airport': ones((3, )),
                    'travel_time_to_cbd': ones((3, ))
                },
            )

            travel_data_storage = StorageFactory().get_storage('dict_storage')
            travel_data_table_name = 'travel_data'
            travel_data_storage.write_table(
                table_name=travel_data_table_name,
                table_data={
                    'from_zone_id': array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
                    'to_zone_id': array([1, 2, 3, 1, 2, 3, 1, 2, 3]),
                },
            )

            travel_data_set = TravelDataDataset(
                in_storage=travel_data_storage,
                in_table_name=travel_data_table_name)
            travel_data_set.get_id_attribute()
            tm_output = TravelModelOutput()
            tm_output._get_matrix_into_data_file('au1tim', 3,
                                                 self.real_bank_path)
            tm_output._put_one_matrix_into_travel_data_set(
                travel_data_set, 3, 'au1tim',
                'single_vehicle_to_work_travel_time', self.real_bank_path)
            self.assertEqual(
                travel_data_set.get_attribute(
                    'single_vehicle_to_work_travel_time').size, 9)

        else:
            logger.log_warning('Test skipped. TRAVELMODELROOT environment '
                               'variable not found.')
Example #18
 def test_run(self):
     print "Entering test run"
     
     # This test checks if the pre-computed MATSim travel data 
     # is loaded correctly into UrbanSim. Therefore the UrbanSim
     # travel data matrix is converted into a numpy array and two 
     # predefined values are checked whether they match with MATSim data.
     #
     # Here an example:
     # my_list = [[1,2,3],
     #           [4,5,6],
     #           [7,8,9]]
     #
     # my_list[0][1] should be = 2
     # my_list[2][2] should be = 9
     
     table_name = 'travel_data'
     travel_data_attribute = 'single_vehicle_to_work_travel_cost'
     # location of pre-calculated MATSim travel costs
     in_storage = csv_storage(storage_location = self.input_directory)
     # create travel data set (travel costs)
     travel_data_set = TravelDataDataset( in_storage=in_storage, in_table_name=table_name )
     travel_data_attribute_mat = travel_data_set.get_attribute_as_matrix(travel_data_attribute, fill=31)
     
     # converting from numpy array into a 2d list
     travel_list = numpy.atleast_2d(travel_data_attribute_mat).tolist()
     
     # get two values for validation
     value1 = int(travel_list[1][1]) # should be = 0
     value2 = int(travel_list[2][1]) # should be = 120
     
     logger.log_status('First validation value should be 0. Current value is %i' % value1)
     logger.log_status('Second validation value should be 120. Current value is %i' % value2)
     
     self.assertTrue( value1 == 0 )
     self.assertTrue( value2 == 120 )
         
     # self.dump_travel_list(travel_list) # for debugging
     
     print "Leaving test run"
Example #19
    def get_travel_data_from_travel_model(self, config, 
                                          year, zone_set, 
                                          tm_output_file="tm_output.txt",
                                          ):
        """
        
        Returns a new travel data set from a given set of transcad matrices 
        populated by a specified transcad macro.  
        The columns in the travel data set are those given in matrix_variable_map in travel_model_configuration.
        """
        tm_config = config['travel_model_configuration']

        tm_data_dir = os.path.join(tm_config['directory'], tm_config[year])
        tm_output_full_name = os.path.join(tm_data_dir, tm_output_file)
        matrix_attribute_name_map = tm_config['tm_to_urbansim_variable_mapping']
        
        transcad_file_location = run_get_file_location_macro(tm_config)
        for matrix in matrix_attribute_name_map:
            matrix[0] = transcad_file_location[matrix[0]]  #replace internal matrix name with absolute file name

        macro_args =[ ("ExportTo", tm_output_full_name) ]
        macro_args.append(("Matrix", matrix_attribute_name_map))
        #for macroname, ui_db_file in tm_config['macro']['get_transcad_data_into_cache'].iteritems():
            #ui_db_file = os.path.join(tm_config['directory'], ui_db_file)
        macroname, ui_db_file = tm_config['macro']['get_transcad_data_into_cache']
        run_transcad_macro(macroname, ui_db_file, macro_args)

        table_name = "travel_data"
        data_dict = self._read_macro_output_file(tm_output_full_name)
        data_dict = self._seq_taz_to_zone_conversion(zone_set, data_dict)

        storage = StorageFactory().get_storage('dict_storage')
        storage.write_table(
            table_name=table_name,
            table_data=data_dict
            )
        travel_data_set = TravelDataDataset(in_storage=storage, 
                                            in_table_name=table_name)
        travel_data_set.size()
        return travel_data_set
Example #21
 def test_run(self):
     print "Entering test run"
     
     # This test loads an existing travel data set as a TravelDataSet (numpy array)
     # and accesses single (pre-known) values to validate the conversion process
     # (numpy array into standard python list).
     #
     # Here an example:
     # my_list = [[1,2,3],
     #           [4,5,6],
     #           [7,8,9]]
     #
     # my_list[0][1] should be = 2
     # my_list[2][2] should be = 9
     
     table_name = 'travel_data'
     travel_data_attribute = 'single_vehicle_to_work_travel_cost'
     # location of pre-calculated MATSim travel costs
     in_storage = csv_storage(storage_location = self.input_directory)
     # create travel data set (travel costs)
     travel_data_set = TravelDataDataset( in_storage=in_storage, in_table_name=table_name )
     travel_data_attribute_mat = travel_data_set.get_attribute_as_matrix(travel_data_attribute, fill=31)
     
     # converting from numpy array into a 2d list
     travel_list = numpy.atleast_2d(travel_data_attribute_mat).tolist()
     
     # get two values for validation
     value1 = int(travel_list[1][1]) # should be = 0
     value2 = int(travel_list[2][1]) # should be = 120
     
     logger.log_status('First validation value should be 0. Current value is %i' % value1)
     logger.log_status('Second validation value should be 120. Current value is %i' % value2)
     
     self.assertTrue( value1 == 0 )
     self.assertTrue( value2 == 120 )
         
     # self.dump_travel_list(travel_list) # for debugging
     
     print "Leaving test run"
Example #22
    def get_travel_data_from_travel_model(self, config, year, zone_set):
        """
        """
        logger.log_status('Starting GetMatsimDataIntoCache.get_travel_data...')
#        print >> sys.stderr, "MATSim replaces only _some_ of the columns of travel_data.  Yet, Urbansim does not truly merge them"
#        print >> sys.stderr, " but simply overwrites the columns, without looking for a different sequence of from_zone_id, to_zone_id"
        # solved 3dec08 by hana
        input_directory = os.path.join( os.environ['OPUS_HOME'], "opus_matsim", "tmp" )
        logger.log_status("input_directory: " + input_directory )

        in_storage = csv_storage(storage_location = input_directory)

        table_name = "travel_data"
        travel_data_set = TravelDataDataset( in_storage=in_storage, in_table_name=table_name )

        cache_storage = AttributeCache().get_flt_storage_for_year(year)
        existing_travel_data_set = TravelDataDataset( in_storage=cache_storage, in_table_name=table_name )
        ##TODO:This may not work or may be wrong after the id_names of travel_data 
        ##changed from ['from_zone_id', 'to_zone_id'] to _hidden_id (lmwang)
        existing_travel_data_set.join(travel_data_set, travel_data_set.get_non_id_primary_attribute_names(),metadata=AttributeType.PRIMARY)
        
        return existing_travel_data_set
Example #23
 def get_travel_data_from_travel_model(self, config, year, zone_set):
     """
     """
     logger.log_status('Starting GetMatsimDataIntoCache.get_travel_data...')
     # print >> sys.stderr, "MATSim replaces only _some_ of the columns of travel_data.  Yet, Urbansim does not truly merge them"
     # print >> sys.stderr, " but simply overwrites the columns, without looking for a different sequence of from_zone_id, to_zone_id"
     # solved 3dec08 by hana
     
     # tnicolai :for debugging
     #try:
     #    import pydevd
     #    pydevd.settrace()
     #except: pass
     
     self.init(year, config)
     
     # import workplace accessibility from matsim
     self.get_zone_based_accessibility_into_cache(year)
     
     # import travel data from matsim
     travel_data_set = TravelDataDataset( in_storage=self.in_storage, in_table_name=self.travel_data_table_name )
     # tnicolai : Replace travel data table 
     # Case 1) Delete all 'columns' but the 'columns' passing to urbansim first. then no join operation is needed
     # Case 2) Join the data sets and delete unneeded 'columns' here
     
     # delete travel data attributes in the UrbanSim data store (see list above: "self.delete_travel_data_columns") -> (tnicolai: may cause errors in some models)
     #self.clear_cache_travel_data(year) 
     # load actual travel data set from cache
     existing_travel_data_set = TravelDataDataset( in_storage=self.cache_storage, in_table_name=self.travel_data_table_name )
     
     ##TODO:This may not work or may be wrong after the id_names of travel_data 
     ##changed from ['from_zone_id', 'to_zone_id'] to _hidden_id (lmwang)
     
     # join remaining data set with imported travel data
     existing_travel_data_set.join(travel_data_set, travel_data_set.get_non_id_primary_attribute_names(),metadata=AttributeType.PRIMARY)
     
     # return new travel data set
     return existing_travel_data_set
Example #24
    def get_travel_data_set(self,
                            zone_set,
                            matrix_attribute_name_map,
                            bank_path,
                            out_storage=None,
                            matrices_created=False):
        """
        Returns a new travel data set containing the given set of emme/2 matrices
        populated from the emme/2 data bank. The columns in the travel data set are
        those given as attribute names in the map.
        If matrices_created is True, the matrix files are assumed to already exist in bank_path.
        """
        # Compute the from and to zone sets
        nzones = zone_set.size()
        comb_index = indices((nzones, nzones))

        table_name = 'storage'
        in_storage = StorageFactory().get_storage('dict_storage')
        in_storage.write_table(
            table_name=table_name,
            table_data={
                'from_zone_id':
                zone_set.get_id_attribute()[comb_index[0].ravel()],
                'to_zone_id':
                zone_set.get_id_attribute()[comb_index[1].ravel()],
            })

        travel_data_set = TravelDataDataset(in_storage=in_storage,
                                            in_table_name=table_name,
                                            out_storage=out_storage)
        travel_data_set.load_dataset_if_not_loaded()
        max_zone_id = zone_set.get_id_attribute().max()
        for matrix_name in matrix_attribute_name_map.keys():
            self._put_one_matrix_into_travel_data_set(
                travel_data_set, max_zone_id, matrix_name,
                matrix_attribute_name_map[matrix_name], bank_path,
                matrices_created)
        return travel_data_set
Example #25
 def get_travel_cost_matrix(self):
     ''' Returns the pre-calculated MATSim travel costs as a 2d list.
     '''
     # get sensitivity test path
     test_dir_path = test_dir.__path__[0]
     
     input_directory = os.path.join( test_dir_path, 'data', 'travel_cost')
     print "input_directory: %s" % input_directory
     # check source file
     if not os.path.exists( input_directory ):
         print 'File not found! %s' % input_directory
         sys.exit()
     table_name = 'travel_data'
     travel_data_attribute = 'single_vehicle_to_work_travel_cost'
     # location of pre-calculated MATSim travel costs
     in_storage = csv_storage(storage_location = input_directory)
     # create travel data set (travel costs)
     travel_data_set = TravelDataDataset( in_storage=in_storage, in_table_name=table_name )
     travel_data_attribute_mat = travel_data_set.get_attribute_as_matrix(travel_data_attribute, fill=999)
     # travel cost matrix as a 2d list
     travel_list = numpy.atleast_2d(travel_data_attribute_mat).tolist()
     
     return travel_list
Example #26
    def get_travel_data_from_travel_model(self, config, 
                                          year, zone_set, 
                                          tm_output_file="tm_output.txt",
                                          ):

        table_name = "travel_data"
        tm_config = config['travel_model_configuration']
        base_dir = tm_config['travel_model_base_directory']
        year_dir = tm_config[year]['year_dir']
        xchange_dir = os.path.join(base_dir, year_dir)
        filename = os.path.join(xchange_dir, 'travel_data.csv')
        data_dict = self._read_macro_output_file(filename)
        #data_dict = self._seq_taz_to_zone_conversion(zone_set, data_dict)

        storage = StorageFactory().get_storage('dict_storage')
        storage.write_table(
            table_name=table_name,
            table_data=data_dict
            )
        travel_data_set = TravelDataDataset(in_storage=storage, 
                                            in_table_name=table_name)
        travel_data_set.size()
        return travel_data_set
Example #29
class TravelModelOutputTests(opus_unittest.OpusTestCase):
    def setUp(self):
        self.temp_dir = mkdtemp(prefix='psrc_parcel_test_emme_skims')
        self.bank_file = os.path.join(self.temp_dir, '2015-travelmodel.h5')         
        self.bank_storage = hdf5g_storage(storage_location=self.bank_file)
        data = {
            'au1tim': array([[2,  5,  7],
                             [1.3,7.9,3],
                             [6,  10, 0]]),
            'biketm': array([[10,  50,  65],
                             [13, 10.9, 40],
                             [56,  100, 21]])
            }
        self.bank_storage.write_table(table_name = 'Bank1', table_data = data)
        zone_storage = StorageFactory().get_storage('dict_storage')
        zone_table_name = 'zone'
        zone_storage.write_table(
                    table_name=zone_table_name,
                    table_data={
                        'zone_id': array([1,2,3]),
                        'travel_time_to_airport': ones((3,)),
                        'travel_time_to_cbd': ones((3,))
                        },
                )
        self.zone_set = ZoneDataset(in_storage=zone_storage, in_table_name=zone_table_name)                 
        travel_data_storage = StorageFactory().get_storage('dict_storage')
        travel_data_table_name = 'travel_data'
        travel_data_storage.write_table(
                    table_name=travel_data_table_name,
                    table_data={
                        'from_zone_id':array([1,1,1,2,2,2,3,3,3]),
                        'to_zone_id':array([  1,2,3,3,2,1,1,2,3]),
                        },
        )
            
        self.travel_data_set = TravelDataDataset(in_storage=travel_data_storage, in_table_name=travel_data_table_name)

         
    def tearDown(self):
        if os.path.exists(self.bank_file):
            os.remove(self.bank_file)
        if os.path.exists(self.temp_dir):
            rmtree(self.temp_dir)
             
    def test_getting_emme2_data_into_travel_data_set(self):
        tm_output = TravelModelOutput()
        tm_output._put_one_matrix_into_travel_data_set(self.travel_data_set, 
                                                               3,
                                                               'au1tim', 
                                                               'single_vehicle_to_work_travel_time',
                                                               'Bank1', 
                                                               self.bank_storage)
        self.assertEqual(self.travel_data_set.get_attribute('single_vehicle_to_work_travel_time').size, 9)
        self.assertEqual(allclose(self.travel_data_set.get_attribute('single_vehicle_to_work_travel_time'), 
                                  array([2,5,7,3, 7.9, 1.3, 6,10,0])), True)

    def test_getting_several_emme2_data_into_travel_data_set(self):
        matrix_attribute_map = {'au1tim':'single_vehicle_to_work_travel_time',
                                'biketm':'bike_to_work_travel_time'}
        tm_output = TravelModelOutput()
        travel_data_set = tm_output.get_travel_data_set(self.zone_set, matrix_attribute_map, 
                                                        in_storage=self.bank_storage, table_name='Bank1')
        self.assertEqual(travel_data_set.get_attribute('single_vehicle_to_work_travel_time').size, 9)
        self.assertEqual(travel_data_set.get_attribute('bike_to_work_travel_time').size, 9)
        self.assertEqual(False,
                             allclose(travel_data_set.get_attribute('single_vehicle_to_work_travel_time'), 
                                      travel_data_set.get_attribute('bike_to_work_travel_time')))
        self.assertEqual(allclose(travel_data_set.get_attribute('single_vehicle_to_work_travel_time'), 
                                  array([2,5,7,1.3, 7.9, 3, 6,10,0])), True)
        self.assertEqual(allclose(travel_data_set.get_attribute('bike_to_work_travel_time'), 
                                  array([10,50,65, 13, 10.9, 40, 56,100,21])), True)
Example #30
    def get_travel_data_from_travel_model(self, config, 
                                          year, zone_set, 
                                          tm_output_file="tm_output.txt",
                                          ):
        """
        
        Extracts a new travel data set from a given set of transcad matrices 
        by calling a pre-specified transcad macro.  
        The columns in the travel data set are those given in matrix_variable_map in travel_model_configuration.
        """
        tm_config = config['travel_model_configuration']

        # #logger.log_status('tm_config: %s' % (tm_config))

        # logger.log_status('tm_config[year]["data_exchange_dir"]: %s' % (tm_config[year]["data_exchange_dir"]))

        # tm_data_dir = os.path.join(tm_config['scenario_directory'], "outputs")

        # logger.log_status('tm_data_dir: %s' % (tm_data_dir))

        # tm_output_full_name = os.path.join(tm_data_dir, tm_output_file)

        #tm_output_full_name = "C:\\TravelModel\\2007test2\\outputs\\hwyam_sov.csv"

        scenario_directory = tm_config['scenario_directory']

        #tm_output_full_name = os.path.join(scenario_directory, "outputs\\hwyam_sov.csv")

        matrix_location = scenario_directory + "\\outputs\\hwyam_sov.mtx"

        logger.log_status('matrix_location: %s' % (matrix_location))
        
        tm_output_full_name = scenario_directory + "\\outputs\\hwyam_sov.csv"

        logger.log_status('tm_output_full_name: %s' % (tm_output_full_name))

        # matrix_attribute_name_map = tm_config['tm_to_urbansim_variable_mapping']

        # logger.log_status('matrix_attribute_name_map: %s' % (matrix_attribute_name_map))
        
        # #transcad_file_location = run_get_file_location_macro(tm_config)

        # transcad_file_location = "C:\\TravelModel\\2007test2\\outputs\\hwyam_sov.mtx"
        
        # matrices = []
        # row_index_name, col_index_name = "ZoneID", "ZoneID"  #default values
        # if matrix_attribute_name_map.has_key('row_index_name'):
            # row_index_name = matrix_attribute_name_map['row_index_name']
        # if matrix_attribute_name_map.has_key('col_index_name'):
            # col_index_name = matrix_attribute_name_map['col_index_name']
            
        # for key, val in matrix_attribute_name_map.iteritems():
            # if (key != 'row_index_name') and (key != 'col_index_name'):
                # if val.has_key('row_index_name'):
                    # row_index_name = val['row_index_name']
                # if val.has_key('col_index_name'):
                    # col_index_name = val['col_index_name']
                # matrix_file_name = transcad_file_location  #replace internal matrix name with absolute file name
                # matrices.append([matrix_file_name, row_index_name, col_index_name, val.items()])

        # logger.log_status('matrices: %s' % (matrices))
                  
        # macro_args =[ ("ExportTo", tm_output_full_name) ]
        # macro_args.append(("Matrix", matrices))

        # logger.log_status('macro_args: %s' % (macro_args))

        # #for macroname, ui_db_file in tm_config['macro']['get_transcad_data_into_cache'].iteritems():
            # #ui_db_file = os.path.join(tm_config['directory'], ui_db_file)

        macroname, ui_db_file = tm_config['macro']['get_transcad_data_into_cache'], tm_config['ui_file']

        run_transcad_macro(macroname, ui_db_file, matrix_location)

        table_name = "travel_data"
        data_dict = self._read_macro_output_file(tm_output_full_name)
        #data_dict = self._seq_taz_to_zone_conversion(zone_set, data_dict)

        storage = StorageFactory().get_storage('dict_storage')
        storage.write_table(
            table_name=table_name,
            table_data=data_dict
            )
        travel_data_set = TravelDataDataset(in_storage=storage, 
                                            in_table_name=table_name)
        travel_data_set.size()
        return travel_data_set
Example #31
    def get_resources(self, data_dictionary, dataset):
        """Create resources for computing a variable. """
        resources = Resources()

        for key in data_dictionary.keys():
            if key in self.datasets:
                data = data_dictionary[key]

                storage = StorageFactory().get_storage('dict_storage')

                if (self.id_names[key] not in data_dictionary[key].keys()
                        and not isinstance(self.id_names[key], list)):
                    # add id array
                    data[self.id_names[key]] = arange(
                        1, len(data_dictionary[key][data_dictionary[key].keys()[0]]) + 1)

                id_name = self.id_names[key]
                storage.write_table(table_name='data', table_data=data)

                if key == "gridcell":
                    gc = GridcellDataset(in_storage=storage,
                                         in_table_name='data')

                    # add relative_x and relative_y
                    gc.get_id_attribute()
                    n = int(ceil(sqrt(gc.size())))
                    if "relative_x" not in data.keys():
                        x = (indices((n, n)) + 1)[1].ravel()
                        gc.add_attribute(x[0:gc.size()],
                                         "relative_x",
                                         metadata=1)
                    if "relative_y" not in data.keys():
                        y = (indices((n, n)) + 1)[0].ravel()
                        gc.add_attribute(y[0:gc.size()],
                                         "relative_y",
                                         metadata=1)
                    resources.merge({key: gc})

                elif key == "household":
                    resources.merge({
                        key:
                        HouseholdDataset(in_storage=storage,
                                         in_table_name='data')
                    })
                elif key == "development_project":
                    resources.merge({
                        key:
                        DevelopmentProjectDataset(in_storage=storage,
                                                  in_table_name='data')
                    })
                elif key == "development_event":
                    resources.merge({
                        key:
                        DevelopmentEventDataset(in_storage=storage,
                                                in_table_name='data')
                    })
                elif key == "neighborhood":
                    resources.merge({
                        key:
                        NeighborhoodDataset(in_storage=storage,
                                            in_table_name='data')
                    })
                elif key == "job":
                    resources.merge({
                        key:
                        JobDataset(in_storage=storage, in_table_name='data')
                    })
                elif key == "zone":
                    resources.merge({
                        key:
                        ZoneDataset(in_storage=storage, in_table_name='data')
                    })
                elif key == "travel_data":
                    resources.merge({
                        key:
                        TravelDataDataset(in_storage=storage,
                                          in_table_name='data')
                    })
                elif key == "faz":
                    resources.merge({
                        key:
                        FazDataset(in_storage=storage, in_table_name='data')
                    })
                elif key == "fazdistrict":
                    resources.merge({
                        key:
                        FazdistrictDataset(in_storage=storage,
                                           in_table_name='data')
                    })
                elif key == "race":
                    resources.merge({
                        key:
                        RaceDataset(in_storage=storage, in_table_name='data')
                    })
                elif key == "county":
                    resources.merge({
                        key:
                        CountyDataset(in_storage=storage, in_table_name='data')
                    })
                elif key == "large_area":
                    resources.merge({
                        key:
                        LargeAreaDataset(in_storage=storage,
                                         in_table_name='data')
                    })
                elif key == "development_group":
                    resources.merge({
                        key:
                        DevelopmentGroupDataset(in_storage=storage,
                                                in_table_name='data')
                    })
                elif key == "employment_sector_group":
                    resources.merge({
                        key:
                        EmploymentSectorGroupDataset(in_storage=storage,
                                                     in_table_name='data')
                    })
                elif key == "plan_type_group":
                    resources.merge({
                        key:
                        PlanTypeGroupDataset(in_storage=storage,
                                             in_table_name='data')
                    })
                elif key == "building":
                    resources.merge({
                        key:
                        BuildingDataset(in_storage=storage,
                                        in_table_name='data')
                    })

            else:
                resources.merge({key: data_dictionary[key]})

        if dataset in self.interactions:
            if dataset == "household_x_gridcell":
                resources.merge({
                    "dataset":
                    HouseholdXGridcellDataset(dataset1=resources["household"],
                                              dataset2=resources["gridcell"])
                })
            if dataset == "job_x_gridcell":
                resources.merge({
                    "dataset":
                    JobXGridcellDataset(dataset1=resources["job"],
                                        dataset2=resources["gridcell"])
                })
            if dataset == "household_x_zone":
                resources.merge({
                    "dataset":
                    HouseholdXZoneDataset(dataset1=resources["household"],
                                          dataset2=resources["zone"])
                })
            if dataset == "household_x_neighborhood":
                resources.merge({
                    "dataset":
                    HouseholdXNeighborhoodDataset(
                        dataset1=resources["household"],
                        dataset2=resources["neighborhood"])
                })
            if dataset == "development_project_x_gridcell":
                resources.merge({
                    "dataset":
                    DevelopmentProjectXGridcellDataset(
                        dataset1=resources["development_project"],
                        dataset2=resources["gridcell"])
                })

        else:
            resources.merge({"dataset": resources[dataset]})
        resources.merge({"check_variables": '*', "debug": 4})
        return resources
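
# A small worked example of the relative_x/relative_y computation in
# get_resources above: n gridcells are laid out on a ceil(sqrt(n)) x
# ceil(sqrt(n)) grid and each cell gets 1-based column/row coordinates.
# Pure numpy; the five-cell input is a made-up illustration.
from math import ceil, sqrt
from numpy import indices

size = 5                                 # number of gridcells
n = int(ceil(sqrt(size)))                # side length of the covering grid (3)
relative_x = (indices((n, n)) + 1)[1].ravel()[0:size]  # column index per cell
relative_y = (indices((n, n)) + 1)[0].ravel()[0:size]  # row index per cell
print relative_x   # [1 2 3 1 2]
print relative_y   # [1 1 1 2 2]
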
Exemplo n.º 32
0
    def get_travel_data_from_travel_model(self, config, year, zone_set):
        """ Integrates modified travel times and pre-computed travel costs
            into the UrbanSim cache.
        """

        logger.log_status(
            'Starting GetTestTravelDataIntoCache.get_travel_data...')

        # get the sensitivity test path as an anchor to determine the location of the MATSim travel_data file (see below).
        test_dir_path = test_dir.__path__[0]

        # for debugging
        try:  #tnicolai
            import pydevd
            pydevd.settrace()
        except:
            pass

        # get the existing travel data from the current year
        logger.log_status('Loading travel data from UrbanSim cache (year:%i)' %
                          year)
        table_name = "travel_data"
        cache_storage = AttributeCache().get_flt_storage_for_year(year)
        existing_travel_data_set = TravelDataDataset(in_storage=cache_storage,
                                                     in_table_name=table_name)

        ###### modified travel time travel data
        logger.log_status(
            'Integrating modified travel times in year %i for next simulation year.'
            % year)
        input_directory = os.path.join(os.environ['OPUS_HOME'], "opus_matsim",
                                       "tmp")
        logger.log_status("input_directory: " + input_directory)
        # location of the modified travel time travel_data
        in_storage = csv_storage(storage_location=input_directory)
        # create travel data set (travel times)
        travel_data_set = TravelDataDataset(in_storage=in_storage,
                                            in_table_name=table_name)

        # join the modified travel times with the travel data set of the current year
        existing_travel_data_set.join(
            travel_data_set,
            travel_data_set.get_non_id_primary_attribute_names(),
            metadata=AttributeType.PRIMARY)

        ##### pre-calculated MATSim travel data (travel costs)
        #        logger.log_status('Integrating pre-calculated travel costs (MATSim) in year %i for next simulation year.')
        #        input_directory = os.path.join( test_dir_path, 'data', 'travel_cost')
        #        logger.log_status("input_directory: " + input_directory )
        #        # check source file
        #        if not os.path.exists( input_directory ):
        #            print 'File not found! %s' % input_directory
        #            sys.exit()
        # location of pre-calculated MATSim travel costs
        #        in_storage = csv_storage(storage_location = input_directory)
        # create travel data set (travel costs)
        #        travel_data_set = TravelDataDataset( in_storage=in_storage, in_table_name=table_name )

        # join travel data set from pre-calculated MATSim results
        #        existing_travel_data_set.join(travel_data_set, travel_data_set.get_non_id_primary_attribute_names(),metadata=AttributeType.PRIMARY)

        return existing_travel_data_set
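
# A hedged sketch of the join step above: primary attributes of a second
# travel data set are merged into an existing one on its od-pair index.
# Import paths are assumed from the usual OPUS/UrbanSim layout; the helper,
# the 'modified_travel_time' attribute name, and all values are made up for
# illustration.
from numpy import array
from opus_core.storage_factory import StorageFactory
from opus_core.variables.attribute_type import AttributeType
from urbansim.datasets.travel_data_dataset import TravelDataDataset

def _make_travel_data_set(attribute_name, values):
    # hypothetical helper: a two-od-pair travel data set in memory
    storage = StorageFactory().get_storage('dict_storage')
    storage.write_table(table_name='travel_data', table_data={
        'from_zone_id': array([1, 2]),
        'to_zone_id': array([2, 1]),
        attribute_name: array(values),
        })
    return TravelDataDataset(in_storage=storage, in_table_name='travel_data')

existing = _make_travel_data_set('single_vehicle_to_work_travel_cost', [5.0, 6.0])
modified = _make_travel_data_set('modified_travel_time', [12.0, 14.0])
# pull the non-id attributes of 'modified' into 'existing', as the method above does
existing.join(modified,
              modified.get_non_id_primary_attribute_names(),
              metadata=AttributeType.PRIMARY)
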
Exemplo n.º 33
0
    def test_run(self):
        print "Entering test run"
        
        path = os.path.join(os.environ['OPUS_HOME'], 'opus_matsim/tmp')
        # check if travel data exists
        travel_data = os.path.join( path, "travel_data.csv" )
        if not os.path.exists(travel_data):
            print "Travel Data not found!!!"
            sys.exit()
        
        in_storage = csv_storage(storage_location = path)
        table_name = "travel_data"
        travel_data_set = TravelDataDataset( in_storage=in_storage, in_table_name=table_name )
        
        origin_zones = travel_data_set.get_attribute_as_column(self.origin_zone_id)
        l = numpy.atleast_1d(origin_zones).tolist()
        origin_list = set(l) # removes duplicates (note: a set is unordered)
        # destination_list = len(origin_list) * [self.cbd] # would create a list that contains the zone id of the cbd and has the same length as "origin_list"

        # set high travel costs for all origin to cbd pairs
        for id in origin_list:
            travel_data_set.set_values_of_one_attribute_with_od_pairs(self.travel_data_attribute, self.high_travel_cost, id, self.cbd)
        # adjust cbd to cbd
        travel_data_set.set_values_of_one_attribute_with_od_pairs(self.travel_data_attribute, 0.0, self.cbd, self.cbd)
        # adjust preferred zone to cbd
        travel_data_set.set_values_of_one_attribute_with_od_pairs(self.travel_data_attribute, self.low_travel_cost, self.preferential_zone, self.cbd)
        
        w = travel_data_set.get_index_by_origin_and_destination_ids(110, 129)
        x = travel_data_set.get_index_by_origin_and_destination_ids(129, 129)
        y = travel_data_set.get_index_by_origin_and_destination_ids(20, 129)
        z = travel_data_set.get_index_by_origin_and_destination_ids(20, 20)
        
        print w
        print x
        print y
        print z 
        
        origin_zones = travel_data_set.get_attribute_as_column(self.origin_zone_id)
        destination_zones = travel_data_set.get_attribute_as_column(self.destination_zone_id)
        
        my_travel_data_attr_mat = travel_data_set.get_attribute_as_matrix('travel_data.single_vehicle_to_work_travel_cost', 
                                                                   fill=999)
        # overwrite the cost of every od pair that exists in the travel data set
        my_travel_data_attr_mat[origin_zones, destination_zones] = 1.03
        
        
        
        cbd_ids = where(origin_zones == 129)
        

        print "Leaving test run"
Exemplo n.º 34
0
    def test_run(self):
        print "Entering test run"
        
        path = paths.get_opus_home_path('opus_matsim', 'tmp')
        # check if travel data exists
        travel_data = os.path.join( path, "travel_data.csv" )
        if not os.path.exists(travel_data):
            print "Travel Data not found!!!"
            sys.exit()
        
        in_storage = csv_storage(storage_location = path)
        table_name = "travel_data"
        travel_data_set = TravelDataDataset( in_storage=in_storage, in_table_name=table_name )
        
        origin_zones = travel_data_set.get_attribute_as_column(self.origin_zone_id)
        l = numpy.atleast_1d(origin_zones).tolist()
        origin_list = set(l) # removes duplicates (note: a set is unordered)
        # destination_list = len(origin_list) * [self.cbd] # would create a list that contains the zone id of the cbd and has the same length as "origin_list"

        # set high travel costs for all origin to cbd pairs
        for id in origin_list:
            travel_data_set.set_values_of_one_attribute_with_od_pairs(self.travel_data_attribute, self.high_travel_cost, id, self.cbd)
        # adjust cbd to cbd
        travel_data_set.set_values_of_one_attribute_with_od_pairs(self.travel_data_attribute, 0.0, self.cbd, self.cbd)
        # adjust preferred zone to cbd
        travel_data_set.set_values_of_one_attribute_with_od_pairs(self.travel_data_attribute, self.low_travel_cost, self.preferential_zone, self.cbd)
        
        w = travel_data_set.get_index_by_origin_and_destination_ids(110, 129)
        x = travel_data_set.get_index_by_origin_and_destination_ids(129, 129)
        y = travel_data_set.get_index_by_origin_and_destination_ids(20, 129)
        z = travel_data_set.get_index_by_origin_and_destination_ids(20, 20)
        
        print w
        print x
        print y
        print z 
        
        origin_zones = travel_data_set.get_attribute_as_column(self.origin_zone_id)
        destination_zones = travel_data_set.get_attribute_as_column(self.destination_zone_id)
        
        my_travel_data_attr_mat = travel_data_set.get_attribute_as_matrix('travel_data.single_vehicle_to_work_travel_cost', 
                                                                   fill=999)
        # overwrite the cost of every od pair that exists in the travel data set
        my_travel_data_attr_mat[origin_zones, destination_zones] = 1.03
        
        
        
        cbd_ids = where(origin_zones == 129)
        

        print "Leaving test run"
class GetVisumDataIntoCache(GetTravelModelDataIntoCache):
    """Class to copy travel model results into the UrbanSim cache.
    """
    def get_travel_data_from_travel_model(self, config, year, zone_set):
        """
        """

        tm_config = config['travel_model_configuration']

        data_dict = {}
        table_name = "travel_data"
        storage = StorageFactory().get_storage('dict_storage')
        travel_data_set = None

        #Get config params
        visum_dir, fileName = tm_config[year]['version']
        visum_version_number = tm_config['visum_version_number']

        #Startup Visum
        Visum = load_version_file(visum_dir, fileName, visum_version_number)

        matrices = tm_config["tm_to_urbansim_variables"]
        #Get matrices
        #Note that matrix objects must be defined in version file before getting or setting
        try:
            #Get demand matrices
            if matrices.has_key('od'):
                for od_mat_num, od_mat_name in matrices["od"].iteritems():
                    mat = h.GetODMatrix(
                        Visum, od_mat_num)  #returns a 2d numarray object
                    data_dict[od_mat_name] = ravel(
                        mat)  #flatten to 1d and convert to numpy
                    #mat.tofile(visum_dir + "/od" + str(od_mat_num) + ".mtx") #temp hack to save it

            #Get skim matrices
            if matrices.has_key('skim'):
                for skim_mat_num, skim_mat_name in matrices["skim"].iteritems(
                ):
                    mat = h.GetSkimMatrix(
                        Visum, skim_mat_num)  #returns a 2d numarray object
                    data_dict[skim_mat_name] = ravel(
                        mat)  #flatten to 1d and convert to numpy
                #mat.tofile(visum_dir + "/skim" + str(skim_mat_num) + ".mtx") #temp hack to save it
        except Exception, e:
            error_msg = "Getting matrices failed: %s " % e
            raise StandardError(error_msg)

        ## hack to add the from_zone_id/to_zone_id keys to the matrix values
        zoneNumbers = h.GetMulti(Visum.Net.Zones, "NO")
        zoneNumbers = array(zoneNumbers)[newaxis, :]
        from_zone_id = ravel(zoneNumbers.repeat(zoneNumbers.size, axis=1))
        to_zone_id = ravel(zoneNumbers.repeat(zoneNumbers.size, axis=0))

        data_dict['from_zone_id'] = from_zone_id
        data_dict['to_zone_id'] = to_zone_id

        storage.write_table(table_name=table_name, table_data=data_dict)
        travel_data_set = TravelDataDataset(in_storage=storage,
                                            in_table_name=table_name)

        travel_data_set.size()  # force the dataset to load before it is returned
        return travel_data_set
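
# A quick numpy check of the from_zone_id/to_zone_id construction above:
# repeating a (1, n) row of zone numbers n times along axis 1 enumerates the
# origin of every od pair, and repeating along axis 0 the destination.
# The zone numbers [1, 2, 3] are a made-up illustration.
from numpy import array, newaxis, ravel

zoneNumbers = array([1, 2, 3])[newaxis, :]  # shape (1, 3), as in the hack above
from_zone_id = ravel(zoneNumbers.repeat(zoneNumbers.size, axis=1))
to_zone_id = ravel(zoneNumbers.repeat(zoneNumbers.size, axis=0))
print from_zone_id   # [1 1 1 2 2 2 3 3 3]
print to_zone_id     # [1 2 3 1 2 3 1 2 3]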