Example no. 1
    def compare_travel_data_sets(self):

        # get copied travel data csv
        copied_travel_data_location = os.path.join(self.destination, 'opus_matsim', 'tmp')
        if not os.path.exists(copied_travel_data_location):
            raise StandardError('Travel data not found: %s' % copied_travel_data_location)
        logger.log_status('Get copied travel data: %s' % copied_travel_data_location)
        # convert travel data csv into a travel data set matrix
        in_storage = csv_storage(storage_location=copied_travel_data_location)
        table_name = "travel_data"
        travel_data_attribute = 'single_vehicle_to_work_travel_cost'
        travel_data_set = TravelDataDataset(in_storage=in_storage, in_table_name=table_name)
        travel_data_attribute_mat = travel_data_set.get_attribute_as_matrix(travel_data_attribute, fill=999)
        # get the existing travel data set and convert it into a matrix as well
        year = self.run_config['base_year'] + 2
        attribute_cache = AttributeCache(cache_directory=self.run_config['cache_directory'])
        cache_storage = attribute_cache.get_flt_storage_for_year(year)
        existing_travel_data_set = TravelDataDataset(in_storage=cache_storage, in_table_name=table_name)
        existing_travel_data_attribute_mat = existing_travel_data_set.get_attribute_as_matrix(travel_data_attribute, fill=999)

        from numpy import savetxt  # for debugging
        savetxt(os.path.join(self.destination, 'origin_travel_data.txt'), travel_data_attribute_mat, fmt="%f")
        savetxt(os.path.join(self.destination, 'existing_travel_data.txt'), existing_travel_data_attribute_mat, fmt="%f")

        # compare both data set matrices
        compare = travel_data_attribute_mat == existing_travel_data_attribute_mat
        # return result
        return compare.all()
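        # A hedged alternative: exact float equality can be brittle after a
        # CSV round-trip; numpy.allclose compares with a tolerance instead
        # (sketch, assuming both matrices have the same shape):
        #
        #   from numpy import allclose
        #   return allclose(travel_data_attribute_mat, existing_travel_data_attribute_mat)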
Example no. 2
    def get_travel_data_from_travel_model(self, config, year, zone_set):
        """
        """
        logger.log_status('Starting GetMatsimDataIntoCache.get_travel_data...')

        input_directory = os.path.join(config['root'], 'opus_matsim', 'tmp')
        logger.log_status("input_directory: " + input_directory)

        in_storage = csv_storage(storage_location=input_directory)

        table_name = "travel_data"
        travel_data_set = TravelDataDataset(in_storage=in_storage,
                                            in_table_name=table_name)

        cache_storage = AttributeCache().get_flt_storage_for_year(year)
        existing_travel_data_set = TravelDataDataset(in_storage=cache_storage,
                                                     in_table_name=table_name)
        ##TODO:This may not work or may be wrong after the id_names of travel_data
        ##changed from ['from_zone_id', 'to_zone_id'] to _hidden_id (lmwang)
        existing_travel_data_set.join(
            travel_data_set,
            travel_data_set.get_non_id_primary_attribute_names(),
            metadata=AttributeType.PRIMARY)

        return existing_travel_data_set
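        # Note on join(): the MATSim columns replace the matching non-id
        # primary attributes of the cached set; per the 3dec08 note in the
        # next example, rows are presumably aligned on the
        # from_zone_id/to_zone_id pair rather than on file order.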
Example no. 3
    def get_travel_data_from_travel_model(self, config, year, zone_set):
        """
        """
        logger.log_status('Starting GetMatsimDataIntoCache.get_travel_data...')
        #        print >> sys.stderr, "MATSim replaces only _some_ of the columns of travel_data.  Yet, Urbansim does not truly merge them"
        #        print >> sys.stderr, " but simply overwrites the columns, without looking for a different sequence of from_zone_id, to_zone_id"
        # solved 3dec08 by hana

        input_directory = os.path.join(os.environ['OPUS_HOME'], "opus_matsim",
                                       "tmp")
        logger.log_status("input_directory: " + input_directory)

        in_storage = csv_storage(storage_location=input_directory)

        table_name = "travel_data"
        travel_data_set = TravelDataDataset(in_storage=in_storage,
                                            in_table_name=table_name)

        cache_storage = AttributeCache().get_flt_storage_for_year(year)
        existing_travel_data_set = TravelDataDataset(in_storage=cache_storage,
                                                     in_table_name=table_name)
        ##TODO:This may not work or may be wrong after the id_names of travel_data
        ##changed from ['from_zone_id', 'to_zone_id'] to _hidden_id (lmwang)
        existing_travel_data_set.join(
            travel_data_set,
            travel_data_set.get_non_id_primary_attribute_names(),
            metadata=AttributeType.PRIMARY)

        return existing_travel_data_set
    def test_run(self):
        print "Entering test run"

        logger.log_status('Loading travel data: %s' % self.travel_data_source)
        # get travel data as an attribute matrix
        in_storage = csv_storage(storage_location=self.travel_data_source_dir)
        table_name = "travel_data"
        travel_data_attribute = 'single_vehicle_to_work_travel_cost'
        travel_data_set = TravelDataDataset(in_storage=in_storage,
                                            in_table_name=table_name)
        travel_data_attribute_mat = travel_data_set.get_attribute_as_matrix(
            travel_data_attribute, fill=0)

        # determine location to store and read attribute matrix
        location1 = os.path.join(self.tempDir, 'attrib_matrix1.txt')
        location2 = os.path.join(self.tempDir, 'attrib_matrix2.txt')

        # store attribute matrix
        savetxt(location1, travel_data_attribute_mat, fmt="%i")
        savetxt(location2, travel_data_attribute_mat, fmt="%i")

        # read attribute matrix
        matrix1 = genfromtxt(location1)
        matrix2 = genfromtxt(location2)

        # compare both matrices
        result = (matrix1 == matrix2)

        self.assertTrue(result.all())
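        # Note: fmt="%i" truncates any fractional part on write, and
        # genfromtxt reads the values back as floats, so the assertion
        # exercises the full text round-trip. A purely in-memory check would
        # be (sketch):
        #
        #   from numpy import array_equal
        #   self.assertTrue(array_equal(matrix1, matrix2))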

        print "Leaving test run"
    def get_travel_data_from_travel_model(self, config, year, zone_set):
        """
        Returns a new travel data set populated by a travel model
        """
        logger.log_status("Running GetTravelModelDataIntoCache with year %d" %
                          (year))
        logger.log_status(zone_set)
        tm_config = config['travel_model_configuration']

        base_dir = tm_config['travel_model_base_directory']
        run_dir = os.path.join(base_dir, tm_config[year]['year_dir'])
        in_storage = StorageFactory().get_storage('dict_storage')
        max_zone_id = zone_set.get_id_attribute().max()
        in_storage.write_table(table_name=self.TABLE_NAME,
                               table_data=self._read_skims(
                                   run_dir, max_zone_id))

        travel_data_set = TravelDataDataset(in_storage=in_storage,
                                            in_table_name=self.TABLE_NAME)
        # This isn't necessary
        # remove_index = where(logical_or(travel_data_set.get_attribute("from_zone_id")>max_zone_id,
        #                                travel_data_set.get_attribute("to_zone_id")>max_zone_id))[0]
        # travel_data_set.size()
        # travel_data_set.remove_elements(remove_index)
        return travel_data_set
Example no. 6
    def test_run(self):
        print "Entering test run"
        
        path = os.path.join(os.environ['OPUS_HOME'], 'opus_matsim/tmp')
        # check if travel data exists
        travel_data = os.path.join(path, "travel_data.csv")
        if not os.path.exists(travel_data):
            print "Travel Data not found!!!"
            sys.exit()
        
        in_storage = csv_storage(storage_location=path)
        table_name = "travel_data"
        travel_data_set = TravelDataDataset(in_storage=in_storage, in_table_name=table_name)
        
        origin_zones = travel_data_set.get_attribute_as_column(self.origin_zone_id)
        l = numpy.atleast_1d(origin_zones).tolist()
        origin_list = set(l)  # removes duplicates; note that a Python set itself is unordered
        # destination_list = len(origin_list) * self.cbd # creates a list that contains the zone id of the cbd and has the same length as "origin_list"
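        # If ascending order actually matters below, it needs an explicit
        # sort, since a Python set is unordered (sketch):
        #
        #   origin_list = sorted(set(l))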

        # set high travel costs for all origin to cbd pairs
        for id in origin_list:
            travel_data_set.set_values_of_one_attribute_with_od_pairs(self.travel_data_attribute, self.high_travel_cost, id, self.cbd)
        # adjust cbd to cbd
        travel_data_set.set_values_of_one_attribute_with_od_pairs(self.travel_data_attribute, 0.0, self.cbd, self.cbd)
        # adjust preferred zone to cbd
        travel_data_set.set_values_of_one_attribute_with_od_pairs(self.travel_data_attribute, self.low_travel_cost, self.preferential_zone, self.cbd)
        
        w = travel_data_set.get_index_by_origin_and_destination_ids(110, 129)
        x = travel_data_set.get_index_by_origin_and_destination_ids(129, 129)
        y = travel_data_set.get_index_by_origin_and_destination_ids(20, 129)
        z = travel_data_set.get_index_by_origin_and_destination_ids(20, 20)
        
        print w
        print x
        print y
        print z 
        
        origin_zones = travel_data_set.get_attribute_as_column(self.origin_zone_id)
        destination_zones = travel_data_set.get_attribute_as_column(self.destination_zone_id)
        
        my_travel_data_attr_mat = travel_data_set.get_attribute_as_matrix(
            'travel_data.single_vehicle_to_work_travel_cost', fill=999)
        my_travel_data_attr_mat[origin_zones, destination_zones] = 1.03

        cbd_ids = where(origin_zones == 129)  # rows whose origin is zone 129 (apparently the cbd); unused, kept for debugging

        print "Leaving test run"
    def get_travel_data_from_travel_model(
        self,
        config,
        year,
        zone_set,
        tm_output_file="tm_output.txt",
    ):
        """
        
        Returns a new travel data set from a given set of transcad matrices 
        populated by a specified transcad macro.  
        The columns in the travel data set are those given in matrix_variable_map in travel_model_configuration.
        """
        tm_config = config['travel_model_configuration']

        tm_data_dir = os.path.join(tm_config['directory'], tm_config[year])
        tm_output_full_name = os.path.join(tm_data_dir, tm_output_file)
        matrix_attribute_name_map = tm_config['tm_to_urbansim_variable_mapping']

        transcad_file_location = run_get_file_location_macro(tm_config)
        for matrix in matrix_attribute_name_map:
            # replace internal matrix name with absolute file name
            matrix[0] = transcad_file_location[matrix[0]]
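        # The in-place mutation of matrix[0] above implies that
        # 'tm_to_urbansim_variable_mapping' holds mutable
        # [matrix_name, urbansim_attribute] pairs, e.g. (assumed shape with
        # illustrative names):
        #
        #   [['am_sov_time', 'single_vehicle_to_work_travel_time'], ...]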

        macro_args = [("ExportTo", tm_output_full_name)]
        macro_args.append(("Matrix", matrix_attribute_name_map))
        #for macroname, ui_db_file in tm_config['macro']['get_transcad_data_into_cache'].iteritems():
        #ui_db_file = os.path.join(tm_config['directory'], ui_db_file)
        macroname, ui_db_file = tm_config['macro']['get_transcad_data_into_cache']
        run_transcad_macro(macroname, ui_db_file, macro_args)

        table_name = "travel_data"
        data_dict = self._read_macro_output_file(tm_output_full_name)
        data_dict = self._seq_taz_to_zone_conversion(zone_set, data_dict)

        storage = StorageFactory().get_storage('dict_storage')
        storage.write_table(table_name=table_name, table_data=data_dict)
        travel_data_set = TravelDataDataset(in_storage=storage,
                                            in_table_name=table_name)
        travel_data_set.size()  # presumably forces the dataset to load before returning
        return travel_data_set
Example no. 8
    def test_getting_emme2_data_into_travel_data_set(self):
        if self._has_travel_model:
            zone_storage = StorageFactory().get_storage('dict_storage')
            zone_table_name = 'zone'
            zone_storage.write_table(
                table_name=zone_table_name,
                table_data={
                    'zone_id': array([1, 2, 3]),
                    'travel_time_to_airport': ones((3, )),
                    'travel_time_to_cbd': ones((3, ))
                },
            )

            travel_data_storage = StorageFactory().get_storage('dict_storage')
            travel_data_table_name = 'travel_data'
            travel_data_storage.write_table(
                table_name=travel_data_table_name,
                table_data={
                    'from_zone_id': array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
                    'to_zone_id': array([1, 2, 3, 1, 2, 3, 1, 2, 3]),
                },
            )

            travel_data_set = TravelDataDataset(
                in_storage=travel_data_storage,
                in_table_name=travel_data_table_name)
            travel_data_set.get_id_attribute()
            tm_output = TravelModelOutput()
            tm_output._get_matrix_into_data_file('au1tim', 3,
                                                 self.real_bank_path)
            tm_output._put_one_matrix_into_travel_data_set(
                travel_data_set, 3, 'au1tim',
                'single_vehicle_to_work_travel_time', self.real_bank_path)
            self.assertEqual(
                travel_data_set.get_attribute(
                    'single_vehicle_to_work_travel_time').size, 9)

        else:
            logger.log_warning('Test skipped. TRAVELMODELROOT environment '
                               'variable not found.')
Example no. 9
    def test_run(self):
        print "Entering test run"

        # This test loads an existing travel data set as a TravelDataDataset (numpy array)
        # and accesses single (pre-known) values to validate the conversion process
        # (numpy array into standard python list).
        #
        # Here an example:
        # my_list = [[1,2,3],
        #            [4,5,6],
        #            [7,8,9]]
        #
        # my_list[0][1] should be = 2
        # my_list[2][2] should be = 9

        table_name = 'travel_data'
        travel_data_attribute = 'single_vehicle_to_work_travel_cost'
        # location of pre-calculated MATSim travel costs
        in_storage = csv_storage(storage_location=self.input_directory)
        # create travel data set (travel costs)
        travel_data_set = TravelDataDataset(in_storage=in_storage, in_table_name=table_name)
        travel_data_attribute_mat = travel_data_set.get_attribute_as_matrix(travel_data_attribute, fill=31)

        # converting from numpy array into a 2d list
        travel_list = numpy.atleast_2d(travel_data_attribute_mat).tolist()

        # get two values for validation
        value1 = int(travel_list[1][1])  # should be = 0
        value2 = int(travel_list[2][1])  # should be = 120

        logger.log_status('First validation value should be 0. Current value is %i' % value1)
        logger.log_status('Second validation value should be 120. Current value is %i' % value2)

        self.assertTrue(value1 == 0)
        self.assertTrue(value2 == 120)

        # self.dump_travel_list(travel_list) # for debugging

        print "Leaving test run"
Example no. 10
    def get_travel_data_set(self,
                            zone_set,
                            matrix_attribute_name_map,
                            bank_path,
                            out_storage=None,
                            matrices_created=False):
        """
        Returns a new travel data set containing the given set of emme/2 matrices 
        populated from the emme/2 data bank.  The columns in the travel data set are 
        those given in the attribute name of the map.
        If matrices_created is True, it is assumed that the matrices files are already created in the bank_path.
        """
        # Compute the from and to zone sets
        nzones = zone_set.size()
        comb_index = indices((nzones, nzones))
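        # indices((nzones, nzones)) enumerates every (row, column) index pair,
        # so the two ravel()ed id arrays below form the full
        # origin/destination cross product; for zone ids [1, 2, 3] (sketch):
        #
        #   from_zone_id: 1 1 1 2 2 2 3 3 3
        #   to_zone_id:   1 2 3 1 2 3 1 2 3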

        table_name = 'storage'
        in_storage = StorageFactory().get_storage('dict_storage')
        in_storage.write_table(
            table_name=table_name,
            table_data={
                'from_zone_id': zone_set.get_id_attribute()[comb_index[0].ravel()],
                'to_zone_id': zone_set.get_id_attribute()[comb_index[1].ravel()],
            })

        travel_data_set = TravelDataDataset(in_storage=in_storage,
                                            in_table_name=table_name,
                                            out_storage=out_storage)
        travel_data_set.load_dataset_if_not_loaded()
        max_zone_id = zone_set.get_id_attribute().max()
        for matrix_name in matrix_attribute_name_map.keys():
            self._put_one_matrix_into_travel_data_set(
                travel_data_set, max_zone_id, matrix_name,
                matrix_attribute_name_map[matrix_name], bank_path,
                matrices_created)
        return travel_data_set
Example no. 11
    def get_travel_cost_matrix(self):
        ''' Returns the pre-calculated MATSim travel costs as a 2d list
        '''
        # get sensitivity test path
        test_dir_path = test_dir.__path__[0]

        input_directory = os.path.join(test_dir_path, 'data', 'travel_cost')
        print "input_directory: %s" % input_directory
        # check source directory
        if not os.path.exists(input_directory):
            print 'Directory not found! %s' % input_directory
            sys.exit()
        table_name = 'travel_data'
        travel_data_attribute = 'single_vehicle_to_work_travel_cost'
        # location of pre-calculated MATSim travel costs
        in_storage = csv_storage(storage_location=input_directory)
        # create travel data set (travel costs)
        travel_data_set = TravelDataDataset(in_storage=in_storage, in_table_name=table_name)
        travel_data_attribute_mat = travel_data_set.get_attribute_as_matrix(travel_data_attribute, fill=999)
        # travel cost matrix as 2d list
        travel_list = numpy.atleast_2d(travel_data_attribute_mat).tolist()

        return travel_list
class GetVisumDataIntoCache(GetTravelModelDataIntoCache):
    """Class to copy travel model results into the UrbanSim cache.
    """
    def get_travel_data_from_travel_model(self, config, year, zone_set):
        """
        """

        tm_config = config['travel_model_configuration']

        data_dict = {}
        table_name = "travel_data"
        storage = StorageFactory().get_storage('dict_storage')
        travel_data_set = None

        #Get config params
        visum_dir, fileName = tm_config[year]['version']
        visum_version_number = tm_config['visum_version_number']

        #Startup Visum
        Visum = load_version_file(visum_dir, fileName, visum_version_number)

        matrices = tm_config["tm_to_urbansim_variables"]
        #Get matrices
        #Note that matrix objects must be defined in version file before getting or setting
        try:
            #Get demand matrices
            if matrices.has_key('od'):
                for od_mat_num, od_mat_name in matrices["od"].iteritems():
                    mat = h.GetODMatrix(Visum, od_mat_num)  # returns a 2d numarray object
                    data_dict[od_mat_name] = ravel(mat)  # flatten to 1d and convert to numpy
                    #mat.tofile(visum_dir + "/od" + str(od_mat_num) + ".mtx") #temp hack to save it

            #Get skim matrices
            if matrices.has_key('skim'):
                for skim_mat_num, skim_mat_name in matrices["skim"].iteritems():
                    mat = h.GetSkimMatrix(Visum, skim_mat_num)  # returns a 2d numarray object
                    data_dict[skim_mat_name] = ravel(mat)  # flatten to 1d and convert to numpy
                    #mat.tofile(visum_dir + "/skim" + str(skim_mat_num) + ".mtx") #temp hack to save it
        except Exception, e:
            error_msg = "Getting matrices failed: %s " % e
            raise StandardError(error_msg)

        ## hack to add keys to the matrix values
        zoneNumbers = h.GetMulti(Visum.Net.Zones, "NO")
        zoneNumbers = array(zoneNumbers)[newaxis, :]
        from_zone_id = ravel(zoneNumbers.repeat(zoneNumbers.size, axis=1))
        to_zone_id = ravel(zoneNumbers.repeat(zoneNumbers.size, axis=0))
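        # this repeat() construction builds the same full OD cross product as
        # the indices() approach in the emme/2 example: repeating along axis=1
        # makes from_zone_id vary slowest, repeating along axis=0 makes
        # to_zone_id cycle fastest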

        data_dict['from_zone_id'] = from_zone_id
        data_dict['to_zone_id'] = to_zone_id

        storage.write_table(table_name=table_name, table_data=data_dict)
        travel_data_set = TravelDataDataset(in_storage=storage,
                                            in_table_name=table_name)

        travel_data_set.size()  # presumably forces the dataset to load before returning
        return travel_data_set
Example no. 13
    def get_resources(self, data_dictionary, dataset):
        """Create resources for computing a variable. """
        resources = Resources()

        for key in data_dictionary.keys():
            if key in self.datasets:
                data = data_dictionary[key]

                storage = StorageFactory().get_storage('dict_storage')

                if self.id_names[key] not in data_dictionary[key].keys() \
                        and not isinstance(self.id_names[key], list):
                    # add id array
                    data[self.id_names[key]] = arange(
                        1, len(data_dictionary[key][data_dictionary[key].keys()[0]]) + 1)

                id_name = self.id_names[key]
                storage.write_table(table_name='data', table_data=data)

                if key == "gridcell":
                    gc = GridcellDataset(in_storage=storage,
                                         in_table_name='data')

                    # add relative_x and relative_y
                    gc.get_id_attribute()
                    n = int(ceil(sqrt(gc.size())))
                    if "relative_x" not in data.keys():
                        x = (indices((n, n)) + 1)[1].ravel()
                        gc.add_attribute(x[0:gc.size()],
                                         "relative_x",
                                         metadata=1)
                    if "relative_y" not in data.keys():
                        y = (indices((n, n)) + 1)[0].ravel()
                        gc.add_attribute(y[0:gc.size()],
                                         "relative_y",
                                         metadata=1)
                    resources.merge({key: gc})

                elif key == "household":
                    resources.merge({
                        key:
                        HouseholdDataset(in_storage=storage,
                                         in_table_name='data')
                    })
                elif key == "development_project":
                    resources.merge({
                        key:
                        DevelopmentProjectDataset(in_storage=storage,
                                                  in_table_name='data')
                    })
                elif key == "development_event":
                    resources.merge({
                        key:
                        DevelopmentEventDataset(in_storage=storage,
                                                in_table_name='data')
                    })
                elif key == "neighborhood":
                    resources.merge({
                        key:
                        NeighborhoodDataset(in_storage=storage,
                                            in_table_name='data')
                    })
                elif key == "job":
                    resources.merge({
                        key:
                        JobDataset(in_storage=storage, in_table_name='data')
                    })
                elif key == "zone":
                    resources.merge({
                        key:
                        ZoneDataset(in_storage=storage, in_table_name='data')
                    })
                elif key == "travel_data":
                    resources.merge({
                        key:
                        TravelDataDataset(in_storage=storage,
                                          in_table_name='data')
                    })
                elif key == "faz":
                    resources.merge({
                        key:
                        FazDataset(in_storage=storage, in_table_name='data')
                    })
                elif key == "fazdistrict":
                    resources.merge({
                        key:
                        FazdistrictDataset(in_storage=storage,
                                           in_table_name='data')
                    })
                elif key == "race":
                    resources.merge({
                        key:
                        RaceDataset(in_storage=storage, in_table_name='data')
                    })
                elif key == "county":
                    resources.merge({
                        key:
                        CountyDataset(in_storage=storage, in_table_name='data')
                    })
                elif key == "large_area":
                    resources.merge({
                        key:
                        LargeAreaDataset(in_storage=storage,
                                         in_table_name='data')
                    })
                elif key == "development_group":
                    resources.merge({
                        key:
                        DevelopmentGroupDataset(in_storage=storage,
                                                in_table_name='data')
                    })
                elif key == "employment_sector_group":
                    resources.merge({
                        key:
                        EmploymentSectorGroupDataset(in_storage=storage,
                                                     in_table_name='data')
                    })
                elif key == "plan_type_group":
                    resources.merge({
                        key:
                        PlanTypeGroupDataset(in_storage=storage,
                                             in_table_name='data')
                    })
                elif key == "building":
                    resources.merge({
                        key:
                        BuildingDataset(in_storage=storage,
                                        in_table_name='data')
                    })

            else:
                resources.merge({key: data_dictionary[key]})

        if dataset in self.interactions:
            if dataset == "household_x_gridcell":
                resources.merge({
                    "dataset":
                    HouseholdXGridcellDataset(dataset1=resources["household"],
                                              dataset2=resources["gridcell"])
                })
            if dataset == "job_x_gridcell":
                resources.merge({
                    "dataset":
                    JobXGridcellDataset(dataset1=resources["job"],
                                        dataset2=resources["gridcell"])
                })
            if dataset == "household_x_zone":
                resources.merge({
                    "dataset":
                    HouseholdXZoneDataset(dataset1=resources["household"],
                                          dataset2=resources["zone"])
                })
            if dataset == "household_x_neighborhood":
                resources.merge({
                    "dataset":
                    HouseholdXNeighborhoodDataset(
                        dataset1=resources["household"],
                        dataset2=resources["neighborhood"])
                })
            if dataset == "development_project_x_gridcell":
                resources.merge({
                    "dataset":
                    DevelopmentProjectXGridcellDataset(
                        dataset1=resources["development_project"],
                        dataset2=resources["gridcell"])
                })

        else:
            resources.merge({"dataset": resources[dataset]})
        resources.merge({"check_variables": '*', "debug": 4})
        return resources
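    # A possible simplification (sketch, using the dataset classes this module
    # already imports): every branch except "gridcell", which also derives
    # relative_x/relative_y, only instantiates a class, so a lookup table
    # would shrink the chain:
    #
    #   DATASET_CLASSES = {'household': HouseholdDataset, 'job': JobDataset,
    #                      'zone': ZoneDataset, 'travel_data': TravelDataDataset}
    #   resources.merge({key: DATASET_CLASSES[key](in_storage=storage,
    #                                              in_table_name='data')})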
Example no. 14
    def get_travel_data_from_travel_model(self, config, year, zone_set):
        """ Integrates modified travel times and pre-computed travel costs
            into the UrbanSim cache.
        """

        logger.log_status(
            'Starting GetTestTravelDataIntoCache.get_travel_data...')

        # get sensitivity test path as an anchor to determine the location of the MATSim travel_data file (see below).
        test_dir_path = test_dir.__path__[0]

        # for debugging
        try:  #tnicolai
            import pydevd
            pydevd.settrace()
        except:
            pass

        # get the existing travel data from the current year
        logger.log_status('Loading travel data from UrbanSim cache (year:%i)' %
                          year)
        table_name = "travel_data"
        cache_storage = AttributeCache().get_flt_storage_for_year(year)
        existing_travel_data_set = TravelDataDataset(in_storage=cache_storage,
                                                     in_table_name=table_name)

        ###### modified travel time travel data
        logger.log_status(
            'Integrating modified travel times in year %i for next simulation year.'
            % year)
        input_directory = os.path.join(os.environ['OPUS_HOME'], "opus_matsim",
                                       "tmp")
        logger.log_status("input_directory: " + input_directory)
        # location of the modified travel time travel_data
        in_storage = csv_storage(storage_location=input_directory)
        # create travel data set (travel times)
        travel_data_set = TravelDataDataset(in_storage=in_storage,
                                            in_table_name=table_name)

        # join the modified travel times with the travel data set of the current year
        existing_travel_data_set.join(
            travel_data_set,
            travel_data_set.get_non_id_primary_attribute_names(),
            metadata=AttributeType.PRIMARY)

        ##### pre-calculated MATSim travel data (travel costs)
        #        logger.log_status('Integrating pre-calculated travel costs (MATSim) in year %i for next simulation year.')
        #        input_directory = os.path.join( test_dir_path, 'data', 'travel_cost')
        #        logger.log_status("input_directory: " + input_directory )
        #        # check source file
        #        if not os.path.exists( input_directory ):
        #            print 'File not found! %s' % input_directory
        #            sys.exit()
        # location of pre-calculated MATSim travel costs
        #        in_storage = csv_storage(storage_location = input_directory)
        # create travel data set (travel costs)
        #        travel_data_set = TravelDataDataset( in_storage=in_storage, in_table_name=table_name )

        # join travel data set from pre-calculated MATSim results
        #        existing_travel_data_set.join(travel_data_set, travel_data_set.get_non_id_primary_attribute_names(),metadata=AttributeType.PRIMARY)

        return existing_travel_data_set
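        # Flow summary: load the cached travel_data for the current year, load
        # the MATSim CSV output of the same table, then join so the MATSim
        # columns overwrite the matching cached ones; the commented-out block
        # above would merge the pre-calculated travel costs the same way.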