Example no. 1
    def run(self, config, year):
        """ This class simulates a MATSim run. Therefore it copies 
            real travel data into the OPUS_HOME and modifies the 
            entries in the following runs.
        """        
        logger.start_block("Starting RunDummyTravelTimeTravelModel.run(...)")

        self.config = config
        # get travel model parameter from the opus dictionary
        self.travel_model_configuration = config['travel_model_configuration']
        
        self.first_year = 2001 # TODO make configurable (base year + 1)
        
        # set output directory for travel data
        self.travel_data_dir = paths.get_opus_home_path( "opus_matsim", "tmp" )

        # for debugging
        #try: #tnicolai
        #    import pydevd
        #    pydevd.settrace()
        #except: pass

        # set travel data for test simulation
        if year == self.first_year:
            logger.log_status('Exporting travel_data from base_year_cache to %s' % self.travel_data_dir)
            self.export_travel_data(None)
            logger.log_status("Modifying travel data.")
            self.modify_travel_data()   
            logger.log_status("Finished modifying...")  
        else:
            logger.log_status("Travel data was modified before. Nothing to do...")

        logger.end_block()
    def _write_workplaces_to_files(self, person_set, tm_input_files):
        home_zones = person_set.get_attribute("zone_id")
        job_zones = person_set.get_attribute("job_zone_id")
        igroup = 0
        for tm_file in tm_input_files:
            logger.start_block("Writing to emme2 input file: " +
                               tm_input_files[igroup])
            try:
                newfile = open(tm_input_files[igroup], 'w')
                try:
                    newfile.write(r"""c  prepared: %s
t matrices
m matrix=mf9%s default=incr
""" % (time.strftime("%c", time.localtime(time.time())), igroup + 1))
                    line_template = " %3d    %3d    1 \n"
                    person_idx = where(
                        logical_and(
                            person_set.get_attribute(
                                "is_placed_non_home_based_worker_with_job"),
                            person_set.get_attribute("income_group_%s" %
                                                     (igroup + 1))))[0]
                    for i in person_idx:
                        newfile.write(line_template %
                                      (home_zones[i], job_zones[i]))
                finally:
                    newfile.close()
            finally:
                logger.end_block()
            igroup += 1
        return tm_input_files
    def cache_database_table(self, table_name, base_year, database, in_storage, config):
        """Copy this table from input database into attribute cache.
        """
        logger.start_block('Caching table %s' % table_name)
        try:
            #TODO: why is the config being modified? ...seems like it's kind of useless here...
            config['storage_location'] = os.path.join(config['cache_directory'], str(base_year), table_name)
            
            if not os.path.exists(config['storage_location']):
                flt_storage = StorageFactory().get_storage(
                   type='flt_storage', 
                   subdir='store', 
                   storage_location=config['storage_location'])
                
                table = database.get_table(table_name)
                
                id_name = [primary_key.name.lower() for primary_key in table.primary_key]

                dataset = Dataset(resources=config, 
                                  in_storage=in_storage,
                                  out_storage=flt_storage,
                                  in_table_name=table_name,
                                  id_name = id_name)

                nchunks = config['creating_baseyear_cache_configuration'].tables_to_cache_nchunks.get(table_name, 1)
                current_time = SimulationState().get_current_time()
                SimulationState().set_current_time(base_year)
                dataset.load_dataset(nchunks=nchunks, flush_after_each_chunk=True)
                SimulationState().set_current_time(current_time)
            else:
                logger.log_status(config['storage_location'] + " already exists; skipping caching " + table_name)
            
        finally:
            logger.end_block()
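All three functions above (and every example that follows) use the same block-logging discipline: open a block, do the work inside try, and close the block in finally so the log nesting stays balanced even when the work raises. A minimal sketch of the pattern, assuming the opus_core import path used by these modules:

    from opus_core.logger import logger

    def cache_table_with_block_logging(table_name):
        logger.start_block('Caching table %s' % table_name)
        try:
            pass  # the actual caching work goes here
        finally:
            # end_block() always runs, so the log indentation stays balanced
            # even if the work above raises an exception.
            logger.end_block()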
Example no. 4
    def unroll_gridcells_to_cache_from_buildings(self, gridcells, buildings, 
                                  cache_directory, base_year):
        """Populate the cache with the unrolled gridcells info derived
        from the buildings table.
        """
        logger.start_block('Unrolling gridcell data from buildings')

        try:
            storage = AttributeCache().get_flt_storage_for_year(base_year)
            
            urbansim_constant = SessionConfiguration().get_dataset_from_pool('urbansim_constant')
            print "recent_years = %s" % urbansim_constant['recent_years']
            
            recent_years = urbansim_constant['recent_years']
            roller = RollbackGridcellsFromBuildings()
            for year in range(base_year, base_year-recent_years-1, -1):
                logger.start_block('Unrolling gridcells into year %d' % (year-1))
                try:
                    roller.unroll_gridcells_for_one_year(gridcells, 
                                                         buildings, 
                                                         year,
                                                         dataset_pool=SessionConfiguration().get_dataset_pool())
                    flt_directory = os.path.join(cache_directory, str(year-1))
                    flt_storage = StorageFactory().get_storage(
                        type='flt_storage', subdir='store', 
                        storage_location=flt_directory)
                    gridcells.write_dataset(out_storage=flt_storage)
                finally:
                    logger.end_block()
                
        finally:
            logger.end_block()
Example no. 5
 def _put_one_matrix_into_travel_data_set(self, travel_data_set, max_zone_id, matrix_name, attribute_name, bank_path,
                                          matrices_created=False):
     """
     Adds to the given travel_data_set the data for the given matrix
     that is in the emme/2 data bank.
     """
     logger.start_block('Copying data for matrix %s into variable %s' %
                        (matrix_name, attribute_name))
     try:
         if not matrices_created:
             self._get_matrix_into_data_file(matrix_name, max_zone_id, bank_path)
             file_name = "_one_matrix.txt"
         else:
             file_name = "%s_one_matrix.txt" % matrix_name
         file_contents = self._get_emme2_data_from_file(join(bank_path, file_name))
         
         travel_data_set.add_primary_attribute(data=zeros(travel_data_set.size(), dtype=float32), 
                                               name=attribute_name)
         odv = array([line.split() for line in file_contents], dtype=float32)
         if odv.size == 0:
             logger.log_error("Skipped exporting travel_data attribute %s: No data is exported from EMME matrix." % attribute_name)
         else:            
             travel_data_set.set_values_of_one_attribute_with_od_pairs(attribute=attribute_name,
                                                                       values=odv[:,2],
                                                                       O=odv[:,0].astype('int32'),
                                                                       D=odv[:,1].astype('int32')
                                                                       )
     finally:
         logger.end_block()
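For illustration, a hypothetical stand-in for the od-pair layout the code above expects from _get_emme2_data_from_file(): one "origin destination value" triple per line, so column 2 holds the matrix values and columns 0 and 1 the zone ids:

    from numpy import array, float32

    # hypothetical file contents; the real data comes from the emme/2 bank export
    file_contents = ["1 2 3.5", "1 3 7.25"]
    odv = array([line.split() for line in file_contents], dtype=float32)
    values = odv[:, 2]                        # matrix values
    origins = odv[:, 0].astype('int32')       # O zone ids
    destinations = odv[:, 1].astype('int32')  # D zone ids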
Example no. 6
    def preprocess_datasets(self):
        logger.start_block('Pre-processing buildings')
        self.compute_building_variables()
        # consolidate buildings of the same type on the same parcel
        parcels = self.dataset_pool.get_dataset('parcel')
        number_of_buildings = parcels.compute_variables("number_of_buildings = parcel.number_of_agents(building)", 
                                               dataset_pool=self.dataset_pool)
        multiple_bldg_parcels = where(number_of_buildings > 1)[0]
        bldgs_to_remove = zeros(self.input_buildings.size(), dtype='bool8')
        consolidated = array([0, 0])
        logger.log_status("Original total: %s buildings" % self.input_buildings.size())
        for i in multiple_bldg_parcels:
            bidx = where(self.input_buildings['parcel_id'] == parcels["parcel_id"][i])[0]
            bts = unique(self.input_buildings["building_type_id"][bidx])         
            if bts.size == 1:
                cons_idx = bidx[0]
                for attr in ["non_residential_sqft", "land_area", "residential_units"]:
                    self.input_buildings[attr][cons_idx] = self.input_buildings[attr][bidx].sum()
                unitattr = self.get_units(cons_idx)
                totsqft = self.input_buildings[unitattr][bidx] * self.input_buildings["sqft_per_unit"][bidx]
                if self.input_buildings[unitattr][cons_idx] > 0:
                    self.input_buildings["sqft_per_unit"][cons_idx] = round(totsqft.sum()/self.input_buildings[unitattr][cons_idx].astype("float32"))
                bldgs_to_remove[bidx[1:]] = True
                consolidated = consolidated + array([1, bidx.size])
                continue
#            bldgs_to_remove[bidx] = True
#            self.parcels_not_processed = self.parcels_not_processed + [parcels["parcel_id"][i]]
        self.input_buildings.subset_by_index(where(logical_not(bldgs_to_remove))[0])
        if "template_id" in self.input_buildings.get_known_attribute_names():
            self.original_templates = self.input_buildings["template_id"].copy()
        self.input_buildings.add_attribute(zeros(self.input_buildings.size(), dtype="int32"), name="template_id",
                                           metadata=1)
        logger.log_status("%s buildings consolidated into %s." % (consolidated[1], consolidated[0]))
        logger.log_status("%s parcels were not processed." % len(self.parcels_not_processed))
        logger.end_block()
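A small worked example (hypothetical numbers) of the sqft_per_unit recomputation above: the consolidated building gets the total square footage of the merged buildings divided by their combined units:

    from numpy import array

    # two buildings of the same type on one parcel (hypothetical values)
    residential_units = array([10, 30])
    sqft_per_unit = array([800, 1000])
    totsqft = residential_units * sqft_per_unit      # [8000, 30000]
    consolidated_units = residential_units.sum()     # 40, as summed into cons_idx above
    new_sqft_per_unit = round(totsqft.sum() / float(consolidated_units))  # 950.0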
Example no. 7
    def unroll_gridcells_to_cache_from_buildings(self, gridcells, buildings, 
                                  cache_directory, base_year):
        """Populate the cache with the unrolled gridcells info derived
        from the buildings table.
        """
        logger.start_block('Unrolling gridcell data from buildings')

        try:
            storage = AttributeCache().get_flt_storage_for_year(base_year)
            
            urbansim_constant = SessionConfiguration().get_dataset_from_pool('urbansim_constant')
            print "recent_years = %s" % urbansim_constant['recent_years']
            
            recent_years = urbansim_constant['recent_years']
            roller = RollbackGridcellsFromBuildings()
            for year in range(base_year, base_year-recent_years-1, -1):
                logger.start_block('Unrolling gridcells into year %d' % (year-1))
                try:
                    roller.unroll_gridcells_for_one_year(gridcells, 
                                                         buildings, 
                                                         year,
                                                         dataset_pool=SessionConfiguration().get_dataset_pool())
                    flt_directory = os.path.join(cache_directory, str(year-1))
                    flt_storage = StorageFactory().get_storage(
                        type='flt_storage', subdir='store', 
                        storage_location=flt_directory)
                    gridcells.write_dataset(out_storage=flt_storage)
                finally:
                    logger.end_block()
                
        finally:
            logger.end_block()
Example no. 8
    def get_needed_matrices_from_emme2(self,
                                       year,
                                       cache_directory,
                                       bank_dir,
                                       matrix_variable_map,
                                       matrices_created=False):
        """Copies the specified emme/2 matrices into the specified travel_data variable names.
        """
        logger.start_block('Getting matrices from emme2')
        try:
            zone_set = SessionConfiguration().get_dataset_from_pool('zone')
            zone_set.load_dataset()
            travel_data_set = self.get_travel_data_from_emme2(
                zone_set, bank_dir, matrix_variable_map, matrices_created)
        finally:
            logger.end_block()

        logger.start_block('Writing data to cache')
        try:
            next_year = year + 1
            out_storage = AttributeCache().get_flt_storage_for_year(next_year)
            travel_data_set.write_dataset(attributes='*',
                                          out_storage=out_storage,
                                          out_table_name='travel_data')
        finally:
            logger.end_block()
    def run(self, config, year):
        """ This class simulates a MATSim run. Therefore it copies 
            real travel data into the OPUS_HOME and modifies the 
            travel cost entries.
        """
        logger.start_block("Starting ModifyTravelCosts.run(...)")

        self.config = config
        # get travel model parameter from the opus dictionary
        self.travel_model_configuration = config['travel_model_configuration']
        self.base_year = self.travel_model_configuration['base_year']

        # for debugging
        #try: #tnicolai
        #    import pydevd
        #    pydevd.settrace()
        #except: pass

        # set travel data for test simulation
        if year == self.base_year + 1:
            logger.log_status(
                "Prepare copying pre-calculated MATSim travel data to OPUS_HOME tmp directory."
            )
            self.copy_pre_calculated_MATSim_travel_costs()
            logger.log_status("Modifying travel costs.")
            self.modify_travel_costs()  # comment out for base scenario
            logger.log_status(
                "Finished modifying...")  # comment out for base scenario
        # use modified travel data for all following runs
        else:
            logger.log_status(
                "Travel data was modified before. So there is nothing to do..."
            )

        logger.end_block()
    def _generate_input_land_cover(self, current_year, base_directory, 
                         urbansim_cache_directory, years, output_directory,
                         convert_flt, convert_input):
        if current_year == years[0]:
            if not convert_input:
                return base_directory
            else:
                
                package_dir_path = package().get_package_path()
                command = os.path.join(package_dir_path, "tools", "lc_convert.py")
                status = os.system(command + ' %s -i "%s" -o "%s"' % ('input data',  base_directory, self.temp_land_cover_dir))
                assert status == 0, "generate input failed"
                return self.temp_land_cover_dir
            
        previous_year = self._get_previous_year(current_year, years)
            
        if not convert_flt: 
            logger.start_block("Copy data from %s to temp land cover folder" % urbansim_cache_directory)
            try:
                self._copy_invariants_to_temp_land_cover_dir(os.path.join(urbansim_cache_directory, str(previous_year)))
            finally:
                logger.end_block()
            return self.temp_land_cover_dir
        
#        package_dir_path = package().get_package_path()
#        command = os.path.join(package_dir_path, "tools", "lc_convert.py")
        flt_directory_in = os.path.join(output_directory, str(previous_year))
        flt_directory_out = self.temp_land_cover_dir
        LCCMInputConvert()._convert_lccm_input(flt_directory_in, flt_directory_out)
#        status = os.system(command + ' %d -i "%s" -o "%s"' % (previous_year, flt_directory_in, flt_directory_out))
#        assert(status == 0, "generate input failed")        
        return self.temp_land_cover_dir
Example no. 11
 def run_emme2_macro(self,
                     macro_path,
                     bank_path,
                     scenario_number=-1,
                     output_file=None,
                     append_to_output=True):
     """
     Runs this emme/2 macro in the bank specified.
     """
     logger.start_block('Running emme2 macro %s in bank at %s' %
                        (macro_path, bank_path))
     # generate a random file name
     temp_macro_file_name = tempfile.NamedTemporaryFile().name
     prior_cwd = os.getcwd()
     if output_file is None:
         out = ""
     else:
         out = "> %s" % output_file
         if append_to_output:
             out = " >%s" % out
     try:
         os.chdir(bank_path)
         shutil.copy(macro_path, temp_macro_file_name)
         cmd = "%s 000 -m %s" % (self.emme_cmd, temp_macro_file_name)
         if scenario_number != -1:
             cmd = "%s %s" % (cmd, scenario_number)
         cmd = "%s%s" % (cmd, out)
         logger.log_status(cmd)
         if os.system(cmd):
             raise StandardError("Problem with simulation")
     finally:
         os.remove(temp_macro_file_name)
         os.chdir(prior_cwd)
         logger.end_block()
    def run(self, config, year):
        """ This class simulates a MATSim run. Therefore it copies 
            real travel data into the OPUS_HOME and modifies the 
            travel cost entries.
        """        
        logger.start_block("Starting ModifyTravelCosts.run(...)")

        self.config = config
        # get travel model parameter from the opus dictionary
        self.travel_model_configuration = config['travel_model_configuration']
        self.base_year = self.travel_model_configuration['base_year']
        
        # for debugging
        #try: #tnicolai
        #    import pydevd
        #    pydevd.settrace()
        #except: pass
        
        # set travel data for test simulation
        if year == self.base_year+1:
            logger.log_status("Prepare copying pre-calculated MATSim travel data to OPUS_HOME tmp directory.")
            self.copy_pre_calculated_MATSim_travel_costs()
            logger.log_status("Modifying travel costs.")
            self.modify_travel_costs()                  # comment out for base scenario
            logger.log_status("Finished modifying...")  # comment out for base scenario
        # use modified travel data for all following runs
        else:
            logger.log_status("Travel data was modified before. So there is nothing to do...")

        logger.end_block()
Example no. 13
 def _call_input_file_writer(self, year, datasets, in_table_names, out_table_names, variable_names, dataset_pool):
     current_year_tm_dir = self.get_daysim_dir(year)
     file_config = self.config['travel_model_configuration'].get('daysim_file', {})
     file_format = file_config.get('format', 'tab')
     if(file_format == 'hdf5g'):
         current_year_tm_dir = os.path.join(current_year_tm_dir, file_config.get('name', 'daysim_inputs.hdf5'))
     meta_data = self.config['travel_model_configuration'].get('meta_data', {})
     storage = StorageFactory().get_storage('%s_storage' % file_format, storage_location = current_year_tm_dir)
     kwargs = {}
     mode={file_format: Storage.OVERWRITE}
      if file_format in ('csv', 'tab', 'tsv', 'dat'):
         kwargs['append_type_info'] = False
     if file_format.startswith('hdf5'):
         kwargs['compression'] = file_config.get('hdf5_compression', None) 
     logger.start_block('Writing Daysim inputs.')
     for dataset_name, dataset in datasets.iteritems():
         ds_meta = meta_data.get(in_table_names[dataset_name], {})
         if file_format.startswith('hdf5'):
             kwargs['column_meta']  = ds_meta
         attr_vals = {}
         for attr in variable_names[dataset_name]:
             attr_vals[attr] = dataset[attr]
         storage.write_table(table_name = out_table_names[dataset_name], table_data = attr_vals, mode = mode[file_format], **kwargs)
         mode['hdf5g'] = Storage.APPEND
     logger.end_block()
     logger.log_status('Daysim inputs written into %s' % current_year_tm_dir)
     return out_table_names.values()
Example no. 14
 def run_emme2_macro(self, macro_path, bank_path, scenario_number=-1, output_file=None, append_to_output=True):
     """
     Runs this emme/2 macro in the bank specified.
     """
     logger.start_block('Running emme2 macro %s in bank at %s' %
                        (macro_path, bank_path))
     # generate a random file name
     temp_macro_file_name = tempfile.NamedTemporaryFile().name
     prior_cwd = os.getcwd()
     if output_file is None:
         out = ""
     else:
         out = "> %s" % output_file
         if append_to_output:
             out = " >%s" % out
     try:
         os.chdir(bank_path)
         shutil.copy(macro_path, temp_macro_file_name)
         cmd = "%s 000 -m %s" % (self.emme_cmd, temp_macro_file_name)
         if scenario_number != -1:
             cmd = "%s %s" % (cmd, scenario_number)
         cmd = "%s%s" % (cmd, out)
         logger.log_status(cmd)
         if os.system(cmd):
             raise StandardError("Problem with simulation")
     finally:
         os.remove(temp_macro_file_name)
         os.chdir(prior_cwd)
         logger.end_block()
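Tracing the output-redirection string built above with a hypothetical file name: the plain case produces an overwrite redirection, and the append branch rewraps it into '>>':

    output_file = "macro_output.txt"   # hypothetical
    out = "> %s" % output_file         # "> macro_output.txt"   -- overwrite
    out = " >%s" % out                 # " >> macro_output.txt" -- when append_to_output is True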
Example no. 15
    def _run_each_year_as_separate_process(self,
                                           iyear,
                                           year,
                                           seed=None,
                                           resources=None,
                                           profiler_name=None,
                                           log_file=None):

        logger.start_block('Running simulation for year %d in new process' %
                           year)
        resources['years'] = (year, year)
        resources['seed'] = (seed,)

        if profiler_name is not None:
            # add year to the profile name
            resources["profile_filename"] = "%s_%s" % (profiler_name, year)

        optional_args = []
        if log_file:
            optional_args += ['--log-file-name', os.path.split(log_file)[-1]]

        success = False
        try:
            logger.disable_file_logging(log_file)
            success = self._fork_new_process(
                'opus_core.model_coordinators.model_system',
                resources,
                optional_args=optional_args)
            logger.enable_file_logging(log_file, verbose=False)
        finally:
            logger.end_block()

        return success
Example no. 16
 def _run_each_year_as_separate_process(
         self,
         start_year,
         end_year,
         seed_array,
         resources,
         log_file_name='run_multiprocess.log'):
     log_file = os.path.join(resources['cache_directory'], log_file_name)
     profiler_name = resources.get("profile_filename", None)
     iyear = 0
     for year in range(start_year, end_year + 1):
         logger.start_block(
             'Running simulation for year %d in new process' % year)
         try:
             resources['years'] = (year, year)
              resources['seed'] = (seed_array[iyear],)
             logger.disable_file_logging(log_file)
             if profiler_name is not None:
                 resources["profile_filename"] = "%s_%s" % (
                     profiler_name, year)  # add year to the profile name
             self._fork_new_process(
                 'opus_core.model_coordinators.model_system',
                 resources,
                 optional_args=['--log-file-name', log_file_name])
             logger.enable_file_logging(log_file, verbose=False)
         finally:
             logger.end_block()
         iyear += 1
     self._notify_stopped()
 def run(self, config, year):
     """Running MATSim.  A lot of paths are relative; the base path is ${OPUS_HOME}/opus_matsim.  As long as ${OPUS_HOME}
     is correctly set and the MATSim tarfile was unpacked in OPUS_HOME, this should work out of the box.  There may,
     however, be problems with the Java version.
     """
     try:
         import pydevd
         pydevd.settrace()
     except: pass
     
     logger.start_block("Starting RunTravelModel.run(...)")
     
     self.setUp( config )
     
     config_obj = MATSimConfigObject(config, year, self.matsim_config_full)
     config_obj.marschall()
     
     cmd = """cd %(opus_home)s/opus_matsim ; java %(vmargs)s -cp %(classpath)s %(javaclass)s %(matsim_config_file)s""" % {
             'opus_home': os.environ['OPUS_HOME'],
             'vmargs': "-Xmx2000m",
             'classpath': "libs/log4j/log4j/1.2.15/log4j-1.2.15.jar:libs/jfree/jfreechart/1.0.7/jfreechart-1.0.7.jar:libs/jfree/jcommon/1.0.9/jcommon-1.0.9.jar:classesMATSim:classesToronto:classesTNicolai:classesKai:classesEntry", #  'classpath': "classes:jar/MATSim.jar",
             'javaclass': "playground.run.Matsim4Urbansim",
             'matsim_config_file': self.matsim_config_full } 
     
     logger.log_status('Running command %s' % cmd ) 
     
     cmd_result = os.system(cmd)
     if cmd_result != 0:
         error_msg = "Matsim Run failed. Code returned by cmd was %d" % (cmd_result)
         logger.log_error(error_msg)
         logger.log_error("Note that currently (dec/08), paths in the matsim config files are relative to the opus_matsim root,")
         logger.log_error("  which is one level 'down' from OPUS_HOME.")
         raise StandardError(error_msg)        
     
     logger.end_block()
Example no. 18
    def log_results(self):
        procedure = self.model_system.run_year_namespace["model"].procedure        
        if not hasattr(procedure, 'print_results'):
            logger.log_warning("Estimation procedure %s doesn't have a print_results() method, "  % procedure + \
                               "which is needed to log estimation results.")
            return

        tmp_config = Resources(self.config)
        outputvar = tmp_config['models_configuration'][self.model_name]['controller']['estimate']['arguments']['output']
        results = self.model_system.vardict.get(outputvar, "process_output")[1]
        
        storage_location = AttributeCache().get_storage_location()
        log_file_name = "estimate_models.log" ## one file for all estimation results
        logger.enable_file_logging( os.path.join(storage_location, log_file_name),
                                    mode = 'a')  ##appending instead of overwriting
        logger.start_block("%s Estimation Results" % self.model_name)        
        for submodel, submodel_results in results.items():
            logger.log_status( "Submodel %s" % submodel)
            if submodel_results == {}:
                logger.log_warning("No estimation results for submodel %s" % submodel)
            else:
                try:
                    procedure.print_results(submodel_results)
                except:
                    logger.log_warning("Problems in printing results for submodel %s" % submodel) 
        logger.end_block()
        logger.disable_file_logging()        
Example no. 19
    def run(self, config, year):
        """ This class simulates a MATSim run copying a manipulated 
            travel data into opus home tmp directory
        """        
        logger.start_block("Starting CopyDummyTravelData.run(...)")

        self.config = config
        # get travel model parameter from the opus dictionary
        self.travel_model_configuration = config['travel_model_configuration']
        self.base_year = self.travel_model_configuration['base_year']
        
        # for debugging
        #try: #tnicolai
        #    import pydevd
        #    pydevd.settrace()
        #except: pass
        
        # set travel data for test simulation
        if year == self.base_year+1:
            logger.log_status("Prepare copying pre-calculated MATSim travel data to OPUS_HOME tmp directory.")
            self.copy_dummy_travel_data()
            self.modify_workplace_accessibility()
        # use modified travel data for all following runs
        else:
            logger.log_status("Travel data is already copied in the first iteration.")

        logger.end_block()
Example no. 20
    def _run_each_year_as_separate_process(
        self, iyear, year, seed=None, resources=None, profiler_name=None, log_file=None
    ):

        logger.start_block("Running simulation for year %d in new process" % year)
        resources["years"] = (year, year)
        resources["seed"] = (seed,)

        if profiler_name is not None:
            # add year to the profile name
            resources["profile_filename"] = "%s_%s" % (profiler_name, year)

        optional_args = []
        if log_file:
            optional_args += ["--log-file-name", os.path.split(log_file)[-1]]

        success = False
        try:
            logger.disable_file_logging(log_file)
            success = self._fork_new_process(
                "opus_core.model_coordinators.model_system", resources, optional_args=optional_args
            )
            logger.enable_file_logging(log_file, verbose=False)
        finally:
            logger.end_block()

        return success
Example no. 21
    def _run_each_year_as_separate_process(self, start_year, end_year, seed_array, resources, log_file_name='run_multiprocess.log'):
        skip_first_year_of_urbansim = resources.get('skip_urbansim', False)
        log_file = os.path.join(resources['cache_directory'], log_file_name)
        profiler_name = resources.get("profile_filename", None)
        iyear = 0
        for year in range(start_year, end_year+1):
            if (year != start_year) or ((year == start_year) and (not skip_first_year_of_urbansim)):
                logger.start_block('Running UrbanSim for year %d in new process' % year)
                try:
                    resources['years'] = (year, year)
                    resources['seed'] = (seed_array[iyear],)
                    logger.disable_file_logging(log_file)
                    if profiler_name is not None:
                        resources["profile_filename"] = "%s_%s" % (profiler_name, year) # add year to the profile name
                    self._fork_new_process(
                        'urbansim.model_coordinators.model_system', resources, optional_args=['--log-file-name', log_file_name])
                    logger.enable_file_logging(log_file, verbose=False)
                finally:
                    logger.end_block()
                    
            if ('travel_model_configuration' in resources) and (not resources.get('skip_travel_model', False)):
                # tnicolai add start year to travel model config
                tmc = resources['travel_model_configuration']
                tmc['start_year'] = start_year # end tnicolai
                self._run_travel_models_in_separate_processes(resources['travel_model_configuration'], year, resources)

            if 'post_year_configuration' in resources:
                self._run_travel_models_in_separate_processes(resources['post_year_configuration'], year, resources)
            iyear +=1
        self._notify_stopped()
Example no. 22
    def run(self, config, year):
        """Running MATSim.  A lot of paths are relative; the base path is ${OPUS_HOME}/opus_matsim.  As long as ${OPUS_HOME}
        is correctly set and the MATSim tar-file was unpacked in OPUS_HOME, this should work out of the box.  There may,
        however, be problems with the Java version.
        """

        logger.start_block("Starting RunTravelModel.run(...)")

        # try: # tnicolai :for debugging
        #    import pydevd
        #    pydevd.settrace()
        # except: pass

        config_obj_v3 = MATSimConfigObjectV3(config, year)
        self.matsim_config_full = config_obj_v3.marschall()

        # check for test parameter
        tmc = config["travel_model_configuration"]
        if tmc["matsim4urbansim"].get("test_parameter") != None:
            self.test_parameter = tmc["matsim4urbansim"].get("test_parameter")
        # change to directory opus_matsim
        os.chdir(config_obj_v3.matsim4opus_path)

        # init cmd
        cmd = ""
        # calling travel model with cmd command
        if sys.platform.lower() == "win32":
            # reserve memory for java
            xmx = "-Xmx1500m"  # Windows can't reserve more than 1500m
            logger.log_note("Note that Java for Windows can't reserve more than 1500 MB of memory to run MATSim!!!")
            cmd = """java %(vmargs)s -cp %(classpath)s %(javaclass)s %(matsim_config_file)s %(test_parameter)s""" % {
                "vmargs": xmx,
                "classpath": "jar/matsim.jar;jar/contrib/matsim4urbansim.jar",
                "javaclass": "org.matsim.contrib.matsim4urbansim.run.MATSim4UrbanSimZone",
                "matsim_config_file": self.matsim_config_full,
                "test_parameter": self.test_parameter,
            }
        else:
            # reserve memory for java
            xmx = "-Xmx4000m"
            cmd = """java %(vmargs)s -cp %(classpath)s %(javaclass)s %(matsim_config_file)s %(test_parameter)s""" % {
                "vmargs": xmx,
                "classpath": "jar/matsim.jar:jar/contrib/matsim4urbansim.jar",
                "javaclass": "org.matsim.contrib.matsim4urbansim.run.MATSim4UrbanSimZone",
                "matsim_config_file": self.matsim_config_full,
                "test_parameter": self.test_parameter,
            }

        logger.log_status("Running command %s" % cmd)

        cmd_result = os.system(cmd)
        if cmd_result != 0:
            error_msg = "MATSim Run failed. Code returned by cmd was %d" % (cmd_result)
            logger.log_error(error_msg)
            logger.log_error("Note that paths in the matsim config files are relative to the matsim4opus root,")
            logger.log_error("which is one level 'down' from OPUS_HOME.")
            raise StandardError(error_msg)

        logger.end_block()
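Aside from the -Xmx value, the two branches above differ only in the Java classpath separator (';' on Windows, ':' elsewhere). A sketch of expressing that with os.pathsep instead of duplicating the command string; this is not how the original code does it:

    import os

    jars = ["jar/matsim.jar", "jar/contrib/matsim4urbansim.jar"]
    classpath = os.pathsep.join(jars)   # ';' on win32, ':' on POSIX platforms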
Example no. 23
 def logged_run_method (*req_args, **opt_args):
     logger.start_block("Running %s" % an_instance.full_name(), tags=["model", "model-run"],
                                                             verbosity_level=1)
     try:
         results = run_method(*req_args, **opt_args)
     finally:
         logger.end_block()
     return results
Example no. 24
 def logged_estimate_method (*req_args, **opt_args):
     logger.start_block("Estimating %s" % an_instance.full_name(), tags=["model", "model-estimate"],
                                                             verbosity_level=1)
     try:
         results = estimate_method(*req_args, **opt_args)
     finally:
         logger.end_block()
     return results
Example no. 25
 def logged_run_method (*req_args, **opt_args):
     logger.start_block("Running %s" % an_instance.full_name(), tags=["model", "model-run"],
                                                             verbosity_level=1)
     try:
         results = run_method(*req_args, **opt_args)
     finally:
         logger.end_block()
     return results
Example no. 26
 def logged_method(*req_args, **opt_args):
     logger.start_block(name=an_instance.name(), verbose=False)
     try:
         results = compute_method(*req_args, **opt_args)
         an_instance._do_flush_dependent_variables_if_required()
     finally:
         logger.end_block()
     return results
Example no. 27
 def logged_estimate_method (*req_args, **opt_args):
     logger.start_block("Estimating %s" % an_instance.full_name(), tags=["model", "model-estimate"],
                                                             verbosity_level=1)
     try:
         results = estimate_method(*req_args, **opt_args)
     finally:
         logger.end_block()
     return results
Example no. 28
    def export_dataset(self, dataset_name, in_storage, out_storage):
        logger.start_block('Exporting dataset %s' % dataset_name)
        try:
            values_from_storage = in_storage.load_table(dataset_name)

            out_storage.write_table(dataset_name, values_from_storage)
        finally:
            logger.end_block()
 def tearDown(self):
     logger.log_status('entering tearDown')
     logger.log_status('Removing loaded/stored file: %s ...' %self.xsd_destination)
     if os.path.exists(self.xsd_destination):
         os.remove(self.xsd_destination)
     logger.log_status('... removing finished.')
     logger.log_status('leaving tearDown')
     logger.end_block('Finished XSDLoadTest')
 def export_dataset(self, dataset_name, in_storage, out_storage):
     logger.start_block('Exporting dataset %s' % dataset_name)
     try:
         values_from_storage = in_storage.load_table(dataset_name)
         
         out_storage.write_table(dataset_name, values_from_storage)
     finally:
         logger.end_block()
Example no. 31
 def logged_method (*req_args, **opt_args):
     logger.start_block(name=an_instance.name(), verbose=False)
     try:
         results = compute_method(*req_args, **opt_args)
         an_instance._do_flush_dependent_variables_if_required()
     finally:
         logger.end_block()
     return results       
    def run(self, refinement_dataset=None, current_year=None, 
            action_order=['subtract', 'add', 'multiple', 'set_value', 'convert', 'demolish', 'delete'],
            dataset_pool=None):
        
        """
        """
        
        if refinement_dataset is None:
            refinement_dataset = dataset_pool.get_dataset('refinement')
        self.id_names = (refinement_dataset.get_id_name()[0], 'transaction_id')
        
        if current_year is None:
            current_year = SimulationState().get_current_time()
        
        #refinements_this_year = copy.deepcopy(refinement_dataset)
        refinements_this_year = refinement_dataset
        this_year_index = where(refinement_dataset.get_attribute('year')==current_year)[0]
        all_years_index = where(refinement_dataset.get_attribute('year')==-1)[0]
        refinements_this_year.subset_by_index(concatenate( (this_year_index, all_years_index) ), 
                                              flush_attributes_if_not_loaded=False)
        
        transactions = refinements_this_year.get_attribute('transaction_id')
        actions = refinements_this_year.get_attribute('action')
        for this_transaction in sort( unique(transactions) ):
            #transaction_list = [] # list of each action in this transaction
            agents_pool = []  # indices of agents, to keep track of agents within one transaction
            logger.start_block("Transaction %i" % this_transaction)
            for action_type in action_order:
                action_function = getattr(self, '_' + action_type)
                for refinement_index in where( logical_and(transactions==this_transaction, actions == action_type))[0]:
                    this_refinement = refinements_this_year.get_data_element(refinement_index)
                    ## get agent_dataset and location_dataset if specified
                    agent_dataset_name = this_refinement.agent_dataset
                    agent_dataset = dataset_pool.get_dataset( agent_dataset_name )
                    location_dataset = None
                    logger.log_status("Action: %s\nAmount: %s\nAttribute: %s\nFilter: %s" % \
                                      (action_type, this_refinement.amount, this_refinement.agent_attribute, 
                                       this_refinement.agent_filter
                                       ) )
                    action_function( agents_pool, this_refinement.amount,
                                     agent_dataset, location_dataset, 
                                     this_refinement, 
                                     dataset_pool )
                    
                    agent_dataset.flush_dataset()
                    dataset_pool._remove_dataset(agent_dataset.get_dataset_name())
                    if location_dataset is not None:
                        location_dataset.flush_dataset()
                        dataset_pool._remove_dataset(location_dataset.get_dataset_name())
                    
            ## delete agents still in agents_pool at the end of the transaction
            #agent_dataset.remove_elements( array(agents_pool) )
            
            
#            dataset_pool.flush_loaded_datasets()
#            dataset_pool.remove_all_datasets()
                        
            logger.end_block()
    def _run_PopSyn(self, tm_config, year, run_dir):
        """ Gets the inputs all setup and then runs the Population Synthesizer.
        """
        # update sfzones.csv with the landusemodel output
        dest_dir = os.path.join(run_dir, self.SUBDIR_POPULATION_SYNTHESIZER)
        if not os.path.exists(dest_dir): os.makedirs(dest_dir)
        
        # copy over inputs files
        dest_inputsdir = os.path.join(dest_dir, "inputs")
        if not os.path.exists(dest_inputsdir): os.makedirs(dest_inputsdir)        
        # copy over the tazdata from the TazDataProcessor step
        shutil.copy2(os.path.join(run_dir, self.SUBDIR_TAZ_DATA_PROCESSOR, "tazdata.dbf"), dest_inputsdir)
        
        # copy over controls files
        dest_controlsdir = os.path.join(dest_dir, "controls")
        if os.path.exists(dest_controlsdir): shutil.rmtree(dest_controlsdir)  
        src_controlsdir = os.path.join(tm_config['popsyn_srcdir'], "controls")
        shutil.copytree(src_controlsdir, dest_controlsdir)

        self._updateConfigPaths(os.path.join(dest_controlsdir, "hhSubmodels.properties"), 
         [ [r"(tazdata.file\s*=\s*)(\S*)",      r"\1inputs\tazdata.dbf"],
           [r"(tazdata.out.file\s*=\s*)(\S*)",  r"\1inputs\tazdata_converted.csv"],
           [r"(\S\s*=\s*)(inputs/)(\S*)",       "%s%s%s" % (r"\1",os.path.join(tm_config['popsyn_srcdir'],"inputs"),r"\\\3")]
         ])
        self._updateConfigPaths(os.path.join(dest_controlsdir, "arc.properties"), 
         [ [r"(Forecast.TazFile\s*=\s*)(\S*)",  r"\1inputs\tazdata_converted.csv"],
           [r"(\S\s*=\s*)(./inputs/)(\S*)",       "%s%s%s" % (r"\1",os.path.join(tm_config['popsyn_srcdir'],"inputs"),r"\\\3")]
         ])
        
        # make the outputs directory
        dest_outputsdir = os.path.join(dest_dir, "outputs")
        if not os.path.exists(dest_outputsdir): os.makedirs(dest_outputsdir)
        for dir in ["intermediate", "syntheticPop", "validation"]:
            newdir = os.path.join(dest_outputsdir, dir)
            if not os.path.exists(newdir): os.makedirs(newdir)

        # copy over the batch              
        batfile             = "runPopSyn.bat"
        shutil.copy2(os.path.join(tm_config['popsyn_srcdir'], batfile), dest_dir)
        
        # run the Population Synthesizer
        sfsampfile = os.path.join(dest_outputsdir, "syntheticPop", "sfsamp.txt")
        if os.path.exists(sfsampfile):
            logger.log_status("Synthesized population file %s exists -- skipping creation!" % sfsampfile)
        else:
            cmd         = os.path.join(dest_dir, batfile)
            logger.start_block("Running [%s]" % (cmd))
            popsynproc = subprocess.Popen( cmd, cwd = dest_dir, stdout=subprocess.PIPE ) 
            for line in popsynproc.stdout:
                logger.log_status(line.strip('\r\n'))
            popsynret  = popsynproc.wait()
            logger.log_status("Returned %d" % (popsynret))
            if popsynret != 0: raise StandardError("Population Synthesizer exited with bad return code")

        # put the output files in place
        shutil.copy2(sfsampfile, os.path.join(run_dir, self.SUBDIR_LANDUSE_INPUTS))

        logger.end_block()
    def run(self, refinement_dataset=None, current_year=None, 
            action_order=['subtract', 'add', 'multiple', 'set_value', 'convert', 'demolish', 'delete'],
            dataset_pool=None):
        
        """
        """
        
        if refinement_dataset is None:
            refinement_dataset = dataset_pool.get_dataset('refinement')
        self.id_names = (refinement_dataset.get_id_name()[0], 'transaction_id')
        
        if current_year is None:
            current_year = SimulationState().get_current_time()
        
        #refinements_this_year = copy.deepcopy(refinement_dataset)
        refinements_this_year = refinement_dataset
        this_year_index = where(refinement_dataset.get_attribute('year')==current_year)[0]
        all_years_index = where(refinement_dataset.get_attribute('year')==-1)[0]
        refinements_this_year.subset_by_index(concatenate( (this_year_index, all_years_index) ), 
                                              flush_attributes_if_not_loaded=False)
        
        transactions = refinements_this_year.get_attribute('transaction_id')
        actions = refinements_this_year.get_attribute('action')
        for this_transaction in sort( unique(transactions) ):
            #transaction_list = [] # list of each action in this transaction
            agents_pool = []  # indices of agents, to keep track of agents within one transaction
            logger.start_block("Transaction %i" % this_transaction)
            for action_type in action_order:
                action_function = getattr(self, '_' + action_type)
                for refinement_index in where( logical_and(transactions==this_transaction, actions == action_type))[0]:
                    this_refinement = refinements_this_year.get_data_element(refinement_index)
                    ## get agent_dataset and location_dataset if specified
                    agent_dataset_name = this_refinement.agent_dataset
                    agent_dataset = dataset_pool.get_dataset( agent_dataset_name )
                    location_dataset = None
                    logger.log_status("Action: %s\nAmount: %s\nAttribute: %s\nFilter: %s" % \
                                      (action_type, this_refinement.amount, this_refinement.agent_attribute, 
                                       this_refinement.agent_filter
                                       ) )
                    action_function( agents_pool, this_refinement.amount,
                                     agent_dataset, location_dataset, 
                                     this_refinement, 
                                     dataset_pool )
                    
                    agent_dataset.flush_dataset()
                    dataset_pool._remove_dataset(agent_dataset.get_dataset_name())
                    if location_dataset is not None:
                        location_dataset.flush_dataset()
                        dataset_pool._remove_dataset(location_dataset.get_dataset_name())
                    
            ## delete agents still in agents_pool at the end of the transaction
            #agent_dataset.remove_elements( array(agents_pool) )
            
            
#            dataset_pool.flush_loaded_datasets()
#            dataset_pool.remove_all_datasets()
                        
            logger.end_block()
Example no. 35
    def run(self,
            chunk_specification,
            dataset,
            dataset_index=None,
            result_array_type=float32,
            **kwargs):
        """ 'chunk_specification' - determines number of chunks to use when computing over
                the dataset set.
            'dataset' - an object of class Dataset that is to be chunked.
            'dataset_index' - index of individuals in dataset to be chunked.
            'result_array_type' - type of the resulting array. Can be any numerical type of numpy array.
            **kwargs - keyword arguments.
            The method chunks dataset_index in the desired number of chunks (minimum is 1) and for each chunk it calls the method
            'run_chunk'. The order of the individuals entering the chunking is determined by the method 'get_agents_order'.
        """
        if dataset_index is None:
            dataset_index = arange(dataset.size())
        if not isinstance(dataset_index, ndarray):
            dataset_index = array(dataset_index)
        logger.log_status("Total number of individuals: %s" %
                          dataset_index.size)
        result_array = zeros(dataset_index.size, dtype=result_array_type)

        if dataset_index.size <= 0:
            logger.log_status("Nothing to be done.")
            return result_array

        all_indexed_individuals = DatasetSubset(dataset, dataset_index)
        ordered_agent_indices = self.get_agents_order(
            all_indexed_individuals)  # set order of individuals in chunks

        # TODO: Remove next six lines after we inherit chunk specification as a text string.
        if (chunk_specification is None):
            chunk_specification = {'nchunks': 1}
        chunker = ChunkSpecification(chunk_specification)
        self.number_of_chunks = chunker.nchunks(dataset_index)
        chunksize = int(
            ceil(all_indexed_individuals.size() /
                 float(self.number_of_chunks)))
        for ichunk in range(self.number_of_chunks):
            logger.start_block("%s chunk %d out of %d." %
                               (self.model_short_name,
                                (ichunk + 1), self.number_of_chunks))
            self.index_of_current_chunk = ichunk
            try:
                chunk_agent_indices = ordered_agent_indices[arange(
                    (ichunk * chunksize),
                    min((ichunk + 1) * chunksize,
                        all_indexed_individuals.size()))]
                logger.log_status("Number of agents in this chunk: %s" %
                                  chunk_agent_indices.size)
                result_array[chunk_agent_indices] = self.run_chunk(
                    dataset_index[chunk_agent_indices], dataset,
                    **kwargs).astype(result_array_type)
            finally:
                logger.end_block()

        return result_array
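The chunk size above is plain ceiling division over the ordered agent indices; with hypothetical numbers:

    from math import ceil

    number_of_individuals = 10
    number_of_chunks = 3
    chunksize = int(ceil(number_of_individuals / float(number_of_chunks)))  # 4
    # chunks then cover index ranges [0:4], [4:8], [8:10]; min() clips the last one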
Example no. 36
 def tearDown(self):
     logger.log_status('entering tearDown')
     logger.log_status('Removing loaded/stored file: %s ...' %
                       self.xsd_destination)
     if os.path.exists(self.xsd_destination):
         os.remove(self.xsd_destination)
     logger.log_status('... removing finished.')
     logger.log_status('leaving tearDown')
     logger.end_block('Finished XSDLoadTest')
Example no. 37
 def _write_to_xls_file(self, data, header, filepath):
     logger.start_block("Writing to transcad input file")
     workbook = xlwt.Workbook()
     worksheet = workbook.add_sheet('data')
     
     [worksheet.write(0, c, cname) for c, cname in enumerate(header)]
     [worksheet.write(rc[0]+1, rc[1], v) for rc, v in ndenumerate(data)]
     workbook.save(filepath)
     logger.end_block()
 def run(self, myconfig={}, year=2001):
     """Runs the travel model, using appropriate info from config. 
     """
     tm_config   = myconfig["travel_model_configuration"]
     
     # verify that the base directory exists and there's a runmodel.bat in it, or we're a nogo
     base_dir    = tm_config['travel_model_base_directory']    
     if not os.path.exists(base_dir):
          raise StandardError(
              "Travel model base directory '%s' must exist with a standard %s" % (base_dir, self.RUN_BATCH))
              
     src_runmodelfile = os.path.join(base_dir, self.RUN_BATCH)
     if not os.path.exists(src_runmodelfile):
          raise StandardError(
              "Travel model base directory '%s' must exist with a standard %s" % (base_dir, self.RUN_BATCH))
     
     run_dir     = os.path.join(base_dir, tm_config[year]['year_dir'])
     if not os.path.exists(run_dir):
         os.makedirs(run_dir)
     
     # if the final skims are already there, then abort -- this has run already
     if os.path.exists(os.path.join(run_dir, "finalTRNWLWAM.h5")):
         logger.log_status("Final skims found in %s, skipping travel model" % run_dir)
         return
     
     runinputs_dir = os.path.join(run_dir, self.SUBDIR_LANDUSE_INPUTS)
     if not os.path.exists(runinputs_dir):
         os.makedirs(runinputs_dir)
     
     # run the TAZ data processor and the Population Synthesizers
     self._run_TazDataProcessor(tm_config, year, run_dir)
     self._run_PopSyn(tm_config, year, run_dir)
     
     # make the run
     shutil.copy2(src_runmodelfile, run_dir)
     self._updateConfigPaths( os.path.join(run_dir, self.RUN_BATCH),
      [ [r"(set LANDUSE\s*=\s*)(\S*)",    r"\1.\%s" % (self.SUBDIR_LANDUSE_INPUTS)],
        [r"(?P<set>set (BASEDEMAND|MTCDEMAND|NETWORKS)\s*=\s*\S*)(\d\d\d\d)[ ]*$", r"\g<set>" + str(year)]
      ], doubleBackslash=False)
     
     # dispatcher to taraval, where models start
     outfile = open( os.path.join(run_dir, self.RUN_DISPATCH), 'w')
     outfile.write("%s\n" % (self.RUN_BATCH))
     outfile.close()
     
     # run it!
     cmd         = self.DISPATCH + " " + self.RUN_DISPATCH + " " + self.CLUSTER_MACHINE
     logger.start_block("Running TRAVEL MODDEL [%s]" % (cmd))
     tmproc = subprocess.Popen( cmd, cwd = run_dir, stdout=subprocess.PIPE ) 
     for line in tmproc.stdout:
         logger.log_status(line.strip('\r\n'))
     tmret  = tmproc.wait()
     logger.log_status("Returned %d" % (tmret))
     # TODO - why does it return 1 when it seems ok?!?!
      if tmret != 0 and tmret != 1: raise StandardError("%s exited with bad return code" % (cmd))
     logger.end_block()
 def _write_to_txt_file(self, data, header, input_file, delimiter='\t'):
     logger.start_block("Writing to transcad input file")
     newfile = open(input_file, 'w')
     newfile.write(delimiter.join(header) + "\n")
     rows, cols = data.shape
     for n in range(rows):
         newfile.write(delimiter.join([str(x) for x in data[n,]]) + "\n")
                                       
     newfile.close()
     logger.end_block()
 def run_simulation(self, simulation_instance=None):
     logger.start_block("Simulation on database %s" % self.config["scenario_database_configuration"].database_name)
     try:
         if simulation_instance is None:
             simulation_instance = ModelSystem()
         simulation_instance.run(self.config)
         # simulation_instance.run_multiprocess(self.config, is_run_subset=True)
     finally:
         logger.end_block()
     logger.log_status("Data cache in %s" % self.simulation_state.get_cache_directory())
Example no. 41
    def run(self, config, year):
        """Running MATSim.  A lot of paths are relative; the base path is ${OPUS_HOME}/opus_matsim.  As long as ${OPUS_HOME}
        is correctly set and the MATSim tarfile was unpacked in OPUS_HOME, this should work out of the box.  There may,
        however, be problems with the Java version.
        """

        logger.start_block("Starting RunTravelModel.run(...)")

        # tnicolai :for debugging
        # try:
        #    import pydevd
        #    pydevd.settrace()
        # except: pass

        self.__setUp(config)

        config_obj = MATSimConfigObject(config, year, self.matsim_config_full)
        config_obj.marschall()

        # tnicolai: original call
        # cmd = """cd %(opus_home)s/opus_matsim ; java %(vmargs)s -cp %(classpath)s %(javaclass)s %(matsim_config_file)s""" % {
        #        'opus_home': paths.OPUS_HOME,
        #        'vmargs': "-Xmx2000m",
        #        'classpath': "libs/log4j/log4j/1.2.15/log4j-1.2.15.jar:libs/jfree/jfreechart/1.0.7/jfreechart-1.0.7.jar:libs/jfree/jcommon/1.0.9/jcommon-1.0.9.jar:classesMATSim:classesToronto:classesTNicolai:classesKai:classesEntry", #  'classpath': "classes:jar/MATSim.jar",
        #        'javaclass': "playground.tnicolai.urbansim.cupum.MATSim4UrbansimCUPUM",
        #        'matsim_config_file': self.matsim_config_full,
        #        'test_parameter': self.test_parameter }

        # tnicolai : test for matsim jar execution ...
        cmd = (
            """cd %(opus_home)s/opus_matsim ; java %(vmargs)s -cp %(classpath)s %(javaclass)s %(matsim_config_file)s %(test_parameter)s"""
            % {
                "opus_home": paths.OPUS_HOME,
                "vmargs": "-Xmx8000m",  # set to 8GB on math cluster and 2GB on Notebook
                "classpath": "jar/matsim4urbansim.jar",
                "javaclass": "playground.tnicolai.urbansim.cupum.MATSim4UrbansimCUPUM",
                "matsim_config_file": self.matsim_config_full,
                "test_parameter": self.test_parameter,
            }
        )

        logger.log_status("Running command %s" % cmd)

        cmd_result = os.system(cmd)
        if cmd_result != 0:
            error_msg = "Matsim Run failed. Code returned by cmd was %d" % (cmd_result)
            logger.log_error(error_msg)
            logger.log_error(
                "Note that currently (dec/08), paths in the matsim config files are relative to the opus_matsim root,"
            )
            logger.log_error("  which is one level 'down' from OPUS_HOME.")
            raise StandardError(error_msg)

        logger.end_block()
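The snippet above shells out with os.system and then inspects the integer return code. A rough sketch of the same invocation using subprocess, which avoids building an intermediate shell string; the jar name and Java class are copied from the command above, the config file name is a placeholder, and StandardError is used only to match the surrounding Python 2 style:

import os
import subprocess

opus_home = os.environ.get('OPUS_HOME', '.')
cmd = ['java', '-Xmx2000m',
       '-cp', 'jar/matsim4urbansim.jar',
       'playground.tnicolai.urbansim.cupum.MATSim4UrbansimCUPUM',
       'matsim_config.xml']          # placeholder for self.matsim_config_full
returncode = subprocess.call(cmd, cwd=os.path.join(opus_home, 'opus_matsim'))
if returncode != 0:
    raise StandardError("MATSim run failed with exit code %d" % returncode)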
Example no. 42
    def plot_data(self, data_array, main, delimiter=','):
        #def _write_to_txt_file(self, data, header, input_file, delimiter='\t'):
        logger.start_block("Writing data to file " + main)
        newfile = open(main + '.csv', 'w')
        #newfile.write(delimiter.join(header) + "\n")
        rows, cols = data_array.shape
        for n in range(rows):
            newfile.write(delimiter.join([str(x) for x in data_array[n,]]) + "\n")

        newfile.close()
        logger.end_block()
 def run_simulation(self, simulation_instance=None):
     logger.start_block('Simulation on database %s' 
         % self.config['scenario_database_configuration'].database_name)
     try:
         if simulation_instance is None:
             simulation_instance = ModelSystem()
         simulation_instance.run(self.config)
         #simulation_instance.run_multiprocess(self.config, is_run_subset=True)
     finally:
         logger.end_block()
     logger.log_status("Data cache in %s" % self.simulation_state.get_cache_directory())
Example no. 44
    def run(self):
        
        logger.start_block()
        insert_auto_generated_cache_directory_if_needed(self.config)
         
        run_manager = RunManager(ServicesDatabaseConfiguration())
        run_manager.setup_new_run(cache_directory = self.config['cache_directory'],configuration = self.config)
        
        run_manager.run_run(self.config, run_as_multiprocess = True )

        logger.end_block()
Example no. 45
    def save_results(self, out_storage=None, model_name=None):
        if self.specification is None or self.coefficients is None:
            raise ValueError, "model specification or coefficient is None"

        #invalid = self.coefficients.is_invalid()
        if False:
            logger.log_warning('Invalid coefficients. Not saving results!')
            return

        if model_name is None:
            model_name = self.config.get('model_name_for_coefficients', None)
            
        if model_name is None:
            if self.model_name is not None:
                model_name = self.model_name
            else:
                raise ValueError, "model_name unspecified"

        out_storage_available = True
        if out_storage:
            pass
        elif 'estimation_database_configuration' in self.config:
            try:
                db_server = DatabaseServer(self.config['estimation_database_configuration'])
                database_name = self.config["estimation_database_configuration"].database_name
    
                if not db_server.has_database(database_name):
                    db_server.create_database(database_name)
    
                output_db = db_server.get_database(database_name)
                out_storage = StorageFactory().get_storage(
                    type='sql_storage',
                    storage_location=output_db)
            except:
                logger.log_warning("Problem with connecting database given by 'estimation_database_configuration'.")
                out_storage_available = False
        else:
            logger.log_warning("No estimation_database_configuration given.")
            out_storage_available = False

        # the original model name of development_project_lcm is too long for a MySQL table name; truncate it
        if model_name.rfind("_development_project_location_choice_model") >=0:
            model_name = model_name.replace('_project', '')
        specification_table = '%s_specification' % model_name
        coefficients_table = '%s_coefficients' % model_name
        if out_storage_available:
            logger.start_block("Writing specification and coefficients into storage given by 'estimation_database_configuration'")
            self.specification.write(out_storage=out_storage, out_table_name=specification_table)
            self.coefficients.write(out_storage=out_storage, out_table_name=coefficients_table)
            logger.end_block()
        logger.start_block("Writing specification and coefficients into %s" % AttributeCache().get_storage_location())
        self.specification.write(out_storage=AttributeCache(), out_table_name=specification_table)
        self.coefficients.write(out_storage=AttributeCache(), out_table_name=coefficients_table)        
        logger.end_block()
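The '_project' truncation above is a plain substring replacement; for instance (the model name is chosen only to illustrate the rename):

model_name = "employment_development_project_location_choice_model"
if model_name.rfind("_development_project_location_choice_model") >= 0:
    model_name = model_name.replace('_project', '')
# model_name is now "employment_development_location_choice_model"
specification_table = '%s_specification' % model_name
coefficients_table = '%s_coefficients' % model_name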
Example no. 47
    def run(self, config, year):
        """Running MATSim.  A lot of paths are relative; the base path is ${OPUS_HOME}/opus_matsim.  As long as ${OPUS_HOME}
        is correctly set and the matsim tarfile was unpacked in OPUS_HOME, this should work out of the box.  There may eventually
        be problems with the java version.
        """

        logger.start_block("Starting RunTravelModel.run(...)")

        travel_model_configuration = config['travel_model_configuration']

        # The default config file name
        matsim_config_filename = travel_model_configuration['matsim_config_filename']

        # over-written if there is a specific config file name for the year:
        if travel_model_configuration[year]['matsim_config_filename']:
            matsim_config_filename = travel_model_configuration[year]['matsim_config_filename']

        cmd = """cd %(opus_home)s/opus_matsim ; java %(vmargs)s -cp %(classpath)s %(javaclass)s %(matsim_config_file)s --year=%(year)i --samplingRate=%(sampling_rate)f""" % {
            'opus_home':
            os.environ['OPUS_HOME'],
            'vmargs':
            "-Xmx2000m",
            'classpath':
            "classes:jar/MATSim.jar",
            'javaclass':
            "playground.run.Matsim4Urbansim",
            'matsim_config_file':
            os.path.join(os.environ['OPUS_HOME'], "opus_matsim",
                         matsim_config_filename),
            'sampling_rate':
            config['travel_model_configuration']['sampling_rate'],
            'year':
            year
        }

        logger.log_status('Running command %s' % cmd)

        cmd_result = os.system(cmd)
        if cmd_result != 0:
            error_msg = "Matsim Run failed. Code returned by cmd was %d" % (
                cmd_result)
            logger.log_error(error_msg)
            logger.log_error(
                "Note that currently (dec/08), paths in the matsim config files are relative to the opus_matsim root,"
            )
            logger.log_error("  which is one level 'down' from OPUS_HOME.")
            raise StandardError(error_msg)

        logger.end_block()
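The per-year lookup in this example assumes travel_model_configuration holds a default MATSim config file name plus an optional override keyed by year; a sketch of such a configuration (keys and file names invented for illustration):

travel_model_configuration = {
    'matsim_config_filename': 'matsim_config_default.xml',        # default for all years
    'sampling_rate': 0.01,
    2001: {'matsim_config_filename': 'matsim_config_2001.xml'},   # year-specific override
    2002: {'matsim_config_filename': None},                       # falls back to the default
}

year = 2002
matsim_config_filename = travel_model_configuration['matsim_config_filename']
if travel_model_configuration[year]['matsim_config_filename']:
    matsim_config_filename = travel_model_configuration[year]['matsim_config_filename']
# -> 'matsim_config_default.xml' for 2002, 'matsim_config_2001.xml' for 2001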
Example no. 48
def create_file_cache_directories(directory,
                                  prefix='',
                                  file_name='cache_directories'):
    logger.start_block('Creating file %s in %s' % (file_name, directory))
    all_dirs = os.listdir(directory)
    all_dirs = [x for x in all_dirs if x.startswith(prefix)]
    if not prefix.startswith('.'):
        all_dirs = [x for x in all_dirs if not x.startswith('.')]

    for i in range(len(all_dirs)):
        all_dirs[i] = os.path.join(directory, all_dirs[i])

    write_to_text_file(os.path.join(directory, file_name), all_dirs)
    logger.end_block()
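The filtering in create_file_cache_directories keeps entries that start with the prefix and, unless the prefix itself begins with a dot, drops hidden entries; a condensed illustration with an invented directory listing:

all_dirs = ['run_1.2009_05_04', 'run_2.2009_05_06', '.svn', 'misc']
prefix = 'run_'
all_dirs = [x for x in all_dirs if x.startswith(prefix)]
if not prefix.startswith('.'):
    all_dirs = [x for x in all_dirs if not x.startswith('.')]
# all_dirs == ['run_1.2009_05_04', 'run_2.2009_05_06']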
Example no. 49
    def _write_to_txt_file(self, data, header, input_file, delimiter=','):
        logger.start_block("Writing to travel_model input file %s" %
                           input_file)
        newfile = open(input_file, 'w')
        newfile.write(delimiter.join(header).replace("tm.", "") + "\n")
        for row in range(len(data[0])):
            for col in range(len(header)):
                newfile.write(str(data[col][row]))
                if col < len(header) - 1:
                    newfile.write(",")
            newfile.write("\n")

        newfile.close()
        logger.end_block()
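Note that in this variant data is indexed column-first (data[col][row]), i.e. it is a sequence of columns, and the inner loop always joins fields with "," even though a delimiter argument exists. A small illustration with invented values:

header = ['tm.zone_id', 'tm.jobs']
data = [[1, 2, 3],        # column of zone ids
        [10, 20, 30]]     # column of job counts
# _write_to_txt_file(data, header, 'tm_input.csv') would write:
#   zone_id,jobs
#   1,10
#   2,20
#   3,30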
Example no. 50
 def _print_table(self, table_name):
     """Provide debugging info to figure out why the above test is failing, sometimes."""
     try:
         results = self.db.GetResultsFromQuery('select * from %s' %
                                               table_name)
         logger.start_block('Contents of table %s' % table_name)
         try:
             for row in results:
                 logger.log_status(row)
         finally:
             logger.end_block()
     except:
         logger.log_status('Error accessing table %s' % table_name)
         logger.log_stack_trace()
    def _create_db_from_chain_via_python(self, from_database_configuration,
                                         to_database_configuration,
                                         tables_to_copy):

        db_server_from = DatabaseServer(from_database_configuration)
        db_server_to = DatabaseServer(to_database_configuration)
        db_server_to.drop_database(to_database_configuration.database_name)
        db_server_to.create_database(to_database_configuration.database_name)

        database_out = db_server_to.get_database(
            to_database_configuration.database_name)

        scenario_db_manager = ScenarioDatabaseManager(
            server_configuration=from_database_configuration,
            base_scenario_database_name=from_database_configuration.database_name)
        table_mapping = scenario_db_manager.get_database_to_table_mapping()

        cross_db_operations = CrossDatabaseOperations()

        #by default, copy all tables
        if tables_to_copy == []:
            tables_to_copy = sum(table_mapping.values(), [])  # flatten a list of lists
        elif 'scenario_information' not in tables_to_copy:
            tables_to_copy.append('scenario_information')

        for database_name, tables in table_mapping.items():
            database_in = db_server_from.get_database(database_name)
            for table in tables:
                if table not in tables_to_copy:
                    continue

                logger.start_block("Copying table '%s' from database '%s'" %
                                   (table, database_name))

                try:
                    cross_db_operations.copy_table(table_to_copy=table,
                                                   database_in=database_in,
                                                   database_out=database_out,
                                                   use_chunking=True)
                finally:
                    logger.end_block()
            database_in.close()

        self._fix_scenario_information_table(database_out)
        database_out.close()
        db_server_from.close()
        db_server_to.close()
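The sum(table_mapping.values(), []) call above is just a flatten-a-list-of-lists idiom; for example (mapping invented for illustration):

table_mapping = {'baseyear_db': ['households', 'jobs'], 'shared_db': ['zones']}
tables_to_copy = sum(table_mapping.values(), [])
# e.g. ['households', 'jobs', 'zones'] -- order follows dict iteration order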
Example no. 52
    def _run_each_year_as_separate_process(
            self,
            start_year,
            end_year,
            seed_array,
            resources,
            log_file_name='run_multiprocess.log'):
        skip_first_year_of_urbansim = resources.get('skip_urbansim', False)
        log_file = os.path.join(resources['cache_directory'], log_file_name)
        profiler_name = resources.get("profile_filename", None)
        iyear = 0
        for year in range(start_year, end_year + 1):
            if (year <> start_year) or ((year == start_year) and
                                        (not skip_first_year_of_urbansim)):
                logger.start_block(
                    'Running UrbanSim for year %d in new process' % year)
                try:
                    resources['years'] = (year, year)
                    resources['seed'] = seed_array[iyear],
                    logger.disable_file_logging(log_file)
                    if profiler_name is not None:
                        resources["profile_filename"] = "%s_%s" % (
                            profiler_name, year
                        )  # add year to the profile name
                    self._fork_new_process(
                        'urbansim.model_coordinators.model_system',
                        resources,
                        optional_args=['--log-file-name', log_file_name])
                    logger.enable_file_logging(log_file, verbose=False)
                finally:
                    logger.end_block()

            if ('travel_model_configuration' in resources) and (
                    not resources.get('skip_travel_model', False)):
                # tnicolai add start year to travel model config
                tmc = resources['travel_model_configuration']
                tmc['start_year'] = start_year  # end tnicolai
                self._run_travel_models_in_separate_processes(
                    resources['travel_model_configuration'], year, resources)

            if 'post_year_configuration' in resources:
                self._run_travel_models_in_separate_processes(
                    resources['post_year_configuration'], year, resources)
            iyear += 1
        self._notify_stopped()
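The condition at the top of the year loop runs UrbanSim for every year except, when 'skip_urbansim' is set, the start year; a condensed illustration (years invented):

start_year, end_year = 2001, 2003
skip_first_year_of_urbansim = True
runs_urbansim = [(year, (year != start_year) or (not skip_first_year_of_urbansim))
                 for year in range(start_year, end_year + 1)]
# -> [(2001, False), (2002, True), (2003, True)]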
Example no. 53
    def export(self, in_storage, out_storage):

        dataset_names = in_storage.get_table_names()

        logger.start_block('Exporting tables')
        logger.log_status("Reading tables from '%s'" %
                          in_storage.get_storage_location())

        if not dataset_names:
            logger.log_warning('This location has no tables to export!')
            logger.log_warning(
                'Did you specify a location containing the data for a single year?'
            )
        else:
            for dataset_name in dataset_names:
                self.export_dataset(dataset_name, in_storage, out_storage)

        logger.end_block()
Example no. 54
 def copy_scenario_database(self, 
                            from_database_configuration, 
                            to_database_configuration,
                            tables_to_copy = []):
     
     logger.start_block("Copying tables from database chain starting at '%s' on '%s'\nto database '%s' on '%s'"
                        % (from_database_configuration.database_name, 
                           from_database_configuration.host_name, 
                           to_database_configuration.database_name, 
                          to_database_configuration.host_name))
     
     try:
         self._create_db_from_chain_via_python(
              from_database_configuration = from_database_configuration, 
              to_database_configuration = to_database_configuration,
              tables_to_copy = tables_to_copy)
     finally:
         logger.end_block()
Example no. 55
    def run_estimation(self,
                       estimation_config,
                       model_name,
                       save_estimation_results=True):
        config = Baseline()
        config.merge(estimation_config)
        config['config_changes_for_estimation'] = ConfigChangesForEstimation()
        logger.start_block('Estimating %s' % model_name)
        try:
            estimator = UrbansimEstimationRunner(
                models[model_name][0],
                specification_module=models[model_name][1],
                model_group=models[model_name][2],
                configuration=config,
                save_estimation_results=save_estimation_results)
            estimator.estimate()

        finally:
            logger.end_block()
Example no. 56
    def cache_database_table(self, table_name, base_year, database, in_storage,
                             config):
        """Copy this table from input database into attribute cache.
        """
        logger.start_block('Caching table %s' % table_name)
        try:
            #TODO: why is the config being modified...seems like its kind of useless here...
            config['storage_location'] = os.path.join(
                config['cache_directory'], str(base_year), table_name)

            if not os.path.exists(config['storage_location']):
                flt_storage = StorageFactory().get_storage(
                    type='flt_storage',
                    subdir='store',
                    storage_location=config['storage_location'])

                table = database.get_table(table_name)

                id_name = [
                    primary_key.name.lower()
                    for primary_key in table.primary_key
                ]

                dataset = Dataset(resources=config,
                                  in_storage=in_storage,
                                  out_storage=flt_storage,
                                  in_table_name=table_name,
                                  id_name=id_name)

                nchunks = config[
                    'creating_baseyear_cache_configuration'].tables_to_cache_nchunks.get(
                        table_name, 1)
                current_time = SimulationState().get_current_time()
                SimulationState().set_current_time(base_year)
                dataset.load_dataset(nchunks=nchunks,
                                     flush_after_each_chunk=True)
                SimulationState().set_current_time(current_time)
            else:
                logger.log_status(config['storage_location'] +
                                  " already exits; skip caching " + table_name)

        finally:
            logger.end_block()
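The storage location assembled above is simply <cache_directory>/<base_year>/<table_name>; for instance (paths invented for illustration):

import os

config = {'cache_directory': '/urbansim_cache/run_1'}
base_year, table_name = 2000, 'households'
storage_location = os.path.join(config['cache_directory'], str(base_year), table_name)
# -> '/urbansim_cache/run_1/2000/households'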
Example no. 57
    def run(self, config, year):
        """Running MATSim.  A lot of paths are relative; the base path is ${OPUS_HOME}/opus_matsim.  As long as ${OPUS_HOME}
        is correctly set and the matsim tarfile was unpacked in OPUS_HOME, this should work out of the box.  There may eventually
        be problems with the java version.
        """
        try:
            import pydevd
            pydevd.settrace()
        except:
            pass

        logger.start_block("Starting RunTravelModel.run(...)")

        self.setUp(config)

        config_obj = MATSimConfigObject(config, year, self.matsim_config_full)
        config_obj.marschall()

        cmd = """cd %(opus_home)s/opus_matsim ; java %(vmargs)s -cp %(classpath)s %(javaclass)s %(matsim_config_file)s""" % {
            'opus_home': os.environ['OPUS_HOME'],
            'vmargs': "-Xmx2000m",
            'classpath':
            "libs/log4j/log4j/1.2.15/log4j-1.2.15.jar:libs/jfree/jfreechart/1.0.7/jfreechart-1.0.7.jar:libs/jfree/jcommon/1.0.9/jcommon-1.0.9.jar:classesMATSim:classesToronto:classesTNicolai:classesKai:classesEntry",  #  'classpath': "classes:jar/MATSim.jar",
            'javaclass': "playground.run.Matsim4Urbansim",
            'matsim_config_file': self.matsim_config_full
        }

        logger.log_status('Running command %s' % cmd)

        cmd_result = os.system(cmd)
        if cmd_result != 0:
            error_msg = "Matsim Run failed. Code returned by cmd was %d" % (
                cmd_result)
            logger.log_error(error_msg)
            logger.log_error(
                "Note that currently (dec/08), paths in the matsim config files are relative to the opus_matsim root,"
            )
            logger.log_error("  which is one level 'down' from OPUS_HOME.")
            raise StandardError(error_msg)

        logger.end_block()
    def _write_to_file(self, zone_set, variables_list, tm_input_file):
        logger.start_block("Writing to emme2 input file: " + tm_input_file)
        try:
            newfile = open(tm_input_file, 'w')
            """write travel model input file into a particular file format emme2 can read"""
            try:
                newfile.write(r"""c  from
c  prepared: %s
t matrices
m matrix="hhemp"
""" % time.strftime("%c", time.localtime(time.time())))
                
                line_template = "%4d    %3d: %8.2f \n"
                for taz_id in zone_set.get_id_attribute():
                    for i in range(101, 125):
                        newfile.write(line_template % (taz_id, i, self._get_value_for_zone(taz_id, zone_set, variables_list[i-101])))
            finally:
                newfile.close()
        finally:
            logger.end_block()
        return tm_input_file
Example no. 59
    def _put_one_matrix_into_travel_data_set(self,
                                             travel_data_set,
                                             max_zone_id,
                                             matrix_name,
                                             attribute_name,
                                             bank_path,
                                             matrices_created=False):
        """
        Adds to the given travel_data_set the data for the given matrix
        that is in the emme/2 data bank.
        """
        logger.start_block('Copying data for matrix %s into variable %s' %
                           (matrix_name, attribute_name))
        try:
            if not matrices_created:
                self._get_matrix_into_data_file(matrix_name, max_zone_id,
                                                bank_path)
                file_name = "_one_matrix.txt"
            else:
                file_name = "%s_one_matrix.txt" % matrix_name
            file_contents = self._get_emme2_data_from_file(
                join(bank_path, file_name))

            travel_data_set.add_primary_attribute(data=zeros(
                travel_data_set.size(), dtype=float32),
                                                  name=attribute_name)
            odv = array([line.split() for line in file_contents],
                        dtype=float32)
            if odv.size == 0:
                logger.log_error(
                    "Skipped exporting travel_data attribute %s: No data is exported from EMME matrix."
                    % attribute_name)
            else:
                travel_data_set.set_values_of_one_attribute_with_od_pairs(
                    attribute=attribute_name,
                    values=odv[:, 2],
                    O=odv[:, 0].astype('int32'),
                    D=odv[:, 1].astype('int32'))
        finally:
            logger.end_block()
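The parsing step above turns whitespace-separated "origin destination value" lines into a float32 array and then splits it into O, D and value columns; a minimal illustration with made-up matrix contents:

from numpy import array, float32

file_contents = ["1 2 12.50", "1 3 7.00", "2 3 4.25"]   # invented O-D-value triples
odv = array([line.split() for line in file_contents], dtype=float32)
origins = odv[:, 0].astype('int32')        # [1, 1, 2]
destinations = odv[:, 1].astype('int32')   # [2, 3, 3]
values = odv[:, 2]                         # [12.5, 7.0, 4.25]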