Example #1
    def execute_cmd_and_get_stdout(self, cmd, raise_at_error=False):
        """TODO: rewrite for python2.6 using subprocess module
        """
        cmdfilename = self._write_cmd_string_to_temp_file(cmd)
        stdin, stdout, stderr = os.popen3(
            "%s -ssh -l %s -pw %s %s -m %s " %
            (self.plink, self.username, self.password, self.hostname,
             cmdfilename))

        stderr_msg = stderr.readlines()
        if len(stderr_msg) > 0:
            logger.log_error("Error encountered executing cmd through ssh:\n" +
                             ''.join(stderr_msg))
            if raise_at_error:
                raise RuntimeError, "Error encountered executing cmd through ssh:\n" + ''.join(
                    stderr_msg)

        results = stdout.readlines()
        if len(results) == 1:
            results = results[0].strip()

        stdin.close()
        stdout.close()
        stderr.close()
        os.remove(cmdfilename)
        return results
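The TODO above asks for a subprocess-based rewrite. A minimal sketch of what that
could look like, assuming the same plink/username/password/hostname attributes and
the same temp-file helper (illustrative, not from the original project):

    import os
    import subprocess

    def execute_cmd_and_get_stdout(self, cmd, raise_at_error=False):
        cmdfilename = self._write_cmd_string_to_temp_file(cmd)
        try:
            # Passing an argument list avoids the shell-quoting pitfalls of os.popen3.
            proc = subprocess.Popen(
                [self.plink, "-ssh", "-l", self.username, "-pw", self.password,
                 self.hostname, "-m", cmdfilename],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout_data, stderr_data = proc.communicate()
            if stderr_data:
                msg = "Error encountered executing cmd through ssh:\n" + stderr_data
                logger.log_error(msg)
                if raise_at_error:
                    raise RuntimeError(msg)
            # splitlines() drops trailing newlines, unlike readlines()
            results = stdout_data.splitlines()
            if len(results) == 1:
                results = results[0].strip()
            return results
        finally:
            os.remove(cmdfilename)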
Example #2
    def test_get_schema_from_table(self):
        import copy

        test_table_schema = {
            'col1': "INTEGER",
            'col2': "FLOAT",
            'col3': "DOUBLE",
            'col4': "VARCHAR",
            'col5': "BOOLEAN",
            'col6': "MEDIUMTEXT",
            'col7': "SHORT"
        }
        expected_schema = copy.copy(test_table_schema)
        expected_schema.update({
            'col2': 'DOUBLE',
            'col6': 'VARCHAR',
            'col7': 'INTEGER'
        })

        test_table = 'test_table'

        for db, server in self.dbs:
            try:
                self.assertFalse(db.table_exists(test_table))
                db.create_table_from_schema(test_table, test_table_schema)
                new_schema = db.get_schema_from_table(test_table)
                self.assertEqual(new_schema, expected_schema)
            except:
                logger.log_error('Protocol %s' % server.config.protocol)
                raise
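The expected_schema updates encode the type promotions the backend is expected to
apply on a round trip: FLOAT widens to DOUBLE, MEDIUMTEXT collapses to VARCHAR, and
SHORT to INTEGER. A sketch of the kind of normalization table such an adapter might
use (illustrative only; the real mapping lives inside the database layer):

    # Hypothetical promotion table mirroring what the test expects.
    TYPE_PROMOTIONS = {
        'FLOAT': 'DOUBLE',
        'MEDIUMTEXT': 'VARCHAR',
        'SHORT': 'INTEGER',
    }

    def normalize_schema(schema):
        # Types not listed above pass through unchanged.
        return dict((col, TYPE_PROMOTIONS.get(t, t)) for col, t in schema.items())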
Example #3
 def _invoke_with_paramiko_wait_until_done_or_failed(self, ssh_client, cmd, run_id=None, raise_at_error=False, msg='\n'):
     stdin, stdout, stderr = ssh_client.ssh.exec_command(cmd)
     
     while True:
         #handle when command returns an error in stderr
         try:
             stdout_msg = stdout.readlines()
         except:
             stdout_msg = ''
         try:
             stderr_msg = stderr.readlines()
         except:
             stderr_msg = ''
             
         if len(stderr_msg) > 0:
             logger.log_error('[' + time.ctime() + '] ' + "Error encountered executing cmd through ssh:\n" + ''.join(stderr_msg))
             if raise_at_error:
                 raise RuntimeError("Error encountered executing cmd through ssh:\n" + ''.join(stderr_msg))
         if len(stdout_msg) > 0:
             logger.log_status('[' + time.ctime() + '] ' + 'stdout:' + ''.join(stdout_msg))
                                
         if run_id:
             runs_by_status = self.get_run_manager().get_runs_by_status([run_id])            
             if run_id in runs_by_status.get('done', []):
                 break
             if run_id in runs_by_status.get('failed', []):
                 raise RuntimeError, "run failed: %s." % msg
         
         time.sleep(60)
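A caveat worth noting: paramiko's stdout.readlines() blocks until the remote command
closes its stream, so the loop above effectively finishes its reads on the first
pass. A non-blocking variant can poll the channel's exit status instead (a sketch
assuming a plain paramiko SSHClient; the helper name is illustrative):

    import time

    def wait_for_remote_command(ssh_client, cmd, poll_interval=60):
        stdin, stdout, stderr = ssh_client.exec_command(cmd)
        channel = stdout.channel  # all three streams share one channel
        while not channel.exit_status_ready():
            time.sleep(poll_interval)
        rc = channel.recv_exit_status()
        err = stderr.read()
        if rc != 0 or err:
            raise RuntimeError("remote command failed (rc=%d):\n%s" % (rc, err))
        return stdout.read()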
Example #4
 def _put_one_matrix_into_travel_data_set(self, travel_data_set, max_zone_id, matrix_name, attribute_name, bank_path,
                                          matrices_created=False):
     """
     Adds to the given travel_data_set the data for the given matrix
     that is in the emme/2 data bank.
     """
     logger.start_block('Copying data for matrix %s into variable %s' %
                        (matrix_name, attribute_name))
     try:
         if not matrices_created:
             self._get_matrix_into_data_file(matrix_name, max_zone_id, bank_path)
             file_name = "_one_matrix.txt"
         else:
             file_name = "%s_one_matrix.txt" % matrix_name
         file_contents = self._get_emme2_data_from_file(join(bank_path, file_name))
         
         travel_data_set.add_primary_attribute(data=zeros(travel_data_set.size(), dtype=float32), 
                                               name=attribute_name)
         odv = array([line.split() for line in file_contents], dtype=float32)
         if odv.size == 0:
             logger.log_error("Skipped exporting travel_data attribute %s: No data is exported from EMME matrix." % attribute_name)
         else:            
             travel_data_set.set_values_of_one_attribute_with_od_pairs(attribute=attribute_name,
                                                                       values=odv[:,2],
                                                                       O=odv[:,0].astype('int32'),
                                                                       D=odv[:,1].astype('int32')
                                                                       )
     finally:
         logger.end_block()
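For orientation, each line of the exported matrix file is an "origin destination value"
triple, which is why the parsed array is sliced by column in the snippet above. A tiny
standalone illustration of the parsing step (made-up numbers):

    from numpy import array, float32

    file_contents = ["1 1 0.5", "1 2 1.25", "2 1 3.0"]
    odv = array([line.split() for line in file_contents], dtype=float32)
    # odv[:, 0] -> origins, odv[:, 1] -> destinations, odv[:, 2] -> values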
Example #5
 def drop_database(self, server, database_name):
     database_path = self._get_database_path(database_name = database_name) 
     try:
         os.remove(database_path)   
     except:
         logger.log_error('Could not remove sqlite database file at %s' % database_path)
         raise
def formatExceptionInfo(custom_message = 'Unexpected error', maxTBlevel=5, plainText=False):
    import traceback

#    cla, exc, trbk = sys.exc_info()
    
#    excTb = traceback.format_tb(trbk, maxTBlevel)
    fExc = traceback.format_exc(limit=maxTBlevel)
    
#    excName = cla.__name__
#    try:
#        excArgs = exc.__dict__["args"]
#    except KeyError:
#        excArgs = "<no args>"
            
#    errorinfo = (excName, excArgs, excTb)

    logger.log_error(traceback.format_exc())
    fExc_plain = fExc
    fExc = fExc.replace('\t','   ').replace('\n','<br>').replace(' ', '&nbsp;')
    errorinfo = ('''<qt>%s</qt>
                 '''%(fExc))    
    if plainText:
        return fExc_plain
    else:
        return errorinfo
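For illustration, a typical call site wraps formatExceptionInfo in an except block so
that traceback.format_exc() has an active exception to render (risky_operation is a
placeholder name):

    try:
        risky_operation()
    except Exception:
        # plainText=True returns the raw traceback; otherwise an HTML-ish
        # <qt>...</qt> string suitable for a Qt message box is returned.
        details = formatExceptionInfo(plainText=True)
        logger.log_error(details)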
    def check_reports_creation_and_sizes(
            self,
            travel_model_year_dir,
            expected_files_and_sizes_dir="expected_file_sizes_e05"):
        """If there are missing files or any files over 10% missized then an exception will be thrown.
           If there are files 5-10% missized then a warning will be printed."""

        if self.find_errors(
                travel_model_year_dir,
                os.path.join(
                    expected_files_and_sizes_dir,
                    os.path.abspath(travel_model_year_dir).split(os.sep)[-1])):
            # range: (-5%, 50%) -- smaller than 5% or larger than 50% original size are not allowed

            if self.missing_files:
                logger.log_error(
                    "%d missing report files: %s" %
                    (len(self.missing_files), str(self.missing_files)))
            if self.missized_files:
                logger.log_error(
                    "The following files are out of range (-5%, 50%): " +
                    "".join("%s off by %d percent, \n" %
                            (f[0], f[1]['percent_difference'] * 100)
                            for f in self.missized_files))
            if self.missing_files:
                raise LookupError(
                    "Error, %d missing report files: %s" %
                    (len(self.missing_files), str(self.missing_files)))

            if self.missized_files:
                raise StandardError(
                    "Error, the following files are out of range (-5%, 50%): " +
                    "".join("%s off by %d percent, \n" %
                            (f[0], f[1]['percent_difference'] * 100)
                            for f in self.missized_files))
Example #8
    def get_resources_for_run_id_from_history(self,
                                              run_id,
                                              filter_by_status=False):
        """Returns the resources for this run_id, as stored in the run_activity table.
        """

        run_activity = self.services_db.get_table('run_activity')

        if filter_by_status:
            whereclause = and_(run_activity.c.status == 'started',
                               run_activity.c.run_id == int(run_id))
        else:
            whereclause = run_activity.c.run_id == int(run_id)

        query = select(columns=[run_activity.c.resources],
                       whereclause=whereclause)

        run_resources = self.services_db.execute(query).fetchone()

        if not run_resources:
            raise StandardError(
                "run_id %s doesn't exist on server %s" %
                (run_id, self.services_db.get_connection_string(scrub=True)))

        try:
            r = self._unpickle(run_resources[0])
            config = Configuration(r)
        except:
            logger.log_error(
                'Could not create the configuration file for run %i' % run_id)
            raise

        return config
Example #9
    def predict(self, predicted_choice_id_name, agents_index=None):
        """ Run prediction. Currently makes sense only for choice models."""
        # Create temporary configuration where all words 'estimate' are replaced by 'run'
        tmp_config = Resources(self.config)
        
        if self.agents_index_for_prediction is None:
            self.agents_index_for_prediction = self.get_agent_set_index().copy()
            
        if agents_index is None:
            agents_index = self.agents_index_for_prediction
        
        tmp_config['models_configuration'][self.model_name]['controller']['run']['arguments']['coefficients'] = "coeff_est"
        tmp_config['models_configuration'][self.model_name]['controller']['run']['arguments']['agents_index'] = "agents_index"
        tmp_config['models_configuration'][self.model_name]['controller']['run']['arguments']['chunk_specification'] = "{'nchunks':1}"

        ### save specification and coefficients to cache (no matter the save_estimation_results flag)
        ### so that the prepare_for_run method could load specification and coefficients from there
        #output_configuration = self.config['output_configuration']
        #del self.config['output_configuration']
        #self.save_results()
        
        #self.config['output_configuration'] = output_configuration
        
        #self.model_system.run_year_namespace["coefficients"] = self.coefficients
        #del tmp_config['models_configuration'][self.model_name]['controller']['prepare_for_run']
        
        try:
            run_year_namespace = copy.copy(self.model_system.run_year_namespace)
        except:
            logger.log_error("The estimate() method must be run first")
            return False
        
        try:
            agents = self.get_agent_set()
            choice_id_name = self.get_choice_set().get_id_name()[0]
            # save current locations of agents
            current_choices = agents.get_attribute(choice_id_name).copy()
            dummy_data = zeros(current_choices.size, dtype=current_choices.dtype)-1
            #agents.modify_attribute(name=choice_id_name, data=dummy_data)  #reset choices for all agents
            agents.modify_attribute(name=choice_id_name, data=dummy_data, index=agents_index)  #reset choices for agents in agents_index
            
            run_year_namespace["process"] = "run"
            run_year_namespace["coeff_est"] = self.coefficients
            run_year_namespace["agents_index"] = agents_index
            run_year_namespace["processmodel_config"] = tmp_config['models_configuration'][self.model_name]['controller']['run']
            new_choices = self.model_system.do_process(run_year_namespace)
            
            #self.model_system.run(tmp_config, write_datasets_to_cache_at_end_of_year=False)
            #new_choices = agents.get_attribute(choice_id_name).copy()
            agents.modify_attribute(name=choice_id_name, data=current_choices)
            dummy_data[agents_index] = new_choices
            if predicted_choice_id_name not in agents.get_known_attribute_names():
                agents.add_primary_attribute(name=predicted_choice_id_name, data=dummy_data)
            else:
                agents.modify_attribute(name=predicted_choice_id_name, data=dummy_data)
            logger.log_status("Predictions saved into attribute " + predicted_choice_id_name)
            return True
        except Exception as e:
            logger.log_error("Error encountered in prediction: %s" % e)
            logger.log_stack_trace()
            return False
Example #10
def formatExceptionInfo(custom_message='Unexpected error',
                        maxTBlevel=5,
                        plainText=False):
    import traceback

    #    cla, exc, trbk = sys.exc_info()

    #    excTb = traceback.format_tb(trbk, maxTBlevel)
    fExc = traceback.format_exc(limit=maxTBlevel)

    #    excName = cla.__name__
    #    try:
    #        excArgs = exc.__dict__["args"]
    #    except KeyError:
    #        excArgs = "<no args>"

    #    errorinfo = (excName, excArgs, excTb)

    logger.log_error(traceback.format_exc())
    fExc_plain = fExc
    fExc = fExc.replace('\t', '   ').replace('\n',
                                             '<br>').replace(' ', '&nbsp;')
    errorinfo = ('''<qt>%s</qt>
                 ''' % (fExc))
    if plainText:
        return fExc_plain
    else:
        return errorinfo
    def visualize(self, indicators_to_visualize, computed_indicators, visualization_type, *args, **kwargs):

        class_names = {
            "mapnik_map": "MapnikMap",
            "mapnik_animated_map": "MapnikAnimation",
            "matplotlib_chart": "MatplotlibChart",
            "table": "Table",
        }

        module = "opus_gui.results_manager.run.indicator_framework.visualizer.visualizers"
        module_composed_name = module + "." + visualization_type

        example_indicator = computed_indicators[indicators_to_visualize[0]]
        indicator_directory = example_indicator.source_data.get_indicator_directory()
        additional_args = {"indicator_directory": indicator_directory}
        kwargs.update(additional_args)

        visualization = ClassFactory().get_class(
            module_composed_name=module_composed_name, class_name=class_names[visualization_type], arguments=kwargs
        )

        try:
            visualization = visualization.visualize(
                indicators_to_visualize=indicators_to_visualize, computed_indicators=computed_indicators, *args
            )
        except Exception:
            msg = "Could not create the %s visualization for %s" % (visualization_type, module_composed_name)
            logger.log_error(msg)
            raise
Example #12
 def _clone(self, agents_pool, amount, 
               agent_dataset, location_dataset, 
               this_refinement,
               dataset_pool ):
     """ clone certain amount of agent satisfying condition specified by agent_expression and location_expression
     and add them to agents_pool.  Useful to add certain agents to location where there is no such agent exist previously.
     """
     
     fit_index = self.get_fit_agents_index(agent_dataset, 
                                           this_refinement.agent_expression, 
                                           this_refinement.location_expression,
                                           dataset_pool)
     if fit_index.size == 0:
         logger.log_error("Refinement requests to clone %i agents,  but there are no agents satisfying %s." \
                             % (amount,  ' and '.join( [this_refinement.agent_expression, 
                                                        this_refinement.location_expression] ).strip(' and '),
                                                      ))
         return
    
     
     clone_index = sample_replace( fit_index, amount )
         
     agents_pool += clone_index.tolist()
        
     agent_dataset.modify_attribute(location_dataset.get_id_name()[0], 
                                    -1 * ones( clone_index.size, dtype='int32' ),
                                    index = clone_index
                                    )
     self._add_refinement_info_to_dataset(agent_dataset, self.id_names, this_refinement, index=clone_index)
Example #13
 def __create_symbolic_link(self, source_file, link_name):
     ''' Creates a symbolic link / shortcut to the given source_file.
         Unfortunately Java cannot handle shortcuts, which is why this method
         is no longer used; files are renamed instead.
     '''
     symbolic_link = os.path.join( self.target_path, link_name )
     
     if self.is_windows_os: # Windows
         symbolic_link = symbolic_link + '.lnk'
     try: # removing symbolic link / short cut
         os.unlink( symbolic_link )
     except: pass
         
     if self.is_windows_os: # Windows
         try:
             logger.log_status('Creating shortcut %s to (%s) ...' %(symbolic_link, source_file) )
             from win32com.client import Dispatch
             shell = Dispatch('WScript.Shell')
             shortcut = shell.CreateShortCut( symbolic_link )
             shortcut.Targetpath = source_file
             shortcut.save() 
         except: 
             logger.log_error('Error while creating a shortcut to %s!' % source_file)
             logger.log_error('Installation not successful. Try manual installation as described in opus_matsim_user_guide.pdf in opus_matsim/docs')
             exit()
     else: # Mac, Linux
         logger.log_status('Creating symbolic link %s to (%s) ...' %(symbolic_link, source_file) )
         os.symlink( source_file , symbolic_link )
Example #14
def formatExceptionInfo(custom_message='Unexpected error',
                        maxTBlevel=5,
                        plainText=False,
                        log=True):
    #    cla, exc, trbk = sys.exc_info()

    #    excTb = traceback.format_tb(trbk, maxTBlevel)
    #    excName = cla.__name__
    #    try:
    #        excArgs = exc.__dict__["args"]
    #    except KeyError:
    #        excArgs = "<no args>"

    #    errorinfo = (excName, excArgs, excTb)

    format_message_and_error = lambda m, e: ('%s\n%s' % (m, e))

    if log:
        logger.log_error(
            format_message_and_error(custom_message, traceback.format_exc()))

    fExc = format_message_and_error(custom_message,
                                    traceback.format_exc(limit=maxTBlevel))

    if plainText:
        return fExc
    else:
        fExc = fExc.replace('\t',
                            '   ').replace('\n',
                                           '<br>').replace(' ', '&nbsp;')
        errorinfo = ('''<qt>%s</qt>
                     ''' % (fExc))
        return errorinfo
Example #15
    def _check_syntax_for_dir(self,
                              root_dir,
                              file_names_that_do_not_need_gpl=[]):
        py_file_names = reduce(self._py_file_names, os.walk(root_dir), [])

        files_with_no_license = []
        files_with_tab = []
        for py_file_name in py_file_names:
            lines_with_tabs = self._has_tabs(py_file_name)
            if self._file_needs_GPL(os.path.basename(py_file_name),
                                    file_names_that_do_not_need_gpl
                                    ) and not self._has_GPL(py_file_name):
                logger.log_error("missing GPL in file %s" % py_file_name)
                files_with_no_license.append(py_file_name)
            if lines_with_tabs:
                logger.log_error(
                    "tab(s) in file %s, line(s) %s" %
                    (py_file_name, list2string(lines_with_tabs, ", ")))
                files_with_tab.append(py_file_name)

        if files_with_no_license or files_with_tab:
            files_with_problems = files_with_no_license + files_with_tab
            raise SyntaxError(
                "Please fix reported syntax problems with python files: %s." %
                (','.join(files_with_problems)))
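The reduce call above folds os.walk output into a flat list of .py paths, so the
_py_file_names helper must take an (accumulator, walk_entry) pair. A plausible shape
for it (an assumption; the real helper lives elsewhere in the package):

    import os

    def _py_file_names(self, accumulator, walk_entry):
        # walk_entry is one (path, dirs, files) tuple yielded by os.walk
        path, dirs, files = walk_entry
        accumulator.extend(os.path.join(path, f) for f in files if f.endswith('.py'))
        return accumulator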
Example #17
def invoke_run_travel_model(config, year):
    """ 
    """

    tm_config = config['travel_model_configuration']
    scenario = tm_config['travel_model_scenario'] 
    try:
        travel_model_year = travel_model_year_mapping[year]
    except KeyError:
        logger.log_warning("no travel model year mapping for %d." % year)
        travel_model_year = year

    # put the travel model input data in place
    data_exchange_dir = mtc_common.tm_get_data_exchange_dir(config, year)
    cache_directory = config['cache_directory']
    mtc_dir = os.path.join(cache_directory, "mtc_data")
    for f in glob.glob(os.path.join(mtc_dir, '*' + str(year) + '*')):
        logger.log_status("Copying over travel model input " + f + " to " + data_exchange_dir)
        shutil.copy(f, data_exchange_dir)

    my_location = os.path.split(__file__)[0]
    script_filepath = os.path.join(my_location, "run_travel_model.py")
    cmd = "%s %s -s %s -y %s -n" % (sys.executable, script_filepath, scenario, travel_model_year)

    # form the desired output dir for the travel model data.  Make it look
    # like the urbansim run cache for easy association.  Note that we
    # explicitly use the forward slash instead of os.sep and friends
    # because the travel model is managed via ssh on a cygwin machine, not
    # run on the local machine.
    outdir = "runs/" + config['cache_directory'].split(os.sep)[-1]
    outdir = outdir + "/%d_%s" % (year, scenario)
    cmd = cmd + " -o " + outdir

    logger.log_status("Launching %s" % cmd)
    if os.system(cmd) != 0:
        raise TravelModelError

    # Run the emfac report

    # TODO: the travel model server should come from the configuration.  But
    # for this we must migrate the configuration from mtc_config.py to the
    # top-level xml.  So for now we just hard-code it :-/ Same for
    # travel_model_home.
    tm_server = "*****@*****.**"
    travel_model_home = "/cygdrive/e/mtc_travel_model/"
    server_model = winssh.winssh(tm_server, "OPUS_MTC_SERVER_PASSWD")
    (rc, emfac_windir) = server_model.cmd("cygpath -w " + outdir)
    if rc != 0:
        logger.log_error("Failed to find windows path for emfac dir " + outdir)
        sys.exit(1)
    emfac_windir = emfac_windir.replace('\r', '').replace('\n','')
    logger.log_status("Attempting to generate EMFAC report...")
    cmd = 'cd ' + travel_model_home + 'model_support_files/EMFAC_Files'
    logger.log_status(cmd)
    server_model.cmd_or_fail(cmd)
    cmd = "cmd /c 'RunEmfac.bat " + emfac_windir + " " + str(year) + "' | tee emfac.log"
    logger.log_status(cmd)
    (rc, out) = server_model.cmd(cmd, supress_output=False, pipe_position=0)
    if rc != 0:
        logger.log_warning("WARNING: Failed to prepare emfac report")
Example #18
    def create_storage(self):

        try:
            server = DatabaseServer(self.server_config)
        except:
            logger.log_error(
                'Cannot connect to the database server hosting the services database %s.'
                % self.server_config.database_name)
            raise

        if not server.has_database(self.server_config.database_name):
            server.create_database(self.server_config.database_name)

        try:
            services_db = server.get_database(self.server_config.database_name)
        except:
            logger.log_error('Cannot connect to a services database on %s.' %
                             server.get_connection_string(scrub=True))
            raise

        metadata.bind = services_db.engine
        setup_all()
        create_all()

        return services_db
Example #19
 def run(self, year, output_file=None):
     """Runs the emme2 executables, using appropriate info from config. 
     Assumes the emme2 input files are present. 
     Raise an exception if the emme2 run fails. 
     """        
     emme2_batch_file_path = self.get_emme2_batch_file_path(year)
     emme2_dir, emme2_batch_file_name = os.path.split(emme2_batch_file_path)
     logger.log_status('Using emme2 dir %s for year %d' % (emme2_dir, year))
     os.chdir(emme2_dir)
     if output_file is None:
         log_file_path = os.path.join(self.config['cache_directory'], 'emme2_%d_log.txt' % year)
     else:
         log_file_path = output_file
     
     # if log_file_path is a remote sftp URL, redirect the log file to tempdir           
     log_file_path = redirect_sftp_url_to_local_tempdir(log_file_path)
     cmd = """%(system_cmd)s"%(emme2_batch_file_name)s" > %(log_file_path)s""" % {
             'system_cmd': self.config['travel_model_configuration'].get('system_command', 'cmd /c '),
             'emme2_batch_file_name':emme2_batch_file_path, 
             'log_file_path':log_file_path,
             } 
     logger.log_status('Running command %s' % cmd)
     cmd_result = os.system(cmd)
     if cmd_result != 0:
         error_msg = "Emme2 Run failed. Code returned by cmd was %d" % (cmd_result)
         logger.log_error(error_msg)
         raise StandardError(error_msg)        
Example #20
    def get_resources_for_run_id_from_history(self, run_id, filter_by_status = False):
        """Returns the resources for this run_id, as stored in the run_activity table.
        """

        run_activity = self.services_db.get_table('run_activity')

        if filter_by_status:
            whereclause = and_(
                        run_activity.c.status=='started',
                        run_activity.c.run_id==int(run_id))
        else:
            whereclause = run_activity.c.run_id==int(run_id)

        query = select(
            columns = [run_activity.c.resources],
            whereclause = whereclause)

        run_resources = self.services_db.execute(query).fetchone()

        if not run_resources:
            raise StandardError("run_id %s doesn't exist on server %s" % (run_id, self.services_db.get_connection_string(scrub = True)))

        try:
            r = self._unpickle(run_resources[0])
            config = Configuration(r)
        except:
            logger.log_error('Could not create the configuration file for run %i'%run_id)
            raise

        return config
Example #22
 def test_get_schema_from_table(self):
     import copy
     
     test_table_schema = {
         'col1':"INTEGER",
         'col2':"FLOAT",
         'col3':"DOUBLE",
         'col4':"VARCHAR",
         'col5':"BOOLEAN",
         'col6':"MEDIUMTEXT",
         'col7':"SHORT"                    
     }
     expected_schema = copy.copy(test_table_schema)  
     expected_schema.update({'col2':'DOUBLE', 'col6':'VARCHAR', 'col7':'INTEGER'})
     
     test_table = 'test_table'
     
     for db,server in self.dbs:
         try:
             self.assertFalse(db.table_exists(test_table))
             db.create_table_from_schema(test_table, test_table_schema)
             new_schema = db.get_schema_from_table(test_table)
             self.assertEqual(new_schema, expected_schema)
         except:
             logger.log_error('Protocol %s'%server.config.protocol)
             raise
Example #23
    def test_get_schema_from_table(self):
        import copy

        test_table_schema = {
            "col1": "INTEGER",
            "col2": "FLOAT",
            "col3": "DOUBLE",
            "col4": "VARCHAR",
            "col5": "BOOLEAN",
            "col6": "MEDIUMTEXT",
            "col7": "SHORT",
        }
        expected_schema = copy.copy(test_table_schema)
        expected_schema.update({"col2": "DOUBLE", "col6": "VARCHAR", "col7": "INTEGER"})

        test_table = "test_table"

        for db, server in self.dbs:
            try:
                self.assertFalse(db.table_exists(test_table))
                db.create_table_from_schema(test_table, test_table_schema)
                new_schema = db.get_schema_from_table(test_table)
                self.assertEqual(new_schema, expected_schema)
            except:
                logger.log_error("Protocol %s" % server.config.protocol)
                raise
Example #24
    def _find_opus_test_cases_for_package(self, package, test_case_class):
        root = OpusPackage().get_path_for_package(package)

        modules_with_test_cases = []

        for path, dirs, files in os.walk(root, topdown=True):
            for file in files:
                if not file.endswith('.py'):
                    continue

                f = open(os.path.join(path, file), 'r')
                import_pattern = re.compile('^\s*(import|from).*unittest')
                skip_pattern = re.compile('^.*#.*IGNORE_THIS_FILE')

                found_import = False
                for line in f:
                    if skip_pattern.match(line):
                        break
                    if import_pattern.match(line):
                        found_import = True
                        break
                f.close()  # close the handle; os.walk can visit many files

                if not found_import:  # No unittest import found in file.
                    continue

                module_name = self._get_module_name(package, root, path, file)

                try:
                    exec('import %s' % module_name)
                except Exception:
                    logger.log_error("Could not import %s!" % module_name)

                    traceback.print_exc()

                    continue

                module = eval(module_name)

                if inspect.ismodule(module):
                    members = inspect.getmembers(module)

                    member_dict = {}
                    for key, value in members:
                        member_dict[key] = value

                    for key in member_dict.keys():
                        try:
                            is_subclass = issubclass(member_dict[key],
                                                     test_case_class)
                        except:
                            pass
                        else:
                            if is_subclass:
                                class_name = member_dict[key].__name__

                                modules_with_test_cases.append(
                                    (module_name, class_name))

                else:
                    logger.log_warning('WARNING: %s is not a module!' % module)
Example #25
 def run(self, year, output_file=None):
     """Runs the daysim executables, using appropriate info from config. 
     Assumes the daysim input files are present. 
     Raise an exception if the daysim run fails. 
     """        
     daysim_config_file = self.get_daysim_config_file(year)
     daysim_dir = self.get_daysim_dir(year)
     logger.log_status('Using DaySim directory %s for year %d' % (daysim_dir, year))
     os.chdir(daysim_dir)
     if output_file is None:
         log_file_path = os.path.join(self.config['cache_directory'], 'daysim_%d_log.txt' % year)
     else:
         log_file_path = output_file
     
     # if log_file_path is a remote sftp URL, redirect the log file to tempdir           
     log_file_path = redirect_sftp_url_to_local_tempdir(log_file_path)
     cmd = """%(system_cmd)s"%(config_file)s" > %(log_file_path)s""" % {
             'system_cmd': self.config['travel_model_configuration'].get('daysim_system_command', 'cmd /c '),
             'config_file':daysim_config_file, 
             'log_file_path':log_file_path,
             } 
     logger.log_status('Running command %s' % cmd)
     cmd_result = os.system(cmd)
     if cmd_result != 0:
         error_msg = "DaySim Run failed. Code returned by cmd was %d" % (cmd_result)
         logger.log_error(error_msg)
         raise StandardError(error_msg)        
    def check_reports_creation_and_sizes(self, travel_model_year_dir, expected_files_and_sizes_dir="expected_file_sizes_e05"):
        """If there are missing files or any files over 10% missized then an exception will be thrown.
           If there are files 5-10% missized then a warning will be printed."""
        
           
        if self.find_errors(travel_model_year_dir,
                            os.path.join(expected_files_and_sizes_dir, 
                                         os.path.abspath(travel_model_year_dir).split(os.sep)[-1])):
            # range: (-5%, 50%) -- smaller than 5% or larger than 50% original size are not allowed

            if self.missing_files:
                logger.log_error("%d missing report files: %s" % (len(self.missing_files), str(self.missing_files)))
            if self.missized_files:
                logger.log_error("The following files are out of range (-5%, 50%): " +
                                 "".join("%s off by %d percent, \n" %
                                         (f[0], f[1]['percent_difference'] * 100)
                                         for f in self.missized_files))
            if self.missing_files:
                raise LookupError("Error, %d missing report files: %s" % (len(self.missing_files), str(self.missing_files)))

            if self.missized_files:
                raise StandardError("Error, the following files are out of range (-5%, 50%): " +
                                    "".join("%s off by %d percent, \n" %
                                            (f[0], f[1]['percent_difference'] * 100)
                                            for f in self.missized_files))
Example #27
    def visualize(self, indicators_to_visualize, computed_indicators,
                  visualization_type, *args, **kwargs):

        class_names = {
            'mapnik_map': 'MapnikMap',
            'mapnik_animated_map': 'MapnikAnimation',
            'matplotlib_chart': 'MatplotlibChart',
            'table': 'Table'
        }

        module = 'opus_gui.results_manager.run.indicator_framework.visualizer.visualizers'
        module_composed_name = module + '.' + visualization_type

        example_indicator = computed_indicators[indicators_to_visualize[0]]
        indicator_directory = example_indicator.source_data.get_indicator_directory()
        additional_args = {'indicator_directory': indicator_directory}
        kwargs.update(additional_args)

        visualization = ClassFactory().get_class(
            module_composed_name=module_composed_name,
            class_name=class_names[visualization_type],
            arguments=kwargs)

        try:
            visualization = visualization.visualize(
                indicators_to_visualize=indicators_to_visualize,
                computed_indicators=computed_indicators,
                *args)
        except Exception:
            msg = 'Could not create the %s visualization for %s' % (
                visualization_type, module_composed_name)
            logger.log_error(msg)
            raise
Example #28
    def convert_table(self,
                      db,
                      table_name,
                      patterns,
                      backup=True,
                      backup_postfix='_old'):
        try:
            db.DoQuery('select * from %s' % table_name)
        except:
            return

        if backup:
            backup_table_name = '%s%s' % (table_name, backup_postfix)

            i = 0
            while self._table_exists(db, backup_table_name):
                i += 1
                backup_table_name = '%s%s%d' % (table_name, backup_postfix, i)

            db.DoQuery(
                'create table %(backup_table)s select * from %(table)s;' % {
                    'backup_table': backup_table_name,
                    'table': table_name
                })

            try:
                db.DoQuery('select * from %s' % backup_table_name)
            except:
                logger.log_error("Back up of table '%s' to '%s' failed. "
                                 "Skipping conversion." %
                                 (table_name, backup_table_name))
                return

        results = db.GetResultsFromQuery('select variable_name from %s' %
                                         table_name)[1:]

        results = [i[0] for i in results]

        for i in range(len(results)):
            try:
                new_row = results[i]
                for pattern, replacement in patterns:
                    new_row = re.sub(pattern, replacement, new_row)
            except TypeError:
                continue  # Not dealing with a string here.

            if new_row == results[i]:
                continue  # Nothing changed. Don't bother with the update query.

            new_row = '"%s"' % new_row

            query = ('update %(table)s set variable_name=%(new_row)s where '
                     'variable_name="%(old_row)s";' % {
                         'table': table_name,
                         'new_row': new_row,
                         'old_row': results[i]
                     })

            db.DoQuery(query)
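For context, patterns is a sequence of (regex, replacement) pairs applied to every
variable_name in the table. A hypothetical invocation (names and patterns are
placeholders, not from the original project):

    patterns = [
        (r'\bgridcell\b', 'zone'),        # rename a dataset reference
        (r'_old_suffix\b', '_new_suffix'),
    ]
    converter.convert_table(db, 'variable_definitions', patterns, backup=True)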
    def _get_viz_spec(self):
        viz_type = self.cboVizType.currentText()
        translation = self._get_output_types(viz_type)
        dataset_name = self.dataset_name
        output_type = QString(translation[str(self.cboOutputType.currentText())])
        indicators = QString(str(self._get_column_values(column = 0)))

        vals = {
                'indicators': indicators,
                'output_type': output_type,
                'dataset_name': dataset_name,
                'visualization_type': QString(self._get_type_mapper()[str(viz_type)])
        }

        if output_type == 'mapnik_map' or output_type == 'mapnik_animated_map':
            vals['mapnik_bucket_labels'] = self.mapnik_options['mapnik_bucket_labels']
            vals['mapnik_bucket_colors'] = self.mapnik_options['mapnik_bucket_colors']
            vals['mapnik_bucket_ranges'] = self.mapnik_options['mapnik_bucket_ranges']
            vals['mapnik_resolution'] = self.mapnik_options['mapnik_resolution']
            vals['mapnik_page_dims'] = self.mapnik_options['mapnik_page_dims']
            vals['mapnik_map_lower_left'] = self.mapnik_options['mapnik_map_lower_left']
            vals['mapnik_map_upper_right'] = self.mapnik_options['mapnik_map_upper_right']
            vals['mapnik_legend_lower_left'] = self.mapnik_options['mapnik_legend_lower_left']
            vals['mapnik_legend_upper_right'] = self.mapnik_options['mapnik_legend_upper_right']


        elif output_type == 'fixed_field':
            try:
                fixed_field_params = QString(str(self._get_column_values(column = 1)))
            except:
                errorInfo = formatExceptionInfo()
                logger.log_error(errorInfo)
                MessageBox.error(mainwindow = self,
                                text = 'Could not get fixed field parameters for all columns',
                                detailed_text = '')
                return None

            vals['fixed_field_specification'] = fixed_field_params
            vals['id_format'] = self.leOption1.text()
        elif output_type == 'sql':
            vals['database_name'] = self.leOption1.text()
        elif output_type == 'esri':
            vals['storage_location'] = self.leOption1.text()
        elif output_type in ('tab', 'xls'):
            if self.rbSingleTable.isChecked():
                output_style = Table.ALL
            elif self.rbTablePerIndicator.isChecked():
                output_style = Table.PER_ATTRIBUTE
            elif self.rbTablePerYear.isChecked():
                output_style = Table.PER_YEAR
            vals['output_style'] = QString(str(output_style))
            if self.appendTypeCheckBox.isChecked():
                vals['append_col_type'] = True
            else:
                vals['append_col_type'] = False
            if output_type == 'xls':
                vals['storage_location'] = self.leOption1.text()

        return vals
Example #30
 def _get_prediction_year(self, current_year, years):
     prediction_year_index = -1
     for i in range(len(years)):
         if current_year == years[i]:
             prediction_year_index = i + 1
     if prediction_year_index <= 0 or prediction_year_index >= len(years):
         logger.log_error("invalid year " + str(current_year))
     return years[prediction_year_index]
 def _get_previous_year(self, current_year, years):
     current_year_index = -1
     for i in range(len(years)):
         if current_year == years[i]:
             current_year_index = i
     if current_year_index <= 0 or current_year_index >= len(years):
         logger.log_error("invalid year " + str(current_year))
     return years[current_year_index - 1]
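The same lookups can be written more directly with list.index, which also makes the
missing-year case explicit (a sketch, not from the original source):

    def _get_adjacent_year(current_year, years, offset):
        # offset=+1 yields the prediction year, offset=-1 the previous year
        try:
            i = years.index(current_year) + offset
        except ValueError:
            raise ValueError("year %s not found in %s" % (current_year, years))
        if i < 0 or i >= len(years):
            raise IndexError("no year at offset %d from %s" % (offset, current_year))
        return years[i]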
Example #32
    def fork_new_process(
        self,
        module_name,
        resources,
        delete_temp_dir=True,
        optional_args=[],
        stdin=None,
        stdout=None,
        stderr=None,
        run_in_background=False,
    ):
        """Invoke the module whose fully-qualified opus name is module_name and pass it the 
        pickled resources.  Stores resources in pickle_file_path.
        If quiet=True, the console output for the command will not appear.
        """
        self.module_name = module_name
        self._pickle_dir = mkdtemp()
        try:
            if resources is None:
                pickle_file_path = None
            else:
                pickle_file_path = os.path.join(self._pickle_dir, "resources.pickle")
                write_resources_to_file(pickle_file_path, resources)

            self.python_cmd = self._assemble_command_line_call(module_name, resources, pickle_file_path, optional_args)

            if stdin == PIPE:
                stdin = subprocess.PIPE

            if stdout == PIPE:
                stdout = subprocess.PIPE
            elif stdout == LOG:
                log_file_path = os.path.join(self._pickle_dir, "_log_.log")
                stdout = open(log_file_path, "w")

            if stderr == PIPE:
                stderr = subprocess.PIPE
            elif stderr == STDOUT:
                stderr = subprocess.STDOUT
            elif stderr == LOG:
                log_file_path = os.path.join(self._pickle_dir, "_errlog_.log")
                stderr = open(log_file_path, "w")

            logger.log_status("Invoking: %s" % " ".join(self.python_cmd))
            self.popen = subprocess.Popen(self.python_cmd, stdin=stdin, stdout=stdout, stderr=stderr)
            if not run_in_background:
                self.wait()
        finally:
            if not run_in_background and delete_temp_dir:
                self.cleanup()
            returncode = self.popen.poll()

        if returncode != 0:
            if returncode != 10:
                logger.log_error("Error encountered in forked process with traceback:")
            return False
        else:
            return True
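The PIPE, STDOUT, and LOG names compared against the stream arguments here are
module-level sentinels of the surrounding package, not subprocess's own constants.
A minimal definition that would make the snippet read standalone (an assumption,
shown for illustration only):

    # Hypothetical sentinel values; callers pass these instead of raw subprocess
    # constants so the module can also redirect streams to log files.
    PIPE = 'pipe'
    STDOUT = 'stdout'
    LOG = 'log'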
Example #33
    def _find_opus_test_cases_for_package(self, package, test_case_class):
        root = OpusPackage().get_path_for_package(package)

        modules_with_test_cases = []

        for path, dirs, files in os.walk(root, topdown=True):
            for file in files:
                if not file.endswith(".py"):
                    continue

                f = open(os.path.join(path, file), "r")
                import_pattern = re.compile("^\s*(import|from).*unittest")
                skip_pattern = re.compile("^.*#.*IGNORE_THIS_FILE")

                found_import = False
                for line in f:
                    if skip_pattern.match(line):
                        break
                    if import_pattern.match(line):
                        found_import = True
                        break
                f.close()  # close the handle; os.walk can visit many files

                if not found_import:  # No unittest import found in file.
                    continue

                module_name = self._get_module_name(package, root, path, file)

                try:
                    exec("import %s" % module_name)
                except Exception:
                    logger.log_error("Could not import %s!" % module_name)

                    traceback.print_exc()

                    continue

                module = eval(module_name)

                if inspect.ismodule(module):
                    members = inspect.getmembers(module)

                    member_dict = {}
                    for key, value in members:
                        member_dict[key] = value

                    for key in member_dict.keys():
                        try:
                            is_subclass = issubclass(member_dict[key], test_case_class)
                        except:
                            pass
                        else:
                            if is_subclass:
                                class_name = member_dict[key].__name__

                                modules_with_test_cases.append((module_name, class_name))

                else:
                    logger.log_warning("WARNING: %s is not a module!" % module)
Example #34
 def checkAndCreateFolder(self, path):
     if not os.path.exists(path):
         msg = "Folder %s dosn't exist and is created ..." % (path)
         try:
             logger.log_status(msg)
             os.makedirs(path)
             logger.log_status("done!")
         except: 
             logger.log_error("Folder could not be created!")
Example #35
    def fork_new_process(self, module_name, resources, delete_temp_dir=True, optional_args=[],
                         stdin=None, stdout=None, stderr=None, 
                         run_in_background=False):
        """Invoke the module whose fully-qualified opus name is module_name and pass it the 
        pickled resources.  Stores resources in pickle_file_path.
        If quiet=True, the console output for the command will not appear.
        """
        self.module_name = module_name
        self._pickle_dir = mkdtemp()
        try:
            if resources is None:
                pickle_file_path = None
            else:
                pickle_file_path = os.path.join(self._pickle_dir, 'resources.pickle')
                write_resources_to_file(pickle_file_path, resources)
            
            self.python_cmd = \
                self._assemble_command_line_call(module_name, 
                                                 resources, 
                                                 pickle_file_path, 
                                                 optional_args)
        
            if stdin == PIPE:
                stdin = subprocess.PIPE

            if stdout == PIPE:
                stdout = subprocess.PIPE
            elif stdout == LOG:
                log_file_path = os.path.join(self._pickle_dir, '_log_.log')
                stdout = open(log_file_path, "w")
            
            if stderr == PIPE:
                stderr = subprocess.PIPE
            elif stderr == STDOUT:
                stderr = subprocess.STDOUT
            elif stderr == LOG:
                log_file_path = os.path.join(self._pickle_dir, '_errlog_.log')
                stderr = open(log_file_path, "w")

            logger.log_status("Invoking: %s" % " ".join(self.python_cmd))
            self.popen = subprocess.Popen(self.python_cmd, 
                                         stdin=stdin, 
                                         stdout=stdout, 
                                         stderr=stderr)
            if not run_in_background:
                self.wait()
        finally:
            if not run_in_background and delete_temp_dir:
                self.cleanup()    
            returncode = self.popen.poll()
            
        if returncode != 0:
            if returncode != 10:
                logger.log_error("Error encountered in forked process with traceback:" )
            return False
        else:
            return True
Example #36
    def load_table(self, table_name, column_names=Storage.ALL_COLUMNS, lowercase=True):
        file_path = self._get_file_path_for_table(table_name)
        
        if not os.path.exists(file_path):
            raise NameError("Table '%s' could not be found in %s." 
                % (table_name, self._output_directory))
        
        # load header
        available_column_names, available_column_types = self._get_header_information(table_name)
        
        if lowercase:
            available_column_names = self._lower_case(available_column_names)
        
        input = open(file_path, 'rb')
        try:
            reader = csv.reader(input, self._dialect_name)
            
            reader.next() # skip header because it was already read above
    
            index_map = self._get_index_map(column_names, available_column_names)
            
            # load data
            result = []
            for dummy_column_name in available_column_types:
                result.append([])
                
            for row in reader:
                for i in range(len(row)):
                    if i in index_map:
                        if available_column_types[i] == 'b1':
                            result[i].append(row[i] == 'True')
                        elif available_column_types[i] == 'f4':
                            result[i].append(float(row[i]))
                        else:
                            result[i].append(row[i])

                    
            table = {}
            for index in index_map:
                column_dtype = dtype(available_column_types[index])
                
                # Cast each value to its appropriate python type.
                for j in range(len(result[index])):
                    try:
                        result[index][j] = column_dtype.type(result[index][j])
                    except:
                        logger.log_error("Error encountered when processing row %s for column %s of type %s: %s." % \
                                         (j+1, available_column_names[index], column_dtype, result[index][j]) )
                        raise
                    
                table[available_column_names[index]] = array(result[index], dtype=column_dtype)
        
        finally:
            input.close()
        
        return table
Example #37
def run_transcad_macro(macroname, dbname, args=None):
    tc = w32c.Dispatch("TransCAD.AutomationServer")
    try:
        return_val = tc.Macro(macroname, dbname, args)
        if return_val is not None:
            try:
                logger.log_error(return_val[-1][-1])  # if the return is an error msg
            except:
                return return_val  # else pass the return_val to the caller
    finally:
        del tc
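A hypothetical call, purely for illustration (macro and database names are
placeholders; the args format depends on the TransCAD macro being invoked):

    # Runs a macro in the given TransCAD database and inspects the outcome.
    result = run_transcad_macro("Run Assignment", r"C:\models\network.dbd")
    if result is not None:
        print(result)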
Example #38
 def post_check(self, values, dataset_pool):
     """Check for where there is population on fully-water zones (e.g. on no land).
     """
     from numpy import isinf, isnan
     if isinf(values).sum():
         logger.log_error("Variable %s has %d Infinity values" %
                          (self.__class__.__module__, isinf(values).sum()))
     if isnan(values).sum():
         logger.log_error("Variable %s has %d NaN values" %
                          (self.__class__.__module__, isnan(values).sum()))
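A quick standalone exercise of the same check, with made-up values (illustrative):

    from numpy import array, inf, isinf, isnan, nan

    values = array([1.0, inf, nan, 2.0])
    print(isinf(values).sum())  # -> 1 infinity value
    print(isnan(values).sum())  # -> 1 NaN value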
Example #40
    def load_table(self,
                   table_name,
                   column_names=Storage.ALL_COLUMNS,
                   lowercase=True):
        db = self._get_db()

        table = db.get_table(
            table_name)  #Table(table_name, db.metadata, autoload=True)

        available_column_names = self.get_column_names(table_name, lowercase)
        final_cols = self._select_columns(column_names, available_column_names)

        if final_cols == []:
            return {}

        col_data = {}
        selectable_columns = []
        table_data = {}

        for column in table.columns:
            if lowercase:
                col_name = column.name.lower()
            else:
                col_name = column.name

            if col_name in final_cols:
                col_type = self._get_numpy_dtype_from_sql_alchemy_type(
                    column.type)
                col_data[col_name] = (column, col_type)
                table_data[col_name] = []
                selectable_columns.append(column)

        query = select(columns=selectable_columns)

        query_results = db.execute(query)

        while True:
            row = query_results.fetchone()
            if row is None: break
            for col_name, (column, col_type) in col_data.items():
                table_data[col_name].append(row[column])

        for col_name, (column, col_type) in col_data.items():
            try:
                table_data[col_name] = array(table_data[col_name],
                                             dtype=col_type)
            except:
                logger.log_error(
                    "Error occurred when exporting column %s; it may be caused by NULL values."
                    % col_name)
                raise

        self._dispose_db(db)
        return table_data
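A minimal standalone sketch of the same fetch-rows-then-build-arrays pattern, assuming SQLAlchemy 1.x and an in-memory SQLite database; the table and column names are invented:

from numpy import array
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, select

engine = create_engine('sqlite://')
metadata = MetaData()
zones = Table('zones', metadata, Column('zone_id', Integer))
metadata.create_all(engine)

conn = engine.connect()
conn.execute(zones.insert(), [{'zone_id': 1}, {'zone_id': 2}])

# Fetch rows, then convert each column to a numpy array, as above.
rows = conn.execute(select([zones.c.zone_id])).fetchall()
table_data = {'zone_id': array([r[0] for r in rows], dtype='i4')}
print(table_data)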
Exemplo n.º 41
0
    def on_buttonBox_accepted(self):
        path = str(self.lePath.text())
        if not os.path.exists(path):
            msg = 'Cannot import, %s does not exist' % path
            logger.log_warning(msg)
            MessageBox.warning(mainwindow=self, text=msg, detailed_text='')
        else:
            cache_directory = path
            years = []

            for dir in os.listdir(cache_directory):
                if len(dir) == 4 and dir.isdigit():
                    years.append(int(dir))
            if years == []:
                msg = 'Cannot import, %s has no run data' % path
                logger.log_warning(msg)
                MessageBox.warning(mainwindow=self, text=msg, detailed_text='')

            else:
                start_year = min(years)
                end_year = max(years)
                project_name = os.environ['OPUSPROJECTNAME']
                run_name = os.path.basename(path)

                server_config = ServicesDatabaseConfiguration()
                run_manager = RunManager(server_config)

                run_id = run_manager._get_new_run_id()
                resources = {
                    'cache_directory': cache_directory,
                    'description': '',
                    'years': (start_year, end_year),
                    'project_name': project_name
                }

                try:
                    run_manager.add_row_to_history(run_id=run_id,
                                                   resources=resources,
                                                   status='done',
                                                   run_name=run_name)
                    update_available_runs(self.project)
                    logger.log_status(
                        'Added run %s of project %s to run_activity table' %
                        (run_name, project_name))
                except:
                    errorInfo = formatExceptionInfo()
                    logger.log_error(errorInfo)
                    MessageBox.error(
                        mainwindow=self,
                        text=
                        'Could not add run %s of project %s to run_activity table'
                        % (run_name, project_name),
                        detailed_text=errorInfo)
        self.close()
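The year detection above is a simple directory scan; a standalone sketch, with the cache path as a placeholder:

import os

def detect_years(cache_directory):
    # A simulation cache is assumed to hold one four-digit
    # subdirectory per simulated year, as in the handler above.
    return sorted(int(d) for d in os.listdir(cache_directory)
                  if len(d) == 4 and d.isdigit())

# e.g. detect_years(r'C:\opus\data\runs\run_42') -> [2000, 2001, ...]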
Exemplo n.º 44
0
 def get_travel_data_from_travel_model(self, config, 
                                       year, zone_set ):
     """
     Returns a new travel data set populated by a travel model
     """
     logger.log_status("Running GetTravelModelDataIntoCache with year %d" % (year))
     logger.log_status(zone_set)
     tm_config = config['travel_model_configuration']
     
     base_dir    = tm_config['travel_model_base_directory']    
     run_dir     = os.path.join(base_dir, tm_config[year]['year_dir'])
     in_storage  = StorageFactory().get_storage('dict_storage')
     max_zone_id = zone_set.get_id_attribute().max()
     in_storage.write_table(
             table_name=self.TABLE_NAME,
             table_data=self._read_skims(run_dir, max_zone_id)
         )
             
     travel_data_set = TravelDataDataset(in_storage=in_storage, in_table_name=self.TABLE_NAME)
      
     # Disabling for now.  This is problematic because the dispatcher doesn't give
     # up a handle and things fail.
     if False:
         
         # zip up model_dir
         cmd = r'"C:\Program Files\7-Zip\7z.exe"' 
         cmd = cmd + ' a %s.7z %s' % (tm_config[year]['year_dir'], tm_config[year]['year_dir'])
         logger.start_block("Running [%s]" % (cmd))
         zipproc = subprocess.Popen( cmd, cwd = base_dir, stdout=subprocess.PIPE ) 
         for line in zipproc.stdout:
             logger.log_status(line.strip('\r\n'))
         zipret  = zipproc.wait()
         logger.log_status("Returned %d" % (zipret))
         if zipret != 0:
             logger.log_error("Zip (%s) exited with bad return code" % (cmd))
         logger.end_block()
         
         # delete everything except for the triptables subdir
         try:
             delete_list = os.listdir(run_dir)
             for del_file in delete_list:
                 if del_file == "triptables": continue
                 
                 del_file_path = os.path.join(run_dir, del_file)
                 if os.path.isfile(del_file_path): os.remove(del_file_path)
                 elif os.path.isdir(del_file_path): shutil.rmtree(del_file_path)
             
             
             # recycle bin remove
             (drive, tail) = os.path.splitdrive(run_dir)
             recycle_dir =  os.path.join(drive + r"\\", RECYCLE_BIN, tail.lstrip(r"\\"))
             shutil.rmtree(recycle_dir)
         except Exception, err:
             logger.log_error("Error: %s" % str(err))
Exemplo n.º 45
0
    def prepare_for_run(self, config, year, check_tcw_process=False):
        """Before calling the travel model macro, check whether the TransCAD GUI
        is running; if not, try to start the TransCAD binary process."""
        ## TODO: the TransCAD COM server is very picky about the tcw.exe process in memory.
        ## As of April 2007, a tcw process started by python won't work,
        ## so the TransCAD program must be started manually before running this script.

        set_project_ini_file(config, year)
        if not check_tcw_process:
            return

        cmdline = config['transcad_binary']
        head, tail = os.path.split(cmdline)
        procname, ext = os.path.splitext(tail)  #tcw

        start_program = False
        tc_program_classname = "tcFrame"  #ClassName for the TransCAD program
        try:
            hwnd = win32gui.FindWindow(tc_program_classname, None)
        except:
            start_program = True  # no TransCAD window found; we'll need to start the TransCAD program
        else:
            try:
                #first check if a tcw process is in memory
                win32pdhutil.GetPerformanceAttributes('Process', 'ID Process',
                                                      procname)
                pids = win32pdhutil.FindPerformanceAttributesByName(procname)
                for pid in pids:
                    # TerminateProcess needs a process handle and exit code,
                    # not a raw pid; 1 == PROCESS_TERMINATE access right
                    handle = win32api.OpenProcess(1, 0, pid)
                    win32process.TerminateProcess(handle, 0)
                start_program = True
            except:
                raise RuntimeError, "Unable to kill TransCAD process in memory"

        ##TransCAD not started; try to start it
        if start_program:
            try:
                cmdline = win32api.GetShortPathName(cmdline)
                cmdline = cmdline + " -q"
                os.system('start /B "start TransCAD" ' +
                          cmdline)  #start TransCAD in the background
                time.sleep(9)
                #procHandles = win32process.CreateProcess(None, cmdline, None, None, 0, 0, None, None,
                #win32process.STARTUPINFO())
                #self.hProcess, hThread, PId, TId = procHandles
            except:
                logger.log_error(
                    "Unable to start TransCAD in %s; it must be running to invoke travel model macro."
                    % cmdline)
                sys.exit(1)
Exemplo n.º 46
0
    def test_get_table(self):
        test_table_schema = {"col1": "INTEGER", "col2": "FLOAT"}
        test_table = "test_table"

        for db, server in self.dbs:
            try:
                self.assertFalse(db.table_exists(test_table))
                db.create_table_from_schema(test_table, test_table_schema)
                t = db.get_table(test_table)
                self.assertTrue(isinstance(t, Table))
                self.assertTrue(t.name == test_table)
            except:
                logger.log_error("Protocol %s" % server.config.protocol)
                raise
Exemplo n.º 48
0
def get_variables_coefficients_from_list(list_spec, definition={}):
    variables = []
    coefficients = []
    fixed_values = []
    error = False

    for var_coef in list_spec:
        if isinstance(var_coef, str):
            #var_coef is just variables long names or alias
            var_name, var_index = get_full_variable_specification_for_var_coef(
                var_coef, definition)
            variables.append(var_name)
            if var_index is not None:
                coefficients.append(definition["coefficients"][var_index])
                fixed_values.append(definition["fixed_values"][var_index])
            else:
                coefficients.append(VariableName(var_coef).get_alias())
                fixed_values.append(0)
        elif isinstance(var_coef, tuple) or isinstance(var_coef, list):
            var_name, var_index = get_full_variable_specification_for_var_coef(
                var_coef[0], definition)
            variables.append(var_name)
            if len(var_coef) == 1:  # coefficient name is created from variable alias
                coefficients.append(VariableName(var_coef[0]).get_alias())
                fixed_values.append(0)
            elif len(var_coef) > 1:  # coefficient names explicitly given
                coefficients.append(var_coef[1])
                if len(var_coef) > 2:  # third item is the coefficient fixed value
                    fixed_values.append(var_coef[2])
                else:
                    fixed_values.append(0)
            else:
                logger.log_error("Wrong specification format for variable %s" %
                                 var_coef)
                error = True
        elif isinstance(var_coef, dict):
            var_name, var_index = get_full_variable_specification_for_var_coef(
                var_coef.keys()[0], definition)
            variables.append(var_name)
            coefficients.append(var_coef.values()[0])
            fixed_values.append(0)
        else:
            logger.log_error("Wrong specification format for variable %s" %
                             var_coef)
            error = True

    return (variables, coefficients, fixed_values, error)
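A tiny standalone mirror of the three entry formats handled above (bare name, tuple/list with optional coefficient and fixed value, single-entry dict); no Opus helpers are needed and all names are invented:

def parse_entry(entry):
    if isinstance(entry, str):
        return entry, None, 0
    if isinstance(entry, (tuple, list)):
        name = entry[0]
        coefficient = entry[1] if len(entry) > 1 else None
        fixed_value = entry[2] if len(entry) > 2 else 0
        return name, coefficient, fixed_value
    if isinstance(entry, dict):
        (name, coefficient), = entry.items()
        return name, coefficient, 0
    raise ValueError("Wrong specification format for variable %r" % (entry,))

print(parse_entry('population'))
print(parse_entry(('population', 'beta_pop', 1.5)))
print(parse_entry({'population': 'beta_pop'}))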
Exemplo n.º 49
0
 def run(self, config, year, *args, **kwargs):
     """Runs the travel model, using appropriate info from config. 
     """
     
     tm_config = config["travel_model_configuration"]
     tm_data_dir = tm_config["directory"]
     self.prepare_for_run(config, year)
     logger.log_status('Start travel model from directory %s for year %d' % (tm_data_dir, year))
     cmd_batch = self.create_travel_model_command_batch(tm_config, year, *args, **kwargs)
     logger.log_status('Running command %s' % cmd_batch)
     cmd_result = os.system(cmd_batch)
     if cmd_result != 0:
         error_msg = "Run travel model failed. Code returned by cmd was %d" % (cmd_result)
         logger.log_error(error_msg)
         raise StandardError(error_msg)
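The failure handling reduces to checking os.system's return code; a sketch in which 'echo' stands in for the real travel-model batch file:

import os

cmd_batch = 'echo running travel model'   # placeholder command
cmd_result = os.system(cmd_batch)
if cmd_result != 0:
    raise RuntimeError('travel model failed with code %d' % cmd_result)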
Exemplo n.º 50
0
    def __check_and_get_matsim_url(self):
        logger.log_status('Checking availability of necessary MATSim files (nightly builds): %s' % self.source_url)
        # parse the matsim.org nightly-builds website
        self.html_finder.feed( urllib2.urlopen( self.source_url ).read() )

        if not self.html_finder.allLinksFound():
            logger.log_error('Aborting MATSim4UrbanSim installation!')
            exit()
        
        # build URLs in preparation for download
        self.matsim_jar_url = self.source_url + self.html_finder.getMATSimJar() # matsim url
        self.matsim_lib_url = self.source_url + self.html_finder.getMATSimLib() # lib url
        self.matsim_contrib_url = self.source_url + self.html_finder.getMATSimContrib() # contrib url
        
        logger.log_status('Availability check done!')
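A standalone sketch of the feed-the-parser pattern above, using the Python 2 stdlib HTMLParser (matching the snippet's urllib2) to collect anchor hrefs; the URL is illustrative:

from HTMLParser import HTMLParser
import urllib2

class LinkFinder(HTMLParser):
    def __init__(self):
        HTMLParser.__init__(self)
        self.links = []
    def handle_starttag(self, tag, attrs):
        if tag == 'a':
            self.links.extend(value for name, value in attrs if name == 'href')

finder = LinkFinder()
finder.feed(urllib2.urlopen('http://example.org/').read())
print finder.links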
Exemplo n.º 51
0
 def _cast_values(self, values, arguments):
     """Change the return values to be of type self._return_type.
     If "should_check" is defined, first check for 
     values that are too large for the destination type or
     integer wrap-around."""
     type = values.dtype.str
     if self._return_type == type:
         return values
     if self.should_check(arguments):
         max_value = ma.maximum(values)
         if max_value > self._max_storable_value[self._return_type]:
             max_value_str = str(max_value)
             logger.log_error("Variable '%s' is being cast to type '%s', but contains a value (%s) too large to fit into that type."
                              % (self.name(), self._return_type, max_value_str))
     return values.astype(self._return_type)
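The overflow guard is ordinary numpy; a standalone illustration, with values.max() standing in for the masked-array helper and an invented target type:

from numpy import array, iinfo

values = array([1.0, 3.5e9])
target_type = 'int32'
max_value = values.max()
if max_value > iinfo(target_type).max:
    print("value %s is too large to fit into '%s'" % (max_value, target_type))
else:
    print(values.astype(target_type))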
Exemplo n.º 52
0
def get_variables_coefficients_equations_for_submodel(submodel_spec,
                                                      sub_model,
                                                      definition={}):
    variables = []
    coefficients = []
    fixed_values = []
    equations = []
    submodels = []
    other_fields = {}
    error = False
    if isinstance(submodel_spec, (tuple, list)):  # no equations or other fields given
        variables, coefficients, fixed_values, error = get_variables_coefficients_from_list(
            submodel_spec, definition)
    elif isinstance(submodel_spec, dict):
        name = submodel_spec.get('name', 'equation')
        if name.startswith('equation'):  # by default the dictionary is on an equation level
            variables, coefficients, fixed_values, equations, error = get_variables_coefficients_equations_from_dict(
                submodel_spec, definition)
        else:
            del submodel_spec['name']
            other_fields['dim_%s' % name] = []
            for other_field_value, spec in submodel_spec.iteritems():
                variable, coefficient, equation, submodel, fixed_value, other_field = \
                            get_variables_coefficients_equations_for_submodel(spec, sub_model, definition)
                variables += variable
                coefficients += coefficient
                equations += equation
                submodels += submodel
                fixed_values += fixed_value
                other_fields['dim_%s' % name] += len(variable) * [other_field_value]
                for key, value in other_field.iteritems():
                    if key in other_fields:
                        other_fields[key] = concatenate(
                            (other_fields[key], value))
                    else:
                        other_fields[key] = array(value)
    else:
        logger.log_error("Error in specification of submodel %s." % sub_model)
        return ([], [], [], [], [], {})
    if error:
        logger.log_error("Error in specification of submodel %s" % sub_model)
    submodels = len(variables) * [sub_model]
    return (variables, coefficients, equations, submodels, fixed_values,
            other_fields)
Exemplo n.º 53
0
class StartRunOptionGroup(object):
    """ Helper class to start model from an xml config file. 
    """

    logger.start_block("Starting UrbanSim")

    # get program arguments from the command line
    program_arguments = sys.argv[1:]

    # default parameters are:
    # --config=opus_matsim/sustain_city/configs/seattle_parcel_prescheduled_events.xml
    # --executable=Seattle_baseline
    parser = optparse.OptionParser()
    parser.add_option("-c",
                      "--config",
                      dest="config_file_name",
                      action="store",
                      type="string",
                      help="Name of file containing urbansim config")
    parser.add_option("-e",
                      "--executable",
                      dest="scenario_executable",
                      action="store",
                      type="string",
                      help="Model to execute")
    (options, args) = parser.parse_args()

    if options.config_file_name is None:
        logger.log_error("Missing path to the urbansim config file")
    if options.scenario_executable is None:
        logger.log_error("Missing name of executable scenario")

    config = XMLConfiguration(options.config_file_name).get_run_configuration(
        options.scenario_executable)

    insert_auto_generated_cache_directory_if_needed(config)

    run_manager = RunManager(ServicesDatabaseConfiguration())

    run_manager.setup_new_run(cache_directory=config['cache_directory'],
                              configuration=config)

    #try: #tnicolai
    #    import pydevd
    #    pydevd.settrace()
    #except: pass

    run_manager.run_run(config, run_as_multiprocess=True)
Exemplo n.º 55
0
 def execute_cmd_and_get_stdout(self, cmd, raise_at_error=False):
     """
     """
     stdin, stdout, stderr = self.ssh.exec_command(cmd)
     stderr_msg = stderr.readlines() 
     if len(stderr_msg) > 0:
         logger.log_error("Error encountered executing cmd through ssh:\n" + ''.join(stderr_msg))
         if raise_at_error:
             raise RuntimeError, "Error encountered executing cmd through ssh:\n" + ''.join(stderr_msg)
 
     results = stdout.readlines()
     if len(results) == 1:
         results = results[0].strip()

     stdin.close()
     stdout.close()
     stderr.close()
     return results
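A hypothetical setup for the helper above, assuming paramiko; the host and credentials are placeholders:

import paramiko

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('remote-host.example', username='opus', password='secret')

stdin, stdout, stderr = ssh.exec_command('uname -a')
print(stdout.read())
ssh.close()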
Exemplo n.º 56
0
    def test_create_drop_and_has_table(self):
        test_table_schema = {'col1': "INTEGER", 'col2': "FLOAT"}
        test_table = 'test_table'

        for db, server in self.dbs:
            try:
                self.assertFalse(db.table_exists(test_table))

                db.create_table_from_schema(test_table, test_table_schema)
                self.assertTrue(db.table_exists(test_table))

                db.drop_table(test_table)
                self.assertFalse(db.table_exists(test_table))
            except:
                logger.log_error('Protocol %s' % server.config.protocol)
                raise
Exemplo n.º 57
0
    def cache_database_tables(self, config, database_name, tables):
        """Loads cache with all tables from this database.
        """
        database = self.database_server.get_database(database_name)
        in_storage = StorageFactory().get_storage(type='sql_storage',
                                                  storage_location=database)

        for table_name in tables:
            if table_name in self.tables_to_cache:
                try:
                    self.cache_database_table(table_name, config['base_year'],
                                              database, in_storage, config)
                except:
                    logger.log_error(
                        "Problem caching table %s from database %s" %
                        (table_name, database_name))
                    raise
                self.tables_cached.add(table_name)