Example #1
0
    def _test_generate_results(self, indicator_name, dataset_name, expression,
                               source):
        """Check that *expression* can be computed against the base-year data.

        Returns a ``(success, errors)`` tuple: ``(True, None)`` when the
        expression computes, ``(False, <message>)`` when the project has no
        base-year run or the computation raises.
        """
        # Grab the first base_year_data run in results_manager/simulation_runs
        # and fetch its end year.
        base_year = self.project.find(
            "results_manager/simulation_runs/run[@name='base_year_data']/end_year"
        )
        if base_year is None:
            return False, "Project doesn't have any base year data to check against"

        start_year = int(base_year.text)
        result_generator = OpusResultGenerator(self.project)
        result_generator.set_data(source_data_name='base_year_data',
                                  indicator_name=indicator_name,
                                  dataset_name=dataset_name,
                                  years=[start_year],
                                  indicator_definition=(expression, source))

        interface = IndicatorFrameworkInterface(self.project)
        src_data = interface.get_source_data(source_data_name='base_year_data',
                                             years=[start_year])
        # Point the global simulation state at the base-year cache before
        # pulling the dataset from the pool.
        SimulationState().set_current_time(start_year)
        SimulationState().set_cache_directory(src_data.cache_directory)
        SessionConfiguration(
            new_instance=True,
            package_order=src_data.dataset_pool_configuration.package_order,
            in_storage=AttributeCache())

        dataset = SessionConfiguration().get_dataset_from_pool(dataset_name)
        if isinstance(dataset, InteractionDataset):
            # Interaction datasets are a cross product and can be huge;
            # compute on a random 100x100 subset instead.
            dataset_arguments = {
                'index1':
                numpy.random.randint(0, dataset.dataset1.size(), size=100),
                'index2':
                numpy.random.randint(0, dataset.dataset2.size(), size=100)
            }
            SessionConfiguration().delete_datasets()
            dataset = SessionConfiguration().get_dataset_from_pool(
                dataset_name, dataset_arguments=dataset_arguments)

        try:
            dataset.compute_variables(names=[expression])
            return True, None
        except Exception as e:  # 'as' form works on py2.6+ and py3
            # Don't shadow the builtin 'type' when unpacking exc_info().
            exc_type, exc_value, exc_tb = sys.exc_info()
            stack_dump = ''.join(
                traceback.format_exception(exc_type, exc_value, exc_tb))
            errors = "{}\n\n{}".format(e, stack_dump)
            return False, errors
 def run(self, year, condition=None, max_iter=10):
     """Run the model set for the given year, optionally until convergence.

     'year' is the current year of the simulation.
     'condition' should be a boolean expression defined on any dataset.
     The method iterates over the given models until all values of the expression are True. 
     'max_iter' gives the maximum number of iterations to run, if 'condition' is not fulfilled.
     If it is None, there is no limit and thus, the condition must be fulfilled in order to terminate.
     If 'condition' is None, the set of models is run only once.
     """
     self.config['years'] = (year, year)
     if condition is None:
         return self.model_system.run_in_same_process(self.config)
     dataset_pool = SessionConfiguration().get_dataset_pool()
     variable_name = VariableName(condition)
     dataset = dataset_pool.get_dataset(variable_name.get_dataset_name())
     condition_value = dataset.compute_variables(variable_name, dataset_pool=dataset_pool)
     result = None
     iteration = 1  # renamed from 'iter' to avoid shadowing the builtin
     while not alltrue(condition_value):
         result = self.model_system.run_in_same_process(self.config)
         # BUGFIX: the original tested 'max_iter is None or ...', which broke
         # out after a single iteration when max_iter was None — the opposite
         # of the documented "no limit" behaviour.
         if max_iter is not None and iteration > max_iter:
             break
         iteration += 1
         # force to recompute the condition
         dataset = SessionConfiguration().get_dataset_pool().get_dataset(variable_name.get_dataset_name())
         dataset.delete_computed_attributes()
         condition_value = dataset.compute_variables(variable_name, 
                                                     dataset_pool=SessionConfiguration().get_dataset_pool())
     if not alltrue(condition_value):
         logger.log_status('%s did not converge. Maximum number of iterations (%s) reached.' % (self.model_name, max_iter))
     else:
         logger.log_status('%s converged in %s iterations.' % (self.model_name, iteration-1))  
     return result
Example #3
0
    def _test_generate_results(self, indicator_name, dataset_name, expression, source):
        """Check that *expression* can be computed against the base-year data.

        Returns ``(True, None)`` on success, or ``(False, <error text with
        traceback>)`` when no base-year run exists or computation fails.
        """
        # grab the first base_year_data in results_manager/simulation_runs and
        # fetch the year for it
        base_year = self.project.find("results_manager/simulation_runs/run[@name='base_year_data']/end_year")
        if base_year is None:
            return False, "Project doesn't have any base year data to check against"

        start_year = int(base_year.text)
        result_generator = OpusResultGenerator(self.project)
        result_generator.set_data(
               source_data_name = 'base_year_data',
               indicator_name = indicator_name,
               dataset_name = dataset_name,
               years = [start_year],
               indicator_definition = (expression, source))

        interface = IndicatorFrameworkInterface(self.project)
        src_data = interface.get_source_data(source_data_name = 'base_year_data', years = [start_year])
        # Point the global simulation state at the base-year cache before
        # pulling the dataset from the pool.
        SimulationState().set_current_time(start_year)
        SimulationState().set_cache_directory(src_data.cache_directory)
        SessionConfiguration(
            new_instance = True,
            package_order = src_data.dataset_pool_configuration.package_order,
            in_storage = AttributeCache())

        dataset = SessionConfiguration().get_dataset_from_pool(dataset_name)
        if isinstance(dataset, InteractionDataset):
            # Interaction datasets are a cross product and can be huge;
            # compute on a random 100x100 subset instead.
            dataset_arguments = {
                 'index1': numpy.random.randint(0, dataset.dataset1.size(), size=100),
                 'index2': numpy.random.randint(0, dataset.dataset2.size(), size=100)
            }
            SessionConfiguration().delete_datasets()
            dataset = SessionConfiguration().get_dataset_from_pool(dataset_name,
                                                                   dataset_arguments = dataset_arguments)

        try:
            dataset.compute_variables(names = [expression])
            return True, None
        except Exception as e:  # 'as' form works on py2.6+ and py3
            # Don't shadow the builtin 'type' when unpacking exc_info().
            exc_type, exc_value, exc_tb = sys.exc_info()
            stack_dump = ''.join(traceback.format_exception(exc_type, exc_value, exc_tb))
            errors = "{}\n\n{}".format(e, stack_dump)
            return False, errors
 def run(self, year, condition=None, max_iter=10):
     """Run the model set for the given year, optionally until convergence.

     'year' is the current year of the simulation.
     'condition' should be a boolean expression defined on any dataset.
     The method iterates over the given models until all values of the expression are True. 
     'max_iter' gives the maximum number of iterations to run, if 'condition' is not fulfilled.
     If it is None, there is no limit and thus, the condition must be fulfilled in order to terminate.
     If 'condition' is None, the set of models is run only once.
     """
     self.config['years'] = (year, year)
     if condition is None:
         return self.model_system.run_in_same_process(self.config)
     dataset_pool = SessionConfiguration().get_dataset_pool()
     variable_name = VariableName(condition)
     dataset = dataset_pool.get_dataset(variable_name.get_dataset_name())
     condition_value = dataset.compute_variables(variable_name,
                                                 dataset_pool=dataset_pool)
     result = None
     iteration = 1  # renamed from 'iter' to avoid shadowing the builtin
     while not alltrue(condition_value):
         result = self.model_system.run_in_same_process(self.config)
         # BUGFIX: the original tested 'max_iter is None or ...', which broke
         # out after a single iteration when max_iter was None — the opposite
         # of the documented "no limit" behaviour.
         if max_iter is not None and iteration > max_iter:
             break
         iteration += 1
         # force to recompute the condition
         dataset = SessionConfiguration().get_dataset_pool().get_dataset(
             variable_name.get_dataset_name())
         dataset.delete_computed_attributes()
         condition_value = dataset.compute_variables(
             variable_name,
             dataset_pool=SessionConfiguration().get_dataset_pool())
     if not alltrue(condition_value):
         logger.log_status(
             '%s did not converge. Maximum number of iterations (%s) reached.'
             % (self.model_name, max_iter))
     else:
         logger.log_status('%s converged in %s iterations.' %
                           (self.model_name, iteration - 1))
     return result
Example #5
0
    def visualize(self,
                  indicators_to_visualize,
                  computed_indicators):
        """Create a map for each requested indicator and save it to the cache
        directory's 'indicators' sub-directory.

        Returns a list of visualization-metadata representations, one per
        (indicator, table, year) map recorded.
        """

        #TODO: eliminate this example indicator stuff
        example_indicator = computed_indicators[indicators_to_visualize[0]]
        source_data = example_indicator.source_data
        dataset_to_attribute_map = {}

        package_order = source_data.get_package_order()

        self._create_input_stores(years=source_data.years)

        # Group the requested indicators by the dataset they are computed on;
        # all indicators in a batch must share the same source_data.
        for name, computed_indicator in computed_indicators.items():
            if name not in indicators_to_visualize:
                continue

            if computed_indicator.source_data != source_data:
                raise Exception('result templates in indicator batch must all be the same.')
            dataset_name = computed_indicator.indicator.dataset_name
            if dataset_name == 'parcel':
                raise Exception('Cannot create a Matplotlib map for parcel dataset. Please plot at a higher geographic aggregation')
            if dataset_name not in dataset_to_attribute_map:
                dataset_to_attribute_map[dataset_name] = []
            dataset_to_attribute_map[dataset_name].append(name)

        viz_metadata = []
        for dataset_name, indicator_names in dataset_to_attribute_map.items():
            attributes = [(name, computed_indicators[name].get_computed_dataset_column_name())
                          for name in indicator_names]
            for year in source_data.years:
                # Re-point the simulation state at this year's cache before
                # pulling the dataset from the pool.
                SessionConfiguration(
                    new_instance=True,
                    package_order=package_order,
                    in_storage=AttributeCache())
                SimulationState().set_cache_directory(source_data.cache_directory)
                SimulationState().set_current_time(year)
                dataset = SessionConfiguration().get_dataset_from_pool(dataset_name)
                dataset.load_dataset()

                if dataset.get_coordinate_system() is not None:
                    dataset.compute_variables(names=dataset.get_coordinate_system())

                for indicator_name, computed_name in attributes:
                    table_data = self.input_stores[year].load_table(
                        table_name=dataset_name,
                        column_names=[computed_name])

                    if computed_name in table_data:
                        table_name = self.get_name(
                            dataset_name=dataset_name,
                            years=[year],
                            attribute_names=[indicator_name])

                        if self.scale:
                            min_value, max_value = self.scale
                        else:
                            min_value, max_value = (None, None)

                        file_path = os.path.join(
                            self.storage_location,
                            table_name + '.' + self.get_file_extension())

                        dataset.add_attribute(name=str(computed_name),
                                              data=table_data[computed_name])

                        # Skip plotting when the image already exists; the
                        # metadata is still recorded below either way.
                        if not os.path.exists(file_path):
                            dataset.plot_map(
                                 name=str(computed_name),
                                 min_value=min_value,
                                 max_value=max_value,
                                 file=str(file_path),
                                 my_title=str(indicator_name),
                            )

                        metadata = ([indicator_name], table_name, [year])
                        viz_metadata.append(metadata)
                    else:
                        logger.log_warning('There is no computed indicator %s' % computed_name)

        visualization_representations = []
        for indicator_names, table_name, years in viz_metadata:
            visualization_representations.append(
                self._get_visualization_metadata(
                    computed_indicators=computed_indicators,
                    indicators_to_visualize=indicator_names,
                    table_name=table_name,
                    years=years
            ))

        return visualization_representations
Example #6
0
    def visualize(self, indicators_to_visualize, computed_indicators):
        """Create a map for the given indicator, save it to the cache
        directory's 'indicators' sub-directory.

        Returns a list of visualization-metadata representations, one per
        (indicator, table, year) map recorded.
        """

        #TODO: eliminate this example indicator stuff
        example_indicator = computed_indicators[indicators_to_visualize[0]]
        source_data = example_indicator.source_data
        dataset_to_attribute_map = {}

        package_order = source_data.get_package_order()

        self._create_input_stores(years=source_data.years)

        # Group the requested indicators by the dataset they are computed on;
        # all indicators in a batch must share the same source_data.
        for name, computed_indicator in computed_indicators.items():
            if name not in indicators_to_visualize: continue

            if computed_indicator.source_data != source_data:
                raise Exception(
                    'result templates in indicator batch must all be the same.'
                )
            dataset_name = computed_indicator.indicator.dataset_name
            if dataset_name == 'parcel':
                raise Exception(
                    'Cannot create a Matplotlib map for parcel dataset. Please plot at a higher geographic aggregation'
                )
            if dataset_name not in dataset_to_attribute_map:
                dataset_to_attribute_map[dataset_name] = []
            dataset_to_attribute_map[dataset_name].append(name)

        viz_metadata = []
        for dataset_name, indicator_names in dataset_to_attribute_map.items():
            attributes = [
                (name,
                 computed_indicators[name].get_computed_dataset_column_name())
                for name in indicator_names
            ]
            for year in source_data.years:
                # Re-point the simulation state at this year's cache before
                # pulling the dataset from the pool.
                SessionConfiguration(new_instance=True,
                                     package_order=package_order,
                                     in_storage=AttributeCache())
                SimulationState().set_cache_directory(
                    source_data.cache_directory)
                SimulationState().set_current_time(year)
                dataset = SessionConfiguration().get_dataset_from_pool(
                    dataset_name)
                dataset.load_dataset()

                if dataset.get_coordinate_system() is not None:
                    dataset.compute_variables(
                        names=dataset.get_coordinate_system())

                for indicator_name, computed_name in attributes:
                    table_data = self.input_stores[year].load_table(
                        table_name=dataset_name, column_names=[computed_name])

                    if computed_name in table_data:
                        table_name = self.get_name(
                            dataset_name=dataset_name,
                            years=[year],
                            attribute_names=[indicator_name])

                        if self.scale:
                            min_value, max_value = self.scale
                        else:
                            min_value, max_value = (None, None)

                        file_path = os.path.join(
                            self.storage_location,
                            table_name + '.' + self.get_file_extension())

                        dataset.add_attribute(name=str(computed_name),
                                              data=table_data[computed_name])

                        # Skip plotting when the image already exists; the
                        # metadata is still recorded below either way.
                        if not os.path.exists(file_path):
                            dataset.plot_map(
                                name=str(computed_name),
                                min_value=min_value,
                                max_value=max_value,
                                file=str(file_path),
                                my_title=str(indicator_name),
                            )

                        metadata = ([indicator_name], table_name, [year])
                        viz_metadata.append(metadata)
                    else:
                        logger.log_warning(
                            'There is no computed indicator %s' %
                            computed_name)

        visualization_representations = []
        for indicator_names, table_name, years in viz_metadata:
            visualization_representations.append(
                self._get_visualization_metadata(
                    computed_indicators=computed_indicators,
                    indicators_to_visualize=indicator_names,
                    table_name=table_name,
                    years=years))

        return visualization_representations
Example #7
0
    def visualize(self, indicators_to_visualize, computed_indicators):
        """Create per-year animation frames for each requested indicator and
        save them to the cache directory's 'indicators' sub-directory, then
        assemble them via ``create_animation``.

        Returns a list of visualization-metadata representations built from
        ``viz_metadata``.
        """

        # TODO: eliminate this example indicator stuff
        example_indicator = computed_indicators[indicators_to_visualize[0]]
        source_data = example_indicator.source_data
        dataset_to_attribute_map = {}

        package_order = source_data.get_package_order()

        self._create_input_stores(years=source_data.years)

        # Group the requested indicators by the dataset they are computed on;
        # all indicators in a batch must share the same source_data.
        for name, computed_indicator in computed_indicators.items():
            if name not in indicators_to_visualize:
                continue

            if computed_indicator.source_data != source_data:
                raise Exception("result templates in indicator batch must all be the same.")
            dataset_name = computed_indicator.indicator.dataset_name
            if dataset_name not in dataset_to_attribute_map:
                dataset_to_attribute_map[dataset_name] = []
            dataset_to_attribute_map[dataset_name].append(name)

        viz_metadata = []
        for dataset_name, indicator_names in dataset_to_attribute_map.items():
            attributes = [
                (name, computed_indicators[name].get_computed_dataset_column_name()) for name in indicator_names
            ]

            for year in source_data.years:
                # Re-point the simulation state at this year's cache before
                # pulling the dataset from the pool.
                SessionConfiguration(new_instance=True, package_order=package_order, in_storage=AttributeCache())
                SimulationState().set_cache_directory(source_data.cache_directory)
                SimulationState().set_current_time(year)
                dataset = SessionConfiguration().get_dataset_from_pool(dataset_name)
                dataset.load_dataset()

                if dataset.get_coordinate_system() is not None:
                    dataset.compute_variables(names=dataset.get_coordinate_system())

                for indicator_name, computed_name in attributes:
                    table_data = self.input_stores[year].load_table(
                        table_name=dataset_name, column_names=[computed_name]
                    )

                    if computed_name in table_data:
                        table_name = self.get_name(
                            dataset_name=dataset_name, years=[year], attribute_names=[indicator_name]
                        )

                        if self.scale:
                            min_value, max_value = self.scale
                        else:
                            min_value, max_value = (None, None)

                        # One frame per year, prefixed "anim_" so the frames
                        # can later be stitched into an animation.
                        file_path = os.path.join(
                            self.storage_location, "anim_" + table_name + "." + MapnikMap.get_file_extension(self)
                        )

                        dataset.add_attribute(name=str(computed_name), data=table_data[computed_name])

                        dataset.plot_map(
                            name=str(computed_name),
                            min_value=min_value,
                            max_value=max_value,
                            file=str(file_path),
                            my_title=str(indicator_name),
                            color_list=self.color_list,
                            range_list=self.range_list,
                            label_list=self.label_list,
                            is_animation=True,
                            year=year,
                            resolution=self.resolution,
                            page_dims=self.page_dims,
                            map_lower_left=self.map_lower_left,
                            map_upper_right=self.map_upper_right,
                            legend_lower_left=self.legend_lower_left,
                            legend_upper_right=self.legend_upper_right
                        )
                    else:
                        logger.log_warning("There is no computed indicator %s" % computed_name)

            # NOTE(review): viz_metadata is presumably appended to inside
            # create_animation (it is passed by reference) — confirm, since
            # nothing else populates it in this method.
            for indicator_name, computed_name in attributes:
                self.create_animation(
                    dataset_name=dataset_name,
                    year_list=source_data.years,
                    indicator_name=str(indicator_name),
                    viz_metadata=viz_metadata,
                )

        visualization_representations = []
        for indicator_names, table_name, years in viz_metadata:
            visualization_representations.append(
                self._get_visualization_metadata(
                    computed_indicators=computed_indicators,
                    indicators_to_visualize=indicator_names,
                    table_name=table_name,
                    years=years,
                )
            )

        return visualization_representations
Example #8
0
    def visualize(self, indicators_to_visualize, computed_indicators):
        """Create per-year animation frames for each requested indicator and
        save them to the cache directory's 'indicators' sub-directory, then
        assemble them via ``create_animation``.

        Returns a list of visualization-metadata representations built from
        ``viz_metadata``.
        """

        #TODO: eliminate this example indicator stuff
        example_indicator = computed_indicators[indicators_to_visualize[0]]
        source_data = example_indicator.source_data
        dataset_to_attribute_map = {}

        package_order = source_data.get_package_order()

        self._create_input_stores(years=source_data.years)

        # Group the requested indicators by the dataset they are computed on;
        # all indicators in a batch must share the same source_data.
        for name, computed_indicator in computed_indicators.items():
            if name not in indicators_to_visualize: continue

            if computed_indicator.source_data != source_data:
                raise Exception(
                    'result templates in indicator batch must all be the same.'
                )
            dataset_name = computed_indicator.indicator.dataset_name
            if dataset_name not in dataset_to_attribute_map:
                dataset_to_attribute_map[dataset_name] = []
            dataset_to_attribute_map[dataset_name].append(name)

        viz_metadata = []
        for dataset_name, indicator_names in dataset_to_attribute_map.items():
            attributes = [
                (name,
                 computed_indicators[name].get_computed_dataset_column_name())
                for name in indicator_names
            ]

            for year in source_data.years:
                # Re-point the simulation state at this year's cache before
                # pulling the dataset from the pool.
                SessionConfiguration(new_instance=True,
                                     package_order=package_order,
                                     in_storage=AttributeCache())
                SimulationState().set_cache_directory(
                    source_data.cache_directory)
                SimulationState().set_current_time(year)
                dataset = SessionConfiguration().get_dataset_from_pool(
                    dataset_name)
                dataset.load_dataset()

                if dataset.get_coordinate_system() is not None:
                    dataset.compute_variables(
                        names=dataset.get_coordinate_system())

                for indicator_name, computed_name in attributes:
                    table_data = self.input_stores[year].load_table(
                        table_name=dataset_name, column_names=[computed_name])

                    if computed_name in table_data:
                        table_name = self.get_name(
                            dataset_name=dataset_name,
                            years=[year],
                            attribute_names=[indicator_name])

                        if self.scale:
                            min_value, max_value = self.scale
                        else:
                            min_value, max_value = (None, None)

                        # One frame per year, prefixed 'anim_' so the frames
                        # can later be stitched into an animation.
                        file_path = os.path.join(
                            self.storage_location, 'anim_' + table_name + '.' +
                            MapnikMap.get_file_extension(self))

                        dataset.add_attribute(name=str(computed_name),
                                              data=table_data[computed_name])

                        dataset.plot_map(
                            name=str(computed_name),
                            min_value=min_value,
                            max_value=max_value,
                            file=str(file_path),
                            my_title=str(indicator_name),
                            color_list=self.color_list,
                            range_list=self.range_list,
                            label_list=self.label_list,
                            is_animation=True,
                            year=year,
                            resolution=self.resolution,
                            page_dims=self.page_dims,
                            map_lower_left=self.map_lower_left,
                            map_upper_right=self.map_upper_right,
                            legend_lower_left=self.legend_lower_left,
                            legend_upper_right=self.legend_upper_right
                        )
                    else:
                        logger.log_warning(
                            'There is no computed indicator %s' %
                            computed_name)

            # NOTE(review): viz_metadata is presumably appended to inside
            # create_animation (it is passed by reference) — confirm, since
            # nothing else populates it in this method.
            for indicator_name, computed_name in attributes:
                self.create_animation(dataset_name=dataset_name,
                                      year_list=source_data.years,
                                      indicator_name=str(indicator_name),
                                      viz_metadata=viz_metadata)

        visualization_representations = []
        for indicator_names, table_name, years in viz_metadata:
            visualization_representations.append(
                self._get_visualization_metadata(
                    computed_indicators=computed_indicators,
                    indicators_to_visualize=indicator_names,
                    table_name=table_name,
                    years=years))

        return visualization_representations