# Example no. 1 (score: 0)
 def process(self, data_object):
     '''
     Merge one file's metrics and configuration into the accumulated state.

     :param data_object: MetricManager instance
     :return: Updated data_object reference
     '''
     # fold the freshly analyzed metrics into the running merge
     for grp_key, name, metric in metric_iter(data_object.analyze()):
         current = metric_get(grp_key, name, self.merged_data)
         if current is not None:
             # already tracked: combine with the existing entry
             current.merge(metric)
         else:
             # first occurrence of this metric
             metric_add(metric, self.merged_data, grp_key, name)

     # fold in the configuration; keys that disagree across files
     # collapse to None so only common values survive
     for cfg_key, cfg_value in data_object.get_config().items():
         if cfg_key not in self.merged_config:
             self.merged_config[cfg_key] = cfg_value
         elif self.merged_config[cfg_key] != cfg_value:
             self.merged_config[cfg_key] = None
     return data_object
    def _process_metrics(self, metric_seq, folder_path, file_filter):
        '''
        Gather data for every metric in *metric_seq*, reusing cached
        results when they load cleanly and computing the remainder from
        the files found under *folder_path*.

        :param metric_seq: iterable of (group, name, metric) triples
        :param folder_path: directory to scan for input files
        :param file_filter: filter applied while locating files
        :return: dict of processed metrics keyed by group/name
        '''
        collected = {}
        pending = []
        for group, name, metric in metric_seq:
            # cached data exists AND the metric accepts it?
            # (short-circuit: load() only runs when data is present)
            loaded = self._have_data(group, name) and metric.load(
                self._get_data(group, name))
            if not loaded:
                # nothing usable cached -- compute it later
                pending.append((group, name, metric))
                continue
            logging.debug('Loading existing data for %s:%s', group, name)

            # regenerate derived artifacts that are missing or forced
            if hasattr(metric, 'create_graph') and \
                    self._get_graph(group, name) is None:
                logging.debug('Generating graph data from existing data')
                self._set_graph(metric.create_graph(), group, name)
            if hasattr(metric, 'create_summation') and (
                    metric.force_summation()
                    or self._get_sum(group, name) is None):
                logging.debug('Generating metric data from existing data')
                self._set_sum(metric.create_summation(), group, name)
            metric_add(metric, collected, group, name)

        # compute and persist everything that could not be loaded
        if pending:
            reader = JSONFileReader([m for _, _, m in pending])
            FileFinder([reader]).process(folder_path, file_filter)
            for group, name, metric in pending:
                self._set_data(metric.to_string(), group, name)
                if hasattr(metric, 'create_graph'):
                    self._set_graph(metric.create_graph(), group, name)
                if hasattr(metric, 'create_summation'):
                    self._set_sum(metric.create_summation(), group, name)
                metric_add(metric, collected, group, name)
        return collected
# Example no. 3 (score: 0)
    def process(self, data_object):
        '''
        Process a given file: register a column for any previously unseen
        summation metric, then append one table row holding this
        experiment's values.

        :param data_object: MetricManager instance
        :return: Updated data_object reference
        :raises RuntimeError: if a metric value has no registered column
            (internal state corruption; RuntimeError subclasses Exception,
            so existing callers catching Exception are unaffected)
        '''
        self.add_column('experiment_name')
        # ensure we have all the data
        data_object.analyze()
        # first pass: make sure every summation metric has a column
        for group_key, _, metric_obj in metric_iter(
                data_object.metrics['summations']):
            for metric in metric_obj:
                metric_name = metric['full_name']
                exists = metric_get(group_key, metric_name, self.metric_map)
                # adding new column
                if exists is None:
                    self.add_column(metric_name)
                    # 0-based position: columns already in the frame plus
                    # those queued for addition (this one included)
                    position = len(self.data_frame.columns) + \
                        len(self._columns_to_add) - 1
                    metric_add(position, self.metric_map, group_key,
                               metric_name)

        # build the row; slot 0 carries the experiment's hashed name
        row = [None] * (len(self.data_frame.columns) +
                        len(self._columns_to_add))
        row[0] = Configuration.get_hash_name(
            data_object.get_config(), [self.experiment_variable, 'repeat'])

        # second pass: fill values at their registered column positions
        for group_key, _, metric_obj in metric_iter(
                data_object.metrics['summations']):
            for metric in metric_obj:
                metric_name = metric['full_name']
                position = metric_get(group_key, metric_name, self.metric_map)
                if position is None:
                    # the first pass registered every metric, so a missing
                    # position means the internal map is inconsistent
                    raise RuntimeError(
                        'No column registered for metric %s:%s' %
                        (group_key, metric_name))
                row[position] = metric['value']
        self.add_row(row)
        return data_object
 def _set_sum(self, value, *args):
     '''Store a summation entry and mark the cached state as stale.'''
     target = self.metrics['summations']
     self.is_dirty = True
     metric_add(value, target, *args)
 def _set_graph(self, value, *args):
     '''Store a graph entry and mark the cached state as stale.'''
     target = self.metrics['graphs']
     self.is_dirty = True
     metric_add(value, target, *args)
 def _set_data(self, value, *args):
     '''Store a raw data entry and mark the cached state as stale.'''
     target = self.metrics['data']
     self.is_dirty = True
     metric_add(value, target, *args)