Example #1
    def aggregate_region_operation_metric_for_table_and_cluster(self):
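        """Aggregate per-region operation metrics into per-table and
        per-cluster operation metrics, then persist the results."""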
        allClusterOperationMetric = {}
        # Because the number of regions could be huge, we read region operation
        # metrics out table by table; table-level and cluster-level operation
        # metrics are then aggregated from them.
        tables = Table.objects.all()
        for table in tables:
            clusterName = table.cluster.name
            clusterOperationMetric = allClusterOperationMetric.setdefault(
                clusterName, {})
            tableOperationMetric = {}
            regions = dbutil.get_region_by_table(table)
            logger.info(
                "TableOperationMetricAggregation aggregate %d regions metric for table %s, cluster %s",
                len(regions), table.name, clusterName)

            for region in regions:
                if region.operationMetrics is None or region.operationMetrics == '':
                    continue
                regionOperationMetrics = json.loads(region.operationMetrics)
                for regionOperationName in regionOperationMetrics.keys():
                    regionOperation = regionOperationMetrics[
                        regionOperationName]
                    self.aggregate_one_region_operation_metric(
                        tableOperationMetric.setdefault(
                            regionOperationName,
                            self.make_empty_operation_metric()),
                        regionOperation)
                    self.aggregate_one_region_operation_metric(
                        clusterOperationMetric.setdefault(
                            regionOperationName,
                            self.make_empty_operation_metric()),
                        regionOperation)

            # compute avgTime for table operation metrics
            self.compute_avg_time_and_num_ops_after_aggregation(
                tableOperationMetric)
            table.operationMetrics = json.dumps(tableOperationMetric)
            table.save()

        # compute avgTime for cluster operation metrics
        clusters = HBaseCluster.objects.all()
        for cluster in clusters:
            clusterName = cluster.cluster.name
            if clusterName in allClusterOperationMetric:
                clusterOperationMetric = allClusterOperationMetric[clusterName]
                self.compute_avg_time_and_num_ops_after_aggregation(
                    clusterOperationMetric)
                cluster.operationMetrics = json.dumps(clusterOperationMetric)
                cluster.save()
        return
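
Example #1 relies on three helpers that are not shown here: make_empty_operation_metric, aggregate_one_region_operation_metric and compute_avg_time_and_num_ops_after_aggregation. Below is a minimal sketch of how such helpers could look, assuming each operation metric is a dict with numOps, totalTime and avgTime fields (the field names are an assumption, not taken from the original code); in Example #1 they are instance methods, shown here as free functions for brevity.

def make_empty_operation_metric():
    # Fresh accumulator for one operation type (assumed field names).
    return {'numOps': 0, 'totalTime': 0, 'avgTime': 0}

def aggregate_one_region_operation_metric(aggregated_metric, region_metric):
    # Fold one region's counters into the table-level or cluster-level accumulator.
    aggregated_metric['numOps'] += region_metric.get('numOps', 0)
    aggregated_metric['totalTime'] += region_metric.get('totalTime', 0)

def compute_avg_time_and_num_ops_after_aggregation(operation_metrics):
    # Derive avgTime for each operation once all regions have been accumulated.
    for metric in operation_metrics.values():
        if metric['numOps'] > 0:
            metric['avgTime'] = metric['totalTime'] / metric['numOps']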
Example #2
def aggregate_region_operation_metric_in_process(output_queue, task_data):
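    """Aggregate per-region operation metrics into per-table and per-cluster
    metrics, then put a QueueTask on output_queue to signal completion."""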
    allClusterOperationMetric = {}
    # Because the number of regions could be huge, we read region operation
    # metrics out table by table; table-level and cluster-level operation
    # metrics are then aggregated from them.
    tables = Table.objects.all()
    for table in tables:
        clusterName = table.cluster.name
        clusterOperationMetric = allClusterOperationMetric.setdefault(clusterName, {})
        tableOperationMetric = {}
        regions = dbutil.get_region_by_table(table)
        logger.info(
            "TableOperationMetricAggregation aggregate %d " "regions metric for table %s, cluster %s",
            len(regions),
            table.name,
            clusterName,
        )

        for region in regions:
            if region.operationMetrics is None or region.operationMetrics == "":
                continue
            regionOperationMetrics = json.loads(region.operationMetrics)
            for regionOperationName in regionOperationMetrics.keys():
                regionOperation = regionOperationMetrics[regionOperationName]
                aggregate_one_region_operation_metric(
                    tableOperationMetric.setdefault(regionOperationName, make_empty_operation_metric()), regionOperation
                )
                aggregate_one_region_operation_metric(
                    clusterOperationMetric.setdefault(regionOperationName, make_empty_operation_metric()),
                    regionOperation,
                )

        # compute avgTime for table operation metrics
        compute_avg_time_and_num_ops_after_aggregation(tableOperationMetric)
        table.operationMetrics = json.dumps(tableOperationMetric)
        table.save()

    # compute avgTime for cluster operation metrics
    clusters = HBaseCluster.objects.all()
    for cluster in clusters:
        clusterName = cluster.cluster.name
        if clusterName in allClusterOperationMetric:
            clusterOperationMetric = allClusterOperationMetric[clusterName]
            compute_avg_time_and_num_ops_after_aggregation(clusterOperationMetric)
            cluster.operationMetrics = json.dumps(clusterOperationMetric)
            cluster.save()

    output_queue.put(QueueTask(AGGREGATE_TASK_TYPE, None))
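
A possible driver for this worker variant, assuming Python's standard multiprocessing module; the driver function itself is hypothetical, while aggregate_region_operation_metric_in_process and QueueTask come from the example above. task_data is passed as None here because the snippet does not show how it is used.

from multiprocessing import Process, Queue

def run_aggregation_in_background():
    # Hypothetical driver: run the aggregation in a child process and wait
    # for the QueueTask it puts on output_queue when it finishes.
    output_queue = Queue()
    worker = Process(
        target=aggregate_region_operation_metric_in_process,
        args=(output_queue, None))
    worker.start()
    completion_task = output_queue.get()  # blocks until the worker signals completion
    worker.join()
    return completion_task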
Example #3
  def aggregate_region_operation_metric_for_table_and_cluster(self):
    allClusterOperationMetric = {}
    # Because the number of regions could be huge, we read region operation
    # metrics out table by table; table-level and cluster-level operation
    # metrics are then aggregated from them.
    tables = Table.objects.all()
    for table in tables:
      clusterName = table.cluster.name
      clusterOperationMetric = allClusterOperationMetric.setdefault(clusterName, {})
      tableOperationMetric = {}
      regions = dbutil.get_region_by_table(table)
      logger.info(
          "TableOperationMetricAggregation aggregate %d regions metric for table %s, cluster %s",
          len(regions), table.name, clusterName)

      for region in regions:
        if region.operationMetrics is None or region.operationMetrics == '':
          continue
        regionOperationMetrics = json.loads(region.operationMetrics)
        for regionOperationName in regionOperationMetrics.keys():
          regionOperation = regionOperationMetrics[regionOperationName]
          self.aggregate_one_region_operation_metric(tableOperationMetric.setdefault(regionOperationName,
                                                    self.make_empty_operation_metric()), regionOperation)
          self.aggregate_one_region_operation_metric(clusterOperationMetric.setdefault(regionOperationName,
                                                     self.make_empty_operation_metric()), regionOperation)

      # compute avgTime for table operation metrics
      self.compute_avg_time_and_num_ops_after_aggregation(tableOperationMetric)
      table.operationMetrics = json.dumps(tableOperationMetric)
      table.save()

    # compute avgTime for cluster operation metrics
    clusters = HBaseCluster.objects.all()
    for cluster in clusters:
      clusterName = cluster.cluster.name
      if clusterName in allClusterOperationMetric:
        clusterOperationMetric = allClusterOperationMetric[clusterName]
        self.compute_avg_time_and_num_ops_after_aggregation(clusterOperationMetric)
        cluster.operationMetrics = json.dumps(clusterOperationMetric)
        cluster.save()
    return