Example #1
 def testDeleteStatsFromLegacyDB(self):
   with test_lib.ConfigOverrider({"StatsStore.stats_ttl_hours": 1}):
     timestamp1 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1)
     timestamp2 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3600)
     timestamp3 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(4800)
     with test_lib.FakeTime(timestamp1):
       stats_collector_instance.Get().IncrementCounter(_SINGLE_DIM_COUNTER)
       stats_store._WriteStats(process_id="fake_process_id")
       expected_results = {
           "fake_process_id": {
               _SINGLE_DIM_COUNTER: [(1, timestamp1)]
           }
       }
       self.assertDictEqual(
           stats_store.ReadStats("f", _SINGLE_DIM_COUNTER), expected_results)
     with test_lib.FakeTime(timestamp2):
       stats_store._WriteStats(process_id="fake_process_id")
       expected_results = {
           "fake_process_id": {
               _SINGLE_DIM_COUNTER: [(1, timestamp1), (1, timestamp2)]
           }
       }
       self.assertDictEqual(
           stats_store.ReadStats("f", _SINGLE_DIM_COUNTER), expected_results)
     with test_lib.FakeTime(timestamp3):
       stats_store._DeleteStatsFromLegacyDB("fake_process_id")
       # timestamp1 is older than 1h, so it should get deleted.
       expected_results = {
           "fake_process_id": {
               _SINGLE_DIM_COUNTER: [(1, timestamp2)]
           }
       }
       self.assertDictEqual(
           stats_store.ReadStats("f", _SINGLE_DIM_COUNTER), expected_results)
Example #2
 def testPurgeServerStats(self):
     if not data_store.RelationalDBReadEnabled():
         self.skipTest("Test is only for the relational DB. Skipping...")
     fake_stats_collector = default_stats_collector.DefaultStatsCollector([
         stats_utils.CreateCounterMetadata("fake_counter"),
     ])
     timestamp0 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1)
     timestamp1 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(2)
     timestamp2 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3600)
     timestamp3 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(4800)
     config_overrides = {
         "Database.useForReads.stats": True,
         "StatsStore.stats_ttl_hours": 1
     }
     with test_lib.ConfigOverrider(config_overrides), \
          stats_test_utils.FakeStatsContext(fake_stats_collector), \
          mock.patch.object(system, "_STATS_DELETION_BATCH_SIZE", 1):
         with test_lib.FakeTime(rdfvalue.RDFDatetime(timestamp0)):
             stats_store._WriteStats(process_id="fake_process_id")
         with test_lib.FakeTime(rdfvalue.RDFDatetime(timestamp1)):
             stats_collector_instance.Get().IncrementCounter("fake_counter")
             stats_store._WriteStats(process_id="fake_process_id")
             expected_results = {
                 "fake_process_id": {
                     "fake_counter": [(0, timestamp0), (1, timestamp1)]
                 }
             }
             self.assertDictEqual(
                 stats_store.ReadStats("f", "fake_counter"),
                 expected_results)
         with test_lib.FakeTime(timestamp2):
             stats_store._WriteStats(process_id="fake_process_id")
             expected_results = {
                 "fake_process_id": {
                     "fake_counter": [(0, timestamp0), (1, timestamp1),
                                      (1, timestamp2)]
                 }
             }
             self.assertDictEqual(
                 stats_store.ReadStats("f", "fake_counter"),
                 expected_results)
         with test_lib.FakeTime(timestamp3):
             cron = system.PurgeServerStatsCronJob(
                 rdf_cronjobs.CronJobRun(), rdf_cronjobs.CronJob())
             cron.Run()
             # timestamp0 and timestamp1 are older than 1h, so they should get
             # deleted.
             expected_results = {
                 "fake_process_id": {
                     "fake_counter": [(1, timestamp2)]
                 }
             }
             self.assertDictEqual(
                 stats_store.ReadStats("f", "fake_counter"),
                 expected_results)
             self.assertIn("Deleted 2 stats entries.",
                           cron.run_state.log_message)
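
Patching _STATS_DELETION_BATCH_SIZE to 1 forces the cron job to delete its two expired entries one at a time, so the test exercises the batching loop rather than a single bulk delete. A rough sketch of that loop, under assumed names (delete_batch is a hypothetical callback, not a GRR API):

    # Illustrative sketch only: delete expired entries in fixed-size batches
    # and report the total, as the patched batch size of 1 exercises above.
    def purge_in_batches(expired_entries, batch_size, delete_batch):
        deleted = 0
        for i in range(0, len(expired_entries), batch_size):
            batch = expired_entries[i:i + batch_size]
            delete_batch(batch)
            deleted += len(batch)
        return "Deleted %d stats entries." % deleted

    # Two expired entries with batch_size=1 mean two single-entry deletes,
    # matching the "Deleted 2 stats entries." log asserted in the test.
    assert purge_in_batches(["e0", "e1"], 1,
                            lambda batch: None) == "Deleted 2 stats entries."
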
Example #3
 def testPurgeServerStats(self):
     if not data_store.RelationalDBReadEnabled():
         self.skipTest("Test is only for the relational DB. Skipping...")
     fake_stats_collector = default_stats_collector.DefaultStatsCollector([
         stats_utils.CreateCounterMetadata("fake_counter"),
     ])
     timestamp1 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1)
     timestamp2 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3600)
     timestamp3 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(4800)
     config_overrides = {
         "Database.useForReads.stats": True,
         "StatsStore.stats_ttl_hours": 1
     }
     with test_lib.ConfigOverrider(config_overrides):
         with stats_test_utils.FakeStatsContext(fake_stats_collector):
             with test_lib.FakeTime(rdfvalue.RDFDatetime(timestamp1)):
                 stats_collector_instance.Get().IncrementCounter(
                     "fake_counter")
                 stats_store._WriteStats(process_id="fake_process_id")
                 expected_results = {
                     "fake_process_id": {
                         "fake_counter": [(1, timestamp1)]
                     }
                 }
                 self.assertDictEqual(
                     stats_store.ReadStats("f", "fake_counter"),
                     expected_results)
             with test_lib.FakeTime(timestamp2):
                 stats_store._WriteStats(process_id="fake_process_id")
                 expected_results = {
                     "fake_process_id": {
                         "fake_counter": [(1, timestamp1), (1, timestamp2)]
                     }
                 }
                 self.assertDictEqual(
                     stats_store.ReadStats("f", "fake_counter"),
                     expected_results)
             with test_lib.FakeTime(timestamp3):
                 system.PurgeServerStatsCronJob(
                     rdf_cronjobs.CronJobRun(),
                     rdf_cronjobs.CronJob()).Run()
                 # timestamp1 is older than 1h, so it should get deleted.
                 expected_results = {
                     "fake_process_id": {
                         "fake_counter": [(1, timestamp2)]
                     }
                 }
                 self.assertDictEqual(
                     stats_store.ReadStats("f", "fake_counter"),
                     expected_results)
Example #4
    def testReadStats(self):
        with test_lib.FakeTime(rdfvalue.RDFDatetime(1000)):
            stats_collector_instance.Get().IncrementCounter(
                _SINGLE_DIM_COUNTER)
            stats_collector_instance.Get().IncrementCounter(
                _COUNTER_WITH_ONE_FIELD, fields=["fieldval1"])
            stats_collector_instance.Get().IncrementCounter(
                _COUNTER_WITH_TWO_FIELDS, fields=["fieldval2", 3])
            stats_store._WriteStats(process_id="fake_process_id")

        with test_lib.FakeTime(rdfvalue.RDFDatetime(2000)):
            stats_collector_instance.Get().IncrementCounter(
                _COUNTER_WITH_TWO_FIELDS, fields=["fieldval2", 3])
            stats_collector_instance.Get().IncrementCounter(
                _COUNTER_WITH_TWO_FIELDS, fields=["fieldval2", 4])
            stats_store._WriteStats(process_id="fake_process_id")

        expected_single_dim_results = {
            "fake_process_id": {
                _SINGLE_DIM_COUNTER: [(1, 1000), (1, 2000)]
            }
        }
        expected_multi_dim1_results = {
            "fake_process_id": {
                _COUNTER_WITH_ONE_FIELD: {
                    "fieldval1": [(1, 1000), (1, 2000)]
                }
            }
        }
        expected_multi_dim2_results = {
            "fake_process_id": {
                _COUNTER_WITH_TWO_FIELDS: {
                    "fieldval2": {
                        3: [(1, 1000), (2, 2000)],
                        4: [(1, 2000)]
                    }
                }
            }
        }

        self.assertDictEqual(stats_store.ReadStats("f", _SINGLE_DIM_COUNTER),
                             expected_single_dim_results)
        self.assertDictEqual(
            stats_store.ReadStats("fake", _COUNTER_WITH_ONE_FIELD),
            expected_multi_dim1_results)
        self.assertEqual(
            stats_store.ReadStats("fake_process_id", _COUNTER_WITH_TWO_FIELDS),
            expected_multi_dim2_results)
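
Note that the three ReadStats calls pass "f", "fake", and "fake_process_id" and all resolve to the same process: the first argument is evidently treated as a process-id prefix. A toy sketch of that matching behavior (read_stats is a stand-in, not GRR's function):

    # Illustrative sketch only: select every stored process whose id starts
    # with the given prefix, as the three assertions above rely on.
    def read_stats(stats_by_process, process_id_prefix, metric_name):
        return {
            pid: {metric_name: data[metric_name]}
            for pid, data in stats_by_process.items()
            if pid.startswith(process_id_prefix) and metric_name in data
        }

    store = {"fake_process_id": {"counter": [(1, 1000), (1, 2000)]}}
    assert (read_stats(store, "f", "counter") ==
            read_stats(store, "fake_process_id", "counter"))
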
Example #5
File: stats.py Project: thetraker/grr
    def Handle(self, args, token):
        start_time = args.start
        end_time = args.end
        if not end_time:
            end_time = rdfvalue.RDFDatetime.Now()
        if not start_time:
            start_time = end_time - rdfvalue.Duration("1h")

        # Run for a little extra time at the start. This improves the quality of the
        # first data points of counter metrics which don't appear in every interval.
        base_start_time = start_time
        # pylint: disable=g-no-augmented-assignment
        start_time = start_time - rdfvalue.Duration("10m")
        # pylint: enable=g-no-augmented-assignment

        if end_time <= start_time:
            raise ValueError("End time can't be less than start time.")

        result = ApiStatsStoreMetric(start=base_start_time,
                                     end=end_time,
                                     metric_name=args.metric_name)

        data = stats_store.ReadStats(unicode(args.component.name.lower()),
                                     args.metric_name,
                                     time_range=(start_time, end_time),
                                     token=token)

        if not data:
            return result

        metric_metadata = stats_collector_instance.Get().GetMetricMetadata(
            args.metric_name)

        query = stats_store.StatsStoreDataQuery(data)
        query.In(args.component.name.lower() + ".*").In(args.metric_name)
        if metric_metadata.fields_defs:
            query.InAll()

        requested_duration = end_time - start_time
        if requested_duration >= rdfvalue.Duration("1d"):
            sampling_duration = rdfvalue.Duration("5m")
        elif requested_duration >= rdfvalue.Duration("6h"):
            sampling_duration = rdfvalue.Duration("1m")
        else:
            sampling_duration = rdfvalue.Duration("30s")

        if metric_metadata.metric_type == metric_metadata.MetricType.COUNTER:
            query.TakeValue().MakeIncreasing().Normalize(
                sampling_duration,
                start_time,
                end_time,
                mode=timeseries.NORMALIZE_MODE_COUNTER)
        elif metric_metadata.metric_type == metric_metadata.MetricType.EVENT:
            if args.distribution_handling_mode == "DH_SUM":
                query.TakeDistributionSum()
            elif args.distribution_handling_mode == "DH_COUNT":
                query.TakeDistributionCount()
            else:
                raise ValueError(
                    "Unexpected request.distribution_handling_mode "
                    "value: %s." % args.distribution_handling_mode)
            query.MakeIncreasing()
            query.Normalize(sampling_duration,
                            start_time,
                            end_time,
                            mode=timeseries.NORMALIZE_MODE_COUNTER)

        elif metric_metadata.metric_type == metric_metadata.MetricType.GAUGE:
            query.TakeValue().Normalize(sampling_duration, start_time,
                                        end_time)
        else:
            raise RuntimeError("Unsupported metric type.")

        if args.aggregation_mode == "AGG_SUM":
            query.AggregateViaSum()
        elif args.aggregation_mode == "AGG_MEAN":
            query.AggregateViaMean()
        elif args.aggregation_mode == "AGG_NONE":
            pass
        else:
            raise ValueError("Unexpected request.aggregation value: %s." %
                             args.aggregation)

        if (args.rate and metric_metadata.metric_type !=
                metric_metadata.MetricType.GAUGE):
            query.Rate()

        query.InTimeRange(base_start_time, end_time)

        for value, timestamp in query.ts.data:
            if value is not None:
                result.data_points.append(
                    ApiStatsStoreMetricDataPoint(timestamp=timestamp,
                                                 value=value))

        return result
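
Two details of Handle are worth restating. First, the sampling interval scales with the requested window: at least a day of data is sampled every five minutes, at least six hours every minute, and anything shorter every thirty seconds. The same ladder in plain seconds, as a sketch:

    # Illustrative restatement of the sampling ladder above, in plain seconds.
    def sampling_duration_secs(requested_secs):
        if requested_secs >= 24 * 3600:   # >= 1 day
            return 5 * 60
        elif requested_secs >= 6 * 3600:  # >= 6 hours
            return 60
        else:
            return 30

    # The default 1-hour window (plus the 10-minute head start) uses 30s samples.
    assert sampling_duration_secs(70 * 60) == 30

Second, counters and events go through MakeIncreasing before normalization. Raw counter values can drop when a process restarts, and a monotone series is needed before rates can be computed; the usual technique folds each observed drop into a running offset. A sketch of that idea (not necessarily GRR's exact algorithm):

    # Illustrative sketch only: treat any drop in a counter series as a reset
    # and carry the pre-reset total forward as an offset.
    def make_increasing(values):
        result, offset, prev = [], 0, None
        for v in values:
            if prev is not None and v < prev:
                offset += prev
            result.append(v + offset)
            prev = v
        return result

    # A restart after the third sample (5 -> 2) keeps the series non-decreasing.
    assert make_increasing([1, 3, 5, 2, 4]) == [1, 3, 5, 7, 9]
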
Example #6
 def testPurgeServerStats(self):
     if not data_store.RelationalDBReadEnabled():
         self.skipTest("Test is only for the relational DB. Skipping...")
     fake_stats_collector = prometheus_stats_collector.PrometheusStatsCollector(
         [
             stats_utils.CreateCounterMetadata("fake_counter"),
         ])
     timestamp0 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(1)
     timestamp1 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(2)
     timestamp2 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(3600)
     timestamp3 = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(4800)
     config_overrides = {
         "Database.useForReads.stats": True,
         "StatsStore.stats_ttl_hours": 1
     }
     zero_duration = rdfvalue.Duration(0)
     # Backslash continuation is explicitly allowed by Google's style guide for
     # nested context manager expressions spanning 3 or more lines.
     # pylint: disable=g-backslash-continuation
     with test_lib.ConfigOverrider(config_overrides), \
          stats_test_utils.FakeStatsContext(fake_stats_collector), \
          mock.patch.object(system, "_STATS_DELETION_BATCH_SIZE", 1), \
          mock.patch.object(system, "_stats_checkpoint_period", zero_duration):
         with test_lib.FakeTime(rdfvalue.RDFDatetime(timestamp0)):
             stats_store._WriteStats(process_id="fake_process_id")
         with test_lib.FakeTime(rdfvalue.RDFDatetime(timestamp1)):
             stats_collector_instance.Get().IncrementCounter("fake_counter")
             stats_store._WriteStats(process_id="fake_process_id")
             expected_results = {
                 "fake_process_id": {
                     "fake_counter": [(0, timestamp0), (1, timestamp1)]
                 }
             }
             self.assertDictEqual(
                 stats_store.ReadStats("f", "fake_counter"),
                 expected_results)
         with test_lib.FakeTime(timestamp2):
             stats_store._WriteStats(process_id="fake_process_id")
             expected_results = {
                 "fake_process_id": {
                     "fake_counter": [(0, timestamp0), (1, timestamp1),
                                      (1, timestamp2)]
                 }
             }
             self.assertDictEqual(
                 stats_store.ReadStats("f", "fake_counter"),
                 expected_results)
         with test_lib.FakeTime(timestamp3):
             cron_name = compatibility.GetName(
                 system.PurgeServerStatsCronJob)
             cronjobs.ScheduleSystemCronJobs(names=[cron_name])
             job_data = data_store.REL_DB.ReadCronJobs([cron_name])[0]
             cron_run = rdf_cronjobs.CronJobRun(cron_job_id=cron_name)
             cron_run.GenerateRunId()
             cron_run.started_at = rdfvalue.RDFDatetime.Now()
             cron = system.PurgeServerStatsCronJob(cron_run, job_data)
             cron.Run()
             # timestamp0 and timestamp1 are older than 1h, so they should get
             # deleted.
             expected_results = {
                 "fake_process_id": {
                     "fake_counter": [(1, timestamp2)]
                 }
             }
             self.assertDictEqual(
                 stats_store.ReadStats("f", "fake_counter"),
                 expected_results)
             self.assertEqual(
                 "Deleted 2 stats entries.\nDeleted 1 stats entries.",
                 cron.run_state.log_message)
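
Compared with Example #2, this version also patches _stats_checkpoint_period to a zero Duration, so the cron job flushes a progress line whenever a checkpoint falls due instead of logging only once at the end, which is why run_state.log_message accumulates two "Deleted N stats entries." lines. A rough sketch of checkpointed progress logging, under assumed names (not GRR's implementation):

    import time

    # Illustrative sketch only: flush a progress line whenever the checkpoint
    # period has elapsed, then flush whatever remains at the end of the run.
    def purge_with_checkpoints(batches, delete_batch, log,
                               checkpoint_period_secs=600):
        deleted_since_flush = 0
        last_flush = time.time()
        for batch in batches:
            delete_batch(batch)
            deleted_since_flush += len(batch)
            if time.time() - last_flush >= checkpoint_period_secs:
                log("Deleted %d stats entries." % deleted_since_flush)
                deleted_since_flush = 0
                last_flush = time.time()
        if deleted_since_flush:
            log("Deleted %d stats entries." % deleted_since_flush)
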