Example #1
  def testMultipleDatasetWithPrefixes(self, dataset_transformation):
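    # Two datasets share one StatsAggregator under different prefixes; the
    # aggregator's summary should track each prefixed latency tag separately.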
    aggregator = stats_aggregator.StatsAggregator()
    dataset = dataset_ops.Dataset.range(100).apply(
        stats_ops.latency_stats("record_latency"))
    dataset = dataset_transformation(dataset, aggregator, prefix="dataset1")
    dataset2 = dataset_ops.Dataset.range(100).apply(
        stats_ops.latency_stats("record_latency"))
    dataset2 = dataset_transformation(dataset2, aggregator, prefix="dataset2")
    iterator_0 = dataset_ops.make_initializable_iterator(dataset)
    iterator_1 = dataset_ops.make_initializable_iterator(dataset2)
    next_element = iterator_0.get_next() + iterator_1.get_next()
    summary_t = aggregator.get_summary()

    with self.test_session() as sess:
      self.evaluate([iterator_0.initializer, iterator_1.initializer])
      for i in range(100):
        self.assertEqual(i * 2, self.evaluate(next_element))
        self._assertSummaryHasCount(
            self.evaluate(summary_t), "dataset1_record_latency", float(i + 1))
        self._assertSummaryHasCount(
            self.evaluate(summary_t), "dataset2_record_latency", float(i + 1))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)
      self._assertSummaryHasCount(
          self.evaluate(summary_t), "dataset1_record_latency", 100.0)
      self._assertSummaryHasCount(
          self.evaluate(summary_t), "dataset2_record_latency", 100.0)
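For orientation: the dataset_transformation and datasetExperimentalStats helpers that these tests pass around only differ in how they attach the aggregator to the dataset. Below is a minimal standalone sketch of that pattern, assuming a TensorFlow release that still ships the experimental tf.data stats API and using the public tf.data.experimental names instead of the internal test modules; the exact option and function names are version-dependent.

import tensorflow as tf

# Collects statistics (here, per-element latency) emitted by instrumented datasets.
aggregator = tf.data.experimental.StatsAggregator()

# Instrument the pipeline with a latency statistic under the tag "record_latency".
dataset = tf.data.Dataset.range(100).apply(
    tf.data.experimental.latency_stats("record_latency"))

# Route the statistics to the aggregator through dataset options; the prefix is
# what makes tags show up as "dataset1::record_latency" in the newer tests.
options = tf.data.Options()
options.experimental_stats.aggregator = aggregator
options.experimental_stats.prefix = "dataset1"
dataset = dataset.with_options(options)

# In graph mode the aggregated statistics are exposed as a summary tensor, which
# is what the get_summary() calls in these tests evaluate.
summary_t = aggregator.get_summary()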
Example #2
  def testMultipleDatasetWithPrefixes(self):
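    # Same scenario as Example #1, written against the newer test helpers:
    # statistics are read through a handle and tags use the "prefix::name" form.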
    aggregator = stats_aggregator.StatsAggregator()
    dataset = dataset_ops.Dataset.range(100).apply(
        stats_ops.latency_stats("record_latency"))
    dataset = self.datasetExperimentalStats(
        dataset, aggregator, prefix="dataset1")
    dataset2 = dataset_ops.Dataset.range(100).apply(
        stats_ops.latency_stats("record_latency"))
    dataset2 = self.datasetExperimentalStats(
        dataset2, aggregator, prefix="dataset2")
    next_element1 = self.getNext(dataset, requires_initialization=True)
    next_element2 = self.getNext(dataset2, requires_initialization=True)

    for i in range(100):
      self.assertEqual(i * 2, self.evaluate(next_element1() + next_element2()))
      handle = self.getHandle(aggregator)
      self.assertStatisticsHasCount(
          handle, "dataset1::record_latency", float(i + 1), 2 * i + 3, offset=1)
      self.assertStatisticsHasCount(handle, "dataset2::record_latency",
                                    float(i + 1), 2 * i + 3)
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element1())
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element2())
    handle = self.getHandle(aggregator)
    self.assertStatisticsHasCount(
        handle, "dataset1::record_latency", 100.0, 201, offset=1)
    self.assertStatisticsHasCount(handle, "dataset2::record_latency", 100.0,
                                  201)
Example #3
  def testMultipleDatasetWithPrefixes(self, dataset_transformation):
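    # Parameterized over how the aggregator is attached; asserts the
    # "dataset1_record_latency"-style tag names in the aggregator summary.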
    aggregator = stats_aggregator.StatsAggregator()
    dataset = dataset_ops.Dataset.range(100).apply(
        stats_ops.latency_stats("record_latency"))
    dataset = dataset_transformation(dataset, aggregator, prefix="dataset1")
    dataset2 = dataset_ops.Dataset.range(100).apply(
        stats_ops.latency_stats("record_latency"))
    dataset2 = dataset_transformation(dataset2, aggregator, prefix="dataset2")
    next_element1 = self.getNext(dataset, requires_initialization=True)
    next_element2 = self.getNext(dataset2, requires_initialization=True)

    for i in range(100):
      self.assertEqual(i * 2, self.evaluate(next_element1() + next_element2()))
      self._assertSummaryHasCount(
          self.evaluate(aggregator.get_summary()), "dataset1_record_latency",
          float(i + 1))
      self._assertSummaryHasCount(
          self.evaluate(aggregator.get_summary()), "dataset2_record_latency",
          float(i + 1))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element1())
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element2())
    self._assertSummaryHasCount(
        self.evaluate(aggregator.get_summary()), "dataset1_record_latency",
        100.0)
    self._assertSummaryHasCount(
        self.evaluate(aggregator.get_summary()), "dataset2_record_latency",
        100.0)
Example #4
  def testMultipleDatasetWithPrefixes(self, dataset_transformation):
    aggregator = stats_aggregator.StatsAggregator()
    dataset = dataset_ops.Dataset.range(100).apply(
        stats_ops.latency_stats("record_latency"))
    dataset = dataset_transformation(dataset, aggregator, prefix="dataset1")
    dataset2 = dataset_ops.Dataset.range(100).apply(
        stats_ops.latency_stats("record_latency"))
    dataset2 = dataset_transformation(dataset2, aggregator, prefix="dataset2")
    next_element1 = self.getNext(dataset, requires_initialization=True)
    next_element2 = self.getNext(dataset2, requires_initialization=True)

    for i in range(100):
      self.assertEqual(i * 2, self.evaluate(next_element1() + next_element2()))
      self._assertSummaryHasCount(
          self.evaluate(aggregator.get_summary()), "dataset1_record_latency",
          float(i + 1))
      self._assertSummaryHasCount(
          self.evaluate(aggregator.get_summary()), "dataset2_record_latency",
          float(i + 1))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element1())
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element2())
    self._assertSummaryHasCount(
        self.evaluate(aggregator.get_summary()), "dataset1_record_latency",
        100.0)
    self._assertSummaryHasCount(
        self.evaluate(aggregator.get_summary()), "dataset2_record_latency",
        100.0)
Example #5
    def testMultipleTags(self):
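        # One dataset records latency under two distinct tags; each tag's count
        # grows by one per element.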
        aggregator = stats_aggregator.StatsAggregator()
        dataset = dataset_ops.Dataset.range(100).apply(
            stats_ops.latency_stats("record_latency")).apply(
                stats_ops.latency_stats("record_latency_2"))
        dataset = self.datasetExperimentalStats(dataset, aggregator)

        next_element = self.getNext(dataset, requires_initialization=True)

        for i in range(100):
            self.assertEqual(i, self.evaluate(next_element()))
            handle = self.getHandle(aggregator)
            self.assertStatisticsHasCount(handle,
                                          "record_latency",
                                          float(i + 1),
                                          2 * i + 3,
                                          offset=1)
            self.assertStatisticsHasCount(handle, "record_latency_2",
                                          float(i + 1), 2 * i + 3)
        with self.assertRaises(errors.OutOfRangeError):
            self.evaluate(next_element())
        handle = self.getHandle(aggregator)
        self.assertStatisticsHasCount(handle,
                                      "record_latency",
                                      100.0,
                                      201,
                                      offset=1)
        self.assertStatisticsHasCount(handle, "record_latency_2", 100.0, 201)
Example #6
  def testMultipleDatasetWithTags(self):
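    # Older API variant: the aggregator is attached explicitly with
    # stats_ops.set_stats_aggregator(...) together with a per-dataset tag prefix.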
    stats_aggregator = stats_ops.StatsAggregator()
    dataset = dataset_ops.Dataset.range(100).apply(
        stats_ops.latency_stats("record_latency")).apply(
            stats_ops.set_stats_aggregator(stats_aggregator, "dataset1"))
    dataset2 = dataset_ops.Dataset.range(100).apply(
        stats_ops.latency_stats("record_latency")).apply(
            stats_ops.set_stats_aggregator(stats_aggregator, "dataset2"))
    iterator_0 = dataset.make_initializable_iterator()
    iterator_1 = dataset2.make_initializable_iterator()
    next_element = iterator_0.get_next() + iterator_1.get_next()
    summary_t = stats_aggregator.get_summary()

    with self.test_session() as sess:
      sess.run([iterator_0.initializer, iterator_1.initializer])
      for i in range(100):
        self.assertEqual(i * 2, sess.run(next_element))
        self._assertSummaryHasCount(
            sess.run(summary_t), "dataset1_record_latency", float(i + 1))
        self._assertSummaryHasCount(
            sess.run(summary_t), "dataset2_record_latency", float(i + 1))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
      self._assertSummaryHasCount(
          sess.run(summary_t), "dataset1_record_latency", 100.0)
      self._assertSummaryHasCount(
          sess.run(summary_t), "dataset2_record_latency", 100.0)
Example #7
    def testMultipleDatasetWithPrefixes(self, dataset_transformation):
        aggregator = stats_aggregator.StatsAggregator()
        dataset = dataset_ops.Dataset.range(100).apply(
            stats_ops.latency_stats("record_latency"))
        dataset = dataset_transformation(dataset,
                                         aggregator,
                                         prefix="dataset1")
        dataset2 = dataset_ops.Dataset.range(100).apply(
            stats_ops.latency_stats("record_latency"))
        dataset2 = dataset_transformation(dataset2,
                                          aggregator,
                                          prefix="dataset2")
        iterator_0 = dataset.make_initializable_iterator()
        iterator_1 = dataset2.make_initializable_iterator()
        next_element = iterator_0.get_next() + iterator_1.get_next()
        summary_t = aggregator.get_summary()

        with self.test_session() as sess:
            sess.run([iterator_0.initializer, iterator_1.initializer])
            for i in range(100):
                self.assertEqual(i * 2, self.evaluate(next_element))
                self._assertSummaryHasCount(sess.run(summary_t),
                                            "dataset1_record_latency",
                                            float(i + 1))
                self._assertSummaryHasCount(sess.run(summary_t),
                                            "dataset2_record_latency",
                                            float(i + 1))
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(next_element)
            self._assertSummaryHasCount(sess.run(summary_t),
                                        "dataset1_record_latency", 100.0)
            self._assertSummaryHasCount(sess.run(summary_t),
                                        "dataset2_record_latency", 100.0)
Example #8
  def testRepeatedTags(self, dataset_transformation):
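    # The same tag is recorded twice per element, so the summary count advances
    # by two for every element produced.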
    aggregator = stats_aggregator.StatsAggregator()
    dataset = dataset_ops.Dataset.range(100).apply(
        stats_ops.latency_stats("record_latency")).apply(
            stats_ops.latency_stats("record_latency"))
    dataset = dataset_transformation(dataset, aggregator)
    next_element = self.getNext(dataset, requires_initialization=True)

    for i in range(100):
      self.assertEqual(i, self.evaluate(next_element()))
      self._assertSummaryHasCount(
          self.evaluate(aggregator.get_summary()), "record_latency",
          float(2 * (i + 1)))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element())
    self._assertSummaryHasCount(
        self.evaluate(aggregator.get_summary()), "record_latency", 200.0)
Example #9
  def testRepeatedTags(self):
    aggregator = stats_aggregator.StatsAggregator()
    dataset = dataset_ops.Dataset.range(100).apply(
        stats_ops.latency_stats("record_latency")).apply(
            stats_ops.latency_stats("record_latency"))
    dataset = self.datasetExperimentalStats(dataset, aggregator)
    next_element = self.getNext(dataset, requires_initialization=True)

    for i in range(100):
      self.assertEqual(i, self.evaluate(next_element()))
      handle = self.getHandle(aggregator)
      self.assertStatisticsHasCount(handle, "record_latency",
                                    float(2 * (i + 1)), 2 * i + 3)
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element())
    handle = self.getHandle(aggregator)
    self.assertStatisticsHasCount(handle, "record_latency", 200.0, 201)
Example #10
  def testRepeatedTags(self, dataset_transformation):
    aggregator = stats_aggregator.StatsAggregator()
    dataset = dataset_ops.Dataset.range(100).apply(
        stats_ops.latency_stats("record_latency")).apply(
            stats_ops.latency_stats("record_latency"))
    dataset = dataset_transformation(dataset, aggregator)
    next_element = self.getNext(dataset, requires_initialization=True)

    for i in range(100):
      self.assertEqual(i, self.evaluate(next_element()))
      self._assertSummaryHasCount(
          self.evaluate(aggregator.get_summary()), "record_latency",
          float(2 * (i + 1)))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element())
    self._assertSummaryHasCount(
        self.evaluate(aggregator.get_summary()), "record_latency", 200.0)
Example #11
  def test_latency_stats_invalid_tag_shape(self):
    with self.assertRaisesRegexp(
        ValueError, "Shape must be rank 0 but is rank 1"):
      # pylint: disable=g-long-lambda
      self.run_core_tests(
          lambda: dataset_ops.Dataset.range(100).apply(
              stats_ops.latency_stats(["record_latency", "record_latency_2"])),
          100)
Example #12
  def test_latency_stats_invalid_tag_shape(self):
    with self.assertRaisesRegexp(
        ValueError, "Shape must be rank 0 but is rank 1"):
      # pylint: disable=g-long-lambda
      self.run_core_tests(
          lambda: dataset_ops.Dataset.range(100).apply(
              stats_ops.latency_stats(["record_latency", "record_latency_2"])),
          None, 100)
Example #13
    def testNoAggregatorRegistered(self):
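        # With no StatsAggregator attached, latency_stats behaves as a
        # pass-through and the dataset still yields all 100 elements.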
        dataset = dataset_ops.Dataset.range(100).apply(
            stats_ops.latency_stats("record_latency"))

        next_element = self.getNext(dataset, requires_initialization=True)

        for i in range(100):
            self.assertEqual(i, self.evaluate(next_element()))
        with self.assertRaises(errors.OutOfRangeError):
            self.evaluate(next_element())
Example #14
  def testNoAggregatorRegistered(self, dataset_transformation):
    dataset = dataset_ops.Dataset.range(100).apply(
        stats_ops.latency_stats("record_latency"))

    next_element = self.getNext(dataset, requires_initialization=True)

    for i in range(100):
      self.assertEqual(i, self.evaluate(next_element()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element())
Example #15
  def testRepeatedTags(self):
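    # Graph-mode repeated-tag test: the summary returned by get_summary() should
    # show the shared "record_latency" tag counting every element twice.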
    stats_aggregator = stats_ops.StatsAggregator()
    dataset = dataset_ops.Dataset.range(100).apply(
        stats_ops.latency_stats("record_latency")).apply(
            stats_ops.latency_stats("record_latency")).apply(
                stats_ops.set_stats_aggregator(stats_aggregator))
    iterator = dataset.make_initializable_iterator()
    next_element = iterator.get_next()
    summary_t = stats_aggregator.get_summary()

    with self.cached_session() as sess:
      sess.run(iterator.initializer)
      for i in range(100):
        self.assertEqual(i, sess.run(next_element))
        self._assertSummaryHasCount(
            sess.run(summary_t), "record_latency", float(2 * (i + 1)))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
      self._assertSummaryHasCount(sess.run(summary_t), "record_latency", 200.0)
Example #16
    def testNoAggregatorRegistered(self, dataset_transformation):
        dataset = dataset_ops.Dataset.range(100).apply(
            stats_ops.latency_stats("record_latency"))
        iterator = dataset.make_initializable_iterator()
        next_element = iterator.get_next()

        with self.cached_session() as sess:
            self.evaluate(iterator.initializer)
            for i in range(100):
                self.assertEqual(i, self.evaluate(next_element))
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(next_element)
Example #17
  def testNoAggregatorRegistered(self, dataset_transformation):
    dataset = dataset_ops.Dataset.range(100).apply(
        stats_ops.latency_stats("record_latency"))
    iterator = dataset_ops.make_initializable_iterator(dataset)
    next_element = iterator.get_next()

    with self.cached_session() as sess:
      self.evaluate(iterator.initializer)
      for i in range(100):
        self.assertEqual(i, self.evaluate(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)
Example #18
    def DISABLED_testMultipleDatasetWithPrefixes(self):
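        # Renamed with a DISABLED_ prefix so the runner does not collect it as a
        # test; otherwise the same prefix scenario, asserted via the handle API.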
        aggregator = stats_aggregator.StatsAggregator()
        dataset = dataset_ops.Dataset.range(100).apply(
            stats_ops.latency_stats("record_latency"))
        dataset = self.datasetExperimentalStats(dataset,
                                                aggregator,
                                                prefix="dataset1")
        dataset2 = dataset_ops.Dataset.range(100).apply(
            stats_ops.latency_stats("record_latency"))
        dataset2 = self.datasetExperimentalStats(dataset2,
                                                 aggregator,
                                                 prefix="dataset2")
        next_element1 = self.getNext(dataset, requires_initialization=True)
        next_element2 = self.getNext(dataset2, requires_initialization=True)

        for i in range(100):
            self.assertEqual(i * 2,
                             self.evaluate(next_element1() + next_element2()))
            handle = self.getHandle(aggregator)
            self.assertStatisticsHasCount(handle,
                                          "dataset1::record_latency",
                                          float(i + 1),
                                          2 * i + 3,
                                          offset=1)
            self.assertStatisticsHasCount(handle, "dataset2::record_latency",
                                          float(i + 1), 2 * i + 3)
        with self.assertRaises(errors.OutOfRangeError):
            self.evaluate(next_element1())
        with self.assertRaises(errors.OutOfRangeError):
            self.evaluate(next_element2())
        handle = self.getHandle(aggregator)
        self.assertStatisticsHasCount(handle,
                                      "dataset1::record_latency",
                                      100.0,
                                      201,
                                      offset=1)
        self.assertStatisticsHasCount(handle, "dataset2::record_latency",
                                      100.0, 201)
Example #19
    def testMultipleTags(self, dataset_transformation):
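        # Graph-mode, parameterized form of the multiple-tag test: each element
        # increments both "record_latency" and "record_latency_2" by one.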
        aggregator = stats_aggregator.StatsAggregator()
        dataset = dataset_ops.Dataset.range(100).apply(
            stats_ops.latency_stats("record_latency")).apply(
                stats_ops.latency_stats("record_latency_2"))
        dataset = dataset_transformation(dataset, aggregator)
        iterator = dataset.make_initializable_iterator()
        next_element = iterator.get_next()
        summary_t = aggregator.get_summary()

        with self.cached_session() as sess:
            self.evaluate(iterator.initializer)
            for i in range(100):
                self.assertEqual(i, self.evaluate(next_element))
                self._assertSummaryHasCount(sess.run(summary_t),
                                            "record_latency", float(i + 1))
                self._assertSummaryHasCount(sess.run(summary_t),
                                            "record_latency_2", float(i + 1))
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(next_element)
            self._assertSummaryHasCount(self.evaluate(summary_t),
                                        "record_latency", 100.0)
            self._assertSummaryHasCount(sess.run(summary_t),
                                        "record_latency_2", 100.0)
Example #20
  def testMultipleTags(self, dataset_transformation):
    aggregator = stats_aggregator.StatsAggregator()
    dataset = dataset_ops.Dataset.range(100).apply(
        stats_ops.latency_stats("record_latency")).apply(
            stats_ops.latency_stats("record_latency_2"))
    dataset = dataset_transformation(dataset, aggregator)
    iterator = dataset_ops.make_initializable_iterator(dataset)
    next_element = iterator.get_next()
    summary_t = aggregator.get_summary()

    with self.cached_session() as sess:
      self.evaluate(iterator.initializer)
      for i in range(100):
        self.assertEqual(i, self.evaluate(next_element))
        self._assertSummaryHasCount(
            self.evaluate(summary_t), "record_latency", float(i + 1))
        self._assertSummaryHasCount(
            self.evaluate(summary_t), "record_latency_2", float(i + 1))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)
      self._assertSummaryHasCount(
          self.evaluate(summary_t), "record_latency", 100.0)
      self._assertSummaryHasCount(
          self.evaluate(summary_t), "record_latency_2", 100.0)
Example #21
  def testMultipleIteratorsSameAggregator(self, dataset_transformation):
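    # Two iterators over the same dataset feed one aggregator, so the
    # "record_latency" count advances by two per loop iteration.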
    aggregator = stats_aggregator.StatsAggregator()
    dataset = dataset_ops.Dataset.range(100).apply(
        stats_ops.latency_stats("record_latency"))
    dataset = dataset_transformation(dataset, aggregator)
    iterator_0 = dataset.make_initializable_iterator()
    iterator_1 = dataset.make_initializable_iterator()
    next_element = iterator_0.get_next() + iterator_1.get_next()
    summary_t = aggregator.get_summary()

    with self.cached_session() as sess:
      sess.run([iterator_0.initializer, iterator_1.initializer])
      for i in range(100):
        self.assertEqual(i * 2, sess.run(next_element))
        self._assertSummaryHasCount(
            sess.run(summary_t), "record_latency", float(2 * (i + 1)))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
      self._assertSummaryHasCount(sess.run(summary_t), "record_latency", 200.0)
Example #22
    def testReinitialize(self):
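        # The aggregator outlives the iterator: re-creating the iterator five
        # times keeps accumulating into the same "record_latency" statistic.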
        aggregator = stats_aggregator.StatsAggregator()
        dataset = dataset_ops.Dataset.range(100).apply(
            stats_ops.latency_stats("record_latency"))
        dataset = self.datasetExperimentalStats(dataset, aggregator)

        for j in range(5):
            next_element = self.getNext(dataset, requires_initialization=True)
            for i in range(100):
                self.assertEqual(i, self.evaluate(next_element()))
                handle = self.getHandle(aggregator)
                self.assertStatisticsHasCount(handle, "record_latency",
                                              float((j * 100) + i + 1),
                                              (j * 100) + i + 2)
            with self.assertRaises(errors.OutOfRangeError):
                self.evaluate(next_element())
            handle = self.getHandle(aggregator)
            self.assertStatisticsHasCount(handle, "record_latency",
                                          (j + 1) * 100.0, (j * 100) + 101)
Example #23
    def testMultipleIteratorsSameAggregator(self, dataset_transformation):
        aggregator = stats_aggregator.StatsAggregator()
        dataset = dataset_ops.Dataset.range(100).apply(
            stats_ops.latency_stats("record_latency"))
        dataset = dataset_transformation(dataset, aggregator)
        iterator_0 = dataset.make_initializable_iterator()
        iterator_1 = dataset.make_initializable_iterator()
        next_element = iterator_0.get_next() + iterator_1.get_next()
        summary_t = aggregator.get_summary()

        with self.cached_session() as sess:
            sess.run([iterator_0.initializer, iterator_1.initializer])
            for i in range(100):
                self.assertEqual(i * 2, sess.run(next_element))
                self._assertSummaryHasCount(sess.run(summary_t),
                                            "record_latency",
                                            float(2 * (i + 1)))
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(next_element)
            self._assertSummaryHasCount(sess.run(summary_t), "record_latency",
                                        200.0)
Example #24
  def _build_dataset_latency_stats(self, num_elements, tag="record_latency"):
    return dataset_ops.Dataset.range(num_elements).apply(
        stats_ops.latency_stats(tag))
Example #25
  def _build_dataset_latency_stats(self, num_elements, tag="record_latency"):
    return dataset_ops.Dataset.range(num_elements).apply(
        stats_ops.latency_stats(tag))
Example #26
  def _build_dataset_multiple_tags(self,
                                   num_elements,
                                   tag1="record_latency",
                                   tag2="record_latency_2"):
    return dataset_ops.Dataset.range(num_elements).apply(
        stats_ops.latency_stats(tag1)).apply(stats_ops.latency_stats(tag2))
Example #27
  def _build_dataset_multiple_tags(self,
                                   num_elements,
                                   tag1="record_latency",
                                   tag2="record_latency_2"):
    return dataset_ops.Dataset.range(num_elements).apply(
        stats_ops.latency_stats(tag1)).apply(stats_ops.latency_stats(tag2))