def test_repair_shard(self):
        """Sharded repair keeps cached metrics that exist and drops spurious ones."""
        # Add a normal metric.
        metric_name = "a.b.test"
        metric = bg_test_utils.make_metric(metric_name)

        self.metadata_cache.create_metric(metric)
        # Repairing every shard must not evict a legitimately created metric.
        self.metadata_cache.repair(shard=0, nshards=2)
        self.metadata_cache.repair(shard=1, nshards=2)
        self.metadata_cache.get_metric(metric_name)
        # Should be only one entry, and it was a hit.
        self.assertEqual(self.metadata_cache.hit_count, 1)
        self.assertEqual(self.metadata_cache.miss_count, 0)

        # Add a spurious metric (inserted behind the accessor's back).
        metric_name = "a.b.fake"
        metric = bg_test_utils.make_metric(metric_name)

        self.metadata_cache._cache(metric)

        # Will not fix: repair starting at "b" never scans the "a..." range.
        self.metadata_cache.repair(start_key="b")
        self.metadata_cache.get_metric(metric_name)
        self.assertEqual(self.metadata_cache.hit_count, 2)

        # Will fix.
        self.metadata_cache.repair(start_key="a", shard=0, nshards=2)
        self.metadata_cache.repair(start_key="a", shard=1, nshards=2)
        self.metadata_cache.get_metric(metric_name)
        # repair() will remove it, and the get will produce a miss.
        self.assertEqual(self.metadata_cache.hit_count, 2)
        self.assertEqual(self.metadata_cache.miss_count, 1)
    def test_repair_shard(self):
        """Sharded repair keeps valid cache entries and purges spurious ones."""
        cache = self.metadata_cache

        # A metric created through the cache must survive a full repair.
        metric_name = "a.b.test"
        cache.create_metric(bg_test_utils.make_metric(metric_name))
        for shard in (0, 1):
            cache.repair(shard=shard, nshards=2)
        cache.get_metric(metric_name)
        # Exactly one lookup so far, and it was served from the cache.
        self.assertEqual(cache.hit_count, 1)
        self.assertEqual(cache.miss_count, 0)

        # Inject an entry behind the accessor's back: it is spurious.
        metric_name = "a.b.fake"
        cache._cache_set(metric_name, bg_test_utils.make_metric(metric_name))

        # A repair starting at "b" never scans the "a..." range, so the
        # bogus entry survives and the next get is still a hit.
        cache.repair(start_key="b")
        cache.get_metric(metric_name)
        self.assertEqual(cache.hit_count, 2)

        # A repair covering "a..." drops it, so the next get misses.
        for shard in (0, 1):
            cache.repair(start_key="a", shard=shard, nshards=2)
        cache.get_metric(metric_name)
        self.assertEqual(cache.hit_count, 2)
        self.assertEqual(cache.miss_count, 1)
    def test_cache_clean(self):
        """Check that clean() evicts metrics older than the TTL."""
        # Create one metric a year before the other.
        with freeze_time("2014-01-01 00:00:00"):
            stale = bg_test_utils.make_metric("i.am.old")
            self.metadata_cache.create_metric(stale)
        with freeze_time("2015-01-01 00:00:00"):
            fresh = bg_test_utils.make_metric("i.am.new")
            self.metadata_cache.create_metric(fresh)

        # Clean 20 hours after the fresh metric was created.
        with freeze_time("2015-01-01 20:00:00"):
            self.metadata_cache.clean()

        # The recent entry survives; the year-old one is gone.
        self.assertEqual(self.metadata_cache._cache_has(fresh.name), True)
        self.assertEqual(self.metadata_cache._cache_has(stale.name), False)
# ---- Example 4 (示例#4) — scraped-snippet separator ----
    def test_glob_metrics_cached(self):
        """Glob results should be served from the accessor cache once warmed."""
        metric_names = ["a", "a.b", "x.y.z"]
        for metric_name in metric_names:
            self.accessor.create_metric(bg_test_utils.make_metric(metric_name))
        self.flush()

        # Swap in a fresh in-memory cache for the duration of the test.
        original_cache = self.accessor.cache
        self.accessor.cache = bg_accessor_cache.MemoryCache(10, 60)

        def assert_find(glob, results):
            found = self.accessor.glob_metric_names(glob)
            self.assertEqual(set(results), set(found))

        # First pass populates the cache; second pass is served from it.
        for _ in range(2):
            assert_find('**', metric_names)
            assert_find('a', ['a'])
            assert_find('{x,y}.*y.[z]', ['x.y.z'])

        # Stub out get() to prove lookups really go through the cache.
        self.accessor.cache.get = lambda _, version: ['fake']
        assert_find('a', ['fake'])
        assert_find('**', ['fake'])
        assert_find('{x,y}.*y.[z]', ['fake'])

        self.accessor.cache = original_cache
# ---- Example 5 (示例#5) — scraped-snippet separator ----
    def test_carbon_protocol_read(self):
        """Fetch points through the Graphite reader with a mocked carbonlink.

        NOTE(review): the metric is created under ``_METRIC_NAME`` while the
        reader is built with the local ``metric_name`` ('fake.name'); this
        only works if the two names agree — confirm against the enclosing
        test case's ``_METRIC_NAME``.
        """
        metric_name = 'fake.name'
        metric = bg_test_utils.make_metric(_METRIC_NAME)
        # Custom aggregator to make sure all goes right.
        metric.metadata.aggregator = bg_accessor.Aggregator.minimum
        self.accessor.create_metric(metric)
        self.accessor.flush()
        self.reader = bg_graphite.Reader(
            self.accessor, self.metadata_cache, self.carbonlink, metric_name
        )

        with mock.patch('graphite.carbonlink.CarbonLinkPool.query') as carbonlink_query_mock:
            # Points the mocked carbonlink pretends to hold in memory.
            carbonlink_query_mock.return_value = [
                (864005.0, 100.0), (864065.0, 101.0), (864125.0, 102.0)
            ]

            (start, end, step), points = self.fetch(
                start_time=self._POINTS_START + 3,
                end_time=self._POINTS_END - 3,
                now=self._POINTS_END + 10,
            )

            # Check that we really have a 1sec resolution
            self.assertEqual(start, self._POINTS_START + 3)
            self.assertEqual(end, self._POINTS_END - 3)
            self.assertEqual(step, 1)
            # Check that this returns at least one value different from None.
            self.assertEqual(len(points), end - start)
            # Check that at least one point is at the correct place.
            self.assertEqual(points[864005 - start], 100.0)
# ---- Example 6 (示例#6) — scraped-snippet separator ----
    def test_glob_directories(self):
        """Check directory glob matching at several depths."""
        is_es = self.ACCESSOR_SETTINGS.get('driver', '') == 'elasticsearch'
        for metric_name in ("a", "a.b", "x.y.z"):
            self.accessor.create_metric(bg_test_utils.make_metric(metric_name))
        self.flush()

        def assert_find(glob, expected_matches):
            # The glob must yield exactly the expected directories.
            found = list(self.accessor.glob_directory_names(glob))
            self.assertEqual(expected_matches, found)

        assert_find("x.y", ["x.y"])  # Test exact match
        assert_find("A", [])  # Test case mismatch

        # Test various depths
        assert_find("*", ["a", "x"])
        # The elasticsearch driver reports the wildcard component verbatim.
        assert_find("*.*", ["*.y"] if is_es else ["x.y"])
        assert_find("*.*.*", [])

        self.accessor.drop_all_metrics()
        assert_find("*", [])
# ---- Example 7 (示例#7) — scraped-snippet separator ----
 def test_write(self):
     """Points written through the plugin must land in the accessor."""
     metric = bg_test_utils.make_metric(_TEST_METRIC)
     points = [(1, 42)]
     self.accessor.create_metric(metric)
     # Writing twice: the first write is sync, the second exercises the
     # asynchronous path.
     for _ in range(2):
         self._plugin.write(metric.name, points)
     fetched = self.accessor.fetch_points(metric, 1, 2, stage=metric.retention[0])
     self.assertEqual(points, list(fetched))
 def test_glob(self):
     """bg_gu.glob must return (metrics, directories) for each pattern."""
     for metric_name in ("a", "a.b.c", "a.b.d", "x.y.c", "a.a.a"):
         self.accessor.create_metric(bg_test_utils.make_metric(metric_name))
     expectations = [
         ("*", (["a"], ["a", "x"])),
         ("*.*.c", (["a.b.c", "x.y.c"], [])),
         ("a.*.*", (["a.a.a", "a.b.c", "a.b.d"], [])),
         ("*.*.*", (["a.a.a", "a.b.c", "a.b.d", "x.y.c"], [])),
         ("*.{b,c,d,5}.?", (["a.b.c", "a.b.d"], [])),
     ]
     for pattern, expected in expectations:
         self.assertEqual(expected, bg_gu.glob(self.accessor, pattern))
# ---- Example 9 (示例#9) — scraped-snippet separator ----
 def setUp(self):
     """Create a few metrics and build the Graphite finder under test."""
     super(TestFinder, self).setUp()
     names = ("a", "a.a", "a.b.c", "x.y")
     for metric_name in names:
         self.accessor.create_metric(bg_test_utils.make_metric(metric_name))
     self.finder = bg_graphite.Finder(
         accessor=self.accessor,
         metadata_cache=self.metadata_cache,
     )
# ---- Example 10 (示例#10) — scraped-snippet separator ----
 def setUp(self):
     """Populate the fake accessor and instantiate the Graphite finder."""
     super(TestFinder, self).setUp()
     for metric_name in ["a", "a.a", "a.b.c", "x.y"]:
         metric = bg_test_utils.make_metric(metric_name)
         self.accessor.create_metric(metric)
     self.finder = bg_graphite.Finder(
         metadata_cache=self.metadata_cache,
         accessor=self.accessor,
     )
# ---- Example 11 (示例#11) — scraped-snippet separator ----
    def test_glob_too_many_directories(self):
        """glob_directory_names must raise once the per-pattern limit is hit."""
        for name in "a", "a.b", "x.y.z":
            metric = bg_test_utils.make_metric(name)
            self.accessor.create_metric(metric)

        old_value = self.accessor.max_metrics_per_pattern
        self.accessor.max_metrics_per_pattern = 1
        try:
            with self.assertRaises(bg_cassandra.TooManyMetrics):
                list(self.accessor.glob_directory_names('**'))
        finally:
            # Restore the limit even if the assertion fails, so a failure
            # here cannot leak a max_metrics_per_pattern of 1 into other
            # tests sharing the accessor.
            self.accessor.max_metrics_per_pattern = old_value
# ---- Example 12 (示例#12) — scraped-snippet separator ----
    def test_stats(self):
        """stats() must report data both before and after metric creation."""
        self.assertNotEqual(len(self.metadata_cache.stats()), 0)

        metric = bg_test_utils.make_metric("a.b.test")
        self.metadata_cache.create_metric(metric)

        # Still non-empty after a metric exists.
        self.assertNotEqual(len(self.metadata_cache.stats()), 0)
# ---- Example 13 (示例#13) — scraped-snippet separator ----
 def test_fetch_empty(self):
     """Fetching after drop_all_metrics must return no points."""
     no_such_metric = bg_test_utils.make_metric("no.such.metric")
     self.accessor.insert_points(_METRIC, _POINTS)
     self.accessor.drop_all_metrics()
     # An unknown metric yields no points.
     self.assertEqual(
         len(self.fetch(no_such_metric, _POINTS_START, _POINTS_END)),
         0,
     )
     # Was assertFalse(len(...), 0): the second argument is the failure
     # *message*, so the 0 was silently ignored. An equality check is what
     # was meant.
     self.assertEqual(
         len(self.fetch(_METRIC, _POINTS_START, _POINTS_END)),
         0,
     )
# ---- Example 14 (示例#14) — scraped-snippet separator ----
    def test_repair(self):
        """repair() keeps metrics known to the accessor and drops others."""
        # Add a normal metric.
        metric_name = "a.b.test"
        metric = bg_test_utils.make_metric(metric_name)

        self.metadata_cache.create_metric(metric)
        self.metadata_cache.repair()
        self.metadata_cache.get_metric(metric_name)
        # Should be only one entry, and it was a hit.
        self.assertEqual(self.metadata_cache.hit_count, 1)
        self.assertEqual(self.metadata_cache.miss_count, 0)

        # Add a spurious metric (bypasses the accessor).
        metric_name = "a.b.fake"
        metric = bg_test_utils.make_metric(metric_name)

        self.metadata_cache._cache_set(metric_name, metric)
        self.metadata_cache.repair()
        self.metadata_cache.get_metric(metric_name)
        # repair() will remove it, and the get will produce a miss.
        self.assertEqual(self.metadata_cache.hit_count, 1)
        self.assertEqual(self.metadata_cache.miss_count, 1)
# ---- Example 15 (示例#15) — scraped-snippet separator ----
    def test_create_metrics(self):
        """A created metric must round-trip through get_metric with metadata."""
        meta_dict = {
            "aggregator": bg_accessor.Aggregator.last,
            "retention": bg_accessor.Retention.from_string("60*1s:60*60s"),
            "carbon_xfilesfactor": 0.3,
        }
        metric = bg_test_utils.make_metric("a.b.c.d.e.f", **meta_dict)

        self.accessor.create_metric(metric)
        metric_again = self.accessor.get_metric(metric.name)
        self.assertEqual(metric.name, metric_again.name)
        # dict.items() works on both Python 2 and 3; iteritems() is py2-only.
        for k, v in meta_dict.items():
            self.assertEqual(v, getattr(metric_again.metadata, k))
# ---- Example 16 (示例#16) — scraped-snippet separator ----
 def test_fetch_empty(self):
     """After dropping all metrics, no points must be returned."""
     no_such_metric = bg_test_utils.make_metric("no.such.metric")
     self.accessor.insert_points(_METRIC, _POINTS)
     self.accessor.flush()
     self.accessor.drop_all_metrics()
     self.assertEqual(
         len(self.fetch(no_such_metric, _POINTS_START, _POINTS_END)),
         0,
     )
     # Was assertFalse(len(...), 0): the 0 was silently taken as the
     # failure message, not compared. Use the intended equality assertion.
     self.assertEqual(
         len(self.fetch(_METRIC, _POINTS_START, _POINTS_END)),
         0,
     )
# ---- Example 17 (示例#17) — scraped-snippet separator ----
    def test_create_metrics(self):
        """has_metric flips to True on creation and metadata round-trips."""
        meta_dict = {
            "aggregator": bg_accessor.Aggregator.last,
            "retention": bg_accessor.Retention.from_string("60*1s:60*60s"),
            "carbon_xfilesfactor": 0.3,
        }
        metric = bg_test_utils.make_metric("a.b.c.d.e.f", **meta_dict)

        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(self.accessor.has_metric(metric.name), False)
        self.accessor.create_metric(metric)
        self.assertEqual(self.accessor.has_metric(metric.name), True)
        metric_again = self.accessor.get_metric(metric.name)
        self.assertEqual(metric.name, metric_again.name)
        # dict.items() works on both Python 2 and 3; iteritems() is py2-only.
        for k, v in meta_dict.items():
            self.assertEqual(v, getattr(metric_again.metadata, k))
# ---- Example 18 (示例#18) — scraped-snippet separator ----
    def test_glob_directories(self):
        """Directory globs must match at exact names and various depths."""
        for name in "a", "a.b", "x.y.z":
            metric = bg_test_utils.make_metric(name)
            self.accessor.create_metric(metric)

        def assert_find(glob, expected_matches):
            # Check we can find the matches of a glob. Materialize the
            # result first: glob_directory_names may return a generator,
            # and a list never compares equal to a generator object.
            self.assertEqual(expected_matches,
                             list(self.accessor.glob_directory_names(glob)))

        assert_find("x.y", ["x.y"])  # Test exact match
        assert_find("A", [])  # Test case mismatch

        # Test various depths
        assert_find("*", ["a", "x"])
        assert_find("*.*", ["x.y"])
        assert_find("*.*.*", [])

        self.accessor.drop_all_metrics()
        assert_find("*", [])
# ---- Example 19 (示例#19) — scraped-snippet separator ----
    def test_glob_directories(self):
        """Check glob matching of directory names."""
        for metric_name in ("a", "a.b", "x.y.z"):
            self.accessor.create_metric(bg_test_utils.make_metric(metric_name))

        def assert_find(glob, expected_matches):
            # Compare against the materialized glob results.
            actual = list(self.accessor.glob_directory_names(glob))
            self.assertEqual(expected_matches, actual)

        assert_find("x.y", ["x.y"])  # Exact match.
        assert_find("A", [])  # Case mismatch finds nothing.

        # One wildcard per path component, at increasing depth.
        assert_find("*", ["a", "x"])
        assert_find("*.*", ["x.y"])
        assert_find("*.*.*", [])

        # Dropping every metric empties the directory tree.
        self.accessor.drop_all_metrics()
        assert_find("*", [])
# ---- Example 20 (示例#20) — scraped-snippet separator ----
    def test_glob_metrics_cached(self):
        """Glob lookups must be served by the accessor cache once warm."""
        if isinstance(self.accessor, bg_elasticsearch._ElasticSearchAccessor):
            # TODO (t.chataigner) Remove once accessor.cache is implemented.
            self.skipTest(
                "accessor.cache is not implemented for _ElasticSearchAccessor."
            )

        metric_names = ["a", "a.b", "x.y.z"]
        for metric_name in metric_names:
            self.accessor.create_metric(bg_test_utils.make_metric(metric_name))
        self.flush()

        # Temporarily swap in a fresh in-memory cache.
        original_cache = self.accessor.cache
        self.accessor.cache = bg_accessor_cache.MemoryCache(10, 60)

        def assert_find(glob, results):
            found = self.accessor.glob_metric_names(glob)
            self.assertEqual(set(results), set(found))

        # First round fills the cache, second round reads from it.
        for _ in range(2):
            assert_find('**', metric_names)
            assert_find('a', ['a'])
            assert_find('{x,y}.*y.[z]', ['x.y.z'])

        # Replace get() to prove results now come from the cache.
        self.accessor.cache.get = lambda _, version: ['fake']
        assert_find('a', ['fake'])
        assert_find('**', ['fake'])
        assert_find('{x,y}.*y.[z]', ['fake'])

        self.accessor.cache = original_cache
# ---- Example 21 (示例#21) — scraped-snippet separator ----
    def test_graphite_glob(self):
        """graphite_glob must return (metrics, directories) per pattern."""
        for metric_name in self._metric_names:
            self.accessor.create_metric(bg_test_utils.make_metric(metric_name))

        scenarii = [
            # Single character wildcard
            ("a?", ["aa"], []),
            # Component wildcard
            ("*", self._metrics_by_length[1], ["a", "x"]),
            ("*.*.c", ["a.b.c", "x.y.c"], []),
            ("a.*.*", ["a.a.a", "a.b.c", "a.b.d"], []),
            ("*.*.*", self._metrics_by_length[3], []),
            # Multi-character wildcard
            ("a*", ["a", "aa", "aaa"], ["a"]),
            # Choices
            ("*.{b,c,d,5}.?", ["a.b.c", "a.b.d"], []),
            # Globstar wildcard
            ("a.**", ["a.a.a", "a.b.c", "a.b.d"], ["a.a", "a.b"]),
        ]
        for glob, expected_metrics, expected_dirs in scenarii:
            self.assertEqual(
                (expected_metrics, expected_dirs),
                bg_glob.graphite_glob(self.accessor, glob),
            )
# ---- Example 22 (示例#22) — scraped-snippet separator ----
 def test_dir(self):
     """dir() on a metric must expose its public attributes."""
     metric = bg_test_utils.make_metric("a.b.c")
     for attribute in ("name", "carbon_xfilesfactor"):
         self.assertIn(attribute, dir(metric))
# ---- Example 23 (示例#23) — scraped-snippet separator ----
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

import unittest

from biggraphite import test_utils as bg_test_utils

_TEST_METRIC = bg_test_utils.make_metric("a.b.c")


class TestGraphiteUtilsInternals(bg_test_utils.TestCaseWithFakeAccessor):

    def _assert_hit_miss(self, hit, miss):
        """Assert the cache's hit and miss counters equal *hit* and *miss*."""
        self.assertEqual(hit, self.metadata_cache.hit_count)
        self.assertEqual(miss, self.metadata_cache.miss_count)

    def test_double_open(self):
        # open() on the already-open cache (opened by setUp) then close()
        # must not raise.
        self.metadata_cache.open()
        self.metadata_cache.close()

    def test_hit_counts(self):
        """Check that we use the on disk cache to reduce access reads."""
        hit, miss = 0, 0
# ---- Example 24 (示例#24) — scraped-snippet separator ----
class TestCommandCopy(bg_test_utils.TestCaseWithFakeAccessor):
    """Tests for the copy command, including retention overrides."""

    # Time window of the test points: 3 hours starting at day 10.
    _POINTS_START = 3600 * 24 * 10
    _POINTS_END = _POINTS_START + 3 * 3600
    _RETENTION = bg_accessor.Retention.from_string("20*15s:1440*60s:48*3600s")
    # Alternative retention whose first stage has a different precision.
    _RETENTION_BIS = bg_accessor.Retention.from_string(
        "20*10s:14400*60s:500*3600s")
    _POINTS = bg_test_utils._make_easily_queryable_points(
        start=_POINTS_START,
        end=_POINTS_END,
        period=_RETENTION[1].precision,
    )
    # _METRIC_1 holds the source points; _METRIC_2 shares its retention,
    # _METRIC_3 uses the alternative one.
    _METRIC_1_NAME = "test.origin.metric_1.toto"
    _METRIC_1 = bg_test_utils.make_metric(_METRIC_1_NAME, retention=_RETENTION)
    _METRIC_2_NAME = "test.origin.metric_2.tata"
    _METRIC_2 = bg_test_utils.make_metric(_METRIC_2_NAME, retention=_RETENTION)
    _METRIC_3_NAME = "test.origin.metric_3.tata"
    _METRIC_3 = bg_test_utils.make_metric(_METRIC_3_NAME,
                                          retention=_RETENTION_BIS)

    def setUp(self):
        """Set up a subdirectory of metrics to copy."""
        super(TestCommandCopy, self).setUp()
        self.accessor.connect()
        self.accessor.create_metric(self._METRIC_1)
        self.accessor.create_metric(self._METRIC_2)
        # Only _METRIC_1 receives points; _METRIC_2 starts empty.
        self.accessor.insert_points(self._METRIC_1, self._POINTS)
        self.accessor.flush()

    def test_copy_metric(self):
        """Test copy of a single metric with aggregated points."""
        cmd_copy = command_copy.CommandCopy()

        # Check that _METRIC_2 is empty.
        for i in range(3):
            pts = self.accessor.fetch_points(self._METRIC_2,
                                             self._POINTS_START,
                                             self._POINTS_END,
                                             stage=self._METRIC_2.retention[i])
            self.assertEqual(list(pts), [])

        # Copy points from _METRIC_1 to _METRIC_2
        cmd_copy._copy_metric(self.accessor, self._METRIC_1, self._METRIC_2,
                              self._POINTS_START, self._POINTS_END)
        self.accessor.flush()

        # Check that both metrics have same points
        for i in range(3):
            pts = self.accessor.fetch_points(self._METRIC_1,
                                             self._POINTS_START,
                                             self._POINTS_END,
                                             stage=self._METRIC_1.retention[i],
                                             aggregated=False)
            pts_copy = self.accessor.fetch_points(
                self._METRIC_2,
                self._POINTS_START,
                self._POINTS_END,
                stage=self._METRIC_2.retention[i],
                aggregated=False)
            self.assertEqual(list(pts), list(pts_copy))

    def test_copy_metric_with_retention(self):
        """Test copy of a metric with aggregated points and retention override.

        A given dst_stage should have the same points of the src_stage
        that have the same precision, or no point at all.
        """
        cmd_copy = command_copy.CommandCopy()
        cmd_copy._copy_metric(self.accessor, self._METRIC_1, self._METRIC_3,
                              self._POINTS_START, self._POINTS_END)
        self.accessor.flush()
        for i in range(3):
            pts = self.accessor.fetch_points(self._METRIC_1,
                                             self._POINTS_START,
                                             self._POINTS_END,
                                             stage=self._METRIC_1.retention[i],
                                             aggregated=False)
            pts_copy = self.accessor.fetch_points(
                self._METRIC_3,
                self._POINTS_START,
                self._POINTS_END,
                stage=self._METRIC_3.retention[i],
                aggregated=False)
            # Stage 0 precisions differ (15s vs 10s), so the copied points
            # cannot match; the other stages share precisions and must match.
            if i == 0:
                self.assertNotEqual(list(pts), list(pts_copy))
            else:
                self.assertEqual(list(pts), list(pts_copy))

    def test_get_metric_tuples_with_metric(self):
        """Test retrieve of a single couple of metrics."""
        cmd_copy = command_copy.CommandCopy()

        # Test with metric names arguments
        expected_metric_tuples = [(self._METRIC_1, self._METRIC_2)]
        metric_tuples = cmd_copy._get_metric_tuples(accessor=self.accessor,
                                                    src=self._METRIC_1_NAME,
                                                    dst=self._METRIC_2_NAME,
                                                    src_retention="",
                                                    dst_retention="",
                                                    recursive=False,
                                                    dry_run=False)
        self.assertEqual(list(metric_tuples), expected_metric_tuples)

    def test_get_metric_tuples_with_directory(self):
        """Test retrieve of a single couple of metrics."""
        cmd_copy = command_copy.CommandCopy()
        # Test with subdirectory names arguments.  Expanding the tuples
        # creates the destination metrics, doubling the metric count.
        self.assertEqual(len(list(list_metrics(self.accessor, "*.**"))), 2)
        metric_tuples = cmd_copy._get_metric_tuples(accessor=self.accessor,
                                                    src="test",
                                                    dst="copy",
                                                    src_retention="",
                                                    dst_retention="",
                                                    recursive=True,
                                                    dry_run=False)
        self.assertEqual(len(list(metric_tuples)), 2)
        self.assertEqual(len(list(list_metrics(self.accessor, "*.**"))), 4)

    def test_get_metric_tuples_with_retention(self):
        """Test retrieve of a single couple of metrics overriding retentions."""
        cmd_copy = command_copy.CommandCopy()
        metric_tuples = cmd_copy._get_metric_tuples(accessor=self.accessor,
                                                    src=self._METRIC_1_NAME,
                                                    dst=self._METRIC_2_NAME,
                                                    src_retention="18*42s",
                                                    dst_retention="50*300s",
                                                    recursive=False,
                                                    dry_run=False)
        # Both source and destination must carry their overridden retention.
        retention_str = [
            m.metadata.retention.as_string for m in list(metric_tuples)[0]
        ]
        self.assertEqual(len(retention_str), 2)
        self.assertIn("18*42s", retention_str)
        self.assertIn("50*300s", retention_str)
# ---- Example 25 (示例#25) — scraped-snippet separator ----
class TestCommandCopy(bg_test_utils.TestCaseWithFakeAccessor):
    """Tests for the copy command with two metrics of identical retention."""

    # Time window of the test points: one hour starting at day 10.
    _POINTS_START = 3600 * 24 * 10
    _POINTS_END = _POINTS_START + 3600
    _RETENTION = bg_accessor.Retention.from_string("20*15s:1440*60s:48*3600s")
    _POINTS = bg_test_utils._make_easily_queryable_points(
        start=_POINTS_START, end=_POINTS_END, period=_RETENTION[1].precision,
    )
    # _METRIC_1 holds the source points; _METRIC_2 is the copy target.
    _METRIC_1_NAME = "test.origin.metric_1.toto"
    _METRIC_1 = bg_test_utils.make_metric(_METRIC_1_NAME, retention=_RETENTION)
    _METRIC_2_NAME = "test.origin.metric_2.tata"
    _METRIC_2 = bg_test_utils.make_metric(_METRIC_2_NAME, retention=_RETENTION)

    def setUp(self):
        """Set up a subdirectory of metrics to copy."""
        super(TestCommandCopy, self).setUp()
        self.accessor.connect()
        self.accessor.create_metric(self._METRIC_1)
        self.accessor.create_metric(self._METRIC_2)
        # Only _METRIC_1 receives points; _METRIC_2 starts empty.
        self.accessor.insert_points(self._METRIC_1, self._POINTS)

    def test_copy_metric(self):
        """Test copy of a single metric."""
        cmd_copy = command_copy.CommandCopy()

        # Check that _METRIC_2 is empty.
        for i in range(3):
            pts = self.accessor.fetch_points(
                self._METRIC_2,
                self._POINTS_START, self._POINTS_END,
                stage=self._METRIC_2.retention[i]
            )
            self.assertEqual(list(pts), [])

        # Copy points from _METRIC_1 to _METRIC_2
        cmd_copy._copy_metric(self.accessor, self._METRIC_1, self._METRIC_2,
                              self._POINTS_START, self._POINTS_END)

        # Check that both metrics have same points
        for i in range(3):
            pts = self.accessor.fetch_points(
                self._METRIC_1,
                self._POINTS_START, self._POINTS_END,
                stage=self._METRIC_1.retention[i]
            )
            pts_copy = self.accessor.fetch_points(
                self._METRIC_2,
                self._POINTS_START, self._POINTS_END,
                stage=self._METRIC_2.retention[i]
            )
            self.assertEqual(list(pts), list(pts_copy))

    def test_get_metric_tuples(self):
        """Test retrieve of a single metric."""
        cmd_copy = command_copy.CommandCopy()

        # Test with metric names arguments
        expected_metric_tuples = [
            (self._METRIC_1, self._METRIC_2)
        ]
        metric_tuples = cmd_copy._get_metric_tuples(
            accessor=self.accessor,
            src=self._METRIC_1_NAME, dst=self._METRIC_2_NAME,
            recursive=False, dry_run=False
        )
        self.assertEqual(list(metric_tuples), expected_metric_tuples)

        # Test with subdirectory names arguments.  Expanding the tuples
        # creates the destination metrics, doubling the metric count.
        self.assertEqual(len(list(list_metrics(self.accessor, "*.**"))), 2)
        metric_tuples = cmd_copy._get_metric_tuples(
            accessor=self.accessor,
            src="test", dst="copy",
            recursive=True, dry_run=False
        )
        self.assertEqual(len(list(metric_tuples)), 2)
        self.assertEqual(len(list(list_metrics(self.accessor, "*.**"))), 4)
# ---- Example 26 (示例#26) — scraped-snippet separator ----
 def test_unicode(self):
     """Metric names with non-ASCII characters must round-trip."""
     name = u"a.b.testé"
     self.metadata_cache.create_metric(bg_test_utils.make_metric(name))
     # Looking the name back up must not raise.
     self.metadata_cache.get_metric(name)
# ---- Example 27 (示例#27) — scraped-snippet separator ----
    def test_glob_metrics(self):
        """Exercise metric-name globbing across every supported pattern type."""
        metrics = [
            "a", "a.a", "a.b", "a.a.a", "a.b.c", "a.x.y",
            "x.y.z", "x.y.y.z", "x.y.y.y.z",
            "super", "superb", "supercomputer", "superconductivity", "superman",
            "supper", "suppose",
            "ad.o.g", "af.o.g", "ap.o.g", "az.o.g",
            "b.o.g", "m.o.g",
            "zd.o.g", "zf.o.g", "zp.o.g", "zz.o.g",
            "-b-.a.t", "-c-.a.t", "-d-.a.t", "-e-.a.t",
        ]
        metrics.sort()

        for name in metrics:
            metric = bg_test_utils.make_metric(name)
            self.accessor.create_metric(metric)

        def assert_find(glob, expected_matches):
            # Check we can find the matches of a glob
            matches = sorted(list(self.accessor.glob_metric_names(glob)))
            self.assertEqual(expected_matches, matches)

        # Empty query
        assert_find("", [])

        # Exact matches
        assert_find("a.a", ["a.a"])
        assert_find("A", [])

        # Character wildcard
        assert_find("?",
                    [x for x in metrics if x.count('.') == 0])
        assert_find("sup?er",
                    [x for x in metrics if x.startswith("sup")])

        # Character selector
        # NOTE(review): every selector pattern below expects the same four
        # "a?.o.g" matches, including 'z' which '[!dfp]' would exclude —
        # presumably the accessor degrades selectors to '?'; confirm.
        for pattern in [
                "a[!dfp].o.g",
                u"a[!dfp].o.g",
                "a[!dfp]suffix.o.g",
                "a[nope].o.g",
                "a[nope]suffix.o.g",
        ]:
            assert_find(pattern,
                        ["a{0}.o.g".format(x) for x in "dfpz"])

        # Sequence wildcard
        assert_find("*",
                    [x for x in metrics if x.count('.') == 0])
        assert_find("*.*",
                    [x for x in metrics if x.count('.') == 1])
        assert_find("*.*.*",
                    [x for x in metrics if x.count('.') == 2])
        assert_find("super*",
                    [x for x in metrics if x.startswith("super")])

        # Sequence selector
        assert_find("a.{b,x}.{c,y}",
                    ["a.b.c", "a.x.y"])
        assert_find("a{d,f,p}.o.g",
                    ["a{0}.o.g".format(c) for c in "dfp"])
        assert_find("{a,z}{d,f,p}.o.g",
                    ["{0}{1}.o.g".format(a, b) for a in "az" for b in "dfp"])
        assert_find("{a{d,f,p},z{d,f,p}}.o.g",
                    ["{0}{1}.o.g".format(a, b) for a in "az" for b in "dfp"])
        for pattern in [
                "-{b,c,d}-.a.t",
                u"-{b,c,d}-.a.t",
                "-{b,c,d}?.a.t",
                "-{b,c,d}?suffix.a.t",
                "-{b,c,d}[ha].a.t",
                "-{b,c,d}[ha]suffix.a.t",
                "-{b,c,d}[!-].a.t",
                "-{b,c,d}[!-]suffix.a.t",
                "-{b,c,d}*.a.t",
                "-{b,c,d}*suffix.a.t",
                u"-{b,c,d}*suffix.a.t",
        ]:
            assert_find(pattern, ["-b-.a.t", "-c-.a.t", "-d-.a.t"])

        # Ensure the query optimizer works as expected by having a high
        # combinatorial pattern.
        assert_find(
            "-{b,c,d}*suffix.a.t{,u}{,v}{,w}{,x}{,y}{,z}",
            ["-{0}-.a.t".format(c) for c in "bcde"],
        )

        # Globstars
        assert_find("**",
                    metrics)
        assert_find("x.**",
                    [x for x in metrics if x.startswith("x.")])
        assert_find("**.z",
                    [x for x in metrics if x.endswith(".z")])
        assert_find("x.**.z",
                    [x for x in metrics
                     if x.startswith("x.") and x.endswith(".z")])

        # After a full drop, nothing matches anymore.
        self.accessor.drop_all_metrics()
        assert_find("*", [])
        assert_find("**", [])
# ---- Example 28 (示例#28) — scraped-snippet separator ----
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

import unittest
from freezegun import freeze_time

from biggraphite import test_utils as bg_test_utils
from biggraphite import metadata_cache as bg_metadata_cache

_TEST_METRIC = bg_test_utils.make_metric("a.b.c")


class CacheBaseTest(object):
    def _assert_hit_miss(self, hit, miss):
        self.assertEqual(hit, self.metadata_cache.hit_count)
        self.assertEqual(miss, self.metadata_cache.miss_count)

    def test_double_open(self):
        self.metadata_cache.open()
        self.metadata_cache.close()

    def test_hit_counts(self):
        """Check that we use the on disk cache to reduce access reads."""
        # Counters must both start at zero before any get_metric() call.
        # NOTE(review): this method appears truncated by the extraction;
        # the original presumably continues with cache accesses — confirm.
        hit, miss = 0, 0
        self._assert_hit_miss(hit, miss)
示例#29
0
    def test_glob_metrics(self):
        """Exercise glob_metric_names() across every supported glob syntax.

        Covers exact names, ``?``/``*`` wildcards, ``[...]``/``[!...]``
        selectors, ``{a,b}`` sequence selectors, and ``**`` globstars.
        """
        IS_LUCENE = self.ACCESSOR_SETTINGS.get('use_lucene', False)

        metrics = [
            "a",
            "a.a",
            "a.b",
            "a.a.a",
            "a.b.c",
            "a.x.y",
            "x.y.z",
            "x.y.y.z",
            "x.y.y.y.z",
            "super",
            "superb",
            "supercomputer",
            "superconductivity",
            "superman",
            "supper",
            "suppose",
            "ad.o.g",
            "af.o.g",
            "ap.o.g",
            "az.o.g",
            "b.o.g",
            "m.o.g",
            "zd.o.g",
            "zf.o.g",
            "zp.o.g",
            "zz.o.g",
            "-b-.a.t",
            "-c-.a.t",
            "-d-.a.t",
            "-e-.a.t",
        ]
        metrics.sort()

        for name in metrics:
            metric = bg_test_utils.make_metric(name)
            self.accessor.create_metric(metric)
        self.flush()

        def assert_find(glob, expected_matches):
            # Check we can find the matches of a glob
            matches = sorted(list(self.accessor.glob_metric_names(glob)))

            # Lucene is supposed to give perfect results, so filter wrongly expected matches.
            if IS_LUCENE:
                glob_re = re.compile(bg_glob_utils.glob_to_regex(glob))
                expected_matches = list(filter(glob_re.match,
                                               expected_matches))

            self.assertEqual(expected_matches, matches)

        # Empty query
        assert_find("", [])

        # Exact matches
        assert_find("a.a", ["a.a"])
        assert_find("A", [])

        # Character wildcard
        assert_find("?", [x for x in metrics if x.count('.') == 0])
        assert_find("sup?er", [x for x in metrics if x.startswith("sup")])

        # Character selector
        # NOTE(review): the expected lists below deliberately include names a
        # strict selector would exclude (e.g. "a[!dfp]" listing ad/af/ap) —
        # presumably the non-Lucene engine returns a superset, and the Lucene
        # branch above filters the expectations down to exact matches.
        for pattern in [
                "a[!dfp].o.g",
                u"a[!dfp].o.g",
                "a[!dfp]suffix.o.g",
                "a[nope].o.g",
                "a[nope]suffix.o.g",
        ]:
            assert_find(pattern, ["a{0}.o.g".format(x) for x in "dfpz"])

        # Sequence wildcard
        assert_find("*", [x for x in metrics if x.count('.') == 0])
        assert_find("*.*", [x for x in metrics if x.count('.') == 1])
        assert_find("*.*.*", [x for x in metrics if x.count('.') == 2])
        assert_find("super*", [x for x in metrics if x.startswith("super")])

        # Sequence selector
        assert_find("a.{b,x}.{c,y}", ["a.b.c", "a.x.y"])
        assert_find("a{d,f,p}.o.g", ["a{0}.o.g".format(c) for c in "dfp"])
        assert_find("{a,z}{d,f,p}.o.g",
                    ["{0}{1}.o.g".format(a, b) for a in "az" for b in "dfp"])
        assert_find("{a{d,f,p},z{d,f,p}}.o.g",
                    ["{0}{1}.o.g".format(a, b) for a in "az" for b in "dfp"])
        for pattern in [
                "-{b,c,d}-.a.t",
                u"-{b,c,d}-.a.t",
                "-{b,c,d}?.a.t",
                "-{b,c,d}?suffix.a.t",
                "-{b,c,d}[ha].a.t",
                "-{b,c,d}[ha]suffix.a.t",
                "-{b,c,d}[!ha].a.t",
                "-{b,c,d}[!ha]suffix.a.t",
                "-{b,c,d}*.a.t",
                "-{b,c,d}*suffix.a.t",
                u"-{b,c,d}*suffix.a.t",
        ]:
            assert_find(pattern, ["-b-.a.t", "-c-.a.t", "-d-.a.t"])

        # Ensure the query optimizer works as expected by having a high
        # combinatorial pattern.
        assert_find(
            "-{b,c,d}*suffix.a.t{,u}{,v}{,w}{,x}{,y}{,z}",
            ["-{0}-.a.t".format(c) for c in "bcde"],
        )

        # Globstars
        assert_find("**", metrics)
        assert_find("x.**", [x for x in metrics if x.startswith("x.")])

        if not IS_LUCENE:
            # FIXME: Lucene doesn't support globstars here yet.
            assert_find("**.z", [x for x in metrics if x.endswith(".z")])
            assert_find("x.**.z", [
                x for x in metrics if x.startswith("x.") and x.endswith(".z")
            ])

        # After dropping everything, no pattern should match anymore.
        self.accessor.drop_all_metrics()
        assert_find("*", [])
        assert_find("**", [])
示例#30
0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

import unittest
import time
from distutils import version

from biggraphite import accessor as bg_accessor
from biggraphite import test_utils as bg_test_utils
from biggraphite.drivers import cassandra as bg_cassandra

# Metric fixture shared by the point-query tests below.
_METRIC = bg_test_utils.make_metric("test.metric")

# Points test query.
_QUERY_RANGE = 3600
_QUERY_START = 1000 * _QUERY_RANGE
_QUERY_END = _QUERY_START + _QUERY_RANGE

# Points injected in the test DB, a superset of above.
_EXTRA_POINTS = 1000
_POINTS_START = _QUERY_START - _EXTRA_POINTS
_POINTS_END = _QUERY_END + _EXTRA_POINTS
_POINTS = [(t, v) for v, t in enumerate(xrange(_POINTS_START, _POINTS_END))]
_USEFUL_POINTS = _POINTS[_EXTRA_POINTS:-_EXTRA_POINTS]
assert _QUERY_RANGE == len(_USEFUL_POINTS)

示例#31
0
 def test_setattr(self):
     """Metric attributes must be read-only: assignment raises AttributeError."""
     metric = bg_test_utils.make_metric("test")
     self.assertTrue(hasattr(metric, "carbon_xfilesfactor"))
     with self.assertRaises(AttributeError):
         setattr(metric, "carbon_xfilesfactor", 0.5)
示例#32
0
 def test_unicode(self):
     """A metric with a non-ASCII name must round-trip through the cache."""
     name = u"a.b.testé"
     metric = bg_test_utils.make_metric(name)
     self.metadata_cache.create_metric(metric)
     self.metadata_cache.get_metric(name)
示例#33
0
class TestReader(bg_test_utils.TestCaseWithFakeAccessor):
    """Tests of the Graphite Reader plugin backed by a fake accessor."""

    # One hour of points starting ten days after the epoch.
    _POINTS_START = 3600 * 24 * 10
    _POINTS_END = _POINTS_START + 3600
    _RETENTION = bg_accessor.Retention.from_string("20*15s:1440*60s:48*3600s")
    # Points generated at the precision of the second retention stage (60s).
    _POINTS = bg_test_utils._make_easily_queryable_points(
        start=_POINTS_START, end=_POINTS_END, period=_RETENTION[1].precision,
    )
    _METRIC = bg_test_utils.make_metric(_METRIC_NAME, retention=_RETENTION)

    def setUp(self):
        """Create the metric, insert its points, and build finder + reader."""
        super(TestReader, self).setUp()
        self.accessor.connect()
        self.accessor.create_metric(self._METRIC)
        self.accessor.insert_points(self._METRIC, self._POINTS)
        self.accessor.flush()
        self.finder = bg_graphite.Finder(
            accessor=self.accessor,
            metadata_cache=self.metadata_cache,
        )

        # Make sure that carbonlink is enabled.
        # Must happen before finder.carbonlink() below reads the setting.
        from django.conf import settings as django_settings
        django_settings.CARBONLINK_HOSTS = ['localhost:12345']

        self.carbonlink = self.finder.carbonlink()
        self.reader = bg_graphite.Reader(
            self.accessor, self.metadata_cache, self.carbonlink, _METRIC_NAME)

    def fetch(self, *args, **kwargs):
        """Fetch through the reader, resolving asynchronous results."""
        result = self.reader.fetch(*args, **kwargs)
        # Readers can return a list or an object.
        # NOTE(review): FetchInProgress is truth-tested before isinstance —
        # presumably it is None/absent on some Graphite versions; confirm.
        if bg_graphite.FetchInProgress:
            if isinstance(result, bg_graphite.FetchInProgress):
                result = result.waitForResults()

        return result

    def test_fetch_non_existing(self):
        """Fetching an unknown metric yields None-filled points, not a crash."""
        self.reader._metric_name = 'broken.name'
        (start, end, step), points = self.fetch(
            start_time=self._POINTS_START + 3,
            end_time=self._POINTS_END - 3,
            now=self._POINTS_END + 10,
        )
        # Check that this returns at least one None.
        self.assertEqual(points[0], None)

    def test_fresh_read(self):
        """A read over the injected window returns aligned, complete data."""
        (start, end, step), points = self.fetch(
            start_time=self._POINTS_START + 3,
            end_time=self._POINTS_END - 3,
            now=self._POINTS_END + 10,
        )
        self.assertEqual(self._RETENTION[1].precision, step)
        # We expect these to have been rounded to match precision.
        self.assertEqual(self._POINTS_START, start)
        self.assertEqual(self._POINTS_END, end)

        expected_points = list(range((end - start) // step))
        self.assertEqual(expected_points, points)

    def test_carbon_protocol_read(self):
        """Points supplied by carbonlink are returned at 1-second resolution."""
        metric_name = 'fake.name'
        # NOTE(review): the metric is created under _METRIC_NAME while the
        # reader queries metric_name — the mocked carbonlink supplies the
        # data so the test passes, but confirm the mismatch is intentional.
        metric = bg_test_utils.make_metric(_METRIC_NAME)
        # Custom aggregator to make sure all goes right.
        metric.metadata.aggregator = bg_accessor.Aggregator.minimum
        self.accessor.create_metric(metric)
        self.accessor.flush()
        self.reader = bg_graphite.Reader(
            self.accessor, self.metadata_cache, self.carbonlink, metric_name
        )

        with mock.patch('graphite.carbonlink.CarbonLinkPool.query') as carbonlink_query_mock:
            carbonlink_query_mock.return_value = [
                (864005.0, 100.0), (864065.0, 101.0), (864125.0, 102.0)
            ]

            (start, end, step), points = self.fetch(
                start_time=self._POINTS_START + 3,
                end_time=self._POINTS_END - 3,
                now=self._POINTS_END + 10,
            )

            # Check that we really have a 1sec resolution
            self.assertEqual(start, self._POINTS_START + 3)
            self.assertEqual(end, self._POINTS_END - 3)
            self.assertEqual(step, 1)
            # Check that this returns at least one value different from None.
            self.assertEqual(len(points), end - start)
            # Check that at least one point is at the correct place.
            self.assertEqual(points[864005 - start], 100.0)

    def test_get_intervals(self):
        """get_intervals() spans the whole retention, ending at rounded now."""
        # start and end are the expected results, aligned on the precision
        now_rounded = 10000000 * self._RETENTION[2].precision
        now = now_rounded - 3
        res = self.reader.get_intervals(now=now)
        self.assertEqual(self._RETENTION.duration, res.size)
        self.assertEqual(1, len(res.intervals))
        self.assertEqual(now_rounded, res.intervals[0].end)
示例#34
0
 def test_dir(self):
     """dir() on a metric must list its accessor-visible attributes."""
     attributes = dir(bg_test_utils.make_metric("a.b.c"))
     self.assertIn("name", attributes)
     self.assertIn("carbon_xfilesfactor", attributes)
示例#35
0
 def test_setattr(self):
     """Assigning to a metric attribute must raise AttributeError."""
     metric = bg_test_utils.make_metric("test")
     self.assertTrue(hasattr(metric, "carbon_xfilesfactor"))
     self.assertRaises(
         AttributeError, setattr, metric, "carbon_xfilesfactor", 0.5)
示例#36
0
class TestReader(bg_test_utils.TestCaseWithFakeAccessor):
    """Tests of the Graphite Reader plugin backed by a fake accessor."""

    # One hour of points starting ten days after the epoch.
    _POINTS_START = 3600 * 24 * 10
    _POINTS_END = _POINTS_START + 3600
    _RETENTION = bg_accessor.Retention.from_string("20*15s:1440*60s:48*3600s")
    # Points generated at the precision of the second retention stage (60s).
    _POINTS = _make_easily_queryable_points(
        start=_POINTS_START,
        end=_POINTS_END,
        period=_RETENTION[1].precision,
    )
    _METRIC = bg_test_utils.make_metric(_METRIC_NAME, retention=_RETENTION)

    def setUp(self):
        """Create the metric, insert its points, and build finder + reader."""
        super(TestReader, self).setUp()
        self.accessor.connect()
        self.accessor.create_metric(self._METRIC)
        self.accessor.insert_points(self._METRIC, self._POINTS)
        self.accessor.flush()
        self.finder = bg_graphite.Finder(
            accessor=self.accessor,
            metadata_cache=self.metadata_cache,
        )
        self.carbonlink = self.finder.carbonlink()
        self.reader = bg_graphite.Reader(self.accessor, self.metadata_cache,
                                         self.carbonlink, _METRIC_NAME)

    def fetch(self, *args, **kwargs):
        """Fetch through the reader, resolving asynchronous results."""
        result = self.reader.fetch(*args, **kwargs)
        # Readers can return a list or an object.
        if isinstance(result, readers.FetchInProgress):
            result = result.waitForResults()
        return result

    def test_fetch_non_existing(self):
        """Fetching an unknown metric yields None-filled points, not a crash."""
        self.reader._metric_name = 'broken.name'
        (start, end, step), points = self.fetch(
            start_time=self._POINTS_START + 3,
            end_time=self._POINTS_END - 3,
            now=self._POINTS_END + 10,
        )
        # Check that this returns at least one None.
        self.assertEqual(points[0], None)

    def test_fresh_read(self):
        """A read over the injected window returns aligned, complete data."""
        (start, end, step), points = self.fetch(
            start_time=self._POINTS_START + 3,
            end_time=self._POINTS_END - 3,
            now=self._POINTS_END + 10,
        )
        self.assertEqual(self._RETENTION[1].precision, step)
        # We expect these to have been rounded to match precision.
        self.assertEqual(self._POINTS_START, start)
        self.assertEqual(self._POINTS_END, end)

        # BUGFIX: wrap range() in list() — on Python 3 a range object never
        # compares equal to a list, which made this assertion always fail.
        # The sibling TestReader class already uses list(range(...)).
        expected_points = list(range((end - start) // step))
        self.assertEqual(expected_points, points)

    def test_carbon_protocol_read(self):
        """Points for a non-existing metric must come from carbonlink."""
        self.reader._metric_name = 'fake.name'
        with mock.patch('graphite.carbonlink.CarbonLink.query'
                        ) as carbonlink_query_mock:
            carbonlink_query_mock.return_value = [(864005.0, 100.0),
                                                  (864065.0, 101.0),
                                                  (864125.0, 102.0)]

            (start, end, step), points = self.fetch(
                start_time=self._POINTS_START + 3,
                end_time=self._POINTS_END - 3,
                now=self._POINTS_END + 10,
            )
        # Check that this returns at least one value different from None.
        self.assertNotEqual(points[0], None)

    def test_get_intervals(self):
        """get_intervals() spans the whole retention, ending at rounded now."""
        # start and end are the expected results, aligned on the precision
        now_rounded = 10000000 * self._RETENTION[2].precision
        now = now_rounded - 3
        res = self.reader.get_intervals(now=now)

        self.assertEqual(self._RETENTION.duration, res.size)
        self.assertEqual(1, len(res.intervals))
        self.assertEqual(now_rounded, res.intervals[0].end)
示例#37
0
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

import unittest

from biggraphite import accessor as bg_accessor
from biggraphite import test_utils as bg_test_utils
from biggraphite.drivers import cassandra as bg_cassandra

# Metric fixture shared by the point-query tests below.
_METRIC = bg_test_utils.make_metric("test.metric")

# Points test query.
_QUERY_RANGE = 3600
_QUERY_START = 1000 * _QUERY_RANGE
_QUERY_END = _QUERY_START + _QUERY_RANGE

# Points injected in the test DB, a superset of above.
_EXTRA_POINTS = 1000
_POINTS_START = _QUERY_START - _EXTRA_POINTS
_POINTS_END = _QUERY_END + _EXTRA_POINTS
_POINTS = [(t, v) for v, t in enumerate(xrange(_POINTS_START, _POINTS_END))]
_USEFUL_POINTS = _POINTS[_EXTRA_POINTS:-_EXTRA_POINTS]
assert _QUERY_RANGE == len(_USEFUL_POINTS)