Example #1
def test_resize_policy(self):
    name = str(uuid.uuid4())
    ap = archive_policy.ArchivePolicy(name, 0, [(3, 5)])
    self.index.create_archive_policy(ap)
    m = storage.Metric(uuid.uuid4(), ap)
    self.index.create_metric(m.id, str(uuid.uuid4()), str(uuid.uuid4()),
                             name)
    self.storage.add_measures(m, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 0), 1),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 1),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 10), 1),
    ])
    self.storage.process_background_tasks(self.index, sync=True)
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
    ], self.storage.get_measures(m))
    # expand to more points
    self.index.update_archive_policy(
        name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)])
    self.storage.add_measures(m, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 15), 1),
    ])
    self.storage.process_background_tasks(self.index, sync=True)
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0),
    ], self.storage.get_measures(m))
    # shrink timespan
    self.index.update_archive_policy(
        name, [archive_policy.ArchivePolicyItem(granularity=5, points=2)])
    # aggregates are unchanged after the update when no new samples arrive
    self.storage.process_background_tasks(self.index, sync=True)
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0),
    ], self.storage.get_measures(m))
    # drop points
    self.storage.add_measures(m, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 20), 1),
    ])
    self.storage.process_background_tasks(self.index, sync=True)
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 20), 5.0, 1.0),
    ], self.storage.get_measures(m))
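
Taken together, the assertions in Example #1 pin down the retention rule: each granularity keeps at most `points` aggregates, the oldest are dropped first, and in this version the truncation is only applied when new measures are processed (all four points survive the shrink to points=2 until the 12:00:20 measure arrives). A minimal sketch of the rule itself, assuming aggregates are (timestamp, granularity, value) tuples sorted by timestamp as get_measures() returns them; the helper name is hypothetical:

def truncate_aggregates(aggregates, points):
    # Keep only the most recent `points` aggregates, dropping the oldest.
    return aggregates[-points:] if points > 0 else []

# With points=2, the five aggregates from 12:00:00 through 12:00:20
# collapse to the last two, matching the final assertion above.
history = [(0, 5.0, 1.0), (5, 5.0, 1.0), (10, 5.0, 1.0),
           (15, 5.0, 1.0), (20, 5.0, 1.0)]
assert truncate_aggregates(history, 2) == [(15, 5.0, 1.0), (20, 5.0, 1.0)]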
Example #2
def test_resize_policy(self):
    name = str(uuid.uuid4())
    ap = archive_policy.ArchivePolicy(name, 0, [(3, 5)])
    self.index.create_archive_policy(ap)
    m = self.index.create_metric(uuid.uuid4(), str(uuid.uuid4()),
                                 str(uuid.uuid4()), name)
    m = self.index.list_metrics(ids=[m.id])[0]
    self.storage.add_measures(m, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 0), 1),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 1),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 10), 1),
    ])
    self.trigger_processing(self.storage, self.index)
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
    ], self.storage.get_measures(m))
    # expand to more points
    self.index.update_archive_policy(
        name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)])
    m = self.index.list_metrics(ids=[m.id])[0]
    self.storage.add_measures(m, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 15), 1),
    ])
    self.trigger_processing(self.storage, self.index)
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0),
    ], self.storage.get_measures(m))
    # shrink timespan
    self.index.update_archive_policy(
        name, [archive_policy.ArchivePolicyItem(granularity=5, points=2)])
    m = self.index.list_metrics(ids=[m.id])[0]
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0),
    ], self.storage.get_measures(m))
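
Unlike Example #1, this version never reprocesses after the shrink: it re-fetches the metric through list_metrics() after each update_archive_policy() call and expects get_measures() to honor the new two-point limit directly. Consistent with the calls above, the refresh matters because the archive policy travels with the indexer-returned metric object, so a stale object would still describe the old policy. A minimal sketch of that pattern (the helper name is hypothetical):

def refresh_metric(index, metric):
    # Re-read the metric so the archive policy attached to it is current.
    return index.list_metrics(ids=[metric.id])[0]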
Example #3
def test_resize_policy(self):
    name = str(uuid.uuid4())
    ap = archive_policy.ArchivePolicy(name, 0, [(3, 5)])
    self.index.create_archive_policy(ap)
    m = self.index.create_metric(uuid.uuid4(), str(uuid.uuid4()), name)
    m = self.index.list_metrics(attribute_filter={"=": {"id": m.id}})[0]
    self.incoming.add_measures(m.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 0), 1),
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 1),
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 10), 1),
    ])
    self.trigger_processing([str(m.id)])
    self.assertEqual([
        (datetime64(2014, 1, 1, 12, 0, 0), numpy.timedelta64(5, 's'), 1),
        (datetime64(2014, 1, 1, 12, 0, 5), numpy.timedelta64(5, 's'), 1),
        (datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1),
    ], self.storage.get_measures(m, [numpy.timedelta64(5, 's')]))
    # expand to more points
    self.index.update_archive_policy(
        name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)])
    m = self.index.list_metrics(attribute_filter={"=": {"id": m.id}})[0]
    self.incoming.add_measures(m.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 15), 1),
    ])
    self.trigger_processing([str(m.id)])
    self.assertEqual([
        (datetime64(2014, 1, 1, 12, 0, 0), numpy.timedelta64(5, 's'), 1),
        (datetime64(2014, 1, 1, 12, 0, 5), numpy.timedelta64(5, 's'), 1),
        (datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1),
        (datetime64(2014, 1, 1, 12, 0, 15), numpy.timedelta64(5, 's'), 1),
    ], self.storage.get_measures(m, [numpy.timedelta64(5, 's')]))
    # shrink timespan
    self.index.update_archive_policy(
        name, [archive_policy.ArchivePolicyItem(granularity=5, points=2)])
    m = self.index.list_metrics(attribute_filter={"=": {"id": m.id}})[0]
    self.assertEqual([
        (datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1),
        (datetime64(2014, 1, 1, 12, 0, 15), numpy.timedelta64(5, 's'), 1),
    ], self.storage.get_measures(m, [numpy.timedelta64(5, 's')]))
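
Example #3 calls a datetime64() helper that is not shown. A plausible definition, consistent with how it is called above and with numpy.datetime64 accepting datetime.datetime values (a sketch under that assumption, not necessarily the verbatim upstream helper):

import datetime

import numpy

def datetime64(*args):
    # Build a numpy.datetime64 from calendar components,
    # e.g. datetime64(2014, 1, 1, 12, 0, 5).
    return numpy.datetime64(datetime.datetime(*args))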
Example #4
class TestCase(base.BaseTestCase):

    ARCHIVE_POLICIES = {
        'no_granularity_match': archive_policy.ArchivePolicy(
            "no_granularity_match",
            0,
            [
                # 2 second resolution for two days
                archive_policy.ArchivePolicyItem(
                    granularity=2, points=3600 * 24),
                ],
        ),
    }

    @staticmethod
    def path_get(project_file=None):
        root = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                            '..',
                                            '..',
                                            )
                               )
        if project_file:
            return os.path.join(root, project_file)
        return root

    def setUp(self):
        super(TestCase, self).setUp()
        self.conf = service.prepare_service([],
                                            default_config_files=[])
        self.conf.set_override('policy_file',
                               self.path_get('etc/gnocchi/policy.json'),
                               group="oslo_policy")

        self.index = indexer.get_driver(self.conf)
        self.index.connect()

        # NOTE(jd) Some drivers, at least SQLAlchemy, can't create all
        # their tables in a single transaction even with checkfirst=True,
        # so we force the upgrade code path to be sequential to avoid
        # race conditions as the tests run in parallel.
        self.coord = coordination.get_coordinator(
            self.conf.storage.coordination_url,
            str(uuid.uuid4()).encode('ascii'))

        self.coord.start()

        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            # Force upgrading using Alembic rather than creating the
            # database from scratch so we are sure we don't miss anything
            # in the Alembic upgrades. We have a test to check that
            # upgrades == create but it misses things such as custom CHECK
            # constraints.
            self.index.upgrade(nocreate=True)

        self.coord.stop()

        self.archive_policies = self.ARCHIVE_POLICIES.copy()
        self.archive_policies.update(archive_policy.DEFAULT_ARCHIVE_POLICIES)
        # Used in gnocchi.gendoc
        if not getattr(self, "skip_archive_policies_creation", False):
            for name, ap in six.iteritems(self.archive_policies):
                # Create basic archive policies
                try:
                    self.index.create_archive_policy(ap)
                except indexer.ArchivePolicyAlreadyExists:
                    pass

        if swexc:
            self.useFixture(mockpatch.Patch(
                'swiftclient.client.Connection',
                FakeSwiftClient))

        self.useFixture(mockpatch.Patch('gnocchi.storage.ceph.rados',
                                        FakeRadosModule()))

        self.conf.set_override(
            'driver',
            os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "null"),
            'storage')

        if self.conf.storage.driver == 'file':
            tempdir = self.useFixture(fixtures.TempDir())
            self.conf.set_override('file_basepath',
                                   tempdir.path,
                                   'storage')
        elif self.conf.storage.driver == 'influxdb':
            self.conf.set_override('influxdb_block_until_data_ingested', True,
                                   'storage')
            self.conf.set_override('influxdb_database', 'test', 'storage')
            self.conf.set_override('influxdb_password', 'root', 'storage')
            self.conf.set_override('influxdb_port',
                                   os.getenv("GNOCCHI_TEST_INFLUXDB_PORT",
                                             51234), 'storage')
            # NOTE(ityaptin) Creating a unique database for every test may
            # cause tests to fail by timeout, but it may be useful in some
            # cases.
            if os.getenv("GNOCCHI_TEST_INFLUXDB_UNIQUE_DATABASES"):
                self.conf.set_override("influxdb_database",
                                       "gnocchi_%s" % uuid.uuid4().hex,
                                       'storage')

        self.storage = storage.get_driver(self.conf)
        # NOTE(jd) Do not upgrade the storage. We don't really need the
        # storage upgrade for now, and the code that upgrades from pre-1.3
        # (TimeSerieArchive) uses a lot of parallel locks, which makes tooz
        # explode because MySQL does not support that many connections in
        # real life.
        # self.storage.upgrade(self.index)

        self.mgr = extension.ExtensionManager('gnocchi.aggregates',
                                              invoke_on_load=True)
        self.custom_agg = dict((x.name, x.obj) for x in self.mgr)

    def tearDown(self):
        self.index.disconnect()
        self.storage.stop()
        super(TestCase, self).tearDown()
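
The setUp() above serializes the Alembic upgrade across parallel test workers with a tooz lock. Stripped of the fixture plumbing, the coordination pattern looks like this; the file:// backend URL is only an example, and any shared tooz backend would do:

import uuid

from tooz import coordination

coord = coordination.get_coordinator(
    "file:///tmp/tooz-locks",               # example backend URL
    str(uuid.uuid4()).encode('ascii'))      # unique member id
coord.start()
try:
    with coord.get_lock(b"gnocchi-tests-db-lock"):
        pass  # run the schema upgrade here, one worker at a time
finally:
    coord.stop()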
Example #5
class TestCase(BaseTestCase):

    REDIS_DB_INDEX = 0
    REDIS_DB_LOCK = threading.Lock()

    ARCHIVE_POLICIES = {
        'no_granularity_match':
        archive_policy.ArchivePolicy(
            "no_granularity_match",
            0,
            [
                # 2 second resolution for two days
                archive_policy.ArchivePolicyItem(granularity=2,
                                                 points=3600 * 24),
            ],
        ),
        'low':
        archive_policy.ArchivePolicy(
            "low",
            0,
            [
                # 5 minutes resolution for an hour
                archive_policy.ArchivePolicyItem(granularity=300, points=12),
                # 1 hour resolution for a day
                archive_policy.ArchivePolicyItem(granularity=3600, points=24),
                # 1 day resolution for a month
                archive_policy.ArchivePolicyItem(granularity=3600 * 24,
                                                 points=30),
            ],
        ),
        'medium':
        archive_policy.ArchivePolicy(
            "medium",
            0,
            [
                # 1 minute resolution for a day
                archive_policy.ArchivePolicyItem(granularity=60,
                                                 points=60 * 24),
                # 1 hour resolution for a week
                archive_policy.ArchivePolicyItem(granularity=3600,
                                                 points=7 * 24),
                # 1 day resolution for a year
                archive_policy.ArchivePolicyItem(granularity=3600 * 24,
                                                 points=365),
            ],
        ),
        'high':
        archive_policy.ArchivePolicy(
            "high",
            0,
            [
                # 1 second resolution for an hour
                archive_policy.ArchivePolicyItem(granularity=1, points=3600),
                # 1 minute resolution for a week
                archive_policy.ArchivePolicyItem(granularity=60,
                                                 points=60 * 24 * 7),
                # 1 hour resolution for a year
                archive_policy.ArchivePolicyItem(granularity=3600,
                                                 points=365 * 24),
            ],
        ),
    }

    @classmethod
    def setUpClass(self):
        super(TestCase, self).setUpClass()
        self.conf = service.prepare_service([], default_config_files=[])
        if not os.getenv("GNOCCHI_TEST_DEBUG"):
            daiquiri.setup(outputs=[])

        py_root = os.path.abspath(
            os.path.join(
                os.path.dirname(__file__),
                '..',
            ))
        self.conf.set_override('paste_config',
                               os.path.join(py_root, 'rest', 'api-paste.ini'),
                               group="api")
        self.conf.set_override('policy_file',
                               os.path.join(py_root, 'rest', 'policy.json'),
                               group="oslo_policy")

        # NOTE(jd) This allows testing S3 on AWS
        if not os.getenv("AWS_ACCESS_KEY_ID"):
            self.conf.set_override('s3_endpoint_url',
                                   os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
                                   group="storage")
            self.conf.set_override('s3_access_key_id',
                                   "gnocchi",
                                   group="storage")
            self.conf.set_override('s3_secret_access_key',
                                   "anythingworks",
                                   group="storage")

        self.index = indexer.get_driver(self.conf)
        self.index.connect()

        # NOTE(jd) Some drivers, at least SQLAlchemy, can't create all
        # their tables in a single transaction even with checkfirst=True,
        # so we force the upgrade code path to be sequential to avoid
        # race conditions as the tests run in parallel.
        self.coord = coordination.get_coordinator(
            self.conf.storage.coordination_url,
            str(uuid.uuid4()).encode('ascii'))

        self.coord.start(start_heart=True)

        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            self.index.upgrade()

        self.coord.stop()

        self.archive_policies = self.ARCHIVE_POLICIES.copy()
        for name, ap in six.iteritems(self.archive_policies):
            # Create basic archive policies
            try:
                self.index.create_archive_policy(ap)
            except indexer.ArchivePolicyAlreadyExists:
                pass

        storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")
        self.conf.set_override('driver', storage_driver, 'storage')
        if storage_driver == 'ceph':
            self.conf.set_override('ceph_conffile', os.getenv("CEPH_CONF"),
                                   'storage')

    def setUp(self):
        super(TestCase, self).setUp()
        if swexc:
            self.useFixture(
                fixtures.MockPatch('swiftclient.client.Connection',
                                   FakeSwiftClient))

        if self.conf.storage.driver == 'file':
            tempdir = self.useFixture(fixtures.TempDir())
            self.conf.set_override('file_basepath', tempdir.path, 'storage')
        elif self.conf.storage.driver == 'ceph':
            pool_name = uuid.uuid4().hex
            with open(os.devnull, 'w') as f:
                subprocess.call("rados -c %s mkpool %s" %
                                (os.getenv("CEPH_CONF"), pool_name),
                                shell=True,
                                stdout=f,
                                stderr=subprocess.STDOUT)
            self.conf.set_override('ceph_pool', pool_name, 'storage')

        # Override the bucket prefix to be unique to avoid concurrent access
        # with any other test
        self.conf.set_override("s3_bucket_prefix",
                               str(uuid.uuid4())[:26], "storage")

        self.storage = storage.get_driver(self.conf)
        self.incoming = incoming.get_driver(self.conf)

        if self.conf.storage.driver == 'redis':
            # Create one prefix per test
            self.storage.STORAGE_PREFIX = str(uuid.uuid4())

        if self.conf.incoming.driver == 'redis':
            self.incoming.SACK_PREFIX = str(uuid.uuid4())

        self.storage.upgrade()
        self.incoming.upgrade(128)

    def tearDown(self):
        self.index.disconnect()
        self.storage.stop()
        super(TestCase, self).tearDown()
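
Each ArchivePolicyItem above is sized so that granularity (in seconds) times points equals the timespan named in its comment; the snippet below simply replays that arithmetic for the low/medium/high policies. Note that no_granularity_match is the odd one out: 2 s x 86400 points works out to two days.

# granularity (seconds) * points = retained timespan
for name, items in [
        ("low", [(300, 12), (3600, 24), (3600 * 24, 30)]),
        ("medium", [(60, 60 * 24), (3600, 7 * 24), (3600 * 24, 365)]),
        ("high", [(1, 3600), (60, 60 * 24 * 7), (3600, 365 * 24)]),
]:
    for granularity, points in items:
        print("%s: %ds x %d points = %.2f days"
              % (name, granularity, points, granularity * points / 86400.0))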