def _inject(inc, coord, store, idx, metrics, measures,
            archive_policy_name="low", process=False, interval=None):
    LOG.info("Creating %d metrics", metrics)
    with utils.StopWatch() as sw:
        metric_ids = [
            idx.create_metric(uuid.uuid4(), "admin",
                              archive_policy_name).id
            for _ in range(metrics)
        ]
    LOG.info("Created %d metrics in %.2fs", metrics, sw.elapsed())

    LOG.info("Generating %d measures per metric for %d metrics… ",
             measures, metrics)
    now = numpy.datetime64(utils.utcnow())
    with utils.StopWatch() as sw:
        # NOTE: ``measures`` is rebound here from the per-metric count to the
        # metric-id → list-of-Measure batch that add_measures_batch() expects.
        measures = {
            m_id: [
                # numpy.timedelta64 takes a (value, unit) pair, not keywords.
                incoming.Measure(now + numpy.timedelta64(s, 's'),
                                 random.randint(-999999, 999999))
                for s in range(measures)
            ]
            for m_id in metric_ids
        }
    LOG.info("… done in %.2fs", sw.elapsed())

    interval_timer = utils.StopWatch().start()

    while True:
        interval_timer.reset()
        with utils.StopWatch() as sw:
            inc.add_measures_batch(measures)
        total_measures = sum(map(len, measures.values()))
        LOG.info("Pushed %d measures in %.2fs",
                 total_measures, sw.elapsed())

        if process:
            c = chef.Chef(coord, inc, idx, store)

            with utils.StopWatch() as sw:
                for s in inc.iter_sacks():
                    c.process_new_measures_for_sack(s, blocking=True)
            LOG.info("Processed %d sacks in %.2fs",
                     inc.NUM_SACKS, sw.elapsed())
            LOG.info("Speed: %.2f measures/s",
                     float(total_measures) / sw.elapsed())

        if interval is None:
            break

        time.sleep(max(0, interval - interval_timer.elapsed()))

    return total_measures
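# A hypothetical driver for the _inject() helper above, sketched only from the
# APIs visible in this section: the drivers come from *.get_driver(conf) and
# the coordinator from gnocchi.cli.metricd.get_coordinator_and_start(), as in
# the fixtures below. ``conf`` is assumed to be an already prepared Gnocchi
# configuration; the metric/measure counts are illustrative, not canonical.
import uuid

from gnocchi import incoming
from gnocchi import indexer
from gnocchi import storage
from gnocchi.cli import metricd


def run_injection_benchmark(conf):
    coord = metricd.get_coordinator_and_start(str(uuid.uuid4()),
                                              conf.coordination_url)
    store = storage.get_driver(conf)
    inc = incoming.get_driver(conf)
    idx = indexer.get_driver(conf)
    # Create 10 metrics, push 100 measures into each, and process them in the
    # same run (process=True); interval=None means a single iteration.
    return _inject(inc, coord, store, idx,
                   metrics=10, measures=100,
                   archive_policy_name="low", process=True)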
def on_route(self, state):
    state.request.coordinator = self._lazy_load('coordinator')
    state.request.storage = self._lazy_load('storage')
    state.request.indexer = self._lazy_load('indexer')
    state.request.incoming = self._lazy_load('incoming')
    state.request.chef = chef.Chef(
        state.request.coordinator,
        state.request.incoming,
        state.request.indexer,
        state.request.storage,
    )
    state.request.conf = self.conf
    state.request.policy_enforcer = self.policy_enforcer
    state.request.auth_helper = self.auth_helper
def _configure(self):
    member_id = "%s.%s.%s" % (socket.gethostname(),
                              self.worker_id,
                              # NOTE(jd) Still use a uuid here so we're
                              # sure there's no conflict in case of
                              # crash/restart
                              str(uuid.uuid4()))
    self.coord = get_coordinator_and_start(member_id,
                                           self.conf.coordination_url)
    self.store = storage.get_driver(self.conf)
    self.incoming = incoming.get_driver(self.conf)
    self.index = indexer.get_driver(self.conf)
    self.chef = chef.Chef(self.coord, self.incoming,
                          self.index, self.store)
def metricd_tester(conf):
    # NOTE(sileht): This method is designed to be profiled. We want to
    # avoid issues between the profiler and os.fork(), which is why we
    # don't use the MetricdServiceManager.
    index = indexer.get_driver(conf)
    s = storage.get_driver(conf)
    inc = incoming.get_driver(conf)
    metrics = set()
    for sack in inc.iter_sacks():
        metrics.update(inc.list_metric_with_measures_to_process(sack))
        if len(metrics) >= conf.stop_after_processing_metrics:
            break
    c = chef.Chef(None, inc, index, s)
    c.process_new_measures(
        list(metrics)[:conf.stop_after_processing_metrics], True)
def metricd_tester(conf):
    # NOTE(sileht): This method is designed to be profiled. We want to
    # avoid issues between the profiler and os.fork(), which is why we
    # don't use the MetricdServiceManager.
    index = indexer.get_driver(conf)
    s = storage.get_driver(conf)
    inc = incoming.get_driver(conf)
    c = chef.Chef(None, inc, index, s)
    metrics_count = 0
    for sack in inc.iter_sacks():
        try:
            # Process the sack itself, not the storage driver.
            metrics_count += c.process_new_measures_for_sack(sack, True)
        except chef.SackAlreadyLocked:
            continue
        if metrics_count >= conf.stop_after_processing_metrics:
            break
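# The NOTE above says metricd_tester() is written so it can be profiled
# without os.fork() getting in the way. A minimal sketch of wrapping it with
# the standard-library profiler; ``conf`` is assumed to be a prepared
# configuration exposing stop_after_processing_metrics, and the output path
# is illustrative.
import cProfile
import pstats


def profile_metricd_tester(conf, output_file="metricd_tester.prof"):
    profiler = cProfile.Profile()
    profiler.enable()
    try:
        metricd_tester(conf)
    finally:
        profiler.disable()
    profiler.dump_stats(output_file)
    # Show the 20 most expensive calls by cumulative time.
    pstats.Stats(output_file).sort_stats("cumulative").print_stats(20)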
def setUp(self):
    super(TestCase, self).setUp()

    if swexc:
        self.useFixture(fixtures.MockPatch(
            'swiftclient.client.Connection',
            FakeSwiftClient))

    if self.conf.storage.driver == 'file':
        tempdir = self.useFixture(fixtures.TempDir())
        self.conf.set_override('file_basepath',
                               tempdir.path,
                               'storage')
    elif self.conf.storage.driver == 'ceph':
        pool_name = uuid.uuid4().hex
        with open(os.devnull, 'w') as f:
            subprocess.call("rados -c %s mkpool %s" % (
                os.getenv("CEPH_CONF"), pool_name),
                shell=True, stdout=f, stderr=subprocess.STDOUT)
        self.conf.set_override('ceph_pool', pool_name, 'storage')

    # Override the bucket prefix to be unique to avoid concurrent access
    # with any other test
    self.conf.set_override("s3_bucket_prefix", str(uuid.uuid4())[:26],
                           "storage")

    self.storage = storage.get_driver(self.conf)
    self.incoming = incoming.get_driver(self.conf)

    if self.conf.storage.driver == 'redis':
        # Create one prefix per test
        self.storage.STORAGE_PREFIX = str(uuid.uuid4()).encode()

    if self.conf.incoming.driver == 'redis':
        self.incoming.SACK_NAME_FORMAT = (
            str(uuid.uuid4()) + incoming.IncomingDriver.SACK_NAME_FORMAT
        )

    self.storage.upgrade()
    self.incoming.upgrade(128)

    self.chef = chef.Chef(
        self.coord, self.incoming, self.index, self.storage)
def setUp(self):
    super(TestCase, self).setUp()

    self.conf = service.prepare_service(
        [], conf=utils.prepare_conf(),
        default_config_files=[],
        logging_level=logging.DEBUG,
        skip_log_opts=True)

    self.index = indexer.get_driver(self.conf)

    self.coord = metricd.get_coordinator_and_start(
        str(uuid.uuid4()),
        self.conf.coordination_url)

    # NOTE(jd) Some drivers, at least SQLAlchemy, can't create all their
    # tables in a single transaction even with checkfirst=True, so we
    # force the upgrade code path to be sequential to avoid race
    # conditions as the tests run in parallel.
    with self.coord.get_lock(b"gnocchi-tests-db-lock"):
        self.index.upgrade()

    self.archive_policies = self.ARCHIVE_POLICIES.copy()
    for name, ap in six.iteritems(self.archive_policies):
        # Create basic archive policies
        try:
            self.index.create_archive_policy(ap)
        except indexer.ArchivePolicyAlreadyExists:
            pass

    py_root = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                           '..',))
    self.conf.set_override('paste_config',
                           os.path.join(py_root, 'rest', 'api-paste.ini'),
                           group="api")
    self.conf.set_override('policy_file',
                           os.path.join(py_root, 'rest', 'policy.yaml'),
                           group="oslo_policy")

    # NOTE(jd) This allows testing S3 on AWS
    if not os.getenv("AWS_ACCESS_KEY_ID"):
        self.conf.set_override('s3_endpoint_url',
                               os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
                               group="storage")
        self.conf.set_override('s3_access_key_id', "gnocchi",
                               group="storage")
        self.conf.set_override('s3_secret_access_key', "anythingworks",
                               group="storage")

    storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")
    self.conf.set_override('driver', storage_driver, 'storage')

    if swexc:
        self.useFixture(fixtures.MockPatch(
            'swiftclient.client.Connection',
            FakeSwiftClient))

    if self.conf.storage.driver == 'file':
        tempdir = self.useFixture(fixtures.TempDir())
        self.conf.set_override('file_basepath',
                               tempdir.path,
                               'storage')
    elif self.conf.storage.driver == 'ceph':
        self.conf.set_override('ceph_conffile',
                               os.getenv("CEPH_CONF"),
                               'storage')
        self.ceph_pool_name = uuid.uuid4().hex
        with open(os.devnull, 'w') as f:
            subprocess.call(("ceph -c %s osd pool create %s "
                             "16 16 replicated") % (
                os.getenv("CEPH_CONF"), self.ceph_pool_name),
                shell=True, stdout=f, stderr=subprocess.STDOUT)
            subprocess.call(("ceph -c %s osd pool application "
                             "enable %s rbd") % (
                os.getenv("CEPH_CONF"), self.ceph_pool_name),
                shell=True, stdout=f, stderr=subprocess.STDOUT)
        self.conf.set_override('ceph_pool', self.ceph_pool_name, 'storage')

    # Override the bucket prefix to be unique to avoid concurrent access
    # with any other test
    self.conf.set_override("s3_bucket_prefix", str(uuid.uuid4())[:26],
                           "storage")

    self.storage = storage.get_driver(self.conf)
    self.incoming = incoming.get_driver(self.conf)

    if self.conf.storage.driver == 'redis':
        # Create one prefix per test
        self.storage.STORAGE_PREFIX = str(uuid.uuid4()).encode()

    if self.conf.incoming.driver == 'redis':
        self.incoming.SACK_NAME_FORMAT = (
            str(uuid.uuid4()) + incoming.IncomingDriver.SACK_NAME_FORMAT
        )

    self.storage.upgrade()
    self.incoming.upgrade(3)

    self.chef = chef.Chef(
        self.coord, self.incoming, self.index, self.storage)
def start_fixture(self):
    """Create necessary temp files and do the config dance."""
    global LOAD_APP_KWARGS

    if not os.getenv("GNOCCHI_TEST_DEBUG"):
        self.output = base.CaptureOutput()
        self.output.setUp()

    data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi')

    if os.getenv("GABBI_LIVE"):
        dcf = None
    else:
        dcf = []
    conf = service.prepare_service([], conf=utils.prepare_conf(),
                                   default_config_files=dcf,
                                   logging_level=logging.DEBUG,
                                   skip_log_opts=True)

    py_root = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                           '..', '..',))
    conf.set_override('paste_config',
                      os.path.join(py_root, 'rest', 'api-paste.ini'),
                      group="api")
    conf.set_override('policy_file',
                      os.path.join(py_root, 'rest', 'policy.yaml'),
                      group="oslo_policy")

    # NOTE(sileht): This is not concurrency safe, but only this test file
    # deals with CORS, so we are fine. set_override doesn't work because
    # the cors group doesn't exist yet; when the CORS middleware is
    # created, it registers the options and directly copies the values of
    # all configuration options, making it impossible to override them
    # properly afterwards...
    cfg.set_defaults(cors.CORS_OPTS, allowed_origin="http://foobar.com")

    self.conf = conf
    self.tmp_dir = data_tmp_dir

    if conf.indexer.url is None:
        raise case.SkipTest("No indexer configured")

    storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")
    conf.set_override('driver', storage_driver, 'storage')

    if conf.storage.driver == 'file':
        conf.set_override('file_basepath', data_tmp_dir, 'storage')
    elif conf.storage.driver == 'ceph':
        conf.set_override('ceph_conffile', os.getenv("CEPH_CONF"),
                          'storage')
        self.ceph_pool_name = uuid.uuid4().hex
        with open(os.devnull, 'w') as f:
            subprocess.call(("ceph -c %s osd pool create %s "
                             "16 16 replicated")
                            % (os.getenv("CEPH_CONF"), self.ceph_pool_name),
                            shell=True, stdout=f, stderr=subprocess.STDOUT)
            subprocess.call(("ceph -c %s osd pool application "
                             "enable %s rbd")
                            % (os.getenv("CEPH_CONF"), self.ceph_pool_name),
                            shell=True, stdout=f, stderr=subprocess.STDOUT)
        conf.set_override('ceph_pool', self.ceph_pool_name, 'storage')
    elif conf.storage.driver == "s3":
        conf.set_override('s3_endpoint_url',
                          os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
                          group="storage")
        conf.set_override('s3_access_key_id', "gnocchi", group="storage")
        conf.set_override('s3_secret_access_key', "anythingworks",
                          group="storage")
        conf.set_override("s3_bucket_prefix", str(uuid.uuid4())[:26],
                          "storage")
    elif conf.storage.driver == "swift":
        # NOTE(sileht): This fixture must start before any driver stuff
        swift_fixture = fixtures.MockPatch(
            'swiftclient.client.Connection',
            base.FakeSwiftClient)
        swift_fixture.setUp()

    # NOTE(jd) All of that is still very SQL centric but we only support
    # SQL for now so let's say it's good enough.
    conf.set_override(
        'url',
        sqlalchemy.SQLAlchemyIndexer._create_new_database(
            conf.indexer.url),
        'indexer')

    index = indexer.get_driver(conf)
    index.upgrade()

    # Set pagination to a testable value
    conf.set_override('max_limit', 7, 'api')

    conf.set_override('enable_proxy_headers_parsing', True, group="api")

    self.index = index

    self.coord = metricd.get_coordinator_and_start(str(uuid.uuid4()),
                                                   conf.coordination_url)
    s = storage.get_driver(conf)
    i = incoming.get_driver(conf)

    if conf.storage.driver == 'redis':
        # Create one prefix per test
        s.STORAGE_PREFIX = str(uuid.uuid4()).encode()

    if conf.incoming.driver == 'redis':
        i.SACK_NAME_FORMAT = (
            str(uuid.uuid4()) + incoming.IncomingDriver.SACK_NAME_FORMAT)

    self.fixtures = [
        fixtures.MockPatch("gnocchi.storage.get_driver",
                           return_value=s),
        fixtures.MockPatch("gnocchi.incoming.get_driver",
                           return_value=i),
        fixtures.MockPatch("gnocchi.indexer.get_driver",
                           return_value=self.index),
        fixtures.MockPatch("gnocchi.cli.metricd.get_coordinator_and_start",
                           return_value=self.coord),
    ]
    for f in self.fixtures:
        f.setUp()

    if conf.storage.driver == 'swift':
        self.fixtures.append(swift_fixture)

    LOAD_APP_KWARGS = {
        'conf': conf,
    }

    s.upgrade()
    i.upgrade(128)

    # start up a thread to async process measures
    self.metricd_thread = MetricdThread(chef.Chef(self.coord, i, index, s))
    self.metricd_thread.start()
def setUp(self):
    super(TestChef, self).setUp()
    self.metric, __ = self._create_metric()
    self.chef = chef.Chef(self.coord, self.incoming,
                          self.index, self.storage)
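# A hypothetical test body built on the setUp() above. It only uses calls that
# appear elsewhere in this section (add_measures_batch, iter_sacks,
# process_new_measures_for_sack); the test name and measure values are
# illustrative, not part of the real suite.
def test_process_measures_through_chef(self):
    now = numpy.datetime64(utils.utcnow())
    self.incoming.add_measures_batch({
        self.metric.id: [
            incoming.Measure(now, 42),
            incoming.Measure(now + numpy.timedelta64(1, 's'), 43),
        ],
    })
    # Drain every sack so the batch above gets aggregated by the chef.
    for sack in self.incoming.iter_sacks():
        self.chef.process_new_measures_for_sack(sack, blocking=True)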