def __init__(self):
    """Set up distributed coordination, state tracking and storage access.

    A tooz coordinator is created against the configured coordination
    backend, using a fresh random UUID (ascii-encoded bytes) as this
    member's id so each process registers as a distinct member.
    """
    self._coord = coordination.get_coordinator(
        CONF.orchestrator.coordination_url,
        uuidutils.generate_uuid().encode('ascii'))
    self._state = state.StateManager()
    self._storage = storage.get_storage()
    # start_heart=True makes tooz spawn its own heartbeat thread, so this
    # member stays alive in the group without manual heartbeat calls.
    self._coord.start(start_heart=True)
def __init__(self, worker_id):
    """Initialize the orchestrator service.

    Loads the configured fetcher driver, the collector (with its
    transformers), the storage backend and the state manager, then wires
    up RPC messaging and finally joins the distributed lock manager.

    :param worker_id: identifier of this worker process
    """
    self._worker_id = worker_id
    super(Orchestrator, self).__init__(self._worker_id)
    # Fetcher is loaded through stevedore from the configured backend.
    self.fetcher = driver.DriverManager(
        FETCHERS_NAMESPACE,
        CONF.fetcher.backend,
        invoke_on_load=True,
    ).driver
    transformers = transformer.get_transformers()
    self.collector = collector.get_collector(transformers)
    self.storage = storage.get_storage()
    self._state = state.StateManager()
    # RPC
    self.server = None
    self._rating_endpoint = RatingEndpoint(self)
    self._init_messaging()
    # DLM: register as a uniquely-named member; start_heart=True keeps
    # the membership alive via tooz's own heartbeat thread.
    self.coord = coordination.get_coordinator(
        CONF.orchestrator.coordination_url,
        uuidutils.generate_uuid().encode('ascii'))
    self.coord.start(start_heart=True)
def __init__(self, worker_id):
    """Initialize the CloudKitty processor service.

    Sets up the fetcher driver, collector, storage and state manager,
    the RPC endpoints (rating and scope management), and the distributed
    lock manager coordination, then prepares the per-scope worker class.

    :param worker_id: identifier of this worker process
    """
    self._worker_id = worker_id
    super(CloudKittyProcessor, self).__init__(self._worker_id)
    # Filled later by the fetcher with the scopes/tenants to process.
    self.tenants = []
    self.fetcher = driver.DriverManager(
        FETCHERS_NAMESPACE,
        CONF.fetcher.backend,
        invoke_on_load=True,
    ).driver
    self.collector = collector.get_collector()
    self.storage = storage.get_storage()
    self._state = state.StateManager()
    # RPC
    self.server = None
    self._rating_endpoint = RatingEndpoint(self)
    self._scope_endpoint = ScopeEndpoint()
    self._init_messaging()
    # DLM: unique random member id; tooz heartbeats on its own thread.
    self.coord = coordination.get_coordinator(
        CONF.orchestrator.coordination_url,
        uuidutils.generate_uuid().encode('ascii'))
    self.coord.start(start_heart=True)
    # Pre-bind the state-checking helper with the collect period so
    # callers only supply the remaining arguments.
    self.next_timestamp_to_process = functools.partial(
        _check_state, self, CONF.collect.period)
    self.worker_class = Worker
    self.log_worker_initiated()
def get(self, offset=0, limit=100, scope_id=None, scope_key=None,
        fetcher=None, collector=None):
    """Return the stored state of the scopes matching the filters.

    Authorization is checked against the ``scope:get_state`` policy
    rule before querying; the tenant defaults to the request's project
    when no explicit ``scope_id`` is given.

    :param offset: pagination offset into the result set
    :param limit: maximum number of results to return
    :param scope_id: optional scope identifier filter
    :param scope_key: optional scope key filter
    :param fetcher: optional fetcher name filter
    :param collector: optional collector name filter
    :raises http_exceptions.NotFound: when no scope matches the filters
    """
    policy.authorize(
        flask.request.context,
        'scope:get_state',
        {'tenant_id': scope_id or flask.request.context.project_id})
    results = storage_state.StateManager().get_all(
        identifier=scope_id,
        scope_key=scope_key,
        fetcher=fetcher,
        collector=collector,
        offset=offset,
        limit=limit,
    )
    # Idiomatic emptiness test (was: len(results) < 1).
    if not results:
        raise http_exceptions.NotFound(
            "No resource found for provided filters.")
    return {
        'results': [{
            'scope_id': r.identifier,
            'scope_key': r.scope_key,
            'fetcher': r.fetcher,
            'collector': r.collector,
            'state': str(r.state),
        } for r in results]
    }
def __init__(self, collector, storage, tenant_id):
    """Build a per-tenant worker.

    :param collector: collector instance used to gather metrics
    :param storage: storage backend used to persist rated data
    :param tenant_id: identifier of the scope/tenant to process
    """
    self._tenant_id = tenant_id
    self._collector = collector
    self._storage = storage
    # Collection period and grace delay both derive from configuration.
    period = CONF.collect.period
    self._period = period
    self._wait_time = CONF.collect.wait_periods * period
    self._conf = ck_utils.load_conf(CONF.collect.metrics_conf)
    self._state = state.StateManager()
    super(Worker, self).__init__(self._tenant_id)
def __init__(self, collector, storage, tenant_id, worker_id):
    """Build a per-tenant worker bound to a specific worker process.

    :param collector: collector instance used to gather metrics
    :param storage: storage backend used to persist rated data
    :param tenant_id: identifier of the scope/tenant to process
    :param worker_id: identifier of the owning worker process
    """
    self._collector = collector
    self._storage = storage
    self._tenant_id = tenant_id
    self._worker_id = worker_id
    self._period = CONF.collect.period
    self._wait_time = CONF.collect.wait_periods * self._period
    # Prefix used to tag every log line with scope/worker context.
    prefix_tmpl = '[scope: {scope}, worker: {worker}] '
    self._log_prefix = prefix_tmpl.format(scope=tenant_id,
                                          worker=worker_id)
    self._conf = ck_utils.load_conf(CONF.collect.metrics_conf)
    self._state = state.StateManager()
    # Pre-bind the state check with this worker's period and scope.
    self._check_state = functools.partial(
        _check_state, self, self._period, self._tenant_id)
    super(Worker, self).__init__(self._tenant_id)
def start_fixture(self):
    """Initialize the state manager and seed it with known scope states."""
    self.sm = storage_state.StateManager()
    self.sm.init()
    # (identifier, state timestamp, fetcher, collector, scope key)
    fixtures = [
        ('aaaa', datetime.datetime(2019, 1, 1), 'fet1', 'col1', 'key1'),
        ('bbbb', datetime.datetime(2019, 2, 2), 'fet1', 'col1', 'key2'),
        ('cccc', datetime.datetime(2019, 3, 3), 'fet1', 'col2', 'key1'),
        ('dddd', datetime.datetime(2019, 4, 4), 'fet1', 'col2', 'key2'),
        ('eeee', datetime.datetime(2019, 5, 5), 'fet2', 'col1', 'key1'),
        ('ffff', datetime.datetime(2019, 6, 6), 'fet2', 'col1', 'key2'),
        ('gggg', datetime.datetime(2019, 6, 6), 'fet2', 'col2', 'key1'),
        ('hhhh', datetime.datetime(2019, 6, 6), 'fet2', 'col2', 'key2'),
    ]
    for identifier, stamp, fetcher, coll, scope_key in fixtures:
        self.sm.set_state(identifier, stamp,
                          fetcher=fetcher,
                          collector=coll,
                          scope_key=scope_key)
def __init__(self, backend, tenant_id, storage, basepath=None, period=3600):
    """Initialize a per-tenant report writer.

    :param backend: report output backend
    :param tenant_id: identifier of the tenant being written
    :param storage: rated-data storage backend
    :param basepath: optional directory for report files; created
        (including parents) when provided
    :param period: report aggregation period in seconds (default 3600)
    """
    self._backend = backend
    self._tenant_id = tenant_id
    self._storage = storage
    self._storage_state = storage_state.StateManager()
    self._basepath = basepath
    if self._basepath:
        # Make sure the output directory tree exists before writing.
        fileutils.ensure_tree(self._basepath)
    self._period = period
    # Per-tenant DB-backed writer status tracking.
    self._sm = state.DBStateManager(self._tenant_id, 'writer_status')
    self._write_pipeline = []
    # State vars
    self.usage_start = None
    self.usage_end = None
    # Current total
    self.total = 0
def reload(cls):
    """Refresh class-level clients after the parent state is reloaded."""
    super(ScopeState, cls).reload()
    # Both clients are recreated from scratch; their creation order
    # does not matter as they are independent of each other.
    cls._storage_state = storage_state.StateManager()
    cls._client = messaging.get_client()
def get_tenants(self, begin=None, end=None):
    """Return the tenants known to the state storage for a time frame.

    :param begin: optional start of the time frame
    :param end: optional end of the time frame
    """
    manager = storage_state.StateManager()
    return manager.get_tenants(begin, end)
def setUp(self):
    """Create the state manager under test and pin the relevant config."""
    super(StateManagerTest, self).setUp()
    self._state = storage_state.StateManager()
    # Override fetcher/collector/scope_key so tests are independent of
    # the deployment's actual configuration.
    self.conf.set_override('backend', 'fetcher1', 'fetcher')
    self.conf.set_override('collector', 'collector1', 'collect')
    self.conf.set_override('scope_key', 'scope_key', 'collect')
def init_storage_backend():
    """Initialize the rated-data storage backend, then the state storage."""
    storage.get_storage().init()
    storage_state.StateManager().init()
def __init__(self, *args, **kwargs):
    """Set up the messaging client and state storage for this endpoint."""
    super(ScopeState, self).__init__(*args, **kwargs)
    # The two attributes are independent; creation order is irrelevant.
    self._storage_state = storage_state.StateManager()
    self._client = messaging.get_client()
def __init__(self, *args, **kwargs):
    """Set up the state manager and reprocessing scheduler DB accessors."""
    super(ReprocessSchedulerPostApi, self).__init__(*args, **kwargs)
    # Independent accessors; creation order does not matter.
    self.schedule_reprocessing_db = storage_state.ReprocessingSchedulerDb()
    self.storage_state_manager = storage_state.StateManager()