def periodic_balance_all(self, engine_id):
    """Balance quotas for every enabled keystone project.

    Acquires the distributed sync lock, syncs quotas batch by batch
    (one thread per project within a batch), then releases the lock.

    :param engine_id: ID of the engine instance running this job.
    """
    LOG.info(_LI("periodically balance quota for all keystone tenants"))
    lock = kingbird_lock.sync_lock_acquire(self.context, engine_id,
                                           TASK_TYPE)
    if not lock:
        LOG.error(_LE("Not able to acquire lock for %(task_type)s, may"
                      " be Previous sync job has not finished yet, "
                      "Aborting this run at: %(time)s "),
                  {'task_type': TASK_TYPE,
                   'time': time.strftime("%c")}
                  )
        return
    LOG.info(_LI("Successfully acquired lock"))
    try:
        # Iterate through project list and call sync project for each
        # project using threads
        project_list = sdk.OpenStackDriver().get_enabled_projects()
        # Divide list of projects into batches and perform quota sync
        # for one batch at a time.
        for current_batch_projects in utils.get_batch_projects(
                cfg.CONF.batch.batch_size, project_list):
            LOG.info(_LI("Syncing quota for current batch with projects: %s"),
                     current_batch_projects)
            # Fresh thread list per batch so joins apply only to the
            # current batch (the old code re-joined finished threads).
            projects_thread_list = []
            for current_project in current_batch_projects:
                if current_project:
                    thread = threading.Thread(
                        target=self.quota_sync_for_project,
                        args=(current_project,))
                    projects_thread_list.append(thread)
                    thread.start()
            # Wait for all the threads to complete
            # the job(sync all projects quota)
            for current_thread in projects_thread_list:
                current_thread.join()
    finally:
        # Bug fix: release the lock even when a batch raises; previously
        # an exception left the lock held until it was forcibly stolen.
        kingbird_lock.sync_lock_release(self.context, engine_id, TASK_TYPE)
def main():
    """Entry point for the Kingbird API WSGI server."""
    api_config.init(sys.argv[1:])
    api_config.setup_logging()
    application = app.setup_app()

    host = CONF.bind_host
    port = CONF.bind_port
    workers = CONF.api_workers

    if workers < 1:
        # Bug fix: the %(workers)s placeholder needs a mapping; passing
        # the bare int raised "format requires a mapping" at log time.
        LOG.warning(_LW("Wrong worker number, worker = %(workers)s"),
                    {'workers': workers})
        workers = 1

    LOG.info(_LI("Server on http://%(host)s:%(port)s with %(workers)s"),
             {'host': host, 'port': port, 'workers': workers})
    messaging.setup()
    systemd.notify_once()
    service = wsgi.Server(CONF, "Kingbird", application, host, port)

    app.serve(service, CONF, workers)

    LOG.info(_LI("Configuration:"))
    CONF.log_opt_values(LOG, std_logging.INFO)

    app.wait()
def setup_logging():
    """Sets up the logging options for a log with supplied name."""
    logging.setup(cfg.CONF, "kingbird")
    LOG.info(_LI("Logging enabled!"))
    LOG.info(_LI("%(prog)s version %(version)s"),
             {'prog': sys.argv[0],
              'version': version.version_info.release_string()})
    LOG.debug("command line: %s", " ".join(sys.argv))
def sync_lock_acquire(context, engine_id, task_type, forced=False):
    """Try to lock with specified engine_id.

    :param engine: ID of the engine which wants to lock the projects.
    :returns: True if lock is acquired, or False otherwise.
    """
    # First attempt: grab the lock straight away.
    LOG.info(_LI('Trying to acquire lock with %(engId)s for Task: %(task)s'),
             {'engId': engine_id, 'task': task_type})
    if db_api.sync_lock_acquire(context, engine_id, task_type):
        return True

    # Retry according to the globally configured retry policy.
    retry_interval = cfg.CONF.locks.lock_retry_interval
    for attempt in range(cfg.CONF.locks.lock_retry_times):
        scheduler.sleep(retry_interval)
        LOG.info(_LI('Retry acquire lock with %(engId)s for Task: %(task)s'),
                 {'engId': engine_id, 'task': task_type})
        if db_api.sync_lock_acquire(context, engine_id, task_type):
            return True

    # Last resort: steal the lock, only when the caller asked for it.
    if forced:
        return bool(db_api.sync_lock_steal(context, engine_id, task_type))

    # Reached only when every attempt failed and forcing was not allowed.
    LOG.error(_LE('Not able to acquire lock for %(task)s with retry'
                  ' with engineId %(engId)s'),
              {'engId': engine_id, 'task': task_type})
    return False
def stop(self):
    """Shut the engine service down: RPC server first, then thread group."""
    # Stop accepting new RPC requests before tearing anything else down.
    self._stop_rpc_server()

    # Stop the service's thread group.
    self.TG.stop()
    # Terminate the engine process
    LOG.info(_LI("All threads were gone, terminating engine"))
    super(EngineService, self).stop()
def get_total_usage_for_tenant(self, project_id):
    """Return the summed quota usage for a tenant across all regions."""
    LOG.info(_LI("Get total usage called for project: %s"), project_id)
    per_region_usage = self.get_tenant_quota_usage_per_region(project_id)
    return dict(self.get_summation(per_region_usage))
def sync_lock_release(context, engine_id, task_type):
    """Release the lock for the projects"""
    LOG.info(_LI('Releasing acquired lock with %(engId)s for Task: %(task)s'),
             {'engId': engine_id, 'task': task_type})
    # NOTE(review): engine_id is used for logging only; the db layer
    # releases by task_type alone — confirm that is intentional.
    return db_api.sync_lock_release(context, task_type)
def _stop_rpc_server(self):
    """Stop the RPC server; logs failures instead of raising."""
    # Stop RPC connection to prevent new requests
    LOG.debug(_("Attempting to stop engine service..."))
    try:
        self._rpc_server.stop()
        # Wait for in-flight requests to drain before declaring success.
        self._rpc_server.wait()
        LOG.info(_LI('Engine service stopped successfully'))
    except Exception as ex:
        # Best-effort shutdown: log and swallow so teardown can continue.
        LOG.error(_LE('Failed to stop engine service: %s'),
                  six.text_type(ex))
def service_registry_cleanup(self):
    """Delete registry entries for engines that stopped heartbeating."""
    ctx = context.get_admin_context()
    stale_after = 2 * cfg.CONF.report_interval
    for svc in service_obj.Service.get_all(ctx):
        if svc['id'] == self.engine_id:
            # Never reap our own registration.
            continue
        if timeutils.is_older_than(svc['updated_at'], stale_after):
            # No update within two report intervals: assume the engine died.
            LOG.info(_LI('Service %s was aborted'), svc['id'])
            service_obj.Service.delete(ctx, svc['id'])
def read_quota_usage(self, project_id, region, usage_queue):
    """Collect one region's resource usage and push it onto the queue.

    The queued item has the format
    {'region_name': (<nova_usages>, <neutron_usages>, <cinder_usages>)}
    merged into a single mapping.
    """
    LOG.info(_LI("Reading quota usage for %(project_id)s in %(region)s"),
             {'project_id': project_id, 'region': region})
    region_usage = sdk.OpenStackDriver(region).get_resource_usages(
        project_id)
    merged_usage = collections.defaultdict(dict)
    # region_usage[0], [1] and [2] are the nova, neutron and cinder
    # usages respectively (the old comment wrongly said [3]).
    for service_usage in (region_usage[0], region_usage[1], region_usage[2]):
        merged_usage.update(service_usage)
    usage_queue.put({region: merged_usage})
def get_total_usage_for_tenant(self, project_id):
    """Return global limits plus total usage for a tenant.

    :param project_id: keystone project to report on.
    :returns: dict with 'limits' (kingbird global limits) and 'usage'
        (usage summed across regions, zero-filled for unused quotas).
    :raises exceptions.NotFound: propagated from the limit lookup.
    """
    LOG.info(_LI("Get total usage called for project: %s"), project_id)
    # Removed a no-op ``try/except exceptions.NotFound: raise`` wrapper:
    # re-raising unchanged is identical to not catching at all.
    total_usage = dict(self.get_summation(
        self.get_tenant_quota_usage_per_region(project_id)))
    kingbird_global_limit = self._get_kingbird_project_limit(
        project_id)
    # Get unused quotas
    unused_quota = set(
        kingbird_global_limit).difference(set(total_usage.keys()))
    # Create a dict with value as '0' for unused quotas
    unused_quota = dict((quota_name, 0) for quota_name in unused_quota)
    total_usage.update(unused_quota)
    return {'limits': kingbird_global_limit, 'usage': total_usage}
def quota_sync_for_project(self, project_id):
    """Sync per-region quota limits for one project.

    Formula:
      Global remaining limit = kingbird global limit - summation of
      usages in all the regions
      New quota limit = global remaining limit + usage in that region
    """
    LOG.info(_LI("Quota sync Called for Project: %s"), project_id)
    # Retrieve regions for the project
    project_regions = sdk.OpenStackDriver().get_all_regions_for_project(
        project_id)
    regions_usage_dict = self.get_tenant_quota_usage_per_region(project_id)
    if not regions_usage_dict:
        # Skip syncing for the project if not able to read regions usage
        LOG.error(_LE("Error reading regions usage for the Project: '%s'. "
                      "Aborting, continue with next project."), project_id)
        return
    total_project_usages = dict(self.get_summation(regions_usage_dict))
    kingbird_global_limit = self._get_kingbird_project_limit(project_id)
    global_remaining_limit = (
        collections.Counter(kingbird_global_limit) -
        collections.Counter(total_project_usages))
    update_threads = []
    for region in project_regions:
        new_limit = self._arrange_quotas_by_service_name(
            dict(global_remaining_limit +
                 collections.Counter(regions_usage_dict[region])))
        worker = threading.Thread(target=self.update_quota_limits,
                                  args=(project_id, new_limit, region,))
        update_threads.append(worker)
        worker.start()
    # Wait for every per-region update thread to finish.
    for worker in update_threads:
        worker.join()
def quota_sync_for_project(self, context, project_id):
    """RPC endpoint: on-demand quota sync for one project (from KB-API)."""
    LOG.info(_LI("On Demand Quota Sync Called for: %s"), project_id)
    self.qm.quota_sync_for_project(project_id)
def get_total_usage_for_tenant(self, context, project_id):
    """RPC endpoint: nova, neutron and cinder usage totals for a project."""
    LOG.info(_LI("Get total tenant usage called for: %s"), project_id)
    total_usage = self.qm.get_total_usage_for_tenant(project_id)
    return total_usage
def periodic_balance_all(self, engine_id):
    """RPC endpoint: automated periodic quota sync for all projects."""
    LOG.info(_LI("Periodic quota sync job started at: %s"),
             time.strftime("%c"))
    self.qm.periodic_balance_all(engine_id)