Example #1
 def periodic_balance_all(self, engine_id):
     LOG.info(_LI("periodically balance quota for all keystone tenants"))
     lock = kingbird_lock.sync_lock_acquire(self.context, engine_id,
                                            TASK_TYPE)
     if not lock:
         LOG.error(_LE("Not able to acquire lock for %(task_type)s; maybe "
                       "the previous sync job has not finished yet. "
                       "Aborting this run at: %(time)s"),
                   {'task_type': TASK_TYPE,
                    'time': time.strftime("%c")})
         return
     LOG.info(_LI("Successfully acquired lock"))
     projects_thread_list = []
     # Iterate through project list and call sync project for each project
     # using threads
     project_list = sdk.OpenStackDriver().get_enabled_projects()
     # Divide the list of projects into batches and perform quota sync
     # for one batch at a time.
     for current_batch_projects in utils.get_batch_projects(
             cfg.CONF.batch.batch_size, project_list):
         LOG.info(_LI("Syncing quota for current batch with projects: %s"),
                  current_batch_projects)
         for current_project in current_batch_projects:
             if current_project:
                 thread = threading.Thread(
                     target=self.quota_sync_for_project,
                     args=(current_project,))
                 projects_thread_list.append(thread)
                 thread.start()
         # Wait for all the threads in this batch to complete
         # the job (sync the quota of every project in the batch)
         for current_thread in projects_thread_list:
             current_thread.join()
     kingbird_lock.sync_lock_release(self.context, engine_id, TASK_TYPE)
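The method above starts one thread per project in a batch and joins the whole
batch before moving on to the next. A minimal standalone sketch of that
batch-and-join pattern, using only the standard library; batches() and
sync_one() are hypothetical stand-ins for utils.get_batch_projects() and
quota_sync_for_project():

import threading


def batches(items, size):
    # Yield successive fixed-size slices of the input list.
    for start in range(0, len(items), size):
        yield items[start:start + size]


def sync_one(project):
    # Placeholder for the real per-project quota sync work.
    print("syncing quota for %s" % project)


def balance_all(projects, batch_size=2):
    for batch in batches(projects, batch_size):
        threads = []
        for project in batch:
            thread = threading.Thread(target=sync_one, args=(project,))
            threads.append(thread)
            thread.start()
        # Join the whole batch before starting the next one, so at most
        # batch_size syncs run concurrently.
        for thread in threads:
            thread.join()


balance_all(['demo', 'admin', 'service', 'alt_demo'])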
Example #2
 def _stop_rpc_server(self):
     # Stop RPC connection to prevent new requests
     LOG.debug(_("Attempting to stop engine service..."))
     try:
         self._rpc_server.stop()
         self._rpc_server.wait()
         LOG.info(_LI('Engine service stopped successfully'))
     except Exception as ex:
         LOG.error(_LE('Failed to stop engine service: %s'),
                   six.text_type(ex))
Example #3
 def service_registry_report(self):
     ctx = context.get_admin_context()
     try:
         svc = service_obj.Service.update(ctx, self.engine_id)
         # If svc is None, the service record has not been created yet.
         if svc is None:
             service_obj.Service.create(ctx, self.engine_id, self.host,
                                        'kingbird-engine', self.topic)
     except Exception as ex:
         LOG.error(_LE('Service %(service_id)s update failed: %(error)s'),
                   {'service_id': self.engine_id, 'error': ex})
Example #4
def sync_lock_acquire(context, engine_id, task_type, forced=False):
    """Try to lock with specified engine_id.

    :param context: request context.
    :param engine_id: ID of the engine which wants to lock the projects.
    :param task_type: type of the task that needs the lock.
    :param forced: if True, steal the lock when normal retries fail.
    :returns: True if lock is acquired, or False otherwise.
    """

    # Step 1: try to lock the projects; if it returns True, the lock is ours
    LOG.info(_LI('Trying to acquire lock with %(engId)s for Task: %(task)s'),
             {'engId': engine_id, 'task': task_type})
    lock_status = db_api.sync_lock_acquire(context, engine_id, task_type)
    if lock_status:
        return True

    # Step 2: retry using global configuration options
    retries = cfg.CONF.locks.lock_retry_times
    retry_interval = cfg.CONF.locks.lock_retry_interval

    while retries > 0:
        scheduler.sleep(retry_interval)
        LOG.info(_LI('Retry acquire lock with %(engId)s for Task: %(task)s'),
                 {'engId': engine_id, 'task': task_type})
        lock_status = db_api.sync_lock_acquire(context, engine_id, task_type)
        if lock_status:
            return True
        retries = retries - 1

    # Step 3: last resort is 'forced' locking, used only when retries fail
    if forced:
        lock_status = db_api.sync_lock_steal(context, engine_id, task_type)
        return bool(lock_status)

    # Reaching here means the lock could not be acquired even with retries
    LOG.error(_LE('Not able to acquire lock for %(task)s with engine '
                  'id %(engId)s even after retries'),
              {'engId': engine_id, 'task': task_type})
    return False
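Example #1 above is the caller of this helper: it acquires the lock before
syncing and releases it afterwards. A hedged sketch of that
acquire/work/release contract, assuming the kingbird_lock module shown here is
importable and taking a caller-supplied do_work() callable (hypothetical):

def run_exclusive(context, engine_id, task_type, do_work, forced=False):
    # Acquire the task lock; give up if another engine holds it and the
    # retries (and optional forced stealing) inside sync_lock_acquire fail.
    if not kingbird_lock.sync_lock_acquire(context, engine_id, task_type,
                                           forced=forced):
        return False
    try:
        do_work()
    finally:
        # Always release the lock so the next periodic run can acquire it.
        kingbird_lock.sync_lock_release(context, engine_id, task_type)
    return True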
Example #5
    def quota_sync_for_project(self, project_id):
        # Sync quota limits for the project according to below formula
        # Global remaining limit = Kingbird global limit - Summation of usages
        #                          in all the regions
        # New quota limit = Global remaining limit + usage in that region
        LOG.info(_LI("Quota sync Called for Project: %s"),
                 project_id)
        regions_thread_list = []
        # Retrieve regions for the project
        region_lists = sdk.OpenStackDriver().get_all_regions_for_project(
            project_id)
        regions_usage_dict = self.get_tenant_quota_usage_per_region(
            project_id)
        if not regions_usage_dict:
            # Skip syncing this project if its regions' usage cannot be read
            LOG.error(_LE("Error reading regions usage for the Project: "
                          "'%s'. Aborting sync for this project and "
                          "continuing with the next one."), project_id)
            return
        total_project_usages = dict(self.get_summation(regions_usage_dict))
        kingbird_global_limit = self._get_kingbird_project_limit(project_id)
        global_remaining_limit = collections.Counter(
            kingbird_global_limit) - collections.Counter(total_project_usages)

        for current_region in region_lists:
            region_new_limit = dict(
                global_remaining_limit + collections.Counter(
                    regions_usage_dict[current_region]))
            region_new_limit = self._arrange_quotas_by_service_name(
                region_new_limit)
            thread = threading.Thread(target=self.update_quota_limits,
                                      args=(project_id, region_new_limit,
                                            current_region,))
            regions_thread_list.append(thread)
            thread.start()

        # Wait for all the threads to update quota
        for current_thread in regions_thread_list:
            current_thread.join()
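The Counter arithmetic above implements the formula from the comment block:
the global remaining limit is the Kingbird global limit minus the sum of
usages across all regions, and each region's new limit is the global remaining
limit plus that region's own usage. A small worked example with made-up limit
and usage values:

import collections

kingbird_global_limit = {'instances': 20, 'cores': 40, 'ram': 51200}
regions_usage = {
    'RegionOne': {'instances': 4, 'cores': 8, 'ram': 8192},
    'RegionTwo': {'instances': 6, 'cores': 12, 'ram': 16384},
}

# Summation of usages in all the regions
total_usage = collections.Counter()
for usage in regions_usage.values():
    total_usage += collections.Counter(usage)

# Global remaining limit = Kingbird global limit - total usage
global_remaining = collections.Counter(kingbird_global_limit) - total_usage

# New quota limit per region = global remaining limit + usage in that region
for region, usage in regions_usage.items():
    new_limit = dict(global_remaining + collections.Counter(usage))
    print(region, new_limit)

# RegionOne ends up with instances=14, cores=28, ram=34816;
# RegionTwo with instances=16, cores=32, ram=43008.

Note that Counter subtraction and addition drop zero and negative results, so
a resource with no remaining headroom simply disappears from the computed
dict.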