Example #1
    def __call__(self, ctx):
        """ Schedule the collection tasks based on interval """
        try:
            # Remove jobs from scheduler when marked for delete
            filters = {'deleted': True}
            tasks = db.task_get_all(ctx, filters=filters)
            LOG.debug("Total tasks found deleted "
                      "in this cycle:%s" % len(tasks))
            for task in tasks:
                job_id = task['job_id']
                if job_id and self.scheduler.get_job(job_id):
                    self.scheduler.remove_job(job_id)
                db.task_delete(ctx, task['id'])
        except Exception as e:
            LOG.error("Failed to remove periodic scheduling job , reason: %s.",
                      six.text_type(e))
        try:
            filters = {'last_run_time': None}
            tasks = db.task_get_all(ctx, filters=filters)
            LOG.debug("Schedule performance collection triggered: total "
                      "tasks to be handled:%s" % len(tasks))
            for task in tasks:
                # Get the current time as epoch seconds; the task's
                # 'method' field names the collection class triggered below
                current_time = int(datetime.now().timestamp())
                last_run_time = current_time
                next_collection_time = last_run_time + task['interval']
                task_id = task['id']
                job_id = uuidutils.generate_uuid()
                next_collection_time = datetime \
                    .fromtimestamp(next_collection_time) \
                    .strftime('%Y-%m-%d %H:%M:%S')

                collection_class = importutils.import_class(task['method'])
                instance = collection_class.get_instance(ctx, task_id)
                self.scheduler.add_job(instance,
                                       'interval',
                                       seconds=task['interval'],
                                       next_run_time=next_collection_time,
                                       id=job_id)

                update_task_dict = {
                    'job_id': job_id,
                    'last_run_time': last_run_time
                }
                db.task_update(ctx, task_id, update_task_dict)
                LOG.info('Periodic collection task triggered for task id: '
                         '%s', task['id'])
        except Exception as e:
            LOG.error("Failed to trigger periodic collection, reason: %s.",
                      six.text_type(e))
        else:
            LOG.debug("Periodic collection task Scheduling completed.")
Example #2
    def schedule_boot_jobs(self, executor):
        """Schedule periodic collection if any task is currently assigned to
        this executor """
        try:
            filters = {'executor': executor, 'deleted': False}
            context = ctxt.get_admin_context()
            tasks = db.task_get_all(context, filters=filters)
            failed_tasks = db.failed_task_get_all(context, filters=filters)
            LOG.info("Scheduling boot time jobs for this executor: total "
                     "jobs to be handled :%s" % len(tasks))
            for task in tasks:
                self.assign_job(context, task['id'], executor)
                LOG.debug('Periodic collection job assigned for id: %s',
                          task['id'])
            for failed_task in failed_tasks:
                self.assign_failed_job(context, failed_task['id'], executor)
                LOG.debug('Failed job assigned for id: %s',
                          failed_task['id'])

        except Exception as e:
            LOG.error(
                "Failed to schedule boot jobs for this executor, "
                "reason: %s.", six.text_type(e))
        else:
            LOG.debug("Boot job scheduling completed.")
Example #3
    def __init__(self, ctxt):
        self.scheduler = scheduler.Scheduler.get_instance()

        # Reset last run time of tasks to restart scheduling and
        # start the failed task job
        task_list = db.task_get_all(ctxt)
        for task in task_list:
            db.task_update(ctxt, task['id'], {'last_run_time': None})
        self._schedule_failed_telemetry_job_handler(ctxt)
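
Resetting last_run_time to None is what makes the scheduling pass in Example #1 pick every task up again, since that pass filters on {'last_run_time': None}. A sketch of the handshake with an in-memory stand-in for the db layer; FakeDB and its single row are illustrative only:

class FakeDB:
    def __init__(self):
        self.tasks = {1: {'id': 1, 'last_run_time': 1700000000}}

    def task_get_all(self, ctx, filters=None):
        rows = list(self.tasks.values())
        if filters and 'last_run_time' in filters:
            rows = [r for r in rows
                    if r['last_run_time'] == filters['last_run_time']]
        return rows

    def task_update(self, ctx, task_id, values):
        self.tasks[task_id].update(values)


db = FakeDB()

# The reset pass from __init__ above
for task in db.task_get_all(None):
    db.task_update(None, task['id'], {'last_run_time': None})

# Example #1's filter now matches the task again
assert db.task_get_all(None, filters={'last_run_time': None})
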
Example #4
def delete_perf_job(context, storage_id):
    # Remove the jobs from the scheduler via RPC
    filters = {'storage_id': storage_id}
    tasks = db.task_get_all(context, filters=filters)
    failed_tasks = db.failed_task_get_all(context, filters=filters)
    task_api = metrics_rpcapi.TaskAPI()
    for task in tasks:
        task_api.remove_job(context, task.get('id'),
                            task.get('executor'))
    for failed_task in failed_tasks:
        task_api.remove_failed_job(context, failed_task.get('id'),
                                   failed_task.get('executor'))

    # Soft delete tasks
    db.task_delete_by_storage(context, storage_id)
    db.failed_task_delete_by_storage(context, storage_id)
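
The final two calls are soft deletes: rows are flagged deleted=True rather than removed, which is what lets the cleanup pass in Example #1 find them via filters={'deleted': True} and drop their scheduler jobs before purging. A sketch of that flag-then-purge idea, with plain dicts standing in for db rows:

def task_delete_by_storage(rows, storage_id):
    # Soft delete: flag matching rows instead of removing them
    for row in rows:
        if row['storage_id'] == storage_id:
            row['deleted'] = True


rows = [{'id': 1, 'storage_id': 's1', 'deleted': False},
        {'id': 2, 'storage_id': 's2', 'deleted': False}]
task_delete_by_storage(rows, 's1')

# Example #1's cleanup filter {'deleted': True} now picks up row 1
assert [r['id'] for r in rows if r['deleted']] == [1]
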
Example #5
def create_perf_job(context, storage_id, capabilities):
    # Add the task to the db
    # Reject a missing or empty 'resource_metrics' capability
    if not capabilities.get('resource_metrics'):
        raise exception.EmptyResourceMetrics()

    task = {
        'storage_id': storage_id,
        'args': capabilities.get('resource_metrics'),
        'interval': CONF.telemetry.performance_collection_interval,
        'method': constants.TelemetryCollection.PERFORMANCE_TASK_METHOD,
    }
    db.task_create(context=context, values=task)
    # Publish the new job to RabbitMQ
    filters = {'storage_id': storage_id}
    task_id = db.task_get_all(context, filters=filters)[0].get('id')
    metrics_rpcapi.TaskAPI().create_perf_job(context, task_id)
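
The opening guard rejects both a missing and an empty resource_metrics mapping before any row is created. A minimal sketch of that check in isolation, with a stand-in for delfin's exception.EmptyResourceMetrics:

class EmptyResourceMetrics(Exception):
    """Stand-in for exception.EmptyResourceMetrics."""


def validate(capabilities):
    # Falsiness covers both a missing key and an empty mapping
    if not capabilities.get('resource_metrics'):
        raise EmptyResourceMetrics()


for bad in ({}, {'resource_metrics': {}}):
    try:
        validate(bad)
    except EmptyResourceMetrics:
        print('rejected:', bad)

# A hypothetical non-empty capability passes the guard
validate({'resource_metrics': {'storagePool': {'throughput': 'MB/s'}}})
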
Example #6
    def get_all_tasks(self, storage_id):
        filters = {'storage_id': storage_id, 'deleted': False}
        context = ctxt.get_admin_context()
        tasks = db.task_get_all(context, filters=filters)
        failed_tasks = db.failed_task_get_all(context, filters=filters)
        return tasks, failed_tasks
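
The same filters dict narrows both queries. Assuming the db layer matches filters by simple equality on each key, the selection behaves like this sketch, with plain dicts standing in for task rows:

rows = [{'id': 1, 'storage_id': 's1', 'deleted': False},
        {'id': 2, 'storage_id': 's1', 'deleted': True},
        {'id': 3, 'storage_id': 's2', 'deleted': False}]

filters = {'storage_id': 's1', 'deleted': False}
matched = [r for r in rows
           if all(r.get(k) == v for k, v in filters.items())]
assert [r['id'] for r in matched] == [1]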