def _special_task_to_dict(task, queue_entries):
    """Transforms a special task dictionary to another form of dictionary.

    @param task           Special task as a dictionary type
    @param queue_entries  Host queue entries as a list of dictionaries.

    @return Transformed dictionary for a special task.
    """
    job_dict = None
    if task['queue_entry']:
        # Scan queue_entries to get the job detail info.
        for qentry in queue_entries:
            if task['queue_entry']['id'] == qentry['id']:
                job_dict = qentry['job']
                break
        # If not found, get it from DB.
        if job_dict is None:
            job = models.Job.objects.get(id=task['queue_entry']['job'])
            job_dict = job.get_object_dict()

    exec_path = server_utils.get_special_task_exec_path(
        task['host']['hostname'], task['id'], task['task'],
        time_utils.time_string_to_datetime(task['time_requested']))
    status = server_utils.get_special_task_status(task['is_complete'],
                                                  task['success'],
                                                  task['is_active'])
    return _common_entry_to_dict(task, task['task'], job_dict, exec_path,
                                 status, task['time_started'])
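
# Illustration only: the shape of the 'task' dictionary that
# _special_task_to_dict reads, inferred from the key accesses above. The field
# values are made up, and the timestamp strings assume the time_utils.TIME_FMT
# style ('%Y-%m-%d %H:%M:%S'); real dictionaries come from the RPC layer.
_EXAMPLE_SPECIAL_TASK = {
    'id': 123,
    'task': 'Verify',
    'host': {'hostname': 'chromeos1-row1-rack1-host1'},
    'queue_entry': {'id': 7, 'job': 42},
    'time_requested': '2016-01-01 00:00:00',
    'time_started': '2016-01-01 00:01:00',
    'is_complete': True,
    'is_active': False,
    'success': True,
}
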
def _compute_next_job_for_tasks(queue_entries, special_tasks):
    """
    For each task, try to figure out the next job that ran after that task.
    This is done as follows:
    * if the task has a queue entry, we can use that entry's job ID.
    * if the task has a time_started, we can try to compare that against the
      started_on field of queue_entries. This isn't guaranteed to work
      perfectly, since queue_entries may also have null started_on values.
    * if the task has neither, or if use of time_started fails, just use the
      last computed job ID.

    A standalone usage sketch with sample data follows this function.

    @param queue_entries    Host queue entries as a list of dictionaries.
    @param special_tasks    Special tasks as a list of dictionaries.
    """
    next_job_id = None  # most recently computed next job
    hqe_index = 0  # index for scanning by started_on times
    for task in special_tasks:
        if task['queue_entry']:
            next_job_id = task['queue_entry']['job']
        elif task['time_started'] is not None:
            for queue_entry in queue_entries[hqe_index:]:
                if queue_entry['started_on'] is None:
                    continue
                t1 = time_utils.time_string_to_datetime(
                    queue_entry['started_on'])
                t2 = time_utils.time_string_to_datetime(task['time_started'])
                if t1 < t2:
                    break
                next_job_id = queue_entry['job']['id']

        task['next_job_id'] = next_job_id

        # advance hqe_index to just after next_job_id
        if next_job_id is not None:
            for queue_entry in queue_entries[hqe_index:]:
                if queue_entry['job']['id'] < next_job_id:
                    break
                hqe_index += 1
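
# Illustration only, not production code: a tiny end-to-end use of
# _compute_next_job_for_tasks with made-up data. The timestamp strings assume
# time_utils.TIME_FMT is '%Y-%m-%d %H:%M:%S', and queue_entries are ordered
# newest-first, which is what the scan above expects.
def _illustrate_compute_next_job():
    """Returns the next_job_id values computed for two sample tasks."""
    queue_entries = [
        {'id': 11, 'job': {'id': 3}, 'started_on': '2016-01-01 03:00:00'},
        {'id': 10, 'job': {'id': 2}, 'started_on': None},
        {'id': 9, 'job': {'id': 1}, 'started_on': '2016-01-01 01:00:00'},
    ]
    special_tasks = [
        # Has a queue entry, so next_job_id comes straight from its job field.
        {'queue_entry': {'id': 11, 'job': 3}, 'time_started': None},
        # No queue entry: matched by comparing time_started against
        # started_on, which resolves here to job 1, the earliest entry that
        # started after the task.
        {'queue_entry': None, 'time_started': '2016-01-01 00:30:00'},
    ]
    _compute_next_job_for_tasks(queue_entries, special_tasks)
    return [task['next_job_id'] for task in special_tasks]  # [3, 1]
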
def is_container_orphaned(container):
    """Check if a container is orphaned.

    A container is orphaned if any of these conditions is True:
    1. The autoserv process that created the container is no longer running.
    2. The test job finished at least 1 hour ago (see the cutoff sketch after
       this function).

    @param container: A Container object.

    @return: True if the container is orphaned.

    """
    logging.debug('Checking if container is orphaned: %s', container.name)
    if container.id is None:
        logging.debug('Container %s was not created for a test.',
                      container.name)
        return False

    job_id = container.id.job_id
    pid = container.id.pid

    if pid and not utils.pid_is_alive(pid):
        logging.debug(
            'Process with PID %s is not alive, container %s is '
            'orphaned.', pid, container.name)
        return True

    try:
        hqes = AFE.get_host_queue_entries(job_id=job_id)
    except Exception as e:
        logging.error('Failed to get hqe for job %s. Error: %s.', job_id, e)
        return False

    if not hqes:
        # The job has not run yet.
        return False
    for hqe in hqes:
        if hqe.active or not hqe.complete:
            logging.debug(
                'Test job %s is not completed yet, container %s is '
                'not orphaned.', job_id, container.name)
            return False
        if (hqe.finished_on and (time_utils.time_string_to_datetime(
                hqe.finished_on) > FINISHED_JOB_CUTOFF_TIME)):
            logging.debug('Test job %s was completed less than an hour ago.',
                          job_id)
            return False

    logging.debug('Test job %s was completed, container %s is orphaned.',
                  job_id, container.name)
    return True
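
# Illustration only: the one-hour cutoff comparison used above, assuming
# FINISHED_JOB_CUTOFF_TIME is a datetime meaning roughly "one hour before now"
# (the real constant is defined elsewhere in this module) and that finished_on
# strings use the '%Y-%m-%d %H:%M:%S' format.
def _illustrate_finished_job_cutoff():
    """Returns True for a job that finished more recently than the cutoff."""
    import datetime
    time_fmt = '%Y-%m-%d %H:%M:%S'
    cutoff = datetime.datetime.now() - datetime.timedelta(hours=1)
    finished_on = (datetime.datetime.now() -
                   datetime.timedelta(minutes=10)).strftime(time_fmt)
    # Finished 10 minutes ago, i.e. after the cutoff, so the job is too recent
    # and is_container_orphaned would not treat the container as orphaned yet.
    return datetime.datetime.strptime(finished_on, time_fmt) > cutoff
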
def is_job_expired(age_limit, timestamp):
  """Check whether a job timestamp is older than an age limit.

  @param age_limit: Minimum age, measured in days.  If the value is
                    not positive, the job is always expired.
  @param timestamp: Timestamp of the job whose age we are checking.
                    The format must match time_utils.TIME_FMT.

  @returns True iff the job is old enough to be expired.
  """
  if age_limit <= 0:
    return True
  job_time = time_utils.time_string_to_datetime(timestamp)
  expiration = job_time + datetime.timedelta(days=age_limit)
  return datetime.datetime.now() >= expiration
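
# Illustration only: is_job_expired with assumed inputs. The timestamp string
# assumes time_utils.TIME_FMT is '%Y-%m-%d %H:%M:%S'. A job stamped two days
# ago is expired under a one-day limit, and any job is expired under a
# non-positive limit.
def _illustrate_is_job_expired():
  """Returns (True, True) for the two sample calls below."""
  two_days_ago = (datetime.datetime.now() -
                  datetime.timedelta(days=2)).strftime('%Y-%m-%d %H:%M:%S')
  return (is_job_expired(1, two_days_ago),  # older than the 1-day limit
          is_job_expired(0, two_days_ago))  # non-positive limit: always True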