def main():
    """Parse options and print a summary of test results.

    By default the summary spans the last 24 hours; `-l` overrides the
    window with a number of days, while `--start`/`--end` set explicit
    boundaries.
    """
    t_now = time.time()
    t_now_minus_one_day = t_now - 3600*24
    parser = argparse.ArgumentParser()
    parser.add_argument('-l', type=int, dest='last',
                        help='last days to summary test results across',
                        default=None)
    parser.add_argument('--start', type=str, dest='start',
                        help=('Enter start time as: yyyy-mm-dd hh:mm:ss,'
                              'defaults to 24h ago.'),
                        default=time_utils.epoch_time_to_date_string(
                                t_now_minus_one_day))
    parser.add_argument('--end', type=str, dest='end',
                        help=('Enter end time as: yyyy-mm-dd hh:mm:ss,'
                              'defaults to current time.'),
                        default=time_utils.epoch_time_to_date_string(t_now))
    parser.add_argument('-t', type=int, dest='top',
                        help='Print the top x of large result folders.',
                        default=0)
    parser.add_argument('-r', action='store_true', dest='report_stat',
                        default=False,
                        help='True to report total size to statsd.')
    options = parser.parse_args()

    # -l (days) takes precedence over the explicit --start/--end window.
    if options.last:
        start_time = t_now - 3600*24*options.last
        end_time = t_now
    else:
        start_time = time_utils.to_epoch_time(options.start)
        end_time = time_utils.to_epoch_time(options.end)

    get_summary(start_time=start_time, end_time=end_time,
                top=options.top, report_stat=options.report_stat)
Beispiel #2
0
def get_intervals_for_host(t_start, t_end, hostname):
    """Gets all history intervals for the given host.

    Query metaDB to return all intervals between given start and end time.
    Note that intervals found in metaDB may miss the history from t_start to
    the first interval found.

    @param t_start: beginning of time period we are interested in.
    @param t_end: end of time period we are interested in.
    @param hostname: hostname of the host to look up history for (string).
    @returns: The `hits` attribute of the ES query result: host history
              entries sorted by time_recorded in ascending order.
    """
    t_start_epoch = time_utils.to_epoch_time(t_start)
    t_end_epoch = time_utils.to_epoch_time(t_end)
    host_history_entries = autotest_es.query(
        fields_returned=None,
        equality_constraints=[('_type', _HOST_HISTORY_TYPE),
                              ('hostname', hostname)],
        range_constraints=[('time_recorded', t_start_epoch, t_end_epoch)],
        sort_specs=[{
            'time_recorded': 'asc'
        }])
    return host_history_entries.hits
Beispiel #3
0
def get_entries(time_start, time_end, gte, lte, size, index, hostname):
    """Gets all entries from es db with the given constraints.

    @param time_start: Earliest time entry was recorded
    @param time_end: Latest time entry was recorded
    @param gte: Lowest reboot_time to return
    @param lte: Highest reboot_time to return
    @param size: Max number of entries to return
    @param index: es db index to get entries for, i.e. 'cautotest'
    @param hostname: string representing hostname to query for
    @returns: Entries from esdb.
    """
    time_start_epoch = time_utils.to_epoch_time(time_start)
    time_end_epoch = time_utils.to_epoch_time(time_end)
    gte_epoch = time_utils.to_epoch_time(gte)
    lte_epoch = time_utils.to_epoch_time(lte)
    # Sort by hostname first, then by value (newest reboot first).
    # NOTE: the unreachable `return results` that followed this statement
    # (referencing an undefined name) has been removed.
    return autotest_es.query(
        index=index,
        fields_returned=['hostname', 'time_recorded', 'value'],
        equality_constraints=[('_type', 'reboot_total'),
                              ('hostname', hostname)],
        range_constraints=[('time_recorded', time_start_epoch, time_end_epoch),
                           ('value', gte_epoch, lte_epoch)],
        size=size,
        sort_specs=[{
            'hostname': 'asc'
        }, {
            'value': 'desc'
        }])
def get_results(start_time,
                end_time,
                hosts=None,
                board=None,
                pool=None,
                verbose=False):
    """Fetch host history for specified hosts or a board/pool combination.

    When hosts is None, the search covers all hosts matching the board and
    pool filters (each of which defaults to "any" when None). When an
    explicit host list is given, board and pool are ignored.

    @param start_time: Start of the history window; string or epoch time.
    @param end_time: End of the history window; string or epoch time.
    @param hosts: Optional list of hostnames to search for history.
    @param board: Optional board filter.
    @param pool: Optional pool filter.
    @param verbose: True to print detailed intervals of host history.

    @returns: A dictionary of host history.
    """
    assert start_time and end_time
    t_start = time_utils.to_epoch_time(start_time)
    t_end = time_utils.to_epoch_time(end_time)
    assert t_start < t_end

    return host_history_utils.get_report(t_start=t_start,
                                         t_end=t_end,
                                         hosts=hosts,
                                         board=board,
                                         pool=pool,
                                         print_each_interval=verbose)
Beispiel #5
0
def make_entry(entry_id,
               name,
               status,
               start_time,
               finish_time=None,
               parent=None,
               **kwargs):
    """Build an event log entry to be stored in Cloud Datastore.

    @param entry_id: A (Kind, id) tuple representing the key.
    @param name: A string identifying the event.
    @param status: A string identifying the status of the event.
    @param start_time: A datetime of the start of the event.
    @param finish_time: Optional datetime of the finish of the event.
    @param parent: Optional (Kind, id) tuple representing the parent key.

    @return A dictionary representing the entry suitable for dumping via
            JSON.
    """
    record = dict(id=entry_id,
                  name=name,
                  status=status,
                  start_time=time_utils.to_epoch_time(start_time))
    # Optional fields are only included when provided; note that only
    # finish_time goes through epoch conversion.
    if finish_time is not None:
        record['finish_time'] = time_utils.to_epoch_time(finish_time)
    if parent is not None:
        record['parent'] = parent
    return record
Beispiel #6
0
def get_status_intervals(history_details):
    """Build a list of status-interval maps from host history details.

    This is the reverse of build_history: given the per-host history
    returned by RPC get_host_history, produce the list of interval maps
    consumed by host_history_utils.aggregate_hosts for stats calculation.

    @param history_details: A dictionary of host history for each host, e.g.,
            {'172.22.33.51': [{'status': 'Resetting'
                               'start_time': '2014-08-07 10:02:16',
                               'end_time': '2014-08-07 10:03:16',
                               'log_url': 'http://autotest/reset-546546/debug',
                               'task_id': 546546},]
            }
    @return: A list of OrderedDicts keyed by (start_time, end_time) epoch
             tuples, each value a dict with at least the key 'status'.
    """
    all_intervals = []
    for hostname, host_history in history_details.iteritems():
        host_intervals = collections.OrderedDict()
        for record in host_history:
            span = (time_utils.to_epoch_time(record['start_time']),
                    time_utils.to_epoch_time(record['end_time']))
            # Deep-copy so the caller's history records are not mutated.
            details = copy.deepcopy(record)
            details['hostname'] = hostname
            host_intervals[span] = {'status': record['status'],
                                    'metadata': details}
        all_intervals.append(host_intervals)
    return all_intervals
    def test_to_epoch_time_success(self):
        """to_epoch_time accepts both string and datetime inputs."""
        with set_time_zone('US/Pacific'):
            for value in (self.TIME_STRING, self.TIME_OBJ):
                self.assertEqual(self.TIME_SECONDS,
                                 time_utils.to_epoch_time(value))
def analyze_suites(start_time, end_time):
    """
    Calculates timing stats (i.e., suite runtime, scheduling overhead)
    for the suites that finished within the timestamps given by parameters.

    @param start_time: Beginning timestamp.
    @param end_time: Ending timestamp.
    """
    print('Analyzing suites from %s to %s...' %
          (time_utils.epoch_time_to_date_string(start_time),
           time_utils.epoch_time_to_date_string(end_time)))

    # Optionally restrict the query to the bvt suites only.
    if _options.bvtonly:
        batch_constraints = [('suite_name',
                              ['bvt-inline', 'bvt-cq', 'bvt-perbuild'])]
    else:
        batch_constraints = []

    start_time_epoch = time_utils.to_epoch_time(start_time)
    end_time_epoch = time_utils.to_epoch_time(end_time)
    results = autotest_es.query(fields_returned=[
        'suite_name', 'suite_job_id', 'board', 'build', 'num_child_jobs',
        'duration'
    ],
                                equality_constraints=[
                                    ('_type', job_overhead.SUITE_RUNTIME_KEY),
                                ],
                                range_constraints=[
                                    ('time_recorded', start_time_epoch,
                                     end_time_epoch)
                                ],
                                sort_specs=[{
                                    'time_recorded': 'asc'
                                }],
                                batch_constraints=batch_constraints)
    print('Found %d suites' % (results.total))

    for hit in results.hits:
        suite_job_id = hit['suite_job_id']

        try:
            suite_name = hit['suite_name']
            num_child_jobs = int(hit['num_child_jobs'])
            suite_runtime = float(hit['duration'])

            print('Suite: %s (%s), Board: %s, Num child jobs: %d' %
                  (suite_name, suite_job_id, hit['board'], hit['build'],
                   num_child_jobs)) if False else None
            print('Suite: %s (%s), Board: %s, Build: %s, Num child jobs: %d' %
                  (suite_name, suite_job_id, hit['board'], hit['build'],
                   num_child_jobs))

            suite_stats = get_scheduling_overhead(suite_job_id, num_child_jobs)
            # Trailing comma suppresses the newline (Python 2 print
            # statement) so the suite stats continue on the same line.
            print('Suite: %s (%s) runtime: %f,' %
                  (suite_name, suite_job_id, suite_runtime)),
            print_suite_stats(suite_stats)

        except Exception as e:
            # Report and continue with the next suite rather than aborting
            # the whole analysis. print(e) replaces the py2-only `print e`
            # for consistency with the rest of the file.
            print('ERROR: Exception is raised while processing suite %s' %
                  (suite_job_id))
            print(e)
def main():
    """Collect host stats per board/pool over a span and email any errors."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--span',
        type=int,
        dest='span',
        default=1,
        help=('Number of hours that stats should be collected. '
              'If it is set to 24, the end time of stats being '
              'collected will set to the mid of the night. '
              'Default is set to 1 hour.'))
    parser.add_argument('-e',
                        '--email',
                        dest='email',
                        default=None,
                        help='Email any errors to the given email address.')
    options = parser.parse_args()

    boards = host_label_utils.get_all_boards()
    pools = ['bvt', 'suites', 'cq']

    # A 24h span is aligned to midnight; any other span is aligned to the
    # start of the current hour.
    if options.span == 24:
        end_dt = datetime.combine(date.today(), datetime.min.time())
    else:
        now = datetime.now()
        end_dt = datetime(year=now.year,
                          month=now.month,
                          day=now.day,
                          hour=now.hour)
    end_time = time_utils.to_epoch_time(end_dt)

    start_time = end_time - timedelta(hours=options.span).total_seconds()
    print('Collecting host stats from %s to %s...' %
          (time_utils.epoch_time_to_date_string(start_time),
           time_utils.epoch_time_to_date_string(end_time)))

    errors = []
    if not boards:
        errors.append('Error! No board found in metadb.')
    for board in boards:
        for pool in pools:
            error = report_stats(board, pool, start_time, end_time,
                                 options.span)
            if error:
                errors.append(error)
    if options.email and errors:
        gmail_lib.send_email(options.email,
                             'Error occured when collecting host stats.',
                             '\n'.join(errors))
Beispiel #10
0
    def diagnose_pool(self, board, pool, time_delta_hours, limit=10):
        """Log diagnostic information about a timeout for a board/pool.

        @param board: The board for which the current suite was run.
        @param pool: The pool against which the current suite was run.
        @param time_delta_hours: The time from which we should log information.
            This is a datetime.timedelta object, as stored by the JobTimer.
        @param limit: The maximum number of jobs per host, to log.

        @raises proxy.JSONRPCException: For exceptions thrown across the wire.
        """
        end_time = datetime.now()
        start_time = end_time - time_delta_hours
        labels = labellib.LabelsMapping()
        labels['board'] = board
        labels['pool'] = pool
        host_histories = status_history.HostJobHistory.get_multiple_histories(
            self.rpc_interface,
            time_utils.to_epoch_time(start_time),
            time_utils.to_epoch_time(end_time),
            labels.getlabels(),
        )
        if not host_histories:
            logging.error('No hosts found for board:%s in pool:%s', board,
                          pool)
            return
        # Human-readable names for the diagnosis constants returned by
        # history.last_diagnosis().
        status_map = {
            status_history.UNUSED: 'Unused',
            status_history.UNKNOWN: 'No job history',
            status_history.WORKING: 'Working',
            status_history.BROKEN: 'Failed repair'
        }
        for history in host_histories:
            count = 0
            job_info = ''
            # Collect up to `limit` recent jobs for this host.
            # NOTE(review): start_time is rebound inside this loop, shadowing
            # the query-window value computed above (no longer used here).
            for job in history:
                start_time = (time_utils.epoch_time_to_date_string(
                    job.start_time))
                job_info += ('%s %s started on: %s status %s\n' %
                             (job.id, job.name, start_time, job.job_status))
                count += 1
                if count >= limit:
                    break
            host = history.host
            logging.error(
                'host: %s, status: %s, locked: %s '
                'diagnosis: %s\n'
                'labels: %s\nLast %s jobs within %s:\n'
                '%s', history.hostname, host.status, host.locked,
                status_map[history.last_diagnosis()[0]], host.labels, limit,
                time_delta_hours, job_info)
    def __init__(self, ds, duts, config, simultaneous=1, total=0,
                 outputlog=None, ping=False, blacklist_consecutive=None,
                 blacklist_success=None, blacklist_total=None, dryrun=False):
        """Initialize runner state for provisioning jobs across DUTs.

        @param ds: Handle that jobs run against (exact type not visible
                here -- confirm at call sites).
        @param duts: The DUTs to run against.
        @param config: Configuration for the run.
        @param simultaneous: Number of jobs to run at the same time.
        @param total: Total number of jobs (semantics defined by the run
                loop -- verify against callers).
        @param outputlog: Optional log; when set, the initial parent entry
                is dumped to it as JSON in this constructor.
        @param ping: Flag forwarded to run logic (semantics not visible
                here).
        @param blacklist_consecutive: Threshold related to consecutive
                results, used for dut_blacklist (presumably -- confirm).
        @param blacklist_success: Success-related blacklist threshold.
        @param blacklist_total: Total-count blacklist threshold.
        @param dryrun: True to avoid performing real actions.
        """
        self.ds = ds
        self.duts = duts
        self.config = config
        # Wall-clock start of the run; finish_time is filled in later.
        self.start_time = datetime.datetime.now()
        self.finish_time = None
        self.simultaneous = simultaneous
        self.total = total
        self.outputlog = outputlog
        self.ping = ping
        self.blacklist_consecutive = blacklist_consecutive
        self.blacklist_success = blacklist_success
        self.blacklist_total = blacklist_total
        self.dryrun = dryrun

        # Bookkeeping for in-flight, started and completed jobs.
        self.active = []
        self.started = 0
        self.completed = []
        # Track DUTs which have failed multiple times.
        self.dut_blacklist = set()
        # Track versions of each DUT to provision in order.
        self.last_versions = {}

        # id for the parent entry.
        # TODO: This isn't the most unique.
        self.entry_id = ('Runner',
                         int(time_utils.to_epoch_time(datetime.datetime.now())))

        # ids for the job entries.
        self.next_id = 0

        if self.outputlog:
            dump_entries_as_json([self.as_entry()], self.outputlog)
Beispiel #12
0
def get_tasks_runtime(task_list, dut, t_start, job_id, job_info_dict):
    """
    Get sum of durations for special tasks.

    job_info_dict will be modified in this function to store the duration
    for each special task.

    @param task_list: List of task id.
    @param dut: Hostname of a DUT that the tasks ran on.
    @param t_start: Beginning timestamp.
    @param job_id: The job id that is related to the tasks.
                   This is used only for debugging purpose.
    @param job_info_dict: Dictionary that has information for jobs.
    @return: Sum of durations of the tasks.
    """
    t_start_epoch = time_utils.to_epoch_time(t_start)
    results = autotest_es.query(
        fields_returned=['status', 'task_id', 'duration'],
        equality_constraints=[('_type', 'job_time_breakdown'),
                              ('hostname', dut)],
        range_constraints=[('time_recorded', t_start_epoch, None)],
        batch_constraints=[('task_id', task_list)])
    # Named `total` so the builtin sum() is not shadowed.
    total = 0
    for hit in results.hits:
        total += float(hit['duration'])
        job_info_dict[job_id][hit['status']] = float(hit['duration'])
        print_verbose('Task %s for Job %s took %s', hit['task_id'], job_id,
                      hit['duration'])
    return total
Beispiel #13
0
def find_most_recent_entry_before(t, type_str, hostname, fields):
    """Return the fields of the latest entry recorded before time t.

    @param t: time we are interested in.
    @param type_str: _type in esdb, such as 'host_history' (string).
    @param hostname: hostname of DUT (string).
    @param fields: list of fields we are interested in.
    @returns: The latest matching entry (with its time and field values),
              or an empty dict if nothing was found.
    """
    t_epoch = time_utils.to_epoch_time(t)
    # Ignore history older than 90 days; this keeps the ES query fast.
    oldest_allowed = t_epoch - 3600 * 24 * _MAX_DAYS_FOR_HISTORY
    result = autotest_es.query(
        fields_returned=fields,
        equality_constraints=[('_type', type_str), ('hostname', hostname)],
        range_constraints=[('time_recorded', oldest_allowed, t_epoch)],
        size=1,
        sort_specs=[{'time_recorded': 'desc'}])
    return result.hits[0] if result.total > 0 else {}
Beispiel #14
0
def parse_time(time_string):
    """Parse a date/time string in canonical form to integer epoch seconds.

    The "canonical" form is the form in which date/time values are stored
    in the database.

    @param time_string Time to be parsed.
    """
    epoch = time_utils.to_epoch_time(time_string)
    return int(epoch)
Beispiel #15
0
def find_start_finish_times(statuses):
    """Determine the overall start and finish times for a list of statuses.

    @param statuses: A list of job test statuses.

    @return (start_time, finish_time) tuple of seconds past epoch. If either
            cannot be determined, None for that time.
    """
    starts = set()
    finishes = set()
    for status in statuses:
        # TKO stores missing timestamps as the literal string 'None'.
        if status.test_started_time != 'None':
            starts.add(
                int(time_utils.to_epoch_time(status.test_started_time)))
        if status.test_finished_time != 'None':
            finishes.add(
                int(time_utils.to_epoch_time(status.test_finished_time)))
    return (min(starts) if starts else None,
            max(finishes) if finishes else None)
def get_calls(time_start,
              time_end,
              artifact_filters=None,
              regex_constraints=None,
              devserver=None,
              size=1e7):
    """Fetch devserver calls from the es db under the given constraints.

    @param time_start: Earliest time entry was recorded.
    @param time_end: Latest time entry was recorded.
    @param artifact_filters: A list of names to match artifacts.
    @param regex_constraints: A list of regex constraints for ES query.
    @param devserver: Name of the devserver to query for; None returns
                      calls for all devservers.
    @param size: Max number of entries to return, defaults to 1e7.

    @returns: A list of devserver_call objects built from the es hits.
    """
    constraints = [('_type', 'devserver')]
    if devserver:
        constraints.append(('devserver', devserver))
    for artifact in (artifact_filters or []):
        constraints.append(('artifacts', artifact))
    t_start_epoch = time_utils.to_epoch_time(time_start)
    t_end_epoch = time_utils.to_epoch_time(time_end)
    results = autotest_es.query(
        fields_returned=None,
        equality_constraints=constraints,
        range_constraints=[('time_recorded', t_start_epoch, t_end_epoch)],
        size=size,
        sort_specs=[{'time_recorded': 'desc'}],
        regex_constraints=regex_constraints)
    devserver_calls = [devserver_call(hit) for hit in results.hits]
    logging.info('Found %d calls.', len(devserver_calls))
    return devserver_calls
Beispiel #17
0
def get_job_timestamps(job_list, job_info_dict):
    """Record the beginning and ending time for each job.

    The beginning time of a job is "queued_time" of "tko_jobs" table.
    The ending time of a job is "finished_on" of "afe_host_queue_entries"
    table. job_info_dict is modified in place to store the timestamps.

    @param job_list: List of job ids.
    @param job_info_dict: Dictionary that timestamps for each job will be
            stored in, under the 'timestamps' key.
    """
    tko_jobs = tko_models.Job.objects.filter(afe_job_id__in=job_list)
    hqes = models.HostQueueEntry.objects.filter(job_id__in=job_list)
    job_start = dict((t.afe_job_id, time_utils.to_epoch_time(t.queued_time))
                     for t in tko_jobs)
    job_end = dict((h.job_id, time_utils.to_epoch_time(h.finished_on))
                   for h in hqes)

    for job_id in job_list:
        info = job_info_dict.setdefault(job_id, {})
        info.setdefault('timestamps', (job_start[job_id], job_end[job_id]))
Beispiel #18
0
def main():
    """Parse options and run the suite timing analysis."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c', dest='cron_mode', action='store_true',
                        default=False,
                        help=('Run in a cron mode. Cron mode '
                              'sends calculated stat data to Graphite.'))
    parser.add_argument('-s', type=int, dest='span', default=1,
                        help=('Number of hours that stats should be '
                              'collected.'))
    parser.add_argument('--bvtonly', dest='bvtonly', action='store_true',
                        default=False,
                        help=('Gets bvt suites only (i.e., bvt-inline,'
                              'bvt-cq, bvt-perbuild).'))
    parser.add_argument('--suite', type=int, dest='suite_job_id',
                        help=('Job id of a suite.'))
    parser.add_argument('--verbose', dest='verbose', action='store_true',
                        default=False,
                        help=('Prints out more info if True.'))
    global _options
    _options = parser.parse_args()

    # A specific suite job id analyzes just that suite; otherwise analyze
    # every suite within the requested span ending now.
    if _options.suite_job_id:
        analyze_suite(_options.suite_job_id)
    else:
        end_time = time_utils.to_epoch_time(datetime.now())
        start_time = end_time - timedelta(hours=_options.span).total_seconds()
        analyze_suites(start_time, end_time)
def main():
    """Query and print host history for the requested hosts/board/pool.

    By default the query covers the last 24 hours; `-l` overrides the
    window with a number of hours, and `--start`/`--end` allow explicit
    boundaries.
    """
    t_now = time.time()
    t_now_minus_one_day = t_now - 3600 * 24
    parser = argparse.ArgumentParser()
    parser.add_argument('-v',
                        action='store_true',
                        dest='verbose',
                        default=False,
                        help='-v to print out ALL entries.')
    parser.add_argument('-l',
                        type=float,
                        dest='last',
                        help='last hours to search results across',
                        default=None)
    parser.add_argument('--board',
                        type=str,
                        dest='board',
                        help='restrict query by board, not implemented yet',
                        default=None)
    parser.add_argument('--pool',
                        type=str,
                        dest='pool',
                        help='restrict query by pool, not implemented yet',
                        default=None)
    parser.add_argument('--hosts',
                        nargs='+',
                        dest='hosts',
                        help='Enter space delimited hostnames',
                        default=[])
    parser.add_argument(
        '--start',
        type=str,
        dest='start',
        help=('Enter start time as: yyyy-mm-dd hh:mm:ss,'
              'defaults to 24h ago.'),
        default=time_utils.epoch_time_to_date_string(t_now_minus_one_day))
    parser.add_argument('--end',
                        type=str,
                        dest='end',
                        help=('Enter end time as: yyyy-mm-dd hh:mm:ss,'
                              'defaults to current time.'),
                        default=time_utils.epoch_time_to_date_string(t_now))
    options = parser.parse_args()

    # -l (hours) takes precedence over the explicit --start/--end window.
    if options.last:
        start_time = t_now - 3600 * options.last
        end_time = t_now
    else:
        start_time = time_utils.to_epoch_time(options.start)
        end_time = time_utils.to_epoch_time(options.end)

    results = get_results(hosts=options.hosts,
                          board=options.board,
                          pool=options.pool,
                          start_time=start_time,
                          end_time=end_time,
                          verbose=options.verbose)
    labels = []
    if options.board:
        labels.append('board:%s' % (options.board))
    if options.pool:
        labels.append('pool:%s' % (options.pool))
    print_all_stats(results, labels, start_time, end_time)
Beispiel #20
0
                              'for entries to return.'),
                        default=None)
    parser.add_argument('-n',
                        type=int,
                        dest='size',
                        help='Maximum number of entries to return.',
                        default=10000)
    parser.add_argument('--hosts',
                        nargs='+',
                        dest='hosts',
                        help='Enter space deliminated hostnames',
                        default=[])
    options = parser.parse_args()

    if options.last:
        t_start = t_now - 3600 * options.last
        t_end = t_now
    else:
        t_start = time_utils.to_epoch_time(options.start)
        t_end = time_utils.to_epoch_time(options.end)
    if options.hosts:
        hosts = options.hosts
    else:
        hosts = host_history.get_matched_hosts(options.autotest_server,
                                               options.board, options.pool)

    for hostname in hosts:
        results = get_entries(t_start, t_end, options.gte, options.lte,
                              options.size, options.autotest_server, hostname)
        print get_results_string(hostname, t_start, t_end, results)
Beispiel #21
0
def get_host_history_intervals(input):
    """Gets stats for a host.

    This method uses intervals found in metaDB to build a full history of the
    host. The intervals argument contains a list of metadata from querying ES
    for records between t_start and t_end. To get the status from t_start to
    the first record logged in ES, we need to look back to the last record
    logged in ES before t_start.

    NOTE(review): the parameter name shadows the builtin input(); renaming
    it would change the keyword-call interface, so it is left as-is.

    @param input: A dictionary of input args, which includes the following
            args:
            t_start: beginning of time period we are interested in.
            t_end: end of time period we are interested in.
            hostname: hostname for the host we are interested in (string)
            intervals: intervals from ES query.
    @returns: (hostname, intervals_of_statuses, num_entries_found) where
        intervals_of_statuses maps (start, end) to status info,
        num_entries_found is the number of host history entries
        found in [t_start, t_end]

    """
    t_start = input['t_start']
    t_end = input['t_end']
    hostname = input['hostname']
    intervals = input['intervals']
    # Look back before t_start for the most recent lock state and host
    # status, so the window's initial state is known.
    lock_history_recent = find_most_recent_entry_before(
        t=t_start,
        type_str=_LOCK_HISTORY_TYPE,
        hostname=hostname,
        fields=['time_recorded', 'locked'])
    # find_most_recent_entry_before returns a dict (possibly empty), so use
    # .get with None defaults for the fields we need.
    t_lock = lock_history_recent.get('time_recorded', None)
    t_lock_val = lock_history_recent.get('locked', None)
    t_metadata = find_most_recent_entry_before(t=t_start,
                                               type_str=_HOST_HISTORY_TYPE,
                                               hostname=hostname,
                                               fields=None)
    t_host = t_metadata.pop('time_recorded', None)
    t_host_stat = t_metadata.pop('status', None)
    # Without any earlier host record, assume the host started out Ready.
    status_first = t_host_stat if t_host else 'Ready'
    # Earliest relevant timestamp among lock record, host record and window
    # start (None values are filtered out).
    t = min([t for t in [t_lock, t_host, t_start] if t])

    t_epoch = time_utils.to_epoch_time(t)
    t_end_epoch = time_utils.to_epoch_time(t_end)
    lock_history_entries = autotest_es.query(
        fields_returned=['locked', 'time_recorded'],
        equality_constraints=[('_type', _LOCK_HISTORY_TYPE),
                              ('hostname', hostname)],
        range_constraints=[('time_recorded', t_epoch, t_end_epoch)],
        sort_specs=[{
            'time_recorded': 'asc'
        }])

    # Validate lock history. If an unlock event failed to be recorded in metadb,
    # lock history will show the dut being locked while host still has status
    # changed over the time. This check tries to remove the lock event in lock
    # history if:
    # 1. There is only one entry in lock_history_entries (it's a good enough
    #    assumption to avoid the code being over complicated.
    # 2. The host status has changes after the lock history starts as locked.
    if (len(lock_history_entries.hits) == 1 and t_lock_val
            and len(intervals) > 1):
        locked_intervals = None
        print('Lock history of dut %s is ignored, the dut may have missing '
              'data in lock history in metadb. Try to lock and unlock the dut '
              'in AFE will force the lock history to be updated in metadb.' %
              hostname)
    else:
        locked_intervals = lock_history_to_intervals(t_lock_val, t, t_end,
                                                     lock_history_entries)
    num_entries_found = len(intervals)
    t_prev = t_start
    status_prev = status_first
    metadata_prev = t_metadata
    intervals_of_statuses = collections.OrderedDict()

    # Walk the records in order, closing out the previous status interval
    # at each status change.
    for entry in intervals:
        metadata = entry.copy()
        t_curr = metadata.pop('time_recorded')
        status_curr = metadata.pop('status')
        intervals_of_statuses.update(
            calculate_status_times(t_prev, t_curr, status_prev, metadata_prev,
                                   locked_intervals))
        # Update vars
        t_prev = t_curr
        status_prev = status_curr
        metadata_prev = metadata

    # Do final as well.
    intervals_of_statuses.update(
        calculate_status_times(t_prev, t_end, status_prev, metadata_prev,
                               locked_intervals))
    return hostname, intervals_of_statuses, num_entries_found
Beispiel #22
0
def to_epoch_time_int(value):
    """Convert the given time value to an integer epoch timestamp.

    @param value: A time value accepted by time_utils.to_epoch_time
                  (e.g. a 'yyyy-mm-dd hh:mm:ss' string).

    @returns: epoch time in integer.
    """
    epoch_seconds = time_utils.to_epoch_time(value)
    return int(epoch_seconds)
def GetSuiteHQEs(suite_job_id, look_past_seconds, afe=None, tko=None):
    """Get the host queue entries for active DUTs during a suite job.

    @param suite_job_id: Suite's AFE job id.
    @param look_past_seconds: Number of seconds past the end of the suite
                              job to look for next HQEs.
    @param afe: AFE database handle.
    @param tko: TKO database handle.

    @returns A dictionary keyed on hostname to a list of host queue entry
             dictionaries.  HQE dictionary contains the following keys:
             name, hostname, job_status, job_url, gs_url, start_time, end_time
    """
    if afe is None:
        afe = frontend.AFE()
    if tko is None:
        tko = frontend.TKO()

    # Find the suite job and when it ran.  TKO stores missing timestamps as
    # the literal string 'None', so filter those out while collecting the
    # usable ones in a single pass.
    statuses = tko.get_job_test_statuses_from_db(suite_job_id)
    start_epochs = []
    finish_epochs = []
    for s in statuses:
        if s.test_started_time == 'None' or s.test_finished_time == 'None':
            # Lazy %-style args: formatting only happens if the record is
            # actually emitted.
            logging.error(
                'TKO entry missing time: %s %s %s %s %s %s %s %s %s',
                s.id, s.test_name, s.status, s.reason,
                s.test_started_time, s.test_finished_time, s.job_owner,
                s.hostname, s.job_tag)
        if s.test_started_time != 'None':
            start_epochs.append(
                int(time_utils.to_epoch_time(s.test_started_time)))
        if s.test_finished_time != 'None':
            finish_epochs.append(
                int(time_utils.to_epoch_time(s.test_finished_time)))

    # If there is no usable start time or finish time (no statuses at all, or
    # every entry had a missing timestamp), won't be able to get HQEs.
    # Previously an all-'None' status list crashed with ValueError from
    # min()/max() over an empty sequence.
    if not start_epochs or not finish_epochs:
        return {}
    start_time = min(start_epochs)
    finish_time = max(finish_epochs)

    # Find all the HQE entries for the suite's child jobs.
    child_jobs = afe.get_jobs(parent_job_id=suite_job_id)
    child_job_ids = {j.id for j in child_jobs}
    hqes = afe.get_host_queue_entries(job_id__in=list(child_job_ids))
    hostnames = {h.host.hostname for h in hqes if h.host}
    host_hqes = {}
    for hostname in hostnames:
        history = HostJobHistory.get_host_history(
            afe, hostname, start_time, finish_time + look_past_seconds)
        for h in history:
            # Map the web log URL to its Google Storage counterpart.
            gs_url = re.sub(r'http://.*/tko/retrieve_logs.cgi\?job=/results',
                            r'gs://chromeos-autotest-results', h.job_url)
            entry = {
                'name': h.name,
                'hostname': history.hostname,
                'job_status': h.job_status,
                'job_url': h.job_url,
                'gs_url': gs_url,
                'start_time': h.start_time,
                'end_time': h.end_time,
            }
            host_hqes.setdefault(history.hostname, []).append(entry)

    return host_hqes
Beispiel #24
0
def get_intervals_for_hosts(t_start, t_end, hosts=None, board=None, pool=None):
    """Gets intervals for given hosts or board/pool.

    Query metaDB to return all intervals between given start and end time.
    If a list of hosts is provided, the board and pool constraints are ignored.
    If hosts is set to None, and board or pool is set, this method will attempt
    to search host history with labels for all hosts, to help the search perform
    faster.
    If hosts, board and pool are all set to None, return intervals for all
    hosts.
    Note that intervals found in metaDB may miss the history from t_start to
    the first interval found.

    @param t_start: beginning of time period we are interested in.
    @param t_end: end of time period we are interested in.
    @param hosts: A list of hostnames to look for history.
    @param board: Name of the board to look for history. Default is None.
    @param pool: Name of the pool to look for history. Default is None.
    @returns: A dictionary of hostname: intervals.
    """
    hosts_intervals = {}
    if hosts:
        for host in hosts:
            hosts_intervals[host] = get_intervals_for_host(
                t_start, t_end, host)
    else:
        hosts = get_matched_hosts(board, pool)
        if not hosts:
            raise NoHostFoundException(
                'No host is found for board:%s, pool:%s.' % (board, pool))
        equality_constraints = [
            ('_type', _HOST_HISTORY_TYPE),
        ]
        if board:
            equality_constraints.append(('labels', 'board:' + board))
        if pool:
            equality_constraints.append(('labels', 'pool:' + pool))
        t_start_epoch = time_utils.to_epoch_time(t_start)
        t_end_epoch = time_utils.to_epoch_time(t_end)
        # Sorting by hostname is required: groupby only merges *adjacent*
        # records with equal keys.
        results = autotest_es.query(equality_constraints=equality_constraints,
                                    range_constraints=[
                                        ('time_recorded', t_start_epoch,
                                         t_end_epoch)
                                    ],
                                    sort_specs=[{
                                        'hostname': 'asc'
                                    }])
        results_group_by_host = {}
        for hostname, intervals_for_host in groupby(results.hits,
                                                    lambda h: h['hostname']):
            # Materialize the group immediately: a groupby grouper shares the
            # underlying iterator, and is invalidated as soon as the outer
            # loop advances to the next group.  Storing the raw grouper left
            # every host's intervals (except the last) empty when read below.
            results_group_by_host[hostname] = list(intervals_for_host)
        for host in hosts:
            intervals = results_group_by_host.get(host, None)
            # In case the host's board or pool label was modified after
            # the last status change event was reported, we need to run a
            # separate query to get its history. That way the host's
            # history won't be shown as blank.
            if not intervals:
                intervals = get_intervals_for_host(t_start, t_end, host)
            hosts_intervals[host] = intervals
    return hosts_intervals