Example #1
def get_woven_logs(log_files, pod, filters, objref_dict):
    lines = []
    combined_lines = []
    first_combined = ""
    pod_re = regex.wordRE(pod)

    # Produce a list of lines of all the selected logs
    for log_file in log_files:
        log = gcs_async.read(log_file).get_result()
        log = log.decode('utf8', 'replace')
        lines.extend(log.split('\n'))
    # Combine lines without timestamp into previous line, except if it comes at the
    # beginning of the file, in which case add it to the line with the first timestamp
    for line in lines:
        timestamp_re = regex.timestamp(line)
        if timestamp_re and timestamp_re.group(0):
            if not combined_lines:
                # add beginning of file to first timestamp line
                line = first_combined + line
            combined_lines.append(line)
        else:
            if not combined_lines:
                first_combined = first_combined + line
            else:
                combined_lines[-1] = combined_lines[-1] + line
    lines = sorted(combined_lines, key=regex.sub_timestamp)
    data = '\n'.join(lines)
    woven_logs = log_parser.digest(data, error_re=pod_re,
        filters=filters, objref_dict=objref_dict)
    return woven_logs
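The interesting step here is the weaving: lines without a timestamp are folded into the preceding timestamped line, and anything before the first timestamp is attached to the first timestamped line, before the merged list is sorted. A minimal stand-alone sketch of that technique, with a plain re pattern standing in for the project's regex.timestamp/regex.sub_timestamp helpers (the timestamp format below is an assumption):

import re

# Assumed timestamp shape; the real helpers live in the project's regex module.
TIMESTAMP = re.compile(r'\d{2}:\d{2}:\d{2}\.\d+')

def weave(lines):
    combined, first_combined = [], ''
    for line in lines:
        if TIMESTAMP.search(line):
            if not combined:
                line = first_combined + line  # attach any preamble to the first stamped line
            combined.append(line)
        elif not combined:
            first_combined += line  # text before the first timestamp
        else:
            combined[-1] += line  # continuation of the previous line
    return sorted(combined, key=lambda l: TIMESTAMP.search(l).group(0))

print(weave(['10:00:01.5 pulling image', ': still waiting', '10:00:00.1 pod created']))
# ['10:00:00.1 pod created', '10:00:01.5 pulling image: still waiting']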
Example #2
def get_running_build_log(job, build, prow_url):
    try:
        url = "https://%s/log?job=%s&id=%s" % (prow_url, job, build)
        result = urlfetch.fetch(url)
        if result.status_code == 200:
            return log_parser.digest(result.content), url
    except urlfetch.Error:
        logging.exception('Caught exception fetching url')
    return None, None
Example #3
def get_running_build_log(job, build):
    try:
        # keep this synced with TestKubernetes in prow/cmd/line/line.go
        pod_name = ("%s-%s" % (job, build))[-60:]
        url = "https://prow.k8s.io/log?pod=%s" % pod_name
        result = urlfetch.fetch(url)
        if result.status_code == 200:
            return log_parser.digest(result.content), url
    except urlfetch.Error:
        logging.exception('Caught exception fetching url')
    return None, None
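A quick hypothetical check of the slice above: [-60:] keeps only the last 60 characters of "job-build", truncating long job names from the left, presumably to keep the generated pod name within Kubernetes' name-length limits (per the comment, the same logic lives in prow/cmd/line/line.go):

job = 'ci-kubernetes-e2e-gce-serial-release-with-a-very-long-suffix'  # made-up job name
build = '42'
pod_name = ('%s-%s' % (job, build))[-60:]
print(len(pod_name) <= 60, pod_name.endswith('-42'))  # True True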
Example #4
def digest(data, strip=True, filters=None,
           error_re=regex.error_re):
    if filters is None:
        filters = {"UID": "", "pod": "", "Namespace": ""}
    digested = log_parser.digest(data.replace(' ', '\n'), error_re=error_re,
                                 skip_fmt=lambda l: 's%d' % l, filters=filters)
    if strip:
        digested = re.sub(r'<span class="skipped">([^<]*)</span>', r'(\1)',
            digested, flags=re.MULTILINE)
        digested = re.sub(r'<[^>]*>', '', digested)
    return digested.replace('\n', ' ')
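In helpers like this one, data.replace(' ', '\n') turns each word of a short sample into its own "line" so log_parser.digest can be exercised on one-liners, and skip_fmt apparently renders a skipped run of n lines as 's<n>'. The strip branch then reduces digest's HTML back to plain text; here is a self-contained sketch of that strip step, with sample markup modeled on the output pinned down in Example #12:

import re

html = ('<span class="hilight"><span class="keyword">error</span> here</span>\n'
        '<span class="skipped">s42</span>\n')
text = re.sub(r'<span class="skipped">([^<]*)</span>', r'(\1)', html)
text = re.sub(r'<[^>]*>', '', text)  # drop the remaining tags
print(text.replace('\n', ' ').strip())  # error here (s42)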
Example #5
def parse_log_file(log_filename, pod, filters=None, make_dict=False, objref_dict=None):
    """Based on make_dict, either returns the objref_dict or the parsed log file"""
    log = gcs_async.read(log_filename).get_result()
    if log is None:
        return None
    pod_re = regex.wordRE(pod)

    if make_dict:
        return kubelet_parser.make_dict(log.decode('utf8', 'replace'), pod_re)
    else:
        return log_parser.digest(log.decode('utf8', 'replace'),
                                 error_re=pod_re, filters=filters,
                                 objref_dict=objref_dict)
Example #6
def digest(data, strip=True, filters=None,
           error_re=regex.error_re):
    if filters is None:
        filters = {"UID":"", "pod":"", "Namespace":"", "ContainerID":""}

    digested = log_parser.digest(data.replace(' ', '\n'), error_re=error_re,
                                 skip_fmt=lambda l: 's%d' % l, filters=filters)
    print(digested)
    if strip:
        digested = re.sub(r'<span class="skipped"[^<]*>([^<]*)</span>', r'(\1)',
            digested, flags=re.MULTILINE)
        digested = re.sub(r'<[^>]*>', '', digested)
    return digested.replace('\n', ' ').strip()
Example #7
def build_details(build_dir):
    """
    Collect information from a build directory.

    Args:
        build_dir: GCS path containing a build's results.
    Returns:
        started: value from started.json {'version': ..., 'timestamp': ...}
        finished: value from finished.json {'timestamp': ..., 'result': ...}
        failures: list of (name, duration, text) tuples
        build_log: a highlighted portion of errors in the build log. May be None.
    """
    started_fut = gcs_async.read(build_dir + '/started.json')
    finished = gcs_async.read(build_dir + '/finished.json').get_result()
    started = started_fut.get_result()
    if finished and not started:
        started = 'null'
    if started and not finished:
        finished = 'null'
    elif not (started and finished):
        return
    started = json.loads(started)
    finished = json.loads(finished)

    failures = []
    junit_paths = [f.filename for f in view_base.gcs_ls('%s/artifacts' % build_dir)
                   if re.match(r'junit_.*\.xml', os.path.basename(f.filename))]

    junit_futures = {}
    for f in junit_paths:
        junit_futures[gcs_async.read(f)] = f

    for future in junit_futures:
        junit = future.get_result()
        if junit is None:
            continue
        failures.extend(parse_junit(junit, junit_futures[future]))
    failures.sort()

    build_log = None
    if finished and finished.get('result') != 'SUCCESS' and len(failures) == 0:
        build_log = gcs_async.read(build_dir + '/build-log.txt').get_result()
        if build_log:
            build_log = log_parser.digest(build_log.decode('utf8', 'replace'))
            logging.info('fallback log parser emitted %d lines',
                         build_log.count('\n'))
    return started, finished, failures, build_log
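Note the read pattern in this example: started.json, finished.json, and all junit files are requested via gcs_async.read before any get_result() call blocks, so the GCS round-trips overlap instead of running serially. A rough standard-library equivalent of the same start-everything-then-join pattern, with ThreadPoolExecutor standing in for gcs_async and made-up paths:

from concurrent.futures import ThreadPoolExecutor

def read(path):
    return 'contents of %s' % path  # stand-in for a blocking GCS read

paths = ['artifacts/junit_01.xml', 'artifacts/junit_02.xml']
with ThreadPoolExecutor() as pool:
    futures = {pool.submit(read, p): p for p in paths}  # start all reads first
    for future, path in futures.items():  # then collect the results
        print(path, '->', future.result())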
Example #8
def build_details(build_dir):
    """
    Collect information from a build directory.

    Args:
        build_dir: GCS path containing a build's results.
    Returns:
        started: value from started.json {'version': ..., 'timestamp': ...}
        finished: value from finished.json {'timestamp': ..., 'result': ...}
        failures: list of (name, duration, text) tuples
        build_log: a highlighted portion of errors in the build log. May be None.
    """
    started_fut = gcs_read_async(build_dir + '/started.json')
    finished = gcs_read_async(build_dir + '/finished.json').get_result()
    started = started_fut.get_result()
    if finished and not started:
        started = 'null'
    elif not (started and finished):
        # TODO: handle builds that have started but not finished properly.
        # Right now they show an empty page (404), but should show the version
        # and when the build started.
        return
    started = json.loads(started)
    finished = json.loads(finished)
    failures = []
    junit_paths = [f.filename for f in gcs_ls('%s/artifacts' % build_dir)
                   if re.match(r'junit_.*\.xml', os.path.basename(f.filename))]
    junit_futures = [gcs_read_async(f) for f in junit_paths]
    for future in junit_futures:
        junit = future.get_result()
        if junit is None:
            continue
        failures.extend(parse_junit(junit))
    build_log = None
    if finished.get('result') == 'FAILURE' and len(failures) == 0:
        build_log = gcs_read_async(build_dir + '/build-log.txt').get_result()
        if build_log:
            build_log = log_parser.digest(build_log.decode('utf8', 'replace'))
            logging.warning('fallback log parser emitted %d lines',
                            build_log.count('\n'))
    return started, finished, failures, build_log
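Compared with Example #7, this earlier variant only falls back to the log parser when finished.json reports result == 'FAILURE' rather than any non-SUCCESS result, logs the fallback at warning instead of info level, and, as its TODO notes, does not yet handle a build that has started but not finished.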
Example #9
def parse_kubelet(pod, junit, build_dir, filters):
    junit_file = junit + ".xml"
    tmps = [f.filename for f in gcs_ls('%s/artifacts' % build_dir)
            if re.match(r'.*/tmp-node.*', f.filename)]

    junit_regex = r".*" + junit_file + r".*"
    kubelet_filename = ""
    for folder in tmps:
        tmp_contents = [f.filename for f in gcs_ls(folder)]
        for f in tmp_contents:
            if re.match(junit_regex, f):
                for file in tmp_contents:
                    if re.match(r'.*kubelet\.log', file):
                        kubelet_filename = file
    if kubelet_filename == "":
        return False
    kubelet_log = gcs_async.read(kubelet_filename).get_result()

    if kubelet_log:
        pod_re = regex.wordRE(pod)
        kubelet_log = log_parser.digest(kubelet_log.decode('utf8', 'replace'),
                                        error_re=pod_re, filters=filters)

    return kubelet_log
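The lookup above scans each tmp-node folder and, for a folder whose contents match the junit file, picks out kubelet.log; note it returns False when nothing matches. A stand-alone sketch of the same lookup over plain lists, with made-up file names in place of gcs_ls results:

import re

folders = {
    'artifacts/tmp-node-1': ['artifacts/tmp-node-1/junit_01.xml',
                             'artifacts/tmp-node-1/kubelet.log'],
    'artifacts/tmp-node-2': ['artifacts/tmp-node-2/junit_02.xml',
                             'artifacts/tmp-node-2/kubelet.log'],
}

def find_kubelet_log(junit):
    junit_re = re.compile(r'.*%s\.xml.*' % junit)
    for contents in folders.values():
        if any(junit_re.match(f) for f in contents):
            for f in contents:
                if re.match(r'.*kubelet\.log', f):
                    return f
    return ''

print(find_kubelet_log('junit_02'))  # artifacts/tmp-node-2/kubelet.log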
Example #10
def get_build_log(build_dir):
    build_log = gcs_async.read(build_dir + '/build-log.txt').get_result()
    if build_log:
        return log_parser.digest(build_log)
Example #11
    def digest(self, data, strip=True):
        digested = log_parser.digest(data.replace(' ', '\n'),
                                     skip_fmt=lambda l: 's%d' % l)
        if strip:
            digested = re.sub(r'<[^>]*>', '', digested)
        return digested.replace('\n', ' ')
Example #12
    def test_unicode(self):
        self.assertEqual(log_parser.digest(u'error \xb5s'),
            u'<span class="hilight"><span class="keyword">'
            u'error</span> \xb5s</span>\n')
Example #13
    def digest(self, data, strip=True):
        digested = log_parser.digest(data.replace(' ', '\n'),
                                     skip_fmt=lambda l: 's%d' % l)
        if strip:
            digested = re.sub(r'<[^>]*>', '', digested)
        return digested.replace('\n', ' ')