def test_pod(self):
     """Highlighting and filtering of pod-name matches in digest output."""
     highlighted = digest('pod-blah', error_re=regex.wordRE("pod"), strip=False)
     self.assertEqual(highlighted, '<span class="highlight">pod-blah</span>')
     filtered = digest(
         '0 1 2 3 4 5 pod 6 7 8 9 10',
         error_re=regex.wordRE("pod"),
         filters={"pod": "pod", "UID": "", "Namespace": "", "ContainerID": ""})
     self.assertEqual(filtered, 's2 2 3 4 5 pod 6 7 8 9 10')
 def test_pod(self):
     """digest() wraps matched pod names and filters surrounding lines."""
     self.assertEqual(
         digest('pod-blah', error_re=regex.wordRE("pod"), strip=False),
         '<span class="hilight">pod-blah</span>')
     pod_filters = {"pod": "pod", "UID": "", "Namespace": "", "ContainerID": ""}
     self.assertEqual(
         digest('0 1 2 3 4 5 pod 6 7 8 9 10',
                error_re=regex.wordRE("pod"),
                filters=pod_filters),
         's2 2 3 4 5 pod 6 7 8 9 10')
Example #3
0
 def test_pod(self):
     """Keyword highlighting and line filtering around a failed pod."""
     stripped = digest(
         'pod-blah',
         error_re=regex.wordRE("pod"),
         filters={"pod": "pod", "UID": "", "Namespace": ""},
         strip=False)
     self.assertEqual(stripped,
                      '<span class="hilight"><span class="keyword">'
                      'pod</span>-blah</span>')
     skipped = digest(
         '0 1 2 3 4 5 pod 6 7 8 9 10',
         error_re=regex.wordRE("pod"),
         filters={"pod": "pod", "UID": "", "Namespace": ""})
     self.assertEqual(skipped, 's2 ( 0 1 ) 2 3 4 5 pod 6 7 8 9')
Example #4
0
def get_woven_logs(log_files, pod, filters, objref_dict):
    """Interleave several log files into one digest, ordered by timestamp.

    Lines without a timestamp are treated as continuations: they are glued
    onto the preceding timestamped line, or — when they appear before any
    timestamped line — onto the first timestamped line.
    """
    pod_re = regex.wordRE(pod)

    # Pull every selected log down and split it into individual lines.
    raw_lines = []
    for log_file in log_files:
        content = gcs_async.read(log_file).get_result()
        raw_lines.extend(content.decode('utf8', 'replace').split('\n'))

    merged = []    # timestamped lines, with continuations glued on
    preamble = ""  # text seen before the first timestamped line
    for line in raw_lines:
        match = regex.timestamp(line)
        if match and match.group(0):
            if not merged:
                # Attach any leading preamble to the first timestamped line.
                line = preamble + line
            merged.append(line)
        elif merged:
            merged[-1] += line
        else:
            preamble += line

    ordered = sorted(merged, key=regex.sub_timestamp)
    return log_parser.digest('\n'.join(ordered),
                             error_re=pod_re,
                             filters=filters,
                             objref_dict=objref_dict)
Example #5
0
def get_woven_logs(log_files, pod, filters, objref_dict):
    """Weave multiple pod logs into a single, timestamp-sorted digest."""
    pod_re = regex.wordRE(pod)

    # Collect the raw lines of every selected log, in file order.
    all_lines = []
    for path in log_files:
        blob = gcs_async.read(path).get_result()
        all_lines += blob.decode('utf8', 'replace').split('\n')

    # Stitch untimestamped continuation lines onto their timestamped
    # predecessor; anything before the first timestamp is kept aside and
    # prepended to the first timestamped line.
    stitched = []
    head_fragment = ""
    for current in all_lines:
        ts = regex.timestamp(current)
        if not (ts and ts.group(0)):
            if stitched:
                stitched[-1] = stitched[-1] + current
            else:
                head_fragment = head_fragment + current
            continue
        if not stitched:
            current = head_fragment + current
        stitched.append(current)

    stitched.sort(key=regex.sub_timestamp)
    return log_parser.digest('\n'.join(stitched), error_re=pod_re,
                             filters=filters, objref_dict=objref_dict)
Example #6
0
def parse(lines, error_re, hilight_words, filters):
    """
    Given filters returns indeces of wanted lines from the kubelet log

    Args:
        lines: array of kubelet log lines
        error_re: regular expression of the failed pod name
        hilight_words: array of words that need to be bolded (mutated and
            also returned)
        filters: dictionary of which filters to apply
    Returns:
        matched_lines: ordered array of indeces of lines to display
        hilight_words: updated hilight_words
    """
    matched_lines = []
    uid = ""
    uid_re = None  # compiled once when the UID is first discovered

    for n, line in enumerate(lines):
        if error_re.search(line):
            matched_lines.append(n)
            # Grab the pod's UID from the first line that exposes it, so
            # later lines that mention only the UID can also be matched.
            if filters["uid"] and uid == "":
                s = regex.uidobj_re.search(line)
                if s and s.group(1) != "":
                    uid = s.group(1)
                    uid_re = regex.wordRE(uid)
                    hilight_words.append(uid)
        elif uid != "" and uid_re.search(line):
            # Line didn't match the pod name but does mention its UID.
            matched_lines.append(n)

    # Indices were appended in strictly increasing order, so matched_lines
    # is already sorted; the original per-iteration sort (and the unused
    # `end` variable) have been removed.
    return matched_lines, hilight_words
Example #7
0
 def test_make_dict(self):
     """Test make_dict works"""
     expected = {
         "UID": "uid",
         "Namespace": "podName",
         "Name": "abc",
     }
     self.assertEqual(
         kubelet_parser.make_dict(lines, regex.wordRE("abc")), expected)
Example #8
0
 def test_pod(self):
     """digest() highlights the failed pod and filters the surrounding log."""
     stripped = digest(
         'pod-blah', error_re=regex.wordRE("pod"),
         filters={"pod": "pod", "UID": "", "Namespace": ""}, strip=False)
     self.assertEqual(stripped,
                      '<span class="hilight"><span class="keyword">'
                      'pod</span>-blah</span>')
     filtered = digest(
         '0 1 2 3 4 5 pod 6 7 8 9 10', error_re=regex.wordRE("pod"),
         filters={"pod": "pod", "UID": "", "Namespace": ""})
     self.assertEqual(filtered, 's2 ( 0 1 ) 2 3 4 5 pod 6 7 8 9')
Example #9
0
 def test_wordRE(self):
     """wordRE matches the word at word boundaries only, not as a substring."""
     cases = [
         ('/abcdef/', True),
         ('Pod abcdef failed', True),
         ('abcdef', True),
         ('cdabcdef', False),
         ('abc def', False),
         ('Podname(abcdef)', True),
     ]
     for text, matches in cases:
         found = regex.wordRE("abcdef").search(text) is not None
         self.assertEqual(found, matches,
             'wordRE(abcdef).search(%r) should be %r' % (text, matches))
Example #10
0
 def test_wordRE(self):
     """Whole-word occurrences match; embedded substrings do not."""
     for text in ('/abcdef/', 'Pod abcdef failed', 'abcdef', 'Podname(abcdef)'):
         self.assertEqual(bool(regex.wordRE("abcdef").search(text)), True,
             'wordRE(abcdef).search(%r) should be %r' % (text, True))
     for text in ('cdabcdef', 'abc def'):
         self.assertEqual(bool(regex.wordRE("abcdef").search(text)), False,
             'wordRE(abcdef).search(%r) should be %r' % (text, False))
Example #11
0
def parse_log_file(log_filename, pod, filters=None, make_dict=False, objref_dict=None):
    """Based on make_dict, either returns the objref_dict or the parsed log file"""
    raw = gcs_async.read(log_filename).get_result()
    if raw is None:
        return None
    pod_re = regex.wordRE(pod)
    text = raw.decode('utf8', 'replace')
    if make_dict:
        return kubelet_parser.make_dict(text, pod_re)
    return log_parser.digest(text, error_re=pod_re, filters=filters,
                             objref_dict=objref_dict)
Example #12
0
def parse_log_file(log_filename, pod, filters=None, make_dict=False, objref_dict=None):
    """Based on make_dict, either returns the objref_dict or the parsed log file

    Args:
        log_filename: GCS path of the log to fetch.
        pod: pod name to highlight; falsy falls back to the generic error regex.
        filters: dictionary of which filters to apply, or None.
        make_dict: if True (and pod is set) extract the objref dict instead of
            producing the digested log.
    Returns:
        (objref_dict, bool) in make_dict mode, the digested log otherwise;
        ({}, False) / None respectively when the log cannot be read.
    """
    log = gcs_async.read(log_filename).get_result()
    if log is None:
        # BUG FIX: the original `return {}, False if make_dict else None`
        # parsed as `return ({}, (False if make_dict else None))`, handing a
        # tuple to non-make_dict callers that expect None.
        return ({}, False) if make_dict else None
    if pod:
        bold_re = regex.wordRE(pod)
    else:
        bold_re = regex.error_re
    if objref_dict is None:
        objref_dict = {}
    if make_dict and pod:
        return kubelet_parser.make_dict(log.decode('utf8', 'replace'), bold_re, objref_dict)
    else:
        return log_parser.digest(log.decode('utf8', 'replace'),
            error_re=bold_re, filters=filters, objref_dict=objref_dict)
Example #13
0
def parse_kubelet(pod, junit, build_dir, filters):
    """Find and digest the kubelet log accompanying a junit result.

    Args:
        pod: pod name whose mentions should be highlighted.
        junit: junit basename (without ".xml") identifying the test run.
        build_dir: GCS directory of the build.
        filters: dictionary of which filters to apply.
    Returns:
        The digested kubelet log, False when no kubelet log is found, or the
        raw read result when the file exists but reads back falsy.
    """
    junit_file = junit + ".xml"
    tmps = [f.filename for f in gcs_ls('%s/artifacts' % build_dir)
            if re.match(r'.*/tmp-node.*', f.filename)]

    junit_regex = r".*" + junit_file + r".*"
    kubelet_filename = ""
    for folder in tmps:
        tmp_contents = [f.filename for f in gcs_ls(folder)]
        # The original rescanned tmp_contents once per junit match; one scan
        # per matching folder suffices. The last matching folder still wins.
        if any(re.match(junit_regex, name) for name in tmp_contents):
            for name in tmp_contents:  # renamed from `file` (shadowed builtin)
                if re.match(r'.*kubelet\.log', name):
                    kubelet_filename = name
    if kubelet_filename == "":
        return False
    kubelet_log = gcs_async.read(kubelet_filename).get_result()

    if kubelet_log:
        pod_re = regex.wordRE(pod)
        kubelet_log = log_parser.digest(
            kubelet_log.decode('utf8', 'replace'), error_re=pod_re,
            filters=filters)

    return kubelet_log
Example #14
0
 def test_make_dict_fail(self):
     """Test when objref line not in file"""
     result = kubelet_parser.make_dict(
         ["pod failed"], regex.wordRE("abc"))
     self.assertEqual(result, None)
Example #15
0
 def test_make_dict_fail(self):
     """Test when objref line not in file"""
     logs = ["pod failed"]
     self.assertEqual(
         kubelet_parser.make_dict(logs, regex.wordRE("abc")), None)
Example #16
0
 def test_make_dict(self):
     """Test make_dict works"""
     want = {"UID": "uid", "Namespace": "podName", "Name": "abc"}
     got = kubelet_parser.make_dict(lines, regex.wordRE("abc"))
     self.assertEqual(got, want)