示例#1
0
文件: wtstats.py 项目: Machyne/mongo
def main():   
    parser = argparse.ArgumentParser(description='Create graphs from' \
        'WiredTiger statistics.')
    parser.add_argument('--all', '-A', action='store_true',
        help='generate separate html files for each stats group')
    parser.add_argument('--clear', action='store_true',
        help='WiredTiger stats gathered with clear set')
    parser.add_argument('--include', '-I', metavar='regexp',
        type=re.compile, action='append',
        help='only include series with titles matching regexp')
    parser.add_argument('--list', action='store_true',
        help='only list the parsed series, does not create html file')
    parser.add_argument('--output', '-o', metavar='file', default='wtstats',
        help='HTML output file prefix')
    parser.add_argument('--json', action='store_true', 
        help='additionally output data series in json format')
    parser.add_argument('files', metavar='file', nargs='+',
        help='input files or directories generated by WiredTiger statistics' \
        'logging')
    args = parser.parse_args()

    # Parse files or directory and skip constants
    parsed = skip_constants(parse_files(args.files))

    # filter results based on --include, compute common prefix and suffix
    results = []
    prefix = suffix = None

    for title, values in sorted(parsed.iteritems()):
        title, ydata = munge(args, title, values)

        # ignore entries if a list of regular expressions was given
        if args.include and not [r for r in args.include if r.search(title)]:
            continue
        if not 'wtperf' in title:
            prefix = title if prefix is None else common_prefix(prefix, title)
            suffix = title if suffix is None else common_suffix(title, suffix)
        results.append((title, ydata))

    # Process titles, eliminate common prefixes and suffixes
    if prefix or suffix:
        new_results = []
        for title, ydata in results:
            if 'wtperf' not in title:
                title = title[len(prefix):]
                if suffix:
                    title = title[:-len(suffix)]
            new_results.append((title, ydata))
        results = new_results

    # Are we just listing the results?
    if args.list:
        print 
        print "Parsed stats:"
        for title, ydata in results:
            print "  ", title
        sys.exit(0)

    output_series(results, args)

    # If the user wants the stats split up by prefix type do so.
    if args.all:
        for prefix in prefix_list:
            output_series(results, args, prefix)
        for group in groups.keys():
            output_series(results, args, group, groups[group])
示例#2
0
def main():   
    parser = argparse.ArgumentParser(description='Create graphs from' \
        'WiredTiger statistics.')
    parser.add_argument('--all', '-A', action='store_true',
        help='generate separate html files for each stats group')
    parser.add_argument('--clear', action='store_true',
        help='WiredTiger stats gathered with clear set')
    parser.add_argument('--include', '-I', metavar='regexp',
        type=re.compile, action='append',
        help='only include series with titles matching regexp')
    parser.add_argument('--list', action='store_true',
        help='only list the parsed series, does not create html file')
    parser.add_argument('--output', '-o', metavar='file', default='wtstats',
        help='HTML output file prefix')
    parser.add_argument('--json', action='store_true', 
        help='additionally output data series in json format')
    parser.add_argument('files', metavar='file', nargs='+',
        help='input files or directories generated by WiredTiger statistics' \
        'logging')
    args = parser.parse_args()

    # Parse files or directory and skip constants
    parsed = skip_constants(parse_files(args.files))

    # filter results based on --include, compute common prefix and suffix
    results = []
    prefix = suffix = None

    for title, values in sorted(parsed.iteritems()):
        title, ydata = munge(args, title, values)

        # ignore entries if a list of regular expressions was given
        if args.include and not [r for r in args.include if r.search(title)]:
            continue
        if not 'wtperf' in title:
            prefix = title if prefix is None else common_prefix(prefix, title)
            suffix = title if suffix is None else common_suffix(title, suffix)
        results.append((title, ydata))

    # Process titles, eliminate common prefixes and suffixes
    if prefix or suffix:
        new_results = []
        for title, ydata in results:
            if 'wtperf' not in title:
                title = title[len(prefix):]
                if suffix:
                    title = title[:-len(suffix)]
            new_results.append((title, ydata))
        results = new_results

    # Are we just listing the results?
    if args.list:
        print 
        print "Parsed stats:"
        for title, ydata in results:
            print "  ", title
        sys.exit(0)

    output_series(results, args)

    # If the user wants the stats split up by prefix type do so.
    if args.all:
        for prefix in prefix_list:
            output_series(results, args, prefix)
        for group in groups.keys():
            output_series(results, args, group, groups[group])
示例#3
0
文件: wtstats.py 项目: 3rf/mongo
    # Rebuild `results` with the shared prefix (and suffix, if any)
    # stripped from every title.  NOTE: in this variant each results
    # entry is a 3-tuple (title, yaxis, ydata), unlike the 2-tuple
    # (title, ydata) form used elsewhere in this project.
    new_results = []
    for title, yaxis, ydata in results:
        title = title[len(prefix):]
        if suffix:
            title = title[:-len(suffix)]
        new_results.append((title, yaxis, ydata))
    results = new_results

# Dump the results as a CSV file
#print '"time", ' + ', '.join('"%s"' % title for title, values in ydata)
#for i in xrange(len(xdata)):
#    print '%d, %s' % (xdata[i], ', '.join('%g' % values[i] for title, values in ydata))

# Are we just listing the results?
if args.list:
    for title, yaxis, ydata in results:
        print title
    sys.exit(0)

# Figure out the full set of x axis values
# NOTE(review): the loop clauses look swapped — in a generator
# expression the first iterable (`ydata.iterkeys()`) is evaluated
# eagerly, before `for ydata in results` binds anything, so this uses
# whatever `ydata` leaked from an earlier loop and repeats its keys
# len(results) times.  The intent was presumably:
#   sorted(set(k for _, _, ydata in results for k in ydata.iterkeys()))
# Confirm against the callers before changing.
xdata = sorted(set(k for k in ydata.iterkeys() for ydata in results))

output_series(results)

# If the user wants the stats split up by prefix type do so.
if args.all:
    for prefix in prefix_list:
        output_series(results, prefix)
    for group in groups.keys():
        output_series(results, group, groups[group])
示例#4
0
def main():
    parser = argparse.ArgumentParser(description='Create graphs from' \
        'WiredTiger statistics.')
    parser.add_argument(
        '--all',
        '-A',
        action='store_true',
        help='generate separate html files for each stats group')
    parser.add_argument('--clear',
                        action='store_true',
                        help='WiredTiger stats gathered with clear set')
    parser.add_argument('--include',
                        '-I',
                        metavar='regexp',
                        type=re.compile,
                        action='append',
                        help='only include series with titles matching regexp')
    parser.add_argument(
        '--list',
        action='store_true',
        help='only list the parsed series, does not create html file')
    parser.add_argument('--output',
                        '-o',
                        metavar='file',
                        default='wtstats',
                        help='HTML output file prefix')
    parser.add_argument('--json',
                        action='store_true',
                        help='additionally output data series in json format')
    parser.add_argument('files', metavar='file', nargs='+',
        help='input files or directories generated by WiredTiger statistics' \
        'logging')
    args = parser.parse_args()

    # Read the input file(s) into a dictionary of lists.
    def getfiles(l):
        for f in l:
            if os.path.isfile(f):
                yield f
            elif os.path.isdir(f):
                for s in glob(os.path.join(f, 'WiredTigerStat*')):
                    print 'Processing ' + s
                    yield s

    d = defaultdict(list)
    for f in getfiles(args.files):
        for line in open(f, 'rU'):
            month, day, time, v, title = line.strip('\n').split(" ", 4)
            d[title].append((month + " " + day + " " + time, v))

    # Process the series, eliminate constants
    for title, values in sorted(d.iteritems()):
        skip = True
        t0, v0 = values[0]
        for t, v in values:
            if v != v0:
                skip = False
                break
        if skip:
            #print "Skipping", title
            del d[title]

    # Common prefix / suffix elimination
    prefix = suffix = None

    def common_prefix(a, b):
        while not b.startswith(a):
            a = a[:-1]
        return a

    def common_suffix(a, b):
        while not a.endswith(b):
            b = b[1:]
        return b

    def output_series(results, prefix=None, grouplist=[]):
        # add .html ending if not present
        filename, ext = os.path.splitext(args.output)
        if ext == '':
            ext = '.html'

        # open the output file based on prefix
        if prefix == None:
            outputname = filename + ext
        elif len(grouplist) == 0:
            outputname = filename + '.' + prefix + ext
        else:
            outputname = filename + '.group.' + prefix + ext

        if prefix != None and len(grouplist) == 0:
            this_series = []
            for title, ydata in results:
                if not prefix in title:
                    continue
                #print 'Appending to dataset: ' + title
                this_series.append((title, ydata))
        elif prefix != None and len(grouplist) > 0:
            this_series = []
            for title, ydata in results:
                for subgroup in grouplist:
                    if not subgroup in title:
                        continue
                    # print 'Appending to dataset: ' + title
                    this_series.append((title, ydata))
        else:
            this_series = results

        if len(this_series) == 0:
            print 'Output: ' + outputname + ' has no data.  Do not create.'
            return

        json_output = {"series": []}

        for title, ydata in this_series:
            json_output["series"].append({
                "key": title,
                "values": ydata,
            })

        # load template
        this_path = os.path.dirname(os.path.realpath(__file__))
        srcfile = os.path.join(this_path, 'wtstats.html.template')
        try:
            srcfile = open(srcfile)
            contents = srcfile.read()
        except IOError:
            print >>sys.stderr, "Cannot find template file 'wtstats.html." \
                "template'. See ./template/README.md for more information."
            sys.exit(-1)

        srcfile.close()

        # if --json write data to <filename>.json
        if args.json:
            jsonfile = filename + '.json'
            with open(jsonfile, 'w') as f:
                json.dump(json_output, f)
                print "created %s" % jsonfile

        # write output file
        dstfile = open(outputname, 'wt')
        replaced_contents = contents.replace('"### INSERT DATA HERE ###"',
                                             json.dumps(json_output))
        dstfile.write(replaced_contents)
        dstfile.close()
        print "created %s" % dstfile.name

    # Split out the data, convert timestamps
    results = []
    for title, values in sorted(d.iteritems()):
        title, ydata = munge(args, title, values)
        # Ignore entries if a list of regular expressions was given
        if args.include and not [r for r in args.include if r.search(title)]:
            continue
        prefix = title if prefix is None else common_prefix(prefix, title)
        suffix = title if suffix is None else common_suffix(title, suffix)
        results.append((title, ydata))

    # Process titles, eliminate common prefixes and suffixes
    if prefix or suffix:
        new_results = []
        for title, ydata in results:
            title = title[len(prefix):]
            if suffix:
                title = title[:-len(suffix)]
            new_results.append((title, ydata))
        results = new_results

    # Are we just listing the results?
    if args.list:
        for title, ydata in results:
            print title
        sys.exit(0)

    output_series(results)

    # If the user wants the stats split up by prefix type do so.
    if args.all:
        for prefix in prefix_list:
            output_series(results, prefix)
        for group in groups.keys():
            output_series(results, group, groups[group])
示例#5
0
def main():   
    parser = argparse.ArgumentParser(description='Create graphs from' \
        'WiredTiger statistics.')
    parser.add_argument('--all', '-A', action='store_true',
        help='generate separate html files for each stats group')
    parser.add_argument('--clear', action='store_true',
        help='WiredTiger stats gathered with clear set')
    parser.add_argument('--include', '-I', metavar='regexp',
        type=re.compile, action='append',
        help='only include series with titles matching regexp')
    parser.add_argument('--list', action='store_true',
        help='only list the parsed series, does not create html file')
    parser.add_argument('--output', '-o', metavar='file', default='wtstats',
        help='HTML output file prefix')
    parser.add_argument('--json', action='store_true', 
        help='additionally output data series in json format')
    parser.add_argument('files', metavar='file', nargs='+',
        help='input files or directories generated by WiredTiger statistics' \
        'logging')
    args = parser.parse_args()

    # Read the input file(s) into a dictionary of lists.
    def getfiles(l):
        for f in l:
            if os.path.isfile(f):
                yield f
            elif os.path.isdir(f):
                for s in glob(os.path.join(f, 'WiredTigerStat*')):
                    print 'Processing ' + s
                    yield s

    d = defaultdict(list)
    for f in getfiles(args.files):
        for line in open(f, 'rU'):
            month, day, time, v, title = line.strip('\n').split(" ", 4)
            d[title].append((month + " " + day + " " + time, v))

    # Process the series, eliminate constants
    for title, values in sorted(d.iteritems()):
        skip = True
        t0, v0 = values[0]
        for t, v in values:
            if v != v0:
                skip = False
                break
        if skip:
            #print "Skipping", title
            del d[title]

    # Common prefix / suffix elimination
    prefix = suffix = None

    def common_prefix(a, b):
        while not b.startswith(a):
            a = a[:-1]
        return a

    def common_suffix(a, b):
        while not a.endswith(b):
            b = b[1:]
        return b

    def output_series(results, prefix=None, grouplist=[]):
        # add .html ending if not present
        filename, ext = os.path.splitext(args.output)
        if ext == '':
            ext = '.html'

        # open the output file based on prefix
        if prefix == None:
            outputname = filename + ext
        elif len(grouplist) == 0:
            outputname = filename +'.' + prefix + ext
        else:
            outputname = filename +'.group.' + prefix + ext

        if prefix != None and len(grouplist) == 0:
            this_series = []
            for title, ydata in results:
                if not prefix in title:
                    continue
                #print 'Appending to dataset: ' + title
                this_series.append((title, ydata))
        elif prefix != None and len(grouplist) > 0:
            this_series = []
            for title, ydata in results:
                for subgroup in grouplist:
                    if not subgroup in title:
                        continue
                    # print 'Appending to dataset: ' + title
                    this_series.append((title, ydata))
        else:
            this_series = results

        if len(this_series) == 0:
            print 'Output: ' + outputname + ' has no data.  Do not create.'
            return


        json_output = { "series": [] }

        for title, ydata in this_series:
            json_output["series"].append({
                "key": title,
                "values": ydata,
            });
        
        # load template
        this_path = os.path.dirname(os.path.realpath(__file__))
        srcfile = os.path.join(this_path, 'wtstats.html.template')
        try: 
            srcfile = open(srcfile)
            contents = srcfile.read()
        except IOError: 
            print >>sys.stderr, "Cannot find template file 'wtstats.html." \
                "template'. See ./template/README.md for more information."
            sys.exit(-1)  

        srcfile.close()

        # if --json write data to <filename>.json
        if args.json:
            jsonfile = filename + '.json'
            with open(jsonfile, 'w') as f:
                json.dump(json_output, f)
                print "created %s" % jsonfile

        # write output file
        dstfile = open(outputname, 'wt')
        replaced_contents = contents.replace('"### INSERT DATA HERE ###"', 
            json.dumps(json_output))
        dstfile.write(replaced_contents)
        dstfile.close()
        print "created %s" % dstfile.name

    # Split out the data, convert timestamps
    results = []
    for title, values in sorted(d.iteritems()):
        title, ydata = munge(args, title, values)
        # Ignore entries if a list of regular expressions was given
        if args.include and not [r for r in args.include if r.search(title)]:
            continue
        prefix = title if prefix is None else common_prefix(prefix, title)
        suffix = title if suffix is None else common_suffix(title, suffix)
        results.append((title, ydata))

    # Process titles, eliminate common prefixes and suffixes
    if prefix or suffix:
        new_results = []
        for title, ydata in results:
            title = title[len(prefix):]
            if suffix:
                title = title[:-len(suffix)]
            new_results.append((title, ydata))
        results = new_results

    # Are we just listing the results?
    if args.list:
        for title, ydata in results:
            print title
        sys.exit(0)

    output_series(results)

    # If the user wants the stats split up by prefix type do so.
    if args.all:
        for prefix in prefix_list:
            output_series(results, prefix)
        for group in groups.keys():
            output_series(results, group, groups[group])