def __exit__(self, exc_type, exc_value, traceback):
    if exc_type is self.failureException:
        log_info = log.scan(self.cluster.log_subdir, ["WARNING", "ERROR"])
        if len(log_info) > 0:
            print(log_info)
    self.cluster.__exit__()
    return False  # rethrow exception, if any
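# Note on the fragment above: because __exit__ returns False, any exception
# raised inside the with-block (including a test failure) is re-raised after
# the cluster is shut down. A minimal, standalone illustration of that
# standard context-manager behavior (not part of the cluster code itself):
class PropagatesExceptions(object):
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Returning False (or None) tells Python not to suppress the
        # exception, so it propagates to the caller of the with-block.
        return False

# with PropagatesExceptions():
#     raise RuntimeError("this error reaches the caller")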
    # Provide a default set of tests to run (the most useful ones).
    args = ["basic",
            "multiRead_oneMaster",
            "multiRead_oneObjectPerMaster",
            "multiReadThroughput",
            "multiWrite_oneMaster",
            "readDistRandom",
            "readThroughput",
            "readVaryingKeyLength",
            "writeVaryingKeyLength"
            ]
    for name in args:
        for test in simple_tests:
            if test.name == name:
                run_test(test, options)
                break
        else:
            for test in graph_tests:
                if test.name == name:
                    run_test(test, options)
                    break
            else:
                print("No clusterperf test named '%s'" % (name))
finally:
    logInfo = log.scan("%s/latest" % (options.log_dir),
                       ["WARNING", "ERROR"],
                       ["starting new cluster from scratch",
                        "Ping timeout to server"])
    if len(logInfo) > 0:
        print(logInfo, file=sys.stderr)
                       'the servers, though multiple clients may run on a '
                       'single machine)')
parser.add_option('-t', '--timeout', type=int, default=20,
                  metavar='SECS',
                  help="Abort if the client application doesn't finish within "
                       'SECS seconds')
parser.add_option('-T', '--transport', default='infrc',
                  help='Transport to use for communication with servers')
parser.add_option('-v', '--verbose', action='store_true', default=False,
                  help='Print progress messages')
(options, args) = parser.parse_args()
status = 0
try:
    run(**vars(options))
finally:
    logInfo = log.scan("logs/latest", ["WARNING", "ERROR"])
    if len(logInfo) > 0:
        print(logInfo, file=sys.stderr)
        status = 1
quit(status)
    # set up trend points for dumpstr
    trends = ['recovery']
    if options.trends is not None:
        for trend in options.trends:
            if trend not in trends:
                trends.append(trend)
    trends = zip(trends, [stats['ns'] / 1e9] * len(trends))

    # print and upload dumpstr report
    dumpstr = getDumpstr()
    dumpstr.print_report(stats['report'])
    s = dumpstr.upload_report('recovery', stats['report'], trends=trends)
    print('You can view your report at %s' % s['url'])

    # write the dumpstr URL to the metrics log file
    f = open('%s/metrics' % stats['run'], 'a')
    print('You can view your report at %s' % s['url'], file=f)
    f.close()
finally:
    log_info = log.scan(
        "%s/latest" % (options.log_dir),
        ["WARNING", "ERROR"],
        ["Ping timeout",
         "Pool destroyed",
         "told to kill",
         "is not responding",
         "failed to exchange",
         "timed out waiting for response",
         "received nonce",
         "Couldn't open session",
         "verifying cluster membership"])
    if len(log_info) > 0:
        print(log_info)
                       '(by default clients run on different machines than '
                       'the servers, though multiple clients may run on a '
                       'single machine)')
parser.add_option('-t', '--timeout', type=int, default=20,
                  metavar='SECS',
                  help="Abort if the client application doesn't finish within "
                       'SECS seconds')
parser.add_option('-T', '--transport', default='infrc',
                  help='Transport to use for communication with servers')
parser.add_option('-v', '--verbose', action='store_true', default=False,
                  help='Print progress messages')
parser.add_option('--valgrind', action='store_true', default=False,
                  help='Run all the processes under valgrind')
parser.add_option('--valgrindArgs', metavar='ARGS', default='',
                  dest='valgrind_args',
                  help='Arguments to pass to valgrind')
parser.add_option('--disjunct', action='store_true', default=False,
                  help='Disjunct entities (disable collocation) on each server')
(options, args) = parser.parse_args()
status = 0
try:
    run(**vars(options))
finally:
    logInfo = log.scan("logs/latest", ["WARNING", "ERROR"])
    if len(logInfo) > 0:
        print(logInfo, file=sys.stderr)
        status = 1
quit(status)
args['master_args'] = options.master_args
args['backup_args'] = options.backup_args
try:
    stats = recover(**args)

    # set up trend points for dumpstr
    trends = ['recovery']
    if options.trends is not None:
        for trend in options.trends:
            if trend not in trends:
                trends.append(trend)
    trends = zip(trends, [stats['ns'] / 1e9] * len(trends))

    # print and upload dumpstr report
    dumpstr = getDumpstr()
    dumpstr.print_report(stats['report'])
    s = dumpstr.upload_report('recovery', stats['report'], trends=trends)
    print('You can view your report at %s' % s['url'])

    # write the dumpstr URL to the metrics log file
    f = open('%s/metrics' % stats['run'], 'a')
    print('You can view your report at %s' % s['url'], file=f)
    f.close()
finally:
    log_info = log.scan("%s/latest" % (options.log_dir),
                        ["WARNING", "ERROR"])
    if len(log_info) > 0:
        print(log_info)
    # Provide a default set of tests to run (the most useful ones).
    args = ["basic",
            "multiRead_oneMaster",
            "multiRead_oneObjectPerMaster",
            "multiReadThroughput",
            "multiWrite_oneMaster",
            "readDistRandom",
            "readThroughput",
            "readVaryingKeyLength",
            "writeVaryingKeyLength"
            ]
    for name in args:
        for test in simple_tests:
            if test.name == name:
                run_test(test, options)
                break
        else:
            for test in graph_tests:
                if test.name == name:
                    run_test(test, options)
                    break
            else:
                print("No clusterperf test named '%s'" % (name))
finally:
    logInfo = log.scan("%s/latest" % (options.log_dir),
                       ["WARNING", "ERROR"],
                       ["starting new cluster from scratch",
                        "Ping timeout to server"])
    if len(logInfo) > 0:
        print(logInfo)
    # set up trend points for dumpstr
    trends = ['recovery']
    if options.trends is not None:
        for trend in options.trends:
            if trend not in trends:
                trends.append(trend)
    trends = zip(trends, [stats['ns'] / 1e9] * len(trends))

    # print and upload dumpstr report
    dumpstr = getDumpstr()
    dumpstr.print_report(stats['report'])
    s = dumpstr.upload_report('recovery', stats['report'], trends=trends)
    print('You can view your report at %s' % s['url'])

    # write the dumpstr URL to the metrics log file
    f = open('%s/metrics' % stats['run'], 'a')
    print('You can view your report at %s' % s['url'], file=f)
    f.close()
finally:
    log_info = log.scan("%s/latest" % (options.log_dir),
                        ["WARNING", "ERROR"],
                        ["Ping timeout", "Pool destroyed", "told to kill",
                         "is not responding", "failed to exchange",
                         "timed out waiting for response", "received nonce",
                         "Couldn't open session",
                         "verifying cluster membership"])
    if len(log_info) > 0:
        print(log_info)
parser.add_option("-T", "--transport", default="infrc", help="Transport to use for communication with servers") parser.add_option("-v", "--verbose", action="store_true", default=False, help="Print progress messages") (options, args) = parser.parse_args() # Invoke the requested tests (run all of them if no tests were specified) try: if len(args) == 0: # No specific tests were requested, so run all of them. for test in simple_tests: run_test(test, options) for test in graph_tests: run_test(test, options) else: for name in args: for test in simple_tests: if test.name == name: run_test(test, options) break else: for test in graph_tests: if test.name == name: run_test(test, options) break else: print("No clusterperf test named '%s'" % (name)) finally: logInfo = log.scan("%s/latest" % (options.log_dir), ["WARNING", "ERROR"]) if len(logInfo) > 0: print(logInfo)
                  dest='share_hosts',
                  help='Allow clients to run on machines running servers '
                       '(by default clients run on different machines than '
                       'the servers, though multiple clients may run on a '
                       'single machine)')
parser.add_option('-t', '--timeout', type=int, default=20,
                  metavar='SECS',
                  help="Abort if the client application doesn't finish within "
                       'SECS seconds')
parser.add_option('-T', '--transport', default='infrc',
                  help='Transport to use for communication with servers')
parser.add_option('-v', '--verbose', action='store_true', default=False,
                  help='Print progress messages')
parser.add_option('--valgrind', action='store_true', default=False,
                  help='Run all the processes under valgrind')
parser.add_option('--valgrindArgs', metavar='ARGS', default='',
                  dest='valgrind_args',
                  help='Arguments to pass to valgrind')
(options, args) = parser.parse_args()
status = 0
try:
    run(**vars(options))
finally:
    logInfo = log.scan(''.join([scripts_path, "/logs/latest"]),
                       ["WARNING", "ERROR"])
    if len(logInfo) > 0:
        print(logInfo, file=sys.stderr)
        status = 1
quit(status)
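# Every fragment above calls log.scan() with the same shape of arguments: a
# log directory, a list of severities to report, and (optionally) a list of
# substrings marking lines that are expected and may be ignored. The helper
# below is only a sketch of that assumed interface, for illustration; it is
# not the project's actual log-scanning implementation.
import glob
import os

def scan(log_dir, levels, ignore=()):
    """Return all lines from *.log files in log_dir whose severity appears
    in levels, skipping lines that contain any substring in ignore."""
    problems = ''
    for path in sorted(glob.glob(os.path.join(log_dir, '*.log'))):
        with open(path) as f:
            for line in f:
                if not any(level in line for level in levels):
                    continue
                if any(substring in line for substring in ignore):
                    continue
                problems += '%s: %s' % (os.path.basename(path), line)
    return problems

# Example (hypothetical log directory):
#     print(scan('logs/latest', ['WARNING', 'ERROR'], ['Ping timeout']))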