def run(args):
    """
    Run a test with the specified parameters, and return the HTTP Archive
    (HAR) file represented as a dictionary.
    """
    har_gen = FlowProfiler(args.testfile, int(args.iterations)) \
        if args.iterations else FlowProfiler(args.testfile)
    # profiling_results is a list of lists containing HARs for each page in a run
    profiling_results = har_gen.profile()
    if args.average:
        return [merge_by_average(page_results)
                for page_results in zip(*profiling_results)]
    else:
        return profiling_results[-1]
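
# For reference, a hedged sketch of the averaging step above:
# zip(*profiling_results) transposes the runs-by-pages list of lists into
# pages-by-runs, so each call to merge_by_average sees every iteration of one
# page. merge_by_average itself is not defined in this excerpt; the helper
# below is an illustrative stand-in that assumes each HAR carries the
# standard log.pages[].pageTimings.onLoad field.
import json  # stdlib; needed only by this illustrative helper


def _merge_by_average_sketch(page_results):
    """Illustrative only: average one page's onLoad time across all runs."""
    # Deep-copy the first run's HAR via a JSON round-trip, then overwrite
    # its onLoad timing with the mean across runs.
    merged = json.loads(json.dumps(page_results[0]))
    on_loads = [h['log']['pages'][0]['pageTimings']['onLoad']
                for h in page_results]
    merged['log']['pages'][0]['pageTimings']['onLoad'] = \
        sum(on_loads) / float(len(on_loads))
    return merged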
import argparse
import json
import os
import time

# FlowProfiler and merge_by_average are project-local; their import lines are
# not shown in this excerpt.

DEFAULT_OUTPUT_DIR = os.path.join(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'report')

if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-d', '--outputdir',
                            help='Write JSON file to specified directory')
    arg_parser.add_argument('-f', '--filename',
                            help='Use specified name for JSON file')
    arg_parser.add_argument('-u', '--urls',
                            help='Comma-separated URLs to profile')
    arg_parser.add_argument('-t', '--testfile', required=True,
                            help='Use specified JSON test file to determine test actions')
    arg_parser.add_argument('-i', '--iterations',
                            help='Do the profiling task the specified number of times')
    arg_parser.add_argument('-a', '--average', action='store_true',
                            help='Output the average results of the iterations')
    arg_parser.add_argument('-p', '--report', action='store_true',
                            help='Call make_report.py to make a report and open it')
    args = arg_parser.parse_args()

    har_gen = FlowProfiler(args.testfile, int(args.iterations)) \
        if args.iterations else FlowProfiler(args.testfile)
    # profiling_results is a list of lists containing HARs for each page in a run
    profiling_results = har_gen.profile()
    if args.average:
        chosen_result = [merge_by_average(page_results)
                         for page_results in zip(*profiling_results)]
    else:
        chosen_result = profiling_results[-1]

    result_text = json.dumps(chosen_result)
    filename = args.filename or 'data_{0}.json'.format(int(time.time()))
    if args.outputdir:
        output_file = os.path.join(args.outputdir, filename)
    else:
        output_file = os.path.join(DEFAULT_OUTPUT_DIR, filename)
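    # The excerpt is cut off above. A hedged sketch of the presumed remainder,
    # built only from variables already defined here (result_text, output_file,
    # args.report); exactly how make_report.py is invoked is an assumption.
    with open(output_file, 'w') as f:
        f.write(result_text)
    if args.report:
        import subprocess  # stdlib; imported here to keep the sketch self-contained
        subprocess.call(['python', 'make_report.py', output_file])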
import json

# FlowProfiler and the validate_* helpers below come from the project's own
# modules; their import lines are not shown in this excerpt.

# (Fragment: the tail of the per-entry field specification handed to the entry
# validator; the opening of the enclosing call is not shown.)
        ('startedDateTime', ['str', 'unicode']),
        ('time', ['int']),
        ('request', ['dict'], lambda x: validate_request(x, i)),
        ('response', ['dict'], lambda x: validate_response(x, i)),
        ('cache', ['dict'], lambda x: validate_cache(x, i)),
        ('timings', ['dict'], lambda x: validate_resource_timings(x, i))])


def validate_har_structure(har):
    assert 'log' in har, 'HAR file must contain a "log" field'
    validation_funcs = {'version': (['str', 'unicode'],
                                    lambda x: validate_str_is_number(x)),
                        'creator': ('dict', validate_har_creator),
                        'browser': ('dict', validate_browser),
                        'pages': ('list', validate_pages),
                        'entries': ('list', validate_entries)}
    for field, (field_type, func) in validation_funcs.iteritems():
        validate_field((har['log'], 'log'), field, field_type, func)


gen = FlowProfiler('../bin/sampleinput/sample.json')
hars = gen.profile()
for har in hars:
    # Round-trip through JSON so every string in the dictionary has a uniform
    # type, which simplifies the type verification below.
    har = json.loads(json.dumps(har))
    print json.dumps(har)
    print '\n'
    validate_har_structure(har)
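
# A hedged sketch of validate_field, inferred purely from its call sites above:
# it takes a (dict, label) pair used in error messages, the field name, the
# expected type name or list of names (matched against type(...).__name__,
# which is why the JSON round-trip above matters), and an optional deeper
# validator. The real implementation is not part of this excerpt.
def _validate_field_sketch(parent, field, expected, func=None):
    """Illustrative only: assert a field exists and has an expected type, then recurse."""
    obj, label = parent
    assert field in obj, '"{0}" must contain a "{1}" field'.format(label, field)
    expected_names = expected if isinstance(expected, list) else [expected]
    actual = type(obj[field]).__name__
    assert actual in expected_names, \
        '"{0}.{1}" should be one of {2}, got {3}'.format(
            label, field, expected_names, actual)
    if func is not None:
        func(obj[field])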