def LoadToBigQuery(file_path):
  """Load the samples from a file to BigQuery.

  Builds and runs a `bq load` command that imports newline-delimited JSON
  samples into the table named by FLAGS.bigquery_table, using the schema in
  results_table_schema.json.

  Args:
    file_path: path to a newline-delimited JSON file of benchmark samples.
  """
  print('==== Publishing samples from file %s to %s' %
        (file_path, FLAGS.bigquery_table))
  load_cmd = [FLAGS.bq_path]
  if FLAGS.use_gce:
    # On GCE, authenticate bq via the instance's service account.
    load_cmd.append('--use_gce_service_account')
  load_cmd.extend([
      'load', '--source_format=NEWLINE_DELIMITED_JSON',
      FLAGS.bigquery_table, file_path, 'results_table_schema.json'
  ])
  (out, ret) = esp_utils.IssueCommand(load_cmd)
  # Surface a failed load instead of silently discarding the exit status.
  if ret != 0:
    print('==== Failed to load %s to %s (exit code %d)' %
          (file_path, FLAGS.bigquery_table, ret))
def test(run, n, c, t, d):
  """Run an h2load benchmark and extract its results.

  Args:
    run: is a dict {
        'url': a string
        'headers': [headers]
        'post_file': a string
        }
    n: number of requests
    c: number of connections
    t: number of threads
    d: test duration in seconds (not passed to h2load here)

  Returns:
    None if h2load failed to run; otherwise a tuple (metrics, errors):
      metrics: a dict of metric name to a tuple of (value, unit)
      errors: a list of non-200 responses (always empty: h2load does not
          report individual non-2xx response bodies)
  """
  cmd = [
      H2LOAD, '-n', str(n), '-t', str(t), '-c', str(c), '-r', str(1),
      '--verbose', '-H', '"Content-Type:application/json"'
  ]
  if 'headers' in run:
    for h in run['headers']:
      cmd += ['-H', h]
  if 'post_file' in run:
    cmd += ['-d', run['post_file']]
  cmd += [run['url']]

  (out, ret) = esp_utils.IssueCommand(cmd)
  if ret != 0:
    print('==== Failed to run')
    return None

  metrics = {}
  # h2load does not output non-2xx error responses
  errors = []

  # Parse the output of h2load line by line; each stanza of interest is
  # matched with a regex, and unmatched lines are skipped safely.
  for line in out.split('\n'):
    print(line)
    if line.startswith('requests:'):
      r = re.search(
          r'requests: (\d+) total, (\d+) started, (\d+) done, '
          r'(\d+) succeeded, (\d+) failed, (\d+) errored, (\d+) timeout',
          line)
      if r:
        metrics['Complete requests'] = (int(r.group(4)), 'number')
        metrics['Failed requests'] = (int(r.group(5)), 'number')
        metrics['Timeout requests'] = (int(r.group(7)), 'number')
    if line.startswith('finished in'):
      r = re.search(r'finished in (\d+\.?\d+\w+), (\d+\.?\d+) req/s', line)
      if r:
        metrics['Requests per second'] = (r.group(2), 'qps')
    if line.startswith('status codes:'):
      r = re.search(
          r'status codes: (\d+) 2xx, (\d+) 3xx, (\d+) 4xx, (\d+) 5xx', line)
      if r:
        metrics['Non-2xx responses'] = (int(r.group(2)) + int(r.group(3)) +
                                        int(r.group(4)), 'number')
    if line.startswith('time for request:'):
      # Columns are min, max, mean, sd, +/- sd; groups (3,4) are the max
      # (reported as the 100th percentile) and (5,6) the mean.
      r = re.search(
          r'time for request:\s+(\d+\.?\d+)(\w+)\s+(\d+\.?\d+)(\w+)'
          r'\s+(\d+\.?\d+)(\w+)\s+(\d+\.?\d+)(\w+)\s+(\d+\.?\d+)%',
          line)
      if r:
        metrics['Latency percentile: 100%'] = (r.group(3), r.group(4))
        metrics['Latency percentile: mean'] = (r.group(5), r.group(6))
  return metrics, errors
def test(run, n, c, t, d):
  """Benchmark one endpoint with wrk and collect its results.

  Args:
    run: is a dict {
        'url': a string
        'headers': [headers]
        'post_file': a string
        }
    n: number of requests (ignored by wrk)
    c: number of connections
    t: number of threads
    d: test duration in seconds

  Returns:
    metrics: is a dict of metric name to a tuple of (value, unit)
    errors: a list of non-200 responses
  """
  command = [
      WRK_PATH,
      '-t', str(t),
      '--timeout', '2m',
      '-c', str(c),
      '-d', str(d) + 's',
      '-s', 'wrk_script.lua',
      '-H', '"Content-Type:application/json"',
  ]
  for header in run.get('headers', []):
    command += ['-H', header]

  # Choose the HTTP method and body source for the Lua script template.
  if 'post_file' in run:
    method, body_file = 'POST', run['post_file']
  else:
    method, body_file = 'GET', '/dev/null'

  out_file = 'wrk_out'
  err_file = 'wrk_err'

  # Instantiate the wrk Lua script from its template.
  with open('wrk_script.lua.temp', 'r') as template_file:
    template_text = template_file.read()
  with open('wrk_script.lua', 'w') as script_file:
    script_file.write(Template(template_text).substitute(
        HTTP_METHOD=method,
        REQUEST_BODY_FILE=body_file,
        EXPECTED_STATUS=run.get('expected_status', '200'),
        OUT=out_file,
        ERR=err_file))

  command.append(run['url'])
  (_, exit_code) = esp_utils.IssueCommand(command)
  if exit_code != 0:
    print('==== Failed to run=%s,t=%d,c=%s,ret=%d' %
          (str(run), t, c, exit_code))
    return None

  # The Lua script dumps metrics as JSON lists; convert each to a tuple.
  with open(out_file, 'r') as results_file:
    raw_metrics = json.load(results_file)
  metrics = dict((name, tuple(value)) for name, value in raw_metrics.items())

  # Each wrk thread writes its own error file; gather them all.
  errors = []
  for thread_index in range(t):
    with open('%s_%d' % (err_file, thread_index), 'r') as thread_errors:
      errors.extend(thread_errors.readlines())

  return metrics, errors