def wrap(*args, **kwargs):
    """Bind the target database/testsuite onto the request globals, run the
    wrapped API view, and roll back any open transaction afterwards."""
    g.db_name = kwargs.pop('db')
    g.testsuite_name = kwargs.pop('ts')
    g.db_info = current_app.old_config.databases.get(g.db_name)
    if g.db_info is None:
        abort(404, message="Invalid database.")
    # Run the wrapped view.
    outcome = func(*args, **kwargs)
    # Leave no transaction open when this request finishes.
    request.get_db().rollback()
    return outcome
def wrap(*args, **kwargs):
    """Dispatch to the wrapped view after resolving the database and
    testsuite named in the URL keyword arguments."""
    database, suite = kwargs.pop("db"), kwargs.pop("ts")
    g.db_name = database
    g.testsuite_name = suite
    g.db_info = current_app.old_config.databases.get(g.db_name)
    if g.db_info is None:
        abort(404, message="Invalid database.")
    value = func(*args, **kwargs)
    # Ensure any transactions begun by this request are closed.
    request.get_db().rollback()
    return value
def post():
    """Add a new run into the lnt database"""
    session = request.session
    db = request.get_db()
    data = request.data
    select_machine = request.values.get('select_machine', 'match')
    merge = request.values.get('merge', None)
    # Hand the raw submission off to the importer.
    result = lnt.util.ImportData.import_from_string(
        current_app.old_config, g.db_name, db, session, g.testsuite_name,
        data, select_machine=select_machine, merge_run=merge)

    error = result['error']
    if error is not None:
        # Reject bad submissions with a 400 and log the reason.
        rejection = jsonify(result)
        rejection.status = '400'
        logger.warning("%s: Submission rejected: %s" % (request.url, error))
        return rejection

    # Point the client at the canonical URL of the newly created run.
    new_url = '%sapi/db_%s/v4/%s/runs/%s' % (
        request.url_root, g.db_name, g.testsuite_name, result['run_id'])
    result['result_url'] = new_url
    accepted = jsonify(result)
    accepted.status = '301'
    accepted.headers.add('Location', new_url)
    return accepted
def post():
    """Add a new run into the lnt database"""
    session = request.session
    db = request.get_db()
    data = request.data
    select_machine = request.values.get('select_machine', 'match')
    merge = request.values.get('merge', None)
    # Import the submitted data.
    result = lnt.util.ImportData.import_from_string(
        current_app.old_config, g.db_name, db, session, g.testsuite_name,
        data, select_machine=select_machine, merge_run=merge)

    error = result['error']
    if error is not None:
        # Submission failed: log it and answer with a 400.
        logger.warning("%s: Submission rejected: %s" % (request.url, error))
        response = jsonify(result)
        response.status = '400'
        return response

    # Build the canonical URL for the new run and redirect to it.
    new_url = ('%sapi/db_%s/v4/%s/runs/%s'
               % (request.url_root, g.db_name, g.testsuite_name,
                  result['run_id']))
    result['result_url'] = new_url
    response = jsonify(result)
    response.status = '301'
    response.headers.add('Location', new_url)
    return response
def v4_summary_report():
    """Render the summary report page, or its raw data as JSON when the
    `json` query argument is set."""
    # The report is driven by a saved configuration file; show a friendly
    # error page when none has been written yet.
    config_path = get_summary_config_path()
    if not os.path.exists(config_path):
        return render_template("error.html", message="""\
You must define a summary report configuration first.""")

    with open(config_path) as f:
        config = flask.json.load(f)

    # Instantiate and build the report from the saved configuration.
    report = lnt.server.reporting.summaryreport.SummaryReport(
        request.get_db(), config['orders'], config['machine_names'],
        config['machine_patterns'])
    report.build()

    if bool(request.args.get('json')):
        # Emit the normalized data table for client-side consumption.
        payload = {
            'ticks': report.report_orders,
            'data': [[header, samples.getvalue()]
                     for header, samples in
                     report.normalized_data_table.items()],
        }
        return flask.jsonify(**payload)
    return render_template("v4_summary_report.html", report=report)
def simple_run(tag, id):
    """Redirect an old /simple/... run URL to the matching v0.4 run, for
    URL compatibility with old databases."""
    # Only v0.4 databases carry the simple_run_id linkage.
    if g.db_info.db_version != '0.4':
        return render_template("error.html", message="""\
Invalid URL for version %r database.""" % (g.db_info.db_version, ))

    # Get the expected test suite.
    db = request.get_db()
    ts = db.testsuite[tag]

    # Look for a V4 run which declares that it matches this simple run ID.
    matched_run = (ts.query(ts.Run)
                   .filter(ts.Run.simple_run_id == id)
                   .first())

    if matched_run is not None:
        # Found one: send the client to its native report.
        return redirect(
            db_url_for("v4_run", testsuite_name=tag, id=matched_run.id))

    # Otherwise, report an error.
    return render_template("error.html", message="""\
Unable to find a v0.4 run for this ID. Please use the native v0.4 URL interface
(instead of the /simple/... URL schema).""")
def simple_run(tag, id):
    """Preserve URL compatibility for old databases: map a /simple/... run
    ID onto the v0.4 run that claims it, and redirect there."""
    # This compatibility shim only makes sense for v0.4 databases.
    if g.db_info.db_version != '0.4':
        return render_template("error.html", message="""\
Invalid URL for version %r database.""" % (g.db_info.db_version,))

    database = request.get_db()
    suite = database.testsuite[tag]

    # Search for a run declaring it matches this simple run ID.
    candidate = (suite.query(suite.Run)
                 .filter(suite.Run.simple_run_id == id)
                 .first())

    if candidate is None:
        # No matching run: explain how to reach the native interface.
        return render_template("error.html", message="""\
Unable to find a v0.4 run for this ID. Please use the native v0.4 URL interface
(instead of the /simple/... URL schema).""")

    # Redirect to the matched run's report.
    return redirect(db_url_for("v4_run", testsuite_name=tag,
                               id=candidate.id))
def submit_run():
    """Handle a run submission POST: validate the input, stash a copy of
    the raw data under the temp directory, import it into the database,
    and return the import report as JSON."""
    if request.method == 'POST':
        input_file = request.files.get('file')
        input_data = request.form.get('input_data')
        commit = int(request.form.get('commit', 0))

        # Treat an empty upload as no upload at all.
        if input_file and not input_file.content_length:
            input_file = None

        if not input_file and not input_data:
            return render_template(
                "submit_run.html", error="must provide input file or data")
        if input_file and input_data:
            return render_template(
                "submit_run.html", error="cannot provide input file *and* data")

        if input_file:
            data_value = input_file.read()
        else:
            data_value = input_data

        # Stash a copy of the raw submission.
        #
        # To keep the temporary directory organized, we keep files in
        # subdirectories organized by (database, year-month).
        utcnow = datetime.datetime.utcnow()
        tmpdir = os.path.join(current_app.old_config.tempDir, g.db_name,
                              "%04d-%02d" % (utcnow.year, utcnow.month))
        try:
            os.makedirs(tmpdir)
        except OSError:
            # FIX: `except OSError,e` is Python-2-only syntax (and `e` was
            # unused). The directory may already exist; that is fine.
            pass

        # Save the file under a name prefixed with the date, to make it easier
        # to use these files in cases we might need them for debugging or data
        # recovery.
        prefix = utcnow.strftime("data-%Y-%m-%d_%H-%M-%S")
        fd, path = tempfile.mkstemp(prefix=prefix, suffix='.plist',
                                    dir=str(tmpdir))
        os.write(fd, data_value)
        os.close(fd)

        # Get a DB connection.
        db = request.get_db()

        # Import the data.
        #
        # FIXME: Gracefully handle formats failures and DOS attempts. We
        # should at least reject overly large inputs.
        result = lnt.util.ImportData.import_and_report(
            current_app.old_config, g.db_name, db, path, '<auto>', commit)

        # It is nice to have a full URL to the run, so fixup the request URL
        # here were we know more about the flask instance.
        if result.get('result_url'):
            result['result_url'] = request.url_root + result['result_url']

        return flask.jsonify(**result)
def wrap(testsuite_name, db_name=None, **args):
    """Record the testsuite and database for this request on the app
    globals, run the wrapped view, then roll back any open transaction."""
    # Initialize the test suite parameters on the app globals object.
    g.testsuite_name = testsuite_name
    # Initialize the database parameters on the app globals object,
    # falling back to the default database when none was named.
    g.db_name = db_name or "default"
    g.db_info = current_app.old_config.databases.get(g.db_name)
    if g.db_info is None:
        abort(404)
    # Run the wrapped view.
    outcome = f(**args)
    # Make sure that any transactions begun by this request are finished.
    request.get_db().rollback()
    return outcome
def wrap(testsuite_name, db_name=None, **args):
    """Resolve testsuite/database globals for the request and delegate to
    the wrapped view, rolling back stray transactions on the way out."""
    # Bind the testsuite and database names onto the request globals.
    g.testsuite_name = testsuite_name
    g.db_name = db_name or "default"
    g.db_info = current_app.old_config.databases.get(g.db_name)
    if g.db_info is None:
        abort(404)
    value = f(**args)
    # Close out any transactions this request may have begun.
    request.get_db().rollback()
    # Return result.
    return value
def wrap(db_name=None, **args):
    """Resolve the database for this request, optionally rejecting non-v0.3
    databases, then run the wrapped view."""
    # Initialize the database parameters on the app globals object.
    g.db_name = db_name or "default"
    g.db_info = current_app.old_config.databases.get(g.db_name)
    if g.db_info is None:
        abort(404)
    # Disable non-v0.3 database support, if requested.
    if only_v3 and g.db_info.db_version != '0.3':
        return render_template("error.html", message="""\
UI support for database with version %r is not yet implemented.""" % (
            g.db_info.db_version))
    # Run the wrapped view.
    outcome = f(**args)
    # Make sure that any transactions begun by this request are finished.
    request.get_db().rollback()
    return outcome
def wrap(db_name=None, **args):
    """Bind the request's database globals (defaulting to "default"),
    enforce the only_v3 restriction, and dispatch to the wrapped view."""
    g.db_name = db_name or "default"
    g.db_info = current_app.old_config.databases.get(g.db_name)
    if g.db_info is None:
        abort(404)
    # Disable non-v0.3 database support, if requested.
    if only_v3 and g.db_info.db_version != '0.3':
        return render_template("error.html", message="""\
UI support for database with version %r is not yet implemented.""" %
                               (g.db_info.db_version))
    value = f(**args)
    # Any transaction this request began must not leak past it.
    request.get_db().rollback()
    return value
def v4_summary_report_ui():
    """Show the summary report configuration editor (GET), or persist a
    posted configuration and redirect to the rendered report (POST)."""
    if request.method == 'POST':
        # Parse and persist the posted configuration, then bounce the
        # client to the rendered report.
        config = flask.json.loads(request.form.get('config'))
        with open(get_summary_config_path(), 'w') as f:
            flask.json.dump(config, f, indent=2)
        return redirect(db_url_for("v4_summary_report"))

    # Load the saved configuration, falling back to an empty one.
    config_path = get_summary_config_path()
    if os.path.exists(config_path):
        with open(config_path) as f:
            config = flask.json.load(f)
    else:
        config = {
            "machine_names": [],
            "orders": [],
            "machine_patterns": [],
        }

    # Get the list of available test suites.
    testsuites = request.get_db().testsuite.values()

    # Sort key: orders with a numeric leading component sort by that
    # integer so "10..." follows "9..." instead of "1...".
    def to_key(name):
        first = name.split('.', 1)[0]
        if first.isdigit():
            return (int(first), name)
        return (first, name)

    # Gather the list of all run orders and all machines.
    all_machines = set()
    all_orders = set()
    for ts in testsuites:
        for name, in ts.query(ts.Machine.name):
            all_machines.add(name)
        for name, in ts.query(ts.Order.llvm_project_revision):
            all_orders.add(name)

    return render_template("v4_summary_report_ui.html",
                           config=config,
                           all_machines=sorted(all_machines),
                           all_orders=sorted(all_orders, key=to_key))
def v4_summary_report_ui():
    """Edit the summary report configuration: POST saves a new config and
    redirects to the report; GET renders the editor page."""
    if request.method == 'POST':
        # Parse the config data.
        config_data = request.form.get('config')
        config = flask.json.loads(config_data)
        # Write the updated config.
        with open(get_summary_config_path(), 'w') as f:
            flask.json.dump(config, f, indent=2)
        # Redirect to the summary report.
        return redirect(db_url_for("v4_summary_report"))

    # Start from an empty configuration and overwrite it with the saved
    # one when a config file exists.
    config_path = get_summary_config_path()
    config = {"machine_names": [], "orders": [], "machine_patterns": []}
    if os.path.exists(config_path):
        with open(config_path) as f:
            config = flask.json.load(f)

    # Sort key: numeric-leading order names compare by integer prefix.
    def to_key(name):
        first = name.split('.', 1)[0]
        return (int(first), name) if first.isdigit() else (first, name)

    # Collect every machine name and run order across all test suites.
    all_machines = set()
    all_orders = set()
    for ts in request.get_db().testsuite.values():
        all_machines.update(name for name, in ts.query(ts.Machine.name))
        all_orders.update(
            name for name, in ts.query(ts.Order.llvm_project_revision))

    return render_template("v4_summary_report_ui.html", config=config,
                           all_machines=sorted(all_machines),
                           all_orders=sorted(all_orders, key=to_key))
def __init__(self, run_id, only_html_body=True):
    """Build the report data for run `run_id`.

    Resolves the run, its comparison and baseline runs from the request
    arguments, then generates the text and HTML run reports.
    Aborts with 404 when the run (or a requested comparison/baseline
    run) does not exist.
    """
    self.db = request.get_db()
    self.ts = ts = request.get_testsuite()
    self.run = run = ts.query(ts.Run).filter_by(id=run_id).first()
    if run is None:
        abort(404)

    # Get the aggregation function to use.
    aggregation_fn_name = request.args.get('aggregation_fn')
    self.aggregation_fn = {
        'min': lnt.util.stats.safe_min,
        'median': lnt.util.stats.median
    }.get(aggregation_fn_name, lnt.util.stats.safe_min)

    # Get the MW confidence level.
    try:
        confidence_lv = float(request.args.get('MW_confidence_lv'))
    except (TypeError, ValueError):
        confidence_lv = .05
    self.confidence_lv = confidence_lv

    # Find the neighboring runs, by order.
    prev_runs = list(ts.get_previous_runs_on_machine(run, N=3))
    next_runs = list(ts.get_next_runs_on_machine(run, N=3))
    self.neighboring_runs = next_runs[::-1] + [self.run] + prev_runs

    # Select the comparison run as either the previous run, or a user
    # specified comparison run.
    compare_to_str = request.args.get('compare_to')
    if compare_to_str:
        compare_to_id = int(compare_to_str)
        self.compare_to = ts.query(ts.Run).\
            filter_by(id=compare_to_id).first()
        if self.compare_to is None:
            # FIXME: Need better way to report this error.
            abort(404)
        self.comparison_neighboring_runs = (
            list(ts.get_next_runs_on_machine(self.compare_to, N=3))[::-1] +
            [self.compare_to] +
            list(ts.get_previous_runs_on_machine(self.compare_to, N=3)))
    else:
        if prev_runs:
            self.compare_to = prev_runs[0]
        else:
            self.compare_to = None
        self.comparison_neighboring_runs = self.neighboring_runs

    # FIX: only catch conversion failures here; the previous bare
    # `except:` also swallowed unrelated errors (even KeyboardInterrupt).
    try:
        self.num_comparison_runs = int(
            request.args.get('num_comparison_runs'))
    except (TypeError, ValueError):
        self.num_comparison_runs = 0

    # Find the baseline run, if requested.
    baseline_str = request.args.get('baseline')
    if baseline_str:
        baseline_id = int(baseline_str)
        self.baseline = ts.query(ts.Run).\
            filter_by(id=baseline_id).first()
        if self.baseline is None:
            # FIXME: Need better way to report this error.
            abort(404)
    else:
        self.baseline = None

    # Generate the reports. (The unused `comparison_start_run` local was
    # removed; the comparison run is passed directly below.)
    reports = lnt.server.reporting.runs.generate_run_report(
        self.run, baseurl=db_url_for('index', _external=True),
        only_html_body=only_html_body, result=None,
        compare_to=self.compare_to, baseline=self.baseline,
        num_comparison_runs=self.num_comparison_runs,
        aggregation_fn=self.aggregation_fn, confidence_lv=confidence_lv)
    _, self.text_report, self.html_report, self.sri = reports
def __init__(self, run_id, only_html_body=True):
    """Build the report data for run `run_id`.

    Resolves the run, its comparison and baseline runs from the request
    arguments, then generates the text and HTML run reports.
    Aborts with 404 when the run (or a requested comparison/baseline
    run) does not exist.
    """
    self.db = request.get_db()
    self.ts = ts = request.get_testsuite()
    self.run = run = ts.query(ts.Run).filter_by(id=run_id).first()
    if run is None:
        abort(404)

    # Get the aggregation function to use.
    aggregation_fn_name = request.args.get('aggregation_fn')
    self.aggregation_fn = {'min': lnt.util.stats.safe_min,
                           'median': lnt.util.stats.median}.get(
        aggregation_fn_name, lnt.util.stats.safe_min)

    # Get the MW confidence level.
    try:
        confidence_lv = float(request.args.get('MW_confidence_lv'))
    except (TypeError, ValueError):
        confidence_lv = .05
    self.confidence_lv = confidence_lv

    # Find the neighboring runs, by order.
    prev_runs = list(ts.get_previous_runs_on_machine(run, N=3))
    next_runs = list(ts.get_next_runs_on_machine(run, N=3))
    self.neighboring_runs = next_runs[::-1] + [self.run] + prev_runs

    # Select the comparison run as either the previous run, or a user
    # specified comparison run.
    compare_to_str = request.args.get('compare_to')
    if compare_to_str:
        compare_to_id = int(compare_to_str)
        self.compare_to = ts.query(ts.Run).\
            filter_by(id=compare_to_id).first()
        if self.compare_to is None:
            # FIXME: Need better way to report this error.
            abort(404)
        self.comparison_neighboring_runs = (
            list(ts.get_next_runs_on_machine(self.compare_to, N=3))[::-1] +
            [self.compare_to] +
            list(ts.get_previous_runs_on_machine(self.compare_to, N=3)))
    else:
        if prev_runs:
            self.compare_to = prev_runs[0]
        else:
            self.compare_to = None
        self.comparison_neighboring_runs = self.neighboring_runs

    # FIX: only catch conversion failures here; the previous bare
    # `except:` also swallowed unrelated errors (even KeyboardInterrupt).
    try:
        self.num_comparison_runs = int(
            request.args.get('num_comparison_runs'))
    except (TypeError, ValueError):
        self.num_comparison_runs = 0

    # Find the baseline run, if requested.
    baseline_str = request.args.get('baseline')
    if baseline_str:
        baseline_id = int(baseline_str)
        self.baseline = ts.query(ts.Run).\
            filter_by(id=baseline_id).first()
        if self.baseline is None:
            # FIXME: Need better way to report this error.
            abort(404)
    else:
        self.baseline = None

    # Generate the reports. (The unused `comparison_start_run` local was
    # removed; the comparison run is passed directly below.)
    reports = lnt.server.reporting.runs.generate_run_report(
        self.run, baseurl=db_url_for('index', _external=True),
        only_html_body=only_html_body, result=None,
        compare_to=self.compare_to, baseline=self.baseline,
        num_comparison_runs=self.num_comparison_runs,
        aggregation_fn=self.aggregation_fn, confidence_lv=confidence_lv)
    _, self.text_report, self.html_report, self.sri = reports
def submit_run():
    """Handle a run submission POST: validate the input, stash a copy of
    the raw data under the temp directory, import it into the database,
    and return the import report as JSON."""
    if request.method == 'POST':
        input_file = request.files.get('file')
        input_data = request.form.get('input_data')
        commit = int(request.form.get('commit', 0))

        # Treat an empty upload as no upload at all.
        if input_file and not input_file.content_length:
            input_file = None

        if not input_file and not input_data:
            return render_template("submit_run.html",
                                   error="must provide input file or data")
        if input_file and input_data:
            return render_template(
                "submit_run.html", error="cannot provide input file *and* data")

        if input_file:
            data_value = input_file.read()
        else:
            data_value = input_data

        # Stash a copy of the raw submission.
        #
        # To keep the temporary directory organized, we keep files in
        # subdirectories organized by (database, year-month).
        utcnow = datetime.datetime.utcnow()
        tmpdir = os.path.join(current_app.old_config.tempDir, g.db_name,
                              "%04d-%02d" % (utcnow.year, utcnow.month))
        try:
            os.makedirs(tmpdir)
        except OSError:
            # FIX: `except OSError, e` is Python-2-only syntax (and `e` was
            # unused). The directory may already exist; that is fine.
            pass

        # Save the file under a name prefixed with the date, to make it easier
        # to use these files in cases we might need them for debugging or data
        # recovery.
        prefix = utcnow.strftime("data-%Y-%m-%d_%H-%M-%S")
        fd, path = tempfile.mkstemp(prefix=prefix, suffix='.plist',
                                    dir=str(tmpdir))
        os.write(fd, data_value)
        os.close(fd)

        # Get a DB connection.
        db = request.get_db()

        # Import the data.
        #
        # FIXME: Gracefully handle formats failures and DOS attempts. We
        # should at least reject overly large inputs.
        result = lnt.util.ImportData.import_and_report(current_app.old_config,
                                                       g.db_name, db, path,
                                                       '<auto>', commit)

        # It is nice to have a full URL to the run, so fixup the request URL
        # here were we know more about the flask instance.
        if result.get('result_url'):
            result['result_url'] = request.url_root + result['result_url']

        return flask.jsonify(**result)
def __init__(self, run_id, only_html_body=True):
    """Build the report data for run `run_id`, rendered for display on a
    real web page (bootstrap styles/classes applied to the report tables).

    Resolves the run, its comparison and baseline runs from the request
    arguments, then generates the text and HTML run reports.
    Aborts with 404 when the run (or a requested comparison/baseline
    run) does not exist.
    """
    self.db = request.get_db()
    self.ts = ts = request.get_testsuite()
    self.run = run = ts.query(ts.Run).filter_by(id=run_id).first()
    if run is None:
        abort(404)

    # Get the aggregation function to use.
    aggregation_fn_name = request.args.get('aggregation_fn')
    self.aggregation_fn = {'min': lnt.util.stats.safe_min,
                           'median': lnt.util.stats.median}.get(
        aggregation_fn_name, lnt.util.stats.safe_min)

    # Get the MW confidence level.
    try:
        confidence_lv = float(request.args.get('MW_confidence_lv'))
    except (TypeError, ValueError):
        confidence_lv = .05
    self.confidence_lv = confidence_lv

    # Find the neighboring runs, by order.
    prev_runs = list(ts.get_previous_runs_on_machine(run, N=3))
    next_runs = list(ts.get_next_runs_on_machine(run, N=3))
    self.neighboring_runs = next_runs[::-1] + [self.run] + prev_runs

    # Select the comparison run as either the previous run, or a user
    # specified comparison run.
    compare_to_str = request.args.get('compare_to')
    if compare_to_str:
        compare_to_id = int(compare_to_str)
        self.compare_to = ts.query(ts.Run).\
            filter_by(id=compare_to_id).first()
        if self.compare_to is None:
            # FIXME: Need better way to report this error.
            abort(404)
        self.comparison_neighboring_runs = (
            list(ts.get_next_runs_on_machine(self.compare_to, N=3))[::-1] +
            [self.compare_to] +
            list(ts.get_previous_runs_on_machine(self.compare_to, N=3)))
    else:
        if prev_runs:
            self.compare_to = prev_runs[0]
        else:
            self.compare_to = None
        self.comparison_neighboring_runs = self.neighboring_runs

    # FIX: only catch conversion failures here; the previous bare
    # `except:` also swallowed unrelated errors (even KeyboardInterrupt).
    try:
        self.num_comparison_runs = int(
            request.args.get('num_comparison_runs'))
    except (TypeError, ValueError):
        self.num_comparison_runs = 0

    # Find the baseline run, if requested.
    baseline_str = request.args.get('baseline')
    if baseline_str:
        baseline_id = int(baseline_str)
        self.baseline = ts.query(ts.Run).\
            filter_by(id=baseline_id).first()
        if self.baseline is None:
            # FIXME: Need better way to report this error.
            abort(404)
    else:
        self.baseline = None

    # We're going to render this on a real webpage with CSS support, so
    # override the default styles and provide bootstrap class names for
    # the tables.
    styles = {
        'body': '',
        'td': '',
        'h1': 'font-size: 14pt',
        'table': 'width: initial; font-size: 9pt;',
        'th': 'text-align: center;'
    }
    classes = {
        'table': 'table table-striped table-condensed table-hover'
    }

    # Generate the reports. (The unused `comparison_start_run` local was
    # removed; the comparison run is passed directly below.)
    reports = lnt.server.reporting.runs.generate_run_report(
        self.run, baseurl=db_url_for('index', _external=True),
        only_html_body=only_html_body, result=None,
        compare_to=self.compare_to, baseline=self.baseline,
        num_comparison_runs=self.num_comparison_runs,
        aggregation_fn=self.aggregation_fn, confidence_lv=confidence_lv,
        styles=styles, classes=classes)
    _, self.text_report, self.html_report, self.sri = reports