def v4_run_graph(id):
    """Legacy per-run graph endpoint.

    Graphs used to be addressed relative to a run; this view translates
    the old query-string encoding into the new machine-qualified one and
    redirects to the ``v4_graph`` endpoint.  404s when the run id is
    unknown.
    """
    ts = request.get_testsuite()
    run = ts.query(ts.Run).filter_by(id=id).first()
    if run is None:
        abort(404)

    # Translate old-style test parameters:
    #   test.<test id>=<sample field index>
    # becomes:
    #   plot.<number>=<machine id>.<test id>.<sample field index>
    # Every non-test parameter is forwarded untouched.
    forwarded = {'highlight_run': id}
    next_plot = 0
    for key, val in request.args.items():
        if key.startswith('test.'):
            _, test_id = key.split('.', 1)
            forwarded['plot.%d' % (next_plot,)] = '%d.%s.%s' % (
                run.machine.id, test_id, val)
            next_plot += 1
        else:
            forwarded[key] = val

    return redirect(v4_url_for("v4_graph", **forwarded))
def v4_regression_list():
    """List regressions for the current test suite, and handle the
    "Merge Regressions" action from the checkbox form.

    GET args:
      state: integer RegressionState to filter by; -1 shows all states.
      highlight: regression id to highlight in the rendered page.

    POST (merge_btn == "Merge Regressions"): merges all checked
    regressions into one new regression, deletes the originals, and
    redirects to the new regression's detail page.
    """
    ts = request.get_testsuite()
    form = MergeRegressionForm(request.form)

    if request.method == 'POST' and \
            request.form['merge_btn'] == "Merge Regressions":
        regressions_id_to_merge = form.regression_checkboxes.data
        regressions = ts.query(ts.Regression) \
            .filter(ts.Regression.id.in_(regressions_id_to_merge)).all()
        reg_inds = ts.query(ts.RegressionIndicator) \
            .filter(ts.RegressionIndicator.regression_id.in_(
                regressions_id_to_merge)) \
            .all()
        # The merged regression takes over every selected indicator's
        # field change, and inherits the state of the first selection.
        new_regress = new_regression(ts,
                                     [x.field_change_id for x in reg_inds])
        new_regress.state = regressions[0].state
        # Remove the now-superseded indicators and regressions.
        # (Was a pair of side-effect list comprehensions.)
        for ind in reg_inds:
            ts.delete(ind)
        for old_regression in regressions:
            ts.delete(old_regression)
        ts.commit()
        # Bug fix: message previously read "Created<title>" without a
        # space, unlike the sibling handlers.
        flash("Created " + new_regress.title, FLASH_SUCCESS)
        return redirect(v4_url_for("v4_regression_detail",
                                   id=new_regress.id))

    state_filter = int(request.args.get('state', RegressionState.ACTIVE))
    q = ts.query(ts.Regression)
    title = "All Regressions"
    if state_filter != -1:
        q = q.filter(ts.Regression.state == state_filter)
        title = RegressionState.names[state_filter]
    # Reverse so the newest regressions come first.
    regression_info = q.all()[::-1]

    form.regression_checkboxes.choices = list()
    regression_sizes = []
    impacts = []
    for regression in regression_info:
        form.regression_checkboxes.choices.append((regression.id, 1,))
        reg_inds = ts.query(ts.RegressionIndicator) \
            .filter(ts.RegressionIndicator.regression_id ==
                    regression.id) \
            .all()
        regression_sizes.append(len(reg_inds))
        impacts.append(calc_impact(ts, [x.field_change for x in reg_inds]))

    return render_template("v4_regression_list.html",
                           testsuite_name=g.testsuite_name,
                           regressions=regression_info,
                           highlight=request.args.get('highlight'),
                           title=title,
                           RegressionState=RegressionState,
                           form=form,
                           sizes=regression_sizes,
                           impacts=impacts,
                           analysis=lnt.server.reporting.analysis)
def v4_new_regressions():
    """Triage page for recent, un-triaged field changes.

    Lists up to 500 of the newest FieldChanges that are neither ignored
    nor already attached to a regression, each with its comparison data.

    POST actions (btn):
      "Create New Regression": group the checked field changes into a
          new regression and redirect to the regression list.
      "Ignore Changes": record a ChangeIgnore row for each checked
          field change so it no longer appears here.
    """
    form = TriagePageSelectedForm(request.form)
    session = request.session
    ts = request.get_testsuite()
    if request.method == 'POST' and \
            request.form['btn'] == "Create New Regression":
        regression, _ = new_regression(session, ts, form.field_changes.data)
        flash("Created " + regression.title, FLASH_SUCCESS)
        return redirect(v4_url_for(".v4_regression_list",
                                   highlight=regression.id))
    if request.method == 'POST' and request.form['btn'] == "Ignore Changes":
        msg = "Ignoring changes: "
        ignored = []
        for fc_id in form.field_changes.data:
            ignored.append(str(fc_id))
            fc = get_fieldchange(session, ts, fc_id)
            # A ChangeIgnore row excludes this field change from the
            # outer-join query below on future visits.
            ignored_change = ts.ChangeIgnore(fc)
            session.add(ignored_change)
        session.commit()
        flash(msg + ", ".join(ignored), FLASH_SUCCESS)
    # d = datetime.datetime.now()
    # two_weeks_ago = d - datetime.timedelta(days=14)
    # Newest un-ignored, un-triaged field changes, capped at 500.  The
    # outer joins + IS NULL filters exclude anything already ignored or
    # already attached to a regression.
    recent_fieldchange = session.query(ts.FieldChange) \
        .join(ts.Test) \
        .outerjoin(ts.ChangeIgnore) \
        .filter(ts.ChangeIgnore.id.is_(None)) \
        .outerjoin(ts.RegressionIndicator) \
        .filter(ts.RegressionIndicator.id.is_(None)) \
        .order_by(desc(ts.FieldChange.id)) \
        .limit(500) \
        .all()
    crs = []
    form.field_changes.choices = list()
    for fc in recent_fieldchange:
        # Prefer the values recorded on the field change; fall back to
        # recomputing the comparison when old_value was never stored.
        if fc.old_value is None:
            cr, key_run, _ = get_cr_for_field_change(session, ts, fc)
        else:
            cr = PrecomputedCR(fc.old_value, fc.new_value,
                               fc.field.bigger_is_better)
            key_run = get_first_runs_of_fieldchange(session, ts, fc)
        current_cr, _, _ = get_cr_for_field_change(session, ts, fc,
                                                   current=True)
        crs.append(ChangeData(fc, cr, key_run, current_cr))
        form.field_changes.choices.append((fc.id, 1,))
    return render_template("v4_new_regressions.html",
                           testsuite_name=g.testsuite_name,
                           changes=crs,
                           analysis=lnt.server.reporting.analysis,
                           form=form,
                           **ts_data(ts))
def v4_daily_report_overview():
    """Redirect to the daily report for the most recent run's date.

    Falls back to today's date when the test suite has no runs yet.
    """
    ts = request.get_testsuite()
    # Newest run by start time, if any.
    newest = ts.query(ts.Run) \
        .order_by(ts.Run.start_time.desc()) \
        .limit(1).first()
    report_date = newest.start_time if newest else datetime.date.today()
    return redirect(v4_url_for("v4_daily_report",
                               year=report_date.year,
                               month=report_date.month,
                               day=report_date.day))
def v4_make_regression(machine_id, test_id, field_index, run_id):
    """Create a regression manually from a graph data point.

    It is not necessarily the case that there will be a real change
    there, so we must create a regression, bypassing the normal
    analysis.

    Finds (or creates) a FieldChange covering the run's order on its
    machine, updates it with freshly computed comparison values, wraps
    it in a new ACTIVE regression, and redirects to that regression's
    detail page.  404s when no runs match the given run's order/machine.
    """
    ts = request.get_testsuite()
    field = ts.sample_fields[field_index]
    run = ts.query(ts.Run).get(run_id)
    # All runs at the same order on the same machine as the clicked run.
    runs = ts.query(ts.Run). \
        filter(ts.Run.order_id == run.order_id). \
        filter(ts.Run.machine_id == run.machine_id). \
        all()
    if not runs:
        abort(404)
    previous_runs = ts.get_previous_runs_on_machine(run, 1)
    # Find our start/end order: the change spans from the previous run's
    # order (if any) to this run's order.
    if previous_runs:
        start_order = previous_runs[0].order
    else:
        start_order = run.order
    end_order = run.order
    # Load our run data for the creation of the new fieldchanges.
    runs_to_load = [r.id for r in (runs + previous_runs)]
    runinfo = lnt.server.reporting.analysis.RunInfo(ts, runs_to_load)
    result = runinfo.get_comparison_result(
        runs, previous_runs, test_id, field,
        ts.Sample.get_hash_of_binary_field())
    # Try and find a matching FieldChange to update, else create one.
    # (Previously this used a redundant f=None sentinel plus an
    # always-true "if f:" guard, and an unused new_regression_id local.)
    try:
        f = ts.query(ts.FieldChange) \
            .filter(ts.FieldChange.start_order == start_order) \
            .filter(ts.FieldChange.end_order == end_order) \
            .filter(ts.FieldChange.test_id == test_id) \
            .filter(ts.FieldChange.machine == run.machine) \
            .filter(ts.FieldChange.field == field) \
            .one()
    except sqlalchemy.orm.exc.NoResultFound:
        test = ts.query(ts.Test).filter(ts.Test.id == test_id).one()
        f = ts.FieldChange(start_order=start_order,
                           end_order=run.order,
                           machine=run.machine,
                           test=test,
                           field=field)
        ts.add(f)
    # Always update the FieldChange with the newly computed values.
    f.old_value = result.previous
    f.new_value = result.current
    f.run = run
    ts.commit()
    # Make new regressions.
    regression = new_regression(ts, [f.id])
    regression.state = RegressionState.ACTIVE
    ts.commit()
    note("Manually created new regressions: {}".format(regression.id))
    flash("Created " + regression.title, FLASH_SUCCESS)
    return redirect(v4_url_for("v4_regression_detail", id=regression.id))
def v4_regression_detail(id):
    """Show and edit one regression.

    404s when the regression id is unknown.

    POST actions (save_btn):
      "Save Changes": update title/bug/state from the form.
      "Split Regression": move the checked field changes into a new
          regression and rebuild this one's title.
      "Delete": remove this regression and its indicators.

    GET renders the detail page (or JSON when ?json is set) with
    comparison data for each attached field change.
    """
    ts = request.get_testsuite()
    form = EditRegressionForm(request.form)
    try:
        regression_info = ts.query(ts.Regression) \
            .filter(ts.Regression.id == id) \
            .one()
    except NoResultFound as e:  # NOTE(review): `e` is unused.
        abort(404)
    if request.method == 'POST' and request.form['save_btn'] == "Save Changes":
        regression_info.title = form.title.data
        regression_info.bug = form.bug.data
        regression_info.state = form.state.data
        ts.commit()
        flash("Updated " + regression_info.title, FLASH_SUCCESS)
        return redirect(v4_url_for("v4_regression_list",
                                   highlight=regression_info.id,
                                   state=regression_info.state))
    if request.method == 'POST' and request.form['save_btn'] == "Split Regression":
        # For each of the regression indicators, grab their field ids.
        res_inds = ts.query(ts.RegressionIndicator) \
            .filter(ts.RegressionIndicator.field_change_id.in_(form.field_changes.data)) \
            .all()
        fc_ids = [x.field_change_id for x in res_inds]
        second_regression = new_regression(ts, fc_ids)
        second_regression.state = regression_info.state
        # Now remove our links to this regression.
        for res_ind in res_inds:
            ts.delete(res_ind)
        lnt.server.db.fieldchange.rebuild_title(ts, regression_info)
        ts.commit()
        flash("Split " + second_regression.title, FLASH_SUCCESS)
        return redirect(v4_url_for("v4_regression_list",
                                   highlight=second_regression.id))
    if request.method == 'POST' and request.form['save_btn'] == "Delete":
        # For each of the regression indicators, grab their field ids.
        title = regression_info.title
        res_inds = ts.query(ts.RegressionIndicator) \
            .filter(ts.RegressionIndicator.regression_id == regression_info.id) \
            .all()
        # Now remove our links to this regression.
        for res_ind in res_inds:
            ts.delete(res_ind)
        ts.delete(regression_info)
        ts.commit()
        flash("Deleted " + title, FLASH_SUCCESS)
        return redirect(v4_url_for("v4_regression_list"))
    # GET: prime the form with the regression's current values.
    form.field_changes.choices = list()
    form.state.default = regression_info.state
    form.process()
    form.title.data = regression_info.title
    form.bug.data = regression_info.bug
    regression_indicators = ts.query(ts.RegressionIndicator) \
        .filter(ts.RegressionIndicator.regression_id == id) \
        .all()
    crs = []
    test_suite_versions = set()
    form.field_changes.choices = list()
    for regression in regression_indicators:
        fc = regression.field_change
        if fc is None:
            continue
        # Use stored old/new values when available; recompute otherwise.
        if fc.old_value is None:
            cr, key_run, all_runs = get_cr_for_field_change(ts, fc)
        else:
            cr = PrecomputedCR(fc.old_value, fc.new_value,
                               fc.field.bigger_is_better)
            key_run = get_first_runs_of_fieldchange(ts, fc)
        current_cr, _, all_runs = get_cr_for_field_change(ts, fc,
                                                         current=True)
        crs.append(ChangeData(fc, cr, key_run, current_cr))
        form.field_changes.choices.append((fc.id, 1,))
        # NOTE(review): the loop variable `run` is never used; each
        # iteration reads key_run.parameters instead.  Looks like it
        # should inspect `run` — confirm intent before changing.
        for run in all_runs:
            ts_rev = key_run.parameters.get('test_suite_revision')
            if ts_rev and ts_rev != u'None':
                test_suite_versions.add(ts_rev)
    if len(test_suite_versions) > 1:
        # Warn when the attached runs came from different versions of
        # the test suite, since comparisons may not be apples-to-apples.
        revs = ', '.join(list(test_suite_versions))
        flash("More than one test-suite version: " + revs, FLASH_DANGER)
    if request.args.get('json'):
        return json.dumps({u'Regression': regression_info,
                           u'Changes': crs},
                          cls=LNTEncoder)
    return render_template("v4_regression_detail.html",
                           testsuite_name=g.testsuite_name,
                           regression=regression_info,
                           changes=crs,
                           form=form,
                           analysis=lnt.server.reporting.analysis)
def v4_profile(testid, run1_id, run2_id=None):
    """Render the profile viewer for one test in one run, optionally
    comparing against the same test in a second run.

    404s when the test or either run does not exist.  A run may have no
    sample for the test; the template then receives sample: None.
    """
    ts = request.get_testsuite()
    try:
        test = ts.query(ts.Test).filter(ts.Test.id == testid).one()
        run1 = ts.query(ts.Run).filter(ts.Run.id == run1_id).one()
        sample1 = ts.query(ts.Sample) \
            .filter(ts.Sample.run_id == run1_id) \
            .filter(ts.Sample.test_id == testid).first()
        if run2_id is not None:
            run2 = ts.query(ts.Run).filter(ts.Run.id == run2_id).one()
            sample2 = ts.query(ts.Sample) \
                .filter(ts.Sample.run_id == run2_id) \
                .filter(ts.Sample.test_id == testid).first()
        else:
            run2 = None
            sample2 = None
    except NoResultFound:
        # FIXME: Make this a nicer error page.
        abort(404)

    # Removed dead locals profileDir/profile1/profile2: they were never
    # used, and `sample1.profile` raised AttributeError when the run had
    # no sample for this test (sample1 is None from .first()).
    json_run1 = {
        'id': run1.id,
        'order': run1.order.llvm_project_revision,
        'machine': run1.machine.name,
        'sample': sample1.id if sample1 else None
    }
    if run2:
        json_run2 = {
            'id': run2.id,
            'order': run2.order.llvm_project_revision,
            'machine': run2.machine.name,
            'sample': sample2.id if sample2 else None
        }
    else:
        json_run2 = {}
    # URL templates with placeholder ids; the client substitutes the
    # real ids into the <testid>/<run1id>/<run2id> slots.
    urls = {
        'search': v4_url_for('v4_search'),
        'singlerun_template': v4_url_for('v4_profile_fwd',
                                         testid=1111, run1_id=2222)
            .replace('1111', '<testid>').replace('2222', '<run1id>'),
        'comparison_template': v4_url_for('v4_profile_fwd2',
                                          testid=1111, run1_id=2222,
                                          run2_id=3333)
            .replace('1111', '<testid>').replace('2222', '<run1id>')
            .replace('3333', '<run2id>'),
        'getTopLevelCounters':
            v4_url_for('v4_profile_ajax_getTopLevelCounters'),
        'getFunctions': v4_url_for('v4_profile_ajax_getFunctions'),
        'getCodeForFunction':
            v4_url_for('v4_profile_ajax_getCodeForFunction'),
    }
    return render_template("v4_profile.html",
                           ts=ts, test=test,
                           run1=json_run1, run2=json_run2,
                           urls=urls)
def v4_regression_list():
    """List regressions, with optional state and machine filters, and
    handle the bulk Merge/Delete actions from the checkbox form.

    GET args:
      state: integer RegressionState to filter by; -1 shows all states.
      machine_filter: only show regressions touching this machine name.
      highlight: regression id to highlight in the rendered page.

    POST actions (merge_btn):
      "Merge Regressions": fold the checked regressions into one new
          regression; the originals are marked IGNORED and linked to it.
      "Delete Regressions": remove the checked regressions and their
          indicators.
    """
    session = request.session
    ts = request.get_testsuite()
    form = MergeRegressionForm(request.form)
    machine_filter = request.args.get('machine_filter')
    state_filter = int(request.args.get('state', RegressionState.ACTIVE))
    # Merge requested regressions.
    if request.method == 'POST' and \
            request.form['merge_btn'] == "Merge Regressions":
        reg_inds, regressions = \
            _get_regressions_from_selected_form(session, form, ts)
        links = []
        # `target` picks which selected regression donates state/title:
        # the last one that has a bug link, else the first.
        target = 0
        for i, r in enumerate(regressions):
            if r.bug:
                target = i
                links.append(r.bug)
        new_regress, _ = new_regression(
            session, ts, [x.field_change_id for x in reg_inds])
        new_regress.state = regressions[target].state
        new_regress.title = regressions[target].title
        new_regress.bug = ' '.join(links)
        # Keep the old regressions around (IGNORED) as breadcrumbs that
        # point at the merged one.
        for r in regressions:
            r.bug = v4_url_for(".v4_regression_detail", id=new_regress.id)
            r.title = "Merged into Regression " + str(new_regress.id)
            r.state = RegressionState.IGNORED
        [session.delete(x) for x in reg_inds]
        session.commit()
        flash("Created: " + new_regress.title, FLASH_SUCCESS)
        return redirect(v4_url_for(".v4_regression_detail",
                                   id=new_regress.id))
    # Delete requested regressions.
    if request.method == 'POST' and \
            request.form['merge_btn'] == "Delete Regressions":
        reg_inds, regressions = \
            _get_regressions_from_selected_form(session, form, ts)
        titles = [r.title for r in regressions]
        for res_ind in reg_inds:
            session.delete(res_ind)
        for reg in regressions:
            session.delete(reg)
        session.commit()
        flash(' Deleted: '.join(titles), FLASH_SUCCESS)
        return redirect(v4_url_for(".v4_regression_list",
                                   state=state_filter))
    q = session.query(ts.Regression)
    title = "All Regressions"
    if state_filter != -1:
        q = q.filter(ts.Regression.state == state_filter)
        title = RegressionState.names[state_filter]
    # Newest first.
    regression_info = q.all()[::-1]
    form.regression_checkboxes.choices = list()
    regression_sizes = []
    impacts = []
    ages = []
    filtered_regressions = []
    for regression in regression_info:
        reg_inds = session.query(ts.RegressionIndicator) \
            .filter(ts.RegressionIndicator.regression_id ==
                    regression.id) \
            .all()
        if machine_filter:
            machine_names = \
                set([x.field_change.machine.name for x in reg_inds])
            if machine_filter in machine_names:
                filtered_regressions.append(regression)
            else:
                continue
        else:
            filtered_regressions.append(regression)
        form.regression_checkboxes.choices.append((regression.id, 1,))
        regression_sizes.append(len(reg_inds))
        impacts.append(calc_impact(session, ts,
                                   [x.field_change for x in reg_inds]))
        # Now guess the regression age:
        if len(reg_inds) and reg_inds[0].field_change and \
                reg_inds[0].field_change.run:
            age = reg_inds[0].field_change.run.end_time
        else:
            age = EmptyDate()
        ages.append(age)
    return render_template("v4_regression_list.html",
                           testsuite_name=g.testsuite_name,
                           regressions=filtered_regressions,
                           highlight=request.args.get('highlight'),
                           title=title,
                           RegressionState=RegressionState,
                           state_filter=state_filter,
                           form=form,
                           sizes=regression_sizes,
                           impacts=impacts,
                           ages=ages,
                           analysis=lnt.server.reporting.analysis,
                           **ts_data(ts))
def v4_regression_detail(id):
    """Show and edit one regression (variant that round-trips the list
    state filter through form.edit_state).

    404s when the regression id is unknown.

    POST actions (save_btn):
      "Save Changes": update title/bug/state from the form.
      "Split Regression": move the checked field changes into a new
          regression and rebuild this one's title.
      "Delete": remove this regression and its indicators.

    GET renders the detail page (or JSON when ?json is set) with
    comparison data for each attached field change.
    """
    ts = request.get_testsuite()
    form = EditRegressionForm(request.form)
    try:
        regression_info = ts.query(ts.Regression) \
            .filter(ts.Regression.id == id) \
            .one()
    except NoResultFound as e:  # NOTE(review): `e` is unused.
        abort(404)
    if request.method == 'POST' and request.form['save_btn'] == "Save Changes":
        regression_info.title = form.title.data
        regression_info.bug = form.bug.data
        regression_info.state = form.state.data
        ts.commit()
        flash("Updated " + regression_info.title, FLASH_SUCCESS)
        # edit_state carries the state filter the user came from, so the
        # redirect lands back on the same list view.
        return redirect(
            v4_url_for("v4_regression_list",
                       highlight=regression_info.id,
                       state=int(form.edit_state.data)))
    if request.method == 'POST' and request.form[
            'save_btn'] == "Split Regression":
        # For each of the regression indicators, grab their field ids.
        res_inds = ts.query(ts.RegressionIndicator) \
            .filter(ts.RegressionIndicator.field_change_id.in_(form.field_changes.data)) \
            .all()
        fc_ids = [x.field_change_id for x in res_inds]
        second_regression = new_regression(ts, fc_ids)
        second_regression.state = regression_info.state
        # Now remove our links to this regression.
        for res_ind in res_inds:
            ts.delete(res_ind)
        lnt.server.db.fieldchange.rebuild_title(ts, regression_info)
        ts.commit()
        flash("Split " + second_regression.title, FLASH_SUCCESS)
        return redirect(
            v4_url_for("v4_regression_list",
                       highlight=second_regression.id,
                       state=int(form.edit_state.data)))
    if request.method == 'POST' and request.form['save_btn'] == "Delete":
        # For each of the regression indicators, grab their field ids.
        title = regression_info.title
        res_inds = ts.query(ts.RegressionIndicator) \
            .filter(ts.RegressionIndicator.regression_id == regression_info.id) \
            .all()
        # Now remove our links to this regression.
        for res_ind in res_inds:
            ts.delete(res_ind)
        ts.delete(regression_info)
        ts.commit()
        flash("Deleted " + title, FLASH_SUCCESS)
        return redirect(
            v4_url_for("v4_regression_list",
                       state=int(form.edit_state.data)))
    # GET: prime the form with the regression's current values.
    form.field_changes.choices = list()
    form.state.default = regression_info.state
    form.process()
    form.edit_state.data = regression_info.state
    form.title.data = regression_info.title
    form.bug.data = regression_info.bug
    regression_indicators = ts.query(ts.RegressionIndicator) \
        .filter(ts.RegressionIndicator.regression_id == id) \
        .all()
    crs = []
    test_suite_versions = set()
    form.field_changes.choices = list()
    # If we have more than 10 regressions, don't graph any by default.
    checkbox_state = 1
    if len(regression_indicators) >= 10:
        checkbox_state = 0
    for regression in regression_indicators:
        fc = regression.field_change
        if fc is None:
            continue
        # Use stored old/new values when available; recompute otherwise.
        if fc.old_value is None:
            cr, key_run, all_runs = get_cr_for_field_change(ts, fc)
        else:
            cr = PrecomputedCR(fc.old_value, fc.new_value,
                               fc.field.bigger_is_better)
            key_run = get_first_runs_of_fieldchange(ts, fc)
        current_cr, _, all_runs = get_cr_for_field_change(ts, fc,
                                                         current=True)
        crs.append(ChangeData(fc, cr, key_run, current_cr))
        form.field_changes.choices.append((
            fc.id,
            checkbox_state,
        ))
        # NOTE(review): the loop variable `run` is never used; each
        # iteration reads key_run.parameters instead — confirm intent.
        for run in all_runs:
            ts_rev = key_run.parameters.get('test_suite_revision')
            if ts_rev and ts_rev != u'None':
                test_suite_versions.add(ts_rev)
    if len(test_suite_versions) > 1:
        # Warn when runs span multiple test-suite versions.
        revs = ', '.join(list(test_suite_versions))
        flash("More than one test-suite version: " + revs, FLASH_DANGER)
    if request.args.get('json'):
        return json.dumps({
            u'Regression': regression_info,
            u'Changes': crs
        }, cls=LNTEncoder)
    return render_template("v4_regression_detail.html",
                           testsuite_name=g.testsuite_name,
                           regression=regression_info,
                           changes=crs,
                           form=form,
                           analysis=lnt.server.reporting.analysis,
                           check_all=checkbox_state)
def v4_regression_list():
    """List regressions, with optional state and machine filters, and
    handle the bulk Merge/Delete actions from the checkbox form.

    GET args:
      state: integer RegressionState to filter by; -1 shows all states.
      machine_filter: only show regressions touching this machine name.
      highlight: regression id to highlight in the rendered page.

    POST actions (merge_btn):
      "Merge Regressions": fold the checked regressions into one new
          regression; the originals are marked IGNORED and linked to it.
      "Delete Regressions": remove the checked regressions and their
          indicators.
    """
    ts = request.get_testsuite()
    form = MergeRegressionForm(request.form)
    machine_filter = request.args.get('machine_filter')
    state_filter = int(request.args.get('state', RegressionState.ACTIVE))
    # Merge requested regressions.
    if request.method == 'POST' and \
            request.form['merge_btn'] == "Merge Regressions":
        reg_inds, regressions = _get_regressions_from_selected_form(form, ts)
        links = []
        # `target` picks which selected regression donates state/title:
        # the last one that has a bug link, else the first.
        target = 0
        for i, r in enumerate(regressions):
            if r.bug:
                target = i
                links.append(r.bug)
        new_regress = new_regression(ts,
                                     [x.field_change_id for x in reg_inds])
        new_regress.state = regressions[target].state
        new_regress.title = regressions[target].title
        new_regress.bug = ' '.join(links)
        # Keep the old regressions around (IGNORED) as breadcrumbs that
        # point at the merged one.
        for r in regressions:
            r.bug = v4_url_for("v4_regression_detail", id=new_regress.id)
            r.title = "Merged into Regression " + str(new_regress.id)
            r.state = RegressionState.IGNORED
        [ts.delete(x) for x in reg_inds]
        ts.commit()
        flash("Created: " + new_regress.title, FLASH_SUCCESS)
        return redirect(v4_url_for("v4_regression_detail",
                                   id=new_regress.id))
    # Delete requested regressions.
    if request.method == 'POST' and \
            request.form['merge_btn'] == "Delete Regressions":
        reg_inds, regressions = _get_regressions_from_selected_form(form, ts)
        titles = [r.title for r in regressions]
        for res_ind in reg_inds:
            ts.delete(res_ind)
        for reg in regressions:
            ts.delete(reg)
        ts.commit()
        flash(' Deleted: '.join(titles), FLASH_SUCCESS)
        return redirect(v4_url_for("v4_regression_list",
                                   state=state_filter))
    q = ts.query(ts.Regression)
    title = "All Regressions"
    if state_filter != -1:
        q = q.filter(ts.Regression.state == state_filter)
        title = RegressionState.names[state_filter]
    # Newest first.
    regression_info = q.all()[::-1]
    form.regression_checkboxes.choices = list()
    regression_sizes = []
    impacts = []
    ages = []
    filtered_regressions = []
    for regression in regression_info:
        reg_inds = ts.query(ts.RegressionIndicator) \
            .filter(ts.RegressionIndicator.regression_id ==
                    regression.id) \
            .all()
        if machine_filter:
            machine_names = set(
                [x.field_change.machine.name for x in reg_inds])
            if machine_filter in machine_names:
                filtered_regressions.append(regression)
            else:
                continue
        else:
            filtered_regressions.append(regression)
        form.regression_checkboxes.choices.append((
            regression.id,
            1,
        ))
        regression_sizes.append(len(reg_inds))
        impacts.append(calc_impact(ts, [x.field_change for x in reg_inds]))
        # Now guess the regression age:
        if len(reg_inds
               ) and reg_inds[0].field_change and reg_inds[0].field_change.run:
            age = reg_inds[0].field_change.run.end_time
        else:
            age = EmptyDate()
        ages.append(age)
    return render_template("v4_regression_list.html",
                           testsuite_name=g.testsuite_name,
                           regressions=filtered_regressions,
                           highlight=request.args.get('highlight'),
                           title=title,
                           RegressionState=RegressionState,
                           state_filter=state_filter,
                           form=form,
                           sizes=regression_sizes,
                           impacts=impacts,
                           ages=ages,
                           analysis=lnt.server.reporting.analysis)
def v4_profile(testid, run1_id, run2_id=None):
    """Render the profile viewer for one test in one run, optionally
    comparing against the same test in a second run (session variant).

    404s when the test or either run does not exist.  A run may have no
    sample for the test; the template then receives sample: None.
    """
    session = request.session
    ts = request.get_testsuite()
    try:
        test = session.query(ts.Test).filter(ts.Test.id == testid).one()
        run1 = session.query(ts.Run).filter(ts.Run.id == run1_id).one()
        sample1 = session.query(ts.Sample) \
            .filter(ts.Sample.run_id == run1_id) \
            .filter(ts.Sample.test_id == testid).first()
        if run2_id is not None:
            run2 = session.query(ts.Run).filter(ts.Run.id == run2_id).one()
            sample2 = session.query(ts.Sample) \
                .filter(ts.Sample.run_id == run2_id) \
                .filter(ts.Sample.test_id == testid).first()
        else:
            run2 = None
            sample2 = None
    except NoResultFound:
        # FIXME: Make this a nicer error page.
        abort(404)

    # Removed dead locals profileDir/profile1/profile2: they were never
    # used, and `sample1.profile` raised AttributeError when the run had
    # no sample for this test (sample1 is None from .first()).
    json_run1 = {
        'id': run1.id,
        'order': run1.order.llvm_project_revision,
        'machine': run1.machine.name,
        'sample': sample1.id if sample1 else None
    }
    if run2:
        json_run2 = {
            'id': run2.id,
            'order': run2.order.llvm_project_revision,
            'machine': run2.machine.name,
            'sample': sample2.id if sample2 else None
        }
    else:
        json_run2 = {}
    # URL templates with placeholder ids; the client substitutes the
    # real ids into the <testid>/<run1id>/<run2id> slots.
    urls = {
        'search': v4_url_for('.v4_search'),
        'singlerun_template': v4_url_for('.v4_profile_fwd',
                                         testid=1111, run1_id=2222)
            .replace('1111', '<testid>').replace('2222', '<run1id>'),
        'comparison_template': v4_url_for('.v4_profile_fwd2',
                                          testid=1111, run1_id=2222,
                                          run2_id=3333)
            .replace('1111', '<testid>').replace('2222', '<run1id>')
            .replace('3333', '<run2id>'),
        'getTopLevelCounters':
            v4_url_for('.v4_profile_ajax_getTopLevelCounters'),
        'getFunctions': v4_url_for('.v4_profile_ajax_getFunctions'),
        'getCodeForFunction':
            v4_url_for('.v4_profile_ajax_getCodeForFunction'),
    }
    return render_template("v4_profile.html",
                           test=test,
                           run1=json_run1, run2=json_run2,
                           urls=urls,
                           **ts_data(ts))
def v4_make_regression(machine_id, test_id, field_index, run_id):
    """Create a regression manually from a graph data point.

    It is not necessarily the case that there will be a real change
    there, so we must create a regression, bypassing the normal
    analysis.

    Finds (or creates) a FieldChange covering the run's order on its
    machine, updates it with freshly computed comparison values, wraps
    it in a new ACTIVE regression, and redirects to that regression's
    detail page.  404s when no runs match the given run's order/machine.
    """
    session = request.session
    ts = request.get_testsuite()
    field = ts.sample_fields[field_index]
    run = session.query(ts.Run).get(run_id)
    # All runs at the same order on the same machine as the clicked run.
    runs = session.query(ts.Run). \
        filter(ts.Run.order_id == run.order_id). \
        filter(ts.Run.machine_id == run.machine_id). \
        all()
    if not runs:
        abort(404)
    previous_runs = ts.get_previous_runs_on_machine(session, run, 1)
    # Find our start/end order: the change spans from the previous run's
    # order (if any) to this run's order.
    if previous_runs:
        start_order = previous_runs[0].order
    else:
        start_order = run.order
    end_order = run.order
    # Load our run data for the creation of the new fieldchanges.
    runs_to_load = [r.id for r in (runs + previous_runs)]
    runinfo = lnt.server.reporting.analysis.RunInfo(session, ts,
                                                    runs_to_load)
    result = runinfo.get_comparison_result(
        runs, previous_runs, test_id, field,
        ts.Sample.get_hash_of_binary_field())
    # Try and find a matching FieldChange to update, else create one.
    # (Previously had an unused new_regression_id local and an
    # always-true "if f:" guard — f is bound on both paths.)
    try:
        f = session.query(ts.FieldChange) \
            .filter(ts.FieldChange.start_order == start_order) \
            .filter(ts.FieldChange.end_order == end_order) \
            .filter(ts.FieldChange.test_id == test_id) \
            .filter(ts.FieldChange.machine == run.machine) \
            .filter(ts.FieldChange.field_id == field.id) \
            .one()
    except sqlalchemy.orm.exc.NoResultFound:
        # Create one
        test = session.query(ts.Test).filter(ts.Test.id == test_id).one()
        f = ts.FieldChange(start_order=start_order,
                           end_order=run.order,
                           machine=run.machine,
                           test=test,
                           field_id=field.id)
        session.add(f)
    # Always update the FieldChange with the newly computed values.
    f.old_value = result.previous
    f.new_value = result.current
    f.run = run
    session.commit()
    # Make new regressions.
    regression, _ = new_regression(session, ts, [f.id])
    regression.state = RegressionState.ACTIVE
    session.commit()
    logger.info("Manually created new regressions: {}".format(regression.id))
    flash("Created " + regression.title, FLASH_SUCCESS)
    return v4_redirect(v4_url_for(".v4_regression_detail", id=regression.id))
def v4_regression_detail(id):
    """Show and edit one regression.

    404s when the regression id is unknown.

    POST actions (save_btn):
      "Save Changes": update title/bug/state from the form.
      "Split Regression": move the checked field changes into a new
          regression and rebuild this one's title.
      "Delete": remove this regression and its indicators.

    GET renders the detail page with comparison data for each attached
    field change.
    """
    ts = request.get_testsuite()
    form = EditRegressionForm(request.form)
    # Bug fix: an unknown id previously raised NoResultFound straight
    # out of .one() (a 500); the sibling variants of this view return a
    # 404 instead, so do the same here.
    try:
        regression_info = ts.query(ts.Regression) \
            .filter(ts.Regression.id == id) \
            .one()
    except NoResultFound:
        abort(404)
    if request.method == 'POST' and request.form['save_btn'] == "Save Changes":
        regression_info.title = form.title.data
        regression_info.bug = form.bug.data
        regression_info.state = form.state.data
        ts.commit()
        flash("Updated " + regression_info.title, FLASH_SUCCESS)
        return redirect(
            v4_url_for("v4_regression_list",
                       highlight=regression_info.id,
                       state=regression_info.state))
    if request.method == 'POST' and request.form[
            'save_btn'] == "Split Regression":
        # For each of the regression indicators, grab their field ids.
        res_inds = ts.query(ts.RegressionIndicator) \
            .filter(ts.RegressionIndicator.field_change_id.in_(form.field_changes.data)) \
            .all()
        fc_ids = [x.field_change_id for x in res_inds]
        second_regression = new_regression(ts, fc_ids)
        second_regression.state = regression_info.state
        # Now remove our links to this regression.
        for res_ind in res_inds:
            ts.delete(res_ind)
        lnt.server.db.fieldchange.rebuild_title(ts, regression_info)
        ts.commit()
        flash("Split " + second_regression.title, FLASH_SUCCESS)
        return redirect(
            v4_url_for("v4_regression_list",
                       highlight=second_regression.id))
    if request.method == 'POST' and request.form['save_btn'] == "Delete":
        # For each of the regression indicators, grab their field ids.
        title = regression_info.title
        res_inds = ts.query(ts.RegressionIndicator) \
            .filter(ts.RegressionIndicator.regression_id == regression_info.id) \
            .all()
        # Now remove our links to this regression.
        for res_ind in res_inds:
            ts.delete(res_ind)
        ts.delete(regression_info)
        ts.commit()
        flash("Deleted " + title, FLASH_SUCCESS)
        return redirect(v4_url_for("v4_regression_list"))
    # GET: prime the form with the regression's current values.
    form.field_changes.choices = list()
    form.state.default = regression_info.state
    form.process()
    form.title.data = regression_info.title
    form.bug.data = regression_info.bug
    regression_indicators = ts.query(ts.RegressionIndicator) \
        .filter(ts.RegressionIndicator.regression_id == id) \
        .all()
    crs = []
    form.field_changes.choices = list()
    for regression in regression_indicators:
        fc = regression.field_change
        if fc is None:
            continue
        # Use stored old/new values when available; recompute otherwise.
        if fc.old_value is None:
            cr, key_run = get_cr_for_field_change(ts, fc)
        else:
            cr = PrecomputedCR(fc.old_value, fc.new_value,
                               fc.field.bigger_is_better)
            key_run = get_first_runs_of_fieldchange(ts, fc)
        current_cr, _ = get_cr_for_field_change(ts, fc, current=True)
        crs.append(ChangeData(fc, cr, key_run, current_cr))
        form.field_changes.choices.append((
            fc.id,
            1,
        ))
    return render_template("v4_regression_detail.html",
                           testsuite_name=g.testsuite_name,
                           regression=regression_info,
                           changes=crs,
                           form=form,
                           analysis=lnt.server.reporting.analysis)
def v4_run(id):
    """Render the results page for one run.

    Parses the display options out of the query string, optionally
    filters the test list by name regex, and either renders the HTML
    report or (when ?json is set) returns a JSON summary of every
    sample field for every test reported in the run.
    """
    info = V4RequestInfo(id)
    ts = info.ts
    run = info.run
    # Parse the view options.
    options = {}
    options['show_delta'] = bool(request.args.get('show_delta'))
    options['show_previous'] = bool(request.args.get('show_previous'))
    options['show_stddev'] = bool(request.args.get('show_stddev'))
    options['show_mad'] = bool(request.args.get('show_mad'))
    options['show_all'] = bool(request.args.get('show_all'))
    options['show_all_samples'] = bool(request.args.get('show_all_samples'))
    options['show_sample_counts'] = \
        bool(request.args.get('show_sample_counts'))
    options['show_graphs'] = show_graphs = \
        bool(request.args.get('show_graphs'))
    options['show_data_table'] = bool(request.args.get('show_data_table'))
    options['show_small_diff'] = bool(request.args.get('show_small_diff'))
    options['hide_report_by_default'] = bool(
        request.args.get('hide_report_by_default'))
    options['num_comparison_runs'] = info.num_comparison_runs
    options['test_filter'] = test_filter_str = request.args.get(
        'test_filter', '')
    options['MW_confidence_lv'] = info.confidence_lv
    # Optional regex that restricts which tests appear.
    if test_filter_str:
        test_filter_re = re.compile(test_filter_str)
    else:
        test_filter_re = None
    options['test_min_value_filter'] = test_min_value_filter_str = \
        request.args.get('test_min_value_filter', '')
    if test_min_value_filter_str != '':
        test_min_value_filter = float(test_min_value_filter_str)
    else:
        test_min_value_filter = 0.0
    options['aggregation_fn'] = request.args.get('aggregation_fn', 'min')
    # Get the test names.
    test_info = ts.query(ts.Test.name, ts.Test.id).\
        order_by(ts.Test.name).all()
    # Filter the list of tests by name, if requested.
    if test_filter_re:
        test_info = [test for test in test_info
                     if test_filter_re.search(test[0])]
    if request.args.get('json'):
        # JSON mode: one entry per reported test, mapping each sample
        # field name to the run's current comparison value.
        json_obj = dict()
        sri = lnt.server.reporting.analysis.RunInfo(ts, [id])
        reported_tests = ts.query(ts.Test.name, ts.Test.id).\
            filter(ts.Run.id == id).\
            filter(ts.Test.id.in_(sri.test_ids)).all()
        json_obj['tests'] = {}
        for test_name, test_id in reported_tests:
            test = {}
            test['name'] = test_name
            for sample_field in ts.sample_fields:
                res = sri.get_run_comparison_result(
                    run, None, test_id, sample_field,
                    ts.Sample.get_hash_of_binary_field())
                test[sample_field.name] = res.current
            json_obj['tests'][test_id] = test
        return flask.jsonify(**json_obj)
    urls = {
        'search': v4_url_for('v4_search')
    }
    return render_template(
        "v4_run.html", ts=ts, options=options,
        metric_fields=list(ts.Sample.get_metric_fields()),
        test_info=test_info,
        analysis=lnt.server.reporting.analysis,
        test_min_value_filter=test_min_value_filter,
        request_info=info,
        urls=urls
    )