def training(path):
    """Render the results page for a single model identified by *path*.

    Pulls up to 50 cached metric snapshots, builds every plot the
    results template needs, and hands them over as template context.
    """
    model_data = g.model_data_manager.models[path]
    history = model_data.get_metrics(50)
    # Several plots only care about the most recent snapshot.
    latest = history[0]

    context = {
        'precision_recall_curve': plots.precision_recall_curve(history),
        'roc_curve': plots.roc_curve(history),
        'score_distribution': plots.score_distribution(latest),
        'absolute_score_distribution': plots.absolute_score_distribution(latest),
        'marginal_precision_curve': plots.marginal_precision_curve(latest),
        'threshold_graph': plots.thresholds_graph(latest),
        'threshold_table': plots.thresholds_table(latest),
        'auc': auc(latest['fprs'], latest['recalls']),
        'notes': model_data.get_notes(),
        'model_metadata': model_data.get_metadata(),
        'path': path,
    }
    return render_template("results.html", **context)
def compare():
    """Render a side-by-side comparison of the models selected via the
    ``model[]`` query parameters.

    Overlays each model's precision-recall and ROC curves on shared axes.
    """
    models = request.args.getlist('model[]')
    cached_datas = []
    for path in models:
        model_data = g.model_data_manager.models[path]
        cached_datas.append(model_data.get_metrics(10))

    # Initialize to None so an empty model selection does not raise
    # UnboundLocalError below (original bug: prc/roc were only bound
    # inside the loops).
    prc = None
    roc = None

    # One shared axes per figure so every model's curve is overlaid.
    _, ax = plt.subplots(figsize=(12, 6))
    for name, cached_data in zip(models, cached_datas):
        prc = plots.precision_recall_curve(cached_data, ax=ax, label=name)

    _, ax = plt.subplots(figsize=(12, 6))
    for name, cached_data in zip(models, cached_datas):
        roc = plots.roc_curve(cached_data, ax=ax, label=name)

    context = {'precision_recall_curve': prc, 'roc_curve': roc}
    return render_template("compare.html", **context)
def training(path):
    """Render the results page for the model stored under *path*.

    Fetches up to 50 cached metric snapshots and prepares all plots
    (including the Brier-score box plot) for the results template.
    """
    model_data = g.model_data_manager.models[path]
    history = model_data.get_metrics(50)
    # The newest snapshot drives the single-run plots and the AUC value.
    latest = history[0]

    context = {
        'precision_recall_curve': plots.precision_recall_curve(history),
        'roc_curve': plots.roc_curve(history),
        'score_distribution': plots.score_distribution(latest),
        'marginal_precision_curve': plots.marginal_precision_curve(latest),
        'threshold_graph': plots.thresholds_graph(latest),
        'threshold_table': plots.thresholds_table(latest),
        'brier': plots.box_brier(history),
        'auc': auc(latest['fprs'], latest['recalls']),
        'notes': model_data.get_notes(),
        'path': path,
    }
    return render_template("results.html", **context)
def training(path):
    """Render the results page for the model stored under *path*.

    Loads the model's cached metrics from the file system and builds
    the plot/context payload for the results template.
    """
    model_data = ModelData(g.file_system, path)
    cached_data = model_data.get_metrics(50)
    # Newest snapshot; single-run plots and the AUC only use this entry.
    latest = cached_data[0]

    context = {
        'precision_recall_curve': plots.precision_recall_curve(cached_data),
        'roc_curve': plots.roc_curve(cached_data),
        'score_distribution': plots.score_distribution(latest),
        'marginal_precision_curve': plots.marginal_precision_curve(latest),
        'threshold_graph': plots.thresholds_graph(latest),
        'threshold_table': plots.thresholds_table(latest),
        'brier': plots.box_brier(cached_data),
        'auc': auc(latest['fprs'], latest['recalls']),
        # Reuse the existing ModelData instance instead of constructing
        # a second identical one just to read the notes.
        'notes': model_data.get_notes(),
        'path': path,
    }
    return render_template("results.html", **context)
def training(path):
    """Render the results page for the model stored under *path*.

    Loads the model's cached metrics from the file system and builds
    the plot/context payload (support-precision variant) for the
    results template.
    """
    model_data = ModelData(g.file_system, path)
    cached_data = model_data.get_metrics(50)
    # Newest snapshot; single-run plots and the AUC only use this entry.
    latest = cached_data[0]

    context = {
        'support_precision_curve': plots.support_precision_curve(cached_data),
        'roc_curve': plots.roc_curve(cached_data),
        'score_distribution': plots.score_distribution(latest),
        'marginal_precision_curve': plots.marginal_precision_curve(latest),
        'threshold_graph': plots.thresholds_graph(latest),
        'threshold_table': plots.thresholds_table(latest),
        'brier': plots.box_brier(cached_data),
        'auc': auc(latest['fprs'], latest['recalls']),
        # Reuse the existing ModelData instance instead of constructing
        # a second identical one just to read the notes.
        'notes': model_data.get_notes(),
        'path': path,
    }
    return render_template("results.html", **context)
def compare():
    """Render a side-by-side comparison of the models selected via the
    ``model[]`` query parameters.

    Overlays each model's support-precision and ROC curves on shared axes.
    """
    models = request.args.getlist('model[]')
    cached_datas = []
    for path in models:
        model_data = ModelData(g.file_system, path)
        cached_datas.append(model_data.get_metrics(10))

    # Initialize to None so an empty model selection does not raise
    # UnboundLocalError below (original bug: prc/roc were only bound
    # inside the loops).
    prc = None
    roc = None

    # One shared axes per figure so every model's curve is overlaid.
    _, ax = plt.subplots(figsize=(12, 6))
    for name, cached_data in zip(models, cached_datas):
        prc = plots.support_precision_curve(cached_data, ax=ax, label=name)

    _, ax = plt.subplots(figsize=(12, 6))
    for name, cached_data in zip(models, cached_datas):
        roc = plots.roc_curve(cached_data, ax=ax, label=name)

    context = {'support_precision_curve': prc, 'roc_curve': roc}
    return render_template("compare.html", **context)
def compare():
    """Render a side-by-side comparison of the models selected via the
    ``model[]`` query parameters.

    Overlays each model's precision-recall and ROC curves on shared axes.
    """
    models = request.args.getlist('model[]')
    cached_datas = []
    for path in models:
        model_data = g.model_data_manager.models[path]
        cached_datas.append(model_data.get_metrics(10))

    # Initialize to None so an empty model selection does not raise
    # UnboundLocalError below (original bug: prc/roc were only bound
    # inside the loops).
    prc = None
    roc = None

    # One shared axes per figure so every model's curve is overlaid.
    _, ax = plt.subplots(figsize=(12, 6))
    for name, cached_data in zip(models, cached_datas):
        prc = plots.precision_recall_curve(cached_data, ax=ax, label=name)

    _, ax = plt.subplots(figsize=(12, 6))
    for name, cached_data in zip(models, cached_datas):
        roc = plots.roc_curve(cached_data, ax=ax, label=name)

    context = {'precision_recall_curve': prc, 'roc_curve': roc}
    return render_template("compare.html", **context)