def plot_roc_from_results_averaged(y: Series, results: List[ModelCVResult], label: str = None) -> None:
    """Plot a single mean ROC curve averaged over every CV fold in *results*.

    Each fold's ROC curve is resampled onto a common FPR grid so the TPR
    values can be averaged point-wise; the legend shows the mean AUC and its
    (population) standard deviation across folds.

    Args:
        y: Ground-truth labels, indexed so each fold's scores can be aligned
           via ``y.loc[...]``.
        results: Cross-validation results; each provides a list of per-fold
            score frames under the ``'y_scores'`` key.
        label: Optional legend label; defaults to ``"ROC curve"``.

    Side effects: draws onto the current matplotlib axes; returns nothing.
    """
    # Shared FPR grid every fold's TPR is interpolated onto.
    normalized_fpr = np.linspace(0, 1, 99)

    def roc_curve_for_fold(y_score):
        # Column 1 is assumed to hold the positive-class score — TODO confirm
        # against the producer of ModelCVResult.
        fpr, tpr, _ = roc_curve(y.loc[y_score.index], y_score.iloc[:, 1])
        auc_value = auc(fpr, tpr)
        # Resample onto the shared grid so folds are point-wise comparable.
        normalized_tpr = np.interp(normalized_fpr, fpr, tpr)
        return normalized_tpr, auc_value

    tprs: Any
    aucs: Any
    tprs, aucs = zip(*flatten(
        [[roc_curve_for_fold(y_score) for y_score in result['y_scores']] for result in results]))

    mean_tpr = np.mean(tprs, axis=0)
    mean_auc: float = np.mean(aucs)
    std_auc: float = np.std(aucs, ddof=0)  # population std over folds

    # Single f-string instead of the previous f-string/%-format mix; output
    # text is identical ('%0.3f' and ':.3f' render the same).
    plt.plot(
        normalized_fpr,
        mean_tpr,
        lw=1.5,
        label=f'{"ROC curve" if not label else label} (AUC={mean_auc:.3f} ±{std_auc:.3f})')

    # Chance diagonal for reference.
    plt.plot([0, 1], [0, 1], color='#CCCCCC', lw=0.75, linestyle=':')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.legend(loc="lower right")
def test_flatten_to_tuple():
    """An already-flat tuple survives a flatten round-trip unchanged."""
    source = (1, 2, 3)
    flattened = tuple(functional.flatten(source))
    assert flattened == (1, 2, 3)
def test_flatten_combo():
    """A list mixing scalars with a nested tuple flattens in order, keeping duplicates."""
    nested = [1, 2, 3, 7, (4, 5, 6, 7)]
    expected = [1, 2, 3, 7, 4, 5, 6, 7]
    assert list(functional.flatten(nested)) == expected
def flattentostring(l):
    """Flatten *l* and concatenate its elements into one string.

    Non-string elements are replaced by the literal text ``"none"``.

    Args:
        l: An arbitrarily nested iterable (anything ``functional.flatten`` accepts).

    Returns:
        The concatenation of all flattened elements, with every non-``str``
        element rendered as ``"none"``.
    """
    # Generator expression fed straight to join — replaces the previous
    # manual append-loop; behavior is identical.
    return "".join(
        element if isinstance(element, str) else "none"
        for element in functional.flatten(l)
    )
def compare_metrics_in_table(
    metrics_for_methods: Dict[str, ClassificationMetricsWithStatistics],
    include: Tuple[str, ...] = ('balanced_accuracy', 'roc_auc', 'recall', 'fpr'),
    format_method_name: Callable[[str], str] = identity,
    include_ci_for: Set[str] = None,
    include_delta: bool = False,
) -> List[List]:
    """Build a formatted comparison table of classification metrics per method.

    Produces a header row followed by one row per method, sorted so the
    method with the highest first included metric comes first.

    Args:
        metrics_for_methods: Mapping of method name -> aggregated metrics.
        include: Metric keys to show, in column order.
        format_method_name: Transform applied to each method name for display.
        include_ci_for: Metric keys that also get a "95% CI" column;
            defaults to all of *include* when None.
        include_delta: When True, add a "Δ" column per metric holding the
            difference from the best method's value for that metric.

    Returns:
        A list of rows (header first) after ``format_structure`` applies
        ``format_decimal`` to the numeric cells.
    """
    # Default: show confidence intervals for every included metric.
    if include_ci_for is None:
        include_ci_for = include

    def get_line(
        method: str,
        metrics: Union[ClassificationMetrics, ClassificationMetricsWithStatistics]
    ):
        # One table row: method name, then per metric — the mean, optionally
        # the delta to the best method, optionally the formatted 95% CI.
        return [
            format_method_name(method),
            *pipe(
                [
                    [
                        metrics[metric].mean,
                        (
                            metrics[metric].mean -
                            get_max_metric_value(metric, metrics_for_methods.values())
                        ) if include_delta else None,
                    ] + ([format_ci(metrics[metric].ci)] if metric in include_ci_for else [])
                    for metric in include
                ],
                flatten,
                # NOTE(review): `compact` presumably drops the None placeholders
                # inserted when include_delta is False — confirm it does not
                # also drop legitimate 0.0 means.
                compact,
            ),
        ]

    # Sort rows by the value in column 1 (the first included metric's mean),
    # best first.
    lines = pipe(
        [get_line(method, metrics) for method, metrics in metrics_for_methods.items()],
        partial(sorted, key=get(1), reverse=True),
    )

    # Header row mirrors the per-row layout: short metric name, optional Δ,
    # optional "95% CI"; first cell is blank (method-name column).
    return format_structure(
        format_decimal,
        [
            [
                '',
                *flatten(
                    map(
                        lambda metric: [format_metric_short(metric), *(['Δ'] if include_delta else [])]
                        + (['95% CI'] if metric in include_ci_for else []),
                        include
                    )
                )
            ],
            *lines,
        ],
    )
def test_flatten_tuple():
    """A tuple with a nested tuple in the middle flattens to one level."""
    nested = (1, 2, (3, 4, 5), 6)
    assert tuple(functional.flatten(nested)) == (1, 2, 3, 4, 5, 6)
def test_flatten():
    """flatten handles empty, flat, and arbitrarily nested lists."""
    cases = [
        ([], []),
        ([1, 2, 3], [1, 2, 3]),
        ([[1, 2], 3], [1, 2, 3]),
        ([1, [2, 3]], [1, 2, 3]),
        ([1, [2, [3]]], [1, 2, 3]),
        ([[[1], [2], [3]]], [1, 2, 3]),
    ]
    for nested, expected in cases:
        assert functional.flatten(nested) == expected
def flatten(x):
    """Thin wrapper delegating flattening of *x* to ``functional.flatten``."""
    result = functional.flatten(x)
    return result
def test_group():
    """group splits a sequence into runs of equal adjacent values."""
    assert functional.group([1, 1, 2, 3, 3, 1, 4, 4, 4]) == [
        [1, 1],
        [2],
        [3, 3],
        [1],
        [4, 4, 4],
    ]
    # Flattening the groups must reproduce the original sequence exactly.
    original = [1, 1, 2, 3, 3, 1, 4, 4, 4]
    assert functional.flatten(functional.group(original)) == original
def test_flatten_string():
    """A bare string is iterated character by character, not treated as atomic."""
    text = 'string'
    assert list(functional.flatten(text)) == list(text)
def test_flatten_list():
    """Deeply right-nested lists flatten to a single level in order."""
    nested = [0, [1, [2, 3]]]
    assert list(functional.flatten(nested)) == [0, 1, 2, 3]