def main():
    # Summarize holdout scores for the G (NR EfficientNet-B6) folds and save the
    # best-scoring blend of their test predictions as a submission file.
    output_dir = os.path.dirname(__file__)

    experiments = [
        "G_Jul03_21_14_nr_rgb_tf_efficientnet_b6_ns_fold0_local_rank_0_fp16",
        "G_Jul05_00_24_nr_rgb_tf_efficientnet_b6_ns_fold1_local_rank_0_fp16",
        "G_Jul06_03_39_nr_rgb_tf_efficientnet_b6_ns_fold2_local_rank_0_fp16",
        "G_Jul07_06_38_nr_rgb_tf_efficientnet_b6_ns_fold3_local_rank_0_fp16",
    ]

    scoring_fn = alaska_weighted_auc

    for metric in [
        # "loss",
        # "bauc",
        "cauc"
    ]:
        holdout_predictions_d4 = get_predictions_csv(experiments, metric, "holdout", "d4")
        oof_predictions_d4 = get_predictions_csv(experiments, metric, "oof", "d4")
        test_predictions_d4 = get_predictions_csv(experiments, metric, "test", "d4")
        fnames_for_checksum = [x + metric for x in experiments]

        bin_pred_d4 = make_binary_predictions(holdout_predictions_d4)
        y_true = bin_pred_d4[0].y_true_type.values
        bin_pred_d4_score = scoring_fn(y_true, blend_predictions_mean(bin_pred_d4).Label)

        cls_pred_d4 = make_classifier_predictions(holdout_predictions_d4)
        cls_pred_d4_score = scoring_fn(y_true, blend_predictions_mean(cls_pred_d4).Label)

        bin_pred_d4_cal = make_binary_predictions_calibrated(holdout_predictions_d4, oof_predictions_d4)
        bin_pred_d4_cal_score = scoring_fn(y_true, blend_predictions_mean(bin_pred_d4_cal).Label)

        cls_pred_d4_cal = make_classifier_predictions_calibrated(holdout_predictions_d4, oof_predictions_d4)
        cls_pred_d4_cal_score = scoring_fn(y_true, blend_predictions_mean(cls_pred_d4_cal).Label)

        prod_pred_d4_cal_score = scoring_fn(
            y_true, blend_predictions_mean(cls_pred_d4_cal).Label * blend_predictions_mean(bin_pred_d4_cal).Label
        )

        print(metric, "Bin NC", "d4", bin_pred_d4_score)
        print(metric, "Cls NC", "d4", cls_pred_d4_score)
        print(metric, "Bin CL", "d4", bin_pred_d4_cal_score)
        print(metric, "Cls CL", "d4", cls_pred_d4_cal_score)
        print(metric, "Prod  ", "d4", prod_pred_d4_cal_score)

        max_score = max(
            bin_pred_d4_score, cls_pred_d4_score, bin_pred_d4_cal_score, cls_pred_d4_cal_score, prod_pred_d4_cal_score
        )

        if bin_pred_d4_score == max_score:
            predictions = make_binary_predictions(test_predictions_d4)
            predictions = blend_predictions_mean(predictions)
            predictions.to_csv(
                os.path.join(output_dir, f"mean_{max_score:.4f}_bin_{compute_checksum_v2(fnames_for_checksum)}.csv"),
                index=False,
            )

        if bin_pred_d4_cal_score == max_score:
            predictions = make_binary_predictions_calibrated(test_predictions_d4, oof_predictions_d4)
            predictions = blend_predictions_mean(predictions)
            predictions.to_csv(
                os.path.join(
                    output_dir, f"mean_{max_score:.4f}_bin_cal_{compute_checksum_v2(fnames_for_checksum)}.csv"
                ),
                index=False,
            )

        if cls_pred_d4_score == max_score:
            predictions = make_classifier_predictions(test_predictions_d4)
            predictions = blend_predictions_mean(predictions)
            predictions.to_csv(
                os.path.join(output_dir, f"mean_{max_score:.4f}_cls_{compute_checksum_v2(fnames_for_checksum)}.csv"),
                index=False,
            )

        if cls_pred_d4_cal_score == max_score:
            predictions = make_classifier_predictions_calibrated(test_predictions_d4, oof_predictions_d4)
            predictions = blend_predictions_mean(predictions)
            predictions.to_csv(
                os.path.join(
                    output_dir, f"mean_{max_score:.4f}_cls_cal_{compute_checksum_v2(fnames_for_checksum)}.csv"
                ),
                index=False,
            )

        if prod_pred_d4_cal_score == max_score:
            cls_predictions = make_classifier_predictions_calibrated(test_predictions_d4, oof_predictions_d4)
            bin_predictions = make_binary_predictions_calibrated(test_predictions_d4, oof_predictions_d4)

            predictions1 = blend_predictions_mean(cls_predictions)
            predictions2 = blend_predictions_mean(bin_predictions)

            predictions = predictions1.copy()
            predictions.Label = predictions1.Label * predictions2.Label
            predictions.to_csv(
                os.path.join(
                    output_dir, f"mean_{max_score:.4f}_prod_cal_{compute_checksum_v2(fnames_for_checksum)}.csv"
                ),
                index=False,
            )
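# --- Reference sketch (not from the original repository) ---------------------
# alaska_weighted_auc, used as scoring_fn above, is assumed to implement the ALASKA2
# competition metric: a ROC AUC in which the region TPR in [0.0, 0.4] is weighted twice
# as much as TPR in [0.4, 1.0]. The function below is an illustrative re-implementation
# of that published definition, not the project's own code.
import numpy as np
from sklearn import metrics


def weighted_auc_sketch(y_true, y_pred):
    tpr_thresholds = [0.0, 0.4, 1.0]
    weights = [2, 1]

    fpr, tpr, _ = metrics.roc_curve(y_true, y_pred, pos_label=1)
    # Normalize by the best achievable weighted area so the score lies in [0, 1]
    normalization = np.dot(np.diff(tpr_thresholds), weights)

    total = 0.0
    for idx, weight in enumerate(weights):
        y_min, y_max = tpr_thresholds[idx], tpr_thresholds[idx + 1]
        mask = (y_min < tpr) & (tpr < y_max)
        # Pad the curve to FPR = 1 so each TPR band is integrated over the full x-range
        x_padding = np.linspace(fpr[mask][-1] if mask.any() else 0.0, 1, 100)
        x = np.concatenate([fpr[mask], x_padding])
        y = np.concatenate([tpr[mask], [y_max] * len(x_padding)]) - y_min
        total += metrics.auc(x, y) * weight

    return total / normalization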
import pandas as pd
from scipy.stats import spearmanr

from alaska2.submissions import blend_predictions_ranked, blend_predictions_mean

submission_v25_xl_NR_moreTTA = pd.read_csv("submission_v25_xl_NR_moreTTA.csv").sort_values(by="Id")
stacked_b6_xgb_cv = pd.read_csv("662cfbbddf616db0df6f59ee2a96cc20_xgb_cv_0.9485.csv")

print(spearmanr(submission_v25_xl_NR_moreTTA.Label, stacked_b6_xgb_cv.Label))

blend_1_ranked = blend_predictions_ranked([submission_v25_xl_NR_moreTTA, stacked_b6_xgb_cv])
blend_1_ranked.to_csv("blend_1_ranked.csv", index=False)

blend_1_mean = blend_predictions_mean([submission_v25_xl_NR_moreTTA, stacked_b6_xgb_cv])
blend_1_mean.to_csv("blend_1_mean.csv", index=False)
import pandas as pd
from scipy.stats import spearmanr

from alaska2.submissions import blend_predictions_ranked, blend_predictions_mean

submission_v25_xl_NR_moreTTA = pd.read_csv("submission_v25_xl_NR_moreTTA.csv").sort_values(by="Id").reset_index()
submission_b6_mean_calibrated = pd.read_csv(
    "662cfbbddf616db0df6f59ee2a96cc20_best_cauc_blend_cls_mean_calibrated_0.9422.csv"
)

# Where my submission flags an image as out-of-range (Label > 1.0), force the same 1.01
# value into the calibrated B6 submission before blending
oor_mask = submission_v25_xl_NR_moreTTA.Label > 1.0
submission_b6_mean_calibrated.loc[oor_mask, "Label"] = 1.01

print(spearmanr(submission_v25_xl_NR_moreTTA.Label, submission_b6_mean_calibrated.Label))

blend_3_ranked = blend_predictions_ranked([submission_v25_xl_NR_moreTTA, submission_b6_mean_calibrated])
blend_3_ranked.to_csv("blend_3_ranked_from_v25_xl_NR_moreTTA_and_b6_cauc_mean_calibrated.csv", index=False)

blend_3_mean = blend_predictions_mean([submission_v25_xl_NR_moreTTA, submission_b6_mean_calibrated])
blend_3_mean.to_csv("blend_3_mean_from_v25_xl_NR_moreTTA_and_b6_cauc_mean_calibrated.csv", index=False)
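# --- Reference sketch (not from the original repository) ---------------------
# blend_predictions_ranked / blend_predictions_mean come from the project's
# alaska2.submissions module. The helpers below only illustrate the general idea,
# assuming each submission is a DataFrame with aligned "Id" and "Label" columns;
# they are not the project's implementation.
import numpy as np
from scipy.stats import rankdata


def blend_mean_sketch(submissions):
    # Plain arithmetic mean of the Label columns
    blend = submissions[0][["Id"]].copy()
    blend["Label"] = np.mean([s["Label"].values for s in submissions], axis=0)
    return blend


def blend_ranked_sketch(submissions):
    # Replace each Label column by its normalized rank before averaging, which makes the
    # blend insensitive to differences in score calibration between the blended models
    blend = submissions[0][["Id"]].copy()
    ranks = [rankdata(s["Label"].values) / len(s) for s in submissions]
    blend["Label"] = np.mean(ranks, axis=0)
    return blend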
def main():
    output_dir = os.path.dirname(__file__)

    experiments = [
        # A models were trained on the old folds without a holdout, so evaluating them here would leak.
        # "A_May24_11_08_ela_skresnext50_32x4d_fold0_fp16",
        # "A_May15_17_03_ela_skresnext50_32x4d_fold1_fp16",
        # "A_May21_13_28_ela_skresnext50_32x4d_fold2_fp16",
        # "A_May26_12_58_ela_skresnext50_32x4d_fold3_fp16",
        #
        # "B_Jun05_08_49_rgb_tf_efficientnet_b6_ns_fold0_local_rank_0_fp16",
        # "B_Jun09_16_38_rgb_tf_efficientnet_b6_ns_fold1_local_rank_0_fp16",
        # "B_Jun11_08_51_rgb_tf_efficientnet_b6_ns_fold2_local_rank_0_fp16",
        # "B_Jun11_18_38_rgb_tf_efficientnet_b6_ns_fold3_local_rank_0_fp16",
        #
        # "C_Jun24_22_00_rgb_tf_efficientnet_b2_ns_fold2_local_rank_0_fp16",
        #
        # "D_Jun18_16_07_rgb_tf_efficientnet_b7_ns_fold1_local_rank_0_fp16",
        # "D_Jun20_09_52_rgb_tf_efficientnet_b7_ns_fold2_local_rank_0_fp16",
        #
        # "E_Jun18_19_24_rgb_tf_efficientnet_b6_ns_fold0_local_rank_0_fp16",
        # "E_Jun21_10_48_rgb_tf_efficientnet_b6_ns_fold0_istego100k_local_rank_0_fp16",
        #
        # "F_Jun29_19_43_rgb_tf_efficientnet_b3_ns_fold0_local_rank_0_fp16",
        #
        "G_Jul03_21_14_nr_rgb_tf_efficientnet_b6_ns_fold0_local_rank_0_fp16",
        "G_Jul05_00_24_nr_rgb_tf_efficientnet_b6_ns_fold1_local_rank_0_fp16",
        "G_Jul06_03_39_nr_rgb_tf_efficientnet_b6_ns_fold2_local_rank_0_fp16",
        "G_Jul07_06_38_nr_rgb_tf_efficientnet_b6_ns_fold3_local_rank_0_fp16",
    ]

    test_predictions_d4 = get_predictions_csv(experiments, "cauc", "test", "d4")

    # Average the per-model softmax over modification types and inspect the class distribution
    classes = []
    for x in test_predictions_d4:
        df = pd.read_csv(x)
        df = df.rename(columns={"image_id": "Id"})
        df["classes"] = df["pred_modification_type"].apply(parse_and_softmax)
        classes.append(df["classes"].tolist())

    classes = np.mean(classes, axis=0)
    print("Class distribution", np.bincount(classes.argmax(axis=1)))

    bin_probas = np.stack([classes[:, 0], 1 - classes[:, 0]])
    bin_classes = bin_probas.argmax(axis=0)

    classes_cp = classes.copy()
    classes_cp[bin_classes == 1, 0] = 0
    print("Class distribution", np.bincount(classes_cp.argmax(axis=1)))

    plt.figure()
    plt.hist(classes[:, 0], bins=100, alpha=0.25, label="Cover")
    plt.hist(classes[:, 1], bins=100, alpha=0.25, label="JMiPOD")
    plt.hist(classes[:, 2], bins=100, alpha=0.25, label="JUNIWARD")
    plt.hist(classes[:, 3], bins=100, alpha=0.25, label="UERD")
    plt.yscale("log")
    plt.legend()
    plt.show()

    # Estimate the metric's variance on the holdout via repeated bagging
    holdout_predictions_d4 = get_predictions_csv(experiments, "cauc", "holdout", "d4")
    holdout_predictions_d4 = make_product_predictions(holdout_predictions_d4)
    y_true_type = holdout_predictions_d4[0].y_true_type

    holdout_predictions_d4 = blend_predictions_mean(holdout_predictions_d4)
    scores = evaluate_wauc_shakeup_using_bagging(holdout_predictions_d4, y_true_type, 10000)

    plt.figure()
    plt.hist(scores, bins=100, alpha=0.5, label=f"{np.mean(scores):.5f} +- {np.std(scores):.6f}")
    plt.legend()
    plt.show()

    print(np.mean(scores), np.std(scores))
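# --- Reference sketch (not from the original repository) ---------------------
# evaluate_wauc_shakeup_using_bagging is assumed to estimate the possible leaderboard
# shakeup by repeatedly scoring random subsamples of the holdout predictions: the spread
# of those scores shows how much the weighted AUC could move on a test set of comparable
# size. A minimal illustration; the helper name, subsample size, and the binarization of
# y_true_type are assumptions, and the metric is passed in explicitly.
import numpy as np


def shakeup_by_bagging_sketch(predictions, y_true_type, n_iters, scoring_fn, sample_size=5000, seed=42):
    rng = np.random.RandomState(seed)
    labels = predictions.Label.values
    y_true = (np.asarray(y_true_type) > 0).astype(int)  # cover (0) vs. any stego class

    scores = []
    for _ in range(n_iters):
        idx = rng.choice(len(labels), size=sample_size, replace=True)
        scores.append(scoring_fn(y_true[idx], labels[idx]))
    return np.array(scores)


# Example usage, mirroring the call above:
# scores = shakeup_by_bagging_sketch(holdout_predictions_d4, y_true_type, 10000, alaska_weighted_auc)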
def main():
    output_dir = os.path.dirname(__file__)

    experiments = [
        "B_Jun05_08_49_rgb_tf_efficientnet_b6_ns_fold0_local_rank_0_fp16",
        "B_Jun09_16_38_rgb_tf_efficientnet_b6_ns_fold1_local_rank_0_fp16",
        "B_Jun11_08_51_rgb_tf_efficientnet_b6_ns_fold2_local_rank_0_fp16",
        "B_Jun11_18_38_rgb_tf_efficientnet_b6_ns_fold3_local_rank_0_fp16",
        "G_Jul03_21_14_nr_rgb_tf_efficientnet_b6_ns_fold0_local_rank_0_fp16",
        "G_Jul05_00_24_nr_rgb_tf_efficientnet_b6_ns_fold1_local_rank_0_fp16",
        "G_Jul06_03_39_nr_rgb_tf_efficientnet_b6_ns_fold2_local_rank_0_fp16",
        "G_Jul07_06_38_nr_rgb_tf_efficientnet_b6_ns_fold3_local_rank_0_fp16",
    ]

    for metric in [
        # "loss",
        # "bauc",
        "cauc"
    ]:
        holdout_predictions_d4 = get_predictions_csv(experiments, metric, "holdout", "d4")
        oof_predictions_d4 = get_predictions_csv(experiments, metric, "oof", "d4")
        test_predictions_d4 = get_predictions_csv(experiments, metric, "test", "d4")

        hld_bin_pred_d4 = make_binary_predictions(holdout_predictions_d4)
        hld_y_true = hld_bin_pred_d4[0].y_true_type.values

        oof_bin_pred_d4 = make_binary_predictions(oof_predictions_d4)

        hld_cls_pred_d4 = make_classifier_predictions(holdout_predictions_d4)
        oof_cls_pred_d4 = make_classifier_predictions(oof_predictions_d4)

        bin_pred_d4_cal = make_binary_predictions_calibrated(holdout_predictions_d4, oof_predictions_d4)
        cls_pred_d4_cal = make_classifier_predictions_calibrated(holdout_predictions_d4, oof_predictions_d4)

        print("    ", "      ", "     OOF", "  OOF 5K", "  OOF 1K", "     HLD", "  HLD 5K", "  HLD 1K")
        print(
            metric,
            "Bin NC",
            "{:.6f}\t{:.6f}\t{:.6f}\t{:.6f}\t{:.6f}\t{:.6f}".format(
                np.mean([alaska_weighted_auc(x.y_true_type, x.Label) for x in oof_bin_pred_d4]),
                np.mean([shaky_wauc(x.y_true_type, x.Label) for x in oof_bin_pred_d4]),
                np.mean([shaky_wauc_public(x.y_true_type, x.Label) for x in oof_bin_pred_d4]),
                alaska_weighted_auc(hld_y_true, blend_predictions_mean(hld_bin_pred_d4).Label),
                shaky_wauc(hld_y_true, blend_predictions_mean(hld_bin_pred_d4).Label),
                shaky_wauc_public(hld_y_true, blend_predictions_mean(hld_bin_pred_d4).Label),
            ),
        )
        print(
            metric,
            "Cls NC",
            "{:.6f}\t{:.6f}\t{:.6f}\t{:.6f}\t{:.6f}\t{:.6f}".format(
                np.mean([alaska_weighted_auc(x.y_true_type, x.Label) for x in oof_cls_pred_d4]),
                np.mean([shaky_wauc(x.y_true_type, x.Label) for x in oof_cls_pred_d4]),
                np.mean([shaky_wauc_public(x.y_true_type, x.Label) for x in oof_cls_pred_d4]),
                alaska_weighted_auc(hld_y_true, blend_predictions_mean(hld_cls_pred_d4).Label),
                shaky_wauc(hld_y_true, blend_predictions_mean(hld_cls_pred_d4).Label),
                shaky_wauc_public(hld_y_true, blend_predictions_mean(hld_cls_pred_d4).Label),
            ),
        )
        print(
            metric,
            "Bin CL",
            "                                          {:.6f}\t{:.6f}\t{:.6f}".format(
                alaska_weighted_auc(hld_y_true, blend_predictions_mean(bin_pred_d4_cal).Label),
                shaky_wauc(hld_y_true, blend_predictions_mean(bin_pred_d4_cal).Label),
                shaky_wauc_public(hld_y_true, blend_predictions_mean(bin_pred_d4_cal).Label),
            ),
        )
        print(
            metric,
            "Cls CL",
            "                                          {:.6f}\t{:.6f}\t{:.6f}".format(
                alaska_weighted_auc(hld_y_true, blend_predictions_mean(cls_pred_d4_cal).Label),
                shaky_wauc(hld_y_true, blend_predictions_mean(cls_pred_d4_cal).Label),
                shaky_wauc_public(hld_y_true, blend_predictions_mean(cls_pred_d4_cal).Label),
            ),
        )
        print(
            metric,
            "Prd NC",
            "{:.6f}\t{:.6f}\t{:.6f}\t{:.6f}\t{:.6f}\t{:.6f}".format(
                np.mean(
                    [
                        alaska_weighted_auc(x.y_true_type, x.Label * y.Label)
                        for (x, y) in zip(oof_bin_pred_d4, oof_cls_pred_d4)
                    ]
                ),
                np.mean(
                    [shaky_wauc(x.y_true_type, x.Label * y.Label) for (x, y) in zip(oof_bin_pred_d4, oof_cls_pred_d4)]
                ),
                np.mean(
                    [
                        shaky_wauc_public(x.y_true_type, x.Label * y.Label)
                        for (x, y) in zip(oof_bin_pred_d4, oof_cls_pred_d4)
                    ]
                ),
                alaska_weighted_auc(
                    hld_y_true,
                    blend_predictions_mean(bin_pred_d4_cal).Label * blend_predictions_mean(cls_pred_d4_cal).Label,
                ),
                shaky_wauc(
                    hld_y_true,
                    blend_predictions_mean(bin_pred_d4_cal).Label * blend_predictions_mean(cls_pred_d4_cal).Label,
                ),
                shaky_wauc_public(
                    hld_y_true,
                    blend_predictions_mean(bin_pred_d4_cal).Label * blend_predictions_mean(cls_pred_d4_cal).Label,
                ),
            ),
        )
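# --- Reference sketch (not from the original repository) ---------------------
# make_binary_predictions_calibrated / make_classifier_predictions_calibrated are assumed
# to fit a per-model monotonic calibration on the OOF predictions and apply it to the same
# model's holdout/test predictions. The sketch below uses isotonic regression for that step;
# the project's actual calibration method may differ.
import numpy as np
from sklearn.isotonic import IsotonicRegression


def calibrate_sketch(oof_pred, oof_y_true, target_pred):
    # oof_pred / target_pred: 1D score arrays; oof_y_true: binary labels (0 = cover, 1 = stego)
    calibrator = IsotonicRegression(out_of_bounds="clip")
    calibrator.fit(np.asarray(oof_pred), np.asarray(oof_y_true))
    return calibrator.predict(np.asarray(target_pred))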
def main():
    output_dir = os.path.dirname(__file__)

    experiments = [
        "G_Jul03_21_14_nr_rgb_tf_efficientnet_b6_ns_fold0_local_rank_0_fp16",
        "G_Jul05_00_24_nr_rgb_tf_efficientnet_b6_ns_fold1_local_rank_0_fp16",
        "G_Jul06_03_39_nr_rgb_tf_efficientnet_b6_ns_fold2_local_rank_0_fp16",
        "G_Jul07_06_38_nr_rgb_tf_efficientnet_b6_ns_fold3_local_rank_0_fp16",
        #
        "H_Jul12_18_42_nr_rgb_tf_efficientnet_b7_ns_mish_fold1_local_rank_0_fp16",
        "H_Jul11_16_37_nr_rgb_tf_efficientnet_b7_ns_mish_fold2_local_rank_0_fp16",
        #
        "K_Jul17_17_09_nr_rgb_tf_efficientnet_b6_ns_mish_fold0_local_rank_0_fp16",
        "K_Jul18_16_41_nr_rgb_tf_efficientnet_b6_ns_mish_fold3_local_rank_0_fp16",
        #
        "J_Jul19_20_10_nr_rgb_tf_efficientnet_b7_ns_mish_fold1_local_rank_0_fp16",
    ]

    holdout_predictions = get_predictions_csv(experiments, "cauc", "holdout", "d4")
    test_predictions = get_predictions_csv(experiments, "cauc", "test", "d4")

    fnames_for_checksum = np.array(
        [x + "cauc_bin" for x in experiments]
        # + [x + "loss_bin" for x in experiments]
        + [x + "cauc_cls" for x in experiments]
        # + [x + "loss_cls" for x in experiments]
    )

    X = make_binary_predictions(holdout_predictions) + make_classifier_predictions(holdout_predictions)
    y_true = X[0].y_true_type.values
    X = np.array([x.Label.values for x in X])
    assert len(fnames_for_checksum) == X.shape[0]

    X_test = make_binary_predictions(test_predictions) + make_classifier_predictions(test_predictions)

    indices = np.arange(len(X))

    for r in range(2, 8):
        best_comb = None
        best_auc = 0

        combs = list(itertools.combinations(indices, r))
        for c in tqdm(combs, desc=f"{r}"):
            avg_preds = X[np.array(c)].mean(axis=0)
            score_averaging = alaska_weighted_auc(y_true, avg_preds)
            if score_averaging > best_auc:
                best_auc = score_averaging
                best_comb = c

        print(r, best_auc, best_comb)
        checksum = compute_checksum_v2(fnames_for_checksum[np.array(best_comb)])

        test_preds = [X_test[i] for i in best_comb]
        test_preds = blend_predictions_mean(test_preds)
        test_preds.to_csv(os.path.join(output_dir, f"cmb_mean_{best_auc:.4f}_{r}_{checksum}.csv"), index=False)

    for r in range(2, 8):
        best_comb = None
        best_auc = 0

        combs = list(itertools.combinations(indices, r))
        for c in tqdm(combs, desc=f"{r}"):
            rnk_preds = rankdata(X[np.array(c)], axis=1).mean(axis=0)
            score_averaging = alaska_weighted_auc(y_true, rnk_preds)
            if score_averaging > best_auc:
                best_auc = score_averaging
                best_comb = c

        print(r, best_auc, best_comb)
        checksum = compute_checksum_v2(fnames_for_checksum[np.array(best_comb)])

        test_preds = [X_test[i] for i in best_comb]
        test_preds = blend_predictions_mean(test_preds)
        test_preds.to_csv(os.path.join(output_dir, f"cmb_rank_{best_auc:.4f}_{r}_{checksum}.csv"), index=False)