Example 1
def test_median_kappa_on_idrid():
    y_preds = []
    for index, predictions in enumerate(idrid_predictions):
        if not isinstance(predictions, pd.DataFrame):
            predictions = pd.read_pickle(predictions)

        y_true = predictions['diagnosis'].values
        y_pred = predictions['ordinal'].values
        y_preds.append(y_pred)

        print(
            'Score on Idrid-test', index,
            cohen_kappa_score(y_true,
                              regression_to_class(y_pred),
                              weights='quadratic'))

    y_preds = np.vstack(y_preds)
    y_pred_median = np.median(y_preds, axis=0)

    y_pred_avg = np.mean(y_preds, axis=0)
    print(
        'Score on Idrid-test',
        cohen_kappa_score(y_true,
                          regression_to_class(y_pred_avg),
                          weights='quadratic'),
        cohen_kappa_score(y_true,
                          regression_to_class(y_pred_median),
                          weights='quadratic'))
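
Example 1 blends the per-fold predictions twice, once with the mean and once with the median. The median is the more robust choice when a single model is badly off; a minimal standalone illustration (the numbers below are made up for the demonstration):

import numpy as np

# Four models score the same sample; one is an outlier.
per_model = np.array([1.1, 0.9, 1.0, 3.8])

print(np.mean(per_model))    # 1.7  -> rounds up to grade 2
print(np.median(per_model))  # 1.05 -> stays at grade 1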
Example 2
def test_optimize_kappa_on_idrid():
    average_predictions = None
    for index, predictions in enumerate(idrid_predictions):
        if not isinstance(predictions, pd.DataFrame):
            predictions = pd.read_pickle(predictions)

        y_true = predictions['diagnosis'].values
        y_pred = predictions['ordinal'].values

        if average_predictions is None:
            average_predictions = y_pred.copy()
        else:
            average_predictions += y_pred

        print(
            'Score on Idrid-test', index,
            cohen_kappa_score(y_true,
                              regression_to_class(y_pred),
                              weights='quadratic'))

    average_predictions /= len(idrid_predictions)

    rounder = OptimizedRounder()
    rounder.fit(average_predictions, y_true)
    print(rounder.coefficients())
    print(
        'Score on Idrid-test',
        cohen_kappa_score(y_true,
                          regression_to_class(average_predictions),
                          weights='quadratic'),
        cohen_kappa_score(y_true,
                          regression_to_class(
                              average_predictions,
                              rounding_coefficients=rounder.coefficients()),
                          weights='quadratic'))
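
OptimizedRounder is not defined in this listing. The sketch below follows the common Kaggle pattern of tuning the four class-boundary thresholds with Nelder-Mead so that quadratic kappa is maximized; the fit/predict/coefficients names match the calls above, but the internals are an assumption:

from functools import partial

import numpy as np
import scipy.optimize as opt
from sklearn.metrics import cohen_kappa_score


class OptimizedRounder:
    """Tunes the thresholds that bucket a continuous score into grades 0-4."""

    def __init__(self):
        self.coef_ = None

    def _kappa_loss(self, coef, x, y_true):
        # Sort the candidate thresholds so the bucketing stays monotonic.
        y_pred = np.digitize(x, np.sort(coef))
        return -cohen_kappa_score(y_true, y_pred, weights='quadratic')

    def fit(self, x, y_true):
        initial = [0.5, 1.5, 2.5, 3.5]
        self.coef_ = opt.minimize(partial(self._kappa_loss, x=x, y_true=y_true),
                                  initial, method='nelder-mead').x

    def predict(self, x, coef=None):
        coef = self.coef_ if coef is None else coef
        return np.digitize(x, np.sort(coef))

    def coefficients(self):
        return self.coef_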
Example 3
def test_pseudolabeling_aptos2015_round1(predictions, output_csv):
    print('Saving pseudolabels to', output_csv)
    num_models = len(predictions)
    ids, x, y_true, y_average = prepare_inference_datasets(
        predictions, use_features=False, use_predictions=True)

    for i in range(num_models):
        print(
            fs.id_from_fname(predictions[i]),
            cohen_kappa_score(y_true,
                              regression_to_class(x[:, i]),
                              weights='quadratic'))

    y_round = to_numpy(regression_to_class(x))
    y_major = majority_voting(y_round, axis=1)

    y_agreement = y_round == np.expand_dims(y_major, -1)

    # y_agreement_all = np.all(y_agreement, axis=1)
    # y_agreement_all = np.sum(y_agreement, axis=1) >= 16
    y_agreement_all = y_major == y_true

    print('Agreement', np.mean(y_agreement_all))
    print('Distribution', np.bincount(y_major[y_agreement_all]))

    y_true[~y_agreement_all] = -100
    print(y_round)
    df = pd.DataFrame.from_dict({'id_code': ids, 'diagnosis': y_true})
    df.to_csv(output_csv, index=False)
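
majority_voting is also external to this listing. A minimal stand-in consistent with how it is called here, taking the per-sample vote across the model axis (the implementation is an assumption):

import numpy as np


def majority_voting(labels, axis=1):
    # labels is an integer array of shape (num_samples, num_models) when
    # axis=1; ties resolve to the smallest class, as bincount's argmax does.
    return np.apply_along_axis(lambda row: np.bincount(row).argmax(),
                               axis, labels)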
Example 4
def test_optimize_kappa_on_aptos2015_v2():
    pl1 = pd.read_csv(
        '../data/aptos-2015/test_private_pseudolabel_round_1.csv')
    labeled_gt = dict(
        (row['id_code'], row['diagnosis']) for i, row in pl1.iterrows())

    ids, train_x, train_y, train_y_avg = prepare_inference_datasets(
        aptos2015_predictions, use_features=False, use_predictions=True)
    mask = np.zeros(len(ids), dtype=bool)
    for i, sample_id in enumerate(ids):
        mask[i] = labeled_gt[sample_id] >= 0

    ids = ids[mask]
    train_x = train_x[mask]
    train_y = train_y[mask]
    train_y_avg = train_y_avg[mask]

    ids, val_x, val_y, val_y_avg = prepare_inference_datasets(
        idrid_predictions, use_features=False, use_predictions=True)
    rounder = OptimizedRounderV2()
    rounder.fit(train_x, train_y)
    print(rounder.coefficients())
    print(
        'Score on APTOS',
        cohen_kappa_score(train_y,
                          regression_to_class(train_y_avg),
                          weights='quadratic'),
        cohen_kappa_score(train_y,
                          rounder.predict(train_x),
                          weights='quadratic'))

    print(
        'Score on IDRID',
        cohen_kappa_score(val_y,
                          regression_to_class(val_y_avg),
                          weights='quadratic'),
        cohen_kappa_score(val_y, rounder.predict(val_x), weights='quadratic'))

    # Vice versa
    rounder = OptimizedRounderV2()
    rounder.fit(val_x, val_y)
    print(rounder.coefficients())
    print(
        'Score on IDRID',
        cohen_kappa_score(val_y,
                          regression_to_class(val_y_avg),
                          weights='quadratic'),
        cohen_kappa_score(val_y, rounder.predict(val_x), weights='quadratic'))

    print(
        'Score on APTOS',
        cohen_kappa_score(train_y,
                          regression_to_class(train_y_avg),
                          weights='quadratic'),
        cohen_kappa_score(train_y,
                          rounder.predict(train_x),
                          weights='quadratic'))
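
OptimizedRounderV2 is equally opaque here; the only visible difference from the first version is that it is fit on the full per-model matrix rather than on a pre-averaged vector. A minimal sketch that reconciles the two call signatures by averaging internally, building on the OptimizedRounder sketch after Example 2 (pure assumption):

import numpy as np


class OptimizedRounderV2(OptimizedRounder):
    # Accepts the stacked (num_samples, num_models) matrix directly.

    def fit(self, x, y_true):
        super().fit(np.mean(x, axis=1), y_true)

    def predict(self, x, coef=None):
        return super().predict(np.mean(x, axis=1), coef)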
Example 5
def test_rf_on_idrid():
    ids, train_x, train_y, train_y_avg = prepare_inference_datasets(
        aptos2015_predictions, use_features=False, use_predictions=True)
    ids, val_x, val_y, val_y_avg = prepare_inference_datasets(
        idrid_predictions, use_features=False, use_predictions=True)

    # {'criterion': 'gini', 'max_depth': 12, 'n_estimators': 64}
    # RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
    #                        max_depth=12, max_features='auto', max_leaf_nodes=None,
    #                        min_impurity_decrease=0.0, min_impurity_split=None,
    #                        min_samples_leaf=1, min_samples_split=2,
    #                        min_weight_fraction_leaf=0.0, n_estimators=64,
    #                        n_jobs=None, oob_score=False, random_state=None,
    #                        verbose=0, warm_start=False)
    # RF on Train 0.8058995695509186 0.9164135768322392
    # RF on IDRID 0.8260826380611839 0.8900600400266845

    # create a dictionary of all values we want to test for n_neighbors
    params_rf = {
        'n_estimators': [8, 16, 32, 64, 128],
        'criterion': ['gini', 'entropy'],
        'max_depth': [2, 4, 6, 8, 12],
    }

    forest_gs = GridSearchCV(RandomForestClassifier(),
                             params_rf,
                             cv=5,
                             verbose=1,
                             n_jobs=4)
    forest_gs.fit(train_x, train_y)

    print(forest_gs.best_params_)
    print(forest_gs.best_estimator_)

    print(
        'RF on Train',
        cohen_kappa_score(train_y,
                          regression_to_class(train_y_avg),
                          weights='quadratic'),
        cohen_kappa_score(train_y,
                          forest_gs.best_estimator_.predict(train_x),
                          weights='quadratic'))

    print(
        'RF on IDRID',
        cohen_kappa_score(val_y,
                          regression_to_class(val_y_avg),
                          weights='quadratic'),
        cohen_kappa_score(val_y,
                          forest_gs.best_estimator_.predict(val_x),
                          weights='quadratic'))

    with open('forest.pkl', 'wb') as f:
        pickle.dump(forest_gs.best_estimator_, f)
Example 6
def test_stack_with_ada_boost():
    pl1 = pd.read_csv(
        '../data/aptos-2015/test_private_pseudolabel_round_1.csv')
    labeled_gt = dict(
        (row['id_code'], row['diagnosis']) for i, row in pl1.iterrows())

    ids, train_x, train_y, train_y_avg = prepare_inference_datasets(
        aptos2015_predictions, use_features=False, use_predictions=True)
    mask = np.zeros(len(ids), dtype=bool)
    for i, sample_id in enumerate(ids):
        mask[i] = labeled_gt[sample_id] >= 0

    ids = ids[mask]
    train_x = train_x[mask]
    train_y = train_y[mask]
    train_y_avg = train_y_avg[mask]

    _, val_x, val_y, val_y_avg = prepare_inference_datasets(
        idrid_predictions, use_features=False, use_predictions=True)

    from sklearn.ensemble import AdaBoostClassifier

    clf = AdaBoostClassifier(n_estimators=100)
    scores = cross_val_score(clf, train_x, train_y, cv=5)
    print(scores)
    print(scores.mean())

    clf = AdaBoostClassifier(n_estimators=100)
    clf.fit(train_x, train_y)

    y_pred = clf.predict(train_x)
    df = pd.DataFrame.from_dict({
        'id_code': ids,
        'y_true': train_y,
        'y_pred': y_pred
    })

    negatives = df[df['y_pred'] != df['y_true']]
    negatives.to_csv('aptos_negatives.csv', index=False)

    print(
        'Score on APTOS',
        cohen_kappa_score(train_y,
                          regression_to_class(train_y_avg),
                          weights='quadratic'),
        cohen_kappa_score(train_y, clf.predict(train_x), weights='quadratic'))

    print(
        'Score on IDRID',
        cohen_kappa_score(val_y,
                          regression_to_class(val_y_avg),
                          weights='quadratic'),
        cohen_kappa_score(val_y, clf.predict(val_x), weights='quadratic'))
Example 7
def test_knn_on_idrid():
    ids, train_x, train_y, train_y_avg = prepare_inference_datasets(
        aptos2015_predictions, use_features=False, use_predictions=True)
    ids, val_x, val_y, val_y_avg = prepare_inference_datasets(
        idrid_predictions, use_features=False, use_predictions=True)

    # {'algorithm': 'ball_tree', 'leaf_size': 8, 'n_neighbors': 64, 'p': 1, 'weights': 'distance'}
    # KNeighborsClassifier(algorithm='ball_tree', leaf_size=8, metric='minkowski',
    #                      metric_params=None, n_jobs=None, n_neighbors=64, p=1,
    #                      weights='distance')
    # KNN on Train 0.8058995695509186 1.0
    # KNN on IDRID 0.8260826380611839 0.8692778993435448

    # create a dictionary of all values we want to test for n_neighbors
    params_knn = {
        'n_neighbors': [8, 16, 32, 64, 128],
        'weights': ['uniform', 'distance'],
        'p': [1, 2],
        'algorithm': ['ball_tree', 'kd_tree'],
        'leaf_size': [8, 16, 32, 64, 128]
    }

    knn_gs = GridSearchCV(KNeighborsClassifier(),
                          params_knn,
                          cv=5,
                          verbose=1,
                          n_jobs=4)
    knn_gs.fit(train_x, train_y)

    print(knn_gs.best_params_)
    print(knn_gs.best_estimator_)

    print(
        'KNN on Train',
        cohen_kappa_score(train_y,
                          regression_to_class(train_y_avg),
                          weights='quadratic'),
        cohen_kappa_score(train_y,
                          knn_gs.best_estimator_.predict(train_x),
                          weights='quadratic'))

    print(
        'KNN on IDRID',
        cohen_kappa_score(val_y,
                          regression_to_class(val_y_avg),
                          weights='quadratic'),
        cohen_kappa_score(val_y,
                          knn_gs.best_estimator_.predict(val_x),
                          weights='quadratic'))

    with open('knn.pkl', 'wb') as f:
        pickle.dump(knn_gs.best_estimator_, f)
Example 8
def test_logistic_regression_on_idrid():
    ids, train_x, train_y, train_y_avg = prepare_inference_datasets(
        aptos2015_predictions, use_features=False, use_predictions=True)
    ids, val_x, val_y, val_y_avg = prepare_inference_datasets(
        idrid_predictions, use_features=False, use_predictions=True)

    params_lr = {
        'class_weight': ['balanced', None],
        'multi_class': ['multinomial', 'auto', 'ovr'],
        'solver': ['newton-cg', 'lbfgs'],
        'max_iter': [100, 250, 500, 1000, 2000, 5000],
        'fit_intercept': [True, False],
        'random_state': [42]
    }

    # {'class_weight': None, 'fit_intercept': True, 'max_iter': 100, 'multi_class': 'multinomial', 'random_state': 42, 'solver': 'lbfgs'}
    # LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
    #                    intercept_scaling=1, l1_ratio=None, max_iter=100,
    #                    multi_class='multinomial', n_jobs=None, penalty='l2',
    #                    random_state=42, solver='lbfgs', tol=0.0001, verbose=0,
    #                    warm_start=False)
    # LR on Train 0.8058995695509186 0.8408521184285193
    # LR on IDRID 0.8260826380611839 0.8655784001198091

    lr_gs = GridSearchCV(LogisticRegression(), params_lr, cv=5, verbose=1)
    lr_gs.fit(train_x, train_y)

    print(lr_gs.best_params_)
    print(lr_gs.best_estimator_)

    print(
        'LR on Train',
        cohen_kappa_score(train_y,
                          regression_to_class(train_y_avg),
                          weights='quadratic'),
        cohen_kappa_score(train_y,
                          lr_gs.best_estimator_.predict(train_x),
                          weights='quadratic'))

    print(
        'LR on IDRID',
        cohen_kappa_score(val_y,
                          regression_to_class(val_y_avg),
                          weights='quadratic'),
        cohen_kappa_score(val_y,
                          lr_gs.best_estimator_.predict(val_x),
                          weights='quadratic'))

    with open('logistic_regression.pkl', 'wb') as f:
        pickle.dump(lr_gs.best_estimator_, f)
Example 9
def test_round():
    x = torch.tensor([
        -0.9, -0.2, 0.2, 0.5, 0.7, 1.1, 1.4, 1.5, 1.6, 2.4, 2.5, 2.6, 3.3, 3.5,
        3.9, 4, 4.5, 5
    ])
    y = regression_to_class(x)
    print(x)
    print(y)
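
test_round documents the bucketing regression_to_class is expected to perform. A sketch consistent with the values above, thresholding at the .5 boundaries and clamping to grades 0-4; the rounding_coefficients parameter mirrors its use in Example 2, but the body is an assumption:

import torch


def regression_to_class(preds, rounding_coefficients=(0.5, 1.5, 2.5, 3.5)):
    # Bucket continuous scores into the five DR grades by thresholding.
    preds = torch.as_tensor(preds, dtype=torch.float32)
    classes = torch.zeros_like(preds, dtype=torch.long)
    for threshold in rounding_coefficients:
        classes += (preds > threshold).long()
    return classes  # e.g. -0.9 -> 0, 0.7 -> 1, 2.6 -> 3, 5.0 -> 4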
Example 10
def test_evaluate_model(predictions):
    num_models = len(predictions)
    ids, x, y_true, y_average = prepare_inference_datasets(
        predictions, use_features=False, use_predictions=True)

    for i in range(num_models):
        print(
            fs.id_from_fname(predictions[i]),
            cohen_kappa_score(y_true,
                              regression_to_class(x[:, i]),
                              weights='quadratic'))
Example 11
def test_pseudolabeling_aptos2019_round1():
    ids, x, y_true, y_average = prepare_inference_datasets(
        aptos2019_predictions, use_features=False, use_predictions=True)

    y_round = to_numpy(regression_to_class(x))
    y_major = majority_voting(y_round, axis=1)

    y_agreement = y_round == np.expand_dims(y_major, -1)

    # y_agreement_all = np.all(y_agreement, axis=1)
    y_agreement_all = np.sum(y_agreement, axis=1) >= 16
    print('Agreement', np.mean(y_agreement_all))
    print('Distribution', np.bincount(y_major[y_agreement_all]))

    y_true[y_agreement_all] = y_major[y_agreement_all]
    print(y_round)
    df = pd.DataFrame.from_dict({'id_code': ids, 'diagnosis': y_true})
    df.to_csv('../data/aptos-2019/test_pseudolabel_round_1.csv', index=False)
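
to_numpy is another small helper taken on faith throughout the listing. A likely one-liner (assumption):

import numpy as np
import torch


def to_numpy(x):
    # Detach torch tensors to host memory; pass everything else to asarray.
    if isinstance(x, torch.Tensor):
        return x.detach().cpu().numpy()
    return np.asarray(x)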
Example 12
def test_pseudolabeling_messirod_2_round1(predictions, output_csv):
    ids, x, y_true, y_average = prepare_inference_datasets(
        predictions, use_features=False, use_predictions=True)

    y_round = to_numpy(regression_to_class(x))
    y_major = majority_voting(y_round, axis=1)

    y_agreement = y_round == np.expand_dims(y_major, -1)

    num_models = x.shape[1]
    y_agreement_most = np.sum(y_agreement, axis=1) >= int(0.75 * num_models)
    # y_agreement_all = y_major == y_true

    print('Agreement', np.mean(y_agreement_most))
    print('Distribution', np.bincount(y_major[y_agreement_most]))

    y_major[~y_agreement_most] = -100
    print(y_round)
    df = pd.DataFrame.from_dict({'id_code': ids, 'diagnosis': y_major})
    df.to_csv(output_csv, index=False)
Example 13
def main():
    f0_aptos15 = pd.read_pickle(
        fs.auto_file(
            'reg_seresnext50_rms_512_medium_mse_aptos2019_fold0_awesome_babbage_on_aptos2019_fold0.pkl'
        ))
    f0_aptos15['fold'] = 0

    f0_idrid = pd.read_pickle(
        fs.auto_file(
            'reg_seresnext50_rms_512_medium_mse_idrid_fold0_heuristic_ptolemy_on_aptos2019_fold0.pkl'
        ))
    f0_idrid['fold'] = 0

    f1_aptos15 = pd.read_pickle(
        fs.auto_file(
            'reg_seresnext50_rms_512_medium_mse_aptos2019_fold1_hopeful_khorana_on_aptos2019_fold1.pkl'
        ))
    f1_aptos15['fold'] = 1

    f1_idrid = pd.read_pickle(
        fs.auto_file(
            'reg_seresnext50_rms_512_medium_mse_idrid_fold1_gifted_visvesvaraya_on_aptos2019_fold1.pkl'
        ))
    f1_idrid['fold'] = 1

    f2_aptos15 = pd.read_pickle(
        fs.auto_file(
            'reg_seresnext50_rms_512_medium_mse_aptos2019_fold2_trusting_nightingale_on_aptos2019_fold2.pkl'
        ))
    f2_aptos15['fold'] = 2

    f2_idrid = pd.read_pickle(
        fs.auto_file(
            'reg_seresnext50_rms_512_medium_mse_idrid_fold2_sharp_brattain_on_aptos2019_fold2.pkl'
        ))
    f2_idrid['fold'] = 2

    f3_aptos15 = pd.read_pickle(
        fs.auto_file(
            'reg_seresnext50_rms_512_medium_mse_aptos2019_fold3_epic_wing_on_aptos2019_fold3.pkl'
        ))
    f3_aptos15['fold'] = 3

    f3_idrid = pd.read_pickle(
        fs.auto_file(
            'reg_seresnext50_rms_512_medium_mse_idrid_fold3_vibrant_minsky_on_aptos2019_fold3.pkl'
        ))
    f3_idrid['fold'] = 3

    df_aptos15 = pd.concat([f0_aptos15, f1_aptos15, f2_aptos15, f3_aptos15])
    df_idrid = pd.concat([f0_idrid, f1_idrid, f2_idrid, f3_idrid])

    print(len(f0_aptos15), len(f1_aptos15), len(f2_aptos15), len(f3_aptos15),
          len(df_aptos15))

    # logits = np.array(df_aptos15['logits'].values.tolist())
    regression = np.array(df_aptos15['regression'].values.tolist())

    X = np.hstack((np.array(df_aptos15['features'].values.tolist()),
                   np.array(df_idrid['features'].values.tolist())))

    Y = np.array(df_aptos15['diagnosis_true'].values.tolist())

    print(X.shape, Y.shape)

    x_train, x_test, y_train, y_test, y_hat_train, y_hat_test = train_test_split(
        X,
        Y,
        to_numpy(regression_to_class(regression)),
        stratify=Y,
        test_size=0.25,
        random_state=0)

    from sklearn.preprocessing import StandardScaler
    sc = StandardScaler()
    x_train = sc.fit_transform(x_train)
    x_test = sc.transform(x_test)

    # d_train = lgb.Dataset(x_train, label=y_train.astype(np.float32))
    #
    # params = {}
    # params['learning_rate'] = 0.003
    # params['boosting_type'] = 'gbdt'
    # params['objective'] = 'regression'
    # params['metric'] = 'mse'
    # params['sub_feature'] = 0.5
    # params['num_leaves'] = 10
    # params['min_data'] = 50
    # # params['max_depth'] = 4
    #
    # clf = lgb.train(params, d_train, 1000)
    #
    # y_pred = clf.predict(x_test)
    # y_pred_class = regression_to_class(y_pred)

    cls = LGBMClassifier(n_estimators=64,
                         max_depth=10,
                         random_state=0,
                         num_leaves=256)
    cls.fit(x_train, y_train)
    y_pred_class = cls.predict(x_test)

    # cohen_kappa_score returns a single float, not a (score, num, denom) tuple.
    raw_score = cohen_kappa_score(y_hat_test, y_test, weights='quadratic')
    print('raw_score', raw_score)
    lgb_score = cohen_kappa_score(y_pred_class, y_test, weights='quadratic')
    print('lgb_score', lgb_score)
Example 14
def test_evaluate_model_v2(train, validation):
    num_models = len(train)
    ids, train_x, train_y_true, train_y_average = prepare_inference_datasets(
        train, use_features=False, use_predictions=True)

    ids, valid_x, valid_y_true, valid_y_average = prepare_inference_datasets(
        validation, use_features=False, use_predictions=True)

    for i in range(num_models):
        print(
            fs.id_from_fname(train[i]),
            cohen_kappa_score(train_y_true,
                              regression_to_class(train_x[:, i]),
                              weights='quadratic'),
            cohen_kappa_score(valid_y_true,
                              regression_to_class(valid_x[:, i]),
                              weights='quadratic'),
        )

    print(
        'Averaged',
        cohen_kappa_score(train_y_true,
                          regression_to_class(train_y_average),
                          weights='quadratic'),
        cohen_kappa_score(valid_y_true,
                          regression_to_class(valid_y_average),
                          weights='quadratic'))

    print(
        'Median  ',
        cohen_kappa_score(train_y_true,
                          regression_to_class(np.median(train_x, axis=1)),
                          weights='quadratic'),
        cohen_kappa_score(valid_y_true,
                          regression_to_class(np.median(valid_x, axis=1)),
                          weights='quadratic'))

    print(
        'TrimMean',
        cohen_kappa_score(train_y_true,
                          regression_to_class(
                              trim_mean(train_x, proportiontocut=0.1, axis=1)),
                          weights='quadratic'),
        cohen_kappa_score(valid_y_true,
                          regression_to_class(
                              trim_mean(valid_x, proportiontocut=0.1, axis=1)),
                          weights='quadratic'))

    rounder = OptimizedRounder()
    rounder.fit(train_y_average, train_y_true)

    print(rounder.coefficients())
    print(
        'Optimized',
        cohen_kappa_score(train_y_true,
                          rounder.predict(train_y_average,
                                          rounder.coefficients()),
                          weights='quadratic'),
        cohen_kappa_score(valid_y_true,
                          rounder.predict(valid_y_average,
                                          rounder.coefficients()),
                          weights='quadratic'))
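
Every example above leans on prepare_inference_datasets. The reconstruction below matches the call sites for the use_predictions=True path; the column names 'id_code', 'diagnosis' and 'ordinal' are inferred from Examples 1-3, and the whole body should be read as an assumption:

import numpy as np
import pandas as pd


def prepare_inference_datasets(predictions, use_features=False,
                               use_predictions=True):
    # Stack each model's 'ordinal' regression output column-wise and return
    # ids, the (num_samples, num_models) matrix, the ground-truth labels,
    # and the per-sample mean prediction.
    columns, ids, y_true = [], None, None
    for p in predictions:
        df = p if isinstance(p, pd.DataFrame) else pd.read_pickle(p)
        ids = df['id_code'].values
        y_true = df['diagnosis'].values
        if use_predictions:
            columns.append(df['ordinal'].values)
    x = np.column_stack(columns)
    return ids, x, y_true, x.mean(axis=1)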