예제 #1
0
                        (100 * auc, 100 * acc, 100 * sens, 100 * spec))
            # Log the test-set metrics evaluated at the optimal ROC
            # threshold.  BUG FIX: the original passed test_sens_opt and
            # test_spec_opt unscaled while AUC/accuracy were multiplied by
            # 100; the sibling report above scales all four to percentages
            # (and the helpers return fractions), so scale them all here.
            logger.info('\t\tResults here at optimal threshold:')
            logger.info(
                '\t\t\tAUC: %.3f\tAcc: %.3f\tSens: %.3f\tSpec: %.3f' %
                (100 * auc, 100 * test_acc_opt, 100 * test_sens_opt,
                 100 * test_spec_opt))

        # Store the per-run metric arrays for this adipose shell so they can
        # be reported and pickled after the loop.  List order is:
        # [aucs, accs, sens, spec, accs@opt-thresh, sens@opt-thresh,
        #  spec@opt-thresh].
        adi_results[adi_id] = [
            aucs_here, accs_here, sens_here, spec_here, accs_opt_thresh_here,
            sens_opt_thresh_here, spec_opt_thresh_here
        ]

    # Report the default-threshold results for each held-out adipose ID.
    # Indices 0-3 of each adi_results entry are the aucs, accs, sens and
    # spec arrays (the opt-threshold arrays at indices 4-6 are only pickled).
    for adi_id in unique_adi_ids:
        logger.info('CNN Gen-2 Results on Unseen Adi-ID: %s' % adi_id)
        report_metrics(adi_results[adi_id][0], adi_results[adi_id][1],
                       adi_results[adi_id][2], adi_results[adi_id][3], logger)

    # Save the results to a .pickle file.
    # NOTE: removed the stray double slash (was 'output/g2-by-adi//') —
    # harmless to the filesystem, but now consistent with the other output
    # directory names in this file.
    save_dir = os.path.join(get_proj_path(), 'output/g2-by-adi/')
    verify_path(save_dir)
    save_pickle(adi_results, os.path.join(save_dir,
                                          'g2_by_adi_metrics.pickle'))

    # Save the correct and incorrect predictions for later error analysis
    out_dir = os.path.join(get_proj_path(), 'output/by-adi-preds/')
    verify_path(out_dir)

    save_pickle(incor_preds, os.path.join(out_dir, 'byadi_incor_preds.pickle'))
    save_pickle(cor_preds, os.path.join(out_dir, 'byadi_cor_preds.pickle'))
예제 #2
0
        # Evaluate accuracy / sensitivity / specificity at the optimal
        # threshold, scaled to percentages.  Column 1 is presumably the
        # positive-class score/label — TODO confirm against the caller.
        pos_preds = g1_preds[:, 1]
        pos_labels = g1_labels[:, 1]
        accs[run_idx] = 100 * get_acc(
            preds=pos_preds, labels=pos_labels, threshold=opt_thresh)
        sens[run_idx] = 100 * get_sens(
            preds=pos_preds, labels=pos_labels, threshold=opt_thresh)
        spec[run_idx] = 100 * get_spec(
            preds=pos_preds, labels=pos_labels, threshold=opt_thresh)

        # Plot and save the ROC curve for this run
        plt_roc_curve(preds=pos_preds,
                      labels=pos_labels,
                      save_str='dnn_run_%d_roc' % run_idx,
                      save=True)

        # Report AUC at this run
        logger.info('\t\tAUC:\t%.2f' % g1_auc)

        # Threshold the scores into hard 0/1 class predictions.
        # BUG FIX: was `g1_preds * np.zeros_like(g1_preds)` — the product is
        # an all-zeros array anyway, but it wastes a multiply and propagates
        # NaNs from the predictions; start from zeros directly.
        class_preds = np.zeros_like(g1_preds)
        class_preds[g1_preds >= opt_thresh] = 1

        # Reset the backend session so the next run starts from a fresh model
        clear_session()

    # Log the performance metrics averaged over all runs
    logger.info('Average performance metrics')
    logger.info('')
    report_metrics(
        aucs=auc_scores, accs=accs, sens=sens, spec=spec, logger=logger)