# ---- Code example #1 ----
def test_step(n_class, top_k_percent, att_gate, att_only, mil_ins, mut_ex,
              m_clam_op, imf_norm_op, c_model, dim_compress_features,
              test_path, result_path, result_file_name, n_test_steps):
    """Evaluate the trained model on every sample in the test set.

    Iterates over the TFRecord files under ``test_path`` in random order,
    predicts a slide-level label for each sample, checkpoints the running
    prediction table to disk after every sample, and finally prints
    slide-level accuracy, sensitivity, specificity, AUC and run time.

    Args:
        n_class: Number of slide-level classes.
        top_k_percent: Fraction of top-attended patches used per sample.
        att_gate: Whether the attention network is gated.
        att_only: Whether to run the attention branch only.
        mil_ins: Whether instance-level MIL evaluation is enabled.
        mut_ex: Whether classes are treated as mutually exclusive.
        m_clam_op: CLAM model variant option forwarded to the tester.
        imf_norm_op: Whether image features are normalized when loaded.
        c_model: The trained classification model.
        dim_compress_features: Dimension of the compressed feature vectors.
        test_path: Directory holding one TFRecord per test sample.
            NOTE(review): sample paths are built by string concatenation,
            so this must end with a path separator — confirm at call site.
        result_path: Directory where the result table is written.
        result_file_name: File name of the tab-separated result table.
        n_test_steps: Number of test iterations per sample.

    Side effects:
        Writes ``result_path/result_file_name`` (TSV) and prints a progress
        marker per sample plus a final metrics summary. Returns nothing.
    """
    start_time = time.time()

    slide_true_label = list()
    slide_predict_label = list()
    sample_names = list()

    # Shuffle evaluation order so results do not depend on directory order.
    test_sample_list = os.listdir(test_path)
    test_sample_list = random.sample(test_sample_list, len(test_sample_list))

    for i in test_sample_list:
        print('>', end="")
        single_test_data = test_path + i
        # Keyword form keeps the call consistent with val_step/train_step.
        img_features, slide_label = get_data_from_tf(tf_path=single_test_data,
                                                     imf_norm_op=imf_norm_op)

        predict_slide_label = m_test_per_sample(
            n_class=n_class,
            top_k_percent=top_k_percent,
            att_gate=att_gate,
            att_only=att_only,
            m_clam_op=m_clam_op,
            mil_ins=mil_ins,
            mut_ex=mut_ex,
            c_model=c_model,
            dim_compress_features=dim_compress_features,
            img_features=img_features,
            slide_label=slide_label,
            n_test_steps=n_test_steps)

        slide_true_label.append(slide_label)
        slide_predict_label.append(predict_slide_label)
        sample_names.append(i)

        # Rewrite the full table every iteration: this acts as a checkpoint,
        # so a crash mid-run still leaves all completed predictions on disk.
        test_results = pd.DataFrame(list(
            zip(sample_names, slide_true_label, slide_predict_label)),
                                    columns=[
                                        'Sample Names', 'Slide True Label',
                                        'Slide Predict Label'
                                    ])
        test_results.to_csv(os.path.join(result_path, result_file_name),
                            sep='\t',
                            index=False)

    tn, fp, fn, tp = sklearn.metrics.confusion_matrix(
        slide_true_label, slide_predict_label).ravel()
    test_tn = int(tn)
    test_fp = int(fp)
    test_fn = int(fn)
    test_tp = int(tp)

    test_sensitivity = round(test_tp / (test_tp + test_fn), 2)
    test_specificity = round(test_tn / (test_tn + test_fp), 2)
    test_acc = round(
        (test_tp + test_tn) / (test_tn + test_fp + test_fn + test_tp), 2)

    # ROC is computed from hard predicted labels (not scores), so the AUC is
    # a coarse two-point estimate — matches val_step/train_step behavior.
    fpr, tpr, _ = sklearn.metrics.roc_curve(slide_true_label,
                                            slide_predict_label,
                                            pos_label=1)
    test_auc = round(sklearn.metrics.auc(fpr, tpr), 2)

    test_run_time = time.time() - start_time

    # BUG FIX: test_auc used to be computed but never reported anywhere;
    # include it in the printed summary.
    template = ('\n Test Accuracy: {}, Test Sensitivity: {}, '
                'Test Specificity: {}, Test AUC: {}, Test Running Time: {}')
    print(
        template.format(f"{float(test_acc):.4%}",
                        f"{float(test_sensitivity):.4%}",
                        f"{float(test_specificity):.4%}",
                        f"{float(test_auc):.4f}",
                        "--- %s mins ---" % int(test_run_time / 60)))
# ---- Code example #2 ----
def val_step(c_model, val_path, imf_norm_op, i_loss_name, b_loss_name, mut_ex,
             n_class, c1, c2, top_k_percent, batch_size, batch_op):
    """Run one validation pass over every sample under ``val_path``.

    For each sample, computes instance/bag/total losses and a slide-level
    prediction (batched when enabled and the sample is large enough), then
    aggregates confusion-matrix counts, sensitivity, specificity, accuracy,
    AUC and mean losses across the whole validation set.

    Args:
        c_model: The classification model being validated.
        val_path: Directory holding one TFRecord per validation sample.
            NOTE(review): paths are built by concatenation — must end with
            a path separator; confirm at call site.
        imf_norm_op: Whether image features are normalized when loaded.
        i_loss_name: Name of the instance-level loss function to load.
        b_loss_name: Name of the bag-level loss function to load.
        mut_ex: Whether classes are treated as mutually exclusive.
        n_class: Number of slide-level classes.
        c1: Weight of the bag loss in the total loss.
        c2: Weight of the instance loss in the total loss.
        top_k_percent: Fraction of top-attended patches used per sample.
        batch_size: Patch batch size used when batching is active.
        batch_op: Master switch enabling batched validation.

    Returns:
        Tuple of (val_loss, val_ins_loss, val_bag_loss, val_tn, val_fp,
        val_fn, val_tp, val_sensitivity, val_specificity, val_acc, val_auc).
    """
    i_loss_func, b_loss_func = load_loss_func(i_loss_func_name=i_loss_name,
                                              b_loss_func_name=b_loss_name)

    loss_t = list()
    loss_i = list()
    loss_b = list()

    slide_true_label = list()
    slide_predict_label = list()

    # Shuffle evaluation order so results do not depend on directory order.
    val_sample_list = os.listdir(val_path)
    val_sample_list = random.sample(val_sample_list, len(val_sample_list))
    for i in val_sample_list:
        print('=', end="")
        single_val_data = val_path + i
        img_features, slide_label = get_data_from_tf(tf_path=single_val_data,
                                                     imf_norm_op=imf_norm_op)

        # Shuffle patch features to mirror the training loop (reduces the
        # side effect of dropping patches when batching is enabled).
        img_features = random.sample(img_features, len(img_features))

        # Batched path only applies when batching is enabled AND the sample
        # actually has more patches than one batch; otherwise both the
        # "batch too large" and "batching off" cases run the same
        # non-batched evaluation (previously duplicated verbatim).
        if batch_op and batch_size < len(img_features):
            I_Loss, B_Loss, T_Loss, predict_slide_label = b_val(
                batch_size=batch_size,
                top_k_percent=top_k_percent,
                n_samples=len(img_features),
                img_features=img_features,
                slide_label=slide_label,
                c_model=c_model,
                i_loss_func=i_loss_func,
                b_loss_func=b_loss_func,
                n_class=n_class,
                c1=c1,
                c2=c2,
                mut_ex=mut_ex)
        else:
            I_Loss, B_Loss, T_Loss, predict_slide_label = nb_val(
                img_features=img_features,
                slide_label=slide_label,
                c_model=c_model,
                i_loss_func=i_loss_func,
                b_loss_func=b_loss_func,
                n_class=n_class,
                c1=c1,
                c2=c2,
                mut_ex=mut_ex)

        loss_t.append(float(T_Loss))
        loss_i.append(float(I_Loss))
        loss_b.append(float(B_Loss))

        slide_true_label.append(slide_label)
        slide_predict_label.append(predict_slide_label)

    tn, fp, fn, tp = sklearn.metrics.confusion_matrix(
        slide_true_label, slide_predict_label).ravel()
    val_tn = int(tn)
    val_fp = int(fp)
    val_fn = int(fn)
    val_tp = int(tp)

    val_sensitivity = round(val_tp / (val_tp + val_fn), 2)
    val_specificity = round(val_tn / (val_tn + val_fp), 2)
    val_acc = round((val_tp + val_tn) / (val_tn + val_fp + val_fn + val_tp), 2)

    # AUC from hard predicted labels — coarse two-point estimate.
    fpr, tpr, _ = sklearn.metrics.roc_curve(slide_true_label,
                                            slide_predict_label,
                                            pos_label=1)
    val_auc = round(sklearn.metrics.auc(fpr, tpr), 2)

    val_loss = statistics.mean(loss_t)
    val_ins_loss = statistics.mean(loss_i)
    val_bag_loss = statistics.mean(loss_b)

    return val_loss, val_ins_loss, val_bag_loss, val_tn, val_fp, val_fn, val_tp, val_sensitivity, val_specificity, \
           val_acc, val_auc
# ---- Code example #3 ----
def train_step(c_model, train_path, imf_norm_op, i_wd_op_name, b_wd_op_name,
               a_wd_op_name, i_optimizer_name, b_optimizer_name,
               a_optimizer_name, i_loss_name, b_loss_name, mut_ex, n_class, c1,
               c2, i_learn_rate, b_learn_rate, a_learn_rate, i_l2_decay,
               b_l2_decay, a_l2_decay, top_k_percent, batch_size, batch_op):
    """Run one training epoch over every sample under ``train_path``.

    Loads the three optimizers (instance / bag / attention) and the two
    loss functions, optimizes the model on each sample (batched when
    enabled and the sample is large enough), and aggregates epoch-level
    losses and slide-level classification metrics.

    Args:
        c_model: The classification model being trained (updated in place).
        train_path: Directory holding one TFRecord per training sample.
            NOTE(review): paths are built by concatenation — must end with
            a path separator; confirm at call site.
        imf_norm_op: Whether image features are normalized when loaded.
        i_wd_op_name, b_wd_op_name, a_wd_op_name: Weight-decay option names
            for the instance, bag and attention optimizers.
        i_optimizer_name, b_optimizer_name, a_optimizer_name: Optimizer
            names for the instance, bag and attention branches.
        i_loss_name, b_loss_name: Instance/bag loss function names.
        mut_ex: Whether classes are treated as mutually exclusive.
        n_class: Number of slide-level classes.
        c1: Weight of the bag loss in the total loss.
        c2: Weight of the instance loss in the total loss.
        i_learn_rate, b_learn_rate, a_learn_rate: Learning rates.
        i_l2_decay, b_l2_decay, a_l2_decay: L2 decay coefficients.
        top_k_percent: Fraction of top-attended patches used per sample.
        batch_size: Patch batch size used when batching is active.
        batch_op: Master switch enabling batched training.

    Returns:
        Tuple of (train_loss, train_ins_loss, train_bag_loss, train_tn,
        train_fp, train_fn, train_tp, train_sensitivity, train_specificity,
        train_acc, train_auc).
    """
    i_optimizer, b_optimizer, a_optimizer = load_optimizers(
        i_wd_op_name=i_wd_op_name,
        b_wd_op_name=b_wd_op_name,
        a_wd_op_name=a_wd_op_name,
        i_optimizer_name=i_optimizer_name,
        b_optimizer_name=b_optimizer_name,
        a_optimizer_name=a_optimizer_name,
        i_learn_rate=i_learn_rate,
        b_learn_rate=b_learn_rate,
        a_learn_rate=a_learn_rate,
        i_l2_decay=i_l2_decay,
        b_l2_decay=b_l2_decay,
        a_l2_decay=a_l2_decay)

    i_loss_func, b_loss_func = load_loss_func(i_loss_func_name=i_loss_name,
                                              b_loss_func_name=b_loss_name)

    loss_total = list()
    loss_ins = list()
    loss_bag = list()

    slide_true_label = list()
    slide_predict_label = list()

    # Shuffle sample order each epoch.
    train_sample_list = os.listdir(train_path)
    train_sample_list = random.sample(train_sample_list,
                                      len(train_sample_list))
    for i in train_sample_list:
        print('=', end="")
        single_train_data = train_path + i
        img_features, slide_label = get_data_from_tf(tf_path=single_train_data,
                                                     imf_norm_op=imf_norm_op)
        # Shuffle the order of the img features list to reduce the side
        # effect of randomly dropping patch feature vectors when the batch
        # training option is enabled.
        img_features = random.sample(img_features, len(img_features))

        # Batched path only applies when batching is enabled AND the sample
        # has more patches than one batch; otherwise both the "batch too
        # large" and "batching off" cases run the same non-batched
        # optimization (previously duplicated verbatim).
        if batch_op and batch_size < len(img_features):
            I_Loss, B_Loss, T_Loss, predict_slide_label = b_optimize(
                batch_size=batch_size,
                top_k_percent=top_k_percent,
                n_samples=len(img_features),
                img_features=img_features,
                slide_label=slide_label,
                c_model=c_model,
                i_optimizer=i_optimizer,
                b_optimizer=b_optimizer,
                a_optimizer=a_optimizer,
                i_loss_func=i_loss_func,
                b_loss_func=b_loss_func,
                n_class=n_class,
                c1=c1,
                c2=c2,
                mut_ex=mut_ex)
        else:
            I_Loss, B_Loss, T_Loss, predict_slide_label = nb_optimize(
                img_features=img_features,
                slide_label=slide_label,
                c_model=c_model,
                i_optimizer=i_optimizer,
                b_optimizer=b_optimizer,
                a_optimizer=a_optimizer,
                i_loss_func=i_loss_func,
                b_loss_func=b_loss_func,
                n_class=n_class,
                c1=c1,
                c2=c2,
                mut_ex=mut_ex)

        loss_total.append(float(T_Loss))
        loss_ins.append(float(I_Loss))
        loss_bag.append(float(B_Loss))

        slide_true_label.append(slide_label)
        slide_predict_label.append(predict_slide_label)

    tn, fp, fn, tp = sklearn.metrics.confusion_matrix(
        slide_true_label, slide_predict_label).ravel()
    train_tn = int(tn)
    train_fp = int(fp)
    train_fn = int(fn)
    train_tp = int(tp)

    train_sensitivity = round(train_tp / (train_tp + train_fn), 2)
    train_specificity = round(train_tn / (train_tn + train_fp), 2)
    train_acc = round(
        (train_tp + train_tn) / (train_tn + train_fp + train_fn + train_tp), 2)

    # AUC from hard predicted labels — coarse two-point estimate.
    fpr, tpr, _ = sklearn.metrics.roc_curve(slide_true_label,
                                            slide_predict_label,
                                            pos_label=1)
    train_auc = round(sklearn.metrics.auc(fpr, tpr), 2)

    train_loss = statistics.mean(loss_total)
    train_ins_loss = statistics.mean(loss_ins)
    train_bag_loss = statistics.mean(loss_bag)

    return train_loss, train_ins_loss, train_bag_loss, train_tn, train_fp, train_fn, train_tp, train_sensitivity, \
           train_specificity, train_acc, train_auc