Example 1
def making_patch(num, img_path, mask_path, patch_side, threshold):
    z_size = 320
    y_size = 320
    x_size = 320
    w = int(patch_side / 2)

    path_w = "E:/data/data{}_patch/sigma_0.9/th_{}/size_{}/".format(
        num, threshold, patch_side)

    # load data
    print('load data')
    img = io.read_mhd_and_raw(img_path)
    mask = io.read_mhd_and_raw(mask_path)

    img = np.reshape(img, [z_size, y_size, x_size])
    mask = np.reshape(mask, [z_size, y_size, x_size])

    # check folder
    if not (os.path.exists(path_w)):
        os.makedirs(path_w)

    file = open(path_w + "filename.txt", mode='w')
    count = 0
    # stay w voxels inside the borders so neighbor checks and patch
    # slicing never leave the volume
    for z in range(w, z_size - w):
        for y in range(w, y_size - w):
            for x in range(w, x_size - w):
                if mask[z, y, x] > threshold and mask[z, y, x] > mask[z, y, x - 1] and mask[z, y, x] > mask[z, y, x + 1] \
                        and mask[z, y, x] > mask[z - 1, y, x] and mask[z, y, x] > mask[z + 1, y, x] \
                        and mask[z, y, x] > mask[z, y - 1, x] and mask[z, y, x] > mask[z, y + 1, x]:
                    patch = img[z - w:z + w + 1, y - w:y + w + 1,
                                x - w:x + w + 1]
                    patch = patch.reshape([patch_side, patch_side, patch_side])
                    eudt_image = sitk.GetImageFromArray(patch)
                    eudt_image.SetOrigin([patch_side, patch_side, patch_side])
                    eudt_image.SetSpacing([0.885, 0.885, 1])
                    io.write_mhd_and_raw(
                        eudt_image,
                        os.path.join(path_w,
                                     "patch_{}_{}_{}.mhd".format(x, y, z)))
                    # list the same filename that was just written
                    file.write(
                        os.path.join(
                            path_w,
                            "patch_{}_{}_{}.mhd".format(x, y, z)) + "\n")
                    count += 1
                    print(count)

    file.close()
    return 0
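
The examples in this listing all lean on an external io helper module for MetaImage (.mhd/.raw) files that is never shown. As a rough sketch, assuming read_mhd_and_raw returns a numpy array and write_mhd_and_raw takes a SimpleITK image (later examples also pass a bare numpy array plus spacing/origin/compress keywords, so the real helper is clearly more flexible than this):

import os

import SimpleITK as sitk


def read_mhd_and_raw(path):
    # read the .mhd header and its .raw payload into a numpy array
    return sitk.GetArrayFromImage(sitk.ReadImage(path))


def write_mhd_and_raw(image, path):
    # 'image' is a sitk.Image; make sure the target folder exists
    os.makedirs(os.path.dirname(path), exist_ok=True)
    sitk.WriteImage(image, path)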
Example 2
def spe_test(model):
    # calculate mu and sigma over the training set
    model.eval()
    mean = []
    std = []
    for i, train_data in enumerate(train_loader):
        with torch.no_grad():
            train_data_cuda = train_data.to(device)
            recon_batch, mean_batch, logvar_batch = model(train_data_cuda)
            mean_single = mean_batch[0, :]
            std_single = torch.exp(0.5 * logvar_batch)[0, :]
            mean.append(mean_single.cpu().numpy())
            std.append(std_single.cpu().numpy())

    # scalar statistics: torch.normal with an explicit size wants float
    # mean/std, so the per-dimension structure is averaged away here
    mu = np.mean(mean)
    sigma = np.mean(std)

    specificity = []
    with torch.no_grad():
        ori = np.reshape(test_data.cpu().numpy(),
                         [num_of_test, patch_side, patch_side, patch_side])
        for j in trange(num_of_gen):
            # sample_z = np.random.normal(mu, sigma, (1, latent_dim))
            sample_z = torch.normal(mu, sigma, (1, latent_dim)).to(device)
            gen_batch = model.decode(sample_z)
            gen_single = gen_batch.cpu().numpy()
            gen = np.reshape(gen_single, [patch_side, patch_side, patch_side])
            # EUDT
            eudt_image = sitk.GetImageFromArray(gen)
            eudt_image.SetSpacing([0.885, 0.885, 1])
            eudt_image.SetOrigin([0, 0, 0])

            # calculate spe
            case_min_specificity = 1.0
            for image_index in range(num_of_test):
                specificity_tmp = L1norm(ori[image_index], gen)
                if specificity_tmp < case_min_specificity:
                    case_min_specificity = specificity_tmp
            specificity.append([case_min_specificity])

            # output image
            io.write_mhd_and_raw(
                eudt_image, '{}.mhd'.format(
                    os.path.join(args.outdir, 'spe',
                                 '{}'.format(str(j).zfill(4)))))

    return specificity
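
L1norm is defined elsewhere; given that smaller values are treated as better and compared against an initial 1.0, it is presumably a normalized L1 distance. A minimal sketch, assuming a plain mean absolute difference:

import numpy as np


def L1norm(a, b):
    # mean absolute difference between two equally shaped arrays
    return np.mean(np.abs(np.asarray(a) - np.asarray(b)))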
Example 3
def main():

    # tf flag
    flags = tf.flags
    flags.DEFINE_string("train_data_txt",
                        "E:/git/beta-VAE/input/CT/shift/train.txt",
                        "train data txt")
    flags.DEFINE_string("ground_truth_txt",
                        "E:/git/beta-VAE/input/CT/shift/test.txt", "i1")
    flags.DEFINE_string(
        "model1", 'D:/vae_result/n1/z6/beta_1/model/model_{}'.format(997500),
        "i2")
    flags.DEFINE_string(
        "model2",
        'D:/vae_result/n1+n2/all/sig/beta_1/model/model_{}'.format(197500),
        "i3")
    flags.DEFINE_string("outdir", "D:/vae_result/n1+n2/all/sig/beta_1/spe/",
                        "i4")
    flags.DEFINE_float("beta", 1, "hyperparameter beta")
    flags.DEFINE_integer("num_of_generate", 5000, "number of generate data")
    flags.DEFINE_integer("num_of_test", 600, "number of test data")
    flags.DEFINE_integer("num_of_train", 1804, "number of train data")
    flags.DEFINE_integer("batch_size", 1, "batch size")
    flags.DEFINE_integer("latent_dim", 6, "latent dim")
    flags.DEFINE_list("image_size", [9 * 9 * 9], "image size")
    flags.DEFINE_boolean("const_bool", False,
                         "if there is sigmoid in front of last output")
    FLAGS = flags.FLAGS

    # check folder (create the subfolders even when outdir already exists)
    os.makedirs(FLAGS.outdir + 'spe1/', exist_ok=True)
    os.makedirs(FLAGS.outdir + 'spe2/', exist_ok=True)
    os.makedirs(FLAGS.outdir + 'spe_all/', exist_ok=True)

    # read list
    test_data_list = io.load_list(FLAGS.ground_truth_txt)
    train_data_list = io.load_list(FLAGS.train_data_txt)

    # test step
    test_step = FLAGS.num_of_generate // FLAGS.batch_size
    if FLAGS.num_of_generate % FLAGS.batch_size != 0:
        test_step += 1

    # load train data
    train_set = tf.data.TFRecordDataset(train_data_list)
    train_set = train_set.map(
        lambda x: _parse_function(x, image_size=FLAGS.image_size),
        num_parallel_calls=os.cpu_count())
    train_set = train_set.batch(FLAGS.batch_size)
    train_iter = train_set.make_one_shot_iterator()
    train_data = train_iter.get_next()

    # load test data
    test_set = tf.data.TFRecordDataset(test_data_list)
    test_set = test_set.map(
        lambda x: _parse_function(x, image_size=FLAGS.image_size),
        num_parallel_calls=os.cpu_count())
    test_set = test_set.batch(FLAGS.batch_size)
    test_iter = test_set.make_one_shot_iterator()
    test_data = test_iter.get_next()

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config) as sess:

        # set network
        kwargs = {
            'sess': sess,
            'outdir': FLAGS.outdir,
            'beta': FLAGS.beta,
            'latent_dim': FLAGS.latent_dim,
            'batch_size': FLAGS.batch_size,
            'image_size': FLAGS.image_size,
            'encoder': encoder_mlp,
            'decoder': decoder_mlp,
            'is_res': False
        }
        VAE = Variational_Autoencoder(**kwargs)
        kwargs_2 = {
            'sess': sess,
            'outdir': FLAGS.outdir,
            'beta': FLAGS.beta,
            'latent_dim': 8,
            'batch_size': FLAGS.batch_size,
            'image_size': FLAGS.image_size,
            'encoder': encoder_mlp2,
            'decoder': decoder_mlp_tanh,
            'is_res': False,
            'is_constraints': FLAGS.const_bool
        }
        VAE_2 = Variational_Autoencoder(**kwargs_2)

        sess.run(init_op)

        # testing
        VAE.restore_model(FLAGS.model1)
        VAE_2.restore_model(FLAGS.model2)

        tbar = tqdm(range(FLAGS.num_of_generate), ascii=True)
        specificity = []
        spe_mean = []
        generate_data = []
        generate_data2 = []
        ori = []
        latent_space = []
        latent_space2 = []

        patch_side = 9

        for i in range(FLAGS.num_of_train):
            train_data_batch = sess.run(train_data)
            z = VAE.plot_latent(train_data_batch)
            z2 = VAE_2.plot_latent(train_data_batch)
            z = z.flatten()
            z2 = z2.flatten()
            latent_space.append(z)
            latent_space2.append(z2)

        mu = np.mean(latent_space, axis=0)
        var = np.var(latent_space, axis=0)
        mu2 = np.mean(latent_space2, axis=0)
        var2 = np.var(latent_space2, axis=0)

        for i in range(FLAGS.num_of_test):
            test_data_batch = sess.run(test_data)
            ori_single = test_data_batch
            ori_single = ori_single[0, :]
            ori.append(ori_single)

        file_spe1 = open(FLAGS.outdir + 'spe1/list.txt', 'w')
        file_spe2 = open(FLAGS.outdir + 'spe2/list.txt', 'w')
        file_spe_all = open(FLAGS.outdir + 'spe_all/list.txt', 'w')

        for j in tbar:
            # np.random.normal expects a standard deviation, not a variance
            sample_z = np.random.normal(mu, np.sqrt(var),
                                        (1, FLAGS.latent_dim))
            sample_z2 = np.random.normal(mu2, np.sqrt(var2), (1, 8))
            generate_data_single = VAE.generate_sample(sample_z)
            if not FLAGS.const_bool:
                generate_data_single2 = VAE_2.generate_sample(sample_z2)
                generate_data_single = generate_data_single[0, :]
                generate_data_single2 = generate_data_single2[0, :]
                generate_data.append(generate_data_single)
                generate_data2.append(generate_data_single2)
                gen = np.reshape(generate_data_single,
                                 [patch_side, patch_side, patch_side])
                gen2 = np.reshape(generate_data_single2,
                                  [patch_side, patch_side, patch_side])
                generate_data_single_all = generate_data_single + generate_data_single2
                gen_all = gen + gen2

            else:
                generate_data_single_all = VAE_2.generate_sample2(
                    sample_z2, generate_data_single)
                generate_data_single = generate_data_single[0, :]
                generate_data_single_all = generate_data_single_all[0, :]
                generate_data.append(generate_data_single)
                generate_data2.append(generate_data_single_all)
                gen = np.reshape(generate_data_single,
                                 [patch_side, patch_side, patch_side])
                gen_all = np.reshape(generate_data_single_all,
                                     [patch_side, patch_side, patch_side])
                generate_data_single2 = generate_data_single_all - generate_data_single
                gen2 = gen_all - gen

            # EUDT
            gen_image = sitk.GetImageFromArray(gen)
            gen_image.SetSpacing([0.885, 0.885, 1])
            gen_image.SetOrigin([0, 0, 0])

            gen2_image = sitk.GetImageFromArray(gen2)
            gen2_image.SetSpacing([0.885, 0.885, 1])
            gen2_image.SetOrigin([0, 0, 0])

            gen_all_image = sitk.GetImageFromArray(gen_all)
            gen_all_image.SetSpacing([0.885, 0.885, 1])
            gen_all_image.SetOrigin([0, 0, 0])

            # calculation
            case_min_specificity = 1.0
            for image_index in range(FLAGS.num_of_test):
                specificity_tmp = utils.L1norm(ori[image_index],
                                               generate_data_single_all)
                if specificity_tmp < case_min_specificity:
                    case_min_specificity = specificity_tmp

            specificity.append([case_min_specificity])
            spe = np.mean(specificity)
            spe_mean.append(spe)

            io.write_mhd_and_raw(
                gen_image, '{}.mhd'.format(
                    os.path.join(FLAGS.outdir, 'spe1',
                                 'spe1_{}'.format(j + 1))))
            io.write_mhd_and_raw(
                gen2_image, '{}.mhd'.format(
                    os.path.join(FLAGS.outdir, 'spe2',
                                 'spe2_{}'.format(j + 1))))
            io.write_mhd_and_raw(
                gen_all_image, '{}.mhd'.format(
                    os.path.join(FLAGS.outdir, 'spe_all',
                                 'spe_all_{}'.format(j + 1))))
            file_spe1.write('{}.mhd'.format(
                os.path.join(FLAGS.outdir, 'spe1', 'spe1_{}'.format(j + 1))) +
                            "\n")
            file_spe2.write('{}.mhd'.format(
                os.path.join(FLAGS.outdir, 'spe2', 'spe2_{}'.format(j + 1))) +
                            "\n")
            file_spe_all.write('{}.mhd'.format(
                os.path.join(FLAGS.outdir, 'spe_all', 'spe_all_{}'.format(
                    j + 1))) + "\n")

    file_spe1.close()
    file_spe2.close()
    file_spe_all.close()

    print('specificity = %f' % np.mean(specificity))
    np.savetxt(os.path.join(FLAGS.outdir, 'specificity.csv'),
               specificity,
               delimiter=",")

    # spe graph
    plt.plot(spe_mean)
    plt.grid()
    # plt.show()
    plt.savefig(FLAGS.outdir + "Specificity.png")
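
Several examples build their session with tf.Session(config=utils.config) or tf.Session(config=utils.config(index=...)). The helper itself is not shown; a minimal sketch of the callable variant for TensorFlow 1.x, assuming it only pins a GPU and enables memory growth:

import tensorflow as tf


def config(index="0"):
    # restrict TensorFlow to one visible GPU and grow memory on demand
    cfg = tf.ConfigProto()
    cfg.gpu_options.visible_device_list = index
    cfg.gpu_options.allow_growth = True
    return cfg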
Example 4
def main():
    parser = argparse.ArgumentParser(
        description='py, test_data_txt, ground_truth_txt, outdir')

    parser.add_argument('--ground_truth_txt', '-i1', default='')

    parser.add_argument('--model', '-i2', default='./model_{}'.format(50000))

    parser.add_argument('--outdir', '-i3', default='')

    args = parser.parse_args()

    # check folder
    if not (os.path.exists(args.outdir)):
        os.makedirs(args.outdir)

    # tf flag
    flags = tf.flags
    flags.DEFINE_float("beta", 0.1, "hyperparameter beta")
    flags.DEFINE_integer("num_of_generate", 1000, "number of generate data")
    flags.DEFINE_integer("batch_size", 1, "batch size")
    flags.DEFINE_integer("latent_dim", 2, "latent dim")
    flags.DEFINE_list("image_size", [512, 512, 1], "image size")
    FLAGS = flags.FLAGS

    # load ground truth
    ground_truth = io.load_matrix_data(args.ground_truth_txt, 'int32')
    print(ground_truth.shape)

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config) as sess:

        # set network
        kwargs = {
            'sess': sess,
            'outdir': args.outdir,
            'beta': FLAGS.beta,
            'latent_dim': FLAGS.latent_dim,
            'batch_size': FLAGS.batch_size,
            'image_size': FLAGS.image_size,
            'encoder': cnn_encoder,
            'decoder': cnn_decoder
        }
        VAE = Variational_Autoencoder(**kwargs)

        sess.run(init_op)

        # testing
        VAE.restore_model(args.model)
        tbar = tqdm(range(FLAGS.num_of_generate), ascii=True)
        specificity = []
        for i in tbar:
            sample_z = np.random.normal(0, 1.0, (1, FLAGS.latent_dim))
            generate_data = VAE.generate_sample(sample_z)
            generate_data = generate_data[0, :, :, 0]

            # EUDT
            eudt_image = sitk.GetImageFromArray(generate_data)
            eudt_image.SetSpacing([1, 1])
            eudt_image.SetOrigin([0, 0])

            # label
            label = np.where(generate_data > 0, 0, 1)
            label_image = sitk.GetImageFromArray(label)
            label_image.SetSpacing([1, 1])
            label_image.SetOrigin([0, 0])

            # calculate ji
            case_max_ji = 0.
            for image_index in range(ground_truth.shape[0]):
                ji = utils.jaccard(label, ground_truth[image_index])
                if ji > case_max_ji:
                    case_max_ji = ji
            specificity.append([case_max_ji])

            # output image
            io.write_mhd_and_raw(
                eudt_image,
                '{}.mhd'.format(os.path.join(args.outdir, 'EUDT', str(i + 1))))
            io.write_mhd_and_raw(
                label_image,
                '{}.mhd'.format(os.path.join(args.outdir, 'label',
                                             str(i + 1))))

    print('specificity = %f' % np.mean(specificity))

    # output csv file
    with open(os.path.join(args.outdir, 'specificity.csv'), 'w',
              newline='') as file:
        writer = csv.writer(file)
        writer.writerows(specificity)
        writer.writerow(['specificity:', np.mean(specificity)])
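
utils.jaccard (and the bare jaccard in later examples) is assumed here to be the Jaccard index between two binary label volumes. A minimal sketch under that assumption:

import numpy as np


def jaccard(a, b):
    # Jaccard index: intersection over union of two binary arrays
    a = np.asarray(a).astype(bool)
    b = np.asarray(b).astype(bool)
    union = np.logical_or(a, b).sum()
    if union == 0:
        return 1.0  # both empty: treat as perfect overlap
    return np.logical_and(a, b).sum() / float(union)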
Example 5
def gen(model):
    model.eval()
    with torch.no_grad():
        ori = []
        rec = []
        for i, data in enumerate(test_loader):
            # print(i)
            data = data.to(device)
            recon_batch, mu, logvar = model(data)
            ori_single = data[0, :]
            rec_single = recon_batch[0, :]
            ori.append(ori_single.cpu().numpy())
            rec.append(rec_single.cpu().numpy())

    ori = np.reshape(ori, [num_of_test, patch_side, patch_side, patch_side])
    rec = np.reshape(rec, [num_of_test, patch_side, patch_side, patch_side])

    generalization_single = []
    file_ori = open(args.outdir + 'ori/list.txt', 'w')
    file_rec = open(args.outdir + 'rec/list.txt', 'w')

    for j in trange(len(rec)):

        # EUDT
        ori_image = sitk.GetImageFromArray(ori[j])
        ori_image.SetOrigin([0, 0, 0])
        ori_image.SetSpacing([0.885, 0.885, 1])

        rec_image = sitk.GetImageFromArray(rec[j])
        rec_image.SetOrigin([0, 0, 0])
        rec_image.SetSpacing([0.885, 0.885, 1])

        # output image
        io.write_mhd_and_raw(
            ori_image, '{}.mhd'.format(
                os.path.join(args.outdir, 'ori',
                             '{}'.format(str(j).zfill(4)))))
        io.write_mhd_and_raw(
            rec_image, '{}.mhd'.format(
                os.path.join(args.outdir, 'rec',
                             '{}'.format(str(j).zfill(4)))))
        file_ori.write('{}.mhd'.format(
            os.path.join(args.outdir, 'ori', '{}'.format(str(j).zfill(4)))) +
                       "\n")
        file_rec.write('{}.mhd'.format(
            os.path.join(args.outdir, 'rec', '{}'.format(str(j).zfill(4)))) +
                       "\n")

        generalization_single.append(L1norm(ori[j], rec[j]))

    file_ori.close()
    file_rec.close()

    generalization = np.average(generalization_single)
    print('generalization = %f' % generalization)

    np.savetxt(os.path.join(args.outdir, 'generalization.csv'),
               generalization_single,
               delimiter=",")

    # plot reconstruction
    a_X = ori[:, 4, :]
    a_Xe = rec[:, 4, :]
    c_X = ori[:, :, 4, :]
    c_Xe = rec[:, :, 4, :]
    s_X = ori[:, :, :, 4]
    s_Xe = rec[:, :, :, 4]
    visualize_slices(a_X, a_Xe, args.outdir + "axial_")
    visualize_slices(c_X, c_Xe, args.outdir + "coronal_")
    visualize_slices(s_X, s_Xe, args.outdir + "sagittal_")
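
visualize_slices is also external. A minimal matplotlib sketch that tiles original/reconstruction slice pairs, assuming inputs of shape [N, H, W] and a filename prefix as above:

import matplotlib.pyplot as plt


def visualize_slices(original, reconstruction, prefix, n=10):
    # top row: original slices; bottom row: reconstructions
    n = min(n, len(original))
    fig, axes = plt.subplots(2, n, figsize=(2 * n, 4))
    for k in range(n):
        axes[0, k].imshow(original[k], cmap='gray')
        axes[1, k].imshow(reconstruction[k], cmap='gray')
        axes[0, k].axis('off')
        axes[1, k].axis('off')
    fig.savefig(prefix + 'slices.png')
    plt.close(fig)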
Example 6
def predict_spe():
    parser = argparse.ArgumentParser(
        description='py, test_data_list, name_list, outdir')
    parser.add_argument('--model', '-i1', default='', help='model')
    parser.add_argument('--truth_data_txt',
                        '-i2',
                        default='',
                        help='name list')
    parser.add_argument('--outdir', '-i3', default='', help='outdir')
    args = parser.parse_args()

    if not (os.path.exists(args.outdir)):
        os.mkdir(args.outdir)

    latent_dim = 32
    n_gen = 1

    # load ground truth
    ground_truth = io.load_matrix_data(args.truth_data_txt, 'int32')
    print(ground_truth.shape)

    # set network
    decoder = load_model(args.model)

    specificity = []
    for i in range(n_gen):

        # # generate shape
        # sample_z = np.full(latent_dim, 0)
        # sample_z = np.array([sample_z])
        sample_z = np.random.normal(0, 1.0, (1, latent_dim))
        # print(sample_z.shape)
        preds = decoder.predict(sample_z)
        preds = preds[:, :, :, :, 0]

        # EUDT
        eudt_image = sitk.GetImageFromArray(preds[0])
        eudt_image.SetSpacing([1, 1, 1])
        eudt_image.SetOrigin([0, 0, 0])

        # label
        label = np.where(preds[0] > 0, 0, 1)
        label_image = sitk.GetImageFromArray(label)
        label_image.SetSpacing([1, 1, 1])
        label_image.SetOrigin([0, 0, 0])

        # calculate ji
        case_max_ji = 0.
        for image_index in range(ground_truth.shape[0]):
            ji = jaccard(label, ground_truth[image_index])
            if ji > case_max_ji:
                case_max_ji = ji
        specificity.append([case_max_ji])

        # output image
        io.write_mhd_and_raw(
            eudt_image,
            '{}.mhd'.format(os.path.join(args.outdir, 'EUDT', str(i + 1))))
        io.write_mhd_and_raw(
            label_image,
            '{}.mhd'.format(os.path.join(args.outdir, 'label', str(i + 1))))

    # output csv file
    # with open(os.path.join(args.outdir, 'specificity.csv'), 'w', newline='') as file:
    #     writer = csv.writer(file)
    #     writer.writerows(specificity)
    #     writer.writerow(['specificity:', np.mean(specificity)])

    print('specificity = %f' % np.mean(specificity))
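
io.load_matrix_data takes a text file listing .mhd paths and returns the volumes stacked into one array. A minimal sketch under that assumption:

import numpy as np
import SimpleITK as sitk


def load_matrix_data(list_txt, dtype):
    # read every path listed in the text file and stack the volumes
    volumes = []
    with open(list_txt) as f:
        for line in f:
            path = line.strip()
            if path:
                volumes.append(
                    sitk.GetArrayFromImage(sitk.ReadImage(path)))
    return np.asarray(volumes).astype(dtype)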
Example 7
def main():
    # tf flag
    flags = tf.flags
    flags.DEFINE_string(
        "model",
        'G:/experiment_result/liver/VAE/set_4/down_64/RBF/alpha_0.1/4/beta_10/model/model_{}'
        .format(1350), "model")
    flags.DEFINE_string(
        "outdir",
        'G:/experiment_result/liver/VAE/set_4/down_64/RBF/alpha_0.1/4/beta_10/random',
        "outdir")
    flags.DEFINE_string("gpu_index", "0", "GPU-index")
    flags.DEFINE_float("beta", 1.0, "hyperparameter beta")
    flags.DEFINE_integer("batch_size", 1, "batch size")
    flags.DEFINE_integer("latent_dim", 2, "latent dim")
    flags.DEFINE_list("image_size", [56, 72, 88, 1], "image size")
    FLAGS = flags.FLAGS

    # check folder
    if not (os.path.exists(FLAGS.outdir)):
        os.makedirs(FLAGS.outdir)

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:

        # set network
        kwargs = {
            'sess': sess,
            'outdir': FLAGS.outdir,
            'beta': FLAGS.beta,
            'latent_dim': FLAGS.latent_dim,
            'batch_size': FLAGS.batch_size,
            'image_size': FLAGS.image_size,
            'encoder': encoder_resblock_bn,
            'decoder': decoder_resblock_bn,
            'downsampling': down_sampling,
            'upsampling': up_sampling,
            'is_training': False,
            'is_down': False
        }
        VAE = Variational_Autoencoder(**kwargs)

        sess.run(init_op)

        # testing
        VAE.restore_model(FLAGS.model)

        # 2-dim latent traversal around fixed training-set statistics
        mean = [0.37555057, 0.8882291]
        var = [32.121346, 24.540127]
        for j in range(-2, 3):
            for i in range(-2, 3):

                sample_z = [[i, j]]
                sample_z = np.asarray(mean) + np.sqrt(
                    np.asarray(var)) * sample_z
                generate_data = VAE.generate_sample(sample_z)
                generate_data = generate_data[0, :, :, :, 0]

                # EUDT
                generate_data = generate_data.astype(np.float32)
                eudt_image = sitk.GetImageFromArray(generate_data)
                eudt_image.SetSpacing([1, 1, 1])
                eudt_image.SetOrigin([0, 0, 0])

                # label
                label = np.where(generate_data > 0.5, 0, 1)
                label = label.astype(np.int16)
                label_image = sitk.GetImageFromArray(label)
                label_image.SetSpacing([1, 1, 1])
                label_image.SetOrigin([0, 0, 0])

                io.write_mhd_and_raw(
                    label_image, '{}.mhd'.format(
                        os.path.join(FLAGS.outdir, '2_dim',
                                     str(i) + '_' + str(j))))
Example 8
def main():
    parser = argparse.ArgumentParser(
        description='py, test_data_txt, model, outdir')

    parser.add_argument('--test_data_txt', '-i1', default='')

    parser.add_argument('--model', '-i2', default='./model_{}'.format(50000))

    parser.add_argument('--outdir', '-i3', default='')

    args = parser.parse_args()

    # check folder
    if not (os.path.exists(args.outdir)):
        os.makedirs(args.outdir)

    # tf flag
    flags = tf.flags
    flags.DEFINE_float("beta", 0.1, "hyperparameter beta")
    flags.DEFINE_integer("num_of_test", 100, "number of test data")
    flags.DEFINE_integer("batch_size", 1, "batch size")
    flags.DEFINE_integer("latent_dim", 2, "latent dim")
    flags.DEFINE_list("image_size", [512, 512, 1], "image size")
    FLAGS = flags.FLAGS

    # read list
    test_data_list = io.load_list(args.test_data_txt)

    # test step
    test_step = FLAGS.num_of_test // FLAGS.batch_size
    if FLAGS.num_of_test % FLAGS.batch_size != 0:
        test_step += 1

    # load test data
    test_set = tf.data.TFRecordDataset(test_data_list)
    test_set = test_set.map(
        lambda x: _parse_function(x, image_size=FLAGS.image_size),
        num_parallel_calls=os.cpu_count())
    test_set = test_set.batch(FLAGS.batch_size)
    test_iter = test_set.make_one_shot_iterator()
    test_data = test_iter.get_next()

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config) as sess:

        # set network
        kwargs = {
            'sess': sess,
            'outdir': args.outdir,
            'beta': FLAGS.beta,
            'latent_dim': FLAGS.latent_dim,
            'batch_size': FLAGS.batch_size,
            'image_size': FLAGS.image_size,
            'encoder': cnn_encoder,
            'decoder': cnn_decoder
        }
        VAE = Variational_Autoencoder(**kwargs)

        sess.run(init_op)

        # testing
        VAE.restore_model(args.model)
        tbar = tqdm(range(test_step), ascii=True)
        preds = []
        ori = []
        for k in tbar:
            test_data_batch = sess.run(test_data)
            ori_single = test_data_batch
            preds_single = VAE.reconstruction_image(ori_single)
            preds_single = preds_single[0, :, :, 0]
            ori_single = ori_single[0, :, :, 0]

            preds.append(preds_single)
            ori.append(ori_single)

        # label and Jaccard index
        ji = []
        for j in range(len(preds)):

            # EUDT
            eudt_image = sitk.GetImageFromArray(preds[j])
            eudt_image.SetSpacing([1, 1])
            eudt_image.SetOrigin([0, 0])

            label = np.where(preds[j] > 0, 0, 1)
            label_image = sitk.GetImageFromArray(label)
            label_image.SetSpacing([1, 1])
            label_image.SetOrigin([0, 0])

            ori_label = np.where(ori[j] > 0, 0, 1)
            ori_label_image = sitk.GetImageFromArray(ori_label)
            ori_label_image.SetSpacing([1, 1])
            ori_label_image.SetOrigin([0, 0])

            # calculate ji
            ji.append(utils.jaccard(label, ori_label))

            # output image
            io.write_mhd_and_raw(
                eudt_image, '{}.mhd'.format(
                    os.path.join(args.outdir, 'EUDT', 'recon_{}'.format(j))))
            io.write_mhd_and_raw(
                label_image, '{}.mhd'.format(
                    os.path.join(args.outdir, 'label', 'recon_{}'.format(j))))

    generalization = np.mean(ji)
    print('generalization = %f' % generalization)

    # output csv file
    with open(os.path.join(args.outdir, 'generalization.csv'), 'w',
              newline='') as file:
        writer = csv.writer(file)
        writer.writerows(ji)
        writer.writerow(['generalization= ', generalization])
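
The TFRecord pipelines above map a _parse_function over the dataset without showing it. A minimal TensorFlow 1.x sketch, assuming each record stores one flat float image under a hypothetical 'img' feature key:

import numpy as np
import tensorflow as tf


def _parse_function(example_proto, image_size):
    # 'img' is an assumed key; adjust to the actual TFRecord schema
    features = {'img': tf.FixedLenFeature([np.prod(image_size)], tf.float32)}
    parsed = tf.parse_single_example(example_proto, features)
    return tf.reshape(parsed['img'], image_size)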
Example 9
def main(argv):

    # turn off log message
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.FATAL)

    # check folder
    if not os.path.exists(FLAGS.dir):
        raise Exception("model directory does not exist!")
    if not os.path.exists(os.path.join(FLAGS.dir, 'dice')):
        os.makedirs(os.path.join(FLAGS.dir, 'dice'))

    # get ground truth list
    ground_truth_list = io.load_list(FLAGS.ground_truth)

    # load ground truth
    ground_truth = io.load_data_from_path(ground_truth_list, dtype='int32')

    # get tfrecord list
    test_data_list = glob.glob(FLAGS.indir + '/*')

    # load test data
    test_set = tf.data.TFRecordDataset(test_data_list)
    test_set = test_set.map(lambda x: utils._parse_function_val_test(
        x, image_size=FLAGS.image_size),
                            num_parallel_calls=os.cpu_count())
    test_set = test_set.repeat()
    test_set = test_set.batch(FLAGS.batch_size)
    test_iter = test_set.make_one_shot_iterator()
    test_data = test_iter.get_next()

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:
        # set network
        kwargs = {
            'sess': sess,
            'latent_dim': FLAGS.latent_dim,
            'scale_lambda': FLAGS.scale_lambda,
            'scale_kappa': FLAGS.scale_kappa,
            'scale_psi': FLAGS.scale_psi,
            'image_size': FLAGS.image_size,
            'points_num': FLAGS.points_num,
            'k_size': FLAGS.k_size,
            'encoder_layer': encoder_layer,
            'points_encoder_layer': points_encoder_layer,
            'generator_layer': generator_layer,
            'discriminator_layer': discriminator_layer,
            'code_discriminator_layer': code_discriminator_layer,
            'is_training': False
        }

        Model = conditional_alphaGAN(**kwargs)

        sess.run(init_op)

        # print parameters
        utils.cal_parameter()

        # test
        dice_list = []

        Model.restore_model(FLAGS.dir +
                            '/model/model_{}'.format(FLAGS.model_index))
        for i in range(FLAGS.num_of_test):
            _, test_points_batch, _ = sess.run(test_data)

            np.random.seed(4)  # same z sequence for every test case

            tbar = tqdm(range(FLAGS.num_of_generate // FLAGS.batch_size),
                        ascii=True)
            for j in tbar:

                z = np.random.normal(0.,
                                     1.,
                                     size=[FLAGS.batch_size, FLAGS.latent_dim])
                # z = utils.truncated_noise_sample(FLAGS.batch_size, FLAGS.latent_dim, truncation=2.0)
                generate_batch = Model.generate_sample(z, test_points_batch)

                # save logodds
                generate_batch_ = np.asarray(generate_batch)
                generate_batch_ = generate_batch_[0, :, :, :]
                for image_index in range(generate_batch_.shape[0]):
                    gen = generate_batch_[image_index][:, :, :, 0]
                    io.write_mhd_and_raw(
                        gen,
                        '{}.mhd'.format(
                            os.path.join(
                                FLAGS.dir, 'dice', '{}'.format(i),
                                '{}'.format(j * FLAGS.batch_size +
                                            image_index))),
                        spacing=[1, 1, 1],
                        origin=[0, 0, 0],
                        compress=True)

                if j == 0:
                    data = np.asarray(generate_batch)[0]
                    label = np.where(data > 0.5, 0, 1)
                    label = label.astype(np.int8)
                    pa = np.sum(label, axis=0)
                else:
                    data = np.asarray(generate_batch)[0]
                    label_ = np.where(data > 0.5, 0, 1)
                    label_ = label_.astype(np.int8)
                    pa = pa + np.sum(label_, axis=0)

            pa = pa / float(FLAGS.num_of_generate)
            pa = pa.astype(np.float32)

            # output image
            io.write_mhd_and_raw(pa,
                                 '{}_{}.mhd'.format(
                                     os.path.join(FLAGS.dir, 'dice', 'PA'), i),
                                 spacing=[1, 1, 1],
                                 origin=[0, 0, 0],
                                 compress=True)

            # dice
            gt = ground_truth[i]
            gt = gt.astype(np.float32)
            dice = utils.dice_coef(gt, pa)
            dice_list.append([round(dice, 6)])
            print(dice)

        print('dice = %f' % np.mean(dice_list))
        # write csv
        io.write_csv(
            dice_list,
            os.path.join(FLAGS.dir, 'dice',
                         'dice_{}.csv'.format(FLAGS.model_index)), 'dice')
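
utils.dice_coef compares the binary ground truth with the soft probabilistic atlas pa, so it is presumably a soft Dice coefficient. A minimal sketch under that reading:

import numpy as np


def dice_coef(gt, pa, eps=1e-7):
    # soft Dice: 2*|X.Y| / (|X| + |Y|), eps guards empty inputs
    gt = np.asarray(gt, dtype=np.float32)
    pa = np.asarray(pa, dtype=np.float32)
    return (2.0 * np.sum(gt * pa) + eps) / (np.sum(gt) + np.sum(pa) + eps)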
Example 10
def main():

    # tf flag
    flags = tf.flags
    flags.DEFINE_string(
        "test_data_txt",
        'F:/data_info/VAE_liver/set_5/TFrecord/fold_1/test.txt',
        "test data txt")
    flags.DEFINE_string(
        "indir",
        'G:/experiment_result/liver/VAE/set_5/down/64/alpha_0.1/fold_1/VAE/axis_5/beta_7',
        "input dir")
    flags.DEFINE_string(
        "outdir",
        'G:/experiment_result/liver/VAE/set_5/down/64/alpha_0.1/fold_1/VAE/axis_5/beta_7/rec',
        "outdir")
    flags.DEFINE_integer("model_index", 3300, "index of model")
    flags.DEFINE_string("gpu_index", "0", "GPU-index")
    flags.DEFINE_float("beta", 1.0, "hyperparameter beta")
    flags.DEFINE_integer("num_of_test", 75, "number of test data")
    flags.DEFINE_integer("batch_size", 1, "batch size")
    flags.DEFINE_integer("latent_dim", 5, "latent dim")
    flags.DEFINE_list("image_size", [56, 72, 88, 1], "image size")
    FLAGS = flags.FLAGS

    # check folder
    if not (os.path.exists(FLAGS.outdir)):
        os.makedirs(FLAGS.outdir)

    # read list
    test_data_list = io.load_list(FLAGS.test_data_txt)

    # test step
    test_step = FLAGS.num_of_test // FLAGS.batch_size
    if FLAGS.num_of_test % FLAGS.batch_size != 0:
        test_step += 1

    # load test data
    test_set = tf.data.TFRecordDataset(test_data_list, compression_type='GZIP')
    test_set = test_set.map(
        lambda x: utils._parse_function(x, image_size=FLAGS.image_size),
        num_parallel_calls=os.cpu_count())
    test_set = test_set.batch(FLAGS.batch_size)
    test_iter = test_set.make_one_shot_iterator()
    test_data = test_iter.get_next()

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:
        # set network
        kwargs = {
            'sess': sess,
            'outdir': FLAGS.outdir,
            'beta': FLAGS.beta,
            'latent_dim': FLAGS.latent_dim,
            'batch_size': FLAGS.batch_size,
            'image_size': FLAGS.image_size,
            'encoder': encoder_resblock_bn,
            'decoder': decoder_resblock_bn,
            'downsampling': down_sampling,
            'upsampling': up_sampling,
            'is_training': False,
            'is_down': False
        }
        VAE = Variational_Autoencoder(**kwargs)

        sess.run(init_op)

        # testing
        VAE.restore_model(
            os.path.join(FLAGS.indir, 'model',
                         'model_{}'.format(FLAGS.model_index)))
        tbar = tqdm(range(test_step), ascii=True)
        preds = []
        ori = []
        ji = []
        for k in tbar:
            test_data_batch = sess.run(test_data)
            ori_single = test_data_batch
            preds_single = VAE.reconstruction_image(ori_single)
            preds_single = preds_single[0, :, :, :, 0]
            ori_single = ori_single[0, :, :, :, 0]

            preds.append(preds_single)
            ori.append(ori_single)

        # label and Jaccard index for every reconstruction
        for j in range(len(preds)):

            # EUDT
            eudt_image = sitk.GetImageFromArray(preds[j])
            eudt_image.SetSpacing([1, 1, 1])
            eudt_image.SetOrigin([0, 0, 0])

            label = np.where(preds[j] > 0.5, 0, 1)
            # label = np.where(preds[j] > 0.5, 1, 0.5)
            label = label.astype(np.int16)
            label_image = sitk.GetImageFromArray(label)
            label_image.SetSpacing([1, 1, 1])
            label_image.SetOrigin([0, 0, 0])

            ori_label = np.where(ori[j] > 0.5, 0, 1)
            ori_label_image = sitk.GetImageFromArray(ori_label)
            ori_label_image.SetSpacing([1, 1, 1])
            ori_label_image.SetOrigin([0, 0, 0])

            # calculate ji
            ji.append([utils.jaccard(label, ori_label)])

            # output image
            io.write_mhd_and_raw(
                label_image, '{}.mhd'.format(
                    os.path.join(FLAGS.outdir, 'label',
                                 'recon_{}'.format(j))))

        generalization = np.mean(ji)
        print('generalization = %f' % generalization)

        # # output csv file
        with open(os.path.join(
                FLAGS.outdir,
                'generalization_{}.csv'.format(FLAGS.model_index)),
                  'w',
                  newline='') as file:
            writer = csv.writer(file)
            writer.writerows(ji)
            writer.writerow(['generalization= ', generalization])
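
io.load_list, used throughout to turn a .txt file into a list of file paths, is likely no more than:

def load_list(list_txt):
    # one path per line; skip blank lines
    with open(list_txt) as f:
        return [line.strip() for line in f if line.strip()]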
Example 11
def predict_gen():
    parser = argparse.ArgumentParser(
        description='py, test_data_list, name_list, outdir')
    parser.add_argument('--test_data_list',
                        '-i1',
                        default='',
                        help='test data')
    parser.add_argument('--truth_data_txt',
                        '-i2',
                        default='',
                        help='name list')
    parser.add_argument('--name_list', '-i3', default='', help='name list')
    parser.add_argument('--encoder_model', '-i4', default='', help='model')
    parser.add_argument('--decoder_model', '-i5', default='', help='model')
    parser.add_argument('--outdir', '-i6', default='', help='outdir')
    args = parser.parse_args()

    if not (os.path.exists(args.outdir)):
        os.makedirs(args.outdir)

    latent_dim = 32

    # load name_list
    name_list = []
    with open(args.name_list) as paths_file:
        for line in paths_file:
            line = line.split()
            if not line: continue
            name_list.append(line[:])

    print('number of test data : {}'.format(len(name_list)))

    # load test data
    test_data = io.load_matrix_data(args.test_data_list, 'float32')
    test_data = np.expand_dims(test_data, axis=4)
    print(test_data.shape)

    # load ground truth
    ground_truth = io.load_matrix_data(args.truth_data_txt, 'int32')
    print(ground_truth.shape)

    # get image size
    image_size = []
    image_size.extend([
        list(test_data.shape)[1],
        list(test_data.shape)[2],
        list(test_data.shape)[3]
    ])
    print(image_size)

    # # set network
    # network = Variational_auto_encoder(latent_dim, *image_size)
    # model = network.get_vae()
    # model.load_weights(args.model)

    # predict
    # preds = model.predict(test_data, 1)

    # network = Variational_auto_encoder(latent_dim, *image_size)
    # encoder = network.get_encoder()
    # encoder.load_weights(args.encoder_model)
    encoder = load_model(args.encoder_model)
    decoder = load_model(args.decoder_model)

    encoder_result = encoder.predict(test_data, 1)
    preds = decoder.predict(encoder_result, 1)

    # reshape
    preds = preds[:, :, :, :, 0]
    print(preds.shape)

    ji = []
    for i in range(preds.shape[0]):
        # EUDT
        eudt_image = sitk.GetImageFromArray(preds[i])
        eudt_image.SetSpacing([1, 1, 1])
        eudt_image.SetOrigin([0, 0, 0])

        # label
        label = np.where(preds[i] > 0, 0, 1)
        label_image = sitk.GetImageFromArray(label)
        label_image.SetSpacing([1, 1, 1])
        label_image.SetOrigin([0, 0, 0])

        # calculate ji
        ji.append([jaccard(label, ground_truth[i])])

        # output image
        io.write_mhd_and_raw(
            eudt_image,
            '{}.mhd'.format(os.path.join(args.outdir, 'EUDT', *name_list[i])))
        io.write_mhd_and_raw(
            label_image,
            '{}.mhd'.format(os.path.join(args.outdir, 'label', *name_list[i])))

    generalization = np.mean(ji)
    print('generalization = %f' % generalization)

    # output csv file
    with open(os.path.join(args.outdir, 'generalization.csv'), 'w',
              newline='') as file:
        writer = csv.writer(file)
        writer.writerows(ji)
        writer.writerow(['generalization= ', generalization])
Example 12
def main(argv):

    # turn off log message
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.FATAL)

    # check folder
    if not os.path.exists(FLAGS.dir):
        raise Exception("model directory does not exist!")
    if not os.path.exists(os.path.join(FLAGS.dir, 'generalization')):
        os.makedirs(os.path.join(FLAGS.dir, 'generalization'))

    # get tfrecord list
    test_data_list = glob.glob(FLAGS.indir + '/*')

    # test step
    test_step = FLAGS.num_of_test // FLAGS.batch_size
    if FLAGS.num_of_test % FLAGS.batch_size != 0:
        test_step += 1

    # load test data
    test_set = tf.data.TFRecordDataset(test_data_list)
    test_set = test_set.map(lambda x: utils._parse_function_val_test(
        x, image_size=FLAGS.image_size),
                            num_parallel_calls=os.cpu_count())
    test_set = test_set.batch(FLAGS.batch_size)
    test_iter = test_set.make_one_shot_iterator()
    test_data = test_iter.get_next()

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:
        # set network
        kwargs = {
            'sess': sess,
            'latent_dim': FLAGS.latent_dim,
            'scale_lambda': FLAGS.scale_lambda,
            'scale_kappa': FLAGS.scale_kappa,
            'scale_psi': FLAGS.scale_psi,
            'image_size': FLAGS.image_size,
            'points_num': FLAGS.points_num,
            'k_size': FLAGS.k_size,
            'encoder_layer': encoder_layer,
            'points_encoder_layer': points_encoder_layer,
            'generator_layer': generator_layer,
            'discriminator_layer': discriminator_layer,
            'code_discriminator_layer': code_discriminator_layer,
            'is_training': False
        }

        Model = conditional_alphaGAN(**kwargs)

        sess.run(init_op)

        # print parameters
        utils.cal_parameter()

        # test
        Model.restore_model(FLAGS.dir +
                            '/model/model_{}'.format(FLAGS.model_index))
        tbar = tqdm(range(test_step), ascii=True)
        for i in tbar:
            test_image_batch, test_points_batch, test_label_batch = sess.run(
                test_data)
            reconstruction_batch = Model.reconstruction(
                test_image_batch, test_points_batch)

            # dilation of points
            test_points_batch = tf.keras.layers.MaxPooling3D(
                pool_size=5, strides=1, padding='same')(test_points_batch)
            test_points_batch = test_points_batch.eval()
            test_points_batch = test_points_batch * 2  # scaling

            if i == 0:
                test_label = np.asarray(test_label_batch)
                reconstruction = np.asarray(reconstruction_batch)[0]
                points = np.asarray(test_points_batch)
            else:
                test_label = np.concatenate(
                    (test_label, np.asarray(test_label_batch)), axis=0)
                reconstruction = np.concatenate(
                    (reconstruction, np.asarray(reconstruction_batch)[0]),
                    axis=0)
                points = np.concatenate((points, np.array(test_points_batch)),
                                        axis=0)

        # calculate Jaccard Index and output images
        generalization = []
        tbar = tqdm(range(reconstruction.shape[0]), ascii=True)
        for i in tbar:
            test_label_single = test_label[i][:, :, :, 0]
            reconstruction_single = reconstruction[i][:, :, :, 0]
            points_single = points[i][:, :, :, 0]

            # label
            rec_label = np.where(reconstruction_single > 0.5, 0, 1)
            rec_label = rec_label.astype(np.int8)

            # calculate ji
            generalization.append(
                [utils.jaccard(rec_label, test_label_single)])

            # label and points
            label_and_points = rec_label + points_single

            rec_label = rec_label.astype(np.int8)
            label_and_points = label_and_points.astype(np.int8)

            # output image
            io.write_mhd_and_raw(reconstruction_single,
                                 '{}.mhd'.format(
                                     os.path.join(FLAGS.dir, 'generalization',
                                                  'logodds',
                                                  'generate_{}'.format(i))),
                                 spacing=[1, 1, 1],
                                 origin=[0, 0, 0],
                                 compress=True)
            io.write_mhd_and_raw(rec_label,
                                 '{}.mhd'.format(
                                     os.path.join(FLAGS.dir, 'generalization',
                                                  'predict',
                                                  'recon_{}'.format(i))),
                                 spacing=[1, 1, 1],
                                 origin=[0, 0, 0],
                                 compress=True)
            io.write_mhd_and_raw(label_and_points,
                                 '{}.mhd'.format(
                                     os.path.join(FLAGS.dir, 'generalization',
                                                  'label_and_points',
                                                  'generate_{}'.format(i))),
                                 spacing=[1, 1, 1],
                                 origin=[0, 0, 0],
                                 compress=True)

        print('generalization = %f' % np.mean(generalization))

        # write csv
        io.write_csv(
            generalization,
            os.path.join(FLAGS.dir, 'generalization',
                         'generalization_val_{}.csv'.format(
                             FLAGS.model_index)), 'generalization')
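
Examples 12 and 13 dilate the point masks by calling a fresh MaxPooling3D layer inside the session loop, which keeps adding ops to the TensorFlow 1.x graph on every iteration. A sketch of an equivalent dilation done directly on the numpy batch with scipy, assuming batches of shape [batch, z, y, x, 1] (boundary handling differs slightly from Keras 'same' padding):

import numpy as np
from scipy import ndimage


def dilate_points(points_batch, pool_size=5):
    # grey dilation with a cubic footprint ~ max-pooling with stride 1
    out = np.empty_like(points_batch)
    for b in range(points_batch.shape[0]):
        out[b, :, :, :, 0] = ndimage.grey_dilation(
            points_batch[b, :, :, :, 0], size=(pool_size,) * 3)
    return out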
Example 13
def main(argv):

    # turn off log message
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.FATAL)

    # check folder
    if not os.path.exists(FLAGS.dir):
        raise Exception("model directory does not exist!")
    if not os.path.exists(os.path.join(FLAGS.dir, 'specificity')):
        os.makedirs(os.path.join(FLAGS.dir, 'specificity'))

    # load ground truth
    ground_truth_list = glob.glob(FLAGS.ground_truth + '/*.mhd')
    ground_truth = io.load_data_from_path(ground_truth_list, dtype='int32')

    # get tfrecord list
    test_data_list = glob.glob(FLAGS.indir + '/*')

    # load test data
    test_set = tf.data.TFRecordDataset(test_data_list)
    test_set = test_set.map(lambda x: utils._parse_function_val_test(
        x, image_size=FLAGS.image_size),
                            num_parallel_calls=os.cpu_count())
    # test_set = test_set.shuffle(buffer_size=FLAGS.num_of_test)
    test_set = test_set.repeat()
    test_set = test_set.batch(FLAGS.batch_size)
    test_iter = test_set.make_one_shot_iterator()
    test_data = test_iter.get_next()

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:
        # set network
        kwargs = {
            'sess': sess,
            'latent_dim': FLAGS.latent_dim,
            'scale_lambda': FLAGS.scale_lambda,
            'scale_kappa': FLAGS.scale_kappa,
            'scale_psi': FLAGS.scale_psi,
            'image_size': FLAGS.image_size,
            'points_num': FLAGS.points_num,
            'k_size': FLAGS.k_size,
            'encoder_layer': encoder_layer,
            'points_encoder_layer': points_encoder_layer,
            'generator_layer': generator_layer,
            'discriminator_layer': discriminator_layer,
            'code_discriminator_layer': code_discriminator_layer,
            'is_training': False
        }

        Model = conditional_alphaGAN(**kwargs)

        sess.run(init_op)

        # print parameters
        utils.cal_parameter()

        # test
        Model.restore_model(FLAGS.dir +
                            '/model/model_{}'.format(FLAGS.model_index))

        # seed once, outside the loop; seeding inside it would draw the
        # same z on every iteration
        np.random.seed(4)

        tbar = tqdm(range(FLAGS.num_of_generate // FLAGS.batch_size),
                    ascii=True)
        for i in tbar:
            z = np.random.normal(0.,
                                 1.,
                                 size=[FLAGS.batch_size, FLAGS.latent_dim])

            _, test_points_batch, _ = sess.run(test_data)
            generate_batch = Model.generate_sample(z, test_points_batch)

            # dilation of points
            test_points_dilate = tf.keras.layers.MaxPooling3D(
                pool_size=3, strides=1, padding='same')(test_points_batch)
            test_points_dilate = test_points_dilate.eval()
            test_points_dilate = test_points_dilate * 2  # scaling

            if i == 0:
                samples = np.asarray(generate_batch)[0]
                points = np.asarray(test_points_dilate)
            else:
                samples = np.concatenate(
                    (samples, np.asarray(generate_batch)[0]), axis=0)
                points = np.concatenate(
                    (points, np.asarray(test_points_dilate)), axis=0)

        # calculate Jaccard Index and output images
        specificity = []
        tbar = tqdm(range(samples.shape[0]), ascii=True)
        for i in tbar:
            gen = samples[i][:, :, :, 0]
            points_single = points[i][:, :, :, 0]

            # label
            gen_label = np.where(gen > 0.5, 0, 1)

            # calculate ji
            case_max_ji = 0.
            for image_index in range(ground_truth.shape[0]):
                ji = utils.jaccard(gen_label, ground_truth[image_index])
                if ji > case_max_ji:
                    case_max_ji = ji
            specificity.append([case_max_ji])

            # label and points
            label_and_points = gen_label + points_single

            gen_label = gen_label.astype(np.int8)
            label_and_points = label_and_points.astype(np.int8)

            # output image
            io.write_mhd_and_raw(gen,
                                 '{}.mhd'.format(
                                     os.path.join(FLAGS.dir, 'specificity',
                                                  'logodds',
                                                  'generate_{}'.format(i))),
                                 spacing=[1, 1, 1],
                                 origin=[0, 0, 0],
                                 compress=True)
            io.write_mhd_and_raw(gen_label,
                                 '{}.mhd'.format(
                                     os.path.join(FLAGS.dir, 'specificity',
                                                  'label',
                                                  'generate_{}'.format(i))),
                                 spacing=[1, 1, 1],
                                 origin=[0, 0, 0],
                                 compress=True)
            io.write_mhd_and_raw(label_and_points,
                                 '{}.mhd'.format(
                                     os.path.join(FLAGS.dir, 'specificity',
                                                  'label_and_points',
                                                  'generate_{}'.format(i))),
                                 spacing=[1, 1, 1],
                                 origin=[0, 0, 0],
                                 compress=True)
        print('specificity = %f' % np.mean(specificity))

        # write csv into the 'specificity' folder created above
        io.write_csv(
            specificity,
            os.path.join(FLAGS.dir, 'specificity',
                         'specificity_{}.csv'.format(FLAGS.model_index)),
            'specificity')
Example 14
def predict():
    parser = argparse.ArgumentParser(
        description='py, test_data_list, name_list, outdir')
    parser.add_argument('--test_data_list',
                        '-i1',
                        default='',
                        help='test data')
    parser.add_argument('--name_list', '-i2', default='', help='name list')
    parser.add_argument('--model', '-i3', default='', help='model')
    parser.add_argument('--outdir', '-i4', default='', help='outdir')
    args = parser.parse_args()

    if not (os.path.exists(args.outdir)):
        os.mkdir(args.outdir)

    # load name_list
    name_list = []
    with open(args.name_list) as paths_file:
        for line in paths_file:
            line = line.split()
            if not line: continue
            name_list.append(line[:])

    print('number of test data : {}'.format(len(name_list)))

    test_data = io.load_matrix_data(args.test_data_list, 'float32')
    test_data = np.expand_dims(test_data, axis=4)
    print(test_data.shape)

    image_size = []
    image_size.extend([
        list(test_data.shape)[1],
        list(test_data.shape)[2],
        list(test_data.shape)[3]
    ])
    print(image_size)

    # set network
    network = Autoencoder(*image_size)
    model = network.model()
    model.load_weights(args.model)

    preds = model.predict(test_data, 1)
    preds = preds[:, :, :, :, 0]

    print(preds.shape)

    for i in range(preds.shape[0]):
        # EUDT
        eudt_image = sitk.GetImageFromArray(preds[i])
        eudt_image.SetSpacing([1, 1, 1])
        eudt_image.SetOrigin([0, 0, 0])

        # label
        label = np.where(preds[i] > 0, 0, 1)
        label_image = sitk.GetImageFromArray(label)
        label_image.SetSpacing([1, 1, 1])
        label_image.SetOrigin([0, 0, 0])

        io.write_mhd_and_raw(
            eudt_image,
            '{}.mhd'.format(os.path.join(args.outdir, 'EUDT', *name_list[i])))
        io.write_mhd_and_raw(
            label_image,
            '{}.mhd'.format(os.path.join(args.outdir, 'label', *name_list[i])))
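
Finally, note that several examples write into subfolders such as 'EUDT' and 'label' without creating them first, so they depend on write_mhd_and_raw (or a previous run) having made the directories. If the real helper does not do this, a small guard called once per output root avoids the crash; the subfolder names here simply mirror the examples:

import os


def ensure_output_dirs(outdir, subdirs=('EUDT', 'label')):
    # create each output subfolder if it does not already exist
    for name in subdirs:
        os.makedirs(os.path.join(outdir, name), exist_ok=True)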