Example #1
def main(argv):
    # check folder
    if not os.path.exists(FLAGS.outdir):
        os.makedirs(FLAGS.outdir)

    # load data set
    data_set = io.load_matrix_data(FLAGS.data_list, 'float32')

    # shuffle
    np.random.shuffle(data_set)

    num_per_tfrecord = int(FLAGS.num_per_tfrecord)
    num_of_total_image = data_set.shape[0]

    if (num_of_total_image % num_per_tfrecord != 0):
        num_of_recordfile = num_of_total_image // num_per_tfrecord + 1
    else:
        num_of_recordfile = num_of_total_image // num_per_tfrecord

    num_per_tfrecord_final = num_of_total_image - num_per_tfrecord * (num_of_recordfile - 1)

    print('number of TFRecord files: {}'.format(num_of_recordfile))

    # write TFrecord
    for i in range(num_of_recordfile):
        tfrecord_filename = os.path.join(FLAGS.outdir, 'recordfile_{}'.format(i + 1))
        # options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.GZIP)  # compress
        writer = tf.python_io.TFRecordWriter(tfrecord_filename)

        print('Writing recordfile_{}'.format(i+1))

        if i == num_of_recordfile - 1:
            loop_buf = num_per_tfrecord_final
        else:
            loop_buf = num_per_tfrecord

        for image_index in range(loop_buf):
            image = data_set[image_index + i*num_per_tfrecord].flatten()

            example = tf.train.Example(
                features=tf.train.Features(feature={
                    'img_raw': tf.train.Feature(float_list=tf.train.FloatList(value=image)),
                    'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[0])),
                    'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=FLAGS.image_size))
                }))

            writer.write(example.SerializeToString())
        writer.close()
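
For completeness, a minimal sketch of reading these records back with the same TF 1.x API; the feature keys match the writer above, but this reader is an addition, not part of the original example.

import numpy as np
import tensorflow as tf

def read_tfrecord(path):
    # decode each serialized record directly from the protobuf
    for record in tf.python_io.tf_record_iterator(path):
        example = tf.train.Example.FromString(record)
        feature = example.features.feature
        shape = list(feature['shape'].int64_list.value)
        image = np.array(feature['img_raw'].float_list.value, dtype='float32')
        label = feature['label'].int64_list.value[0]
        yield image.reshape(shape), label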
Example #2
def plot_latent_space():
    parser = argparse.ArgumentParser(
        description='py, model, train_data_list, outdir')
    parser.add_argument('--model', '-i1', default='', help='model')
    parser.add_argument('--train_data_list',
                        '-i2',
                        default='',
                        help='train data list')
    parser.add_argument('--outdir', '-i3', default='', help='outdir')
    args = parser.parse_args()

    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    latent_dim = 64

    # load train data
    train_data = io.load_matrix_data(args.train_data_list, 'float32')
    train_data = np.expand_dims(train_data, axis=-1)
    print(train_data.shape)
    image_size = list(train_data.shape[1:4])

    # set network
    # network = Variational_auto_encoder(latent_dim, *image_size)
    # model = network.get_vae()
    # model.load_weights(args.model)
    #
    # mean_model = Model(inputs=model.input, outputs=model.get_layer('lambda_1').output)
    #
    # latent_space = mean_model.predict(train_data, batch_size=1)

    encoder = load_model(args.model)
    latent_space = encoder.predict(train_data, batch_size=1)

    fig = plt.figure(figsize=(8, 6))
    plt.scatter(latent_space[:, 0], latent_space[:, 1])
    plt.title('latent distribution')
    plt.xlabel('dim_1')
    plt.ylabel('dim_2')
    # pickle the whole figure so the plot can be reopened later
    with open(os.path.join(args.outdir, 'latent_distribution.pickle'),
              'wb') as f:
        pickle.dump(fig, f)
    plt.show()
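
The pickled figure can be reopened in a later session; a minimal sketch, assuming the whole Figure object was pickled as above:

import pickle
import matplotlib.pyplot as plt

with open('latent_distribution.pickle', 'rb') as f:
    fig = pickle.load(f)  # restores the Figure together with its scatter plot
plt.show()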
Example #3
def main():
    parser = argparse.ArgumentParser(
        description='py, ground_truth_txt, model, outdir')

    parser.add_argument('--ground_truth_txt', '-i1', default='')

    parser.add_argument('--model', '-i2', default='./model_{}'.format(50000))

    parser.add_argument('--outdir', '-i3', default='')

    args = parser.parse_args()

    # check folder
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # tf flag
    flags = tf.flags
    flags.DEFINE_float("beta", 0.1, "hyperparameter beta")
    flags.DEFINE_integer("num_of_generate", 1000, "number of generate data")
    flags.DEFINE_integer("batch_size", 1, "batch size")
    flags.DEFINE_integer("latent_dim", 2, "latent dim")
    flags.DEFINE_list("image_size", [512, 512, 1], "image size")
    FLAGS = flags.FLAGS

    # load ground truth
    ground_truth = io.load_matrix_data(args.ground_truth_txt, 'int32')
    print(ground_truth.shape)

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config) as sess:

        # set network
        kwargs = {
            'sess': sess,
            'outdir': args.outdir,
            'beta': FLAGS.beta,
            'latent_dim': FLAGS.latent_dim,
            'batch_size': FLAGS.batch_size,
            'image_size': FLAGS.image_size,
            'encoder': cnn_encoder,
            'decoder': cnn_decoder
        }
        VAE = Variational_Autoencoder(**kwargs)

        sess.run(init_op)

        # testing
        VAE.restore_model(args.model)
        tbar = tqdm(range(FLAGS.num_of_generate), ascii=True)
        specificity = []
        for i in tbar:
            sample_z = np.random.normal(0, 1.0, (1, FLAGS.latent_dim))
            generate_data = VAE.generate_sample(sample_z)
            generate_data = generate_data[0, :, :, 0]

            # EUDT
            eudt_image = sitk.GetImageFromArray(generate_data)
            eudt_image.SetSpacing([1, 1])
            eudt_image.SetOrigin([0, 0])

            # label
            label = np.where(generate_data > 0, 0, 1)
            label_image = sitk.GetImageFromArray(label)
            label_image.SetSpacing([1, 1])
            label_image.SetOrigin([0, 0])

            # calculate ji
            case_max_ji = 0.
            for image_index in range(ground_truth.shape[0]):
                ji = utils.jaccard(label, ground_truth[image_index])
                if ji > case_max_ji:
                    case_max_ji = ji
            specificity.append([case_max_ji])

            # output image
            io.write_mhd_and_raw(
                eudt_image,
                '{}.mhd'.format(os.path.join(args.outdir, 'EUDT', str(i + 1))))
            io.write_mhd_and_raw(
                label_image,
                '{}.mhd'.format(os.path.join(args.outdir, 'label',
                                             str(i + 1))))

    print('specificity = %f' % np.mean(specificity))

    # output csv file
    with open(os.path.join(args.outdir, 'specificity.csv'), 'w',
              newline='') as file:
        writer = csv.writer(file)
        writer.writerows(specificity)
        writer.writerow(['specificity:', np.mean(specificity)])
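
utils.jaccard is not shown in these examples; a plausible implementation of the Jaccard index (JI) over binary label arrays, given here as an assumption rather than the project's actual code, would be:

import numpy as np

def jaccard(a, b):
    # JI = intersection / union of the boolean masks
    a = np.asarray(a).astype(bool)
    b = np.asarray(b).astype(bool)
    union = np.logical_or(a, b).sum()
    if union == 0:
        return 0.0
    return np.logical_and(a, b).sum() / union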
Example #4
def main():
    parser = argparse.ArgumentParser(description='py, train_data_txt, validation_data_txt, outdir')

    parser.add_argument('--train_data_txt', '-i1', default='',
                        help='train data list')

    parser.add_argument('--validation_data_txt', '-i2', default='',
                        help='validation data list')

    parser.add_argument('--outdir', '-i3', default='', help='outdir')

    args = parser.parse_args()

    # check folder
    if not os.path.exists(os.path.join(args.outdir, 'encoder_model')):
        os.makedirs(os.path.join(args.outdir, 'encoder_model'))
    if not os.path.exists(os.path.join(args.outdir, 'decoder_model')):
        os.makedirs(os.path.join(args.outdir, 'decoder_model'))

    # define
    batch_size = 3
    epoch = 200
    latent_dim = 2

    # load train data
    train_data = io.load_matrix_data(args.train_data_txt, 'float32')
    train_data = np.expand_dims(train_data, axis=4)

    # load validation data
    val_data = io.load_matrix_data(args.validation_data_txt, 'float32')
    val_data = np.expand_dims(val_data, axis=4)


    print('number of training: {}'.format(len(train_data)))
    print('size of training: {}'.format(train_data.shape))
    print('number of validation: {}'.format(len(val_data)))
    print('size of validation: {}'.format(val_data.shape))

    image_size = list(train_data.shape[1:4])

    # set network
    network = Variational_auto_encoder(latent_dim, *image_size)
    model = network.get_vae()
    encoder = network.get_encoder()
    decoder = network.get_decoder()
    model.summary()
    model.compile(optimizer=Adam(lr=8e-4, beta_1=0.5, beta_2=0.9), loss=[zero_loss])

    # set data_set
    train_steps, train_data = batch_iter(train_data, train_data, batch_size)
    valid_steps, val_data = batch_iter(val_data, val_data, batch_size)

    # fit network
    history_total = []
    for epoch_index in range(epoch):
        history = model.fit_generator(train_data, steps_per_epoch=train_steps,
                                      epochs=1, validation_data=val_data,
                                      validation_steps=valid_steps, verbose=1)

        history_total.append(history)
        encoder.save(os.path.join(args.outdir, 'encoder_model', 'encoder_{}.hdf5'.format(epoch_index + 1)))
        decoder.save(os.path.join(args.outdir, 'decoder_model', 'decoder_{}.hdf5'.format(epoch_index + 1)))

    plot_history(history_total, args.outdir)
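
zero_loss is defined elsewhere in the project. A common Keras pattern it presumably follows, when the full VAE loss is attached inside the model via add_loss, is simply (an assumption, not the original definition):

from keras import backend as K

def zero_loss(y_true, y_pred):
    # the real reconstruction/KL loss is added inside the model via add_loss,
    # so the compile-time loss contributes nothing
    return K.zeros_like(y_pred)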
Example #5
def predict_spe():
    parser = argparse.ArgumentParser(
        description='py, model, truth_data_txt, outdir')
    parser.add_argument('--model', '-i1', default='', help='model')
    parser.add_argument('--truth_data_txt',
                        '-i2',
                        default='',
                        help='ground truth list')
    parser.add_argument('--outdir', '-i3', default='', help='outdir')
    args = parser.parse_args()

    if not os.path.exists(args.outdir):
        os.mkdir(args.outdir)

    latent_dim = 32
    n_gen = 1

    # load ground truth
    ground_truth = io.load_matrix_data(args.truth_data_txt, 'int32')
    print(ground_truth.shape)

    # set network
    decoder = load_model(args.model)

    specificity = []
    for i in range(n_gen):

        # # generate shape
        # sample_z = np.full(latent_dim, 0)
        # sample_z = np.array([sample_z])
        sample_z = np.random.normal(0, 1.0, (1, latent_dim))
        # print(sample_z.shape)
        preds = decoder.predict(sample_z)
        preds = preds[:, :, :, :, 0]

        # # EUDT
        eudt_image = sitk.GetImageFromArray(preds[0])
        eudt_image.SetSpacing([1, 1, 1])
        eudt_image.SetOrigin([0, 0, 0])

        # label
        label = np.where(preds[0] > 0, 0, 1)
        label_image = sitk.GetImageFromArray(label)
        label_image.SetSpacing([1, 1, 1])
        label_image.SetOrigin([0, 0, 0])

        # calculate ji
        case_max_ji = 0.
        for image_index in range(ground_truth.shape[0]):
            ji = jaccard(label, ground_truth[image_index])
            if ji > case_max_ji:
                case_max_ji = ji
        specificity.append([case_max_ji])

        # output image
        io.write_mhd_and_raw(
            eudt_image,
            '{}.mhd'.format(os.path.join(args.outdir, 'EUDT', str(i + 1))))
        io.write_mhd_and_raw(
            label_image,
            '{}.mhd'.format(os.path.join(args.outdir, 'label', str(i + 1))))

    # output csv file
    # with open(os.path.join(args.outdir, 'specificity.csv'), 'w', newline='') as file:
    #     writer = csv.writer(file)
    #     writer.writerows(specificity)
    #     writer.writerow(['specificity:', np.mean(specificity)])

    print('specificity = %f' % np.mean(specificity))
Example #6
def main():
    parser = argparse.ArgumentParser(
        description='py, train_data_txt, train_ground_truth_txt, '
        'validation_data_txt, validation_ground_truth_txt, outdir')
    parser.add_argument('--train_data_txt',
                        '-i1',
                        default='',
                        help='train data list')

    parser.add_argument('--train_ground_truth_txt',
                        '-i2',
                        default='',
                        help='train ground truth list')

    parser.add_argument('--validation_data_txt',
                        '-i3',
                        default='',
                        help='validation data list')

    parser.add_argument('--validation_ground_truth_txt',
                        '-i4',
                        default='',
                        help='validation ground truth list')

    parser.add_argument('--outdir', '-i5', default='', help='outdir')
    args = parser.parse_args()

    # check folder
    if not os.path.exists(args.outdir):
        os.mkdir(args.outdir)

    # define
    batch_size = 3
    epoch = 2500

    # load train data
    train_data = io.load_matrix_data(args.train_data_txt, 'float32')
    train_data = np.expand_dims(train_data, axis=4)

    # load train ground truth
    train_truth = io.load_matrix_data(args.train_ground_truth_txt, 'float32')
    train_truth = np.expand_dims(train_truth, axis=4)

    # load validation data
    val_data = io.load_matrix_data(args.validation_data_txt, 'float32')
    val_data = np.expand_dims(val_data, axis=4)

    # load validation ground truth
    val_truth = io.load_matrix_data(args.validation_ground_truth_txt,
                                    'float32')
    val_truth = np.expand_dims(val_truth, axis=4)

    print('number of training: {}'.format(len(train_data)))
    print('size of training: {}'.format(train_data.shape))
    print('number of validation: {}'.format(len(val_data)))
    print('size of validation: {}'.format(val_data.shape))

    image_size = list(train_data.shape[1:4])

    # set network
    network = Autoencoder(*image_size)
    model = network.model()
    model.summary()
    model.compile(optimizer='Nadam',
                  loss=losses.mean_squared_error,
                  metrics=['mse'])

    # set data_set
    train_steps, train_data = batch_iter(train_data, train_truth, batch_size)
    valid_steps, val_data = batch_iter(val_data, val_truth, batch_size)

    # fit network
    model_checkpoint = ModelCheckpoint(
        os.path.join(args.outdir, 'weights.{epoch:02d}-{val_loss:.2f}.hdf5'),
        verbose=1)

    history = model.fit_generator(train_data,
                                  steps_per_epoch=train_steps,
                                  epochs=epoch,
                                  validation_data=val_data,
                                  validation_steps=valid_steps,
                                  verbose=1,
                                  callbacks=[model_checkpoint])

    plot_history(history, args.outdir)
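
batch_iter is defined elsewhere; judging by its use with fit_generator, it returns the number of steps per epoch and an endless batch generator. A minimal sketch under that assumption:

import numpy as np

def batch_iter(inputs, targets, batch_size):
    num_samples = len(inputs)
    steps = int(np.ceil(num_samples / batch_size))

    def generator():
        while True:  # Keras generators are expected to loop indefinitely
            indices = np.random.permutation(num_samples)
            for start in range(0, num_samples, batch_size):
                batch = indices[start:start + batch_size]
                yield inputs[batch], targets[batch]

    return steps, generator()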
Example #7
def predict_gen():
    parser = argparse.ArgumentParser(
        description='py, test_data_list, truth_data_txt, name_list, '
        'encoder_model, decoder_model, outdir')
    parser.add_argument('--test_data_list',
                        '-i1',
                        default='',
                        help='test data')
    parser.add_argument('--truth_data_txt',
                        '-i2',
                        default='',
                        help='ground truth list')
    parser.add_argument('--name_list', '-i3', default='', help='name list')
    parser.add_argument('--encoder_model', '-i4', default='', help='model')
    parser.add_argument('--decoder_model', '-i5', default='', help='model')
    parser.add_argument('--outdir', '-i6', default='', help='outdir')
    args = parser.parse_args()

    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    latent_dim = 32

    # load name_list
    name_list = []
    with open(args.name_list) as paths_file:
        for line in paths_file:
            line = line.split()
            if not line:
                continue
            name_list.append(line[:])

    print('number of test data : {}'.format(len(name_list)))

    # load test data
    test_data = io.load_matrix_data(args.test_data_list, 'float32')
    test_data = np.expand_dims(test_data, axis=4)
    print(test_data.shape)

    # load ground truth
    ground_truth = io.load_matrix_data(args.truth_data_txt, 'int32')
    print(ground_truth.shape)

    # get image size
    image_size = list(test_data.shape[1:4])
    print(image_size)

    # # set network
    # network = Variational_auto_encoder(latent_dim, *image_size)
    # model = network.get_vae()
    # model.load_weights(args.model)

    # predict
    # preds = model.predict(test_data, 1)

    # network = Variational_auto_encoder(latent_dim, *image_size)
    # encoder = network.get_encoder()
    # encoder.load_weights(args.encoder_model)
    encoder = load_model(args.encoder_model)
    decoder = load_model(args.decoder_model)

    encoder_result = encoder.predict(test_data, 1)
    preds = decoder.predict(encoder_result, 1)

    # reshape
    preds = preds[:, :, :, :, 0]
    print(preds.shape)

    ji = []
    for i in range(preds.shape[0]):
        # EUDT
        eudt_image = sitk.GetImageFromArray(preds[i])
        eudt_image.SetSpacing([1, 1, 1])
        eudt_image.SetOrigin([0, 0, 0])

        # label
        label = np.where(preds[i] > 0, 0, 1)
        label_image = sitk.GetImageFromArray(label)
        label_image.SetSpacing([1, 1, 1])
        label_image.SetOrigin([0, 0, 0])

        # calculate ji
        ji.append([jaccard(label, ground_truth[i])])

        # output image
        io.write_mhd_and_raw(
            eudt_image,
            '{}.mhd'.format(os.path.join(args.outdir, 'EUDT', *name_list[i])))
        io.write_mhd_and_raw(
            label_image,
            '{}.mhd'.format(os.path.join(args.outdir, 'label', *name_list[i])))

    generalization = np.mean(ji)
    print('generalization = %f' % generalization)

    # output csv file
    with open(os.path.join(args.outdir, 'generalization.csv'), 'w',
              newline='') as file:
        writer = csv.writer(file)
        writer.writerows(ji)
        writer.writerow(['generalization:', generalization])
Example #8
def main():
    # tf flag
    flags = tf.flags
    flags.DEFINE_string("ground_truth_txt", 'F:/data_info/VAE_liver/set_5/PCA/alpha_0.1/fold_1/test_label.txt', "ground truth txt")
    flags.DEFINE_string("indir", 'G:/experiment_result/liver/VAE/set_5/down/64/alpha_0.1/fold_1/VAE/axis_4/beta_6', "input dir")
    flags.DEFINE_string("outdir", 'G:/experiment_result/liver/VAE/set_5/down/64/alpha_0.1/fold_1/VAE/axis_4/beta_6/random', "outdir")
    flags.DEFINE_integer("model_index", 3450 ,"index of model")
    flags.DEFINE_string("gpu_index", "0", "GPU-index")
    flags.DEFINE_float("beta", 1.0, "hyperparameter beta")
    flags.DEFINE_integer("num_of_generate", 1000, "number of generate data")
    flags.DEFINE_integer("batch_size", 1, "batch size")
    flags.DEFINE_integer("latent_dim", 4, "latent dim")
    flags.DEFINE_list("image_size", [56, 72, 88, 1], "image size")
    FLAGS = flags.FLAGS

    np.random.seed(1)

    # check folder
    if not os.path.exists(FLAGS.outdir):
        os.makedirs(FLAGS.outdir)

    # load ground truth
    ground_truth = io.load_matrix_data(FLAGS.ground_truth_txt, 'int32')
    print(ground_truth.shape)

    # initializer
    init_op = tf.group(tf.initializers.global_variables(),
                       tf.initializers.local_variables())

    with tf.Session(config=utils.config(index=FLAGS.gpu_index)) as sess:

        # set network
        kwargs = {
            'sess': sess,
            'outdir': FLAGS.outdir,
            'beta': FLAGS.beta,
            'latent_dim': FLAGS.latent_dim,
            'batch_size': FLAGS.batch_size,
            'image_size': FLAGS.image_size,
            'encoder': encoder_resblock_bn,
            'decoder': decoder_resblock_bn,
            'downsampling': down_sampling,
            'upsampling': up_sampling,
            'is_training': False,
            'is_down': False
        }
        VAE = Variational_Autoencoder(**kwargs)

        sess.run(init_op)

        # testing
        VAE.restore_model(os.path.join(FLAGS.indir, 'model', 'model_{}'.format(FLAGS.model_index)))
        mean = np.loadtxt(os.path.join(FLAGS.indir, 'mean_{}.txt'.format(FLAGS.model_index)))
        var = np.loadtxt(os.path.join(FLAGS.indir, 'var_{}.txt'.format(FLAGS.model_index)))
        specificity = []

        tbar = tqdm(range(FLAGS.num_of_generate), ascii=True)
        for i in tbar:
            sample_z = np.random.normal(0, 1.0, (1, FLAGS.latent_dim))
            sample_z = np.asarray(mean) + np.sqrt(np.asarray(var)) * sample_z
            generate_data = VAE.generate_sample(sample_z)
            generate_data = generate_data[0, :, :, :, 0]

            # EUDT
            eudt_image = sitk.GetImageFromArray(generate_data)
            eudt_image.SetSpacing([1, 1, 1])
            eudt_image.SetOrigin([0, 0, 0])

            # label
            label = np.where(generate_data > 0.5, 0, 1)
            label = label.astype(np.int8)
            label_image = sitk.GetImageFromArray(label)
            label_image.SetSpacing([1, 1, 1])
            label_image.SetOrigin([0, 0, 0])

            # calculate ji
            case_max_ji = 0.
            for image_index in range(ground_truth.shape[0]):
                ji = utils.jaccard(label, ground_truth[image_index])
                if ji > case_max_ji:
                    case_max_ji = ji
            specificity.append([case_max_ji])

            # # output image
            # io.write_mhd_and_raw(eudt_image, '{}.mhd'.format(os.path.join(FLAGS.outdir, 'EUDT', str(i+1))))
            # io.write_mhd_and_raw(label_image, '{}.mhd'.format(os.path.join(FLAGS.outdir, 'label', str(i + 1))))

    print('specificity = %f' % np.mean(specificity))

    # output csv file
    with open(os.path.join(FLAGS.outdir, 'specificity_{}.csv'.format(FLAGS.model_index)), 'w', newline='') as file:
        writer = csv.writer(file)
        writer.writerows(specificity)
        writer.writerow(['specificity:', np.mean(specificity)])
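
The mean_{}.txt and var_{}.txt files hold per-dimension statistics of the training latent codes, so sample_z = mean + sqrt(var) * eps draws from a diagonal Gaussian fitted to the encoded training set. Presumably they were produced along these lines (an assumption; the data below is a stand-in):

import numpy as np

# stand-in for encoder outputs over the training set, shape (N, latent_dim)
latents = np.random.normal(size=(100, 4)).astype('float32')
np.savetxt('mean_3450.txt', np.mean(latents, axis=0))
np.savetxt('var_3450.txt', np.var(latents, axis=0))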
Example #9
def main():
    parser = argparse.ArgumentParser(
        description='py, data_list, num_per_tfrecord, outdir')

    parser.add_argument(
        '--data_list',
        '-i1',
        default='F:/data_info/TFrecord/liver/set_5/down/64/alpha_0.1/fold_1/val.txt',
        help='data list')

    parser.add_argument('--num_per_tfrecord',
                        '-i2',
                        default=76,
                        help='number per tfrecord')

    parser.add_argument(
        '--outdir',
        '-i3',
        default='G:/data/tfrecord/liver/set_5/down/64/RBF/alpha_0.1/fold_1/val',
        help='outdir')

    args = parser.parse_args()

    # check folder
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    data_set = io.load_matrix_data(args.data_list, 'float32')
    # data_set = io.load_matrix_raw_data(args.data_list, 'float')

    # shuffle
    # np.random.shuffle(data_set)
    print('data size: {}'.format(data_set.shape))

    num_per_tfrecord = int(args.num_per_tfrecord)
    num_of_total_image = data_set.shape[0]

    if (num_of_total_image % num_per_tfrecord != 0):
        num_of_recordfile = num_of_total_image // num_per_tfrecord + 1
    else:
        num_of_recordfile = num_of_total_image // num_per_tfrecord

    num_per_tfrecord_final = num_of_total_image - num_per_tfrecord * (
        num_of_recordfile - 1)

    print('number of TFRecord files: {}'.format(num_of_recordfile))

    # write TFrecord
    for i in range(num_of_recordfile):
        tfrecord_filename = os.path.join(args.outdir,
                                         'recordfile_{}'.format(i + 1))
        options = tf.python_io.TFRecordOptions(
            tf.python_io.TFRecordCompressionType.GZIP)
        writer = tf.python_io.TFRecordWriter(tfrecord_filename, options=options)

        print('Writing recordfile_{}'.format(i + 1))

        if i == num_of_recordfile - 1:
            loop_buf = num_per_tfrecord_final
        else:
            loop_buf = num_per_tfrecord

        for image_index in range(loop_buf):
            image = data_set[image_index + i * num_per_tfrecord].flatten()

            example = tf.train.Example(features=tf.train.Features(
                feature={
                    'img_raw': tf.train.Feature(
                        float_list=tf.train.FloatList(value=image)),
                }))

            writer.write(example.SerializeToString())
        writer.close()
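
Because these records are GZIP-compressed, a reader must pass the matching compression type. A minimal tf.data sketch (TF 1.x); the fixed length below is a placeholder and must match the flattened image size actually written:

import tensorflow as tf

dataset = tf.data.TFRecordDataset(['recordfile_1'], compression_type='GZIP')

def parse(serialized):
    features = tf.parse_single_example(
        serialized,
        features={'img_raw': tf.FixedLenFeature([64 * 64 * 64], tf.float32)})
    return features['img_raw']

dataset = dataset.map(parse).batch(1)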
Example #10
def predict():
    parser = argparse.ArgumentParser(
        description='py, test_data_list, name_list, model, outdir')
    parser.add_argument('--test_data_list',
                        '-i1',
                        default='',
                        help='test data')
    parser.add_argument('--name_list', '-i2', default='', help='name list')
    parser.add_argument('--model', '-i3', default='', help='model')
    parser.add_argument('--outdir', '-i4', default='', help='outdir')
    args = parser.parse_args()

    if not os.path.exists(args.outdir):
        os.mkdir(args.outdir)

    # load name_list
    name_list = []
    with open(args.name_list) as paths_file:
        for line in paths_file:
            line = line.split()
            if not line:
                continue
            name_list.append(line[:])

    print('number of test data : {}'.format(len(name_list)))

    test_data = io.load_matrix_data(args.test_data_list, 'float32')
    test_data = np.expand_dims(test_data, axis=4)
    print(test_data.shape)

    image_size = list(test_data.shape[1:4])
    print(image_size)

    # set network
    network = Autoencoder(*image_size)
    model = network.model()
    model.load_weights(args.model)

    preds = model.predict(test_data, 1)
    preds = preds[:, :, :, :, 0]

    print(preds.shape)

    for i in range(preds.shape[0]):
        # EUDT
        eudt_image = sitk.GetImageFromArray(preds[i])
        eudt_image.SetSpacing([1, 1, 1])
        eudt_image.SetOrigin([0, 0, 0])

        # label
        label = np.where(preds[i] > 0, 0, 1)
        label_image = sitk.GetImageFromArray(label)
        label_image.SetSpacing([1, 1, 1])
        label_image.SetOrigin([0, 0, 0])

        io.write_mhd_and_raw(
            eudt_image,
            '{}.mhd'.format(os.path.join(args.outdir, 'EUDT', *name_list[i])))
        io.write_mhd_and_raw(
            label_image,
            '{}.mhd'.format(os.path.join(args.outdir, 'label', *name_list[i])))