Code Example #1
def main():
    parser = argparse.ArgumentParser(description='Copy data')
    parser.add_argument('--base', default=os.path.dirname(os.path.abspath(__file__)),
                        help='Base directory path to program files')
    parser.add_argument('--input_dir', type=str, default='../../data/raw',
                        help='Input directory')
    parser.add_argument('--output_dir', type=str, default='../../data/interim',
                        help='Output directory')
    args = parser.parse_args()

    input_dir = os.path.join(args.base, args.input_dir)
    path_list = [os.path.abspath(i) for i in glob.glob("{}/**/*T1*.gz".format(input_dir), recursive=True)]

    result_dir = os.path.join(args.base, args.output_dir)
    os.makedirs(result_dir, exist_ok=True)

    for i, path in enumerate(path_list):
        print('Data: {}'.format(path))
        img = IO.read_mhd_and_raw(path, False)
        # Rebuild the image via a NumPy round-trip to write a clean MHD file
        # (spacing is preserved, origin is reset to zero)
        output = sitk.GetArrayFromImage(img)
        output = sitk.GetImageFromArray(output)
        output.SetSpacing(img.GetSpacing())
        output.SetOrigin([0,0,0])
        sitk.WriteImage(output, '{}/{:04d}.mhd'.format(result_dir, i))
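
The IO.read_mhd_and_raw helper used above (and, as io.read_mhd_and_raw, in the later examples) is project-specific and not shown. A minimal stand-in, assuming the second argument switches between returning a NumPy array and the SimpleITK image itself:

# Hypothetical stand-in for the project-specific MHD/RAW reader; the real
# helper may behave differently.
import SimpleITK as sitk

def read_mhd_and_raw(path, numpy_flag=True):
    img = sitk.ReadImage(path)
    if numpy_flag:
        return sitk.GetArrayFromImage(img)  # (z, y, x) NumPy array
    return img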
Code Example #2
def get_dataset(list_path, patch_side, num_of_test):
    print('load data')
    path_list = io.load_list(list_path)
    data_set = np.zeros((num_of_test, patch_side, patch_side, patch_side))
    for i in trange(num_of_test):
        data_set[i, :] = np.reshape(io.read_mhd_and_raw(path_list[i]), [patch_side, patch_side, patch_side])
    return data_set
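
A hypothetical call to get_dataset; the list file name and the counts below are illustrative, not taken from the source:

# Example usage (illustrative values)
patches = get_dataset('filename.txt', patch_side=9, num_of_test=100)
print(patches.shape)  # (100, 9, 9, 9)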
Code Example #3
def making_patch(num, img_path, mask_path, patch_side, threshold):
    z_size = 320
    y_size = 320
    x_size = 320
    w = int(patch_side / 2)

    path_w = "E:/data/data{}_patch/sigma_0.9/th_{}/size_{}/".format(
        num, threshold, patch_side)

    # load data
    print('load data')
    img = io.read_mhd_and_raw(img_path)
    mask = io.read_mhd_and_raw(mask_path)

    img = np.reshape(img, [z_size, y_size, x_size])
    mask = np.reshape(mask, [z_size, y_size, x_size])

    # check folder
    if not (os.path.exists(path_w)):
        os.makedirs(path_w)

    file = open(path_w + "filename.txt", mode='w')
    count = 0
    # iterate over interior voxels only, so the 6-neighbour test and the
    # patch-sized crop never index outside the volume
    for z in range(w, z_size - w):
        for y in range(w, y_size - w):
            for x in range(w, x_size - w):
                if mask[z, y, x] > threshold and mask[z, y, x] > mask[z, y, x - 1] and mask[z, y, x] > mask[z, y, x + 1] \
                        and mask[z, y, x] > mask[z - 1, y, x] and mask[z, y, x] > mask[z + 1, y, x] \
                        and mask[z, y, x] > mask[z, y - 1, x] and mask[z, y, x] > mask[z, y + 1, x]:
                    patch = img[z - w:z + w + 1, y - w:y + w + 1,
                                x - w:x + w + 1]
                    patch = patch.reshape([patch_side, patch_side, patch_side])
                    eudt_image = sitk.GetImageFromArray(patch)
                    eudt_image.SetOrigin([patch_side, patch_side, patch_side])
                    eudt_image.SetSpacing([0.885, 0.885, 1])
                    io.write_mhd_and_raw(
                        eudt_image,
                        os.path.join(path_w,
                                     "patch_{}_{}_{}.mhd".format(x, y, z)))
                    file.write(
                        os.path.join(
                            path_w,
                            "patch_{}_{}_{}.mhd".format(x, y, z)) + "\n")
                    count += 1
                    print(count)

    file.close()
    return 0
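
The triple loop keeps a voxel only when it exceeds the threshold and is strictly larger than its six face neighbours. The same test can be written with SciPy; this is an alternative sketch, not the author's code, and it handles the borders by reflection instead of index wrap-around:

# Alternative formulation of the 6-neighbour local-maximum test.
import numpy as np
from scipy import ndimage

def local_maxima(mask, threshold):
    footprint = np.zeros((3, 3, 3), dtype=bool)
    footprint[0, 1, 1] = footprint[2, 1, 1] = True   # z-1, z+1
    footprint[1, 0, 1] = footprint[1, 2, 1] = True   # y-1, y+1
    footprint[1, 1, 0] = footprint[1, 1, 2] = True   # x-1, x+1
    neighbour_max = ndimage.maximum_filter(mask, footprint=footprint)
    return (mask > threshold) & (mask > neighbour_max)

np.argwhere(local_maxima(mask, threshold)) then yields the (z, y, x) centres around which making_patch cuts its patches.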
Code Example #4
def main():
    parser = argparse.ArgumentParser(description='Settings')
    parser.add_argument('--root',
                        type=str,
                        default="E:/data/Tokushima/",
                        help='root path')
    parser.add_argument('--data_n', type=int, default=1, help='index of data')
    parser.add_argument('--org',
                        type=str,
                        default="float/",
                        help='target organ')

    args = parser.parse_args()

    # settings
    # data_n = str(args.data_n).zfill(2)
    # img_path = os.path.join(args.root, "Fukui_Ai-CT_Sept2015/Fukui_Ai-CT_Sept2015_{}-2.mhd".format(data_n))
    # mask_path = os.path.join(args.root, "Fukui_Ai-CT_2015_Label/{}/Fukui_Ai-CT_Sept2015_{}-2_{}.mhd".format(args.org, data_n, args.org))

    # check folder
    w_path = os.path.join(args.root, "{}/".format(args.org))
    os.makedirs(w_path, exist_ok=True)

    case_list = io.load_list(args.root + 'filename.txt')

    for i in case_list:
        img_path = os.path.join(args.root, "CT", i)
        out_path = os.path.join(w_path, os.path.basename(img_path))
        mask_path = os.path.join(args.root, "Label", i)

        # loading data
        print("-" * 20, 'Loading data', "-" * 20)
        print(img_path)
        if os.path.isfile(img_path):

            sitkimg = sitk.ReadImage(img_path, sitk.sitkInt16)
            img = sitk.GetArrayFromImage(sitkimg)
            mask = io.read_mhd_and_raw(mask_path)

            # masking
            img = np.where((mask == 1) | (mask == 2), img, -2000)
            img = np.array(img, dtype='float32')

            # cropping
            idx = np.where(img != -2000)
            z, y, x = idx
            img = cropping(img, np.min(x), np.max(x), np.min(y), np.max(y),
                           np.min(z), np.max(z))

            # plt.imshow(img[int(np.mean(z))], cmap='gray', interpolation=None)
            # plt.show()

            # saving img
            eudt_image = sitk.GetImageFromArray(img)
            eudt_image.SetSpacing(sitkimg.GetSpacing())
            eudt_image.SetOrigin(sitkimg.GetOrigin())
            sitk.WriteImage(eudt_image, out_path)
            print(out_path)
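
cropping() is defined elsewhere in the source. Given the call above, it presumably slices the volume to the inclusive bounding box of the foreground; a minimal sketch under that assumption:

# Assumed behaviour of cropping(); the argument order matches the call above.
def cropping(volume, x_min, x_max, y_min, y_max, z_min, z_max):
    return volume[z_min:z_max + 1, y_min:y_max + 1, x_min:x_max + 1]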
Code Example #5
def get_data_from_list(list_path, patch_side=9):
    print('load data')
    name_list = io.load_list(list_path)
    data_set = np.empty((len(name_list), patch_side, patch_side, patch_side))
    for i, name in enumerate(name_list):
        data_set[i, :] = np.reshape(io.read_mhd_and_raw(name),
                                    [patch_side, patch_side, patch_side])

    return data_set
Code Example #6
def main():
    parser = argparse.ArgumentParser(description='py, in, out, num')
    parser.add_argument('--indir', '-i1', default="E:/git/beta-VAE/output/CT/patch/model2/z24/alpha_1e-5/beta_0.1/spe/EUDT/", help='input directory')
    parser.add_argument('--side', '-i2', type=int, default=9, help='patch side size')
    parser.add_argument('--num_of_data', '-i3', type=int, default=5000, help='number of the input data')
    args = parser.parse_args()

    # check folder
    indir = args.indir
    outdir = os.path.join(indir, "dec/")
    if not (os.path.exists(outdir)):
        os.makedirs(outdir)
    num_of_data = args.num_of_data
    side = args.side

    # load data
    print('load data')
    data_set = np.zeros((num_of_data, side, side, side))
    # file = open(outdir + 'filename.txt', 'w')
    list = []
    with open(indir + 'filename.txt', 'rt') as f:
        i = 0
        for line in f:
            if i >= num_of_data:
                break
            line = line.split()
            sitkdata = sitk.ReadImage(line[0])
            # data = sitk.GetArrayFromImage(sitkdata)
            data = np.reshape(io.read_mhd_and_raw(line[0]), [side, side, side])
            data_set[i, :] = data
            list.append(line[0])
            i += 1
            print(i)
            # for x in line:
            #     file.write(str(x) + "/n")
    # file.close()
    # print(list)


    img = data_set.reshape(num_of_data, side, side, side)

    # re-save each patch with the original spacing/origin and record its path
    file = open(outdir + "filename.txt", mode='w')
    for i in trange(len(img)):
        eudt_image = sitk.GetImageFromArray(img[i].reshape(side, side, side))
        eudt_image.SetSpacing(sitkdata.GetSpacing())
        eudt_image.SetOrigin(sitkdata.GetOrigin())

        sitk.WriteImage(eudt_image, os.path.join(outdir, "{}.mhd".format(str(i).zfill(4))))
        file.write(os.path.join(outdir, "{}.mhd".format(str(i).zfill(4))) + "\n")

    file.close()
Code Example #7
def main():
    parser = argparse.ArgumentParser(
        description='Generate low resolution images from HR images')
    parser.add_argument('--base',
                        default=os.path.dirname(os.path.abspath(__file__)),
                        help='Base directory path to program files')
    parser.add_argument('--config_path',
                        type=str,
                        default='../../configs/base.yml',
                        help='path to config file')
    parser.add_argument('--input_dir',
                        type=str,
                        default='../../data/interim',
                        help='Input directory')
    parser.add_argument('--output_dir',
                        type=str,
                        default='../../data/processed',
                        help='Output directory')
    args = parser.parse_args()

    with open(os.path.join(args.base, args.config_path)) as f:
        config = yaml_utils.Config(yaml.safe_load(f))

    input_dir = os.path.join(args.base, args.input_dir)
    path_list = glob.glob('{}/*.mhd'.format(input_dir))

    result_dir = os.path.join(args.base, args.output_dir)
    os.makedirs('{}/HR'.format(result_dir), exist_ok=True)
    os.makedirs('{}/LR'.format(result_dir), exist_ok=True)
    for i, path in enumerate(path_list):
        hr_img = IO.read_mhd_and_raw(path, False)
        hr_img.SetOrigin([0, 0, 0])
        hr_size = hr_img.GetSize()
        hr_spacing = hr_img.GetSpacing()
        new_spacing = [s * config['upsampling_rate'] for s in hr_spacing]
        new_size = [
            int(hr_size[0] * (hr_spacing[0] / new_spacing[0]) + 0.5),
            int(hr_size[1] * (hr_spacing[1] / new_spacing[1]) + 0.5),
            int(hr_size[2] * (hr_spacing[2] / new_spacing[2]) + 0.5)
        ]
        resampleFilter = sitk.ResampleImageFilter()

        lr_img = resampleFilter.Execute(hr_img, new_size,
                                        sitk.Transform(), sitk.sitkBSpline,
                                        hr_img.GetOrigin(), new_spacing,
                                        hr_img.GetDirection(), 0,
                                        hr_img.GetPixelID())

        # Save HR and LR images
        sitk.WriteImage(lr_img, '{}/LR/{:04d}.mhd'.format(result_dir, i))
        sitk.WriteImage(hr_img, '{}/HR/{:04d}.mhd'.format(result_dir, i))
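
The new_size computation rescales each dimension by the spacing ratio and rounds to the nearest integer. A small worked example with illustrative numbers (the real upsampling_rate comes from the YAML config):

hr_size, hr_spacing, upsampling_rate = (320, 320, 100), (0.7, 0.7, 1.0), 2
new_spacing = [s * upsampling_rate for s in hr_spacing]      # [1.4, 1.4, 2.0]
new_size = [int(sz * (sp / ns) + 0.5)
            for sz, sp, ns in zip(hr_size, hr_spacing, new_spacing)]
print(new_size)                                              # [160, 160, 50]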
Code Example #8
if not (os.path.exists(outdir)):
    os.makedirs(outdir)

# save parameters
with open(os.path.join(outdir, "params.json"), mode="w") as f:
    json.dump(args.__dict__, f, indent=4)

writer = SummaryWriter(log_dir=outdir + "logs")

print('-' * 20, 'loading data', '-' * 20)
list = io.load_list(data_path)
data_set = np.zeros((len(list), patch_side, patch_side, patch_side))

for i in trange(len(list)):
    data_set[i, :] = np.reshape(io.read_mhd_and_raw(list[i]),
                                [patch_side, patch_side, patch_side])

data = data_set.reshape(num_of_data, patch_side * patch_side * patch_side)
data = min_max(data, axis=1)

# split data
test_data = torch.from_numpy(data[:num_of_test]).float()
val_data = torch.from_numpy(data[num_of_test:num_of_test +
                                 num_of_val]).float().to(device)
train_data = torch.from_numpy(data[num_of_test +
                                   num_of_val:]).float().to(device)

train_loader = torch.utils.data.DataLoader(train_data,
                                           batch_size=args.batch_size,
                                           shuffle=True)
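
The excerpt ends with the training loader; loaders for the other two splits could be built the same way. This is only a sketch continuing the snippet above, and the batch size and shuffle flags are assumptions:

# Hypothetical companion loaders for the validation and test splits.
val_loader = torch.utils.data.DataLoader(val_data,
                                         batch_size=args.batch_size,
                                         shuffle=False)
test_loader = torch.utils.data.DataLoader(test_data,
                                          batch_size=args.batch_size,
                                          shuffle=False)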
Code Example #9
import random
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy import ndimage
import utils, os
import torch.nn

# outdir1 = "E:/result/cars/generalization/L1"
outdir2 = "./"
#
# # check folder
# if not (os.path.exists(outdir1)):
#     os.makedirs(outdir1)
# img = io.read_mhd_and_raw("E:/from_kubo/vector_rotation/x64/Release/output/output_5_5_2.mhd")
#
img1 = io.read_mhd_and_raw("E:/git/pytorch/vae/results/artificial/hole/z_6/B_0.1/batch128/L_60000/gen/ori/0001.mhd")
# img2 = io.read_mhd_and_raw("E:/git/pytorch/vae/results/artificial/tip/z_24/B_0.1/L_0/gen/rec/0000.mhd")
# # "E:/git/pca/output/CT/patch/z24/EUDT/recon_104.mhd"
#
# img1 = (img1 - np.min(img1))/ (np.max(img1) - np.min(img1))
# img3 = abs(img1 -img2)
# print(img3)
# print(np.max(img3))
# print(np.min(img3))

# ori=np.reshape(img1, [9,9,9])
# preds=np.reshape(img2, [9,9,9])
#
# # plot reconstruction

# utils.display_image(img1, img2, img3, 9, outdir1)
Code Example #10
from utils import get_dataset


def plot_hist(g_hist, th_v):
    plt.plot(g_hist)
    plt.axvline(x=th_v, color='red', label='otsu')
    plt.legend(loc='upper right')
    plt.title("histogram, otsu and ave value")
    plt.xlabel("brightness")
    plt.ylabel("frequency")
    plt.show()


print("load data")
path = "E:/data/Tokushima/Lung/t0000190_6.mhd"
img = io.read_mhd_and_raw(path)

target = np.where(img == -2000, False, img)
val = filters.threshold_otsu(target)
# val = -700

# hist, bins_center = exposure.histogram(img, nbins=10)
print(img.shape)
print(val)

slice_idx = np.argmax(np.average(np.average(img, axis=2), axis=1), axis=0)
print(slice_idx)
plt.figure(figsize=(9, 4))
plt.subplot(131)
plt.imshow(img[slice_idx], cmap='gray', interpolation=None)
plt.axis('off')
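
The excerpt stops after the first panel. A hedged guess at a second panel that applies the Otsu value to obtain a binary lung mask (not part of the original):

# Hypothetical continuation: show the thresholded slice next to the original.
binary = (img > val) & (img != -2000)
plt.subplot(132)
plt.imshow(binary[slice_idx], cmap='gray', interpolation=None)
plt.axis('off')
plt.show()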
Code Example #11
writer = SummaryWriter(log_dir=outdir+"logs")

if not (os.path.exists(outdir)):
    os.makedirs(outdir)

# save parameters
with open(os.path.join(outdir, "params.json"), mode="w") as f:
    json.dump(args.__dict__, f, indent=4)

print('load data')
list = io.load_list(data_path)
data_set = np.zeros((len(list), image_size, image_size, image_size))

for i in trange(len(list)):
    data_set[i, :] = np.reshape(io.read_mhd_and_raw(list[i]), [image_size, image_size, image_size])

data = data_set.reshape(num_of_data, image_size * image_size * image_size)


def min_max(x, axis=None):
    x_min = x.min(axis=axis, keepdims=True)
    x_max = x.max(axis=axis, keepdims=True)
    return (x - x_min) / (x_max - x_min)

data = min_max(data, axis=1)

test_data = torch.from_numpy(data[:num_of_test]).float()
val_data = torch.from_numpy(data[num_of_test:num_of_test+num_of_val]).float().to(device)
train_data = torch.from_numpy(data[num_of_test+num_of_val:]).float().to(device)
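
A quick sanity check of min_max on a toy array (each row is rescaled to [0, 1] independently when axis=1):

print(min_max(np.array([[0.0, 5.0, 10.0],
                        [2.0, 2.5, 3.0]]), axis=1))
# [[0.  0.5 1. ]
#  [0.  0.5 1. ]]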
Code Example #12
def main():
    parser = argparse.ArgumentParser(description='py, in, out, num')
    parser.add_argument(
        '--indir',
        '-i1',
        default="E:/git/TFRecord_example/input/CT/patch/size9/",
        help='input directory')
    parser.add_argument('--side',
                        '-i2',
                        type=int,
                        default=9,
                        help='patch side size')
    parser.add_argument('--num_of_data',
                        '-i3',
                        type=int,
                        default=3039,
                        help='number of the input data')
    args = parser.parse_args()

    # check folder
    indir = args.indir
    outdir = os.path.join(indir, "cc1/")
    os.makedirs(outdir + 'test', exist_ok=True)
    os.makedirs(outdir + 'val', exist_ok=True)
    os.makedirs(outdir + 'train', exist_ok=True)
    num_of_data = args.num_of_data
    side = args.side

    # load data
    print('load data')
    data_set = np.zeros((num_of_data, side, side, side))
    # file = open(outdir + 'filename.txt', 'w')
    list = []
    with open(indir + 'filename.txt', 'rt') as f:
        i = 0
        for line in f:
            if i >= num_of_data:
                break
            line = line.split()
            sitkdata = sitk.ReadImage(line[0])
            # data = sitk.GetArrayFromImage(sitkdata)
            data = np.reshape(io.read_mhd_and_raw(line[0]), [side, side, side])
            data_set[i, :] = data
            list.append(line[0])
            i += 1
            print(i)
            # for x in line:
            #     file.write(str(x) + "/n")
    # file.close()
    # print(list)

    # load mask
    # topo = pd.read_csv(indir + "topo.csv", header=None).values.tolist()
    # print(topo)
    # topo = np.loadtxt(indir + "topo.csv", delimiter=",", dtype="unicode")
    # topo = [flatten for inner in topo for flatten in inner]

    # file = open(outdir + "filename.txt", mode='w')

    # Normalization
    data_set = rank_norm(data_set.reshape(num_of_data, side * side * side))
    data_set = data_set.reshape(num_of_data, side, side, side)

    for i in trange(len(data_set)):
        # if topo[i] == 1:
        eudt_image = sitk.GetImageFromArray(data_set[i].reshape(
            side, side, side))
        eudt_image.SetSpacing(sitkdata.GetSpacing())
        eudt_image.SetOrigin(sitkdata.GetOrigin())

        # fixed split: first 603 patches -> test, next 603 -> val, rest -> train
        if i <= 602:
            folder = "test/"
        elif i <= 602 * 2:
            folder = "val/"
        else:
            folder = "train/"

        sitk.WriteImage(
            eudt_image,
            os.path.join(outdir, folder, "{}.mhd".format(str(i).zfill(4))))
        with open(outdir + folder + "filename.txt", mode='a') as file:
            file.write(
                os.path.join(outdir, folder,
                             "{}.mhd".format(str(i).zfill(4))) + "\n")
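
rank_norm() is not shown in this excerpt. One plausible definition, assuming a per-sample rank transform scaled to [0, 1]; the project's implementation may differ:

# Assumed definition of rank_norm() for illustration only.
import numpy as np
from scipy.stats import rankdata

def rank_norm(x):
    # x: (num_of_data, num_of_voxels); rank the voxels within each sample,
    # then rescale the ranks to [0, 1].
    ranks = np.apply_along_axis(rankdata, 1, x)
    return (ranks - 1.0) / (x.shape[1] - 1.0)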
Code Example #13
# setting
# center of the vessel
x_center, y_center, z_center = 6, 4, 7
size = 9
# ROI
s_roi = 1
e_roi = size - 1
roi = e_roi - s_roi
# path
path = "./input/"
file = "data"

# input
print("load data")
img = io.read_mhd_and_raw(path + file + ".mhd", 'double')
img = np.reshape(img, (size, size, size))

# roi
profile = img[z_center:z_center + 1, y_center:y_center + 1, s_roi:e_roi]

# profile = np.reshape(profile, (roi, 1))
x_fit = np.linspace(0, roi - 1, roi * 10)

# fitting
print("start fitting")
x = np.linspace(0, roi - 1, roi)
y = np.reshape(profile, (roi))

print("-" * 20, "Gaussian fitting", "-" * 20)
r = scipy_fit(x, y)
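
scipy_fit() comes from elsewhere in the source. A minimal Gaussian fit with scipy.optimize.curve_fit, sketching what it presumably does (the model and initial guess are assumptions):

# Hypothetical version of scipy_fit(): fit a Gaussian plus offset to the
# 1-D intensity profile and return the optimised parameters.
import numpy as np
from scipy.optimize import curve_fit

def gaussian(x, a, mu, sigma, c):
    return a * np.exp(-((x - mu) ** 2) / (2.0 * sigma ** 2)) + c

def scipy_fit(x, y):
    p0 = [y.max() - y.min(), x[np.argmax(y)], 1.0, y.min()]  # rough initial guess
    popt, _ = curve_fit(gaussian, x, y, p0=p0)
    return popt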
Code Example #14
def main():
    parser = argparse.ArgumentParser(
        description='py, data_list, num_per_tfrecord, outdir')

    parser.add_argument('--data_list',
                        '-i1',
                        default='F:/data_info/TFrecord/liver/set_2/train.txt',
                        help='data list')

    parser.add_argument('--num_per_tfrecord',
                        '-i2',
                        default=250,
                        help='number per tfrecord')

    parser.add_argument('--outdir',
                        '-i3',
                        default='F:/data/tfrecord/liver/test',
                        help='outdir')

    parser.add_argument('--tfrc_index',
                        '-i4',
                        default='1',
                        help='tfrecord index')

    args = parser.parse_args()

    # check folder
    if not (os.path.exists(args.outdir)):
        os.makedirs(args.outdir)

    # load list
    input_list = io.load_list(args.data_list)

    # shuffle
    random.shuffle(input_list)
    print('data size: {}'.format(len(input_list)))

    num_per_tfrecord = int(args.num_per_tfrecord)
    num_of_total_image = len(input_list)

    if (num_of_total_image % num_per_tfrecord != 0):
        num_of_recordfile = num_of_total_image // num_per_tfrecord + 1
    else:
        num_of_recordfile = num_of_total_image // num_per_tfrecord

    num_per_tfrecord_final = num_of_total_image - num_per_tfrecord * (
        num_of_recordfile - 1)

    print('number of total TFrecordfile: {}'.format(num_of_recordfile))

    # write TFrecord
    for i in range(num_of_recordfile):
        # number each shard uniquely so successive record files are not overwritten
        tfrecord_filename = os.path.join(
            args.outdir, 'recordfile_{}'.format(int(args.tfrc_index) + i))
        options = tf.python_io.TFRecordOptions(
            tf.python_io.TFRecordCompressionType.GZIP)
        write = tf.python_io.TFRecordWriter(tfrecord_filename, options=options)

        print('Writing recordfile_{}'.format(i + 1))

        if i == num_of_recordfile - 1:
            loop_buf = num_per_tfrecord_final
        else:
            loop_buf = num_per_tfrecord

        for image_index in range(loop_buf):
            # load data
            print('image from: {}'.format(input_list[image_index +
                                                     i * num_per_tfrecord]))
            data = io.read_mhd_and_raw(
                input_list[image_index +
                           i * num_per_tfrecord]).astype('float32')
            image = data.flatten()

            example = tf.train.Example(features=tf.train.Features(
                feature={
                    'img_raw':
                    tf.train.Feature(float_list=tf.train.FloatList(
                        value=image)),
                }))

            write.write(example.SerializeToString())
        write.close()
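
A sketch of reading the written shards back with the same TF1 API; it assumes each record holds one flattened float volume under the 'img_raw' key, as written above:

# Hypothetical reader for the GZIP-compressed shards produced by this script.
import numpy as np
import tensorflow as tf

def read_shard(path):
    options = tf.python_io.TFRecordOptions(
        tf.python_io.TFRecordCompressionType.GZIP)
    for record in tf.python_io.tf_record_iterator(path, options=options):
        example = tf.train.Example()
        example.ParseFromString(record)
        values = example.features.feature['img_raw'].float_list.value
        yield np.asarray(values, dtype='float32')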