Example #1
def generate_heatingmaps(data_dir, target_label, patch_size, save_dir):
    # Relies on names defined at module level elsewhere in this file:
    # sess, predicted_label_tensor, roi_images, expand_roi_images,
    # batch_size_tensor, is_training_tensor, divided_liver and net_config,
    # plus the usual imports (os, math, numpy as np, PIL.Image).
    patches_arr, paths, mhd_images = extract_patch(data_dir, str(target_label),
                                                   patch_size)
    from load_liver_density import load_raw_liver_density
    liver_density = load_raw_liver_density()
    for index, patches in enumerate(patches_arr):
        path = paths[index]
        basename = os.path.basename(path)
        predicted_labels = []
        start_index = 0
        while True:
            end_index = start_index + net_config.BATCH_SIZE
            if end_index > len(patches):
                # restart = end_index - len(patches)
                end_index = len(patches)
            cur_patches = patches[start_index:end_index]
            expand_patches = patches[start_index:end_index]
            # expand_patches = [mhd_images[index]] * len(cur_patches)  # use the full ROI as the expand patch
            roi_images_values = resize_images(cur_patches,
                                              net_config.ROI_SIZE_W,
                                              rescale=(not divided_liver))
            expand_roi_images_values = resize_images(
                expand_patches,
                net_config.EXPAND_SIZE_W,
                rescale=(not divided_liver))
            cur_liver_densitys = [liver_density[basename]] * len(cur_patches)
            if divided_liver:
                # Normalize each of the three phase channels by the per-case
                # liver density.
                for i in range(len(roi_images_values)):
                    for j in range(3):
                        density = 1.0 * cur_liver_densitys[i][j]
                        roi_images_values[i, :, :, j] = (
                            roi_images_values[i, :, :, j] / density)
                        expand_roi_images_values[i, :, :, j] = (
                            expand_roi_images_values[i, :, :, j] / density)
            predicted_label_value = sess.run(
                predicted_label_tensor,
                feed_dict={
                    roi_images: roi_images_values,
                    expand_roi_images: expand_roi_images_values,
                    batch_size_tensor: len(roi_images_values),
                    is_training_tensor: False
                })
            predicted_labels.extend(predicted_label_value)
            start_index = end_index
            if start_index == len(patches):
                break
        if len(predicted_labels) == 0:
            continue
        # Arrange the labels on a square grid; any labels beyond the largest
        # perfect square are dropped.
        heatingmap_size = int(math.sqrt(len(predicted_labels)))
        heatingmap_image = np.zeros([heatingmap_size, heatingmap_size, 3],
                                    np.uint8)
        for i in range(heatingmap_size):
            for j in range(heatingmap_size):
                heatingmap_image[i, j] = net_config.color_maping[
                    predicted_labels[i * heatingmap_size + j]]
        print index, np.shape(heatingmap_image), len(predicted_labels)
        img = Image.fromarray(np.asarray(heatingmap_image))
        img.save(os.path.join(save_dir, str(target_label), basename + '.jpg'))
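
The heatmap assembly at the end of Example #1 lays the per-patch predictions out row-major on a square grid and looks each label up in net_config.color_maping. A minimal standalone sketch of that step, with a hypothetical color_mapping dict standing in for net_config.color_maping:

import math
import numpy as np

# Hypothetical label -> RGB mapping; the real values live in
# net_config.color_maping.
color_mapping = {0: (0, 0, 0), 1: (255, 0, 0), 2: (0, 255, 0),
                 3: (0, 0, 255), 4: (255, 255, 0)}

def labels_to_heatmap(predicted_labels, color_mapping):
    # Only the largest square number of labels fits on the grid;
    # any remainder is dropped, exactly as in generate_heatingmaps.
    size = int(math.sqrt(len(predicted_labels)))
    heatmap = np.zeros([size, size, 3], np.uint8)
    for i in range(size):
        for j in range(size):
            heatmap[i, j] = color_mapping[predicted_labels[i * size + j]]
    return heatmap

As in the example, the resulting array can then be written out with Image.fromarray(heatmap).save(...).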
Example #2
def main(_):
    roi_images = tf.placeholder(
        shape=[None, net_config.ROI_SIZE_W, net_config.ROI_SIZE_H,
               net_config.IMAGE_CHANNEL],
        dtype=np.float32,
        name='roi_input')
    expand_roi_images = tf.placeholder(
        shape=[None, net_config.EXPAND_SIZE_W, net_config.EXPAND_SIZE_H,
               net_config.IMAGE_CHANNEL],
        dtype=np.float32,
        name='expand_roi_input')
    batch_size_tensor = tf.placeholder(dtype=tf.int32, shape=[])
    is_training_tensor = tf.placeholder(dtype=tf.bool, shape=[])
    logits = inference_small(roi_images,
                             expand_roi_images,
                             phase_names=['NC', 'ART', 'PV'],
                             num_classes=5,
                             point_phase=[2],
                             is_training=is_training_tensor,
                             batch_size=batch_size_tensor)
    model_path = '/home/give/PycharmProjects/MedicalImage/Net/ICIP/Patch_ROI/models/500.0'
    # model_path = '/home/give/PycharmProjects/MedicalImage/Net/forpatch/cross_validation/model/multiscale/parallel/0/2200.0'
    predictions = tf.nn.softmax(logits)
    saver = tf.train.Saver(tf.all_variables())
    print predictions

    predicted_label_tensor = tf.argmax(predictions, axis=1)
    print predicted_label_tensor
    init = tf.initialize_all_variables()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
    sess.run(init)
    tf.train.start_queue_runners(sess=sess)
    latest = tf.train.latest_checkpoint(model_path)
    if not latest:
        print "No checkpoint to continue from in", model_path
        sys.exit(1)
    print "resume", latest
    saver.restore(sess, latest)

    data_dir = '/home/give/Documents/dataset/MedicalImage/MedicalImage/Patches/ICIP/only-patch/test'
    labels = []
    paths = []
    for typeid in [0, 1, 2, 3, 4]:
        cur_path = os.path.join(data_dir, str(typeid))
        names = os.listdir(cur_path)
        labels.extend([typeid] * len(names))
        paths.extend([os.path.join(cur_path, name) for name in names])
    paths, labels = shuffle_image_label(paths, labels)
    start_index = 0
    predicted_labels = []
    liver_density = load_raw_liver_density()
    while True:
        if start_index >= len(paths):
            break
        print start_index, len(paths)
        end_index = start_index + net_config.BATCH_SIZE
        cur_paths = paths[start_index:end_index]
        cur_roi_images = [np.asarray(load_patch(path)) for path in cur_paths]
        cur_expand_roi_images = [
            np.asarray(load_patch(
                path,
                return_roi=True,
                parent_dir='/home/give/Documents/dataset/MedicalImage/MedicalImage/SL_TrainAndVal/val'))
            for path in cur_paths
        ]
        cur_roi_images = resize_images(cur_roi_images, net_config.ROI_SIZE_W,
                                       True)
        cur_expand_roi_images = resize_images(cur_expand_roi_images,
                                              net_config.EXPAND_SIZE_W, True)
        # The density table is keyed by the case name: the file basename with
        # its trailing '_<suffix>' stripped.
        cur_liver_densitys = [
            liver_density[os.path.basename(path)[:os.path.basename(path).rfind('_')]]
            for path in cur_paths
        ]
        # for i in range(len(cur_roi_images)):
        #     for j in range(3):
        #         cur_roi_images[i, :, :, j] = (1.0 * cur_roi_images[i, :, :, j]) / (1.0 * cur_liver_densitys[i][j])
        #         cur_expand_roi_images[i, :, :, j] = (1.0 * cur_expand_roi_images[i, :, :, j]) / (
        #         1.0 * cur_liver_densitys[i][j])
        predicted_batch_labels = sess.run(
            predicted_label_tensor,
            feed_dict={
                roi_images: cur_roi_images,
                expand_roi_images: cur_expand_roi_images,
                is_training_tensor: False,
                batch_size_tensor: len(cur_roi_images)
            })
        batch_labels = labels[start_index:end_index]
        predicted_labels.extend(predicted_batch_labels)
        start_index = end_index
        calculate_acc_error(predicted_batch_labels, batch_labels)
    calculate_acc_error(predicted_labels, labels)
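
Example #2 (and Example #3 below) walks the shuffled path list in net_config.BATCH_SIZE chunks, feeding the actual chunk length through batch_size_tensor so the final, possibly smaller, batch is still handled. The chunking itself is independent of TensorFlow; a minimal sketch of the same loop structure:

def iterate_batches(items, batch_size):
    # Yield consecutive slices of items; the last slice may be shorter.
    start = 0
    while start < len(items):
        yield items[start:start + batch_size]
        start += batch_size

Each slice then drives one sess.run call, e.g. for cur_paths in iterate_batches(paths, net_config.BATCH_SIZE): ..., with batch_size_tensor fed the length of the current slice.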
Example #3
def main(_):
    roi_images = tf.placeholder(
        shape=[None, net_config.ROI_SIZE_W, net_config.ROI_SIZE_H,
               net_config.IMAGE_CHANNEL],
        dtype=np.float32,
        name='roi_input')
    expand_roi_images = tf.placeholder(
        shape=[None, net_config.EXPAND_SIZE_W, net_config.EXPAND_SIZE_H,
               net_config.IMAGE_CHANNEL],
        dtype=np.float32,
        name='expand_roi_input')
    batch_size_tensor = tf.placeholder(dtype=tf.int32, shape=[])
    is_training_tensor = tf.placeholder(dtype=tf.bool, shape=[])
    logits, _, _, representor_tensor = inference_small(
        roi_images,
        expand_roi_images,
        phase_names=['NC', 'ART', 'PV'],
        num_classes=4,
        is_training=is_training_tensor,
        batch_size=batch_size_tensor)
    model_path = '/home/give/PycharmProjects/MICCAI2018/deeplearning/LSTM/parameters/0/0.0001'
    # model_path = '/home/give/PycharmProjects/MedicalImage/Net/forpatch/cross_validation/model/multiscale/parallel/0/2200.0'
    predictions = tf.nn.softmax(logits)
    saver = tf.train.Saver(tf.all_variables())
    print predictions

    predicted_label_tensor = tf.argmax(predictions, axis=1)
    print predicted_label_tensor
    init = tf.initialize_all_variables()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
    sess.run(init)
    tf.train.start_queue_runners(sess=sess)
    latest = tf.train.latest_checkpoint(model_path)
    if not latest:
        print "No checkpoint to continue from in", model_path
        sys.exit(1)
    print "resume", latest
    saver.restore(sess, latest)

    data_dir = '/home/give/Documents/dataset/MICCAI2018/Patches/crossvalidation/0/test'
    slice_dir = '/home/give/Documents/dataset/MICCAI2018/Slices/crossvalidation/0/test'
    labels = []
    paths = []
    for typeid in [0, 1, 2, 3]:
        cur_path = os.path.join(data_dir, str(typeid))
        names = os.listdir(cur_path)
        labels.extend([typeid] * len(names))
        paths.extend([os.path.join(cur_path, name) for name in names])
    paths, labels = shuffle_image_label(paths, labels)
    start_index = 0
    predicted_labels = []
    features = []  # collects representor_tensor outputs for visualization below
    liver_density = load_raw_liver_density()
    while True:
        if start_index >= len(paths):
            break
        print start_index, len(paths)
        end_index = start_index + net_config.BATCH_SIZE
        cur_paths = paths[start_index:end_index]
        cur_roi_images = [np.asarray(load_patch(path)) for path in cur_paths]
        cur_expand_roi_images = [
            np.asarray(load_patch(path, return_roi=True, parent_dir=slice_dir))
            for path in cur_paths
        ]
        cur_roi_images = resize_images(cur_roi_images, net_config.ROI_SIZE_W,
                                       True)
        cur_expand_roi_images = resize_images(cur_expand_roi_images,
                                              net_config.EXPAND_SIZE_W, True)
        # cur_liver_densitys = [liver_density[os.path.basename(path)[:os.path.basename(path).rfind('_')]] for
        #                       path in cur_paths]
        # for i in range(len(cur_roi_images)):
        #     for j in range(3):
        #         cur_roi_images[i, :, :, j] = (1.0 * cur_roi_images[i, :, :, j]) / (1.0 * cur_liver_densitys[i][j])
        #         cur_expand_roi_images[i, :, :, j] = (1.0 * cur_expand_roi_images[i, :, :, j]) / (
        #         1.0 * cur_liver_densitys[i][j])
        predicted_batch_labels, representor_value, logits_value = sess.run(
            [predicted_label_tensor, representor_tensor, logits],
            feed_dict={
                roi_images: cur_roi_images,
                expand_roi_images: cur_expand_roi_images,
                is_training_tensor: False,
                batch_size_tensor: len(cur_roi_images)
            })
        features.extend(representor_value)
        batch_labels = labels[start_index:end_index]
        predicted_labels.extend(predicted_batch_labels)
        start_index = end_index
        calculate_acc_error(predicted_batch_labels, batch_labels)
    calculate_acc_error(predicted_labels, labels)

    # Visualize the collected features: reduce them to 2-D and 3-D with PCA
    # and scatter-plot each projection, colored by class.
    from sklearn.decomposition import PCA
    from plot import plot_scatter, plot_scatter3D
    for dim in [2, 3]:
        pca_obj = PCA(n_components=dim)
        visualized_data = pca_obj.fit_transform(features)
        if dim == 3:
            plot_scatter3D(visualized_data[:, 0],
                           visualized_data[:, 1],
                           visualized_data[:, 2],
                           labels=labels,
                           category_num=4)
        else:
            plot_scatter(visualized_data[:, 0],
                         visualized_data[:, 1],
                         labels=labels,
                         category_num=4)
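
plot_scatter and plot_scatter3D come from the repository's local plot module. If that module is unavailable, an equivalent 2-D view can be drawn directly with matplotlib; this sketch assumes features is an (N, D) array-like and labels a length-N sequence of class indices, as collected in Example #3:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA

def plot_pca_scatter(features, labels, category_num=4):
    # Project the features to 2-D with PCA and color each point by class.
    projected = PCA(n_components=2).fit_transform(np.asarray(features))
    labels = np.asarray(labels)
    for category in range(category_num):
        mask = labels == category
        plt.scatter(projected[mask, 0], projected[mask, 1],
                    s=8, label='class %d' % category)
    plt.legend()
    plt.show()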