Example #1
0
 def __init__(self, data_path, batch_size=100):
     self.data_dir = data_path
     self.batch_size = batch_size
     self.train_image, self.train_label, self.test_image, self.test_label = read_npys(
         self.data_dir)
     self.train_image, self.train_label = shuffle_image_label(
         self.train_image, self.train_label)
     self.test_image, self.test_label = shuffle_image_label(
         self.test_image, self.test_label)
     split_rate = [0.8]
     splited_images = split_array(self.train_image, num=2, rate=split_rate)
     splited_labels = split_array(self.train_label, num=2, rate=split_rate)
     self.train_image = splited_images[0]
     self.train_label = splited_labels[0]
     self.val_image = splited_images[1]
     self.val_label = splited_labels[1]
     print np.shape(self.train_image), np.shape(self.val_image), np.shape(
         self.test_image)
     print np.shape(self.train_label), np.shape(self.val_label), np.shape(
         self.test_label)
     self.train_generator = GenerateBatch(
         self.train_image,
         self.train_label,
         self.batch_size,
         epoch_num=None).generate_next_batch()
     self.val_generator = GenerateBatch(
         self.val_image, self.val_label, self.batch_size,
         epoch_num=None).generate_next_batch()
     self.test_generator = GenerateBatch(self.test_image,
                                         self.test_label,
                                         self.batch_size,
                                         epoch_num=1).generate_next_batch()
Example #2
0
 def generate_paths(dir_name,
                    target_labels=[0, 1, 2, 3],
                    mapping_label={
                        0: 0,
                        1: 1,
                        2: 2,
                        3: 3
                    },
                    shuffle=True):
     '''
     返回dirname中的所有病灶图像的路径
     :param dir_name:  父文件夹的路径
     :param cross_ids: 包含的交叉的折,一般来说我们做三折交叉验证,cross_ids就是[0, 1] 或者是[2]
     :param target_labels: 需要文件标注的label
     :return:
     '''
     roi_paths = []
     labels = []
     cur_dir = dir_name
     print cur_dir
     # names = os.listdir(cur_dir)
     for target_label in target_labels:
         type_dir = os.path.join(cur_dir, str(target_label))
         type_names = os.listdir(type_dir)
         roi_paths.extend(
             [os.path.join(type_dir, name) for name in type_names])
         labels.extend([mapping_label[target_label]] * len(type_names))
     if shuffle:
         roi_paths, labels = shuffle_image_label(roi_paths, labels)
     return roi_paths, roi_paths, labels
Example #3
0
File: train.py  Project: UpCoder/MICCAI2018
 def generate_paths(dir_name,
                    target_labels=[0, 1, 2, 3],
                    mapping_label={
                        0: 0,
                        1: 1,
                        2: 2,
                        3: 3
                    },
                    shuffle=True):
     '''
     返回dirname中的所有病灶图像的路径
     :param dir_name:  父文件夹的路径
     :param cross_ids: 包含的交叉的折,一般来说我们做三折交叉验证,cross_ids就是[0, 1] 或者是[2]
     :param target_labels: 需要文件标注的label
     :return:
     '''
     roi_paths = []
     labels = []
     cur_dir = dir_name
     print cur_dir
     names = os.listdir(cur_dir)
     for name in names:
         if int(name[-1]) not in target_labels:
             continue
         type_dir = os.path.join(cur_dir, name)
         roi_paths.append(type_dir)
         labels.append(mapping_label[int(name[-1])])
     if shuffle:
         roi_paths, labels = shuffle_image_label(roi_paths, labels)
     return roi_paths, roi_paths, labels
Example #4
0
def main(_):
    """Evaluate a trained multi-phase patch CNN on the held-out test set.

    Builds the TF1 placeholders and inference graph, restores the latest
    checkpoint from a hard-coded model directory, then runs batched
    prediction over the test patches and reports per-batch and overall
    accuracy via calculate_acc_error.

    NOTE(review): Python 2 script using removed TF1 APIs
    (tf.all_variables / tf.initialize_all_variables); kept as-is.
    """
    # ROI patch input: NHWC, sizes/channels taken from net_config.
    roi_images = tf.placeholder(shape=[
        None, net_config.ROI_SIZE_W, net_config.ROI_SIZE_H,
        net_config.IMAGE_CHANNEL
    ],
                                dtype=np.float32,
                                name='roi_input')
    # Larger context patch around the same ROI.
    expand_roi_images = tf.placeholder(shape=[
        None, net_config.EXPAND_SIZE_W, net_config.EXPAND_SIZE_H,
        net_config.IMAGE_CHANNEL
    ],
                                       dtype=np.float32,
                                       name='expand_roi_input')
    # Scalar placeholders so batch size / training mode can vary per run.
    batch_size_tensor = tf.placeholder(dtype=tf.int32, shape=[])
    is_training_tensor = tf.placeholder(dtype=tf.bool, shape=[])
    # inference_small here returns four values; only logits is used.
    logits, _, _, _ = inference_small(roi_images,
                                      expand_roi_images,
                                      phase_names=['NC', 'ART', 'PV'],
                                      num_classes=4,
                                      is_training=is_training_tensor,
                                      batch_size=batch_size_tensor)
    model_path = '/home/give/PycharmProjects/MICCAI2018/deeplearning/Parallel/parameters/1'
    # model_path = '/home/give/PycharmProjects/MedicalImage/Net/forpatch/cross_validation/model/multiscale/parallel/0/2200.0'
    predictions = tf.nn.softmax(logits)
    saver = tf.train.Saver(tf.all_variables())
    print predictions

    # Predicted class = argmax over softmax probabilities.
    predicted_label_tensor = tf.argmax(predictions, axis=1)
    print predicted_label_tensor
    init = tf.initialize_all_variables()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
    sess.run(init)
    tf.train.start_queue_runners(sess=sess)
    # Restore the most recent checkpoint; abort if none exists.
    latest = tf.train.latest_checkpoint(model_path)
    if not latest:
        print "No checkpoint to continue from in", model_path
        sys.exit(1)
    print "resume", latest
    saver.restore(sess, latest)

    data_dir = '/home/give/Documents/dataset/MICCAI2018/Patches/crossvalidation/1/test'
    slice_dir = '/home/give/Documents/dataset/MICCAI2018/Slices/crossvalidation/1/test'
    # Collect (path, label) pairs: one sub-directory per class id 0-3.
    labels = []
    paths = []
    for typeid in [0, 1, 2, 3]:
        cur_path = os.path.join(data_dir, str(typeid))
        names = os.listdir(cur_path)
        labels.extend([typeid] * len(names))
        paths.extend([os.path.join(cur_path, name) for name in names])
    paths, labels = shuffle_image_label(paths, labels)
    start_index = 0
    predicted_labels = []
    liver_density = load_raw_liver_density()
    # Batched inference loop over the shuffled test set.
    while True:
        if start_index >= len(paths):
            break
        print start_index, len(paths)
        end_index = start_index + net_config.BATCH_SIZE
        cur_paths = paths[start_index:end_index]
        cur_roi_images = [np.asarray(load_patch(path)) for path in cur_paths]
        cur_expand_roi_images = [
            np.asarray(load_patch(path, return_roi=True, parent_dir=slice_dir))
            for path in cur_paths
        ]
        # Resize both scales to the network's expected input sizes.
        cur_roi_images = resize_images(cur_roi_images, net_config.ROI_SIZE_W,
                                       True)
        cur_expand_roi_images = resize_images(cur_expand_roi_images,
                                              net_config.EXPAND_SIZE_W, True)
        # Look up per-case liver density by the filename prefix before the
        # last '_'; computed but unused since normalization is commented out.
        cur_liver_densitys = [
            liver_density[os.path.basename(path)
                          [:os.path.basename(path).rfind('_')]]
            for path in cur_paths
        ]
        # for i in range(len(cur_roi_images)):
        #     for j in range(3):
        #         cur_roi_images[i, :, :, j] = (1.0 * cur_roi_images[i, :, :, j]) / (1.0 * cur_liver_densitys[i][j])
        #         cur_expand_roi_images[i, :, :, j] = (1.0 * cur_expand_roi_images[i, :, :, j]) / (
        #         1.0 * cur_liver_densitys[i][j])
        predicted_batch_labels = sess.run(predicted_label_tensor,
                                          feed_dict={
                                              roi_images:
                                              cur_roi_images,
                                              expand_roi_images:
                                              cur_expand_roi_images,
                                              is_training_tensor:
                                              False,
                                              batch_size_tensor:
                                              len(cur_roi_images)
                                          })
        batch_labels = labels[start_index:end_index]
        predicted_labels.extend(predicted_batch_labels)
        start_index = end_index
        # Per-batch accuracy report.
        calculate_acc_error(predicted_batch_labels, batch_labels)
    # Overall accuracy over the whole test set.
    calculate_acc_error(predicted_labels, labels)
Example #5
0
File: val_DIY.py  Project: UpCoder/ICPR2018
def main(_):
    """Evaluate a trained multi-phase patch CNN on the validation set.

    Variant of the test-set script: builds the TF1 graph (note
    inference_small is called with point_phase=[2] and its single return
    value is used as logits), restores the latest checkpoint from a
    hard-coded model directory, then runs batched prediction and reports
    per-batch and overall accuracy via calculate_acc_error.

    NOTE(review): Python 2 script using removed TF1 APIs
    (tf.all_variables / tf.initialize_all_variables); kept as-is.
    """
    # ROI patch input: NHWC, sizes/channels taken from net_config.
    roi_images = tf.placeholder(shape=[
        None, net_config.ROI_SIZE_W, net_config.ROI_SIZE_H,
        net_config.IMAGE_CHANNEL
    ],
                                dtype=np.float32,
                                name='roi_input')
    # Larger context patch around the same ROI.
    expand_roi_images = tf.placeholder(shape=[
        None, net_config.EXPAND_SIZE_W, net_config.EXPAND_SIZE_H,
        net_config.IMAGE_CHANNEL
    ],
                                       dtype=np.float32,
                                       name='expand_roi_input')
    # Scalar placeholders so batch size / training mode can vary per run.
    batch_size_tensor = tf.placeholder(dtype=tf.int32, shape=[])
    is_training_tensor = tf.placeholder(dtype=tf.bool, shape=[])
    logits = inference_small(roi_images,
                             expand_roi_images,
                             phase_names=['NC', 'ART', 'PV'],
                             num_classes=4,
                             point_phase=[2],
                             is_training=is_training_tensor,
                             batch_size=batch_size_tensor)
    # model_path = '/home/give/PycharmProjects/MedicalImage/Net/ICIP/4-class/Patch_ROI/models/300.0/'
    # model_path = '/home/give/PycharmProjects/MedicalImage/Net/forpatch/cross_validation/model/multiscale/parallel/0/2200.0'
    model_path = '/home/give/PycharmProjects/MedicalImage/Net/ICIP/4-class/Patch_ROI/models_7'
    predictions = tf.nn.softmax(logits)
    saver = tf.train.Saver(tf.all_variables())
    print predictions

    # Predicted class = argmax over softmax probabilities.
    predicted_label_tensor = tf.argmax(predictions, axis=1)
    print predicted_label_tensor
    init = tf.initialize_all_variables()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
    sess.run(init)
    tf.train.start_queue_runners(sess=sess)
    # Restore the most recent checkpoint; abort if none exists.
    latest = tf.train.latest_checkpoint(model_path)
    if not latest:
        print "No checkpoint to continue from in", model_path
        sys.exit(1)
    print "resume", latest
    saver.restore(sess, latest)

    data_dir = '/home/give/Documents/dataset/MedicalImage/MedicalImage/Patches/ICIP/only-patch-7/val'
    # Collect (path, label) pairs: one sub-directory per class id 0-3,
    # with an (identity) label mapping applied.
    labels = []
    paths = []
    mapping_label = {0: 0, 1: 1, 2: 2, 3: 3}
    for typeid in [0, 1, 2, 3]:
        cur_path = os.path.join(data_dir, str(typeid))
        names = os.listdir(cur_path)
        labels.extend([mapping_label[typeid]] * len(names))
        paths.extend([os.path.join(cur_path, name) for name in names])
    paths, labels = shuffle_image_label(paths, labels)
    start_index = 0
    predicted_labels = []
    # Batched inference loop over the shuffled validation set.
    while True:
        if start_index >= len(paths):
            break
        print start_index, len(paths)
        end_index = start_index + net_config.BATCH_SIZE
        cur_paths = paths[start_index:end_index]
        cur_roi_images = [np.asarray(load_patch(path)) for path in cur_paths]
        cur_expand_roi_images = [
            np.asarray(
                load_patch(
                    path,
                    return_roi=True,
                    parent_dir=
                    '/home/give/Documents/dataset/MedicalImage/MedicalImage/SL_TrainAndVal/val'
                )) for path in cur_paths
        ]
        # Resize both scales to the network's expected input sizes.
        cur_roi_images = resize_images(cur_roi_images, net_config.ROI_SIZE_W,
                                       True)
        cur_expand_roi_images = resize_images(cur_expand_roi_images,
                                              net_config.EXPAND_SIZE_W, True)
        predicted_batch_labels = sess.run(predicted_label_tensor,
                                          feed_dict={
                                              roi_images:
                                              cur_roi_images,
                                              expand_roi_images:
                                              cur_expand_roi_images,
                                              is_training_tensor:
                                              False,
                                              batch_size_tensor:
                                              len(cur_roi_images)
                                          })
        batch_labels = labels[start_index:end_index]
        predicted_labels.extend(predicted_batch_labels)
        start_index = end_index
        # Per-batch accuracy report.
        calculate_acc_error(predicted_batch_labels, batch_labels)
    # Overall accuracy over the whole validation set.
    calculate_acc_error(predicted_labels, labels)