예제 #1
0
 def split_train_and_validation(self):
     """Split self.roi_images / self.labels into train and validation sets.

     For each lesion type, the first VALIDATION_DISTRIBUTION[index] samples
     of that class go to validation and the rest to training; both sets are
     then shuffled via shuffle_image_label and stored on self as
     (validation_images, validation_labels) and (train_images, train_labels).
     Assumes self.roi_images and self.labels are numpy arrays (fancy
     indexing is used below).
     """
     validation_lesions = []
     validation_labels = []
     train_lesions = []
     train_labels = []
     for index in range(len(Config.LESION_TYPE)):
         # First pick out all lesions of this type.
         lesions = self.roi_images[np.where(self.labels == index)]
         labels = self.labels[np.where(self.labels == index)]
         # NOTE(review): the shuffle below is commented out, so this is an
         # identity permutation -- the per-class order stays deterministic.
         random_index = range(len(lesions))
         # np.random.shuffle(random_index)
         lesions = lesions[random_index]
         labels = labels[random_index]
         # Number of validation samples configured for this lesion type.
         validation_num = Config.MaxSlice_Base['VALIDATION_DISTRIBUTION'][
             index]
         validation_lesions.extend(lesions[:validation_num])
         train_lesions.extend(lesions[validation_num:])
         validation_labels.extend(labels[:validation_num])
         train_labels.extend(labels[validation_num:])
     print 'validation shape is ', np.shape(validation_lesions)
     print 'train shape is ', np.shape(train_lesions)
     self.validation_images, self.validation_labels = shuffle_image_label(
         validation_lesions, validation_labels)
     self.train_images, self.train_labels = shuffle_image_label(
         train_lesions, train_labels)
     print 'validation label is \n', self.validation_labels
     print 'train_label is \n', self.train_labels
예제 #2
0
 def generate_paths(dir_name,
                    target_labels=[0, 1, 2],
                    label_mapping={
                        0: 0,
                        1: 1,
                        2: 2
                    },
                    shuffle=True):
     '''
     返回dirname中的所有病灶图像的路径
     :param dir_name:  父文件夹的路径
     :param cross_ids: 包含的交叉的折,一般来说我们做三折交叉验证,cross_ids就是[0, 1] 或者是[2]
     :param target_labels: 需要文件标注的label
     :return:
     '''
     roi_paths = []
     roi_expand_paths = []
     labels = []
     cur_dir = dir_name
     print cur_dir
     # names = os.listdir(cur_dir)
     for target_label in target_labels:
         type_dir = os.path.join(cur_dir, str(target_label))
         type_names = os.listdir(type_dir)
         roi_paths.extend(
             [os.path.join(type_dir, name) for name in type_names])
         labels.extend([label_mapping[target_label]] * len(type_names))
     if shuffle:
         roi_paths, labels = shuffle_image_label(roi_paths, labels)
     return roi_paths, roi_paths, labels
예제 #3
0
    def generate_paths(dir_name,
                       state,
                       target_labels=None,
                       shuffle=True):
        '''
        Return the paths of all lesion images under dir_name/state.
        :param dir_name: path of the parent directory
        :param state: sub-directory name, typically 'train' or 'val'
        :param target_labels: labels whose sub-directories are scanned
            (defaults to [0, 1, 2, 3])
        :param shuffle: when True, paths and labels are shuffled together
        :return: (roi_paths, roi_paths, labels) -- the path list is returned
            twice so the signature matches callers expecting expand paths.
        '''
        # Fix: avoid a mutable default argument for target_labels.
        if target_labels is None:
            target_labels = [0, 1, 2, 3]
        roi_paths = []
        labels = []

        cur_dir = os.path.join(dir_name, state)
        for target_label in target_labels:
            type_dir = os.path.join(cur_dir, str(target_label))
            type_names = os.listdir(type_dir)
            roi_paths.extend(
                [os.path.join(type_dir, name) for name in type_names])
            # Every file in this class directory inherits the label.
            labels.extend([target_label] * len(type_names))
        if shuffle:
            roi_paths, labels = shuffle_image_label(roi_paths, labels)
        return roi_paths, roi_paths, labels
예제 #4
0
 def get_next_batch(self, batch_size, distribution=None):
     """Return the next training batch as (images, labels).

     Without a distribution, batches are taken sequentially from
     self.train_images / self.train_labels, wrapping around at the end of
     the data; each wrap increments self.epoch_num. With a distribution,
     distribution[i] random samples of class i are drawn instead and
     batch_size is ignored.

     :param batch_size: number of sequential samples to return.
     :param distribution: optional per-class sample counts.
     """
     end_index = self.start_index + batch_size
     images = []
     labels = []
     if distribution is None:
         if end_index >= len(self.train_images):
             # The batch runs past the end of the data: take the tail ...
             images.extend(
                 self.train_images[self.start_index:len(self.train_images)])
             # ... and wrap around to the head for the remainder.
             images.extend(self.train_images[:end_index -
                                             len(self.train_images)])
             labels.extend(
                 self.train_labels[self.start_index:len(self.train_images)])
             labels.extend(self.train_labels[:end_index -
                                             len(self.train_images)])
             self.start_index = end_index - len(self.train_images)
             self.epoch_num += 1
             # print self.epoch_num
         else:
             images.extend(self.train_images[self.start_index:end_index])
             labels.extend(self.train_labels[self.start_index:end_index])
             self.start_index = end_index
     else:
         # Class-balanced sampling: for each class, shuffle its sample
         # indices and keep the first `num`. This branch indexes with
         # arrays, so train_images/train_labels are assumed to be numpy
         # arrays here (np.where on a plain list would not behave).
         for index, num in enumerate(distribution):
             target_indexs = np.where(self.train_labels == index)[0]
             np.random.shuffle(target_indexs)
             images.extend(self.train_images[target_indexs[:num]])
             labels.extend(self.train_labels[target_indexs[:num]])
         images, labels = shuffle_image_label(images, labels)
     return images, labels
예제 #5
0
 def __init__(self, path, new_size):
     """Load the patch images found under `path` and shuffle them.

     :param path: directory holding the patch images
     :param new_size: target size used when the images are loaded
     """
     self.path = path
     self.new_size = new_size
     # Containers start empty; load_images_label fills images and labels.
     self.images = []
     self.labels = []
     self.patchs_path = []
     self.images, self.labels = self.load_images_label()
     # Randomise the order while keeping image/label pairs aligned.
     self.images, self.labels = shuffle_image_label(self.images,
                                                    self.labels)
예제 #6
0
 def __init__(self, data_path, new_size, shuffle=True, phase='ART', category_number=None, label_index_start=0, suffix_name='_ROI.mhd'):
     """Load a validation data set from disk.

     :param data_path: root directory of the data set
     :param new_size: target image size passed to the loader
     :param shuffle: when True, images and labels are shuffled together
     :param phase: imaging phase to load (e.g. 'NC', 'ART', 'PV')
     :param category_number: category labels to load
         (defaults to [0, 1, 2, 3, 4])
     :param label_index_start: label index offset passed to the loader
     :param suffix_name: file suffix identifying ROI volumes
     """
     # Fix: avoid a mutable default argument for category_number.
     if category_number is None:
         category_number = [0, 1, 2, 3, 4]
     self.data_path = data_path
     self.phase = phase
     # Bug fix: record the actual `shuffle` argument -- the original
     # always stored True regardless of what the caller passed.
     self.shuffle = shuffle
     self.avg_liver_dict = extract_avg_liver_dict()
     self.category_number = category_number
     self.images, self.labels, self.image_names = ValDataSet.load_data_path(data_path, new_size, self.phase,
                                                                            self.avg_liver_dict,
                                                                            self.category_number,
                                                                            label_index_start,
                                                                            suffix_name=suffix_name)
     if shuffle:
         self.images, self.labels = shuffle_image_label(self.images, self.labels)
예제 #7
0
 def get_next_batch(self, batch_size=None, distribution=None):
     """Return a random batch of (images, labels).

     :param batch_size: size of the random batch; when None the full
         data set (self.images, self.labels) is returned unshuffled.
     :param distribution: optional per-class sample counts; when given,
         up to distribution[i] random samples of class i are returned
         and batch_size is ignored.
     """
     if batch_size is None:
         return self.images, self.labels
     if distribution is None:
         # Draw batch_size random indices without replacement.
         # list(...) keeps np.random.shuffle's in-place contract working
         # on Python 3 as well (range objects are not shuffleable there).
         random_index = list(range(len(self.labels)))
         np.random.shuffle(random_index)
         batch_index = random_index[:batch_size]
         batch_images = [self.images[index] for index in batch_index]
         batch_labels = [self.labels[index] for index in batch_index]
         batch_images, batch_labels = shuffle_image_label(batch_images, batch_labels)
         return batch_images, batch_labels
     images = []
     labels = []
     for index, quota in enumerate(distribution):
         # Boolean mask of the samples belonging to class `index`.
         cur_indexs = (np.array(self.labels) == index)
         random_index = list(range(len(self.labels)))
         np.random.shuffle(random_index)
         # Walk the shuffled indices and keep up to `quota` samples of
         # this class. (Bug fix: the original shadowed the loop variable
         # `count` with the running counter; `quota`/`taken` keep the two
         # meanings separate.)
         taken = 0
         for cur_index in random_index:
             if cur_indexs[cur_index]:
                 taken += 1
                 images.append(self.images[cur_index])
                 labels.append(self.labels[cur_index])
             if taken >= quota:
                 break
     images, labels = shuffle_image_label(images, labels)
     return images, labels
예제 #8
0
 def __init__(self,
              data_path,
              new_sizes,
              shuffle=True,
              phase='ART',
              category_number=5):
     """Load a multi-size validation data set from disk.

     :param data_path: root directory of the data set
     :param new_sizes: target image sizes passed to the loader
     :param shuffle: when True, images and labels are shuffled together
     :param phase: imaging phase to load (e.g. 'NC', 'ART', 'PV')
     :param category_number: number of categories to load
     """
     self.data_path = data_path
     self.phase = phase
     # Bug fix: record the actual `shuffle` argument -- the original
     # always stored True regardless of what the caller passed.
     self.shuffle = shuffle
     self.avg_liver_dict = extract_avg_liver_dict()
     self.category_number = category_number
     self.images, self.labels, self.image_names = ValDataSet.load_data_path_multisize(
         data_path, new_sizes, self.phase, self.avg_liver_dict,
         self.category_number)
     if shuffle:
         self.images, self.labels = shuffle_image_label(
             self.images, self.labels)
예제 #9
0
 def generate_paths(dir_name, target_labels=None, shuffle=True):
     '''
     Return the paths of all lesion images under dir_name.
     :param dir_name: parent directory; entry names must end with their
         label digit (e.g. 'xxx_3').
     :param target_labels: labels to keep (defaults to [0, 1, 2, 3, 4])
     :param shuffle: when True, paths and labels are shuffled together
     :return: (paths, labels)
     '''
     # Fix: avoid a mutable default argument for target_labels.
     if target_labels is None:
         target_labels = [0, 1, 2, 3, 4]
     paths = []
     labels = []
     names = os.listdir(dir_name)
     for name in names:
         # The last character of the entry name encodes its label.
         if int(name[-1]) in target_labels:
             paths.append(os.path.join(dir_name, name))
             labels.append(int(name[-1]))
     if shuffle:
         # Bug fix: the shuffled paths were previously assigned to a
         # different name (`roi_paths`) while the function returned the
         # unshuffled `paths` -- labels were reordered but paths were not,
         # misaligning every (path, label) pair.
         paths, labels = shuffle_image_label(paths, labels)
     return paths, labels
예제 #10
0
 def __init__(self, paths, category_number):
     """Collect patch paths and labels from the given class directories.

     :param paths: directories named after their integer label; every
         patch below a directory inherits that label
     :param category_number: when 2, the labels are collapsed to a binary
         task (0, 1 and 3 become 0; everything else becomes 1)
     """
     self.paths = paths
     self.patchs_path = []
     self.labels = []
     for path in self.paths:
         cur_paths = PatchBase.load_paths(path)
         self.patchs_path.extend(cur_paths)
         # The directory's base name is the integer label for its patches.
         label = int(os.path.basename(path))
         self.labels.extend([label] * len(cur_paths))
     if category_number == 2:
         # Collapse the multi-way labels into a binary grouping.
         self.labels = [0 if label in (0, 1, 3) else 1
                        for label in self.labels]
     self.patchs_path, self.labels = shuffle_image_label(
         self.patchs_path, self.labels)
     self.startindex = 0
     self.epochnum = 0
예제 #11
0
def main(_):
    """Evaluate a trained multi-phase patch/ROI model on the test set.

    Builds the inference graph, restores the latest checkpoint from
    model_path, runs batched predictions over every patch found under
    data_dir, and reports accuracy/error statistics per batch and for the
    whole test set.
    """
    # Placeholders for the ROI crop and the expanded (context) crop.
    roi_images = tf.placeholder(shape=[
        None, net_config.ROI_SIZE_W, net_config.ROI_SIZE_H,
        net_config.IMAGE_CHANNEL
    ],
                                dtype=np.float32,
                                name='roi_input')
    expand_roi_images = tf.placeholder(shape=[
        None, net_config.EXPAND_SIZE_W, net_config.EXPAND_SIZE_H,
        net_config.IMAGE_CHANNEL
    ],
                                       dtype=np.float32,
                                       name='expand_roi_input')
    batch_size_tensor = tf.placeholder(dtype=tf.int32, shape=[])
    is_training_tensor = tf.placeholder(dtype=tf.bool, shape=[])
    logits = inference_small(roi_images,
                             expand_roi_images,
                             phase_names=['NC', 'ART', 'PV'],
                             num_classes=5,
                             point_phase=[2],
                             is_training=is_training_tensor,
                             batch_size=batch_size_tensor)
    model_path = '/home/give/PycharmProjects/MedicalImage/Net/ICIP/Patch_ROI/models/500.0'
    # model_path = '/home/give/PycharmProjects/MedicalImage/Net/forpatch/cross_validation/model/multiscale/parallel/0/2200.0'
    predictions = tf.nn.softmax(logits)
    saver = tf.train.Saver(tf.all_variables())
    print predictions

    # Predicted class = argmax over the softmax scores.
    predicted_label_tensor = tf.argmax(predictions, axis=1)
    print predicted_label_tensor
    init = tf.initialize_all_variables()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
    sess.run(init)
    tf.train.start_queue_runners(sess=sess)
    # Restore the most recent checkpoint; abort if none is found.
    latest = tf.train.latest_checkpoint(model_path)
    if not latest:
        print "No checkpoint to continue from in", model_path
        sys.exit(1)
    print "resume", latest
    saver.restore(sess, latest)

    # Collect every test patch path with its class label
    # (one sub-directory per class id 0..4).
    data_dir = '/home/give/Documents/dataset/MedicalImage/MedicalImage/Patches/ICIP/only-patch/test'
    labels = []
    paths = []
    for typeid in [0, 1, 2, 3, 4]:
        cur_path = os.path.join(data_dir, str(typeid))
        names = os.listdir(cur_path)
        labels.extend([typeid] * len(names))
        paths.extend([os.path.join(cur_path, name) for name in names])
    paths, labels = shuffle_image_label(paths, labels)
    start_index = 0
    predicted_labels = []
    liver_density = load_raw_liver_density()
    while True:
        # Stop once every path has been consumed.
        if start_index >= len(paths):
            break
        print start_index, len(paths)
        end_index = start_index + net_config.BATCH_SIZE
        cur_paths = paths[start_index:end_index]
        cur_roi_images = [np.asarray(load_patch(path)) for path in cur_paths]
        # The expanded crops are loaded from a separate directory tree.
        cur_expand_roi_images = [
            np.asarray(
                load_patch(
                    path,
                    return_roi=True,
                    parent_dir=
                    '/home/give/Documents/dataset/MedicalImage/MedicalImage/SL_TrainAndVal/val'
                )) for path in cur_paths
        ]
        cur_roi_images = resize_images(cur_roi_images, net_config.ROI_SIZE_W,
                                       True)
        cur_expand_roi_images = resize_images(cur_expand_roi_images,
                                              net_config.EXPAND_SIZE_W, True)
        # Average liver density per patch, keyed by the file name without
        # its trailing '_<suffix>' part. NOTE(review): currently unused --
        # the density normalisation below is commented out.
        cur_liver_densitys = [
            liver_density[os.path.basename(path)
                          [:os.path.basename(path).rfind('_')]]
            for path in cur_paths
        ]
        # for i in range(len(cur_roi_images)):
        #     for j in range(3):
        #         cur_roi_images[i, :, :, j] = (1.0 * cur_roi_images[i, :, :, j]) / (1.0 * cur_liver_densitys[i][j])
        #         cur_expand_roi_images[i, :, :, j] = (1.0 * cur_expand_roi_images[i, :, :, j]) / (
        #         1.0 * cur_liver_densitys[i][j])
        predicted_batch_labels = sess.run(predicted_label_tensor,
                                          feed_dict={
                                              roi_images:
                                              cur_roi_images,
                                              expand_roi_images:
                                              cur_expand_roi_images,
                                              is_training_tensor:
                                              False,
                                              batch_size_tensor:
                                              len(cur_roi_images)
                                          })
        batch_labels = labels[start_index:end_index]
        predicted_labels.extend(predicted_batch_labels)
        start_index = end_index
        # Per-batch accuracy/error report.
        calculate_acc_error(predicted_batch_labels, batch_labels)
    # Overall accuracy/error report over the whole test set.
    calculate_acc_error(predicted_labels, labels)
예제 #12
0
 def __init__(self, data_dir):
     """Collect the (path, label) pairs under data_dir and shuffle them."""
     self.data_dir = data_dir
     generated_paths, generated_labels = DataSet.generate_paths(self.data_dir)
     # Randomise the order while keeping path/label pairs aligned.
     self.paths, self.labels = shuffle_image_label(generated_paths,
                                                   generated_labels)