def execute(self):
    '''Execute the inference process.'''
    model = self._load_model()
    image = read_nifti_file(self.path_to_data)
    mask = self.predict(image, model)
    self.write_image(mask)
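# Sketch of the read_nifti_file helper used above, assuming nibabel as the
# I/O library. The _sketch suffix marks this as an illustrative assumption,
# not the project's verified implementation.
import nibabel as nib
import numpy as np

def read_nifti_file_sketch(path):
    """Load a NIfTI file and return its voxel data as a float32 numpy array."""
    nifti = nib.load(path)
    return np.asarray(nifti.dataobj, dtype=np.float32)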
def save_data(self):
    '''Saves CT scans and segmentation masks to separate training and
    validation tfrecords files.

    Raises:
        ValueError: If separation into training and validation sets has failed.
    '''
    print("Write to train.tfrecords: ", [self.vol_paths[x] for x in self.train_range])
    print("Write to val.tfrecords: ", [self.vol_paths[x] for x in self.val_range])

    for path in [self.path_to_tfrecords_train, self.path_to_tfrecords_val]:
        if not os.path.exists(path):
            os.makedirs(path)

    train_writer = tf.io.TFRecordWriter(self.path_to_tfrecords_train + 'train.tfrecords')
    val_writer = tf.io.TFRecordWriter(self.path_to_tfrecords_val + 'val.tfrecords')

    # Cycle through the CT and segmentation pairs
    for i in range(len(self.vol_paths)):
        print('Writing CT Scan: {}/{}'.format(i + 1, len(self.vol_paths)))
        sys.stdout.flush()
        print(self.vol_paths[i], self.seg_paths[i])

        # Read the NIfTI volume and its segmentation mask
        vol_data = read_nifti_file(self.vol_paths[i])
        seg_data = read_nifti_file(self.seg_paths[i])

        # Clip intensities to the [-1024, 600] HU window
        vol_data = np.clip(vol_data, a_min=-1024, a_max=600)

        # Standardize the volume (zero mean, unit variance)
        vol_data = standardize_volume(vol_data)
        print(np.amax(vol_data), np.amin(vol_data), np.mean(vol_data), np.std(vol_data))

        # Keep only the slices that contain labels
        if self.filter:
            vol_data, seg_data = filter_volume(vol_data, seg_data, self.axis)

        # Number of slices along the chosen view
        n_slices = get_n_slices(self.view, vol_data)

        # Cycle through the image slices
        n_images = 0
        for j in range(n_slices):
            vol_slice = get_slice(self.view, vol_data, j)
            seg_slice = get_slice(self.view, seg_data, j)

            # Resize if necessary: bilinear (order=1) for the image,
            # nearest neighbor (order=0) for the segmentation mask
            h, w = np.shape(seg_slice)
            if h != self.input_height or w != self.input_width:
                vol_slice = resize(vol_slice,
                                   output_shape=(self.input_height, self.input_width),
                                   order=1, mode='constant',
                                   anti_aliasing=False, preserve_range=True)
                seg_slice = resize(seg_slice,
                                   output_shape=(self.input_height, self.input_width),
                                   order=0, mode='constant',
                                   anti_aliasing=False, preserve_range=True)

            # Expand dims and convert to bytes so TF can store the slices
            vol_slice = make_tfrecords_ready(vol_slice)
            seg_slice = make_tfrecords_ready(seg_slice)

            # Wrap the data as TensorFlow features in an Example protocol buffer
            example = tf.train.Example(features=tf.train.Features(feature={
                'data/slice': self._bytes_feature(vol_slice),
                'data/seg': self._bytes_feature(seg_slice)}))

            # Serialize to string and write to the matching TFRecords file
            if i in self.train_range:
                train_writer.write(example.SerializeToString())
            elif i in self.val_range:
                val_writer.write(example.SerializeToString())
            else:
                raise ValueError("Not in training or validation range.")
            n_images += 1
        print('Number of slices: {}'.format(n_images))

    train_writer.close()
    val_writer.close()
    sys.stdout.flush()
    log.info('YELLOW', 'Saved as TFRecords')
    return self.path_to_tfrecords_train, self.path_to_tfrecords_val
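# Sketch: reading the 2D-slice records back with tf.data. The feature keys
# match save_data above; the float32 decode dtype and the reshape to
# (height, width, 1) are assumptions about what make_tfrecords_ready emits.
import tensorflow as tf

def parse_slice_example(serialized, height, width):
    """Decode one serialized Example into an (image, mask) pair of tensors."""
    features = tf.io.parse_single_example(serialized, {
        'data/slice': tf.io.FixedLenFeature([], tf.string),
        'data/seg': tf.io.FixedLenFeature([], tf.string)})
    img = tf.reshape(tf.io.decode_raw(features['data/slice'], tf.float32),
                     (height, width, 1))
    seg = tf.reshape(tf.io.decode_raw(features['data/seg'], tf.float32),
                     (height, width, 1))
    return img, seg

# Usage (hypothetical path and slice size):
# dataset = tf.data.TFRecordDataset('train.tfrecords').map(
#     lambda x: parse_slice_example(x, 256, 256))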
def save_data(self):
    '''Saves CT scans and segmentation masks to separate training and
    validation tfrecords files.

    Raises:
        ValueError: If separation into training and validation sets has failed.
    '''
    print("Write to train.tfrecords: ", [self.vol_paths[x] for x in self.train_range])
    print("Write to val.tfrecords: ", [self.vol_paths[x] for x in self.val_range])

    for path in [self.path_to_tfrecords_train, self.path_to_tfrecords_val]:
        if not os.path.exists(path):
            os.makedirs(path)

    train_writer = tf.io.TFRecordWriter(self.path_to_tfrecords_train + 'train.tfrecords')
    val_writer = tf.io.TFRecordWriter(self.path_to_tfrecords_val + 'val.tfrecords')

    # Cycle through the CT and segmentation pairs
    for i in range(len(self.vol_paths)):
        print('Writing CT Scan: {}/{}'.format(i + 1, len(self.vol_paths)))
        sys.stdout.flush()
        print(self.vol_paths[i], self.seg_paths[i])

        # Read the NIfTI volume and its segmentation mask
        vol_data = read_nifti_file(self.vol_paths[i])
        seg_data = read_nifti_file(self.seg_paths[i])
        print('Original shape: ', vol_data.shape, seg_data.shape)

        # Resize in-plane to the smaller axial shape, keeping the original
        # depth dimension: bilinear (order=1) for the image, nearest
        # neighbor (order=0) for the mask so label values survive intact
        vol_data = resize(vol_data,
                          [vol_data.shape[0], self.image_shape_resize[1], self.image_shape_resize[2]],
                          order=1, mode='constant', anti_aliasing=False, preserve_range=True)
        seg_data = resize(seg_data,
                          [seg_data.shape[0], self.image_shape_resize[1], self.image_shape_resize[2]],
                          order=0, mode='constant', anti_aliasing=False, preserve_range=True)
        print('Resized to shape: ', vol_data.shape, seg_data.shape)

        # Clip intensities to the [-1024, 600] HU window
        vol_data = np.clip(vol_data, a_min=-1024, a_max=600)

        # Standardize the volume (zero mean, unit variance)
        vol_data = standardize_volume(vol_data)
        print(np.amax(vol_data), np.amin(vol_data), np.mean(vol_data), np.std(vol_data))

        # Number of complete axial slabs that fit into the volume
        s = math.floor(vol_data.shape[0] / self.image_shape_resize[0])
        print('CT scan will be split into {} slabs of size {}.'.format(s, self.image_shape_resize))

        # Split into slabs
        for j in range(s):
            start = self.image_shape_resize[0] * j
            end = self.image_shape_resize[0] * (j + 1)
            vol_slab = vol_data[start:end, :, :]
            seg_slab = seg_data[start:end, :, :]

            # Expand dims and convert to bytes so TF can store the slabs
            vol_slab = make_tfrecords_ready_3d(vol_slab)
            seg_slab = make_tfrecords_ready_3d(seg_slab)

            # Wrap the data as TensorFlow features in an Example protocol buffer
            example = tf.train.Example(features=tf.train.Features(feature={
                'data/img': self._bytes_feature(vol_slab),
                'data/seg': self._bytes_feature(seg_slab)}))

            # Serialize to string and write to the matching TFRecords file
            if i in self.train_range:
                train_writer.write(example.SerializeToString())
            elif i in self.val_range:
                val_writer.write(example.SerializeToString())
            else:
                raise ValueError("Not in training or validation range.")

    train_writer.close()
    val_writer.close()
    sys.stdout.flush()
    log.info('YELLOW', 'Saved as TFRecords')
    return self.path_to_tfrecords_train, self.path_to_tfrecords_val
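# Sketch of what make_tfrecords_ready_3d and _bytes_feature plausibly do:
# append a channel axis, cast, and serialize to raw bytes, then wrap the
# bytes for a tf.train.Example. The _sketch names and the float32 dtype are
# assumptions, not the project's verified helpers.
import numpy as np
import tensorflow as tf

def make_tfrecords_ready_3d_sketch(volume):
    """Append a channel dimension and serialize the volume to raw bytes."""
    return np.expand_dims(volume, axis=-1).astype(np.float32).tobytes()

def bytes_feature_sketch(value):
    """Wrap raw bytes in a tf.train.Feature holding a single BytesList entry."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))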
def save_data(self):
    '''Saves concatenated CT scans and segmentation masks to separate
    training and validation tfrecords files. Each CT scan is first
    segmented at low resolution; the upsampled prediction is then
    concatenated with the image volume before writing.
    '''
    print("Write to train.tfrecords: ", [self.vol_paths[x] for x in self.train_range])
    print("Write to val.tfrecords: ", [self.vol_paths[x] for x in self.val_range])

    self.path_to_tfrecords_train = os.path.join(self.path_to_tfrecords_train, 'concat/')
    self.path_to_tfrecords_val = os.path.join(self.path_to_tfrecords_val, 'concat/')
    for path in [self.path_to_tfrecords_train, self.path_to_tfrecords_val]:
        if not os.path.exists(path):
            os.makedirs(path)

    train_writer = tf.io.TFRecordWriter(self.path_to_tfrecords_train + 'concat_train.tfrecords')
    val_writer = tf.io.TFRecordWriter(self.path_to_tfrecords_val + 'concat_val.tfrecords')

    # Cycle through the CT and segmentation pairs
    for i in range(len(self.vol_paths)):
        print('Writing CT Scan: {}/{}'.format(i + 1, len(self.vol_paths)))
        sys.stdout.flush()
        print(self.vol_paths[i], self.seg_paths[i])

        # Read the NIfTI volume and its segmentation mask
        vol_data = read_nifti_file(self.vol_paths[i])
        seg_data = read_nifti_file(self.seg_paths[i])
        print('Original shape: ', vol_data.shape, seg_data.shape)

        # Clip intensities to the [-1024, 600] HU window
        vol_data = np.clip(vol_data, a_min=-1024, a_max=600)

        # Standardize the volume (zero mean, unit variance)
        vol_data = standardize_volume(vol_data)
        print(np.amax(vol_data), np.amin(vol_data), np.mean(vol_data), np.std(vol_data))

        # Resize the CT to the downsampled shape for prediction
        vol_data_resized = resize(vol_data, self.image_shape_lowres)
        print('Original resized to shape: ', vol_data_resized.shape)

        # Predict with the low-resolution model
        pred = self._predict(vol_data_resized)
        print('Predicted to shape: ', pred.shape)

        # Upsample the prediction back to the high-resolution in-plane shape
        pred_up = resize(pred, [vol_data.shape[0], self.image_shape_highres[1], self.image_shape_highres[2]])
        print('Prediction upsampled to shape: ', pred_up.shape)

        # Resize the original volume and mask in-plane to the high-res
        # shape, keeping the original depth: nearest neighbor (order=0)
        # for the mask so label values survive intact
        vol_data = resize(vol_data,
                          [vol_data.shape[0], self.image_shape_highres[1], self.image_shape_highres[2]],
                          order=1, mode='constant', anti_aliasing=False, preserve_range=True)
        seg_data = resize(seg_data,
                          [seg_data.shape[0], self.image_shape_highres[1], self.image_shape_highres[2]],
                          order=0, mode='constant', anti_aliasing=False, preserve_range=True)

        # Insert a new channel dimension at the last position
        vol_data = np.expand_dims(vol_data, axis=-1)
        seg_data = np.expand_dims(seg_data, axis=-1)
        print('Original downsampled to: ', vol_data.shape, seg_data.shape)

        # Concatenate the upsampled prediction and the downsampled original
        new_vol = self._concatenate(pred_up, vol_data)
        print('Concatenated to shape: ', new_vol.shape)

        # Write the concatenated volume and the mask to the TFRecords files
        train_writer, val_writer = self._write_tfrecords(i, new_vol, seg_data,
                                                         train_writer, val_writer)

    print('Number of training samples: {}, Number of validation samples: {}.'.format(self.n_train, self.n_val))
    train_writer.close()
    val_writer.close()
    sys.stdout.flush()
    log.info('YELLOW', 'Saved as TFRecords')
    return self.path_to_tfrecords_train, self.path_to_tfrecords_val, self.n_train, self.n_val
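# Sketch: _concatenate most plausibly stacks the upsampled prediction and
# the image volume along the last (channel) axis, giving the second-stage
# model a two-channel input. This is an assumption about a private helper,
# not its verified implementation.
import numpy as np

def concatenate_sketch(pred_up, vol_data):
    """Concatenate prediction and image along the trailing channel axis."""
    if pred_up.ndim == vol_data.ndim - 1:
        # Give the prediction a channel axis to match the expanded image
        pred_up = np.expand_dims(pred_up, axis=-1)
    return np.concatenate([pred_up, vol_data], axis=-1)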
def save_data(self):
    '''Saves CT scans and segmentation masks to separate training and
    validation tfrecords files.

    Raises:
        ValueError: If separation into training and validation sets has failed.
    '''
    print("Write to train.tfrecords: ", [self.vol_paths[x] for x in self.train_range])
    print("Write to val.tfrecords: ", [self.vol_paths[x] for x in self.val_range])

    for path in [self.path_to_tfrecords_train, self.path_to_tfrecords_val]:
        if not os.path.exists(path):
            os.makedirs(path)

    train_writer = tf.io.TFRecordWriter(self.path_to_tfrecords_train + 'train.tfrecords')
    val_writer = tf.io.TFRecordWriter(self.path_to_tfrecords_val + 'val.tfrecords')

    # Cycle through the CT and segmentation pairs
    for i in range(len(self.vol_paths)):
        print('Writing CT Scan: {}/{}'.format(i + 1, len(self.vol_paths)))
        sys.stdout.flush()
        print(self.vol_paths[i], self.seg_paths[i])

        # Read the NIfTI volume and its segmentation mask
        vol_data = read_nifti_file(self.vol_paths[i])
        seg_data = read_nifti_file(self.seg_paths[i])
        print('Original shape: ', vol_data.shape, seg_data.shape)

        # Clip intensities to the [-1024, 600] HU window
        vol_data = np.clip(vol_data, a_min=-1024, a_max=600)

        # Normalizing to [0, 1] is currently disabled in favor of the
        # standardization below
        # vol_data = normalize_volume(vol_data)
        print('Classes: ', np.unique(seg_data))

        # Resize to the new shape: bilinear (order=1) for the image,
        # nearest neighbor (order=0) for the mask so label values
        # survive intact
        vol_data = resize(vol_data, self.image_shape_resize, mode='constant',
                          order=1, preserve_range=True, anti_aliasing=False)
        seg_data = resize(seg_data, self.image_shape_resize, mode='constant',
                          order=0, preserve_range=True, anti_aliasing=False)
        print('Resized to shape: ', vol_data.shape, seg_data.shape)

        # Rounding the mask back to integers is unnecessary with
        # nearest-neighbor interpolation, so it stays disabled
        # seg_data = np.round(seg_data)
        print('Classes: ', np.unique(seg_data))
        # two_display([vol_data[None, 64, :, :], seg_data[None, 64, :, :]])

        # Standardize the volume (zero mean, unit variance)
        # self.mean, self.std = compute_mean_variance_file(vol_data)
        vol_data = standardize_volume(vol_data)
        print("After Normalizing: Max {}, Min {}, Mean {}, Std {}".format(
            np.amax(vol_data), np.amin(vol_data), np.mean(vol_data), np.std(vol_data)))

        # Expand dims and convert to bytes so TF can store the volume
        vol_data = make_tfrecords_ready_3d(vol_data)
        seg_data = make_tfrecords_ready_3d(seg_data)

        # Wrap the data as TensorFlow features in an Example protocol buffer
        example = tf.train.Example(features=tf.train.Features(feature={
            'data/img': self._bytes_feature(vol_data),
            'data/seg': self._bytes_feature(seg_data)}))

        # Serialize to string and write to the matching TFRecords file
        if i in self.train_range:
            train_writer.write(example.SerializeToString())
        elif i in self.val_range:
            val_writer.write(example.SerializeToString())
        else:
            raise ValueError("Not in training or validation range.")

    train_writer.close()
    val_writer.close()
    sys.stdout.flush()
    log.info('YELLOW', 'Saved as TFRecords')
    return self.path_to_tfrecords_train, self.path_to_tfrecords_val
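# Sketch: standardize_volume as zero-mean, unit-variance normalization,
# which matches the max/min/mean/std printout above. The epsilon guard
# against constant volumes is an added assumption.
import numpy as np

def standardize_volume_sketch(volume):
    """Shift the volume to zero mean and scale it to unit standard deviation."""
    std = np.std(volume)
    return (volume - np.mean(volume)) / (std + 1e-8)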