Code example #1
    def __init__(self, options=Options()):
        self.options = options

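        # Cached path: restore bookkeeping from a previously pickled dataset object
        # and reload the image/label/set tensors from the accompanying TFRecord file.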
        if options.cache and os.path.isfile(self.pckl_name()):
            with open(self.pckl_name(), 'rb') as f:
                tmp = pickle.load(f)
            self._epochs_completed = tmp._epochs_completed
            self._index_in_epoch = tmp._index_in_epoch
            self.patients = self._get_patients()
            self._images, self._labels, self._sets = read_tf_record(
                self.tfrecord_name())

            with open(self.split_name(), 'rb') as f:
                self.patients_split = pickle.load(f)
            if not os.path.exists(self.split_name() + ".deprecated"):
                os.rename(self.split_name(), self.split_name() + ".deprecated")
            self._convert_patient_split()

            self._epochs_completed = {'TRAIN': 0, 'VAL': 0, 'TEST': 0}
            self._index_in_epoch = {'TRAIN': 0, 'VAL': 0, 'TEST': 0}
        else:
            # Collect all patients
            self.patients = self._get_patients()
            self.patients_split = {}  # Will later store whether each patient belongs to the train, val or test set

            # Determine Train, Val & Test set based on patients
            if not os.path.isfile(self.split_name()):
                _num_patients = len(self.patients)
                _ridx = numpy.random.permutation(_num_patients)

                _already_taken = 0
                for split in self.options.partition.keys():
                    if 1.0 >= self.options.partition[split] > 0.0:
                        num_patients_for_current_split = max(
                            1,
                            math.floor(self.options.partition[split] *
                                       _num_patients))
                    else:
                        num_patients_for_current_split = int(
                            self.options.partition[split])

                    if num_patients_for_current_split > (_num_patients -
                                                         _already_taken):
                        num_patients_for_current_split = _num_patients - _already_taken

                    self.patients_split[
                        split] = _ridx[_already_taken:_already_taken +
                                       num_patients_for_current_split]
                    _already_taken += num_patients_for_current_split

                self._convert_patient_split()  # NEW! We have a new, OS-agnostic format for storing the patients split.
            else:
                with open(self.split_name(), 'rb') as f:
                    self.patients_split = pickle.load(f)
                self._convert_patient_split()  # NEW! We have a new, OS-agnostic format for storing the patients split.

            # Iterate over all patients and the filtered NII files and extract slices
            _images = []
            _labels = []
            _sets = []
            for p, patient in enumerate(self.patients):
                if patient["name"] in self.patients_split['TRAIN']:
                    _set_of_current_patient = BRAINWEB.SET_TYPES.index('TRAIN')
                elif patient["name"] in self.patients_split['VAL']:
                    _set_of_current_patient = BRAINWEB.SET_TYPES.index('VAL')
                elif patient["name"] in self.patients_split['TEST']:
                    _set_of_current_patient = BRAINWEB.SET_TYPES.index('TEST')
                else:
                    continue  # Patient was not assigned to any split; skip it

                minc, minc_seg, minc_skullmap = self.load_volume_and_groundtruth(
                    patient["filtered_files"][0], patient)

                # Iterate over all slices and collect them
                for s in range(
                        self.options.sliceStart,
                        min(self.options.sliceEnd,
                            minc.num_slices_along_axis(self.options.axis))):
                    if 0 < self.options.numSamples < len(_images):
                        break

                    slice_data = minc.get_slice(s, self.options.axis)
                    slice_seg = minc_seg.get_slice(s, self.options.axis)

                    # Skip the slice if it is entirely black
                    if numpy.unique(slice_data).size == 1:
                        continue

                    # assert numpy.max(slice_data) <= 1.0, "Slice range is outside [0; 1]!"

                    if self.options.sliceResolution is not None:
                        # If the images are too big in resolution, do downsampling
                        if (slice_data.shape[0] > self.options.sliceResolution[0]
                                or slice_data.shape[1] > self.options.sliceResolution[1]):
                            slice_data = cv2.resize(
                                slice_data,
                                tuple(self.options.sliceResolution))
                            slice_seg = cv2.resize(
                                slice_seg,
                                tuple(self.options.sliceResolution),
                                interpolation=cv2.INTER_NEAREST)
                        # Otherwise, do zero padding
                        else:
                            tmp_slice = numpy.zeros(
                                self.options.sliceResolution)
                            tmp_slice_seg = numpy.zeros(
                                self.options.sliceResolution)
                            start_x = (self.options.sliceResolution[1] -
                                       slice_data.shape[1]) // 2
                            start_y = (self.options.sliceResolution[0] -
                                       slice_data.shape[0]) // 2
                            end_x = start_x + slice_data.shape[1]
                            end_y = start_y + slice_data.shape[0]
                            tmp_slice[start_y:end_y,
                                      start_x:end_x] = slice_data
                            tmp_slice_seg[start_y:end_y,
                                          start_x:end_x] = slice_seg
                            slice_data = tmp_slice
                            slice_seg = tmp_slice_seg

                    for angle in self.options.rotations:
                        if angle != 0:
                            slice_data_rotated = rotate(slice_data,
                                                        angle,
                                                        reshape=False)
                            slice_seg_rotated = rotate(slice_seg,
                                                       angle,
                                                       reshape=False,
                                                       mode='nearest')
                        else:
                            slice_data_rotated = slice_data
                            slice_seg_rotated = slice_seg

                        # Either collect crops
                        if self.options.useCrops:
                            if self.options.cropType == 'random':
                                rx = numpy.random.randint(
                                    0,
                                    high=(slice_data_rotated.shape[1] -
                                          self.options.cropWidth),
                                    size=self.options.numRandomCropsPerSlice)
                                ry = numpy.random.randint(
                                    0,
                                    high=(slice_data_rotated.shape[0] -
                                          self.options.cropHeight),
                                    size=self.options.numRandomCropsPerSlice)
                                for r in range(
                                        self.options.numRandomCropsPerSlice):
                                    _images.append(
                                        crop(slice_data_rotated, ry[r], rx[r],
                                             self.options.cropHeight,
                                             self.options.cropWidth))
                                    _labels.append(
                                        crop(slice_seg_rotated, ry[r], rx[r],
                                             self.options.cropHeight,
                                             self.options.cropWidth))
                                    _sets.append(_set_of_current_patient)
                            elif self.options.cropType == 'center':
                                slice_data_cropped = crop_center(
                                    slice_data_rotated, self.options.cropWidth,
                                    self.options.cropHeight)
                                slice_seg_cropped = crop_center(
                                    slice_seg_rotated, self.options.cropWidth,
                                    self.options.cropHeight)
                                _images.append(slice_data_cropped)
                                _labels.append(slice_seg_cropped)
                                _sets.append(_set_of_current_patient)
                        # Or whole slices
                        else:
                            _images.append(slice_data_rotated)
                            _labels.append(slice_seg_rotated)
                            _sets.append(_set_of_current_patient)

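            # Stack the collected slices into numpy arrays; add a trailing channel
            # dimension so grayscale images end up with shape (N, H, W, 1).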
            self._images = numpy.array(_images).astype(numpy.float32)
            self._labels = numpy.array(_labels).astype(numpy.float32)
            # assert numpy.max(self._images) <= 1.0, "MINC range is outside [0; 1]!"
            if self._images.ndim < 4:
                self._images = numpy.expand_dims(self._images, 3)
            self._sets = numpy.array(_sets).astype(numpy.int32)
            self._epochs_completed = {'TRAIN': 0, 'VAL': 0, 'TEST': 0}
            self._index_in_epoch = {'TRAIN': 0, 'VAL': 0, 'TEST': 0}

            if self.options.cache:
                write_tf_record(self._images, self._labels, self._sets,
                                self.tfrecord_name())
                tmp = copy.copy(self)
                tmp._images = None
                tmp._labels = None
                tmp._sets = None
                with open(self.pckl_name(), 'wb') as f:
                    pickle.dump(tmp, f)
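Both the snippet above and code example #3 below call crop and crop_center helpers that are not part of the excerpt. Judging only from the call sites, crop(image, y, x, height, width) and crop_center(image, width, height) are presumably simple slicing utilities along the lines of this sketch (an assumption, not the project's actual implementation):

def crop(image, y, x, height, width):
    # Cut a height x width window whose top-left corner is at (y, x).
    return image[y:y + height, x:x + width]

def crop_center(image, width, height):
    # Cut a centred window of the requested size.
    start_y = (image.shape[0] - height) // 2
    start_x = (image.shape[1] - width) // 2
    return image[start_y:start_y + height, start_x:start_x + width]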
Code example #2
    def preprocess(self, state):
        preprocessed_state = to_grayscale(state)
        preprocessed_state = zero_center(preprocessed_state)
        preprocessed_state = crop(preprocessed_state)
        return preprocessed_state
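The helpers used here (to_grayscale, zero_center and the single-argument crop, which is unrelated to the five-argument crop in the other examples) are not shown in the snippet. A minimal sketch of what they might look like for an 8-bit RGB frame; the exact crop offset is an assumption:

import numpy as np

def to_grayscale(frame):
    # Average the RGB channels as a simple luminance approximation.
    return frame.mean(axis=2).astype(np.float32)

def zero_center(frame):
    # Map pixel values from [0, 255] to roughly [-0.5, 0.5].
    return frame / 255.0 - 0.5

def crop(frame):
    # Drop the top rows (e.g. a score bar); the offset is arbitrary here.
    return frame[34:, :]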
Code example #3
File: MSSEG2008.py Project: irfixq/AE
    def gather_data(self, patient, nrrd_filename):
        _images = []
        _labels = []

        nrrd, nrrd_seg, nrrd_skullmap = self.load_volume_and_groundtruth(
            nrrd_filename, patient)

        # Iterate over all slices and collect them
        # We only want to select in the range from 15 to 125 (in axial view)
        for s in range(
                self.options.sliceStart,
                min(self.options.sliceEnd,
                    nrrd.num_slices_along_axis(self.options.axis))):
            if 0 < self.options.numSamples < len(_images):
                break

            slice_data = nrrd.get_slice(s, self.options.axis)
            slice_seg = nrrd_seg.get_slice(s, self.options.axis)
            slice_skullmap = nrrd_skullmap.get_slice(s, self.options.axis)

            # Skip the slice if it is "empty"
            # if numpy.max(slice_data) < empty_thresh:
            if numpy.percentile(slice_data, 90) < 0.2:
                continue

            # assert numpy.max(slice_data) <= 1.0, "Slice range is outside [0; 1]!"

            if self.options.sliceResolution is not None:
                # Pad with zeros at the top/bottom and left/right if the slice is smaller than the target resolution
                before_y = after_y = before_x = after_x = 0  # defaults in case only one dimension needs padding
                if slice_data.shape[0] < self.options.sliceResolution[0]:
                    before_y = math.floor(
                        (self.options.sliceResolution[0] - slice_data.shape[0])
                        / 2.0)
                    after_y = math.ceil(
                        (self.options.sliceResolution[0] - slice_data.shape[0])
                        / 2.0)
                if slice_data.shape[1] < self.options.sliceResolution[1]:
                    before_x = math.floor(
                        (self.options.sliceResolution[1] - slice_data.shape[1])
                        / 2.0)
                    after_x = math.ceil(
                        (self.options.sliceResolution[1] - slice_data.shape[1])
                        / 2.0)
                if (slice_data.shape[0] < self.options.sliceResolution[0]
                        or slice_data.shape[1] < self.options.sliceResolution[1]):
                    slice_data = numpy.pad(slice_data,
                                           ((before_y, after_y),
                                            (before_x, after_x)),
                                           'constant',
                                           constant_values=(0, 0))
                    slice_seg = numpy.pad(slice_seg,
                                          ((before_y, after_y),
                                           (before_x, after_x)),
                                          'constant',
                                          constant_values=(0, 0))
                slice_data = zoom(
                    slice_data,
                    float(self.options.sliceResolution[0]) /
                    float(slice_data.shape[0]))
                slice_seg = zoom(slice_seg,
                                 float(self.options.sliceResolution[0]) /
                                 float(slice_seg.shape[0]),
                                 mode="nearest")
                slice_seg[slice_seg < 0.9] = 0.0
                slice_seg[slice_seg >= 0.9] = 1.0

            # Either collect crops
            if self.options.useCrops:
                if self.options.cropType == 'random':
                    rx = numpy.random.randint(
                        0,
                        high=(slice_data.shape[1] - self.options.cropWidth),
                        size=self.options.numRandomCropsPerSlice)
                    ry = numpy.random.randint(
                        0,
                        high=(slice_data.shape[0] - self.options.cropHeight),
                        size=self.options.numRandomCropsPerSlice)
                    for r in range(self.options.numRandomCropsPerSlice):
                        _images.append(
                            crop(slice_data, ry[r], rx[r],
                                 self.options.cropHeight,
                                 self.options.cropWidth))
                        _labels.append(
                            crop(slice_seg, ry[r], rx[r],
                                 self.options.cropHeight,
                                 self.options.cropWidth))
                elif self.options.cropType == 'center':
                    slice_data_cropped = crop_center(slice_data,
                                                     self.options.cropWidth,
                                                     self.options.cropHeight)
                    slice_seg_cropped = crop_center(slice_seg,
                                                    self.options.cropWidth,
                                                    self.options.cropHeight)
                    _images.append(slice_data_cropped)
                    _labels.append(slice_seg_cropped)
                elif self.options.cropType == 'lesions':
                    cc_slice = label(slice_seg)
                    props = regionprops(cc_slice)
                    if len(props) > 0:
                        for prop in props:
                            cx = prop['centroid'][1]
                            cy = prop['centroid'][0]
                            if cy < self.options.cropHeight // 2:
                                cy = self.options.cropHeight // 2
                            if cy > (slice_data.shape[0] -
                                     (self.options.cropHeight // 2)):
                                cy = (slice_data.shape[0] -
                                      (self.options.cropHeight // 2))
                            if cx < self.options.cropWidth // 2:
                                cx = self.options.cropWidth // 2
                            if cx > (slice_data.shape[1] -
                                     (self.options.cropWidth // 2)):
                                cx = (slice_data.shape[1] -
                                      (self.options.cropWidth // 2))
                            image_crop = crop(
                                slice_data,
                                int(cy) - (self.options.cropHeight // 2),
                                int(cx) - (self.options.cropWidth // 2),
                                self.options.cropHeight,
                                self.options.cropWidth)
                            seg_crop = crop(
                                slice_seg,
                                int(cy) - (self.options.cropHeight // 2),
                                int(cx) - (self.options.cropWidth // 2),
                                self.options.cropHeight,
                                self.options.cropWidth)
                            if (image_crop.shape[0] != self.options.cropHeight
                                    or image_crop.shape[1] != self.options.cropWidth):
                                continue
                            _images.append(image_crop)
                            _labels.append(seg_crop)
                            # _masks.append(crop(slice_data, prop['centroid'][0], prop['centroid'][1], self.options.cropHeight, self.options.cropWidth))
                        # find connected components in segmentation slice
                        # for every connected component, do a center crop from the segmentation slice, the mask and the actual slice
            # Or whole slices
            else:
                _images.append(slice_data)
                _labels.append(slice_seg)

        return _images, _labels
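The 'lesions' branch above centres crops on connected components of the segmentation mask using scikit-image's label and regionprops. A minimal, self-contained illustration of that pattern (the toy mask is made up for the example):

import numpy as np
from skimage.measure import label, regionprops

seg = np.zeros((8, 8), dtype=np.uint8)
seg[2:4, 2:4] = 1  # one small "lesion"

for prop in regionprops(label(seg)):
    cy, cx = prop['centroid']  # (row, col) of the component centre
    print(cy, cx)              # prints 2.5 2.5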