Example no. 1
0
    def add_subjects_to_path(self, path, data):
        """Export annotated key frames and their segmentations for each subject.

        For every subject in *data*, copies each non-rejected key-frame image
        into <path>/<dataset>/<subject>/<sequence_id>/ and writes a matching
        "<name>_gt.mhd" segmentation next to it via save_segmentation().

        Returns a (success, path) tuple; success is always True here.
        """

        # For each subject
        for subject in data:
            subject_path = join(path, subject.dataset.name, subject.name)
            frames = KeyFrameAnnotation.objects.filter(
                image_annotation__task=self.task,
                image_annotation__image__subject=subject)
            for frame in frames:
                # Check if image was rejected
                if frame.image_annotation.rejected:
                    continue
                # Get image sequence
                image_sequence = frame.image_annotation.image

                # Copy image frames. The sequence id is the name of the folder
                # containing the frame files (format is a path template with a
                # '#' placeholder for the frame number).
                sequence_id = os.path.basename(
                    os.path.dirname(image_sequence.format))
                subject_subfolder = join(subject_path, str(sequence_id))
                create_folder(subject_subfolder)

                target_name = os.path.basename(image_sequence.format).replace(
                    '#', str(frame.frame_nr))
                target_gt_name = os.path.splitext(target_name)[0] + "_gt.mhd"

                filename = image_sequence.format.replace(
                    '#', str(frame.frame_nr))
                new_filename = join(subject_subfolder, target_name)
                copy_image(filename, new_filename)

                # Get control points to create segmentation
                x_scaling = 1
                if new_filename.endswith('.mhd'):
                    image_mhd = MetaImage(filename=new_filename)
                    image_size = image_mhd.get_size()
                    spacing = image_mhd.get_spacing()

                    if spacing[0] != spacing[1]:
                        # Anisotropic pixel spacing: compute the width the
                        # image would have with square pixels and derive the
                        # x-scaling factor to apply to control points.
                        real_aspect = image_size[0] * spacing[0] / (
                            image_size[1] * spacing[1])
                        current_aspect = float(image_size[0]) / image_size[1]
                        new_width = int(image_size[0] *
                                        (real_aspect / current_aspect))
                        new_height = image_size[1]
                        x_scaling = float(image_size[0]) / new_width
                        print(image_size[0], new_width, image_size[1],
                              new_height)
                    image = np.asarray(image_mhd.get_pixel_data())
                else:
                    # Non-MHD images are treated as having isotropic spacing.
                    image_pil = PIL.Image.open(new_filename)
                    image_size = image_pil.size
                    spacing = [1, 1]
                    image = np.asarray(image_pil)
                self.save_segmentation(frame, image_size,
                                       join(subject_subfolder, target_gt_name),
                                       spacing, x_scaling, image)

        return True, path
Example no. 2
0
def copy_image(filename, new_filename):
    """Copy an image file, converting between .mhd and .png when the
    source and destination extensions differ.

    Supported conversions: .mhd -> .mhd/.png and .png -> .mhd/.png.
    Raises Exception for any other input or output extension.
    """
    original_extension = os.path.splitext(filename)[1]
    new_extension = os.path.splitext(new_filename)[1]
    src_ext = original_extension.lower()
    dst_ext = new_extension.lower()

    if src_ext == '.mhd':
        source_image = MetaImage(filename=filename)
        if dst_ext == '.mhd':
            source_image.write(new_filename)
        elif dst_ext == '.png':
            # Convert raw MHD pixel data to a PIL image before saving
            source_image.get_image().save(new_filename)
        else:
            raise Exception('Unknown output image extension ' + new_extension)
    elif src_ext == '.png':
        if dst_ext == '.mhd':
            pixels = np.asarray(PIL.Image.open(filename))
            MetaImage(data=pixels).write(new_filename)
        elif dst_ext == '.png':
            # Same format: a plain byte-for-byte file copy suffices
            copyfile(filename, new_filename)
        else:
            raise Exception('Unknown output image extension ' + new_extension)
    else:
        raise Exception('Unknown input image extension ' + original_extension)
    def save_segmentation(self, frame, image_size, filename, spacing):
        """Write the compounded segmentation of *frame* to *filename* as MHD.

        The segmentation raster itself is produced by
        get_object_segmentation(); this method only wraps it in a MetaImage
        with quality/spacing/metadata attributes attached.
        """
        # MetaImage data is indexed (rows, cols), so swap to (height, width)
        size_hw = [image_size[1], image_size[0]]

        # Create compounded segmentation object
        raster = self.get_object_segmentation(size_hw, frame)

        out_mhd = MetaImage(data=raster)
        out_mhd.set_attribute('ImageQuality',
                              frame.image_annotation.image_quality)
        out_mhd.set_spacing(spacing)
        # Carry over any stored per-image metadata attributes
        stored_metadata = ImageMetadata.objects.filter(
            image=frame.image_annotation.image)
        for entry in stored_metadata:
            out_mhd.set_attribute(entry.name, entry.value)
        out_mhd.write(filename)
Example no. 4
0
    def add_subjects_to_path(self, path, data):
        """Export landmark annotations as one text file per image annotation.

        Writes <path>/labels.txt with one label name per line, then for each
        non-rejected annotation writes <path>/<subject>/<annotation_id>.txt
        containing the subject name, the image format string, the sanitized
        comment, and one "frame label x y" line per landmark.

        Returns a (success, path) tuple; success is always True here.
        """
        # Get labels for this task and write them to labels.txt.
        # label_dict maps a Label pk to its 0-based line index in labels.txt.
        label_dict = {}
        with open(join(path, 'labels.txt'), 'w') as label_file:
            for counter, label in enumerate(Label.objects.filter(task=self.task)):
                label_file.write(label.name + '\n')
                label_dict[label.id] = counter

        # For each subject
        for subject in data:
            annotations = ImageAnnotation.objects.filter(
                task=self.task, image__subject=subject, rejected=False)
            for annotation in annotations:
                frames = KeyFrameAnnotation.objects.filter(
                    image_annotation=annotation)
                storage_path = join(path, subject.name)
                create_folder(storage_path)
                with open(join(storage_path,
                               str(annotation.id) + '.txt'), 'w') as f:
                    f.write(subject.name + '\n')
                    f.write(annotation.image.format + '\n')
                    # Strip non-ASCII characters and flatten newlines so the
                    # comment stays a single record line.
                    f.write((annotation.comments).encode(
                        'ascii', 'ignore').decode('ascii').replace(
                            '\n', '<br>') + '\n')  # Encoding fix
                    # Landmark x coordinates are stored assuming isotropic
                    # spacing, while images often are not; correct x by the
                    # spacing aspect ratio taken from frame 0.
                    metaimage = MetaImage(
                        filename=annotation.image.format.replace('#', str(0)))
                    spacing_x = metaimage.get_spacing()[0]
                    spacing_y = metaimage.get_spacing()[1]
                    aspect = spacing_y / spacing_x
                    for frame in frames:
                        # Write one "frame label x y" line per landmark
                        landmarks = Landmark.objects.filter(image=frame)
                        for landmark in landmarks:
                            label = label_dict[landmark.label.id]
                            f.write(
                                f'{frame.frame_nr} {label} {int(round(landmark.x*aspect))} {landmark.y}\n'
                            )

        return True, path
    def add_subjects_to_path(self, path, data):
        """Export annotated key frames and their segmentations for each subject.

        For every subject in *data*, copies each non-rejected key-frame image
        into <path>/<dataset>/<subject>/<sequence_id>/ and writes a matching
        "<name>_gt.mhd" segmentation next to it via save_segmentation().
        No anisotropic-spacing compensation is applied here.

        Returns a (success, path) tuple; success is always True here.
        """

        # For each subject
        for subject in data:
            subject_path = join(path, subject.dataset.name, subject.name)
            frames = KeyFrameAnnotation.objects.filter(
                image_annotation__task=self.task,
                image_annotation__image__subject=subject)
            for frame in frames:
                # Check if image was rejected
                if frame.image_annotation.rejected:
                    continue
                # Get image sequence
                image_sequence = frame.image_annotation.image

                # Copy image frames. The sequence id is the name of the folder
                # containing the frame files (format is a path template with a
                # '#' placeholder for the frame number).
                sequence_id = os.path.basename(
                    os.path.dirname(image_sequence.format))
                subject_subfolder = join(subject_path, str(sequence_id))
                create_folder(subject_subfolder)

                target_name = os.path.basename(image_sequence.format).replace(
                    '#', str(frame.frame_nr))
                target_gt_name = os.path.splitext(target_name)[0] + "_gt.mhd"

                filename = image_sequence.format.replace(
                    '#', str(frame.frame_nr))
                new_filename = join(subject_subfolder, target_name)
                copy_image(filename, new_filename)

                # Get control points to create segmentation
                if new_filename.endswith('.mhd'):
                    image_mhd = MetaImage(filename=new_filename)
                    image_size = image_mhd.get_size()
                    spacing = image_mhd.get_spacing()
                else:
                    # Non-MHD images are treated as having isotropic spacing.
                    image_pil = PIL.Image.open(new_filename)
                    image_size = image_pil.size
                    spacing = [1, 1]
                self.save_segmentation(frame, image_size,
                                       join(subject_subfolder, target_gt_name),
                                       spacing)

        return True, path
Example no. 6
0
def get_image_as_http_response(filename, post_processing_method=''):
    """Load an image file (.mhd or .png) and return it as a PNG HttpResponse.

    Anisotropic .mhd images are stretched horizontally so pixels appear
    square. When *post_processing_method* is given, the registered
    post-processing step is applied to the pixel data before encoding.

    Raises Exception for unsupported file extensions.
    """
    _, extension = os.path.splitext(filename)
    buffer = BytesIO()
    start = time.time()
    if extension.lower() == '.mhd':
        reader = MetaImage(filename=filename)
        source = reader
        # Convert raw data to image, and then to a http response
        pil_image = reader.get_image()
        spacing = reader.get_spacing()
        if spacing[0] != spacing[1]:
            # Compensate for anisotropic pixel spacing by resizing the width
            # to what it would be with square pixels
            real_aspect = pil_image.width * spacing[0] / (pil_image.height *
                                                          spacing[1])
            current_aspect = float(pil_image.width) / pil_image.height
            new_width = int(pil_image.width * (real_aspect / current_aspect))
            new_height = pil_image.height
            pil_image = pil_image.resize((new_width, new_height))
    elif extension.lower() == '.png':
        pil_image = PIL.Image.open(filename)
        source = pil_image
    else:
        raise Exception('Unknown output image extension ' + extension)

    # Fixed: was "is not ''" (string identity comparison, which relies on
    # interning and raises a SyntaxWarning on modern CPython); use equality.
    if post_processing_method != '':
        post_processing = post_processing_register.get(post_processing_method)
        new_image = post_processing.post_process(np.asarray(pil_image), source,
                                                 filename)
        # Post-processing may return 3-channel RGB or single-channel data
        if len(new_image.shape) > 2 and new_image.shape[2] == 3:
            pil_image = PIL.Image.fromarray(new_image, 'RGB')
        else:
            pil_image = PIL.Image.fromarray(new_image, 'L')

    #print('Image loading time', time.time() - start, 'seconds')
    pil_image.save(buffer, "PNG", compress_level=1
                   )  # TODO This function is very slow due to PNG compression
    #print('Image loading time with save to buffer', time.time() - start, 'seconds')

    return HttpResponse(buffer.getvalue(), content_type="image/png")
Example no. 7
0
    def save_segmentation(self, frame, image_size, filename, spacing,
                          x_scaling):
        """Rasterize the control-point annotations of *frame* into a label
        image and write it to *filename* as a MetaImage (.mhd).

        Written label values: object 0 -> 1, object 1 -> 2, object 2 -> 3.
        Object 1 is drawn first so that objects 2 and 0 overwrite it where
        they overlap. x_scaling compensates for anisotropic pixel spacing by
        scaling the stored control-point x coordinates (which presumably
        assume isotropic spacing — see the exporters that compute x_scaling).
        """
        print('X scaling is', x_scaling)
        # MetaImage data is indexed (rows, cols) -> swap to (height, width)
        image_size = [image_size[1], image_size[0]]
        # Get control points for all objects
        control_points0 = list(
            ControlPoint.objects.filter(image=frame,
                                        object=0).order_by('index'))
        control_points1 = list(
            ControlPoint.objects.filter(image=frame,
                                        object=1).order_by('index'))
        control_points2 = list(
            ControlPoint.objects.filter(image=frame,
                                        object=2).order_by('index'))
        # Apply anisotropic-spacing correction to the x coordinates
        if x_scaling != 1:
            for point in control_points0:
                point.x *= x_scaling
            for point in control_points1:
                point.x *= x_scaling
            for point in control_points2:
                point.x *= x_scaling

        # Endpoints of object 2 are the same as object 1
        if len(control_points0) > 0 and len(control_points2) > 0:
            control_points2.insert(0, control_points0[-1])
            control_points2.append(control_points0[0])

        # Create new endpoints for object 1
        if len(control_points1) > 0:
            point = self.calculate_new_endpoints(control_points0,
                                                 control_points1[0])
            control_points1.insert(0, point)
            point = self.calculate_new_endpoints(control_points0,
                                                 control_points1[-1])
            control_points1.append(point)

        # Create compounded segmentation object
        #image_size[1] = int(round(image_size[1]/x_scaling))
        #spacing[1] = spacing[0]
        segmentation = np.zeros(image_size, dtype=np.uint8)
        if len(control_points1) > 0:
            object_segmentation = self.get_object_segmentation(
                image_size, control_points1, x_scaling)
            segmentation[object_segmentation == 1] = 2  # Draw epi before endo
        if len(control_points2) > 0:
            object_segmentation = self.get_object_segmentation(
                image_size, control_points2, x_scaling)
            segmentation[object_segmentation == 1] = 3
        if len(control_points0) > 0:
            object_segmentation = self.get_object_segmentation(
                image_size, control_points0, x_scaling)
            segmentation[object_segmentation == 1] = 1

        # Wrap the raster in a MetaImage with quality/spacing/metadata attached
        segmentation_mhd = MetaImage(data=segmentation)
        segmentation_mhd.set_attribute('ImageQuality',
                                       frame.image_annotation.image_quality)
        segmentation_mhd.set_attribute('FrameType', frame.frame_metadata)
        segmentation_mhd.set_spacing(spacing)
        metadata = ImageMetadata.objects.filter(
            image=frame.image_annotation.image)
        for item in metadata:
            segmentation_mhd.set_attribute(item.name, item.value)
        segmentation_mhd.write(filename)
Example no. 8
0
    def save_segmentation(self, frame, image_size, filename, spacing,
                          x_scaling, image):
        """Rasterize the control-point annotations of *frame* into a label
        image and write it to *filename* as a MetaImage (.mhd).

        Objects (per the endpoint comments below): 0 = endo/LV, 1 = myocard/
        epi, 2 = LA, 3 = aorta. Written label values: object 0 -> 1,
        object 1 -> 2, object 2 -> 3, object 3 -> 4. Object 1 is drawn first
        so later objects overwrite it where they overlap. x_scaling
        compensates for anisotropic pixel spacing of stored x coordinates.

        NOTE(review): the *image* parameter is accepted but never used in
        this variant — confirm whether sector masking was intended here.
        """
        print('X scaling is', x_scaling)
        # MetaImage data is indexed (rows, cols) -> swap to (height, width)
        image_size = [image_size[1], image_size[0]]
        # Get control points for all objects
        control_points0 = list(
            ControlPoint.objects.filter(image=frame,
                                        object=0).order_by('index'))
        control_points1 = list(
            ControlPoint.objects.filter(image=frame,
                                        object=1).order_by('index'))
        control_points2 = list(
            ControlPoint.objects.filter(image=frame,
                                        object=2).order_by('index'))
        control_points3 = list(
            ControlPoint.objects.filter(image=frame,
                                        object=3).order_by('index'))

        # Apply anisotropic-spacing correction to the x coordinates
        if x_scaling != 1:
            for point in control_points0:
                point.x *= x_scaling
            for point in control_points1:
                point.x *= x_scaling
            for point in control_points2:
                point.x *= x_scaling
            for point in control_points3:
                point.x *= x_scaling

        # Endpoints of object 2 (LA) are the same as object 0 (endo/LV)
        if len(control_points0) > 0 and len(control_points2) > 0:
            control_points2.insert(0, control_points0[-1])
            control_points2.append(control_points0[0])
        # Aorta (3) endpoints
        if len(control_points3) > 0 and len(control_points0) > 0:
            control_points3.insert(0, control_points0[-2])
            control_points3.append(control_points0[-1])
        # (myocard/epi) (1) endpoints
        if len(control_points0) > 0 and len(control_points1) > 0:
            control_points1.insert(0, control_points0[0])
            control_points1.append(control_points0[-2])

        # Create compounded segmentation object
        #image_size[1] = int(round(image_size[1]/x_scaling))
        #spacing[1] = spacing[0]
        segmentation = np.zeros(image_size, dtype=np.uint8)
        if len(control_points1) > 0:
            object_segmentation = self.get_object_segmentation(
                image_size,
                control_points1,
                x_scaling,
                straight_lines=[[0, -1], [-2, -1], [0, 1]])
            segmentation[object_segmentation == 1] = 2  # Draw epi before endo
        if len(control_points2) > 0:
            object_segmentation = self.get_object_segmentation(
                image_size,
                control_points2,
                x_scaling,
                straight_lines=[[0, -1]])
            segmentation[object_segmentation == 1] = 3
        if len(control_points0) > 0:
            object_segmentation = self.get_object_segmentation(
                image_size,
                control_points0,
                x_scaling,
                straight_lines=[[0, -1], [-2, -1]])
            segmentation[object_segmentation == 1] = 1
        if len(control_points3) > 0:
            object_segmentation = self.get_object_segmentation(
                image_size,
                control_points3,
                x_scaling,
                straight_lines=[[0, -1]])
            segmentation[object_segmentation == 1] = 4

        # Wrap the raster in a MetaImage with quality/spacing/metadata attached
        segmentation_mhd = MetaImage(data=segmentation)
        segmentation_mhd.set_attribute('ImageQuality',
                                       frame.image_annotation.image_quality)
        segmentation_mhd.set_attribute('FrameType', frame.frame_metadata)
        segmentation_mhd.set_spacing(spacing)
        metadata = ImageMetadata.objects.filter(
            image=frame.image_annotation.image)
        for item in metadata:
            segmentation_mhd.set_attribute(item.name, item.value)
        segmentation_mhd.write(filename)
    def add_subjects_to_path(self, path, form):
        """Export labeled image sequences as HDF5 classification datasets.

        Writes <path>/labels.txt (all selected labels plus the parent labels
        used as classes), <path>/sequence_to_label.txt (sequence path ->
        class index, written in sequence-wise mode), and one .hd5 file per
        sequence (sequence-wise) or per subject, containing normalized float32
        image data under "data" and uint8 class indices under "label".

        Returns (False, message) if a subject directory could not be created,
        otherwise (True, path).
        """
        # Create label file
        label_file = open(join(path, 'labels.txt'), 'w')

        # Create a .txt file with the labels matching the sequence name
        sequence_label_file = open(join(path, 'sequence_to_label.txt'), 'w')

        # Find labels with parents
        labels = form.cleaned_data['labels']
        label_dict = {}
        has_parent_dict = {}
        label_file.write('All labels involved: \n\n')
        for label_id in labels:
            label = Label.objects.get(pk=label_id)
            label_name = get_complete_label_name(label)
            label_file.write(label_name + '\n')
            has_parent_dict[label_name] = False
            label_dict[label_name] = None

        # A label has a parent when another selected label's complete name is
        # a proper prefix of its own.  (Was "&" on booleans; "and" is the
        # correct operator and equivalent here.)
        for start_label in has_parent_dict:
            for full_label in has_parent_dict:
                if full_label.startswith(start_label) and (start_label !=
                                                           full_label):
                    has_parent_dict[full_label] = True

        # Assign children to the parent class
        label_file.write(
            '\nClassification based on the following parent labels: \n\n')
        counter = 0
        for label in has_parent_dict:
            if not has_parent_dict[label]:
                label_file.write(label + '\n')
                for label_name in label_dict:
                    if label_name.startswith(label):
                        label_dict[label_name] = counter
                counter += 1
        nb_parent_classes = counter

        label_file.close()

        # For each subject
        subjects = Subject.objects.filter(dataset__task=self.task)
        for subject in subjects:
            # Get labeled images
            labeled_images = ImageAnnotation.objects.filter(
                task=self.task, rejected=False, image__subject=subject)
            if labeled_images.count() == 0:
                continue

            width = form.cleaned_data['width']
            height = form.cleaned_data['height']

            sequence_frames = []
            labels = []

            for labeled_image in labeled_images:
                label = ImageLabel.objects.get(
                    image__image_annotation=labeled_image)

                if get_complete_label_name(label.label) in label_dict.keys():
                    # Get sequence
                    key_frame = KeyFrameAnnotation.objects.get(
                        image_annotation=labeled_image)
                    image_sequence = labeled_image.image
                    nr_of_frames = image_sequence.nr_of_frames

                    # Optionally restrict to the frame window shown to the
                    # annotator around the key frame
                    start_frame = 0
                    end_frame = nr_of_frames
                    if form.cleaned_data[
                            'displayed_frames_only'] and not self.task.show_entire_sequence:
                        start_frame = max(
                            0, key_frame.frame_nr - self.task.frames_before)
                        end_frame = min(
                            nr_of_frames,
                            key_frame.frame_nr + self.task.frames_after + 1)

                    for i in range(start_frame, end_frame):
                        # Get image
                        filename = image_sequence.format.replace('#', str(i))
                        if filename.endswith('.mhd'):
                            metaimage = MetaImage(filename=filename)
                            image = metaimage.get_image()
                        else:
                            image = PIL.Image.open(filename)

                        # Setup assigned colormode
                        if form.cleaned_data['colormode'] != image.mode:
                            image = image.convert(
                                form.cleaned_data['colormode'])

                        # Resize
                        image = image.resize((width, height),
                                             PIL.Image.BILINEAR)

                        # Convert to numpy array and normalize to [0, 1]
                        image_array = np.array(image).astype(np.float32)
                        image_array /= 255

                        # Ensure a trailing channel dimension
                        if len(image_array.shape) != 3:
                            image_array = image_array[..., None]

                        sequence_frames.append(image_array)
                        labels.append(label_dict[get_complete_label_name(
                            label.label)])

                    if form.cleaned_data['sequence_wise'] and len(
                            sequence_frames) > 0:
                        # Renamed from input/output to avoid shadowing builtins
                        input_array = np.array(sequence_frames,
                                               dtype=np.float32)
                        output_array = np.array(labels, dtype=np.uint8)

                        sequence_label_file.write(
                            join(
                                subject.name,
                                os.path.basename(
                                    os.path.dirname(image_sequence.format))) +
                            '\t' + str(output_array[0]) + '\n')

                        if form.cleaned_data['image_dim_ordering'] == 'theano':
                            # Theano ordering: (samples, channels, h, w)
                            input_array = np.transpose(input_array,
                                                       [0, 3, 1, 2])

                        if form.cleaned_data['categorical']:
                            output_array = to_categorical(
                                output_array, nb_classes=nb_parent_classes)

                        subj_path = join(path, subject.name)
                        create_folder(subj_path)
                        try:
                            os.stat(subj_path)
                        except OSError:  # was a bare except; only stat failures are expected
                            # Close the open export file before bailing out
                            sequence_label_file.close()
                            return False, 'Failed to create directory at ' + subj_path

                        f = h5py.File(
                            join(
                                subj_path,
                                os.path.basename(
                                    os.path.dirname(image_sequence.format) +
                                    '.hd5')), 'w')
                        f.create_dataset("data",
                                         data=input_array,
                                         compression="gzip",
                                         compression_opts=4,
                                         dtype='float32')
                        f.create_dataset("label",
                                         data=output_array,
                                         compression="gzip",
                                         compression_opts=4,
                                         dtype='uint8')
                        f.close()

                        sequence_frames = []
                        labels = []

            if not form.cleaned_data['sequence_wise'] and len(
                    sequence_frames) > 0:
                input_array = np.array(sequence_frames, dtype=np.float32)
                output_array = np.array(labels, dtype=np.uint8)

                if form.cleaned_data['image_dim_ordering'] == 'theano':
                    input_array = np.transpose(input_array, [0, 3, 1, 2])

                if form.cleaned_data['categorical']:
                    output_array = to_categorical(output_array,
                                                  nb_classes=len(label_dict))

                f = h5py.File(join(path, subject.name + '.hd5'), 'w')
                f.create_dataset("data",
                                 data=input_array,
                                 compression="gzip",
                                 compression_opts=4,
                                 dtype='float32')
                f.create_dataset("label",
                                 data=output_array,
                                 compression="gzip",
                                 compression_opts=4,
                                 dtype='uint8')
                f.close()
        sequence_label_file.close()
        # Match the (success, path) contract of the sibling exporters;
        # previously this method fell off the end and returned None.
        return True, path
Example no. 10
0
    def save_segmentation(self, frame, image_size, filename, spacing, x_scaling, image):
        """Rasterize the control-point annotations of *frame* into a label
        image and write it to *filename* as a MetaImage (.mhd).

        Objects (per the endpoint comments below): 0 = endo/LV, 1 = myocard/
        epi, 2 = LA, 3 = aorta, 4 = unnamed, 5 = LVOT. Written label values:
        object 0 -> 1, 1 -> 2, 2 -> 3, 3 -> 4, 4 -> 5, 5 -> 6. Draw order
        matters: epi first (overwritten by later objects), LVOT last but
        masked so it never overwrites epi. Pixels where *image* is zero
        (outside the ultrasound sector) are cleared. x_scaling compensates
        for anisotropic pixel spacing of stored x coordinates.
        """
        print('X scaling is', x_scaling)
        # MetaImage data is indexed (rows, cols) -> swap to (height, width)
        image_size = [image_size[1], image_size[0]]
        # Get control points for all objects
        control_points0 = list(ControlPoint.objects.filter(image=frame, object=0).order_by('index'))
        control_points1 = list(ControlPoint.objects.filter(image=frame, object=1).order_by('index'))
        control_points2 = list(ControlPoint.objects.filter(image=frame, object=2).order_by('index'))
        control_points3 = list(ControlPoint.objects.filter(image=frame, object=3).order_by('index'))
        control_points4 = list(ControlPoint.objects.filter(image=frame, object=4).order_by('index'))
        control_points5 = list(ControlPoint.objects.filter(image=frame, object=5).order_by('index'))

        # Apply anisotropic-spacing correction to the x coordinates
        if x_scaling != 1:
            for point in control_points0:
                point.x *= x_scaling
            for point in control_points1:
                point.x *= x_scaling
            for point in control_points2:
                point.x *= x_scaling
            for point in control_points3:
                point.x *= x_scaling
            for point in control_points4:
                point.x *= x_scaling
            for point in control_points5:
                point.x *= x_scaling

        # Endpoints of object 2 (LA) are the same as object 0 (endo/LV)
        if len(control_points0) > 0 and len(control_points2) > 0:
            control_points2.insert(0, control_points0[-1])
            control_points2.append(control_points0[0])
        # Aorta (3) endpoints
        if len(control_points3) > 0 and len(control_points0) > 0:
            control_points3.insert(0, control_points0[-2])
            control_points3.append(control_points0[-1])
        # (LVOT) (5) endpoints
        if len(control_points0) > 0 and len(control_points5) > 0:
            control_points5.insert(0, control_points0[-1])
            control_points5.append(control_points0[-2])
        # (myocard/epi) (1) endpoints
        if len(control_points0) > 0 and len(control_points1) > 0:
            control_points1.insert(0, control_points0[0])
            control_points1.append(control_points0[-2])

        # Create compounded segmentation object
        #image_size[1] = int(round(image_size[1]/x_scaling))
        #spacing[1] = spacing[0]
        segmentation = np.zeros(image_size, dtype=np.uint8)
        if len(control_points1) > 0:
            object_segmentation = self.get_object_segmentation(image_size, control_points1, x_scaling, straight_lines=[[0, -1], [-2, -1], [0, 1]])
            segmentation[object_segmentation == 1] = 2  # Draw epi before endo
        if len(control_points2) > 0:
            object_segmentation = self.get_object_segmentation(image_size, control_points2, x_scaling, straight_lines=[[0, -1]])
            segmentation[object_segmentation == 1] = 3
        if len(control_points0) > 0:
            object_segmentation = self.get_object_segmentation(image_size, control_points0, x_scaling, straight_lines=[[0, -1], [-2, -1]])
            segmentation[object_segmentation == 1] = 1
        if len(control_points3) > 0:
            object_segmentation = self.get_object_segmentation(image_size, control_points3, x_scaling, straight_lines=[[0, -1]])
            segmentation[object_segmentation == 1] = 4
        if len(control_points4) > 0:
            object_segmentation = self.get_object_segmentation(image_size, control_points4, x_scaling)
            segmentation[object_segmentation == 1] = 5
        if len(control_points5) > 0:
            object_segmentation = self.get_object_segmentation(image_size, control_points5, x_scaling, straight_lines=[[0, -1], [-2, -1], [-3, -2], [-4, -3]])
            segmentation[np.logical_and(object_segmentation == 1, segmentation != 2)] = 6 # Draw LVOT after endo, but epi should be subtracted

        # Remove data outside ultrasound sector
        segmentation[image == 0] = 0 # TODO improve this maybe?

        # Wrap the raster in a MetaImage with quality/spacing/metadata attached
        segmentation_mhd = MetaImage(data=segmentation)
        segmentation_mhd.set_attribute('ImageQuality', frame.image_annotation.image_quality)
        segmentation_mhd.set_attribute('FrameType', frame.frame_metadata)
        segmentation_mhd.set_spacing(spacing)
        metadata = ImageMetadata.objects.filter(image=frame.image_annotation.image)
        for item in metadata:
            segmentation_mhd.set_attribute(item.name, item.value)
        segmentation_mhd.write(filename)