Code Example #1
    def apply_transform(self, subject: torchio.Subject) -> torchio.Subject:
        original_quality = subject['info'][0]
        if original_quality < 6 and len(
                subject.applied_transforms) == 1:  # low quality image
            return subject
        else:  # high-quality image, corrupt it
            transformed_subject = super().apply_transform(subject)

            # now determine how much quality was reduced
            applied_params = transformed_subject.applied_transforms[-1][1]
            time = applied_params['times']['img']
            degrees = np.sum(np.absolute(applied_params['degrees']['img']))
            translation = np.sum(
                np.absolute(applied_params['translation']['img']))
            # motion in the middle of the acquisition process produces the most noticeable artifact
            quality_reduction = clamp(degrees + translation, 0, 10) * min(
                time, 1.0 - time)

            # update the ground truth information
            new_quality = original_quality - quality_reduction
            transformed_subject['info'][0] = clamp(new_quality, 0, 10)
            if degrees + translation > 1:  # it definitely has motion now
                transformed_subject['info'][ghosting_motion_index] = 1

            return transformed_subject
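
Judging by the parameters each snippet reads back from applied_transforms, these methods override apply_transform in subclasses of TorchIO's RandomMotion, RandomGamma, RandomGhosting, RandomSpike, RandomBiasField, and RandomNoise, respectively; the module-level scaffolding they rely on (imports, the clamp helper, the index constants) is not shown. Below is a minimal sketch of that assumed context, using code example #1 as the host class; the clamp definition, the class name, and the index values are illustrative assumptions, not the project's actual code.

import numpy as np
import torchio


def clamp(value, minimum, maximum):
    # keep a value inside the closed interval [minimum, maximum]
    return max(minimum, min(value, maximum))


# Positions inside the subject's ground-truth 'info' vector. Index 0 holds the
# overall 0-10 quality score; the artifact-flag indices below are placeholders
# chosen for illustration, not the project's actual values.
ghosting_motion_index = 1
inhomogeneity_index = 2


class QualityAwareMotion(torchio.transforms.RandomMotion):
    """Hypothetical host class: RandomMotion that also lowers the quality label."""

    def apply_transform(self, subject: torchio.Subject) -> torchio.Subject:
        ...  # body of code example #1 goes here
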
Code Example #2
    def apply_transform(self, subject: torchio.Subject) -> torchio.Subject:
        original_quality = subject['info'][0]
        if original_quality < 6 and len(
                subject.applied_transforms) == 1:  # low quality image
            return subject
        else:  # high-quality image, corrupt it
            transformed_subject = super().apply_transform(subject)

            # now determine how much quality was reduced
            applied_params = transformed_subject.applied_transforms[-1][1]
            gamma = applied_params['gamma']['img'][0]
            quality_reduction = 10 * abs(1.0 - gamma)  # gamma == 1 leaves intensities unchanged; penalize deviation from it

            # update the ground truth information
            new_quality = original_quality - quality_reduction
            transformed_subject['info'][0] = clamp(new_quality, 0, 10)

            return transformed_subject
Code Example #3
File: nn_training.py  Project: knowledgevis/miqa
    def apply_transform(self, subject: torchio.Subject) -> torchio.Subject:
        original_quality = subject['info'][0]
        if original_quality < 6 and len(subject.applied_transforms) == 1:  # low quality image
            return subject
        else:  # high-quality image, corrupt it
            transformed_subject = super().apply_transform(subject)

            # now determine how much quality was reduced
            applied_params = transformed_subject.applied_transforms[-1][1]
            intensity = applied_params['intensity']['img']
            num_ghosts = applied_params['num_ghosts']['img']
            quality_reduction = 8 * intensity * math.log10(num_ghosts)

            # update the ground truth information
            new_quality = original_quality - quality_reduction
            transformed_subject['info'][0] = clamp(new_quality, 0, 10)
            # it definitely has ghosting now
            transformed_subject['info'][ghosting_motion_index] = 1

            return transformed_subject
Code Example #4
    def apply_transform(self, subject: torchio.Subject) -> torchio.Subject:
        original_quality = subject['info'][0]
        if original_quality < 6 and len(
                subject.applied_transforms) == 1:  # low quality image
            return subject
        else:  # high-quality image, corrupt it
            transformed_subject = super().apply_transform(subject)

            # now determine how much quality was reduced
            applied_params = transformed_subject.applied_transforms[-1][1]
            intensity = applied_params['intensity']['img']
            # spikes_positions = applied_params['spikes_positions']['img']
            quality_reduction = 0 + 2 * intensity

            # update the ground truth information
            new_quality = original_quality - quality_reduction
            transformed_subject['info'][0] = clamp(new_quality, 0, 10)
            # it definitely has bias field now
            transformed_subject['info'][inhomogeneity_index] = 1

            return transformed_subject
Code Example #5
    def apply_transform(self, subject: torchio.Subject) -> torchio.Subject:
        original_quality = subject['info'][0]
        if original_quality < 6 and len(
                subject.applied_transforms) == 1:  # low quality image
            return subject
        else:  # high-quality image, corrupt it
            transformed_subject = super().apply_transform(subject)

            # now determine how much quality was reduced
            # applied_params = transformed_subject.applied_transforms[-1][1]
            # coefficients = applied_params['coefficients']['img']
            # quality_reduction = 2 + np.linalg.norm(np.asarray(coefficients))
            quality_reduction = 4  # it is hard to assess impact on image quality

            # update the ground truth information
            new_quality = original_quality - quality_reduction
            transformed_subject['info'][0] = clamp(new_quality, 0, 10)
            # it definitely has bias field now
            transformed_subject['info'][inhomogeneity_index] = 1

            return transformed_subject
Code Example #6
    def apply_transform(self, subject: torchio.Subject) -> torchio.Subject:
        original_quality = subject['info'][0]
        if original_quality < 6 and len(
                subject.applied_transforms) == 1:  # low quality image
            return subject
        else:  # high-quality image, corrupt it
            transformed_subject = super().apply_transform(subject)

            # make sure we don't have negative intensities after adding noise
            transformed_subject.img.set_data(
                torch.clamp(transformed_subject.img.data, min=0.0, max=1.0))

            # now determine how much quality was reduced
            applied_params = transformed_subject.applied_transforms[-1][1]
            std = applied_params['std']['img']
            quality_reduction = 40 * std  # heavier noise (larger std) lowers the quality score proportionally

            # update the ground truth information
            new_quality = original_quality - quality_reduction
            transformed_subject['info'][0] = clamp(new_quality, 0, 10)

            return transformed_subject
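
For training-time augmentation, transforms like these could be chained so that each sample receives one corruption and the updated 'info' vector becomes the learning target. A brief usage sketch, assuming the hypothetical subclass names from the scaffolding sketch above (Compose/OneOf are standard TorchIO transforms):

# apply exactly one of the quality-aware corruptions per sample, so the
# resulting quality drop can be attributed to a single artifact type
corruption = torchio.transforms.OneOf({
    QualityAwareMotion(): 1.0,  # hypothetical subclass from the sketch above
    QualityAwareNoise(): 1.0,   # analogous (hypothetical) subclass of RandomNoise
})

augmented = corruption(subject)
print(augmented['info'][0])  # overall quality after the corruption
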