class Config(UUIDModel):
    """
    Per-challenge evaluation settings (one-to-one with ``Challenge`` via the
    ``evaluation_config`` related name): leaderboard scoring/sorting, submission
    options, and the evaluation input/output interfaces.
    """

    # This must match the syntax used in jquery datatables
    # https://datatables.net/reference/option/order
    ASCENDING = "asc"
    DESCENDING = "desc"
    EVALUATION_SCORE_SORT_CHOICES = (
        (ASCENDING, "Ascending"),
        (DESCENDING, "Descending"),
    )

    # Three-state toggle shared by the supplementary-file and publication-link
    # features (off / optional / required).
    OFF = "off"
    OPTIONAL = "opt"
    REQUIRED = "req"
    PUBLICATION_LINK_CHOICES = SUPPLEMENTARY_FILE_CHOICES = (
        (OFF, "Off"),
        (OPTIONAL, "Optional"),
        (REQUIRED, "Required"),
    )

    # Which of a user's results appear on the leaderboard.
    ALL = "all"
    MOST_RECENT = "rec"
    BEST = "bst"
    RESULT_DISPLAY_CHOICES = (
        (ALL, "Display all results"),
        (MOST_RECENT, "Only display each users most recent result"),
        (BEST, "Only display each users best result"),
    )

    # How the rank of each result is computed from the score columns.
    ABSOLUTE = "abs"
    MEAN = "avg"
    MEDIAN = "med"
    SCORING_CHOICES = (
        (ABSOLUTE, "Use the absolute value of the score column"),
        (
            MEAN,
            "Use the mean of the relative ranks of the score and extra result columns",
        ),
        (
            MEDIAN,
            "Use the median of the relative ranks of the score and extra result columns",
        ),
    )

    class SubmissionKind(models.IntegerChoices):
        # What participants upload: a predictions file or a containerized algorithm.
        CSV = 1, "CSV"
        ZIP = 2, "ZIP"
        ALGORITHM = 3, "Algorithm"

    challenge = models.OneToOneField(
        Challenge,
        on_delete=models.CASCADE,
        related_name="evaluation_config",
        editable=False,
    )
    use_teams = models.BooleanField(
        default=False,
        help_text=(
            "If true, users are able to form teams together to participate in "
            "challenges."
        ),
    )
    score_title = models.CharField(
        max_length=32,
        blank=False,
        default="Score",
        help_text=(
            "The name that will be displayed for the scores column, for "
            "instance: Score (log-loss)"
        ),
    )
    score_jsonpath = models.CharField(
        max_length=255,
        blank=True,
        help_text=(
            "The jsonpath of the field in metrics.json that will be used "
            "for the overall scores on the results page. See "
            "http://goessner.net/articles/JsonPath/ for syntax. For example: "
            "dice.mean"
        ),
    )
    score_error_jsonpath = models.CharField(
        max_length=255,
        blank=True,
        help_text=(
            "The jsonpath for the field in metrics.json that contains the "
            "error of the score, eg: dice.std"
        ),
    )
    score_default_sort = models.CharField(
        max_length=4,
        choices=EVALUATION_SCORE_SORT_CHOICES,
        default=DESCENDING,
        help_text=(
            "The default sorting to use for the scores on the results page."
        ),
    )
    score_decimal_places = models.PositiveSmallIntegerField(
        blank=False,
        default=4,
        help_text=("The number of decimal places to display for the score"),
    )
    # NOTE(review): bare ``JSONField`` (not ``models.JSONField`` as elsewhere in
    # this file) — presumably imported at module level; confirm which JSONField
    # implementation this resolves to.
    extra_results_columns = JSONField(
        default=list,
        blank=True,
        help_text=(
            "A JSON object that contains the extra columns from metrics.json "
            "that will be displayed on the results page. "
        ),
        validators=[JSONSchemaValidator(schema=EXTRA_RESULT_COLUMNS_SCHEMA)],
    )
    scoring_method_choice = models.CharField(
        max_length=3,
        choices=SCORING_CHOICES,
        default=ABSOLUTE,
        help_text=("How should the rank of each result be calculated?"),
    )
    result_display_choice = models.CharField(
        max_length=3,
        choices=RESULT_DISPLAY_CHOICES,
        default=ALL,
        help_text=("Which results should be displayed on the leaderboard?"),
    )
    submission_kind = models.PositiveSmallIntegerField(
        default=SubmissionKind.CSV,
        choices=SubmissionKind.choices,
        help_text=(
            "Should participants submit a .csv/.zip file of predictions, "
            "or an algorithm?"
        ),
    )
    allow_submission_comments = models.BooleanField(
        default=False,
        help_text=(
            "Allow users to submit comments as part of their submission."
        ),
    )
    display_submission_comments = models.BooleanField(
        default=False,
        help_text=(
            "If true, submission comments are shown on the results page."
        ),
    )
    supplementary_file_choice = models.CharField(
        max_length=3,
        choices=SUPPLEMENTARY_FILE_CHOICES,
        default=OFF,
        help_text=(
            "Show a supplementary file field on the submissions page so that "
            "users can upload an additional file along with their predictions "
            "file as part of their submission (eg, include a pdf description "
            "of their method). Off turns this feature off, Optional means "
            "that including the file is optional for the user, Required means "
            "that the user must upload a supplementary file."
        ),
    )
    supplementary_file_label = models.CharField(
        max_length=32,
        blank=True,
        default="Supplementary File",
        help_text=(
            "The label that will be used on the submission and results page "
            "for the supplementary file. For example: Algorithm Description."
        ),
    )
    supplementary_file_help_text = models.CharField(
        max_length=128,
        blank=True,
        default="",
        help_text=(
            "The help text to include on the submissions page to describe the "
            'submissions file. Eg: "A PDF description of the method.".'
        ),
    )
    show_supplementary_file_link = models.BooleanField(
        default=False,
        help_text=(
            "Show a link to download the supplementary file on the results "
            "page."
        ),
    )
    publication_url_choice = models.CharField(
        max_length=3,
        choices=PUBLICATION_LINK_CHOICES,
        default=OFF,
        help_text=(
            "Show a publication url field on the submission page so that "
            "users can submit a link to a publication that corresponds to "
            "their submission. Off turns this feature off, Optional means "
            "that including the url is optional for the user, Required means "
            "that the user must provide an url."
        ),
    )
    show_publication_url = models.BooleanField(
        default=False,
        help_text=("Show a link to the publication on the results page"),
    )
    daily_submission_limit = models.PositiveIntegerField(
        default=10,
        help_text=(
            "The limit on the number of times that a user can make a "
            "submission in a 24 hour period."
        ),
    )
    submission_page_html = models.TextField(
        help_text=(
            "HTML to include on the submission page for this challenge."
        ),
        blank=True,
    )
    # NOTE(review): bare ``BooleanField`` here vs ``models.BooleanField``
    # everywhere else in this class — presumably imported directly; confirm.
    auto_publish_new_results = BooleanField(
        default=True,
        help_text=(
            "If true, new results are automatically made public. If false, "
            "the challenge administrator must manually publish each new "
            "result."
        ),
    )
    display_all_metrics = models.BooleanField(
        default=True,
        help_text=(
            "Should all of the metrics be displayed on the Result detail page?"
        ),
    )
    submission_join_key = models.CharField(
        blank=True,
        default="",
        max_length=32,
        help_text=(
            "If predictions are submitted as csv files, which column should "
            "be used to join the data? eg. case_id"
        ),
    )

    # Component interfaces consumed/produced by the evaluation; defaults are
    # assigned on first save by set_default_interfaces().
    inputs = models.ManyToManyField(
        to=ComponentInterface, related_name="evaluation_inputs"
    )
    outputs = models.ManyToManyField(
        to=ComponentInterface, related_name="evaluation_outputs"
    )

    def save(self, *args, **kwargs):
        """
        Save the config; on first save also attach the default interfaces.

        Always schedules an async rank recalculation for the challenge
        (``calculate_ranks`` — presumably a Celery task; defined elsewhere).
        """
        adding = self._state.adding

        super().save(*args, **kwargs)

        if adding:
            self.set_default_interfaces()

        calculate_ranks.apply_async(kwargs={"challenge_pk": self.challenge.pk})

    def set_default_interfaces(self):
        """Set the default csv-in / metrics-json-out evaluation interfaces."""
        self.inputs.set(
            [ComponentInterface.objects.get(slug="predictions-csv-file")]
        )
        self.outputs.set(
            [ComponentInterface.objects.get(slug="metrics-json-file")]
        )

    def get_absolute_url(self):
        """Return the challenge home page URL (the config has no page of its own)."""
        return reverse(
            "pages:home",
            kwargs={"challenge_short_name": self.challenge.short_name},
        )
class WorkstationConfig(TitleSlugDescriptionModel, UUIDModel):
    """
    Display settings for a workstation viewer: window presets, slab
    rendering, orientation, overlay look-up-table/alpha, zoom, and which
    tools/plugins are shown.
    """

    # Default viewing plane.
    ORIENTATION_AXIAL = "A"
    ORIENTATION_CORONAL = "C"
    ORIENTATION_SAGITTAL = "S"
    ORIENTATION_CHOICES = (
        (ORIENTATION_AXIAL, "Axial"),
        (ORIENTATION_CORONAL, "Coronal"),
        (ORIENTATION_SAGITTAL, "Sagittal"),
    )

    # Projection used when rendering a slab of slices.
    SLAB_RENDER_METHOD_MAXIMUM = "MAX"
    SLAB_RENDER_METHOD_MINIMUM = "MIN"
    SLAB_RENDER_METHOD_AVERAGE = "AVG"
    SLAB_RENDER_METHOD_CHOICES = (
        (SLAB_RENDER_METHOD_MAXIMUM, "Maximum"),
        (SLAB_RENDER_METHOD_MINIMUM, "Minimum"),
        (SLAB_RENDER_METHOD_AVERAGE, "Average"),
    )

    # Interpolation used when resampling the overlay image.
    IMAGE_INTERPOLATION_TYPE_NEAREST = "NN"
    IMAGE_INTERPOLATION_TYPE_TRILINEAR = "TL"
    IMAGE_INTERPOLATION_TYPE_CHOICES = (
        (IMAGE_INTERPOLATION_TYPE_NEAREST, "NearestNeighbor"),
        (IMAGE_INTERPOLATION_TYPE_TRILINEAR, "Trilinear"),
    )

    # Kept on creator deletion (SET_NULL) so configs outlive their authors.
    creator = models.ForeignKey(
        get_user_model(), null=True, on_delete=models.SET_NULL
    )
    window_presets = models.ManyToManyField(
        to="WindowPreset",
        blank=True,
        related_name="workstation_window_presets",
    )
    default_window_preset = models.ForeignKey(
        to="WindowPreset",
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
        related_name="workstation_default_window_presets",
    )
    # 4 digits, 2 decimal places, 0.01 min, 99.99 max
    default_slab_thickness_mm = models.DecimalField(
        blank=True,
        null=True,
        max_digits=4,
        decimal_places=2,
        validators=[MinValueValidator(limit_value=0.01)],
    )
    default_slab_render_method = models.CharField(
        max_length=3, choices=SLAB_RENDER_METHOD_CHOICES, blank=True
    )
    default_orientation = models.CharField(
        max_length=1, choices=ORIENTATION_CHOICES, blank=True
    )
    default_overlay_lut = models.ForeignKey(
        to="LookUpTable", blank=True, null=True, on_delete=models.SET_NULL
    )
    default_overlay_interpolation = models.CharField(
        max_length=2,
        choices=IMAGE_INTERPOLATION_TYPE_CHOICES,
        default=IMAGE_INTERPOLATION_TYPE_NEAREST,
        blank=True,
    )
    # 3 digits, 2 decimal places, 0.00 min, 1.00 max
    default_overlay_alpha = models.DecimalField(
        blank=True,
        null=True,
        max_digits=3,
        decimal_places=2,
        validators=[
            MinValueValidator(limit_value=0.00),
            MaxValueValidator(limit_value=1.00),
        ],
    )
    overlay_segments = JSONField(
        default=list,
        blank=True,
        validators=[JSONSchemaValidator(schema=OVERLAY_SEGMENTS_SCHEMA)],
    )
    # 4 digits, 2 decimal places, 0.01 min, 99.99 max
    default_zoom_scale = models.DecimalField(
        blank=True,
        null=True,
        max_digits=4,
        decimal_places=2,
        validators=[MinValueValidator(limit_value=0.01)],
    )
    # Tool/plugin visibility toggles; everything is shown by default.
    show_image_info_plugin = models.BooleanField(default=True)
    show_display_plugin = models.BooleanField(default=True)
    show_invert_tool = models.BooleanField(default=True)
    show_flip_tool = models.BooleanField(default=True)
    show_window_level_tool = models.BooleanField(default=True)
    show_reset_tool = models.BooleanField(default=True)

    class Meta(TitleSlugDescriptionModel.Meta, UUIDModel.Meta):
        ordering = ("created", "creator")

    def __str__(self):
        return f"{self.title} (created by {self.creator})"

    def get_absolute_url(self):
        """Return the detail page URL for this config."""
        return reverse("workstation-configs:detail", kwargs={"slug": self.slug})

    def save(self, *args, **kwargs):
        """Save, then grant the creator object-level change permission."""
        super().save(*args, **kwargs)

        if self.creator:
            assign_perm(
                f"{self._meta.app_label}.change_{self._meta.model_name}",
                self.creator,
                self,
            )
class ComponentInterface(models.Model):
    """
    A typed input/output field of a component: what kind of value it carries,
    where it lives relative to the input/output directory, and whether values
    are stored in the database or in the object store.
    """

    Kind = InterfaceKind.InterfaceKindChoices

    title = models.CharField(
        max_length=255,
        help_text="Human readable name of this input/output field.",
        unique=True,
    )
    slug = AutoSlugField(populate_from="title")
    description = models.TextField(
        blank=True, help_text="Description of this input/output field."
    )
    default_value = models.JSONField(
        blank=True,
        null=True,
        default=None,
        help_text="Default value for this field, only valid for inputs.",
    )
    schema = models.JSONField(
        default=dict,
        blank=True,
        help_text=(
            "Additional JSON schema that the values for this interface must "
            "satisfy. See https://json-schema.org/. "
            "Only Draft 7, 6, 4 or 3 are supported."
        ),
        validators=[JSONSchemaValidator()],
    )
    kind = models.CharField(
        blank=False,
        max_length=5,
        choices=Kind.choices,
        help_text=(
            "What is the type of this interface? Used to validate interface "
            "values and connections between components."
        ),
    )
    relative_path = models.CharField(
        max_length=255,
        help_text=(
            "The path to the entity that implements this interface relative "
            "to the input or output directory."
        ),
        unique=True,
        validators=[
            validate_safe_path,
            validate_no_slash_at_ends,
            # No uuids in path
            RegexValidator(
                regex=r".*[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}.*",
                inverse_match=True,
                flags=re.IGNORECASE,
            ),
        ],
    )
    store_in_database = models.BooleanField(
        default=True,
        editable=True,
        help_text=(
            "Should the value be saved in a database field, "
            "only valid for outputs."
        ),
    )

    def __str__(self):
        return f"{self.title} ({self.get_kind_display()})"

    @property
    def is_image_kind(self):
        return self.kind in InterfaceKind.interface_type_image()

    @property
    def is_json_kind(self):
        return self.kind in InterfaceKind.interface_type_json()

    @property
    def is_file_kind(self):
        return self.kind in InterfaceKind.interface_type_file()

    @property
    def super_kind(self):
        """Coarse classification: IMAGE / FILE for object-store values, else VALUE."""
        if self.saved_in_object_store:
            if self.is_image_kind:
                return InterfaceSuperKindChoices.IMAGE
            else:
                return InterfaceSuperKindChoices.FILE
        else:
            return InterfaceSuperKindChoices.VALUE

    @property
    def saved_in_object_store(self):
        # files and images should always be saved to S3, others are optional
        return (
            self.is_image_kind or self.is_file_kind or not self.store_in_database
        )

    def create_instance(self, *, image=None, value=None, fileobj=None):
        """
        Create, validate and save a ComponentInterfaceValue for this interface.

        Exactly one of ``image``, ``fileobj`` or ``value`` is expected by the
        branch order below: an image wins, then a file object (saved under this
        interface's relative path name), then a JSON value — which is serialised
        to a file when this interface is object-store backed, otherwise stored
        in the ``value`` field.
        """
        civ = ComponentInterfaceValue.objects.create(interface=self)

        if image:
            civ.image = image
        elif fileobj:
            container = File(fileobj)
            civ.file.save(Path(self.relative_path).name, container)
        elif self.saved_in_object_store:
            civ.file = ContentFile(
                json.dumps(value).encode("utf-8"),
                name=Path(self.relative_path).name,
            )
        else:
            civ.value = value

        civ.full_clean()
        civ.save()

        return civ

    def clean(self):
        super().clean()
        self._clean_store_in_database()
        self._clean_relative_path()

    def _clean_relative_path(self):
        """
        Validate relative_path against the interface kind: JSON kinds end with
        .json, file kinds end with their kind's extension, image kinds are
        directories under images/ and non-image kinds must not live there.
        """
        if self.is_json_kind:
            if not self.relative_path.endswith(".json"):
                raise ValidationError("Relative path should end with .json")
        elif self.is_file_kind and not self.relative_path.endswith(
            f".{self.kind.lower()}"
        ):
            raise ValidationError(
                f"Relative path should end with .{self.kind.lower()}"
            )

        if self.is_image_kind:
            if not self.relative_path.startswith("images/"):
                raise ValidationError(
                    "Relative path should start with images/"
                )
            if Path(self.relative_path).name != Path(self.relative_path).stem:
                # Maybe not in the future
                raise ValidationError("Images should be a directory")
        else:
            if self.relative_path.startswith("images/"):
                raise ValidationError(
                    "Relative path should not start with images/"
                )

    def _clean_store_in_database(self):
        """Reject database storage for kinds that must live in the object store."""
        object_store_required = self.kind in {
            *InterfaceKind.interface_type_image(),
            *InterfaceKind.interface_type_file(),
            # These values can be large, so for any new interfaces of this
            # type always add them to the object store
            InterfaceKind.InterfaceKindChoices.MULTIPLE_TWO_D_BOUNDING_BOXES,
            InterfaceKind.InterfaceKindChoices.MULTIPLE_DISTANCE_MEASUREMENTS,
            InterfaceKind.InterfaceKindChoices.MULTIPLE_POINTS,
            InterfaceKind.InterfaceKindChoices.MULTIPLE_POLYGONS,
            InterfaceKind.InterfaceKindChoices.MULTIPLE_LINES,
        }

        if object_store_required and self.store_in_database:
            raise ValidationError(
                f"Interface {self.kind} objects cannot be stored in the database"
            )

    def validate_against_schema(self, *, value):
        """Validates values against both default and custom schemas"""
        JSONValidator(
            schema={
                **INTERFACE_VALUE_SCHEMA,
                "anyOf": [{"$ref": f"#/definitions/{self.kind}"}],
            }
        )(value=value)

        if self.schema:
            JSONValidator(schema=self.schema)(value=value)

    class Meta:
        ordering = ("pk",)
class Phase(UUIDModel):
    """
    One evaluation phase of a challenge: leaderboard scoring/sorting options,
    submission options and windows, observable-notebook embeds, and the
    evaluation input/output interfaces.

    Fix in this revision: the ``evaluation_comparison_observable_url``
    ``help_text`` was built from adjacent string literals with no separating
    space ("…for comparing" "results…" rendered as "comparingresults").
    """

    # This must match the syntax used in jquery datatables
    # https://datatables.net/reference/option/order
    ASCENDING = "asc"
    DESCENDING = "desc"
    EVALUATION_SCORE_SORT_CHOICES = (
        (ASCENDING, "Ascending"),
        (DESCENDING, "Descending"),
    )

    # Three-state toggle shared by the supplementary-file and publication-link
    # features (off / optional / required).
    OFF = "off"
    OPTIONAL = "opt"
    REQUIRED = "req"
    PUBLICATION_LINK_CHOICES = SUPPLEMENTARY_FILE_CHOICES = (
        (OFF, "Off"),
        (OPTIONAL, "Optional"),
        (REQUIRED, "Required"),
    )

    # Which of a user's results appear on the leaderboard.
    ALL = "all"
    MOST_RECENT = "rec"
    BEST = "bst"
    RESULT_DISPLAY_CHOICES = (
        (ALL, "Display all results"),
        (MOST_RECENT, "Only display each users most recent result"),
        (BEST, "Only display each users best result"),
    )

    # How the rank of each result is computed from the score columns.
    ABSOLUTE = "abs"
    MEAN = "avg"
    MEDIAN = "med"
    SCORING_CHOICES = (
        (ABSOLUTE, "Use the absolute value of the score column"),
        (
            MEAN,
            "Use the mean of the relative ranks of the score and extra result columns",
        ),
        (
            MEDIAN,
            "Use the median of the relative ranks of the score and extra result columns",
        ),
    )

    class SubmissionKind(models.IntegerChoices):
        # What participants upload: a predictions file or a containerized algorithm.
        CSV = 1, "CSV"
        ZIP = 2, "ZIP"
        ALGORITHM = 3, "Algorithm"

    challenge = models.ForeignKey(
        Challenge,
        on_delete=models.CASCADE,
        editable=False,
    )
    archive = models.ForeignKey(
        Archive,
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        help_text=(
            "Which archive should be used as the source dataset for this "
            "phase?"
        ),
    )
    title = models.CharField(
        max_length=64,
        help_text="The title of this phase.",
        default="Challenge",
    )
    slug = AutoSlugField(populate_from="title", max_length=64)
    score_title = models.CharField(
        max_length=32,
        blank=False,
        default="Score",
        help_text=(
            "The name that will be displayed for the scores column, for "
            "instance: Score (log-loss)"
        ),
    )
    score_jsonpath = models.CharField(
        max_length=255,
        blank=True,
        help_text=(
            "The jsonpath of the field in metrics.json that will be used "
            "for the overall scores on the results page. See "
            "http://goessner.net/articles/JsonPath/ for syntax. For example: "
            "dice.mean"
        ),
    )
    score_error_jsonpath = models.CharField(
        max_length=255,
        blank=True,
        help_text=(
            "The jsonpath for the field in metrics.json that contains the "
            "error of the score, eg: dice.std"
        ),
    )
    score_default_sort = models.CharField(
        max_length=4,
        choices=EVALUATION_SCORE_SORT_CHOICES,
        default=DESCENDING,
        help_text=(
            "The default sorting to use for the scores on the results page."
        ),
    )
    score_decimal_places = models.PositiveSmallIntegerField(
        blank=False,
        default=4,
        help_text=("The number of decimal places to display for the score"),
    )
    extra_results_columns = models.JSONField(
        default=list,
        blank=True,
        help_text=(
            "A JSON object that contains the extra columns from metrics.json "
            "that will be displayed on the results page. "
        ),
        validators=[JSONSchemaValidator(schema=EXTRA_RESULT_COLUMNS_SCHEMA)],
    )
    scoring_method_choice = models.CharField(
        max_length=3,
        choices=SCORING_CHOICES,
        default=ABSOLUTE,
        help_text=("How should the rank of each result be calculated?"),
    )
    result_display_choice = models.CharField(
        max_length=3,
        choices=RESULT_DISPLAY_CHOICES,
        default=ALL,
        help_text=("Which results should be displayed on the leaderboard?"),
    )
    creator_must_be_verified = models.BooleanField(
        default=False,
        help_text=(
            "If True, only participants with verified accounts can make "
            "submissions to this phase"
        ),
    )
    submission_kind = models.PositiveSmallIntegerField(
        default=SubmissionKind.CSV,
        choices=SubmissionKind.choices,
        help_text=(
            "Should participants submit a .csv/.zip file of predictions, "
            "or an algorithm?"
        ),
    )
    allow_submission_comments = models.BooleanField(
        default=False,
        help_text=(
            "Allow users to submit comments as part of their submission."
        ),
    )
    display_submission_comments = models.BooleanField(
        default=False,
        help_text=(
            "If true, submission comments are shown on the results page."
        ),
    )
    supplementary_file_choice = models.CharField(
        max_length=3,
        choices=SUPPLEMENTARY_FILE_CHOICES,
        default=OFF,
        help_text=(
            "Show a supplementary file field on the submissions page so that "
            "users can upload an additional file along with their predictions "
            "file as part of their submission (eg, include a pdf description "
            "of their method). Off turns this feature off, Optional means "
            "that including the file is optional for the user, Required means "
            "that the user must upload a supplementary file."
        ),
    )
    supplementary_file_label = models.CharField(
        max_length=32,
        blank=True,
        default="Supplementary File",
        help_text=(
            "The label that will be used on the submission and results page "
            "for the supplementary file. For example: Algorithm Description."
        ),
    )
    supplementary_file_help_text = models.CharField(
        max_length=128,
        blank=True,
        default="",
        help_text=(
            "The help text to include on the submissions page to describe the "
            'submissions file. Eg: "A PDF description of the method.".'
        ),
    )
    show_supplementary_file_link = models.BooleanField(
        default=False,
        help_text=(
            "Show a link to download the supplementary file on the results "
            "page."
        ),
    )
    publication_url_choice = models.CharField(
        max_length=3,
        choices=PUBLICATION_LINK_CHOICES,
        default=OFF,
        help_text=(
            "Show a publication url field on the submission page so that "
            "users can submit a link to a publication that corresponds to "
            "their submission. Off turns this feature off, Optional means "
            "that including the url is optional for the user, Required means "
            "that the user must provide an url."
        ),
    )
    show_publication_url = models.BooleanField(
        default=False,
        help_text=("Show a link to the publication on the results page"),
    )
    daily_submission_limit = models.PositiveIntegerField(
        default=10,
        help_text=(
            "The limit on the number of times that a user can make a "
            "submission in a 24 hour period."
        ),
    )
    submissions_open = models.DateTimeField(
        null=True,
        blank=True,
        help_text=(
            "If set, participants will not be able to make submissions to "
            "this phase before this time."
        ),
    )
    submissions_close = models.DateTimeField(
        null=True,
        blank=True,
        help_text=(
            "If set, participants will not be able to make submissions to "
            "this phase after this time."
        ),
    )
    submission_page_html = models.TextField(
        help_text=(
            "HTML to include on the submission page for this challenge."
        ),
        blank=True,
    )
    auto_publish_new_results = models.BooleanField(
        default=True,
        help_text=(
            "If true, new results are automatically made public. If false, "
            "the challenge administrator must manually publish each new "
            "result."
        ),
    )
    display_all_metrics = models.BooleanField(
        default=True,
        help_text=(
            "Should all of the metrics be displayed on the Result detail page?"
        ),
    )
    evaluation_detail_observable_url = models.URLField(
        blank=True,
        validators=[OBSERVABLE_URL_VALIDATOR],
        max_length=2000,
        help_text=(
            "The URL of the embeddable observable notebook for viewing "
            "individual results. Must be of the form "
            "https://observablehq.com/embed/@user/notebook?cell=..."
        ),
    )
    evaluation_comparison_observable_url = models.URLField(
        blank=True,
        validators=[OBSERVABLE_URL_VALIDATOR],
        max_length=2000,
        help_text=(
            # BUG FIX: trailing space added so the implicit concatenation
            # reads "comparing results", not "comparingresults".
            "The URL of the embeddable observable notebook for comparing "
            "results. Must be of the form "
            "https://observablehq.com/embed/@user/notebook?cell=..."
        ),
    )

    # Component interfaces consumed/produced by the evaluation; defaults are
    # assigned on first save by set_default_interfaces().
    inputs = models.ManyToManyField(
        to=ComponentInterface, related_name="evaluation_inputs"
    )
    outputs = models.ManyToManyField(
        to=ComponentInterface, related_name="evaluation_outputs"
    )

    class Meta:
        unique_together = (
            ("challenge", "title"),
            ("challenge", "slug"),
        )
        ordering = ("challenge", "submissions_open", "created")
        permissions = (("create_phase_submission", "Create Phase Submission"),)

    def __str__(self):
        return f"{self.title} Evaluation for {self.challenge.short_name}"

    def save(self, *args, **kwargs):
        """
        Save the phase; on first save attach default interfaces and grant
        group permissions. Always schedules an async rank recalculation
        (``calculate_ranks`` — presumably a Celery task; defined elsewhere).
        """
        adding = self._state.adding

        super().save(*args, **kwargs)

        if adding:
            self.set_default_interfaces()
            self.assign_permissions()

        calculate_ranks.apply_async(kwargs={"phase_pk": self.pk})

    def set_default_interfaces(self):
        """Set the default csv-in / metrics-json-out evaluation interfaces."""
        self.inputs.set(
            [ComponentInterface.objects.get(slug="predictions-csv-file")]
        )
        self.outputs.set(
            [ComponentInterface.objects.get(slug="metrics-json-file")]
        )

    def assign_permissions(self):
        """Grant view/change/submit to challenge admins, submit to participants."""
        assign_perm("view_phase", self.challenge.admins_group, self)
        assign_perm("change_phase", self.challenge.admins_group, self)
        assign_perm(
            "create_phase_submission", self.challenge.admins_group, self
        )
        assign_perm(
            "create_phase_submission", self.challenge.participants_group, self
        )

    def get_absolute_url(self):
        """Return the challenge home page URL (the phase has no page of its own)."""
        return reverse(
            "pages:home",
            kwargs={"challenge_short_name": self.challenge.short_name},
        )

    def get_observable_url(self, view_kind, url_kind):
        """
        Return ``(url, cells)`` for the configured observable notebook.

        :param view_kind: "detail" or "comparison" — which notebook field to use.
        :param url_kind: "js" for the api.observablehq.com embed script URL,
            "edit" for the observablehq.com editing URL.
        :raises ValueError: for an unknown ``view_kind`` or ``url_kind``.

        Returns ``("", [])`` when the selected field is blank. NOTE(review):
        ``parse_qs(...)["cell"]`` raises KeyError if the URL has no ``cell``
        parameter — presumably guaranteed by OBSERVABLE_URL_VALIDATOR; confirm.
        """
        if view_kind == "detail":
            url = self.evaluation_detail_observable_url
        elif view_kind == "comparison":
            url = self.evaluation_comparison_observable_url
        else:
            raise ValueError("View or notebook not found")

        if not url:
            return "", []

        parsed_url = urlparse(url)
        cells = parse_qs(parsed_url.query)["cell"]
        url = f"{urljoin(url, parsed_url.path)}"

        if url_kind == "js":
            url = url.replace(
                "https://observablehq.com/embed/",
                "https://api.observablehq.com/",
            )
            url += ".js?v=3"
        elif url_kind == "edit":
            url = url.replace(
                "https://observablehq.com/embed/", "https://observablehq.com/"
            )
        else:
            raise ValueError("URL kind must be one of edit or js")

        return url, cells

    @property
    def observable_detail_edit_url(self):
        """Editing URL for the per-result notebook ("" when unset)."""
        url, _ = self.get_observable_url(view_kind="detail", url_kind="edit")
        return url

    @property
    def observable_comparison_edit_url(self):
        """Editing URL for the comparison notebook ("" when unset)."""
        url, _ = self.get_observable_url(
            view_kind="comparison", url_kind="edit"
        )
        return url
class ReaderStudy(UUIDModel, TitleSlugDescriptionModel):
    """
    A reader study: a set of images hung into viewports for a group of
    readers, with editors managing the study and readers answering questions.

    Fix in this revision: the two ``related_name`` values were f-strings with
    no placeholders (lint F541) — the redundant ``f`` prefixes are removed;
    the resulting strings are byte-identical.
    """

    editors_group = models.OneToOneField(
        Group,
        on_delete=models.CASCADE,
        editable=False,
        related_name="editors_of_readerstudy",
    )
    readers_group = models.OneToOneField(
        Group,
        on_delete=models.CASCADE,
        editable=False,
        related_name="readers_of_readerstudy",
    )
    images = models.ManyToManyField(
        "cases.Image", related_name="readerstudies"
    )
    workstation = models.ForeignKey(
        "workstations.Workstation", on_delete=models.CASCADE
    )
    logo = models.ImageField(upload_to=get_logo_path)
    # A hanging_list is a list of dictionaries where the keys are the
    # view names, and the values are the filenames to place there.
    hanging_list = JSONField(
        default=list,
        blank=True,
        validators=[JSONSchemaValidator(schema=HANGING_LIST_SCHEMA)],
    )
    shuffle_hanging_list = models.BooleanField(default=False)

    class Meta(UUIDModel.Meta, TitleSlugDescriptionModel.Meta):
        verbose_name_plural = "reader studies"

    def __str__(self):
        return f"{self.title}"

    def get_absolute_url(self):
        """Return the detail page URL for this reader study."""
        return reverse("reader-studies:detail", kwargs={"slug": self.slug})

    @property
    def api_url(self):
        """Return the REST API detail URL for this reader study."""
        return reverse("api:reader-study-detail", kwargs={"pk": self.pk})

    def create_groups(self):
        """Create the per-study editors and readers auth groups."""
        self.editors_group = Group.objects.create(
            name=f"{self._meta.app_label}_{self._meta.model_name}_{self.pk}_editors"
        )
        self.readers_group = Group.objects.create(
            name=f"{self._meta.app_label}_{self._meta.model_name}_{self.pk}_readers"
        )

    def assign_permissions(self):
        """Grant change to editors, view to editors and readers, add-answer to readers."""
        # Allow the editors group to change this study
        assign_perm(
            f"change_{self._meta.model_name}", self.editors_group, self
        )
        # Allow the editors and readers groups to view this study
        assign_perm(f"view_{self._meta.model_name}", self.editors_group, self)
        assign_perm(f"view_{self._meta.model_name}", self.readers_group, self)
        # Allow readers to add answers (globally), adding them to this reader
        # study is checked in the serializers as there is no
        # get_permission_object in django rest framework.
        assign_perm(
            f"{Answer._meta.app_label}.add_{Answer._meta.model_name}",
            self.readers_group,
        )

    def assign_workstation_permissions(self):
        """Ensure readers can view exactly this study's workstation."""
        perm = f"view_{Workstation._meta.model_name}"
        group = self.readers_group

        workstations = get_objects_for_group(
            group=group, perms=perm, klass=Workstation
        )

        if (self.workstation not in workstations) or workstations.count() > 1:
            # Revoke any stale workstation permissions before (re)granting.
            remove_perm(perm=perm, user_or_group=group, obj=workstations)

            # Allow readers to view the workstation used for this reader study
            assign_perm(perm=perm, user_or_group=group, obj=self.workstation)

    def save(self, *args, **kwargs):
        """Save; on first save create the groups and assign all permissions."""
        adding = self._state.adding

        if adding:
            self.create_groups()

        super().save(*args, **kwargs)

        if adding:
            self.assign_permissions()
            self.assign_workstation_permissions()

    def is_editor(self, user):
        """Is ``user`` a member of the editors group?"""
        return user.groups.filter(pk=self.editors_group.pk).exists()

    def add_editor(self, user):
        return user.groups.add(self.editors_group)

    def remove_editor(self, user):
        return user.groups.remove(self.editors_group)

    def is_reader(self, user):
        """Is ``user`` a member of the readers group?"""
        return user.groups.filter(pk=self.readers_group.pk).exists()

    def add_reader(self, user):
        return user.groups.add(self.readers_group)

    def remove_reader(self, user):
        return user.groups.remove(self.readers_group)

    @property
    def study_image_names(self):
        """Names of all images attached to this study."""
        return [im.name for im in self.images.all()]

    @property
    def hanging_image_names(self):
        """All image names referenced by the hanging list (with duplicates)."""
        return [
            name for hanging in self.hanging_list for name in hanging.values()
        ]

    @property
    def hanging_list_valid(self):
        """
        Test that all of the study images are included in the hanging list
        exactly once.
        """
        return sorted(self.study_image_names) == sorted(
            self.hanging_image_names
        )

    @property
    def non_unique_study_image_names(self):
        """
        Get all of the image names that are non-unique for this ReaderStudy
        """
        return [
            name
            for name, count in Counter(self.study_image_names).items()
            if count > 1
        ]

    @property
    def is_valid(self):
        """
        Is this ReaderStudy valid?
        """
        return (
            self.hanging_list_valid
            and len(self.non_unique_study_image_names) == 0
        )

    @property
    def hanging_list_images(self):
        """
        Substitutes the image name for the image detail api url for each image
        defined in the hanging list.
        """
        if not self.is_valid:
            return None

        study_images = {im.name: im.api_url for im in self.images.all()}

        hanging_list_images = [
            {view: study_images.get(name) for view, name in hanging.items()}
            for hanging in self.hanging_list
        ]

        return hanging_list_images

    def get_hanging_list_images_for_user(self, *, user):
        """
        Returns a shuffled list of the hanging list images for a particular
        user. The shuffle is seeded with the users pk, and using RandomState
        from numpy guarantees that the ordering will be consistent across
        python/library versions. Returns the normal list if
        shuffle_hanging_list is false.
        """
        hanging_list = self.hanging_list_images

        if self.shuffle_hanging_list and hanging_list is not None:
            # In place shuffle
            RandomState(seed=int(user.pk)).shuffle(hanging_list)

        return hanging_list
class WorkstationConfig(TitleSlugDescriptionModel, UUIDModel):
    """
    Display settings for a workstation viewer: window presets, slab
    rendering, orientation, overlay LUTs/alpha, key bindings, zoom, and
    which tools/plugins are shown.
    """

    class Orientation(models.TextChoices):
        # Default viewing plane.
        AXIAL = "A", "Axial"
        CORONAL = "C", "Coronal"
        SAGITTAL = "S", "Sagittal"

    class SlabRenderMethod(models.TextChoices):
        # Projection used when rendering a slab of slices.
        MAXIMUM = "MAX", "Maximum"
        MINIMUM = "MIN", "Minimum"
        AVERAGE = "AVG", "Average"

    class ImageContext(models.TextChoices):
        # Clinical context the viewer is configured for.
        PATHOLOGY = "PATH", "Pathology"
        OPHTHALMOLOGY = "OPHTH", "Ophthalmology"
        MPMRI = "MPMRI", "Multiparametric MRI"

    class ImageInterpolationType(models.TextChoices):
        # Interpolation used when resampling the overlay image.
        NEAREST = "NN", "NearestNeighbor"
        TRILINEAR = "TL", "Trilinear"

    # Kept on creator deletion (SET_NULL) so configs outlive their authors.
    creator = models.ForeignKey(
        get_user_model(), null=True, on_delete=models.SET_NULL
    )
    window_presets = models.ManyToManyField(
        to="WindowPreset",
        blank=True,
        related_name="workstation_window_presets",
    )
    default_window_preset = models.ForeignKey(
        to="WindowPreset",
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
        related_name="workstation_default_window_presets",
    )
    image_context = models.CharField(
        blank=True, max_length=6, choices=ImageContext.choices
    )
    # 4 digits, 2 decimal places, 0.01 min, 99.99 max
    default_slab_thickness_mm = models.DecimalField(
        blank=True,
        null=True,
        max_digits=4,
        decimal_places=2,
        validators=[MinValueValidator(limit_value=0.01)],
    )
    default_slab_render_method = models.CharField(
        max_length=3, choices=SlabRenderMethod.choices, blank=True
    )
    default_orientation = models.CharField(
        max_length=1, choices=Orientation.choices, blank=True
    )
    overlay_luts = models.ManyToManyField(
        to="LookUpTable", blank=True, related_name="workstation_overlay_luts"
    )
    default_overlay_lut = models.ForeignKey(
        to="LookUpTable", blank=True, null=True, on_delete=models.SET_NULL
    )
    default_overlay_interpolation = models.CharField(
        max_length=2,
        choices=ImageInterpolationType.choices,
        default=ImageInterpolationType.NEAREST,
        blank=True,
    )
    # 3 digits, 2 decimal places, 0.00 min, 1.00 max
    default_overlay_alpha = models.DecimalField(
        blank=True,
        null=True,
        max_digits=3,
        decimal_places=2,
        validators=[
            MinValueValidator(limit_value=0.00),
            MaxValueValidator(limit_value=1.00),
        ],
    )
    overlay_segments = models.JSONField(
        default=list,
        blank=True,
        validators=[JSONSchemaValidator(schema=OVERLAY_SEGMENTS_SCHEMA)],
    )
    key_bindings = models.JSONField(
        default=list,
        blank=True,
        validators=[JSONSchemaValidator(schema=KEY_BINDINGS_SCHEMA)],
    )
    # 4 digits, 2 decimal places, 0.01 min, 99.99 max
    default_zoom_scale = models.DecimalField(
        blank=True,
        null=True,
        max_digits=4,
        decimal_places=2,
        validators=[MinValueValidator(limit_value=0.01)],
    )
    # Tool/plugin visibility toggles; everything is shown by default.
    show_image_info_plugin = models.BooleanField(default=True)
    show_display_plugin = models.BooleanField(default=True)
    show_invert_tool = models.BooleanField(default=True)
    show_flip_tool = models.BooleanField(default=True)
    show_window_level_tool = models.BooleanField(default=True)
    show_reset_tool = models.BooleanField(default=True)
    enable_contrast_enhancement = models.BooleanField(
        default=False,
        verbose_name="Enable contrast enhancement preprocessing (fundus)",
    )
    client_rendered_sidebar = models.BooleanField(
        default=True, help_text="Use client side rendering for the side bar"
    )

    class Meta(TitleSlugDescriptionModel.Meta, UUIDModel.Meta):
        ordering = ("created", "creator")

    def __str__(self):
        return f"{self.title} (created by {self.creator})"

    def get_absolute_url(self):
        """Return the detail page URL for this config."""
        return reverse(
            "workstation-configs:detail", kwargs={"slug": self.slug}
        )

    def save(self, *args, **kwargs):
        """Save, then grant the creator object-level change permission."""
        super().save(*args, **kwargs)

        if self.creator:
            assign_perm(
                f"{self._meta.app_label}.change_{self._meta.model_name}",
                self.creator,
                self,
            )