Example #1
def test_mimetype_validator():
    json_validator = MimeTypeValidator(allowed_types=("application/json",))
    json_validator1 = MimeTypeValidator(allowed_types=("application/json",))
    text_validator = MimeTypeValidator(allowed_types=("text/plain",))
    assert json_validator is not json_validator1
    assert json_validator == json_validator1
    assert json_validator != text_validator
    assert hash(json_validator) == hash(json_validator1)
    assert hash(json_validator) != hash(text_validator)
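
The test above pins down MimeTypeValidator as a value object: two instances configured with the same allowed_types compare equal and hash equally, which is the behaviour Django relies on when comparing field definitions, for example during makemigrations. Below is a minimal sketch consistent with that test; the content sniffing via python-magic is an assumption for illustration, not necessarily the project's actual implementation.

import magic

from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible


@deconstructible
class MimeTypeValidator:
    def __init__(self, *, allowed_types):
        # Store as a tuple so instances are hashable by their configuration
        self.allowed_types = tuple(t.lower() for t in allowed_types)

    def __call__(self, value):
        # Sniff the actual content rather than trusting the file name or headers
        mimetype = magic.from_buffer(value.read(2048), mime=True)
        value.seek(0)
        if mimetype.lower() not in self.allowed_types:
            raise ValidationError(
                f"Files of type {mimetype} are not supported. "
                f"Allowed types are {self.allowed_types}."
            )

    def __eq__(self, other):
        return (
            isinstance(other, MimeTypeValidator)
            and self.allowed_types == other.allowed_types
        )

    def __hash__(self):
        return hash(self.allowed_types)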
Example #2
class AlgorithmForm(forms.ModelForm):
    ipython_notebook = forms.FileField(
        validators=[
            MimeTypeValidator(allowed_types=('text/plain',))
        ],
        required=False,
        help_text=(
            "Please upload an iPython notebook that describes your algorithm"
        ),
    )
    chunked_upload = UploadedAjaxFileList(
        widget=algorithm_upload_widget,
        label='Algorithm Image',
        validators=[
            ExtensionValidator(allowed_extensions=('.tar',))
        ],
        help_text=(
            'Tar archive of the container image produced from the command '
            '`docker save IMAGE > IMAGE.tar`. See '
            'https://docs.docker.com/engine/reference/commandline/save/'
        ),
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper = FormHelper(self)

    class Meta:
        model = Algorithm
        fields = ('ipython_notebook', 'chunked_upload',)
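
A hedged usage sketch of the form above: the MimeTypeValidator attached to ipython_notebook only runs when the form is validated, so a payload whose sniffed content type is not text/plain should surface as a field error. The test name and the database setup it would need are assumptions for illustration.

from django.core.files.uploadedfile import SimpleUploadedFile


def test_notebook_rejects_non_plain_text():
    form = AlgorithmForm(
        data={},
        files={
            # PNG magic bytes: content sniffing should classify this as image/png
            "ipython_notebook": SimpleUploadedFile(
                "notebook.ipynb", b"\x89PNG\r\n\x1a\n", content_type="image/png"
            )
        },
    )
    # Field validators fire during is_valid(); the notebook field should carry the error
    assert not form.is_valid()
    assert "ipython_notebook" in form.errors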
Example #3
class ComponentInterfaceValue(models.Model):
    """Encapsulates the value of an interface at a certain point in the graph."""

    id = models.BigAutoField(primary_key=True)
    interface = models.ForeignKey(to=ComponentInterface,
                                  on_delete=models.CASCADE)
    value = models.JSONField(null=True, blank=True, default=None)
    file = models.FileField(
        null=True,
        blank=True,
        upload_to=component_interface_value_path,
        storage=protected_s3_storage,
        validators=[
            ExtensionValidator(allowed_extensions=(".json", ".zip", ".csv")),
            MimeTypeValidator(allowed_types=(
                "application/json",
                "application/zip",
                "text/plain",
            )),
        ],
    )
    image = models.ForeignKey(to=Image,
                              null=True,
                              blank=True,
                              on_delete=models.CASCADE)

    @property
    def has_value(self):
        return self.value is not None or self.image or self.file

    def __str__(self):
        return f"Component Interface Value {self.pk} for {self.interface}"

    class Meta:
        ordering = ("pk", )
Example #4
class AlgorithmForm(forms.ModelForm):
    ipython_notebook = forms.FileField(
        validators=[MimeTypeValidator(allowed_types=("text/plain", ))],
        required=False,
        help_text=(
            "Please upload an iPython notebook that describes your algorithm"),
    )
    chunked_upload = UploadedAjaxFileList(
        widget=algorithm_upload_widget,
        label="Algorithm Image",
        validators=[
            ExtensionValidator(allowed_extensions=(".tar", ".tar.gz"))
        ],
        help_text=(
            ".tar.gz archive of the container image produced from the command "
            "'docker save IMAGE > IMAGE.tar | gzip'. See "
            "https://docs.docker.com/engine/reference/commandline/save/"),
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper = FormHelper(self)

    class Meta:
        model = Algorithm
        fields = (
            "title",
            "requires_gpu",
            "ipython_notebook",
            "chunked_upload",
        )
Example #5
class Submission(UUIDModel):
    """
    Stores files for evaluation
    """

    creator = models.ForeignKey(settings.AUTH_USER_MODEL,
                                null=True,
                                on_delete=models.SET_NULL)
    challenge = models.ForeignKey(Challenge, on_delete=models.CASCADE)
    # Limitation for now: only accept zip files as these are expanded in
    # evaluation.tasks.Evaluation. We could extend this first to csv file
    # submission with some validation
    file = models.FileField(
        upload_to=submission_file_path,
        validators=[
            MimeTypeValidator(allowed_types=("application/zip", "text/plain")),
            ExtensionValidator(allowed_extensions=(".zip", ".csv")),
        ],
    )
    supplementary_file = models.FileField(
        upload_to=submission_supplementary_file_path,
        validators=[
            MimeTypeValidator(allowed_types=("text/plain", "application/pdf"))
        ],
        blank=True,
    )
    comment = models.CharField(
        max_length=128,
        blank=True,
        default="",
        help_text=("You can add a comment here to help you keep track of your "
                   "submissions."),
    )
    publication_url = models.URLField(
        blank=True,
        help_text=(
            "A URL for the publication associated with this submission."),
    )

    def get_absolute_url(self):
        return reverse(
            "evaluation:submission-detail",
            kwargs={
                "pk": self.pk,
                "challenge_short_name": self.challenge.short_name,
            },
        )
Example #6
class Submission(UUIDModel):
    """
    Stores files for evaluation
    """
    creator = models.ForeignKey(settings.AUTH_USER_MODEL,
                                null=True,
                                on_delete=models.SET_NULL)
    challenge = models.ForeignKey(Challenge, on_delete=models.CASCADE)
    # Limitation for now: only accept zip files as these are expanded in
    # evaluation.tasks.Evaluation. We could extend this first to csv file
    # submission with some validation
    file = models.FileField(
        upload_to=submission_file_path,
        validators=[
            MimeTypeValidator(allowed_types=('application/zip', 'text/plain')),
            ExtensionValidator(allowed_extensions=('.zip', '.csv')),
        ],
    )
    supplementary_file = models.FileField(
        upload_to=submission_supplementary_file_path,
        validators=[
            MimeTypeValidator(allowed_types=('text/plain', 'application/pdf'))
        ],
        blank=True,
    )
    comment = models.CharField(
        max_length=128,
        blank=True,
        default='',
        help_text=('You can add a comment here to help you keep track of your '
                   'submissions.'),
    )

    def get_absolute_url(self):
        return reverse(
            'evaluation:submission-detail',
            kwargs={
                'pk': self.pk,
                'challenge_short_name': self.challenge.short_name,
            },
        )
Example #7
class Submission(UUIDModel):
    """Store files for evaluation."""

    creator = models.ForeignKey(settings.AUTH_USER_MODEL,
                                null=True,
                                on_delete=models.SET_NULL)
    creators_ip = models.GenericIPAddressField(null=True,
                                               default=None,
                                               editable=False)
    creators_user_agent = models.TextField(blank=True,
                                           default="",
                                           editable=False)

    phase = models.ForeignKey(Phase, on_delete=models.CASCADE, null=True)
    algorithm_image = models.ForeignKey(AlgorithmImage,
                                        null=True,
                                        on_delete=models.SET_NULL)
    predictions_file = models.FileField(
        upload_to=submission_file_path,
        validators=[
            MimeTypeValidator(allowed_types=("application/zip", "text/plain")),
            ExtensionValidator(allowed_extensions=(".zip", ".csv")),
        ],
        storage=protected_s3_storage,
        blank=True,
    )
    supplementary_file = models.FileField(
        upload_to=submission_supplementary_file_path,
        storage=public_s3_storage,
        validators=[
            MimeTypeValidator(allowed_types=("text/plain", "application/pdf"))
        ],
        blank=True,
    )
    comment = models.CharField(
        max_length=128,
        blank=True,
        default="",
        help_text=("You can add a comment here to help you keep track of your "
                   "submissions."),
    )
    publication_url = models.URLField(
        blank=True,
        help_text=(
            "A URL for the publication associated with this submission."),
    )

    class Meta:
        unique_together = (("phase", "predictions_file", "algorithm_image"), )

    def save(self, *args, **kwargs):
        adding = self._state.adding

        super().save(*args, **kwargs)

        if adding:
            self.create_evaluation()
            self.assign_permissions()

    def assign_permissions(self):
        assign_perm("view_submission", self.phase.challenge.admins_group, self)
        assign_perm("view_submission", self.creator, self)

    def create_evaluation(self):
        method = self.latest_ready_method

        if not method:
            send_missing_method_email(self)
            return

        evaluation = Evaluation.objects.create(submission=self, method=method)

        if self.algorithm_image:
            on_commit(lambda: create_algorithm_jobs_for_evaluation.apply_async(
                kwargs={"evaluation_pk": evaluation.pk}))
        else:
            mimetype = get_file_mimetype(self.predictions_file)

            if mimetype == "application/zip":
                interface = ComponentInterface.objects.get(
                    slug="predictions-zip-file")
            elif mimetype == "text/plain":
                interface = ComponentInterface.objects.get(
                    slug="predictions-csv-file")
            else:
                evaluation.update_status(
                    status=Evaluation.FAILURE,
                    stderr=f"{mimetype} files are not supported.",
                    error_message=f"{mimetype} files are not supported.",
                )
                return

            evaluation.inputs.set([
                ComponentInterfaceValue.objects.create(
                    interface=interface, file=self.predictions_file)
            ])
            on_commit(evaluation.signature.apply_async)

    @property
    def latest_ready_method(self):
        return (Method.objects.filter(phase=self.phase,
                                      ready=True).order_by("-created").first())

    def get_absolute_url(self):
        return reverse(
            "evaluation:submission-detail",
            kwargs={
                "pk": self.pk,
                "challenge_short_name": self.phase.challenge.short_name,
            },
        )
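
create_evaluation above branches on get_file_mimetype, which inspects the uploaded predictions file rather than trusting its name. Below is a hedged sketch of what such a helper could look like, using python-magic; the project's real implementation is not reproduced here.

import magic


def get_file_mimetype(file_field):
    """Sniff the MIME type of a Django FieldFile from its leading bytes."""
    was_closed = file_field.closed
    if was_closed:
        file_field.open("rb")
    try:
        file_field.seek(0)
        mimetype = magic.from_buffer(file_field.read(2048), mime=True)
        file_field.seek(0)
    finally:
        if was_closed:
            file_field.close()
    return mimetype  # e.g. "application/zip" or "text/plain"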
Example #8
class Submission(UUIDModel):
    """Store files for evaluation."""

    creator = models.ForeignKey(
        settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL
    )
    creators_ip = models.GenericIPAddressField(
        null=True, default=None, editable=False
    )
    creators_user_agent = models.TextField(
        blank=True, default="", editable=False
    )

    phase = models.ForeignKey(Phase, on_delete=models.CASCADE, null=True)
    algorithm_image = models.ForeignKey(
        AlgorithmImage, null=True, on_delete=models.SET_NULL
    )
    predictions_file = models.FileField(
        upload_to=submission_file_path,
        validators=[
            MimeTypeValidator(allowed_types=("application/zip", "text/plain")),
            ExtensionValidator(allowed_extensions=(".zip", ".csv")),
        ],
        storage=protected_s3_storage,
        blank=True,
    )
    supplementary_file = models.FileField(
        upload_to=submission_supplementary_file_path,
        storage=public_s3_storage,
        validators=[
            MimeTypeValidator(allowed_types=("text/plain", "application/pdf"))
        ],
        blank=True,
    )
    comment = models.CharField(
        max_length=128,
        blank=True,
        default="",
        help_text=(
            "You can add a comment here to help you keep track of your "
            "submissions."
        ),
    )
    publication_url = models.URLField(
        blank=True,
        help_text=(
            "A URL for the publication associated with this submission."
        ),
    )

    class Meta:
        unique_together = (("phase", "predictions_file", "algorithm_image"),)

    def save(self, *args, **kwargs):
        adding = self._state.adding

        super().save(*args, **kwargs)

        if adding:
            self.create_evaluation()
            self.assign_permissions()

    def assign_permissions(self):
        assign_perm("view_submission", self.phase.challenge.admins_group, self)
        assign_perm("view_submission", self.creator, self)

    def create_evaluation(self):
        method = self.latest_ready_method

        if not method:
            # TODO Email admins
            return

        evaluation = Evaluation.objects.create(submission=self, method=method)

        if self.algorithm_image:
            default_input_interface = ComponentInterface.objects.get(
                slug=DEFAULT_INPUT_INTERFACE_SLUG
            )

            jobs = []

            for image in self.phase.archive.images.all():
                if not ComponentInterfaceValue.objects.filter(
                    interface=default_input_interface,
                    image=image,
                    evaluation_algorithmevaluations_as_input__submission=self,
                ).exists():
                    j = AlgorithmEvaluation.objects.create(submission=self)
                    j.inputs.set(
                        [
                            ComponentInterfaceValue.objects.create(
                                interface=default_input_interface, image=image
                            )
                        ]
                    )
                    jobs.append(j.signature)

            if jobs:
                (
                    group(*jobs)
                    | set_evaluation_inputs.signature(
                        kwargs={"evaluation_pk": evaluation.pk},
                        immutable=True,
                    )
                ).apply_async()

        else:
            mimetype = get_file_mimetype(self.predictions_file)

            if mimetype == "application/zip":
                interface = ComponentInterface.objects.get(
                    slug="predictions-zip-file"
                )
            elif mimetype == "text/plain":
                interface = ComponentInterface.objects.get(
                    slug="predictions-csv-file"
                )
            else:
                raise NotImplementedError(
                    f"Interface is not defined for {mimetype} files"
                )

            evaluation.inputs.set(
                [
                    ComponentInterfaceValue.objects.create(
                        interface=interface, file=self.predictions_file
                    )
                ]
            )
            evaluation.signature.apply_async()

    @property
    def latest_ready_method(self):
        return (
            Method.objects.filter(phase=self.phase, ready=True)
            .order_by("-created")
            .first()
        )

    def get_absolute_url(self):
        return reverse(
            "evaluation:submission-detail",
            kwargs={
                "pk": self.pk,
                "challenge_short_name": self.phase.challenge.short_name,
            },
        )
Example #9
class Submission(UUIDModel):
    """Store files for evaluation."""

    creator = models.ForeignKey(settings.AUTH_USER_MODEL,
                                null=True,
                                on_delete=models.SET_NULL)
    challenge = models.ForeignKey(Challenge, on_delete=models.CASCADE)
    file = models.FileField(
        upload_to=submission_file_path,
        validators=[
            MimeTypeValidator(allowed_types=("application/zip", "text/plain")),
            ExtensionValidator(allowed_extensions=(".zip", ".csv")),
        ],
        storage=protected_s3_storage,
    )
    supplementary_file = models.FileField(
        upload_to=submission_supplementary_file_path,
        storage=public_s3_storage,
        validators=[
            MimeTypeValidator(allowed_types=("text/plain", "application/pdf"))
        ],
        blank=True,
    )
    comment = models.CharField(
        max_length=128,
        blank=True,
        default="",
        help_text=("You can add a comment here to help you keep track of your "
                   "submissions."),
    )
    publication_url = models.URLField(
        blank=True,
        help_text=(
            "A URL for the publication associated with this submission."),
    )

    def save(self, *args, **kwargs):
        adding = self._state.adding

        super().save(*args, **kwargs)

        if adding:
            self.create_evaluation()

            # Convert this submission to an annotation set
            base = ImageSet.objects.get(challenge=self.challenge,
                                        phase=ImageSet.TESTING)
            SubmissionToAnnotationSetJob.objects.create(base=base,
                                                        submission=self)

    def create_evaluation(self):
        method = self.latest_ready_method

        if not method:
            # TODO Email admins
            return

        e = Evaluation.objects.create(submission=self,
                                      method=self.latest_ready_method)

        mimetype = get_file_mimetype(self.file)

        if mimetype == "application/zip":
            interface = ComponentInterface.objects.get(
                slug="predictions-zip-file")
        elif mimetype == "text/plain":
            interface = ComponentInterface.objects.get(
                slug="predictions-csv-file")
        else:
            raise NotImplementedError(
                f"Interface is not defined for {mimetype} files")

        e.inputs.set([
            ComponentInterfaceValue.objects.create(interface=interface,
                                                   file=self.file)
        ])

        e.schedule_job()

    @property
    def latest_ready_method(self):
        return (Method.objects.filter(challenge=self.challenge,
                                      ready=True).order_by("-created").first())

    def get_absolute_url(self):
        return reverse(
            "evaluation:submission-detail",
            kwargs={
                "pk": self.pk,
                "challenge_short_name": self.challenge.short_name,
            },
        )
Example #10
class Submission(UUIDModel):
    """Store files for evaluation."""

    creator = models.ForeignKey(
        settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL
    )
    phase = models.ForeignKey(Phase, on_delete=models.PROTECT, null=True)
    algorithm_image = models.ForeignKey(
        AlgorithmImage, null=True, on_delete=models.SET_NULL
    )
    user_upload = models.ForeignKey(
        UserUpload, blank=True, null=True, on_delete=models.SET_NULL
    )
    predictions_file = models.FileField(
        upload_to=submission_file_path,
        validators=[
            MimeTypeValidator(allowed_types=("application/zip", "text/plain")),
            ExtensionValidator(allowed_extensions=(".zip", ".csv")),
        ],
        storage=protected_s3_storage,
        blank=True,
    )
    supplementary_file = models.FileField(
        upload_to=submission_supplementary_file_path,
        storage=public_s3_storage,
        validators=[
            MimeTypeValidator(allowed_types=("text/plain", "application/pdf"))
        ],
        blank=True,
    )
    comment = models.CharField(
        max_length=128,
        blank=True,
        default="",
        help_text=(
            "You can add a comment here to help you keep track of your "
            "submissions."
        ),
    )
    supplementary_url = models.URLField(
        blank=True, help_text="A URL associated with this submission."
    )

    class Meta:
        unique_together = (("phase", "predictions_file", "algorithm_image"),)

    def save(self, *args, **kwargs):
        adding = self._state.adding

        super().save(*args, **kwargs)

        if adding:
            self.assign_permissions()
            if not is_following(self.creator, self.phase):
                follow(
                    user=self.creator,
                    obj=self.phase,
                    actor_only=False,
                    send_action=False,
                )
            e = create_evaluation.signature(
                kwargs={"submission_pk": self.pk}, immutable=True
            )
            on_commit(e.apply_async)

    def assign_permissions(self):
        assign_perm("view_submission", self.phase.challenge.admins_group, self)

        if self.phase.public:
            assign_perm("view_submission", self.creator, self)
        else:
            remove_perm("view_submission", self.creator, self)

    def get_absolute_url(self):
        return reverse(
            "evaluation:submission-detail",
            kwargs={
                "pk": self.pk,
                "challenge_short_name": self.phase.challenge.short_name,
            },
        )
Example #11
class ComponentInterfaceValue(models.Model):
    """Encapsulates the value of an interface at a certain point in the graph."""

    id = models.BigAutoField(primary_key=True)
    interface = models.ForeignKey(to=ComponentInterface,
                                  on_delete=models.PROTECT)
    value = models.JSONField(null=True, blank=True, default=None)
    file = models.FileField(
        null=True,
        blank=True,
        upload_to=component_interface_value_path,
        storage=protected_s3_storage,
        validators=[
            ExtensionValidator(allowed_extensions=(
                ".json",
                ".zip",
                ".csv",
                ".png",
                ".jpg",
                ".jpeg",
                ".pdf",
                ".sqreg",
            )),
            MimeTypeValidator(allowed_types=(
                "application/json",
                "application/zip",
                "text/plain",
                "application/csv",
                "application/pdf",
                "image/png",
                "image/jpeg",
                "application/octet-stream",
                "application/x-sqlite3",
                "application/vnd.sqlite3",
            )),
        ],
    )
    image = models.ForeignKey(to=Image,
                              null=True,
                              blank=True,
                              on_delete=models.PROTECT)

    @property
    def title(self):
        if self.value is not None:
            return str(self.value)
        if self.file:
            return self.file.name
        if self.image:
            return self.image.name
        return ""

    @property
    def has_value(self):
        return self.value is not None or self.image or self.file

    @property
    def decompress(self):
        """
        Should the CIV be decompressed?

        This is only for legacy support of zip file submission for
        prediction evaluation. We should not support this anywhere
        else as it clobbers the input directory.
        """
        return self.interface.kind == InterfaceKindChoices.ZIP

    @cached_property
    def image_file(self):
        """The single image file for this interface"""
        return (self.image.files.filter(image_type__in=[
            ImageFile.IMAGE_TYPE_MHD,
            ImageFile.IMAGE_TYPE_TIFF,
        ]).get().file)

    @property
    def input_file(self):
        """The file to use as component input"""
        if self.image:
            return self.image_file
        elif self.file:
            return self.file
        else:
            src = NamedTemporaryFile(delete=True)
            src.write(bytes(json.dumps(self.value), "utf-8"))
            src.flush()
            return File(src, name=self.relative_path.name)

    @property
    def relative_path(self):
        """
        Where should the file be located?

        Images need special handling as their names are fixed.
        """
        path = Path(self.interface.relative_path)

        if self.image:
            path /= Path(self.image_file.name).name

        return path

    def __str__(self):
        return f"Component Interface Value {self.pk} for {self.interface}"

    def clean(self):
        super().clean()

        if self.interface.is_image_kind:
            self._validate_image_only()
        elif self.interface.is_file_kind:
            self._validate_file_only()
        else:
            self._validate_value()

    def _validate_image_only(self):
        if not self.image:
            raise ValidationError("Image must be set")
        if self.file or self.value is not None:
            raise ValidationError(
                f"File ({self.file}) or value should not be set for images")

    def _validate_file_only(self):
        if not self.file:
            raise ValidationError("File must be set")
        if self.image or self.value is not None:
            raise ValidationError(
                f"Image ({self.image}) or value must not be set for files")

    def _validate_value_only(self):
        # Do not check self.value here, it can be anything including None.
        # This is checked later with interface.validate_against_schema.
        if self.image or self.file:
            raise ValidationError(
                f"Image ({self.image}) or file ({self.file}) must not be set for values"
            )

    def _validate_value(self):
        if self.interface.saved_in_object_store:
            self._validate_file_only()
            with self.file.open("r") as f:
                value = json.loads(f.read().decode("utf-8"))
        else:
            self._validate_value_only()
            value = self.value

        self.interface.validate_against_schema(value=value)

    class Meta:
        ordering = ("pk", )
Example #12
class ChallengeRequest(UUIDModel, CommonChallengeFieldsMixin):
    class ChallengeTypeChoices(models.IntegerChoices):
        """Challenge type choices."""

        T1 = 1, "Type 1 - prediction submission"
        T2 = 2, "Type 2 - algorithm submission"

    class ChallengeRequestStatusChoices(models.TextChoices):
        ACCEPTED = "ACPT", _("Accepted")
        REJECTED = "RJCT", _("Rejected")
        PENDING = "PEND", _("Pending")

    status = models.CharField(
        max_length=4,
        choices=ChallengeRequestStatusChoices.choices,
        default=ChallengeRequestStatusChoices.PENDING,
    )
    abstract = models.TextField(
        help_text="Provide a summary of the challenge purpose.",
    )
    contact_email = models.EmailField(
        help_text="Please provide an email that our team can use to contact "
        "you should there be any questions about your request.",
    )
    start_date = models.DateField(
        help_text="Estimated start date for this challenge.",
    )
    end_date = models.DateField(
        help_text="Estimated end date for this challenge. Please note that we aim to "
        "keep challenges open for submission for at least 3 years after "
        "the official end date if possible.",
    )
    organizers = models.TextField(
        help_text="Provide information about the organizing team (names and affiliations)",
    )
    affiliated_event = models.CharField(
        blank=True,
        max_length=50,
        help_text="Is this challenge part of a workshop or conference? If so, which one?",
    )
    structured_challenge_submission_form = models.FileField(
        null=True,
        blank=True,
        upload_to=submission_pdf_path,
        storage=protected_s3_storage,
        validators=[
            ExtensionValidator(allowed_extensions=(".pdf",)),
            MimeTypeValidator(allowed_types=("application/pdf",)),
        ],
    )
    challenge_type = models.PositiveSmallIntegerField(
        choices=ChallengeTypeChoices.choices,
        default=ChallengeTypeChoices.T2,
        help_text="What type is this challenge?",
    )
    challenge_setup = models.TextField(
        help_text="Describe the challenge set-up."
    )
    data_set = models.TextField(
        help_text="Describe the training and test datasets you are planning to use."
    )
    submission_assessment = models.TextField(
        help_text="Define the metrics you will use to assess and rank "
        "participants’ submissions."
    )
    challenge_publication = models.TextField(
        help_text="Please indicate if you plan to coordinate a publication "
        "of the challenge results."
    )
    code_availability = models.TextField(
        help_text="Will the participants’ code be accessible after the challenge?"
    )
    expected_number_of_teams = models.IntegerField(
        help_text="How many teams do you expect to participate in your challenge?"
    )
    average_algorithm_container_size_in_gb = models.IntegerField(
        default=10, help_text="Average algorithm container size in GB."
    )
    average_number_of_containers_per_team = models.IntegerField(
        default=10,
        help_text="Average number of algorithm containers per team.",
    )
    inference_time_limit_in_minutes = models.IntegerField(
        blank=True,
        null=True,
        help_text="Average run time per algorithm job in minutes.",
    )
    average_size_of_test_image_in_mb = models.IntegerField(
        null=True, blank=True, help_text="Average size of a test image in MB."
    )
    phase_1_number_of_submissions_per_team = models.IntegerField(
        null=True,
        blank=True,
        help_text="How many submissions do you expect per team in this phase?",
    )
    phase_2_number_of_submissions_per_team = models.IntegerField(
        null=True,
        blank=True,
        help_text="How many submissions do you expect per team in this phase?",
    )
    phase_1_number_of_test_images = models.IntegerField(
        null=True,
        blank=True,
        help_text="Number of test images for this phase.",
    )
    phase_2_number_of_test_images = models.IntegerField(
        null=True,
        blank=True,
        help_text="Number of test images for this phase.",
    )
    number_of_tasks = models.IntegerField(
        default=1,
        help_text="If your challenge has multiple tasks, we multiply the "
        "phase 1 and 2 cost estimates by the number of tasks.",
    )
    budget_for_hosting_challenge = models.IntegerField(
        default=0,
        null=True,
        blank=True,
        help_text="What is your budget for hosting this challenge, if any?",
    )
    long_term_commitment = models.BooleanField(
        null=True,
        blank=True,
    )
    long_term_commitment_extra = models.CharField(
        max_length=2000,
        blank=True,
    )
    data_license = models.BooleanField(
        null=True,
        blank=True,
    )
    data_license_extra = models.CharField(
        max_length=2000,
        blank=True,
    )
    comments = models.TextField(
        blank=True,
        help_text="If you have any comments, remarks or questions, please leave them here.",
    )
    algorithm_inputs = models.TextField(
        blank=True,
        help_text="What are the inputs to the algorithms submitted as solutions to "
        "your Type 2 challenge going to be? "
        "Please describe in detail "
        "what the input(s) reflect(s), for example, "
        "MRI scan of the brain, or chest X-ray. Grand Challenge only "
        "supports .mha and .tiff image files and json files for algorithms.",
    )
    algorithm_outputs = models.TextField(
        blank=True,
        help_text="What are the outputs to the algorithms submitted as solutions to "
        "your Type 2 challenge going to be? "
        "Please describe in detail what the output(s) "
        "reflect(s), for example, probability of a positive PCR result, or "
        "stroke lesion segmentation. ",
    )

    def __str__(self):
        return self.title

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._orig_status = self.status

    def get_absolute_url(self):
        return reverse("challenges:requests-detail", kwargs={"pk": self.pk})

    def save(self, *args, **kwargs):
        adding = self._state.adding
        super().save(*args, **kwargs)

        if adding:
            send_challenge_requested_email_to_reviewers(self)
            send_challenge_requested_email_to_requester(self)

    def create_challenge(self):
        challenge = Challenge(
            title=self.title,
            short_name=self.short_name,
            creator=self.creator,
            hidden=True,
            contact_email=self.contact_email,
        )
        challenge.full_clean()
        challenge.save()
        challenge.task_types.set(self.task_types.all())
        challenge.modalities.set(self.modalities.all())
        challenge.structures.set(self.structures.all())
        challenge.save()

        if self.challenge_type == self.ChallengeTypeChoices.T2:
            phase = challenge.phase_set.get()
            phase.submission_kind = phase.SubmissionKind.ALGORITHM
            phase.creator_must_be_verified = True
            phase.full_clean()
            phase.save()

        return challenge

    @cached_property
    def budget(self):
        budget = None
        if self.challenge_type == self.ChallengeTypeChoices.T2:
            compute_costs = settings.CHALLENGES_COMPUTE_COST_CENTS_PER_HOUR
            storage_costs = (
                settings.CHALLENGES_STORAGE_COST_CENTS_PER_TB_PER_YEAR
            )

            budget = {
                "Data storage cost for phase 1": None,
                "Compute costs for phase 1": None,
                "Total phase 1": None,
                "Data storage cost for phase 2": None,
                "Compute costs for phase 2": None,
                "Total phase 2": None,
                "Docker storage cost": None,
                "Total": None,
            }

            # calculate budget for phase 1
            budget["Data storage cost for phase 1"] = round(
                self.phase_1_number_of_test_images
                * self.average_size_of_test_image_in_mb
                * storage_costs
                / 1000000
                / 100,
                ndigits=2,
            )
            budget["Compute costs for phase 1"] = round(
                self.phase_1_number_of_test_images
                * self.phase_1_number_of_submissions_per_team
                * self.expected_number_of_teams
                * self.inference_time_limit_in_minutes
                * compute_costs
                / 60
                / 100,
                ndigits=2,
            )
            budget["Total phase 1"] = round(
                (
                    budget["Data storage cost for phase 1"]
                    + budget["Compute costs for phase 1"]
                )
                * self.number_of_tasks,
                ndigits=2,
            )

            # calculate budget for phase 2
            budget["Data storage cost for phase 2"] = round(
                self.phase_2_number_of_test_images
                * self.average_size_of_test_image_in_mb
                * storage_costs
                / 1000000
                / 100,
                ndigits=2,
            )
            budget["Compute costs for phase 2"] = round(
                self.phase_2_number_of_test_images
                * self.phase_2_number_of_submissions_per_team
                * self.expected_number_of_teams
                * self.inference_time_limit_in_minutes
                * compute_costs
                / 60
                / 100,
                ndigits=2,
            )
            budget["Total phase 2"] = round(
                (
                    budget["Data storage cost for phase 2"]
                    + budget["Compute costs for phase 2"]
                )
                * self.number_of_tasks,
                ndigits=2,
            )

            budget["Docker storage cost"] = round(
                self.average_algorithm_container_size_in_gb
                * self.average_number_of_containers_per_team
                * self.expected_number_of_teams
                * storage_costs
                / 1000
                / 100,
                ndigits=2,
            )

            budget["Total"] = round(
                sum(
                    filter(
                        None,
                        [
                            budget["Total phase 1"],
                            budget["Total phase 2"],
                            budget["Docker storage cost"],
                        ],
                    )
                ),
                ndigits=2,
            )

        return budget
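
A hedged worked example of the phase 1 compute line in the budget above, with made-up settings and request values; the pricing numbers are illustrative only, not real costs.

# All numbers below are assumptions for illustration
compute_cost_cents_per_hour = 100        # stands in for settings.CHALLENGES_COMPUTE_COST_CENTS_PER_HOUR
phase_1_number_of_test_images = 100
phase_1_number_of_submissions_per_team = 10
expected_number_of_teams = 20
inference_time_limit_in_minutes = 10

compute_costs_phase_1 = round(
    phase_1_number_of_test_images
    * phase_1_number_of_submissions_per_team
    * expected_number_of_teams
    * inference_time_limit_in_minutes
    * compute_cost_cents_per_hour
    / 60    # minutes -> hours
    / 100,  # cents -> whole currency units
    ndigits=2,
)
assert compute_costs_phase_1 == 3333.33  # 200,000 job-minutes at 100 cents per compute hour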