Example #1
def run_submission(challenge_id, challenge_phase, submission_id, submission,
                   user_annotation_file_path):
    '''
        * receives a challenge id, challenge phase, submission and the user annotation file path
        * looks up the evaluation script registered for the challenge
        * builds the path to the phase's annotation file
        * calls the evaluation script's evaluate() in-process, passing the annotation file path, the user annotation file path and the phase codename
    '''
    submission_output = None
    phase_id = challenge_phase.id
    annotation_file_name = PHASE_ANNOTATION_FILE_NAME_MAP.get(
        challenge_id).get(phase_id)
    annotation_file_path = PHASE_ANNOTATION_FILE_PATH.format(
        challenge_id=challenge_id,
        phase_id=phase_id,
        annotation_file=annotation_file_name)
    submission_data_dir = SUBMISSION_DATA_DIR.format(
        submission_id=submission_id)
    # create a temporary run directory under the submission directory, so that
    # the main directory does not get polluted
    temp_run_dir = join(submission_data_dir, 'run')
    create_dir(temp_run_dir)

    stdout_file_name = 'temp_stdout.txt'
    stderr_file_name = 'temp_stderr.txt'

    stdout_file = join(temp_run_dir, stdout_file_name)
    stderr_file = join(temp_run_dir, stderr_file_name)

    stdout = open(stdout_file, 'a+')
    stderr = open(stderr_file, 'a+')

    # set `status` to running (which also records `started_at`) before
    # invoking the evaluation script
    submission.status = Submission.RUNNING
    submission.save()
    try:
        successful_submission_flag = True
        with stdout_redirect(stdout) as new_stdout, stderr_redirect(
                stderr) as new_stderr:  # noqa
            submission_output = EVALUATION_SCRIPTS[challenge_id].evaluate(
                annotation_file_path,
                user_annotation_file_path,
                challenge_phase.codename,
            )
        '''
        A submission will be marked successful only if it is of the format
            {
               "result":[
                  {
                     "split_codename_1":{
                        "key1":30,
                        "key2":50,
                     }
                  },
                  {
                     "split_codename_2":{
                        "key1":90,
                        "key2":10,
                     }
                  },
                  {
                     "split_codename_3":{
                        "key1":100,
                        "key2":45,
                     }
                  }
               ],
               "submission_metadata": {'foo': 'bar'},
               "submission_result": ['foo', 'bar'],
            }
        '''
        if 'result' in submission_output:

            leaderboard_data_list = []
            for split_result in submission_output['result']:

                # Check if the dataset_split exists for the codename in the result
                try:
                    # get split_code_name, i.e. the key of the result entry
                    split_code_name = list(split_result.keys())[0]
                    dataset_split = DatasetSplit.objects.get(
                        codename=split_code_name)
                except Exception:
                    stderr.write(
                        "ORIGINAL EXCEPTION: The codename specified by your Challenge Host doesn't match"
                        " the one in the evaluation script.\n")
                    stderr.write(traceback.format_exc())
                    successful_submission_flag = False
                    break

                # Check if the challenge_phase_split exists for the challenge_phase and dataset_split
                try:
                    challenge_phase_split = ChallengePhaseSplit.objects.get(
                        challenge_phase=challenge_phase,
                        dataset_split=dataset_split)
                except Exception:
                    stderr.write(
                        "ORIGINAL EXCEPTION: No such relation between Challenge Phase and DatasetSplit"
                        " specified by the Challenge Host.\n")
                    stderr.write(traceback.format_exc())
                    successful_submission_flag = False
                    break

                leaderboard_data = LeaderboardData()
                leaderboard_data.challenge_phase_split = challenge_phase_split
                leaderboard_data.submission = submission
                leaderboard_data.leaderboard = challenge_phase_split.leaderboard
                leaderboard_data.result = split_result.get(
                    dataset_split.codename)

                leaderboard_data_list.append(leaderboard_data)

            if successful_submission_flag:
                LeaderboardData.objects.bulk_create(leaderboard_data_list)

        # If the evaluation script did not return a "result" key, the
        # submission cannot be scored, so mark it as failed
        else:
            successful_submission_flag = False

    except Exception:
        stderr.write(traceback.format_exc())
        successful_submission_flag = False

    submission_status = Submission.FINISHED if successful_submission_flag else Submission.FAILED
    submission.status = submission_status
    submission.save()

    # persist the evaluation output and the result/metadata files on the submission
    if submission_output:
        output = {}
        output['result'] = submission_output.get('result', '')
        submission.output = output

        # Save submission_result_file
        # serialize to JSON so ContentFile receives a string, not a list
        submission_result = json.dumps(
            submission_output.get('submission_result', ''))
        submission.submission_result_file.save('submission_result.json',
                                               ContentFile(submission_result))

        # Save submission_metadata_file
        # serialize to JSON so ContentFile receives a string, not a dict
        submission_metadata = json.dumps(
            submission_output.get('submission_metadata', ''))
        submission.submission_metadata_file.save(
            'submission_metadata.json', ContentFile(submission_metadata))

    submission.save()

    stderr.close()
    stdout.close()

    # TODO: see if the two updates can be combined into a single update.
    with open(stdout_file, 'r') as stdout:
        stdout_content = stdout.read()
        submission.stdout_file.save('stdout.txt', ContentFile(stdout_content))
    with open(stderr_file, 'r') as stderr:
        stderr_content = stderr.read()
        submission.stderr_file.save('stderr.txt', ContentFile(stderr_content))

    # delete the complete temp run directory
    shutil.rmtree(temp_run_dir)
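
For reference, here is a minimal sketch of an evaluation script that returns output in the format run_submission expects. The split codenames and metric keys below are illustrative assumptions; only the top-level keys ("result", "submission_metadata", "submission_result") matter to the worker, and each split codename must match a DatasetSplit codename configured by the challenge host.

def evaluate(annotation_file_path, user_annotation_file_path, phase_codename):
    # Hypothetical metrics; a real script would compare the ground-truth
    # annotations against the user's predictions here.
    return {
        "result": [
            {"dev_split": {"key1": 30, "key2": 50}},
            {"test_split": {"key1": 90, "key2": 10}},
        ],
        "submission_metadata": {"foo": "bar"},
        "submission_result": ["foo", "bar"],
    }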
Example #2
def run_submission(challenge_id, challenge_phase, submission,
                   user_annotation_file_path):
    """
        * receives a challenge id, phase id and user annotation file path
        * checks whether the corresponding evaluation script for the challenge exists or not
        * checks the above for annotation file
        * calls evaluation script via subprocess passing annotation file and user_annotation_file_path as argument
    """

    # Use the submission serializer to send relevant data to evaluation script
    # so that challenge hosts can use data for webhooks or any other service.
    submission_serializer = SubmissionSerializer(submission)

    submission_output = None
    phase_id = challenge_phase.id
    annotation_file_name = PHASE_ANNOTATION_FILE_NAME_MAP.get(
        challenge_id).get(phase_id)
    annotation_file_path = PHASE_ANNOTATION_FILE_PATH.format(
        challenge_id=challenge_id,
        phase_id=phase_id,
        annotation_file=annotation_file_name,
    )
    submission_data_dir = SUBMISSION_DATA_DIR.format(
        submission_id=submission.id)

    submission.status = Submission.RUNNING
    submission.started_at = timezone.now()
    submission.save()

    # create a temporary run directory under the submission directory, so that
    # the main directory does not get polluted
    temp_run_dir = join(submission_data_dir, "run")
    create_dir(temp_run_dir)

    stdout_file = join(temp_run_dir, "temp_stdout.txt")
    stderr_file = join(temp_run_dir, "temp_stderr.txt")

    stdout = open(stdout_file, "a+")
    stderr = open(stderr_file, "a+")

    remote_evaluation = submission.challenge_phase.challenge.remote_evaluation

    if remote_evaluation:
        try:
            logger.info("Sending submission {} for remote evaluation".format(
                submission.id))
            with stdout_redirect(stdout) as new_stdout, stderr_redirect(
                    stderr) as new_stderr:
                submission_output = EVALUATION_SCRIPTS[challenge_id].evaluate(
                    annotation_file_path,
                    user_annotation_file_path,
                    challenge_phase.codename,
                    submission_metadata=submission_serializer.data,
                )
                return
        except Exception:
            stderr.write(traceback.format_exc())
            stderr.close()
            stdout.close()
            submission.status = Submission.FAILED
            submission.completed_at = timezone.now()
            submission.save()
            with open(stdout_file, "r") as stdout:
                stdout_content = stdout.read()
                submission.stdout_file.save("stdout.txt",
                                            ContentFile(stdout_content))
            with open(stderr_file, "r") as stderr:
                stderr_content = stderr.read()
                submission.stderr_file.save("stderr.txt",
                                            ContentFile(stderr_content))

            # delete the complete temp run directory
            shutil.rmtree(temp_run_dir)
            return

    # run the evaluation script in-process, capturing its stdout and stderr
    try:
        successful_submission_flag = True
        with stdout_redirect(stdout) as new_stdout, stderr_redirect(  # noqa
                stderr) as new_stderr:  # noqa
            submission_output = EVALUATION_SCRIPTS[challenge_id].evaluate(
                annotation_file_path,
                user_annotation_file_path,
                challenge_phase.codename,
                submission_metadata=submission_serializer.data,
            )
        """
        A submission will be marked successful only if it is of the format
            {
               "result":[
                  {
                     "split_codename_1":{
                        "key1":30,
                        "key2":50,
                     }
                  },
                  {
                     "split_codename_2":{
                        "key1":90,
                        "key2":10,
                     }
                  },
                  {
                     "split_codename_3":{
                        "key1":100,
                        "key2":45,
                     }
                  }
               ],
               "submission_metadata": {'foo': 'bar'},
               "submission_result": ['foo', 'bar'],
            }
        """

        error_bars_dict = dict()
        if "error" in submission_output:
            for split_error in submission_output["error"]:
                split_code_name = list(split_error.keys())[0]
                error_bars_dict[split_code_name] = split_error[split_code_name]

        if "result" in submission_output:

            leaderboard_data_list = []
            for split_result in submission_output["result"]:
                # get split_code_name that is the key of the result
                split_code_name = list(split_result.keys())[0]

                # Check if the challenge_phase_split exists for the challenge_phase and dataset_split
                try:
                    challenge_phase_split = ChallengePhaseSplit.objects.get(
                        challenge_phase=challenge_phase,
                        dataset_split__codename=split_code_name,
                    )
                except Exception:
                    stderr.write(
                        "ORGINIAL EXCEPTION: No such relation between Challenge Phase and DatasetSplit"
                        " specified by Challenge Host \n")
                    stderr.write(traceback.format_exc())
                    successful_submission_flag = False
                    break

                # Check if the dataset_split exists for the codename in the result
                try:
                    dataset_split = challenge_phase_split.dataset_split
                except Exception:
                    stderr.write(
                        "ORGINIAL EXCEPTION: The codename specified by your Challenge Host doesn't match"
                        " with that in the evaluation Script.\n")
                    stderr.write(traceback.format_exc())
                    successful_submission_flag = False
                    break

                leaderboard_data = LeaderboardData()
                leaderboard_data.challenge_phase_split = challenge_phase_split
                leaderboard_data.submission = submission
                leaderboard_data.leaderboard = (
                    challenge_phase_split.leaderboard)
                leaderboard_data.result = split_result.get(
                    dataset_split.codename)

                if "error" in submission_output:
                    leaderboard_data.error = error_bars_dict.get(
                        dataset_split.codename)

                leaderboard_data_list.append(leaderboard_data)

            if successful_submission_flag:
                LeaderboardData.objects.bulk_create(leaderboard_data_list)

        # If the evaluation script did not return a "result" key, the
        # submission cannot be scored, so mark it as failed
        else:
            successful_submission_flag = False

    except Exception:
        stderr.write(traceback.format_exc())
        successful_submission_flag = False

    submission_status = (Submission.FINISHED
                         if successful_submission_flag else Submission.FAILED)
    submission.status = submission_status
    submission.completed_at = timezone.now()
    submission.save()

    # persist the evaluation output and the result/metadata files on the submission
    if submission_output:
        output = {}
        output["result"] = submission_output.get("result", "")
        submission.output = output

        # Save submission_result_file
        submission_result = submission_output.get("submission_result", "")
        submission_result = json.dumps(submission_result)
        submission.submission_result_file.save("submission_result.json",
                                               ContentFile(submission_result))

        # Save submission_metadata_file
        submission_metadata = submission_output.get("submission_metadata", "")
        submission.submission_metadata_file.save(
            "submission_metadata.json", ContentFile(submission_metadata))

    submission.save()

    stderr.close()
    stdout.close()
    stderr_content = open(stderr_file, "r").read()
    stdout_content = open(stdout_file, "r").read()

    # TODO: see if the two updates can be combined into a single update.
    with open(stdout_file, "r") as stdout:
        stdout_content = stdout.read()
        submission.stdout_file.save("stdout.txt", ContentFile(stdout_content))
    if submission_status == Submission.FAILED:
        with open(stderr_file, "r") as stderr:
            stderr_content = stderr.read()
            submission.stderr_file.save("stderr.txt",
                                        ContentFile(stderr_content))

    # delete the complete temp run directory
    shutil.rmtree(temp_run_dir)
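
Both examples rely on stdout_redirect and stderr_redirect helpers defined elsewhere in the worker module. A minimal sketch of how such a helper could look, assuming it simply swaps sys.stdout for the given file object while the evaluation script runs (the real implementation may differ):

import contextlib
import sys


@contextlib.contextmanager
def stdout_redirect(target):
    # Temporarily point sys.stdout at `target` so anything the evaluation
    # script prints is captured in the submission's log file.
    original, sys.stdout = sys.stdout, target
    try:
        yield target
    finally:
        # Restore the real stdout even if evaluate() raised.
        sys.stdout = original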