Example #1
File: tasks.py Project: sjoerdk/codalab
def score(submission, job_id):
    """
    Dispatches the scoring task for the given submission to an appropriate compute worker.

    submission: The CompetitionSubmission object.
    job_id: The job ID used to track the progress of the evaluation.
    """
    # Loads the computation state.
    state = {}
    if len(submission.execution_key) > 0:
        state = json.loads(submission.execution_key)
    has_generated_predictions = 'predict' in state

    # Generate metadata-only bundle describing the inputs. Reference data is an optional
    # dataset provided by the competition organizer. Results are provided by the participant
    # either indirectly (has_generated_predictions is True, i.e. the participant provides a
    # program which is run to generate results) or directly (the participant uploads results).
    lines = []
    ref_value = submission.phase.reference_data.name
    if len(ref_value) > 0:
        lines.append("ref: %s" % ref_value)
    res_value = submission.prediction_output_file.name if has_generated_predictions else submission.file.name
    if len(res_value) > 0:
        lines.append("res: %s" % res_value)
    else:
        raise ValueError("Results are missing.")
    submission.inputfile.save('input.txt', ContentFile('\n'.join(lines)))
    # Generate metadata-only bundle describing the computation.
    lines = []
    program_value = submission.phase.scoring_program.name
    if len(program_value) > 0:
        lines.append("program: %s" % program_value)
    else:
        raise ValueError("Program is missing.")
    lines.append("input: %s" % submission.inputfile.name)
    lines.append("stdout: %s" % submission_stdout_filename(submission))
    lines.append("stderr: %s" % submission_stderr_filename(submission))
    submission.runfile.save('run.txt', ContentFile('\n'.join(lines)))

    # Create stdout.txt & stderr.txt
    if not has_generated_predictions:
        username = submission.participant.user.username
        lines = ["Standard output for submission #{0} by {1}.".format(submission.submission_number, username), ""]
        submission.stdout_file.save('stdout.txt', ContentFile('\n'.join(lines)))
        lines = ["Standard error for submission #{0} by {1}.".format(submission.submission_number, username), ""]
        submission.stderr_file.save('stderr.txt', ContentFile('\n'.join(lines)))
    # Update workflow state
    state['score'] = job_id
    submission.execution_key = json.dumps(state)
    submission.save()
    # Submit the request to the computation service
    body = json.dumps({"id" : job_id,
                       "task_type": "run",
                       "task_args": {
                           "bundle_id" : submission.runfile.name,
                           "container_name" : settings.BUNDLE_AZURE_CONTAINER,
                           "reply_to" : settings.SBS_RESPONSE_QUEUE}})
    getQueue(settings.SBS_COMPUTE_QUEUE).send_message(body)
    if not has_generated_predictions:
        _set_submission_status(submission.id, CompetitionSubmissionStatus.SUBMITTED)
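
Note: for orientation, here is a minimal sketch (not taken from the CodaLab sources above) of how the two tasks relate. predict() stores its job ID under the 'predict' key of submission.execution_key, and score() detects that via has_generated_predictions. The wrapper name dispatch_evaluation and the results_uploaded_directly flag are hypothetical, for illustration only.

import uuid

def dispatch_evaluation(submission, results_uploaded_directly):
    # Hypothetical driver (not part of the examples) showing the predict -> score hand-off.
    if results_uploaded_directly:
        # The participant uploaded results directly, so only the scoring step runs;
        # score() then uses submission.file as the "res" bundle.
        score(submission, uuid.uuid4().hex)
    else:
        # The participant uploaded a program: run it first. score() is dispatched
        # later, once the compute worker reports that the predict job finished,
        # and it finds 'predict' in execution_key (has_generated_predictions).
        predict(submission, uuid.uuid4().hex)
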
Example #2
def predict(submission, job_id):
    """
    Dispatches the prediction task for the given submission to an appropriate compute worker.

    submission: The CompetitionSubmission object.
    job_id: The job ID used to track the progress of the evaluation.
    """
    # Generate metadata-only bundle describing the computation
    lines = []
    program_value = submission.file.name
    if len(program_value) > 0:
        lines.append("program: %s" % program_value)
    else:
        raise ValueError("Program is missing.")
    input_value = submission.phase.input_data.name

    if len(input_value) > 0:
        lines.append("input: %s" % input_value)
    lines.append("stdout: %s" % submission_stdout_filename(submission))
    lines.append("stderr: %s" % submission_stderr_filename(submission))
    submission.prediction_runfile.save('run.txt',
                                       ContentFile('\n'.join(lines)))
    # Create stdout.txt & stderr.txt
    username = submission.participant.user.username
    lines = [
        "Standard output for submission #{0} by {1}.".format(
            submission.submission_number, username), ""
    ]
    submission.stdout_file.save('stdout.txt', ContentFile('\n'.join(lines)))
    lines = [
        "Standard error for submission #{0} by {1}.".format(
            submission.submission_number, username), ""
    ]
    submission.stderr_file.save('stderr.txt', ContentFile('\n'.join(lines)))

    # Store workflow state
    submission.execution_key = json.dumps({'predict': job_id})
    submission.save()
    # Submit the request to the computation service
    body = json.dumps({
        "id": job_id,
        "task_type": "run",
        "task_args": {
            "bundle_id": submission.prediction_runfile.name,
            "container_name": settings.BUNDLE_AZURE_CONTAINER,
            "reply_to": settings.SBS_RESPONSE_QUEUE,
            "execution_time_limit": submission.phase.execution_time_limit
        }
    })

    getQueue(settings.SBS_COMPUTE_QUEUE).send_message(body)
    # Update the submission object
    _set_submission_status(submission.id,
                           CompetitionSubmissionStatus.SUBMITTED)
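
Roughly, the run.txt bundle written by predict() above is a flat key: value listing. Only the keys come from the code; the bracketed values are placeholders, not real paths:

program: <name of the uploaded submission file>
input: <name of the phase's input_data bundle, omitted if empty>
stdout: <submission_stdout_filename(submission)>
stderr: <submission_stderr_filename(submission)>
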
Example #3
File: tasks.py Project: wguo123/codalab
def predict(submission, job_id):
    """
    Dispatches the prediction task for the given submission to an appropriate compute worker.

    submission: The CompetitionSubmission object.
    job_id: The job ID used to track the progress of the evaluation.
    """
    # Generate metadata-only bundle describing the computation
    lines = []
    program_value = submission.file.name

    if len(program_value) > 0:
        lines.append("program: %s" % program_value)
    else:
        raise ValueError("Program is missing.")
    input_value = submission.phase.input_data.name

    logger.info("Running prediction")

    if len(input_value) > 0:
        lines.append("input: %s" % input_value)
    lines.append("stdout: %s" % submission_stdout_filename(submission))
    lines.append("stderr: %s" % submission_stderr_filename(submission))
    submission.prediction_runfile.save('run.txt', ContentFile('\n'.join(lines)))
    # Create stdout.txt & stderr.txt
    username = submission.participant.user.username
    lines = ["Standard output for submission #{0} by {1}.".format(submission.submission_number, username), ""]
    submission.stdout_file.save('stdout.txt', ContentFile('\n'.join(lines)))
    submission.prediction_stdout_file.save('prediction_stdout_file.txt', ContentFile('\n'.join(lines)))
    lines = ["Standard error for submission #{0} by {1}.".format(submission.submission_number, username), ""]
    submission.stderr_file.save('stderr.txt', ContentFile('\n'.join(lines)))
    submission.prediction_stderr_file.save('prediction_stderr_file.txt', ContentFile('\n'.join(lines)))

    # Store workflow state
    submission.execution_key = json.dumps({'predict' : job_id})
    submission.save()
    # Submit the request to the computation service
    body = json.dumps({
        "id" : job_id,
        "task_type": "run",
        "task_args": {
            "bundle_id": submission.prediction_runfile.name,
            "container_name": settings.BUNDLE_AZURE_CONTAINER,
            "reply_to": settings.SBS_RESPONSE_QUEUE,
            "execution_time_limit": submission.phase.execution_time_limit,
            "predict": True,
        }
    })

    getQueue(settings.SBS_COMPUTE_QUEUE).send_message(body)
    # Update the submission object
    _set_submission_status(submission.id, CompetitionSubmissionStatus.SUBMITTED)
Example #4
File: tasks.py Project: wguo123/codalab
def score(submission, job_id):
    """
    Dispatches the scoring task for the given submission to an appropriate compute worker.

    submission: The CompetitionSubmission object.
    job_id: The job ID used to track the progress of the evaluation.
    """
    # Loads the computation state.
    state = {}
    if len(submission.execution_key) > 0:
        state = json.loads(submission.execution_key)
    has_generated_predictions = 'predict' in state

    # Generate metadata-only bundle describing the history of submissions and phases
    last_submissions = CompetitionSubmission.objects.filter(
        participant=submission.participant,
        status__codename=CompetitionSubmissionStatus.FINISHED
    ).order_by('-submitted_at')


    lines = []
    lines.append("description: history of all previous successful runs output files")

    if last_submissions:
        for past_submission in last_submissions:
            if past_submission.pk != submission.pk:
                # Pad folder numbers so they sort correctly on the OS side: 001, 002, 003, ... 010, etc.
                past_submission_phasenumber = '%03d' % past_submission.phase.phasenumber
                past_submission_number = '%03d' % past_submission.submission_number
                lines.append('%s/%s/output/: %s' % (
                        past_submission_phasenumber,
                        past_submission_number,
                        submission_private_output_filename(past_submission),
                    )
                )

    submission.history_file.save('history.txt', ContentFile('\n'.join(lines)))

    score_csv = submission.phase.competition.get_results_csv(submission.phase.pk)
    submission.scores_file.save('scores.txt', ContentFile(score_csv))

    # Extra submission info
    coopetition_zip_buffer = StringIO.StringIO()
    coopetition_zip_file = zipfile.ZipFile(coopetition_zip_buffer, "w")

    for phase in submission.phase.competition.phases.all():
        coopetition_field_names = (
            "participant__user__username",
            "pk",
            "when_made_public",
            "when_unmade_public",
            "started_at",
            "completed_at",
            "download_count",
            "submission_number",
        )
        annotated_submissions = phase.submissions.filter(status__codename=CompetitionSubmissionStatus.FINISHED).values(
            *coopetition_field_names
        ).annotate(like_count=Count("likes"), dislike_count=Count("dislikes"))

        # Add these fields after fetching the annotated counts from the db
        coopetition_field_names += ("like_count", "dislike_count")

        coopetition_csv = StringIO.StringIO()
        writer = csv.DictWriter(coopetition_csv, coopetition_field_names)
        writer.writeheader()
        for row in annotated_submissions:
            writer.writerow(row)

        coopetition_zip_file.writestr('coopetition_phase_%s.txt' % phase.phasenumber, coopetition_csv.getvalue())

    # Scores metadata
    for phase in submission.phase.competition.phases.all():
        coopetition_zip_file.writestr(
            'coopetition_scores_phase_%s.txt' % phase.phasenumber,
            phase.competition.get_results_csv(phase.pk, include_scores_not_on_leaderboard=True)
        )

    # Download metadata
    coopetition_downloads_csv = StringIO.StringIO()
    writer = csv.writer(coopetition_downloads_csv)
    writer.writerow((
        "submission_pk",
        "submission_owner",
        "downloaded_by",
        "time_of_download",
    ))
    for download in DownloadRecord.objects.filter(submission__phase__competition=submission.phase.competition):
        writer.writerow((
            download.submission.pk,
            download.submission.participant.user.username,
            download.user.username,
            str(download.timestamp),
        ))

    coopetition_zip_file.writestr('coopetition_downloads.txt', coopetition_downloads_csv.getvalue())

    coopetition_zip_file.close()
    submission.coopetition_file.save('coopetition.zip', ContentFile(coopetition_zip_buffer.getvalue()))

    # Generate metadata-only bundle describing the inputs. Reference data is an optional
    # dataset provided by the competition organizer. Results are provided by the participant
    # either indirectly (has_generated_predictions is True, i.e. the participant provides a
    # program which is run to generate results) or directly (the participant uploads results).
    lines = []
    ref_value = submission.phase.reference_data.name
    if len(ref_value) > 0:
        lines.append("ref: %s" % ref_value)
    res_value = submission.prediction_output_file.name if has_generated_predictions else submission.file.name
    if len(res_value) > 0:
        lines.append("res: %s" % res_value)
    else:
        raise ValueError("Results are missing.")

    lines.append("history: %s" % submission_history_file_name(submission))
    lines.append("scores: %s" % submission_scores_file_name(submission))
    lines.append("coopetition: %s" % submission_coopetition_file_name(submission))
    lines.append("submitted-by: %s" % submission.participant.user.username)
    lines.append("submitted-at: %s" % submission.submitted_at.replace(microsecond=0).isoformat())
    lines.append("competition-submission: %s" % submission.submission_number)
    lines.append("competition-phase: %s" % submission.phase.phasenumber)
    is_automatic_submission = False
    if submission.phase.auto_migration:
        # If this phase has auto_migration and this submission is the first in the phase, it is an automatic submission!
        submissions_this_phase = CompetitionSubmission.objects.filter(
            phase=submission.phase,
            participant=submission.participant
        ).count()
        is_automatic_submission = submissions_this_phase == 1

    lines.append("automatic-submission: %s" % is_automatic_submission)
    submission.inputfile.save('input.txt', ContentFile('\n'.join(lines)))


    # Generate metadata-only bundle describing the computation.
    lines = []
    program_value = submission.phase.scoring_program.name
    if len(program_value) > 0:
        lines.append("program: %s" % program_value)
    else:
        raise ValueError("Program is missing.")
    lines.append("input: %s" % submission.inputfile.name)
    lines.append("stdout: %s" % submission_stdout_filename(submission))
    lines.append("stderr: %s" % submission_stderr_filename(submission))
    submission.runfile.save('run.txt', ContentFile('\n'.join(lines)))

    # Create stdout.txt & stderr.txt
    if not has_generated_predictions:
        username = submission.participant.user.username
        lines = ["Standard output for submission #{0} by {1}.".format(submission.submission_number, username), ""]
        submission.stdout_file.save('stdout.txt', ContentFile('\n'.join(lines)))
        lines = ["Standard error for submission #{0} by {1}.".format(submission.submission_number, username), ""]
        submission.stderr_file.save('stderr.txt', ContentFile('\n'.join(lines)))
    # Update workflow state
    state['score'] = job_id
    submission.execution_key = json.dumps(state)
    submission.save()
    # Submit the request to the computation service
    body = json.dumps({
        "id" : job_id,
        "task_type": "run",
        "task_args": {
            "bundle_id" : submission.runfile.name,
            "container_name" : settings.BUNDLE_AZURE_CONTAINER,
            "reply_to" : settings.SBS_RESPONSE_QUEUE,
            "execution_time_limit": submission.phase.execution_time_limit,
            "predict": False,
        }
    })
    getQueue(settings.SBS_COMPUTE_QUEUE).send_message(body)
    if not has_generated_predictions:
        _set_submission_status(submission.id, CompetitionSubmissionStatus.SUBMITTED)
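
A quick aside on the zero-padded folder numbers written into history.txt: padding with '%03d' makes plain string sorting on the worker side agree with numeric order, as this short interpreter sketch shows (illustration only, not from the examples):

>>> sorted(['10/2/output/', '2/1/output/'])           # unpadded: 10 sorts before 2
['10/2/output/', '2/1/output/']
>>> sorted(['010/002/output/', '002/001/output/'])    # padded with '%03d'
['002/001/output/', '010/002/output/']
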
Example #5
File: tasks.py Project: dkmisra/codalab
def score(submission, job_id):
    """
    Dispatches the scoring task for the given submission to an appropriate compute worker.

    submission: The CompetitionSubmission object.
    job_id: The job ID used to track the progress of the evaluation.
    """
    # Loads the computation state.
    state = {}
    if len(submission.execution_key) > 0:
        state = json.loads(submission.execution_key)
    has_generated_predictions = 'predict' in state

    # Generate metadata-only bundle describing the history of submissions and phases
    last_submissions = CompetitionSubmission.objects.filter(
        participant=submission.participant,
        status__codename=CompetitionSubmissionStatus.FINISHED
    ).order_by('-submitted_at')


    lines = []
    lines.append("description: history of all previous successful runs output files")

    if last_submissions:
        for past_submission in last_submissions:
            if past_submission.pk != submission.pk:
                # Pad folder numbers so they sort correctly on the OS side: 001, 002, 003, ... 010, etc.
                past_submission_phasenumber = '%03d' % past_submission.phase.phasenumber
                past_submission_number = '%03d' % past_submission.submission_number
                lines.append('%s/%s/output/: %s' % (
                        past_submission_phasenumber,
                        past_submission_number,
                        submission_private_output_filename(past_submission),
                    )
                )

    submission.history_file.save('history.txt', ContentFile('\n'.join(lines)))
    submission.scores_file.save('scores.txt', ContentFile(submission.phase.competition.get_results_csv(submission.phase.pk)))

    # Generate metadata-only bundle describing the inputs. Reference data is an optional
    # dataset provided by the competition organizer. Results are provided by the participant
    # either indirectly (has_generated_predictions is True, i.e. the participant provides a
    # program which is run to generate results) or directly (the participant uploads results).
    lines = []
    ref_value = submission.phase.reference_data.name
    if len(ref_value) > 0:
        lines.append("ref: %s" % ref_value)
    res_value = submission.prediction_output_file.name if has_generated_predictions else submission.file.name
    if len(res_value) > 0:
        lines.append("res: %s" % res_value)
    else:
        raise ValueError("Results are missing.")

    lines.append("history: %s" % submission_history_file_name(submission))
    lines.append("scores: %s" % submission_scores_file_name(submission))
    lines.append("submitted-by: %s" % submission.participant.user.username)
    lines.append("submitted-at: %s" % submission.submitted_at.replace(microsecond=0).isoformat())
    lines.append("competition-submission: %s" % submission.submission_number)
    lines.append("competition-phase: %s" % submission.phase.phasenumber)
    is_automatic_submission = False
    if submission.phase.auto_migration:
        # If this phase has auto_migration and this submission is the first in the phase, it is an automatic submission!
        submissions_this_phase = CompetitionSubmission.objects.filter(
            phase=submission.phase,
            participant=submission.participant
        ).count()
        is_automatic_submission = submissions_this_phase == 1

    lines.append("automatic-submission: %s" % is_automatic_submission)
    submission.inputfile.save('input.txt', ContentFile('\n'.join(lines)))


    # Generate metadata-only bundle describing the computation.
    lines = []
    program_value = submission.phase.scoring_program.name
    if len(program_value) > 0:
        lines.append("program: %s" % program_value)
    else:
        raise ValueError("Program is missing.")
    lines.append("input: %s" % submission.inputfile.name)
    lines.append("stdout: %s" % submission_stdout_filename(submission))
    lines.append("stderr: %s" % submission_stderr_filename(submission))
    submission.runfile.save('run.txt', ContentFile('\n'.join(lines)))

    # Create stdout.txt & stderr.txt
    if not has_generated_predictions:
        username = submission.participant.user.username
        lines = ["Standard output for submission #{0} by {1}.".format(submission.submission_number, username), ""]
        submission.stdout_file.save('stdout.txt', ContentFile('\n'.join(lines)))
        lines = ["Standard error for submission #{0} by {1}.".format(submission.submission_number, username), ""]
        submission.stderr_file.save('stderr.txt', ContentFile('\n'.join(lines)))
    # Update workflow state
    state['score'] = job_id
    submission.execution_key = json.dumps(state)
    submission.save()
    # Submit the request to the computation service
    body = json.dumps({
        "id" : job_id,
        "task_type": "run",
        "task_args": {
            "bundle_id" : submission.runfile.name,
            "container_name" : settings.BUNDLE_AZURE_CONTAINER,
            "reply_to" : settings.SBS_RESPONSE_QUEUE,
            "execution_time_limit": submission.phase.execution_time_limit,
            "predict": False,
        }
    })
    getQueue(settings.SBS_COMPUTE_QUEUE).send_message(body)
    if not has_generated_predictions:
        _set_submission_status(submission.id, CompetitionSubmissionStatus.SUBMITTED)
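
For reference, the input.txt bundle produced by this variant of score() ends up as a flat key: value listing along these lines. The values shown are invented placeholders; only the keys and their order come from the code above:

ref: <reference_data bundle name, omitted if empty>
res: <prediction output bundle, or the uploaded results file>
history: <submission_history_file_name(submission)>
scores: <submission_scores_file_name(submission)>
submitted-by: alice
submitted-at: 2015-01-01T12:00:00
competition-submission: 3
competition-phase: 2
automatic-submission: False
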
Example #6
def score(submission, job_id):
    """
    Dispatches the scoring task for the given submission to an appropriate compute worker.

    submission: The CompetitionSubmission object.
    job_id: The job ID used to track the progress of the evaluation.
    """
    # Loads the computation state.
    state = {}
    if len(submission.execution_key) > 0:
        state = json.loads(submission.execution_key)
    has_generated_predictions = 'predict' in state

    # Generate metadata-only bundle describing the history of submissions and phases
    last_submissions = CompetitionSubmission.objects.filter(
        participant=submission.participant,
        status__codename=CompetitionSubmissionStatus.FINISHED).order_by(
            '-submitted_at')

    lines = []
    lines.append(
        "description: history of all previous successful runs output files")

    if last_submissions:
        for past_submission in last_submissions:
            if past_submission.pk != submission.pk:
                # Pad folder numbers so they sort correctly on the OS side: 001, 002, 003, ... 010, etc.
                past_submission_phasenumber = '%03d' % past_submission.phase.phasenumber
                past_submission_number = '%03d' % past_submission.submission_number
                lines.append('%s/%s/output/: %s' % (
                    past_submission_phasenumber,
                    past_submission_number,
                    submission_private_output_filename(past_submission),
                ))

    submission.history_file.save('history.txt', ContentFile('\n'.join(lines)))

    score_csv = submission.phase.competition.get_results_csv(
        submission.phase.pk)
    submission.scores_file.save('scores.txt', ContentFile(score_csv))

    # Extra submission info
    coopetition_zip_buffer = StringIO.StringIO()
    coopetition_zip_file = zipfile.ZipFile(coopetition_zip_buffer, "w")

    for phase in submission.phase.competition.phases.all():
        coopetition_field_names = (
            "participant__user__username",
            "pk",
            "when_made_public",
            "when_unmade_public",
            "started_at",
            "completed_at",
            "download_count",
            "submission_number",
        )
        annotated_submissions = phase.submissions.filter(
            status__codename=CompetitionSubmissionStatus.FINISHED).values(
                *coopetition_field_names).annotate(
                    like_count=Count("likes"), dislike_count=Count("dislikes"))

        # Add these fields after fetching the annotated counts from the db
        coopetition_field_names += ("like_count", "dislike_count")

        coopetition_csv = StringIO.StringIO()
        writer = csv.DictWriter(coopetition_csv, coopetition_field_names)
        writer.writeheader()
        for row in annotated_submissions:
            writer.writerow(row)

        coopetition_zip_file.writestr(
            'coopetition_phase_%s.txt' % phase.phasenumber,
            coopetition_csv.getvalue().encode('utf-8'))

    # Scores metadata
    for phase in submission.phase.competition.phases.all():
        coopetition_zip_file.writestr(
            'coopetition_scores_phase_%s.txt' % phase.phasenumber,
            phase.competition.get_results_csv(
                phase.pk,
                include_scores_not_on_leaderboard=True).encode('utf-8'))

    # Download metadata
    coopetition_downloads_csv = StringIO.StringIO()
    writer = csv.writer(coopetition_downloads_csv)
    writer.writerow((
        "submission_pk",
        "submission_owner",
        "downloaded_by",
        "time_of_download",
    ))
    for download in DownloadRecord.objects.filter(
            submission__phase__competition=submission.phase.competition):
        writer.writerow((
            download.submission.pk,
            download.submission.participant.user.username,
            download.user.username,
            str(download.timestamp),
        ))

    coopetition_zip_file.writestr(
        'coopetition_downloads.txt',
        coopetition_downloads_csv.getvalue().encode('utf-8'))

    # Current user
    coopetition_zip_file.writestr(
        'current_user.txt',
        submission.participant.user.username.encode('utf-8'))
    coopetition_zip_file.close()

    # Save them all
    submission.coopetition_file.save(
        'coopetition.zip', ContentFile(coopetition_zip_buffer.getvalue()))

    # Generate metadata-only bundle describing the inputs. Reference data is an optional
    # dataset provided by the competition organizer. Results are provided by the participant
    # either indirectly (has_generated_predictions is True, i.e. the participant provides a
    # program which is run to generate results) or directly (the participant uploads results).
    lines = []
    ref_value = submission.phase.reference_data.name
    if len(ref_value) > 0:
        lines.append("ref: %s" % ref_value)
    res_value = submission.prediction_output_file.name if has_generated_predictions else submission.file.name
    if len(res_value) > 0:
        lines.append("res: %s" % res_value)
    else:
        raise ValueError("Results are missing.")

    lines.append("history: %s" % submission_history_file_name(submission))
    lines.append("scores: %s" % submission_scores_file_name(submission))
    lines.append("coopetition: %s" %
                 submission_coopetition_file_name(submission))
    lines.append("submitted-by: %s" % submission.participant.user.username)
    lines.append("submitted-at: %s" %
                 submission.submitted_at.replace(microsecond=0).isoformat())
    lines.append("competition-submission: %s" % submission.submission_number)
    lines.append("competition-phase: %s" % submission.phase.phasenumber)
    is_automatic_submission = False
    if submission.phase.auto_migration:
        # If this phase has auto_migration and this submission is the first in the phase, it is an automatic submission!
        submissions_this_phase = CompetitionSubmission.objects.filter(
            phase=submission.phase,
            participant=submission.participant).count()
        is_automatic_submission = submissions_this_phase == 1

    lines.append("automatic-submission: %s" % is_automatic_submission)
    submission.inputfile.save('input.txt', ContentFile('\n'.join(lines)))

    # Generate metadata-only bundle describing the computation.
    lines = []
    program_value = submission.phase.scoring_program.name
    if len(program_value) > 0:
        lines.append("program: %s" % program_value)
    else:
        raise ValueError("Program is missing.")
    lines.append("input: %s" % submission.inputfile.name)
    lines.append("stdout: %s" % submission_stdout_filename(submission))
    lines.append("stderr: %s" % submission_stderr_filename(submission))
    submission.runfile.save('run.txt', ContentFile('\n'.join(lines)))

    # Create stdout.txt & stderr.txt
    if not has_generated_predictions:
        username = submission.participant.user.username
        lines = [
            "Standard output for submission #{0} by {1}.".format(
                submission.submission_number, username), ""
        ]
        submission.stdout_file.save('stdout.txt',
                                    ContentFile('\n'.join(lines)))
        lines = [
            "Standard error for submission #{0} by {1}.".format(
                submission.submission_number, username), ""
        ]
        submission.stderr_file.save('stderr.txt',
                                    ContentFile('\n'.join(lines)))
    # Update workflow state
    state['score'] = job_id
    submission.execution_key = json.dumps(state)
    submission.save()
    # Submit the request to the computation service
    body = json.dumps({
        "id": job_id,
        "task_type": "run",
        "task_args": {
            "bundle_id": submission.runfile.name,
            "container_name": settings.BUNDLE_AZURE_CONTAINER,
            "reply_to": settings.SBS_RESPONSE_QUEUE,
            "execution_time_limit": submission.phase.execution_time_limit,
            "predict": False,
        }
    })
    getQueue(settings.SBS_COMPUTE_QUEUE).send_message(body)
    if not has_generated_predictions:
        _set_submission_status(submission.id,
                               CompetitionSubmissionStatus.SUBMITTED)
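
One note on the in-memory zip used for the coopetition bundle: StringIO.StringIO marks this as Python 2 code. Under Python 3 the same pattern would use io.BytesIO for the archive and io.StringIO for the CSV buffers, roughly as in this sketch (an assumption about a port, not CodaLab's actual code):

import io
import zipfile

zip_buffer = io.BytesIO()                  # binary buffer backing the archive
with zipfile.ZipFile(zip_buffer, "w") as zip_file:
    csv_buffer = io.StringIO()             # text buffer for csv.writer(...)
    # ... build the CSV content exactly as in the example above ...
    zip_file.writestr("coopetition_downloads.txt",
                      csv_buffer.getvalue().encode("utf-8"))
zip_bytes = zip_buffer.getvalue()          # what ContentFile(...) would wrap
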
Example #7
File: tasks.py Project: v-bech/codalab
def score(submission, job_id):
    """
    Dispatches the scoring task for the given submission to an appropriate compute worker.

    submission: The CompetitionSubmission object.
    job_id: The job ID used to track the progress of the evaluation.
    """
    # Loads the computation state.
    state = {}
    if len(submission.execution_key) > 0:
        state = json.loads(submission.execution_key)
    has_generated_predictions = 'predict' in state

    # Generate metadata-only bundle describing the history of submissions and phases
    last_submissions = CompetitionSubmission.objects.filter(
        participant=submission.participant,
        status__codename=CompetitionSubmissionStatus.FINISHED).order_by(
            '-submitted_at')

    lines = []
    lines.append(
        "description: history of all previous successful runs output files")

    if last_submissions:
        for past_submission in last_submissions:
            if past_submission.pk != submission.pk:
                # Pad folder numbers so they sort correctly on the OS side: 001, 002, 003, ... 010, etc.
                past_submission_phasenumber = '%03d' % past_submission.phase.phasenumber
                past_submission_number = '%03d' % past_submission.submission_number
                lines.append('%s/%s/output/: %s' % (
                    past_submission_phasenumber,
                    past_submission_number,
                    submission_private_output_filename(past_submission),
                ))

    submission.history_file.save('history.txt', ContentFile('\n'.join(lines)))

    # Generate metadata-only bundle describing the inputs. Reference data is an optional
    # dataset provided by the competition organizer. Results are provided by the participant
    # either indirectly (has_generated_predictions is True, i.e. the participant provides a
    # program which is run to generate results) or directly (the participant uploads results).
    lines = []
    ref_value = submission.phase.reference_data.name
    if len(ref_value) > 0:
        lines.append("ref: %s" % ref_value)
    res_value = submission.prediction_output_file.name if has_generated_predictions else submission.file.name
    if len(res_value) > 0:
        lines.append("res: %s" % res_value)
    else:
        raise ValueError("Results are missing.")

    lines.append("history: %s" % submission_history_file_name(submission))
    lines.append("submitted-by: %s" % submission.participant.user.username)
    lines.append("submitted-at: %s" %
                 submission.submitted_at.replace(microsecond=0).isoformat())
    lines.append("competition-submission: %s" % submission.submission_number)
    lines.append("competition-phase: %s" % submission.phase.phasenumber)
    is_automatic_submission = False
    if submission.phase.auto_migration:
        # If this phase has auto_migration and this submission is the first in the phase, it is an automatic submission!
        submissions_this_phase = CompetitionSubmission.objects.filter(
            phase=submission.phase,
            participant=submission.participant).count()
        is_automatic_submission = submissions_this_phase == 1

    lines.append("automatic-submission: %s" % is_automatic_submission)
    submission.inputfile.save('input.txt', ContentFile('\n'.join(lines)))

    # Generate metadata-only bundle describing the computation.
    lines = []
    program_value = submission.phase.scoring_program.name
    if len(program_value) > 0:
        lines.append("program: %s" % program_value)
    else:
        raise ValueError("Program is missing.")
    lines.append("input: %s" % submission.inputfile.name)
    lines.append("stdout: %s" % submission_stdout_filename(submission))
    lines.append("stderr: %s" % submission_stderr_filename(submission))
    submission.runfile.save('run.txt', ContentFile('\n'.join(lines)))

    # Create stdout.txt & stderr.txt
    if not has_generated_predictions:
        username = submission.participant.user.username
        lines = [
            "Standard output for submission #{0} by {1}.".format(
                submission.submission_number, username), ""
        ]
        submission.stdout_file.save('stdout.txt',
                                    ContentFile('\n'.join(lines)))
        lines = [
            "Standard error for submission #{0} by {1}.".format(
                submission.submission_number, username), ""
        ]
        submission.stderr_file.save('stderr.txt',
                                    ContentFile('\n'.join(lines)))
    # Update workflow state
    state['score'] = job_id
    submission.execution_key = json.dumps(state)
    submission.save()
    # Submit the request to the computation service
    body = json.dumps({
        "id": job_id,
        "task_type": "run",
        "task_args": {
            "bundle_id": submission.runfile.name,
            "container_name": settings.BUNDLE_AZURE_CONTAINER,
            "reply_to": settings.SBS_RESPONSE_QUEUE,
            "execution_time_limit": submission.phase.execution_time_limit
        }
    })
    getQueue(settings.SBS_COMPUTE_QUEUE).send_message(body)
    if not has_generated_predictions:
        _set_submission_status(submission.id,
                               CompetitionSubmissionStatus.SUBMITTED)