def parallel_score(submissionId, evaluation, dry_run):
    submission = syn.getSubmission(submissionId)
    status = syn.getSubmissionStatus(submissionId)
    logFile = open(os.path.join(LOG_DIR, status['id'] + "_log.txt"), 'w')
    status.status = "INVALID"

    try:
        score, message = conf.score_submission(evaluation, submission)

        logFile.write("scored: %s %s %s %s" % (submission.id,submission.name,submission.userId,str(score)))
        logFile.flush()
        ## fill in team in submission status annotations
        if 'teamId' in submission:
            team = syn.restGET('/team/{id}'.format(id=submission.teamId))
            if 'name' in team:
                score['team'] = team['name']
            else:
                score['team'] = submission.teamId
        elif 'userId' in submission:
            profile = syn.getUserProfile(submission.userId)
            score['team'] = get_user_name(profile)
        else:
            score['team'] = '?'
        add_annotations = synapseclient.annotations.to_submission_status_annotations(score, is_private=True)
        status = update_single_submission_status(status, add_annotations)

        status.status = "SCORED"
        ### TODO: add DATE as a public annotation and make the team annotation public
        ## if there's a table configured, update it
        if not dry_run and evaluation.id in conf.leaderboard_tables:
            update_leaderboard_table(conf.leaderboard_tables[evaluation.id], submission, fields=score, dry_run=False)

    except Exception as ex1:
        logFile.write('\n\nError scoring submission %s %s:\n' % (submission.name, submission.id))
        st = StringIO()
        traceback.print_exc(file=st)
        # sys.stderr.write(st.getvalue())
        # sys.stderr.write('\n')
        message = st.getvalue()
        logFile.write(message)
        logFile.flush()

        if conf.ADMIN_USER_IDS:
            submission_info = "submission id: %s\nsubmission name: %s\nsubmitted by user id: %s\n\n" % (submission.id, submission.name, submission.userId)
            messages.error_notification(userIds=conf.ADMIN_USER_IDS, message=submission_info+message)

    if not dry_run:
        status = syn.store(status)

    ## send message AFTER storing status to ensure we don't get repeat messages
    profile = syn.getUserProfile(submission.userId)

    if status.status == 'SCORED':
        messages.scoring_succeeded(
            userIds=[submission.userId],
            message=message,
            username=get_user_name(profile),
            queue_name=evaluation.name,
            submission_name=submission.name,
            submission_id=submission.id)
    else:
        messages.scoring_error(
            userIds=conf.ADMIN_USER_IDS,
            message=message,
            username="******",
            queue_name=evaluation.name,
            submission_name=submission.name,
            submission_id=submission.id)

    #sys.stdout.write('\n')

    logFile.close()
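
A hedged driver sketch, not part of the original source: fan validated
submissions out to worker processes with multiprocessing.Pool. It assumes the
module-level syn client and the parallel_score function above, and that the
worker processes inherit (or re-create) an authenticated Synapse session.

from multiprocessing import Pool

def score_all_parallel(evaluation, dry_run=False, processes=4):
    submission_ids = [submission.id for submission, _status in
                      syn.getSubmissionBundles(evaluation, status='VALIDATED')]
    pool = Pool(processes=processes)
    results = [pool.apply_async(parallel_score, (sub_id, evaluation, dry_run))
               for sub_id in submission_ids]
    pool.close()
    pool.join()
    for result in results:
        result.get()  # re-raise any worker exception in the parent
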
def score(evaluation, dry_run=False):

    if not isinstance(evaluation, Evaluation):
        evaluation = syn.getEvaluation(evaluation)

    print '\n\nScoring ', evaluation.id, evaluation.name
    print "-" * 60
    sys.stdout.flush()

    for submission, status in syn.getSubmissionBundles(evaluation, status='VALIDATED'):

        status.status = "INVALID"

        ## refetch the submission so that we get the file path
        ## to be later replaced by a "downloadFiles" flag on getSubmissionBundles
        submission = syn.getSubmission(submission)

        try:
            score, message = conf.score_submission(evaluation, submission)

            print "scored:", submission.id, submission.name, submission.userId, score

            ## fill in team in submission status annotations
            if 'teamId' in submission:
                team = syn.restGET('/team/{id}'.format(id=submission.teamId))
                if 'name' in team:
                    score['team'] = team['name']
                else:
                    score['team'] = submission.teamId
            elif 'userId' in submission:
                profile = syn.getUserProfile(submission.userId)
                score['team'] = get_user_name(profile)
            else:
                score['team'] = '?'
            add_annotations = synapseclient.annotations.to_submission_status_annotations(score, is_private=True)
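            ## merge into any existing annotations rather than overwriting
            ## status.annotations wholesale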
            for i in add_annotations:
                if status.annotations.get(i) is not None:
                    status.annotations[i].extend(add_annotations[i])
                else:
                    status.annotations[i] = add_annotations[i]
            #status.annotations = synapseclient.annotations.to_submission_status_annotations(score,is_private=True)
            status.status = "SCORED"
            ### TODO: add DATE as a public annotation and make the team annotation public
            ## if there's a table configured, update it
            if not dry_run and evaluation.id in conf.leaderboard_tables:
                update_leaderboard_table(conf.leaderboard_tables[evaluation.id], submission, fields=score, dry_run=False)

        except Exception as ex1:
            sys.stderr.write('\n\nError scoring submission %s %s:\n' % (submission.name, submission.id))
            st = StringIO()
            traceback.print_exc(file=st)
            sys.stderr.write(st.getvalue())
            sys.stderr.write('\n')
            message = st.getvalue()

            if conf.ADMIN_USER_IDS:
                submission_info = "submission id: %s\nsubmission name: %s\nsubmitted by user id: %s\n\n" % (submission.id, submission.name, submission.userId)
                messages.error_notification(userIds=conf.ADMIN_USER_IDS, message=submission_info+st.getvalue())

        if not dry_run:
            status = syn.store(status)

        ## send message AFTER storing status to ensure we don't get repeat messages
        profile = syn.getUserProfile(submission.userId)

        if status.status == 'SCORED':
            messages.scoring_succeeded(
                userIds=[submission.userId],
                message=message,
                username=get_user_name(profile),
                queue_name=evaluation.name,
                submission_name=submission.name,
                submission_id=submission.id)
        else:
            messages.scoring_error(
                userIds=[submission.userId],
                message=message,
                username=get_user_name(profile),
                queue_name=evaluation.name,
                submission_name=submission.name,
                submission_id=submission.id)

    sys.stdout.write('\n')
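
The update_single_submission_status helper called in parallel_score above is
not shown on this page. A minimal sketch consistent with the inline merge loop
in score() above (an assumption; the real helper also appears to accept a
force argument):

def update_single_submission_status(status, add_annotations):
    ## merge new annotation lists into any existing ones, keyed by annotation
    ## type (e.g. stringAnnos, longAnnos, doubleAnnos)
    existing = status.get('annotations', {})
    for anno_type in add_annotations:
        if existing.get(anno_type) is not None:
            existing[anno_type].extend(add_annotations[anno_type])
        else:
            existing[anno_type] = add_annotations[anno_type]
    status.annotations = existing
    return status
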
def score(evaluation, dry_run=False):

    if not isinstance(evaluation, Evaluation):
        evaluation = syn.getEvaluation(evaluation)

    print '\n\nScoring ', evaluation.id, evaluation.name
    print "-" * 60
    sys.stdout.flush()

    for submission, status in syn.getSubmissionBundles(evaluation, status='VALIDATED'):

        status.status = "INVALID"

        ## refetch the submission so that we get the file path
        ## to be later replaced by a "downloadFiles" flag on getSubmissionBundles
        submission = syn.getSubmission(submission)

        try:
            score, message = conf.score_submission(evaluation, submission)

            print "scored:", submission.id, submission.name, submission.userId, score

            ## fill in team in submission status annotations
            if 'teamId' in submission:
                team = syn.restGET('/team/{id}'.format(id=submission.teamId))
                if 'name' in team:
                    score['team'] = team['name']
                else:
                    score['team'] = submission.teamId
            elif 'userId' in submission:
                profile = syn.getUserProfile(submission.userId)
                score['team'] = get_user_name(profile)
            else:
                score['team'] = '?'

            status.annotations = synapseclient.annotations.to_submission_status_annotations(score, is_private=True)
            status.status = "SCORED"
            ## if there's a table configured, update it
            if not dry_run and evaluation.id in conf.leaderboard_tables:
                update_leaderboard_table(conf.leaderboard_tables[evaluation.id], submission, fields=score, dry_run=False)

        except Exception as ex1:
            sys.stderr.write('\n\nError scoring submission %s %s:\n' % (submission.name, submission.id))
            st = StringIO()
            traceback.print_exc(file=st)
            sys.stderr.write(st.getvalue())
            sys.stderr.write('\n')
            message = st.getvalue()

            if conf.ADMIN_USER_IDS:
                submission_info = "submission id: %s\nsubmission name: %s\nsubmitted by user id: %s\n\n" % (submission.id, submission.name, submission.userId)
                messages.error_notification(userIds=conf.ADMIN_USER_IDS, message=submission_info+st.getvalue())

        if not dry_run:
            status = syn.store(status)

        ## send message AFTER storing status to ensure we don't get repeat messages
        profile = syn.getUserProfile(submission.userId)

        if status.status == 'SCORED':
            messages.scoring_succeeded(
                userIds=[submission.userId],
                message=message,
                username=get_user_name(profile),
                queue_name=evaluation.name,
                submission_name=submission.name,
                submission_id=submission.id)
        else:
            messages.scoring_failed(
                userIds=[submission.userId],
                message=message,
                username=get_user_name(profile),
                queue_name=evaluation.name,
                submission_name=submission.name,
                submission_id=submission.id)

    sys.stdout.write('\n')
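
Example #6 below calls a getTeam helper that is not shown on this page. A
sketch that simply factors out the team-lookup block repeated inline in the
examples above:

def getTeam(syn, submission):
    if 'teamId' in submission:
        team = syn.restGET('/team/{id}'.format(id=submission.teamId))
        return team.get('name', submission.teamId)
    elif 'userId' in submission:
        return get_user_name(syn.getUserProfile(submission.userId))
    return '?'
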
Example #4
def parallel_run(submissionId,
                 evaluation,
                 syn,
                 canCancel,
                 userName,
                 password,
                 dry_run=False):
    client = docker.from_env()
    client.login(userName, password, registry="http://docker.synapse.org")
    submission = syn.getSubmission(submissionId)
    status = syn.getSubmissionStatus(submissionId)
    logFile = open(os.path.join(LOG_DIR, status['id'] + "_log.txt"), 'w')
    if canCancel:
        status.canCancel = True
    status.status = "EVALUATION_IN_PROGRESS"
    startTime = {"RUN_START": int(time.time() * 1000)}
    add_annotations = synapseclient.annotations.to_submission_status_annotations(
        startTime, is_private=True)
    status = update_single_submission_status(status, add_annotations)
    status = syn.store(status)

    status.status = "INVALID"

    ## refetch the submission so that we get the file path
    ## to be later replaced by a "downloadFiles" flag on getSubmissionBundles
    submission = syn.getSubmission(submission)
    # submission_info stays None unless an exception is raised below
    submission_info = None
    try:
        score, message = conf.run_docker(evaluation, submission, syn, client)

        logFile.write(
            "scored: %s %s %s %s" %
            (submission.id, submission.name, submission.userId, str(score)))
        logFile.flush()
        ## fill in team in submission status annotations
        if 'teamId' in submission:
            team = syn.restGET('/team/{id}'.format(id=submission.teamId))
            if 'name' in team:
                score['team'] = team['name']
            else:
                score['team'] = submission.teamId
        elif 'userId' in submission:
            profile = syn.getUserProfile(submission.userId)
            score['team'] = get_user_name(profile)
        else:
            score['team'] = '?'
        score['RUN_END'] = int(time.time() * 1000)

        add_annotations = synapseclient.annotations.to_submission_status_annotations(
            score, is_private=True)
        status = update_single_submission_status(status, add_annotations)
        if score['PREDICTION_FILE'] is None:
            status.status = "INVALID"
        else:
            status.status = "ACCEPTED"
        if not dry_run and evaluation.id in conf.leaderboard_tables:
            update_leaderboard_table(conf.leaderboard_tables[evaluation.id],
                                     submission,
                                     fields=score,
                                     dry_run=False)

    except Exception as ex1:
        logFile.write('\n\nError scoring submission %s %s:\n' %
                      (submission.name, submission.id))
        st = StringIO()
        traceback.print_exc(file=st)
        #sys.stderr.write(st.getvalue())
        #sys.stderr.write('\n')
        message = st.getvalue()
        logFile.write(message)
        logFile.flush()

        if conf.ADMIN_USER_IDS:
            submission_info = "submission id: %s\nsubmission name: %s\nsubmitted by user id: %s\n\n" % (
                submission.id, submission.name, submission.userId)
            messages.error_notification(userIds=conf.ADMIN_USER_IDS,
                                        message=submission_info +
                                        st.getvalue(),
                                        queue_name=evaluation.name)

    if not dry_run:
        status = syn.store(status)

    ## send message AFTER storing status to ensure we don't get repeat messages
    profile = syn.getUserProfile(submission.userId)

    if status.status == 'ACCEPTED':
        messages.scoring_succeeded(userIds=[submission.userId],
                                   message=message,
                                   username=get_user_name(profile),
                                   queue_name=evaluation.name,
                                   submission_name=submission.name,
                                   submission_id=submission.id)
    elif submission_info is None:
        messages.scoring_error(userIds=[submission.userId],
                               message=message,
                               username=get_user_name(profile),
                               queue_name=evaluation.name,
                               submission_name=submission.name,
                               submission_id=submission.id)
Example #5
def score(evaluation, syn, client, canCancel, dry_run=False):

    if not isinstance(evaluation, Evaluation):
        evaluation = syn.getEvaluation(evaluation)

    print '\n\nScoring ', evaluation.id, evaluation.name
    print "-" * 60
    sys.stdout.flush()

    for submission, status in syn.getSubmissionBundles(evaluation,
                                                       status='OPEN'):
        if canCancel:
            status.canCancel = True
        status.status = "EVALUATION_IN_PROGRESS"
        startTime = {"RUN_START": int(time.time() * 1000)}
        add_annotations = synapseclient.annotations.to_submission_status_annotations(
            startTime, is_private=True)
        status = update_single_submission_status(status, add_annotations)
        status = syn.store(status)
        status.status = "INVALID"
        ## refetch the submission so that we get the file path
        ## to be later replaced by a "downloadFiles" flag on getSubmissionBundles
        submission = syn.getSubmission(submission)

        try:
            score, message = conf.run_docker(evaluation, submission, syn,
                                             client)

            print "scored:", submission.id, submission.name, submission.userId, score

            ## fill in team in submission status annotations
            if 'teamId' in submission:
                team = syn.restGET('/team/{id}'.format(id=submission.teamId))
                if 'name' in team:
                    score['team'] = team['name']
                else:
                    score['team'] = submission.teamId
            elif 'userId' in submission:
                profile = syn.getUserProfile(submission.userId)
                score['team'] = get_user_name(profile)
            else:
                score['team'] = '?'
            ## set RUN_END before converting to annotations so it gets stored
            score['RUN_END'] = int(time.time() * 1000)
            add_annotations = synapseclient.annotations.to_submission_status_annotations(
                score, is_private=True)
            status = update_single_submission_status(status, add_annotations)
            if score['PREDICTION_FILE'] is None:
                status.status = "INVALID"
            else:
                ## ACCEPTED rather than SCORED because the docker agent is separate from the scoring harness
                status.status = "ACCEPTED"
            ## if there's a table configured, update it
            if not dry_run and evaluation.id in conf.leaderboard_tables:
                update_leaderboard_table(
                    conf.leaderboard_tables[evaluation.id],
                    submission,
                    fields=score,
                    dry_run=False)

        except Exception as ex1:
            sys.stderr.write('\n\nError scoring submission %s %s:\n' %
                             (submission.name, submission.id))
            st = StringIO()
            traceback.print_exc(file=st)
            sys.stderr.write(st.getvalue())
            sys.stderr.write('\n')
            message = st.getvalue()

            if conf.ADMIN_USER_IDS:
                submission_info = "submission id: %s\nsubmission name: %s\nsubmitted by user id: %s\n\n" % (
                    submission.id, submission.name, submission.userId)
                messages.error_notification(userIds=conf.ADMIN_USER_IDS,
                                            message=submission_info +
                                            st.getvalue())

        if not dry_run:
            status = syn.store(status)

        ## send message AFTER storing status to ensure we don't get repeat messages
        profile = syn.getUserProfile(submission.userId)

        if status.status == 'ACCEPTED':
            messages.scoring_succeeded(userIds=[submission.userId],
                                       message=message,
                                       username=get_user_name(profile),
                                       queue_name=evaluation.name,
                                       submission_name=submission.name,
                                       submission_id=submission.id)
        else:
            messages.scoring_error(userIds=[submission.userId],
                                   message=message,
                                   username=get_user_name(profile),
                                   queue_name=evaluation.name,
                                   submission_name=submission.name,
                                   submission_id=submission.id)

    sys.stdout.write('\n')
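
When canCancel is set, Synapse allows the submitter to request cancellation of
a running submission. A hedged sketch of checking for such a request (this
helper is an assumption and does not appear in the source):

def cancel_requested(syn, submissionId):
    ## a SubmissionStatus carries a cancelRequested flag once canCancel is set
    current_status = syn.getSubmissionStatus(submissionId)
    return bool(current_status.get('cancelRequested', False))
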
Example #6
def run(syn,
        client,
        submissionId,
        configFile,
        challenge_prediction_folder,
        challenge_log_folder,
        output_dir,
        mountedVolumes,
        canCancel,
        timeQuota=None,
        dry_run=False):
    submission = syn.getSubmission(submissionId)
    status = syn.getSubmissionStatus(submissionId)
    evaluation = syn.getEvaluation(submission.evaluationId)
    logFile = open(os.path.join(LOG_DIR, status['id'] + "_log.txt"), 'w')
    # if canCancel:
    #     status.canCancel = True
    # status.status = "EVALUATION_IN_PROGRESS"
    # #Store run time and evaluation in progress
    # startTime = {"RUN_START":int(time.time()*1000)}
    # add_annotations = synapseclient.annotations.to_submission_status_annotations(startTime,is_private=False)
    # status = update_single_submission_status(status, add_annotations, force=True)
    # status = syn.store(status)

    status.status = "INVALID"
    #Create dictionary that mounts the volumes
    volumes = {output_dir: {'bind': '/output', 'mode': 'rw'}}
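    ## each mount uses docker-style bind syntax, e.g. "/home/user/data:/data:ro"
    ## mounts host /home/user/data read-only at /data inside the container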
    for mount in mountedVolumes:
        binds = mount.split(":")
        assert len(binds) == 3, \
            "Mounted volumes must be formatted as /full/path:/mountpoint:ro"
        volumes[binds[0]] = {'bind': binds[1], 'mode': binds[2]}

    with open(configFile, 'r') as config:
        config_evaluations = json.load(config)['config_evaluations']
    score_sh = [
        ev['score_sh'] for ev in config_evaluations
        if ev['id'] == int(evaluation.id)
    ]
    returnLog = [
        ev['returnLog'] for ev in config_evaluations
        if ev['id'] == int(evaluation.id)
    ][0]
    # submission_info stays None unless an exception is raised below
    submission_info = None
    try:
        score, message, exceedTimeQuota = dockerRun(
            syn, client, submission, score_sh, challenge_prediction_folder,
            challenge_log_folder, volumes, output_dir, timeQuota, returnLog)

        logFile.write(
            "scored: %s %s %s %s" %
            (submission.id, submission.name, submission.userId, str(score)))
        logFile.flush()
        score['team'] = getTeam(syn, submission)
        score['RUN_END'] = int(time.time() * 1000)
        if exceedTimeQuota:
            score['FAILURE_REASON'] = "Exceeded Time Quota of %s hours" % str(
                timeQuota / (1000 * 60 * 60.0))

        add_annotations = synapseclient.annotations.to_submission_status_annotations(
            score, is_private=False)
        status = update_single_submission_status(status,
                                                 add_annotations,
                                                 force=True)
        if score['PREDICTION_FILE'] is None:
            status.status = "INVALID"
        else:
            status.status = "ACCEPTED"

    except Exception as ex1:
        logFile.write('\n\nError scoring submission %s %s:\n' %
                      (submission.name, submission.id))
        st = StringIO()
        traceback.print_exc(file=st)
        message = st.getvalue()
        logFile.write(message)
        logFile.flush()

        if ADMIN_USER_IDS:
            submission_info = "submission id: %s\nsubmission name: %s\nsubmitted by user id: %s\n\n" % (
                submission.id, submission.name, submission.userId)
            messages.error_notification(userIds=ADMIN_USER_IDS,
                                        message=submission_info +
                                        st.getvalue(),
                                        queue_name=evaluation.name)

    if not dry_run:
        status = syn.store(status)

    ## send message AFTER storing status to ensure we don't get repeat messages
    profile = syn.getUserProfile(submission.userId)
    if status.status == 'ACCEPTED':
        messages.scoring_succeeded(userIds=[submission.userId],
                                   message=message,
                                   username=get_user_name(profile),
                                   queue_name=evaluation.name,
                                   submission_name=submission.name,
                                   submission_id=submission.id)
    elif submission_info is None:
        messages.scoring_error(userIds=[submission.userId],
                               message=message,
                               username=get_user_name(profile),
                               queue_name=evaluation.name,
                               submission_name=submission.name,
                               submission_id=submission.id)
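
For reference, the configFile parsed in run() above is JSON of roughly this
shape; the field names come from the parsing code, while the id and path are
illustrative:

{
    "config_evaluations": [
        {"id": 9614112, "score_sh": "/score.sh", "returnLog": true}
    ]
}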