def main():

    if conf.CHALLENGE_SYN_ID == "":
        sys.stderr.write("Please configure your challenge. See sample_challenge.py for an example.")

    global syn

    parser = argparse.ArgumentParser()

    parser.add_argument("-u", "--user", help="UserName", default=None)
    parser.add_argument("-p", "--password", help="Password", default=None)
    parser.add_argument("--notifications", help="Send error notifications to challenge admins", action="store_true", default=False)
    parser.add_argument("--send-messages", help="Send validation and scoring messages to participants", action="store_true", default=False)
    parser.add_argument("--acknowledge-receipt", help="Send confirmation message on passing validation to participants", action="store_true", default=False)
    parser.add_argument("--dry-run", help="Perform the requested command without updating anything in Synapse", action="store_true", default=False)
    parser.add_argument("--debug", help="Show verbose error output from Synapse API calls", action="store_true", default=False)
    parser.add_argument("--threads", help="Number of parallel processes to use for validation and scoring", type=int, default=1)

    subparsers = parser.add_subparsers(title="subcommand")

    parser_list = subparsers.add_parser('list', help="List submissions to an evaluation or list evaluations")
    parser_list.add_argument("evaluation", metavar="EVALUATION-ID", nargs='?', default=None)
    parser_list.add_argument("--challenge-project", "--challenge", "--project", metavar="SYNAPSE-ID", default=None)
    parser_list.add_argument("-s", "--status", default=None)
    parser_list.add_argument("--all", action="store_true", default=False)
    parser_list.set_defaults(func=command_list)

    parser_status = subparsers.add_parser('status', help="Check the status of a submission")
    parser_status.add_argument("submission")
    parser_status.set_defaults(func=command_check_status)

    parser_reset = subparsers.add_parser('reset', help="Reset a submission to RECEIVED for re-scoring (or set to some other status)")
    parser_reset.add_argument("submission", metavar="SUBMISSION-ID", type=int, nargs='*', help="One or more submission IDs, or omit if using --rescore-all")
    parser_reset.add_argument("-s", "--status", default='RECEIVED')
    parser_reset.add_argument("--rescore-all", action="store_true", default=False)
    parser_reset.add_argument("--rescore", metavar="EVALUATION-ID", type=int, nargs='*', help="One or more evaluation IDs to rescore")
    parser_reset.set_defaults(func=command_reset)

    parser_validate = subparsers.add_parser('validate', help="Validate all RECEIVED submissions to an evaluation")
    parser_validate.add_argument("evaluation", metavar="EVALUATION-ID", nargs='?', default=None)
    parser_validate.add_argument("--all", action="store_true", default=False)
    parser_validate.add_argument("--canCancel", action="store_true", default=False)
    parser_validate.set_defaults(func=command_validate)

    parser_score = subparsers.add_parser('score', help="Score all VALIDATED submissions to an evaluation")
    parser_score.add_argument("evaluation", metavar="EVALUATION-ID", nargs='?', default=None)
    parser_score.add_argument("--all", action="store_true", default=False)
    parser_score.add_argument("--canCancel", action="store_true", default=False)
    parser_score.set_defaults(func=command_score)

    parser_rank = subparsers.add_parser('rank', help="Rank all SCORED submissions to an evaluation")
    parser_rank.add_argument("evaluation", metavar="EVALUATION-ID", default=None)
    parser_rank.set_defaults(func=command_rank)

    parser_archive = subparsers.add_parser('archive', help="Archive submissions to a challenge")
    parser_archive.add_argument("evaluation", metavar="EVALUATION-ID", default=None)
    parser_archive.add_argument("archiveType",metavar="TYPE", choices=["submission","writeup"])
    parser_archive.add_argument("destination", metavar="FOLDER-ID", default=None)
    parser_archive.add_argument("-q", "--query", default=None)
    parser_archive.add_argument("-n", "--name", default=None)
    parser_archive.set_defaults(func=command_archive)

    parser_leaderboard = subparsers.add_parser('leaderboard', help="Print the leaderboard for an evaluation")
    parser_leaderboard.add_argument("evaluation", metavar="EVALUATION-ID", default=None)
    parser_leaderboard.add_argument("--out", default=None)
    parser_leaderboard.set_defaults(func=command_leaderboard)

    args = parser.parse_args()

    print "\n" * 2, "=" * 75
    print datetime.utcnow().isoformat()

    ## Acquire lock, don't run two scoring scripts at once
    try:
        update_lock = lock.acquire_lock_or_fail('challenge', max_age=timedelta(hours=4))
    except lock.LockedException:
        print u"Is the scoring script already running? Can't acquire lock."
        # can't acquire lock, so return error code 75 which is a
        # temporary error according to /usr/include/sysexits.h
        return 75

    try:
        syn = synapseclient.Synapse(debug=args.debug)
        if not args.user:
            args.user = os.environ.get('SYNAPSE_USER', None)
        if not args.password:
            args.password = os.environ.get('SYNAPSE_PASSWORD', None)
        syn.login(email=args.user, password=args.password)

        ## initialize messages
        messages.syn = syn
        messages.dry_run = args.dry_run
        messages.send_messages = args.send_messages
        messages.send_notifications = args.notifications
        messages.acknowledge_receipt = args.acknowledge_receipt

        args.func(args)

    except Exception as ex1:
        sys.stderr.write('Error in scoring script:\n')
        st = StringIO()
        traceback.print_exc(file=st)
        sys.stderr.write(st.getvalue())
        sys.stderr.write('\n')

        if conf.ADMIN_USER_IDS:
            messages.error_notification(userIds=conf.ADMIN_USER_IDS, message=st.getvalue(), queue_name=conf.CHALLENGE_NAME)

    finally:
        update_lock.release()

    print "\ndone: ", datetime.utcnow().isoformat()
    print "=" * 75, "\n" * 2
def main():

    if conf.CHALLENGE_SYN_ID == "":
        sys.stderr.write("Please configure your challenge. See sample_challenge.py for an example.")

    global syn

    parser = argparse.ArgumentParser()

    parser.add_argument("-u", "--user", help="UserName", default=None)
    parser.add_argument("-p", "--password", help="Password", default=None)
    parser.add_argument("--notifications", help="Send error notifications to challenge admins", action="store_true", default=False)
    parser.add_argument("--send-messages", help="Send validation and scoring messages to participants", action="store_true", default=False)
    parser.add_argument("--acknowledge-receipt", help="Send confirmation message on passing validation to participants", action="store_true", default=False)
    parser.add_argument("--dry-run", help="Perform the requested command without updating anything in Synapse", action="store_true", default=False)
    parser.add_argument("--debug", help="Show verbose error output from Synapse API calls", action="store_true", default=False)

    subparsers = parser.add_subparsers(title="subcommand")

    parser_list = subparsers.add_parser('list', help="List submissions to an evaluation or list evaluations")
    parser_list.add_argument("evaluation", metavar="EVALUATION-ID", nargs='?', default=None)
    parser_list.add_argument("--challenge-project", "--challenge", "--project", metavar="SYNAPSE-ID", default=None)
    parser_list.add_argument("-s", "--status", default=None)
    parser_list.add_argument("--all", action="store_true", default=False)
    parser_list.set_defaults(func=command_list)

    parser_status = subparsers.add_parser('status', help="Check the status of a submission")
    parser_status.add_argument("submission")
    parser_status.set_defaults(func=command_check_status)

    parser_reset = subparsers.add_parser('reset', help="Reset a submission to RECEIVED for re-scoring (or set to some other status)")
    parser_reset.add_argument("submission", metavar="SUBMISSION-ID", type=int, nargs='*', help="One or more submission IDs, or omit if using --rescore-all")
    parser_reset.add_argument("-s", "--status", default='RECEIVED')
    parser_reset.add_argument("--rescore-all", action="store_true", default=False)
    parser_reset.add_argument("--rescore", metavar="EVALUATION-ID", type=int, nargs='*', help="One or more evaluation IDs to rescore")
    parser_reset.set_defaults(func=command_reset)

    parser_validate = subparsers.add_parser('validate', help="Validate all RECEIVED submissions to an evaluation")
    parser_validate.add_argument("evaluation", metavar="EVALUATION-ID", nargs='?', default=None, )
    parser_validate.add_argument("--all", action="store_true", default=False)
    parser_validate.set_defaults(func=command_validate)

    parser_score = subparsers.add_parser('score', help="Score all VALIDATED submissions to an evaluation")
    parser_score.add_argument("evaluation", metavar="EVALUATION-ID", nargs='?', default=None)
    parser_score.add_argument("--all", action="store_true", default=False)
    parser_score.set_defaults(func=command_score)

    parser_rank = subparsers.add_parser('rank', help="Rank all SCORED submissions to an evaluation")
    parser_rank.add_argument("evaluation", metavar="EVALUATION-ID", default=None)
    parser_rank.set_defaults(func=command_rank)

    parser_archive = subparsers.add_parser('archive', help="Archive submissions to a challenge")
    parser_archive.add_argument("evaluation", metavar="EVALUATION-ID", default=None)
    parser_archive.add_argument("destination", metavar="FOLDER-ID", default=None)
    parser_archive.add_argument("-q", "--query", default=None)
    parser_archive.add_argument("-n", "--name", default=None)
    parser_archive.set_defaults(func=command_archive)

    parser_leaderboard = subparsers.add_parser('leaderboard', help="Print the leaderboard for an evaluation")
    parser_leaderboard.add_argument("evaluation", metavar="EVALUATION-ID", default=None)
    parser_leaderboard.add_argument("--out", default=None)
    parser_leaderboard.set_defaults(func=command_leaderboard)

    args = parser.parse_args()

    print "\n" * 2, "=" * 75
    print datetime.utcnow().isoformat()

    ## Acquire lock, don't run two scoring scripts at once
    try:
        update_lock = lock.acquire_lock_or_fail('challenge', max_age=timedelta(hours=4))
    except lock.LockedException:
        print u"Is the scoring script already running? Can't acquire lock."
        # can't acquire lock, so return error code 75 which is a
        # temporary error according to /usr/include/sysexits.h
        return 75

    try:
        syn = synapseclient.Synapse(debug=args.debug)
        if not args.user:
            args.user = os.environ.get('SYNAPSE_USER', None)
        if not args.password:
            args.password = os.environ.get('SYNAPSE_PASSWORD', None)
        syn.login(email=args.user, password=args.password)

        ## initialize messages
        messages.syn = syn
        messages.dry_run = args.dry_run
        messages.send_messages = args.send_messages
        messages.send_notifications = args.notifications
        messages.acknowledge_receipt = args.acknowledge_receipt

        args.func(args)

    except Exception as ex1:
        sys.stderr.write('Error in scoring script:\n')
        st = StringIO()
        traceback.print_exc(file=st)
        sys.stderr.write(st.getvalue())
        sys.stderr.write('\n')

        if conf.ADMIN_USER_IDS:
            messages.error_notification(userIds=conf.ADMIN_USER_IDS, message=st.getvalue(), queue_name=conf.CHALLENGE_NAME)

    finally:
        update_lock.release()

    print "\ndone: ", datetime.utcnow().isoformat()
    print "=" * 75, "\n" * 2
def parallel_score(submissionId, evaluation, dry_run):
    submission = syn.getSubmission(submissionId)
    status = syn.getSubmissionStatus(submissionId)
    logFile = open(os.path.join(LOG_DIR, status['id'] + "_log.txt"), 'w')
    status.status = "INVALID"

    try:
        score, message = conf.score_submission(evaluation, submission)

        logFile.write("scored: %s %s %s %s" % (submission.id,submission.name,submission.userId,str(score)))
        logFile.flush()
        ## fill in team in submission status annotations
        if 'teamId' in submission:
            team = syn.restGET('/team/{id}'.format(id=submission.teamId))
            if 'name' in team:
                score['team'] = team['name']
            else:
                score['team'] = submission.teamId
        elif 'userId' in submission:
            profile = syn.getUserProfile(submission.userId)
            score['team'] = get_user_name(profile)
        else:
            score['team'] = '?'
        add_annotations = synapseclient.annotations.to_submission_status_annotations(score, is_private=True)
        status = update_single_submission_status(status, add_annotations)

        status.status = "SCORED"
        ### Add in DATE as a public annotation and change team annotation to not private
        ## if there's a table configured, update it
        if not dry_run and evaluation.id in conf.leaderboard_tables:
            update_leaderboard_table(conf.leaderboard_tables[evaluation.id], submission, fields=score, dry_run=False)

    except Exception as ex1:
        logFile.write('\n\nError scoring submission %s %s:\n' % (submission.name, submission.id))
        st = StringIO()
        traceback.print_exc(file=st)
        # sys.stderr.write(st.getvalue())
        # sys.stderr.write('\n')
        message = st.getvalue()
        logFile.write(message)
        logFile.flush()

        if conf.ADMIN_USER_IDS:
            submission_info = "submission id: %s\nsubmission name: %s\nsubmitted by user id: %s\n\n" % (submission.id, submission.name, submission.userId)
            messages.error_notification(userIds=conf.ADMIN_USER_IDS, message=submission_info+message)

    if not dry_run:
        status = syn.store(status)

    ## send message AFTER storing status to ensure we don't get repeat messages
    profile = syn.getUserProfile(submission.userId)

    if status.status == 'SCORED':
        messages.scoring_succeeded(
            userIds=[submission.userId],
            message=message,
            username=get_user_name(profile),
            queue_name=evaluation.name,
            submission_name=submission.name,
            submission_id=submission.id)
    else:
        messages.scoring_error(
            userIds=conf.ADMIN_USER_IDS,
            message=message,
            username="******",
            queue_name=evaluation.name,
            submission_name=submission.name,
            submission_id=submission.id)

    #sys.stdout.write('\n')

    logFile.close()
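
## Illustrative sketch (an assumption, not part of the original source): the
## submissionId-based signature of parallel_score suggests it is meant to be
## fanned out across submissions, e.g. with a multiprocessing pool sized by
## the --threads argument parsed in main(). This relies on the module-level
## `syn` client being inherited by forked workers, which holds on Linux but
## not under spawn-based start methods.
from multiprocessing import Pool

def score_in_parallel(evaluation, submission_ids, threads=2, dry_run=False):
    pool = Pool(processes=threads)
    results = [pool.apply_async(parallel_score, (sub_id, evaluation, dry_run))
               for sub_id in submission_ids]
    pool.close()
    pool.join()
    for result in results:
        result.get()  # re-raise any exception that occurred in a worker
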
def score(evaluation, dry_run=False):

    if not isinstance(evaluation, Evaluation):
        evaluation = syn.getEvaluation(evaluation)

    print '\n\nScoring ', evaluation.id, evaluation.name
    print "-" * 60
    sys.stdout.flush()

    for submission, status in syn.getSubmissionBundles(evaluation, status='VALIDATED'):

        status.status = "INVALID"

        ## refetch the submission so that we get the file path
        ## to be later replaced by a "downloadFiles" flag on getSubmissionBundles
        submission = syn.getSubmission(submission)

        try:
            score, message = conf.score_submission(evaluation, submission)

            print "scored:", submission.id, submission.name, submission.userId, score

            ## fill in team in submission status annotations
            if 'teamId' in submission:
                team = syn.restGET('/team/{id}'.format(id=submission.teamId))
                if 'name' in team:
                    score['team'] = team['name']
                else:
                    score['team'] = submission.teamId
            elif 'userId' in submission:
                profile = syn.getUserProfile(submission.userId)
                score['team'] = get_user_name(profile)
            else:
                score['team'] = '?'

            status.annotations = synapseclient.annotations.to_submission_status_annotations(score, is_private=True)
            status.status = "SCORED"
            ## if there's a table configured, update it
            if not dry_run and evaluation.id in conf.leaderboard_tables:
                update_leaderboard_table(conf.leaderboard_tables[evaluation.id], submission, fields=score, dry_run=False)

        except Exception as ex1:
            sys.stderr.write('\n\nError scoring submission %s %s:\n' % (submission.name, submission.id))
            st = StringIO()
            traceback.print_exc(file=st)
            sys.stderr.write(st.getvalue())
            sys.stderr.write('\n')
            message = st.getvalue()

            if conf.ADMIN_USER_IDS:
                submission_info = "submission id: %s\nsubmission name: %s\nsubmitted by user id: %s\n\n" % (submission.id, submission.name, submission.userId)
                messages.error_notification(userIds=conf.ADMIN_USER_IDS, message=submission_info+st.getvalue())

        if not dry_run:
            status = syn.store(status)

        ## send message AFTER storing status to ensure we don't get repeat messages
        profile = syn.getUserProfile(submission.userId)

        if status.status == 'SCORED':
            messages.scoring_succeeded(
                userIds=[submission.userId],
                message=message,
                username=get_user_name(profile),
                queue_name=evaluation.name,
                submission_name=submission.name,
                submission_id=submission.id)
        else:
            messages.scoring_failed(
                userIds=[submission.userId],
                message=message,
                username=get_user_name(profile),
                queue_name=evaluation.name,
                submission_name=submission.name,
                submission_id=submission.id)

    sys.stdout.write('\n')
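
## Illustrative sketch (an assumption about the conf module, which is not
## shown here): score() only requires conf.score_submission to return a
## (score_dict, message) pair, where score_dict becomes the submission status
## annotations and message is mailed to the submitter. A minimal stub:
def score_submission(evaluation, submission):
    ## submission.filePath is available because score() refetched the
    ## submission with syn.getSubmission()
    with open(submission.filePath) as f:
        n_predictions = len(f.read().splitlines())
    score = {'n_predictions': n_predictions}  # placeholder metric
    message = "Scored %d predictions." % n_predictions
    return score, message
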
def score(evaluation, dry_run=False):

    if not isinstance(evaluation, Evaluation):
        evaluation = syn.getEvaluation(evaluation)

    print '\n\nScoring ', evaluation.id, evaluation.name
    print "-" * 60
    sys.stdout.flush()

    for submission, status in syn.getSubmissionBundles(evaluation, status='VALIDATED'):

        status.status = "INVALID"

        ## refetch the submission so that we get the file path
        ## to be later replaced by a "downloadFiles" flag on getSubmissionBundles
        submission = syn.getSubmission(submission)

        try:
            score, message = conf.score_submission(evaluation, submission)

            print "scored:", submission.id, submission.name, submission.userId, score

            ## fill in team in submission status annotations
            if 'teamId' in submission:
                team = syn.restGET('/team/{id}'.format(id=submission.teamId))
                if 'name' in team:
                    score['team'] = team['name']
                else:
                    score['team'] = submission.teamId
            elif 'userId' in submission:
                profile = syn.getUserProfile(submission.userId)
                score['team'] = get_user_name(profile)
            else:
                score['team'] = '?'
            add_annotations = synapseclient.annotations.to_submission_status_annotations(score, is_private=True)
            for i in add_annotations:
                if status.annotations.get(i) is not None:
                    status.annotations[i].extend(add_annotations[i])
                else:
                    status.annotations[i] = add_annotations[i]
            #status.annotations = synapseclient.annotations.to_submission_status_annotations(score,is_private=True)
            status.status = "SCORED"
            ### Add in DATE as a public annotation and change team annotation to not private
            ## if there's a table configured, update it
            if not dry_run and evaluation.id in conf.leaderboard_tables:
                update_leaderboard_table(conf.leaderboard_tables[evaluation.id], submission, fields=score, dry_run=False)

        except Exception as ex1:
            sys.stderr.write('\n\nError scoring submission %s %s:\n' % (submission.name, submission.id))
            st = StringIO()
            traceback.print_exc(file=st)
            sys.stderr.write(st.getvalue())
            sys.stderr.write('\n')
            message = st.getvalue()

            if conf.ADMIN_USER_IDS:
                submission_info = "submission id: %s\nsubmission name: %s\nsubmitted by user id: %s\n\n" % (submission.id, submission.name, submission.userId)
                messages.error_notification(userIds=conf.ADMIN_USER_IDS, message=submission_info+st.getvalue())

        if not dry_run:
            status = syn.store(status)

        ## send message AFTER storing status to ensure we don't get repeat messages
        profile = syn.getUserProfile(submission.userId)

        if status.status == 'SCORED':
            messages.scoring_succeeded(
                userIds=[submission.userId],
                message=message,
                username=get_user_name(profile),
                queue_name=evaluation.name,
                submission_name=submission.name,
                submission_id=submission.id)
        else:
            messages.scoring_error(
                userIds=[submission.userId],
                message=message,
                username=get_user_name(profile),
                queue_name=evaluation.name,
                submission_name=submission.name,
                submission_id=submission.id)

    sys.stdout.write('\n')
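
## Why the merge loop above extends lists instead of assigning: in the
## synapseclient releases this code targets, to_submission_status_annotations
## returns the REST-style annotations dict, roughly of this shape (field names
## from the client; the values here are made up):
##
##   {'stringAnnos': [{'key': 'team', 'value': 'Team X', 'isPrivate': True}],
##    'doubleAnnos': [{'key': 'auc',  'value': 0.87,     'isPrivate': True}],
##    'longAnnos':   []}
##
## Assigning status.annotations wholesale (the commented-out line) would drop
## annotations already stored on the status; extending each typed list keeps
## them alongside the new scores.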
Example #6
def parallel_run(submissionId,
                 evaluation,
                 syn,
                 canCancel,
                 userName,
                 password,
                 dry_run=False):
    client = docker.from_env()
    client.login(userName, password, registry="http://docker.synapse.org")
    submission = syn.getSubmission(submissionId)
    status = syn.getSubmissionStatus(submissionId)
    logFile = open(os.path.join(LOG_DIR, status['id'] + "_log.txt"), 'w')
    if canCancel:
        status.canCancel = True
    status.status = "EVALUATION_IN_PROGRESS"
    startTime = {"RUN_START": int(time.time() * 1000)}
    add_annotations = synapseclient.annotations.to_submission_status_annotations(
        startTime, is_private=True)
    status = update_single_submission_status(status, add_annotations)
    status = syn.store(status)

    status.status = "INVALID"

    ## refetch the submission so that we get the file path
    ## to be later replaced by a "downloadFiles" flag on getSubmissionBundles
    submission = syn.getSubmission(submission)
    #If submission_info is None, then the code passed
    submission_info = None
    try:
        score, message = conf.run_docker(evaluation, submission, syn, client)

        logFile.write(
            "scored: %s %s %s %s" %
            (submission.id, submission.name, submission.userId, str(score)))
        logFile.flush()
        ## fill in team in submission status annotations
        if 'teamId' in submission:
            team = syn.restGET('/team/{id}'.format(id=submission.teamId))
            if 'name' in team:
                score['team'] = team['name']
            else:
                score['team'] = submission.teamId
        elif 'userId' in submission:
            profile = syn.getUserProfile(submission.userId)
            score['team'] = get_user_name(profile)
        else:
            score['team'] = '?'
        score['RUN_END'] = int(time.time() * 1000)

        add_annotations = synapseclient.annotations.to_submission_status_annotations(
            score, is_private=True)
        status = update_single_submission_status(status, add_annotations)
        if score['PREDICTION_FILE'] is None:
            status.status = "INVALID"
        else:
            status.status = "ACCEPTED"
        if not dry_run and evaluation.id in conf.leaderboard_tables:
            update_leaderboard_table(conf.leaderboard_tables[evaluation.id],
                                     submission,
                                     fields=score,
                                     dry_run=False)

    except Exception as ex1:
        logFile.write('\n\nError scoring submission %s %s:\n' %
                      (submission.name, submission.id))
        st = StringIO()
        traceback.print_exc(file=st)
        #sys.stderr.write(st.getvalue())
        #sys.stderr.write('\n')
        message = st.getvalue()
        logFile.write(message)
        logFile.flush()

        if conf.ADMIN_USER_IDS:
            submission_info = "submission id: %s\nsubmission name: %s\nsubmitted by user id: %s\n\n" % (
                submission.id, submission.name, submission.userId)
            messages.error_notification(userIds=conf.ADMIN_USER_IDS,
                                        message=submission_info +
                                        st.getvalue(),
                                        queue_name=evaluation.name)

    if not dry_run:
        status = syn.store(status)

    ## send message AFTER storing status to ensure we don't get repeat messages
    profile = syn.getUserProfile(submission.userId)

    if status.status == 'ACCEPTED':
        messages.scoring_succeeded(userIds=[submission.userId],
                                   message=message,
                                   username=get_user_name(profile),
                                   queue_name=evaluation.name,
                                   submission_name=submission.name,
                                   submission_id=submission.id)
    elif submission_info is None:
        messages.scoring_error(userIds=[submission.userId],
                               message=message,
                               username=get_user_name(profile),
                               queue_name=evaluation.name,
                               submission_name=submission.name,
                               submission_id=submission.id)
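
## Illustrative sketch (an assumption, not the actual helper imported above):
## update_single_submission_status only needs to splice the new REST-style
## annotations into status.annotations, letting new values win on key clashes.
def update_single_submission_status(status, add_annotations, force=False):
    ## `force` is accepted for signature parity but ignored in this sketch
    existing = status.get('annotations', {})
    for anno_type in ('stringAnnos', 'longAnnos', 'doubleAnnos'):
        new = add_annotations.get(anno_type, [])
        new_keys = set(anno['key'] for anno in new)
        kept = [anno for anno in existing.get(anno_type, [])
                if anno['key'] not in new_keys]
        existing[anno_type] = kept + new
    status.annotations = existing
    return status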
Example #7
def main():

    global syn

    parser = argparse.ArgumentParser()

    parser.add_argument("-u", "--user", help="UserName", default=None)
    parser.add_argument("-p", "--password", help="Password", default=None)
    parser.add_argument("--challengeName", help="Challenge Name", required=True)
    parser.add_argument("--notifications", help="Send error notifications to challenge admins", action="store_true", default=False)
    parser.add_argument("--send-messages", help="Send validation and scoring messages to participants", action="store_true", default=False)
    parser.add_argument("--acknowledge-receipt", help="Send confirmation message on passing validation to participants", action="store_true", default=False)
    parser.add_argument("--dry-run", help="Perform the requested command without updating anything in Synapse", action="store_true", default=False)
    parser.add_argument("--debug", help="Show verbose error output from Synapse API calls", action="store_true", default=False)

    subparsers = parser.add_subparsers(title="subcommand")

    parser_validate = subparsers.add_parser('validate', help="Validate all RECEIVED submissions to an evaluation")
    parser_validate.add_argument("evaluation", metavar="EVALUATION-IDs", nargs='*', default=None)
    parser_validate.add_argument("--admin", metavar="ADMIN", default=None)
    parser_validate.add_argument("--public", action="store_true", default=False)
    parser_validate.set_defaults(func=command_validate)

    parser_archive = subparsers.add_parser('archive', help="Archive submissions to a challenge")
    parser_archive.add_argument("evaluation", metavar="EVALUATION-ID", default=None)
    parser_archive.add_argument("--status",metavar="STATUS", default="VALIDATED")
    parser_archive.add_argument("--reArchive", action="store_true", default=False)
    parser_archive.set_defaults(func=command_archive)

    args = parser.parse_args()

    print("\n" * 2, "=" * 75)
    print(datetime.utcnow().isoformat())

    ## Acquire lock, don't run two scoring scripts at once
    try:
        update_lock = lock.acquire_lock_or_fail('challenge', max_age=timedelta(hours=4))
    except lock.LockedException:
        print("Is the scoring script already running? Can't acquire lock.")
        # can't acquire lock, so return error code 75 which is a
        # temporary error according to /usr/include/sysexits.h
        return 75

    try:
        syn = synapseclient.Synapse(debug=args.debug)
        if not args.user:
            args.user = os.environ.get('SYNAPSE_USER', None)
        if not args.password:
            args.password = os.environ.get('SYNAPSE_PASSWORD', None)
        syn.login(email=args.user, password=args.password)

        ## initialize messages
        messages.syn = syn
        messages.dry_run = args.dry_run
        messages.send_messages = args.send_messages
        messages.send_notifications = args.notifications
        messages.acknowledge_receipt = args.acknowledge_receipt

        args.func(args)

    except Exception as ex1:
        sys.stderr.write('Error in scoring script:\n')
        st = StringIO()
        traceback.print_exc(file=st)
        sys.stderr.write(st.getvalue())
        sys.stderr.write('\n')

        if conf.ADMIN_USER_IDS:
            messages.error_notification(userIds=conf.ADMIN_USER_IDS, message=st.getvalue(), queue_name=args.challengeName)

    finally:
        update_lock.release()

    print("\ndone: ", datetime.utcnow().isoformat())
    print("=" * 75, "\n" * 2)
Example #8
def score(evaluation, syn, client, canCancel, dry_run=False):

    if not isinstance(evaluation, Evaluation):
        evaluation = syn.getEvaluation(evaluation)

    print '\n\nScoring ', evaluation.id, evaluation.name
    print "-" * 60
    sys.stdout.flush()

    for submission, status in syn.getSubmissionBundles(evaluation,
                                                       status='OPEN'):
        if canCancel:
            status.canCancel = True
        status.status = "EVALUATION_IN_PROGRESS"
        startTime = {"RUN_START": int(time.time() * 1000)}
        add_annotations = synapseclient.annotations.to_submission_status_annotations(
            startTime, is_private=True)
        status = update_single_submission_status(status, add_annotations)
        status = syn.store(status)
        status.status = "INVALID"
        ## refetch the submission so that we get the file path
        ## to be later replaced by a "downloadFiles" flag on getSubmissionBundles
        submission = syn.getSubmission(submission)

        try:
            score, message = conf.run_docker(evaluation, submission, syn,
                                             client)

            print "scored:", submission.id, submission.name, submission.userId, score

            ## fill in team in submission status annotations
            if 'teamId' in submission:
                team = syn.restGET('/team/{id}'.format(id=submission.teamId))
                if 'name' in team:
                    score['team'] = team['name']
                else:
                    score['team'] = submission.teamId
            elif 'userId' in submission:
                profile = syn.getUserProfile(submission.userId)
                score['team'] = get_user_name(profile)
            else:
                score['team'] = '?'
            add_annotations = synapseclient.annotations.to_submission_status_annotations(
                score, is_private=True)
            status = update_single_submission_status(status, add_annotations)
            if score['PREDICTION_FILE'] is None:
                status.status = "INVALID"
            else:
                #Status should be accepted because the docker agent is different from the scoring harness
                status.status = "ACCEPTED"
            score['RUN_END'] = int(time.time() * 1000)
            ## if there's a table configured, update it
            if not dry_run and evaluation.id in conf.leaderboard_tables:
                update_leaderboard_table(
                    conf.leaderboard_tables[evaluation.id],
                    submission,
                    fields=score,
                    dry_run=False)

        except Exception as ex1:
            sys.stderr.write('\n\nError scoring submission %s %s:\n' %
                             (submission.name, submission.id))
            st = StringIO()
            traceback.print_exc(file=st)
            sys.stderr.write(st.getvalue())
            sys.stderr.write('\n')
            message = st.getvalue()

            if conf.ADMIN_USER_IDS:
                submission_info = "submission id: %s\nsubmission name: %s\nsubmitted by user id: %s\n\n" % (
                    submission.id, submission.name, submission.userId)
                messages.error_notification(userIds=conf.ADMIN_USER_IDS,
                                            message=submission_info +
                                            st.getvalue())

        if not dry_run:
            status = syn.store(status)

        ## send message AFTER storing status to ensure we don't get repeat messages
        profile = syn.getUserProfile(submission.userId)

        if status.status == 'ACCEPTED':
            messages.scoring_succeeded(userIds=[submission.userId],
                                       message=message,
                                       username=get_user_name(profile),
                                       queue_name=evaluation.name,
                                       submission_name=submission.name,
                                       submission_id=submission.id)
        else:
            messages.scoring_error(userIds=[submission.userId],
                                   message=message,
                                   username=get_user_name(profile),
                                   queue_name=evaluation.name,
                                   submission_name=submission.name,
                                   submission_id=submission.id)

    sys.stdout.write('\n')
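
## Illustrative sketch (an assumption about the conf module): score() above
## expects conf.run_docker to return a (score_dict, message) pair and keys the
## ACCEPTED/INVALID decision off score_dict['PREDICTION_FILE'].
## dockerRepositoryName and dockerDigest are the fields Synapse sets on
## Docker-type submissions; the output path below is a placeholder.
def run_docker(evaluation, submission, syn, client):
    image = "%s@%s" % (submission.dockerRepositoryName, submission.dockerDigest)
    logs = client.containers.run(image, remove=True)  # blocks, returns stdout
    prediction_file = "/output/predictions.csv"  # hypothetical location
    score = {'PREDICTION_FILE': prediction_file if logs else None}
    return score, logs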
Example #9
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument("submissionId", metavar="Submission Id")
    parser.add_argument("--challengePredFolder", required=True)
    parser.add_argument("--challengeLogFolder", required=True)
    parser.add_argument("--outputDir", required=True)
    parser.add_argument("--mountedVolumes", nargs="*", required=True)
    parser.add_argument("--configFile", required=True)
    parser.add_argument("--timeQuota",
                        help="Time quota in milliseconds",
                        type=int)
    #Has default values
    parser.add_argument("-u", "--user", help="UserName", default=None)
    parser.add_argument("-p", "--password", help="Password", default=None)
    parser.add_argument("--notifications",
                        help="Send error notifications to challenge admins",
                        action="store_true",
                        default=False)
    parser.add_argument(
        "--send-messages",
        help="Send validation and scoring messages to participants",
        action="store_true",
        default=False)
    parser.add_argument(
        "--acknowledge-receipt",
        help="Send confirmation message on passing validation to participants",
        action="store_true",
        default=False)
    parser.add_argument(
        "--dry-run",
        help="Perform the requested command without updating anything in Synapse",
        action="store_true",
        default=False)
    parser.add_argument(
        "--debug",
        help="Show verbose error output from Synapse API calls",
        action="store_true",
        default=False)
    parser.add_argument("--canCancel", action="store_true", default=False)

    #Test run
    #python runDockerSubmission.py 9636069 --challengePredFolder syn7998461 --challengeLogFolder syn9974718 --configFile config.json --mountedVolumes /home/ubuntu/sc2/Celgene-Multiple-Myeloma-Challenge/docker_agent/test-data:/test-data:ro /.synapseCache:/.synapseCache:ro -u $SYNAPSE_USER -p $SYNAPSE_PASS --outputDir /home/ubuntu/sc2/Celgene-Multiple-Myeloma-Challenge/docker_agent/9636069 --send-messages --notifications
    args = parser.parse_args()

    print "\n" * 2, "=" * 75
    print datetime.utcnow().isoformat()

    # Acquire lock, don't run two scoring scripts at once
    try:
        submission_lock = lock.acquire_lock_or_fail(
            args.submissionId, max_age=timedelta(hours=9000))
    except lock.LockedException:
        print u"Is the scoring script already running? Can't acquire lock."
        # can't acquire lock, so return error code 75 which is a
        # temporary error according to /usr/include/sysexits.h
        return 75

    try:
        syn = synapseclient.Synapse(debug=args.debug)
        if not args.user:
            args.user = os.environ.get('SYNAPSE_USER', None)
        if not args.password:
            args.password = os.environ.get('SYNAPSE_PASSWORD', None)
        syn.login(email=args.user, password=args.password)
        #Add client into arguments
        client = docker.from_env()
        client.login(args.user,
                     args.password,
                     registry="http://docker.synapse.org")
        #Add syn and client into arguments
        args.syn = syn
        args.client = client

        ## initialize messages
        messages.syn = syn
        messages.dry_run = args.dry_run
        messages.send_messages = args.send_messages
        messages.send_notifications = args.notifications
        messages.acknowledge_receipt = args.acknowledge_receipt
        command_run(args)

    except Exception as ex1:
        sys.stderr.write('Error in scoring script:\n')
        st = StringIO()
        traceback.print_exc(file=st)
        sys.stderr.write(st.getvalue())
        sys.stderr.write('\n')
        if ADMIN_USER_IDS:
            messages.error_notification(userIds=ADMIN_USER_IDS,
                                        message=st.getvalue(),
                                        queue_name=CHALLENGE_NAME)

    finally:
        submission_lock.release()

    print "\ndone: ", datetime.utcnow().isoformat()
    print "=" * 75, "\n" * 2
Example #10
def run(syn,
        client,
        submissionId,
        configFile,
        challenge_prediction_folder,
        challenge_log_folder,
        output_dir,
        mountedVolumes,
        canCancel,
        timeQuota=None,
        dry_run=False):
    submission = syn.getSubmission(submissionId)
    status = syn.getSubmissionStatus(submissionId)
    evaluation = syn.getEvaluation(submission.evaluationId)
    logFile = open(os.path.join(LOG_DIR, status['id'] + "_log.txt"), 'w')
    # if canCancel:
    #     status.canCancel = True
    # status.status = "EVALUATION_IN_PROGRESS"
    # #Store run time and evaluation in progress
    # startTime = {"RUN_START":int(time.time()*1000)}
    # add_annotations = synapseclient.annotations.to_submission_status_annotations(startTime,is_private=False)
    # status = update_single_submission_status(status, add_annotations, force=True)
    # status = syn.store(status)

    status.status = "INVALID"
    #Create dictionary that mounts the volumes
    volumes = {output_dir: {'bind': '/output', 'mode': 'rw'}}
    for mount in mountedVolumes:
        binds = mount.split(":")
        assert len(binds) == 3, \
            "Mounted volumes must be formatted as /full/path:/mountpoint:ro"
        volumes[binds[0]] = {'bind': binds[1], 'mode': binds[2]}

    with open(configFile, 'r') as config:
        config_evaluations = json.load(config)['config_evaluations']
    score_sh = [
        ev['score_sh'] for ev in config_evaluations
        if ev['id'] == int(evaluation.id)
    ]
    returnLog = [
        ev['returnLog'] for ev in config_evaluations
        if ev['id'] == int(evaluation.id)
    ][0]
    #If submission_info is None, then the code passed
    submission_info = None
    try:
        score, message, exceedTimeQuota = dockerRun(
            syn, client, submission, score_sh, challenge_prediction_folder,
            challenge_log_folder, volumes, output_dir, timeQuota, returnLog)

        logFile.write(
            "scored: %s %s %s %s" %
            (submission.id, submission.name, submission.userId, str(score)))
        logFile.flush()
        score['team'] = getTeam(syn, submission)
        score['RUN_END'] = int(time.time() * 1000)
        if exceedTimeQuota:
            score['FAILURE_REASON'] = "Exceeded Time Quota of %s hours" % str(
                timeQuota / (1000 * 60 * 60.0))

        add_annotations = synapseclient.annotations.to_submission_status_annotations(
            score, is_private=False)
        status = update_single_submission_status(status,
                                                 add_annotations,
                                                 force=True)
        if score['PREDICTION_FILE'] is None:
            status.status = "INVALID"
        else:
            status.status = "ACCEPTED"

    except Exception as ex1:
        logFile.write('\n\nError scoring submission %s %s:\n' %
                      (submission.name, submission.id))
        st = StringIO()
        traceback.print_exc(file=st)
        message = st.getvalue()
        logFile.write(message)
        logFile.flush()

        if ADMIN_USER_IDS:
            submission_info = "submission id: %s\nsubmission name: %s\nsubmitted by user id: %s\n\n" % (
                submission.id, submission.name, submission.userId)
            messages.error_notification(userIds=ADMIN_USER_IDS,
                                        message=submission_info +
                                        st.getvalue(),
                                        queue_name=evaluation.name)

    if not dry_run:
        status = syn.store(status)

    ## send message AFTER storing status to ensure we don't get repeat messages
    profile = syn.getUserProfile(submission.userId)
    if status.status == 'ACCEPTED':
        messages.scoring_succeeded(userIds=[submission.userId],
                                   message=message,
                                   username=get_user_name(profile),
                                   queue_name=evaluation.name,
                                   submission_name=submission.name,
                                   submission_id=submission.id)
    elif submission_info is None:
        messages.scoring_error(userIds=[submission.userId],
                               message=message,
                               username=get_user_name(profile),
                               queue_name=evaluation.name,
                               submission_name=submission.name,
                               submission_id=submission.id)
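
## Illustrative usage note (an assumption about dockerRun's internals): the
## `volumes` dict built in run() is already in docker-py's mapping format and
## can be handed straight to client.containers.run, for example:
##
##   client.containers.run(image,
##                         command="bash score.sh",  # hypothetical entrypoint
##                         volumes=volumes,  # {host_path: {'bind': ..., 'mode': ...}}
##                         detach=True)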