def test_Evaluation():
    """Test the construction and accessors of Evaluation objects."""

    content_source_missing_ex = 'The "contentSource" parameter must be specified'
    with pytest.raises(ValueError, match=content_source_missing_ex):
        # with no contentSource
        Evaluation(name='foo', description='bar')
    with pytest.raises(ValueError, match=content_source_missing_ex):
        # with non-synapse id contentSource
        Evaluation(name='foo', description='bar', contentSource='a')

    # Assert that the values are accessible both as dictionary items and as attributes
    ev = Evaluation(name='foobar2', description='bar', contentSource='syn1234')
    assert ev['name'] == ev.name
    assert ev['description'] == ev.description
Example #2
    def test_submit(self, *mocks):
        mocks = list(mocks)
        POST_mock = mocks.pop()
        getEvaluation_mock = mocks.pop()

        # -- Unmet access rights --
        getEvaluation_mock.return_value = Evaluation(**{'contentSource': 'syn1001',
                                                        'createdOn': '2013-11-06T06:04:26.789Z',
                                                        'etag': '86485ea1-8c89-4f24-a0a4-2f63bc011091',
                                                        'id': '9090',
                                                        'name': 'test evaluation',
                                                        'ownerId': '1560252',
                                                        'status': 'OPEN',
                                                        'submissionReceiptMessage': 'mmmm yummy!'})

        # -- Normal submission --
        # insert a shim that returns the dictionary it was passed after adding a bogus id
        def shim(*args):
            assert args[0] == '/evaluation/submission?etag=Fake eTag'
            submission = json.loads(args[1])
            submission['id'] = 1234
            return submission
        POST_mock.side_effect = shim

        submission = syn.submit('9090', {'versionNumber': 1337, 'id': "Whee...", 'etag': 'Fake eTag'}, name='George',
                                submitterAlias='Team X')

        assert submission.id == 1234
        assert submission.evaluationId == '9090'
        assert submission.name == 'George'
        assert submission.submitterAlias == 'Team X'
Example #3
def create_evaluation_queue(syn: Synapse, name: str) -> Evaluation:
    """Creates evaluation queue

    Args:
        name: Name of queue

    Returns:
        a synapseclient.Evaluation
    """
    queue = Evaluation(name=name, contentSource="syn21849255")
    try:
        queue = syn.store(queue)
    except Exception:  # the queue likely already exists; fetch it by name
        url_name = quote(name)
        queue = syn.restGET(f"/evaluation/name/{url_name}")
        queue = Evaluation(**queue)
    return queue
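
A short usage sketch (not part of the original snippet), assuming cached Synapse credentials and access to the hardcoded contentSource project; calling the helper twice with the same name exercises the get-or-create fallback:

import synapseclient
syn = synapseclient.login()  # assumes cached credentials
q1 = create_evaluation_queue(syn, "Example Queue")
q2 = create_evaluation_queue(syn, "Example Queue")  # second call hits the restGET fallback
assert q1.id == q2.id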
Example #4
    def get_or_create_queue(self, **kwargs) -> Evaluation:
        """Gets an existing evaluation queue by name or creates a new one.

        Args:
            Same arguments as synapseclient.Evaluation

        Returns:
            A synapseclient.Evaluation

        """
        queue = Evaluation(**kwargs)
        queue = self._find_by_obj_or_create(queue)
        self.logger.info('{} Queue {}({})'.format(self._update_str, queue.name,
                                                  queue.id))
        return queue
Example #5
def test_submit(*mocks):
    mocks = list(mocks)
    GET_mock = mocks.pop()
    POST_mock = mocks.pop()
    getEvaluation_mock = mocks.pop()
    
    # -- Unmet access rights --
    GET_mock.return_value = {'totalNumberOfResults': 2, 
                             'results': [
                                {'accessType': 'Foo', 
                                 'termsOfUse': 'Bar'}, 
                                {'accessType': 'bat', 
                                 'termsOfUse': 'baz'}]}
    getEvaluation_mock.return_value = Evaluation(**{'contentSource': 'syn1001',
                                                    'createdOn': '2013-11-06T06:04:26.789Z',
                                                    'etag': '86485ea1-8c89-4f24-a0a4-2f63bc011091',
                                                    'id': '9090',
                                                    'name': 'test evaluation',
                                                    'ownerId': '1560252',
                                                    'status': 'OPEN',
                                                    'submissionReceiptMessage': 'mmmm yummy!'})

    pytest.raises(SynapseAuthenticationError, syn.submit, "9090", "syn1001")
    GET_mock.assert_called_once_with('/evaluation/9090/accessRequirementUnfulfilled')
    
    # -- Normal submission --
    # Pretend the user has access rights 
    GET_mock.return_value = {'totalNumberOfResults': 0, 'results': []}
    
    # insert a shim that returns the dictionary it was passed after adding a bogus id
    def shim(*args):
        assert args[0] == '/evaluation/submission?etag=Fake eTag'
        submission = json.loads(args[1])
        submission['id'] = 1234
        return submission
    POST_mock.side_effect = shim
    
    submission = syn.submit('9090', {'versionNumber': 1337, 'id': "Whee...", 'etag': 'Fake eTag'}, name='George', submitterAlias='Team X')
    assert GET_mock.call_count == 2

    assert submission.id == 1234
    assert submission.evaluationId == '9090'
    assert submission.name == 'George'
    assert submission.submitterAlias == 'Team X'

    print(submission)
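
The *mocks unpacking in this test implies a stack of @patch decorators that the listing does not show. Stacked patches are applied bottom-up and passed to the test function bottom-first, so popping from the end of the list recovers them in top-down order. A hedged reconstruction, in which the patched attribute names are assumptions:

from unittest.mock import patch

@patch.object(syn, 'restGET')        # topmost: popped first  -> GET_mock
@patch.object(syn, 'restPOST')       # popped second          -> POST_mock
@patch.object(syn, 'getEvaluation')  # popped third           -> getEvaluation_mock
def test_submit(*mocks):
    ...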
Example #6
def main():
    parser = argparse.ArgumentParser(description='Creates XYZ evaluation via synapse.')
    parser.add_argument('--name', dest='name', required=True, help='Name of workflow')
    parser.add_argument('--description', dest='desc', required=True, help='Description of workflow')
    parser.add_argument('--parent', dest='pid', required=False, default='syn1972151',
                        help='Project ID with which to associate this evaluation.')

    args = parser.parse_args()

    syn = synapseclient.Synapse()
    syn.login()

    newEval = Evaluation(name=args.name, description=args.desc, contentSource=args.pid)
    newEval = syn.store(newEval)
    syn.joinEvaluation(newEval)

    print('Synapse ID for evaluation %s is %s' % (args.name, newEval.id))
Example #7
def create_evaluation_queue(syn, name, description, parentId,
                            submissionInstructionsMessage):
    '''
    Convenience function to create Evaluation Queues

    Args:
        syn: Synapse object
        name: Name of evaluation queue
        description: Description of queue
        parentId: Synapse project id
        submissionInstructionsMessage: Instructions for submission

    Returns:
        Evaluation Queue
    '''
    evaluation = Evaluation(
        name=name,
        description=description,
        contentSource=parentId,
        submissionInstructionsMessage=submissionInstructionsMessage)
    # submissionReceiptMessage="Thanks for submitting to %s!" % name)
    queue = syn.store(evaluation)
    return queue
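
A minimal usage sketch for this variant; the project id and messages below are placeholders, not values from the original snippet:

import synapseclient
syn = synapseclient.login()
queue = create_evaluation_queue(
    syn,
    name="Example Queue",
    description="Scoring queue for the example challenge",
    parentId="syn1234",  # placeholder project id
    submissionInstructionsMessage="Submit a CSV of predictions.")
print(queue.id)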
Example #8
def test_Evaluation():
    """Test the construction and accessors of Evaluation objects."""

    # Status can only be one of ['OPEN', 'PLANNED', 'CLOSED', 'COMPLETED']
    pytest.raises(ValueError,
                  Evaluation,
                  name='foo',
                  description='bar',
                  status='BAH')
    pytest.raises(ValueError,
                  Evaluation,
                  name='foo',
                  description='bar',
                  status='OPEN',
                  contentSource='a')

    # Assert that the values are accessible both as dictionary items and as attributes
    ev = Evaluation(name='foobar2',
                    description='bar',
                    status='OPEN',
                    contentSource='syn1234')
    assert ev['name'] == ev.name
    assert ev['description'] == ev.description
    assert ev['status'] == ev.status
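
The item/attribute symmetry asserted above comes from Evaluation being a dict-like object; a minimal sketch of what that buys you:

ev = Evaluation(name='demo', description='d', status='OPEN', contentSource='syn1234')
ev.status = 'CLOSED'             # attribute write...
assert ev['status'] == 'CLOSED'  # ...is visible through item access
ev['name'] = 'renamed'
assert ev.name == 'renamed'      # and vice versa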
Example #9
def test_command_line_store_and_submit(test_state):
    # Create a Project
    output = run(test_state,
                 'synapse',
                 '--skip-checks',
                 'store',
                 '--name',
                 str(uuid.uuid4()),
                 '--description',
                 'test of store command',
                 '--type',
                 'Project')
    project_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
    test_state.schedule_for_cleanup(project_id)

    # Create and upload a file
    filename = utils.make_bogus_data_file()
    test_state.schedule_for_cleanup(filename)
    output = run(test_state,
                 'synapse',
                 '--skip-checks',
                 'store',
                 '--description',
                 'Bogus data to test file upload',
                 '--parentid',
                 project_id,
                 '--file',
                 filename)
    file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)

    # Verify that we stored the file in Synapse
    f1 = test_state.syn.get(file_entity_id)
    fh = test_state.syn._get_file_handle_as_creator(f1.dataFileHandleId)
    assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.S3FileHandle'

    # Test that entity is named after the file it contains
    assert f1.name == os.path.basename(filename)

    # Create an Evaluation to submit to
    eval = Evaluation(name=str(uuid.uuid4()), contentSource=project_id)
    eval = test_state.syn.store(eval)
    test_state.schedule_for_cleanup(eval)

    # Submit a bogus file
    output = run(test_state,
                 'synapse',
                 '--skip-checks',
                 'submit',
                 '--evaluation',
                 eval.id,
                 '--name',
                 'Some random name',
                 '--entity',
                 file_entity_id)
    parse(r'Submitted \(id: (\d+)\) entity:\s+', output)

    # testing different command line options for submitting to an evaluation
    # submitting to an evaluation by evaluationID
    output = run(test_state,
                 'synapse',
                 '--skip-checks',
                 'submit',
                 '--evalID',
                 eval.id,
                 '--name',
                 'Some random name',
                 '--alias',
                 'My Team',
                 '--entity',
                 file_entity_id)
    parse(r'Submitted \(id: (\d+)\) entity:\s+', output)

    # Update the file
    filename = utils.make_bogus_data_file()
    test_state.schedule_for_cleanup(filename)
    output = run(test_state,
                 'synapse'
                 '--skip-checks',
                 'store',
                 '--id',
                 file_entity_id,
                 '--file',
                 filename)
    updated_entity_id = parse(r'Updated entity:\s+(syn\d+)', output)
    test_state.schedule_for_cleanup(updated_entity_id)

    # Submit an updated bogus file and this time by evaluation name
    run(test_state,
        'synapse',
        '--skip-checks',
        'submit',
        '--evaluationName',
        eval.name,
        '--entity',
        file_entity_id)

    # Tests shouldn't have external dependencies, but here it's required
    ducky_url = 'https://www.synapse.org/Portal/clear.cache.gif'

    # Test external file handle
    output = run(test_state,
                 'synapse',
                 '--skip-checks',
                 'store',
                 '--name',
                 'Rubber Ducky',
                 '--description',
                 'I like rubber duckies',
                 '--parentid',
                 project_id,
                 '--file',
                 ducky_url)
    external_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
    test_state.schedule_for_cleanup(external_entity_id)

    # Verify that we created an external file handle
    f2 = test_state.syn.get(external_entity_id)
    fh = test_state.syn._get_file_handle_as_creator(f2.dataFileHandleId)
    assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.ExternalFileHandle'

    # submit an external file to an evaluation and use provenance
    filename = utils.make_bogus_data_file()
    test_state.schedule_for_cleanup(filename)
    repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
    run(test_state,
        'synapse',
        '--skip-checks', 'submit',
        '--evalID', eval.id,
        '--file', filename,
        '--parent', project_id,
        '--used', external_entity_id,
        '--executed', repo_url)

    # Delete project
    run(test_state,
        'synapse',
        '--skip-checks', 'delete', project_id)
Example #10
def test_evaluations(syn, project, schedule_for_cleanup):
    # Create an Evaluation
    name = 'Test Evaluation %s' % str(uuid.uuid4())
    ev = Evaluation(name=name,
                    description='Evaluation for testing',
                    contentSource=project['id'])
    ev = syn.store(ev)

    try:

        # -- Get the Evaluation by name
        evalNamed = syn.getEvaluationByName(name)
        assert ev['contentSource'] == evalNamed['contentSource']
        assert ev['createdOn'] == evalNamed['createdOn']
        assert ev['description'] == evalNamed['description']
        assert ev['etag'] == evalNamed['etag']
        assert ev['id'] == evalNamed['id']
        assert ev['name'] == evalNamed['name']
        assert ev['ownerId'] == evalNamed['ownerId']

        # -- Get the Evaluation by project
        evalProj = syn.getEvaluationByContentSource(project)
        evalProj = next(evalProj)
        assert ev['contentSource'] == evalProj['contentSource']
        assert ev['createdOn'] == evalProj['createdOn']
        assert ev['description'] == evalProj['description']
        assert ev['etag'] == evalProj['etag']
        assert ev['id'] == evalProj['id']
        assert ev['name'] == evalProj['name']
        assert ev['ownerId'] == evalProj['ownerId']

        # Add the current user as a participant
        myOwnerId = int(syn.getUserProfile()['ownerId'])
        syn._allowParticipation(ev, myOwnerId)

        # AUTHENTICATED_USERS = 273948
        # PUBLIC = 273949
        syn.setPermissions(ev, 273948, accessType=['READ'])
        syn.setPermissions(ev, 273949, accessType=['READ'])

        # test getPermissions
        permissions = syn.getPermissions(ev, 273949)
        assert ['READ'] == permissions

        permissions = syn.getPermissions(ev, syn.getUserProfile()['ownerId'])
        for p in [
                'READ', 'CREATE', 'DELETE', 'UPDATE', 'CHANGE_PERMISSIONS',
                'READ_PRIVATE_SUBMISSION'
        ]:
            assert p in permissions

        # Test getSubmissions with no Submissions (SYNR-453)
        submissions = syn.getSubmissions(ev)
        assert len(list(submissions)) == 0

        # Increase this to fully exercise paging in getEvaluationSubmissions;
        # it must not be less than 2
        num_of_submissions = 2

        # Create a bunch of Entities and submit them for scoring
        for i in range(num_of_submissions):
            with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
                filename = f.name
                f.write(str(random.gauss(0, 1)) + '\n')

            f = File(filename,
                     parentId=project.id,
                     name='entry-%02d' % i,
                     description='An entry for testing evaluation')
            entity = syn.store(f)

            # annotate the submission entity in order to flex some extra
            # code paths
            annos = syn.get_annotations(entity)
            annos['submissionCount'] = i
            syn.set_annotations(annos)

            entity = syn.get(entity.id)

            last_submission = syn.submit(ev,
                                         entity,
                                         name='Submission %02d' % i,
                                         submitterAlias='My Team')

        # retrieve the submission individually to exercise that call
        submission = syn.getSubmission(last_submission.id)
        assert submission.id == last_submission.id
        assert submission.entity.annotations['submissionCount'] == [
            num_of_submissions - 1
        ]

        # Score the submissions
        submissions = syn.getSubmissions(ev, limit=num_of_submissions - 1)
        for submission in submissions:
            assert re.match(r'Submission \d+', submission['name'])
            status = syn.getSubmissionStatus(submission)
            if submission['name'] == 'Submission 01':
                status.status = 'INVALID'
            else:
                status.status = 'SCORED'
            syn.store(status)

        # Annotate the submissions
        bogosity = {}
        submissions = syn.getSubmissions(ev)
        b = 123
        for submission, status in syn.getSubmissionBundles(ev):

            bogosity[submission.id] = b
            a = dict(foo='bar', bogosity=b)
            b += 123
            status.submissionAnnotations = a
            syn.store(status)

        # Test that the annotations stuck
        for submission, status in syn.getSubmissionBundles(ev):
            a = status.submissionAnnotations
            assert a['foo'] == ['bar']
            assert a['bogosity'] == [bogosity[submission.id]]

        # Test that we can retrieve submissions with a specific status
        invalid_submissions = list(syn.getSubmissions(ev, status='INVALID'))
        assert len(invalid_submissions) == 1, len(invalid_submissions)
        assert invalid_submissions[0]['name'] == 'Submission 01'

        # test that we can retrieve annotations via a submission view
        # retry a few times because this may be related to asynchronous worker activity
        attempts = 8
        sleep_time = 1
        i = 0
        while True:
            try:
                view = SubmissionViewSchema(name="Testing view",
                                            scopes=[ev['id']],
                                            parent=project['id'])
                view_ent = syn.store(view)
                view_table = syn.tableQuery(f"select * from {view_ent.id}")
                viewdf = view_table.asDataFrame()
                assert viewdf['foo'].tolist() == ["bar", "bar"]
                assert viewdf['bogosity'].tolist() == [123, 246]
                assert viewdf['id'].astype(str).tolist() == list(
                    bogosity.keys())
                break
            except (AssertionError, KeyError):
                i += 1
                if i >= attempts:
                    raise

                time.sleep(sleep_time)
                sleep_time *= 2

    finally:
        # Clean up
        syn.delete(ev)

    # Just deleted it. Shouldn't be able to get it.
    pytest.raises(SynapseHTTPError, syn.getEvaluation, ev)
Example #11
def find_or_create_evaluations():
    # create the within cell type classification evaluation object
    try:
        intra_cell_type_evaluation = syn.getEvaluationByName(
            "Within Cell Type Classification")
    except Exception:  # not found; create it
        intra_cell_type_evaluation = syn.store(
            Evaluation(
                name="Within Cell Type Classification",
                contentSource=project.id,
                status="OPEN",
                submissionInstructionsMessage=
                "Submit a tsv file containing predicted bin labels.",
                submissionReceiptMessage=
                "Your submission has been received.",
                quota=dict(numberOfRounds=1,
                           roundDurationMillis=1000*60*60*48,  # 48 hours
                           submissionLimit=20,
                           firstRoundStart=datetime.datetime.now().strftime(
                               synapseclient.utils.ISO_FORMAT))))

    # create the between cell type classification evaluation object
    try:
        inter_cell_type_evaluation = syn.getEvaluationByName(
            "Between Cell Type Classification")
    except Exception:  # not found; create it
        inter_cell_type_evaluation = syn.store(
            Evaluation(
                name="Between Cell Type Classification",
                contentSource=project.id,
                status="OPEN",
                submissionInstructionsMessage=
                "Submit a tsv file containing predicted bin labels.",
                submissionReceiptMessage=
                "Your submission has been received.",
                quota=dict(numberOfRounds=1,
                           roundDurationMillis=1000*60*60*48,  # 48 hours
                           submissionLimit=20,
                           firstRoundStart=datetime.datetime.now().strftime(
                               synapseclient.utils.ISO_FORMAT))))

    # create the between cell type regression evaluation object
    try:
        regression_evaluation = syn.getEvaluationByName(
            "Between Cell Type Regression")
    except Exception:  # not found; create it
        regression_evaluation = syn.store(
            Evaluation(
                name="Between Cell Type Regression",
                contentSource=project.id,
                status="OPEN",
                submissionInstructionsMessage=
                "Submit a tsv file containing predicted average ChIP-seq read coverage.",
                submissionReceiptMessage=
                "Your submission has been received.",
                quota=dict(numberOfRounds=1,
                           roundDurationMillis=1000*60*60*48,  # 48 hours
                           submissionLimit=20,
                           firstRoundStart=datetime.datetime.now().strftime(
                               synapseclient.utils.ISO_FORMAT))))

    return (intra_cell_type_evaluation, inter_cell_type_evaluation,
            regression_evaluation)
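
find_or_create_evaluations reads module-level syn and project objects rather than taking them as parameters. A sketch of the setup it assumes (the project id below is a placeholder):

import synapseclient
syn = synapseclient.login()
project = syn.get("syn000")  # placeholder: the challenge project
intra, inter, regression = find_or_create_evaluations()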
Example #12
def set_up():
    try:

        uuid_suffix = " " + str(uuid.uuid4())

        # Create the Challenge Project
        challenge_project = syn.store(
            Project(name=CHALLENGE_PROJECT_NAME + uuid_suffix))
        print "Created project %s %s" % (challenge_project.id,
                                         challenge_project.name)

        evaluation = syn.store(
            Evaluation(
                name=challenge_project.name,
                contentSource=challenge_project.id,
                status="OPEN",
                submissionInstructionsMessage=
                "To submit to the XYZ Challenge, send a tab-delimited file as described here: https://...",
                submissionReceiptMessage=
                "Your submission has been received. For further information, consult the leader board at https://...",
                quota=dict(
                    numberOfRounds=1,
                    roundDurationMillis=1000 * 60 * 60 * 48,  # 48 hours
                    submissionLimit=20,
                    firstRoundStart=datetime.now().strftime(
                        synapseclient.utils.ISO_FORMAT))))
        print("Created Evaluation %s %s" % (evaluation.id, evaluation.name))

        # Create teams for participants and administrators
        participants_team = syn.store(
            Team(
                name=CHALLENGE_PROJECT_NAME + uuid_suffix + ' Participants',
                description='A team for people who have joined the challenge'))
        print "Created team %s %s" % (participants_team.id,
                                      participants_team.name)

        admin_team = syn.store(
            Team(name=CHALLENGE_PROJECT_NAME + uuid_suffix + ' Administrators',
                 description='A team for challenge administrators'))
        print "Created team %s %s" % (admin_team.id, admin_team.name)

        # give the teams permissions on challenge artifacts
        # see: http://rest.synapse.org/org/sagebionetworks/repo/model/ACCESS_TYPE.html
        # see: http://rest.synapse.org/org/sagebionetworks/evaluation/model/UserEvaluationPermissions.html
        syn.setPermissions(challenge_project, admin_team.id, [
            'CREATE', 'READ', 'UPDATE', 'DELETE', 'CHANGE_PERMISSIONS',
            'DOWNLOAD', 'UPLOAD'
        ])
        syn.setPermissions(challenge_project, participants_team.id,
                           ['READ', 'DOWNLOAD'])
        syn.setPermissions(evaluation, admin_team.id, [
            'CREATE', 'READ', 'UPDATE', 'DELETE', 'CHANGE_PERMISSIONS',
            'DOWNLOAD', 'PARTICIPATE', 'SUBMIT', 'DELETE_SUBMISSION',
            'UPDATE_SUBMISSION', 'READ_PRIVATE_SUBMISSION'
        ])
        syn.setPermissions(evaluation, participants_team.id, [
            'CREATE', 'READ', 'UPDATE', 'PARTICIPATE', 'SUBMIT',
            'READ_PRIVATE_SUBMISSION'
        ])
        ## the challenge object associates the challenge project with the
        ## participants team
        challenge_object = create_challenge_object(challenge_project,
                                                   participants_team)

        # create a team that will make submissions
        my_team = syn.store(
            Team(name="My team" + uuid_suffix,
                 description='A team to make submissions'))

        # register team with challenge
        request_body = {
            'teamId': my_team.id,
            'challengeId': challenge_object.id
        }
        syn.restPOST(
            '/challenge/{challengeId}/challengeTeam'.format(
                challengeId=challenge_object.id), json.dumps(request_body))

        # Create the participant project
        participant_project = syn.store(
            Project(name=PARTICIPANT_PROJECT_NAME + uuid_suffix))
        print "Created project %s %s" % (participant_project.id,
                                         participant_project.name)

        participant_file = syn.store(
            File(synapseclient.utils.make_bogus_data_file(),
                 parent=participant_project))

        # Write challenge config file, which is just an ordinary python
        # script that can be manually edited later.
        current_user = syn.getUserProfile()
        write_config(challenge_syn_id=challenge_project.id,
                     challenge_name=CHALLENGE_PROJECT_NAME,
                     admin_user_ids=[current_user.ownerId],
                     evaluation_queues=[evaluation])

        return dict(challenge_project=challenge_project,
                    challenge_object=challenge_object,
                    evaluation=evaluation,
                    participant_project=participant_project,
                    participant_file=participant_file,
                    participants_team=participants_team,
                    admin_team=admin_team,
                    my_team=my_team,
                    uuid_suffix=uuid_suffix)

    except Exception as ex:
        tear_down(locals())
        raise