def set_up_managers(self):
    super(TagManagerTestCase, self).set_up_managers()
    self.hda_manager = hdas.HDAManager(self.app)
    self.history_manager = HistoryManager(self.app)
    self.dataset_manager = DatasetManager(self.app)
    self.tag_manager = GalaxyTagManager(self.trans.sa_session)
    self.user = self.user_manager.create(**user2_data)
Example #2
class HDATestCase(BaseTestCase):
    def set_up_managers(self):
        super(HDATestCase, self).set_up_managers()
        self.hda_manager = hdas.HDAManager(self.app)
        self.history_manager = HistoryManager(self.app)
        self.dataset_manager = DatasetManager(self.app)

    def _create_vanilla_hda(self, user_data=None):
        user_data = user_data or user2_data
        owner = self.user_manager.create(**user_data)
        history1 = self.history_manager.create(name="history1", user=owner)
        dataset1 = self.dataset_manager.create()
        return self.hda_manager.create(history=history1, dataset=dataset1)
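
A usage sketch only (a hypothetical test method, not part of the original source) showing how a test built on this fixture could exercise the helper; model is the Galaxy model module these test modules already use elsewhere:

    def test_create_vanilla_hda(self):
        # The helper wires a fresh Dataset into a new 'history1' owned by user2.
        hda = self._create_vanilla_hda()
        self.assertIsInstance(hda, model.HistoryDatasetAssociation)
        self.assertEqual(hda.history.name, "history1")
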
class DatasetDeserializerTestCase( BaseTestCase ):

    def set_up_managers( self ):
        super( DatasetDeserializerTestCase, self ).set_up_managers()
        self.dataset_manager = DatasetManager( self.app )
        self.dataset_deserializer = DatasetDeserializer( self.app )

    def test_deserialize_delete( self ):
        dataset = self.dataset_manager.create()

        self.log( 'should raise when deserializing deleted from non-bool' )
        self.assertFalse( dataset.deleted )
        self.assertRaises( exceptions.RequestParameterInvalidException,
            self.dataset_deserializer.deserialize, dataset, data={ 'deleted': None } )
        self.assertFalse( dataset.deleted )
        self.log( 'should be able to deserialize deleted from True' )
        self.dataset_deserializer.deserialize( dataset, data={ 'deleted': True } )
        self.assertTrue( dataset.deleted )
        self.log( 'should be able to reverse by deserializing deleted from False' )
        self.dataset_deserializer.deserialize( dataset, data={ 'deleted': False } )
        self.assertFalse( dataset.deleted )

    def test_deserialize_purge( self ):
        dataset = self.dataset_manager.create()

        self.log( 'should raise when deserializing purged from non-bool' )
        self.assertRaises( exceptions.RequestParameterInvalidException,
            self.dataset_deserializer.deserialize, dataset, data={ 'purged': None } )
        self.assertFalse( dataset.purged )
        self.log( 'should be able to deserialize purged from True' )
        self.dataset_deserializer.deserialize( dataset, data={ 'purged': True } )
        self.assertTrue( dataset.purged )
        # TODO: should this raise an error?
        self.log( 'should NOT be able to deserialize purged from False (will remain True)' )
        self.dataset_deserializer.deserialize( dataset, data={ 'purged': False } )
        self.assertTrue( dataset.purged )
Example #4
File: jobs.py  Project: biomarble/galaxy
class JobManager:
    def __init__(self, app):
        self.app = app
        self.dataset_manager = DatasetManager(app)

    def job_lock(self):
        return JobLock(active=self.app.job_manager.job_lock)

    def update_job_lock(self, job_lock: JobLock):
        self.app.queue_worker.send_control_task(
            'admin_job_lock',
            kwargs={'job_lock': job_lock.active},
            get_response=True)
        return self.job_lock()

    def get_accessible_job(self, trans, decoded_job_id):
        job = trans.sa_session.query(trans.app.model.Job).filter(
            trans.app.model.Job.id == decoded_job_id).first()
        if job is None:
            raise ObjectNotFound()
        belongs_to_user = (
            job.user_id == trans.user.id) if job.user_id and trans.user else (
                job.session_id == trans.get_galaxy_session().id)
        if not trans.user_is_admin and not belongs_to_user:
            # Check access granted via output datasets.
            if not job.output_datasets:
                raise ItemAccessibilityException("Job has no output datasets.")
            for data_assoc in job.output_datasets:
                if not self.dataset_manager.is_accessible(
                        data_assoc.dataset.dataset, trans.user):
                    raise ItemAccessibilityException(
                        "You are not allowed to rerun this job.")
        trans.sa_session.refresh(job)
        return job

    def stop(self, job, message=None):
        if not job.finished:
            job.mark_deleted(self.app.config.track_jobs_in_database)
            self.app.model.context.current.flush()
            self.app.job_manager.stop(job, message=message)
            return True
        else:
            return False
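
A minimal usage sketch (not from the source) of the two entry points above; app, trans and decoded_job_id are assumed to come from the surrounding Galaxy request context, and the exceptions are the ones get_accessible_job itself raises:

job_manager = JobManager(app)

try:
    job = job_manager.get_accessible_job(trans, decoded_job_id)
except ObjectNotFound:
    # No job with that decoded id exists.
    raise
except ItemAccessibilityException:
    # Caller is neither admin nor owner, and has no access via output datasets.
    raise

# stop() returns True only if the job had not finished yet; it marks the job
# deleted, flushes, and hands it to app.job_manager for actual termination.
was_stopped = job_manager.stop(job, message="Stopped by administrator")
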
Example #5
class JobManager(object):

    def __init__(self, app):
        self.app = app
        self.dataset_manager = DatasetManager(app)

    def get_accessible_job(self, trans, decoded_job_id):
        job = trans.sa_session.query(trans.app.model.Job).filter(trans.app.model.Job.id == decoded_job_id).first()
        if job is None:
            raise ObjectNotFound()
        belongs_to_user = (job.user == trans.user) if job.user else (job.session_id == trans.get_galaxy_session().id)
        if not trans.user_is_admin and not belongs_to_user:
            # Check access granted via output datasets.
            if not job.output_datasets:
                raise ItemAccessibilityException("Job has no output datasets.")
            for data_assoc in job.output_datasets:
                if not self.dataset_manager.is_accessible(data_assoc.dataset.dataset, trans.user):
                    raise ItemAccessibilityException("You are not allowed to rerun this job.")
        trans.sa_session.refresh(job)
        return job
class TagManagerTestCase(BaseTestCase):

    def set_up_managers(self):
        super(TagManagerTestCase, self).set_up_managers()
        self.hda_manager = hdas.HDAManager(self.app)
        self.history_manager = HistoryManager(self.app)
        self.dataset_manager = DatasetManager(self.app)
        self.tag_manager = GalaxyTagManager(self.trans.sa_session)
        self.user = self.user_manager.create(**user2_data)

    def _create_vanilla_hda(self, user=None):
        owner = user or self.user
        history1 = self.history_manager.create(name='history1', user=owner)
        dataset1 = self.dataset_manager.create()
        return self.hda_manager.create(history=history1, dataset=dataset1)

    def _check_tag_list(self, tags, expected_tags):
        self.assertEqual(len(tags), len(expected_tags))
        actual_tags = []
        for tag in tags:
            if tag.user_value:
                tag = "%s:%s" % (tag.user_tname, tag.user_value)
            else:
                tag = tag.user_tname
            actual_tags.append(tag)
        expected = [unicodify(e) for e in expected_tags]
        assert sorted(expected) == sorted(actual_tags), "%s vs %s" % (expected, actual_tags)

    def test_apply_item_tags(self):
        tag_strings = [
            'tag1',
            'tag1:value1',
            'tag1:value1:value11',
            '\x00tag1',
            'tag1:\x00value1',
            'tag1,tag2'
        ]
        expected_tags = [
            ['tag1'],
            ['tag1:value1'],
            ['tag1:value1:value11'],
            ['tag1'],
            ['tag1:value1'],
            ['tag1', 'tag2']
        ]
        for tag_string, expected_tag in zip(tag_strings, expected_tags):
            hda = self._create_vanilla_hda()
            self.tag_manager.apply_item_tags(user=self.user, item=hda, tags_str=tag_string)
            self._check_tag_list(hda.tags, expected_tag)

    def test_set_tag_from_list(self):
        hda = self._create_vanilla_hda()
        tags = ['tag1', 'tag2']
        self.tag_manager.set_tags_from_list(self.user, hda, tags)
        self._check_tag_list(hda.tags, tags)
        # Setting tags should erase previous tags
        self.tag_manager.set_tags_from_list(self.user, hda, ['tag1'])
        self._check_tag_list(hda.tags, expected_tags=['tag1'])

    def test_add_tag_from_list(self):
        hda = self._create_vanilla_hda()
        tags = ['tag1', 'tag2']
        self.tag_manager.add_tags_from_list(self.user, hda, tags)
        self._check_tag_list(tags=hda.tags, expected_tags=tags)
        # Adding tags should keep previous tags
        self.tag_manager.add_tags_from_list(self.user, hda, ['tag3'])
        self._check_tag_list(hda.tags, expected_tags=['tag1', 'tag2', 'tag3'])

    def test_remove_tag_from_list(self):
        hda = self._create_vanilla_hda()
        tags = ['tag1', 'tag2']
        self.tag_manager.set_tags_from_list(self.user, hda, tags)
        self._check_tag_list(hda.tags, tags)
        self.tag_manager.remove_tags_from_list(self.user, hda, ['tag1'])
        self._check_tag_list(hda.tags, ['tag2'])

    def test_delete_item_tags(self):
        hda = self._create_vanilla_hda()
        tags = ['tag1']
        self.tag_manager.set_tags_from_list(self.user, hda, tags)
        self.tag_manager.delete_item_tags(user=self.user, item=hda)
        self.assertEqual(hda.tags, [])

    def test_item_has_tag(self):
        hda = self._create_vanilla_hda()
        tags = ['tag1']
        self.tag_manager.set_tags_from_list(self.user, hda, tags)
        self.assertTrue(self.tag_manager.item_has_tag(self.user, item=hda, tag='tag1'))
        # ItemTagAssociation
        self.assertTrue(self.tag_manager.item_has_tag(self.user, item=hda, tag=hda.tags[0]))
        # Tag
        self.assertTrue(self.tag_manager.item_has_tag(self.user, item=hda, tag=hda.tags[0].tag))
        self.assertFalse(self.tag_manager.item_has_tag(self.user, item=hda, tag='tag2'))
class DatasetSerializerTestCase( BaseTestCase ):

    def set_up_managers( self ):
        super( DatasetSerializerTestCase, self ).set_up_managers()
        self.dataset_manager = DatasetManager( self.app )
        self.dataset_serializer = DatasetSerializer( self.app )

    def test_views( self ):
        dataset = self.dataset_manager.create()

        self.log( 'should have a summary view' )
        summary_view = self.dataset_serializer.serialize_to_view( dataset, view='summary' )
        self.assertKeys( summary_view, self.dataset_serializer.views[ 'summary' ] )

        self.log( 'should have the summary view as default view' )
        default_view = self.dataset_serializer.serialize_to_view( dataset, default_view='summary' )
        self.assertKeys( default_view, self.dataset_serializer.views[ 'summary' ] )

        self.log( 'should have a serializer for all serializable keys' )
        for key in self.dataset_serializer.serializable_keyset:
            instantiated_attribute = getattr( dataset, key, None )
            if not ( ( key in self.dataset_serializer.serializers )
                  or ( isinstance( instantiated_attribute, self.TYPES_NEEDING_NO_SERIALIZERS ) ) ):
                self.fail( 'no serializer for: %s (%s)' % ( key, instantiated_attribute ) )
        else:
            self.assertTrue( True, 'all serializable keys have a serializer' )

    def test_views_and_keys( self ):
        dataset = self.dataset_manager.create()

        self.log( 'should be able to use keys with views' )
        serialized = self.dataset_serializer.serialize_to_view( dataset,
            view='summary', keys=[ 'permissions' ] )
        self.assertKeys( serialized,
            self.dataset_serializer.views[ 'summary' ] + [ 'permissions' ] )

        self.log( 'should be able to use keys on their own' )
        serialized = self.dataset_serializer.serialize_to_view( dataset,
            keys=[ 'purgable', 'file_size' ] )
        self.assertKeys( serialized, [ 'purgable', 'file_size' ] )

    def test_serialize_permissions( self ):
        dataset = self.dataset_manager.create()
        self.log( 'serialized permissions should be well formed' )

    def test_serializers( self ):
        user2 = self.user_manager.create( **user2_data )
        dataset = self.dataset_manager.create()
        all_keys = list( self.dataset_serializer.serializable_keyset )
        serialized = self.dataset_serializer.serialize( dataset, all_keys )

        self.log( 'everything serialized should be of the proper type' )
        self.assertEncodedId( serialized[ 'id' ] )
        self.assertDate( serialized[ 'create_time' ] )
        self.assertDate( serialized[ 'update_time' ] )

        self.assertUUID( serialized[ 'uuid' ] )
        self.assertIsInstance( serialized[ 'state' ], basestring )
        self.assertIsInstance( serialized[ 'deleted' ], bool )
        self.assertIsInstance( serialized[ 'purged' ], bool )
        self.assertIsInstance( serialized[ 'purgable' ], bool )

        # # TODO: no great way to do these with mocked dataset
        # self.assertIsInstance( serialized[ 'file_size' ], int )
        # self.assertIsInstance( serialized[ 'total_size' ], int )

        self.log( 'serialized should jsonify well' )
        self.assertIsJsonifyable( serialized )
Example #8
class TagHandlerTestCase(BaseTestCase):
    def set_up_managers(self):
        super().set_up_managers()
        self.hda_manager = hdas.HDAManager(self.app)
        self.history_manager = HistoryManager(self.app)
        self.dataset_manager = DatasetManager(self.app)
        self.tag_handler = GalaxyTagHandler(self.trans.sa_session)
        self.user = self.user_manager.create(**user2_data)

    def _create_vanilla_hda(self, user=None):
        owner = user or self.user
        history1 = self.history_manager.create(name='history1', user=owner)
        dataset1 = self.dataset_manager.create()
        return self.hda_manager.create(history=history1, dataset=dataset1)

    def _check_tag_list(self, tags, expected_tags):
        self.assertEqual(len(tags), len(expected_tags))
        actual_tags = []
        for tag in tags:
            if tag.user_value:
                tag = f"{tag.user_tname}:{tag.user_value}"
            else:
                tag = tag.user_tname
            actual_tags.append(tag)
        expected = [unicodify(e) for e in expected_tags]
        assert sorted(expected) == sorted(
            actual_tags), f"{expected} vs {actual_tags}"

    def test_apply_item_tags(self):
        tag_strings = [
            'tag1',
            'tag1:value1',
            'tag1:value1:value11',
            '\x00tag1',
            'tag1:\x00value1',
            'tag1,tag2',
            '...',
            '.test',
            'test.a.b',
        ]
        expected_tags = [
            ['tag1'],
            ['tag1:value1'],
            ['tag1:value1:value11'],
            ['tag1'],
            ['tag1:value1'],
            ['tag1', 'tag2'],
            [],
            ['test'],
            ['test.a.b'],
        ]
        for tag_string, expected_tag in zip(tag_strings, expected_tags):
            hda = self._create_vanilla_hda()
            self.tag_handler.apply_item_tags(user=self.user,
                                             item=hda,
                                             tags_str=tag_string)
            self._check_tag_list(hda.tags, expected_tag)

    def test_set_tag_from_list(self):
        hda = self._create_vanilla_hda()
        tags = ['tag1', 'tag2']
        self.tag_handler.set_tags_from_list(self.user, hda, tags)
        self._check_tag_list(hda.tags, tags)
        # Setting tags should erase previous tags
        self.tag_handler.set_tags_from_list(self.user, hda, ['tag1'])
        self._check_tag_list(hda.tags, expected_tags=['tag1'])

    def test_add_tag_from_list(self):
        hda = self._create_vanilla_hda()
        tags = ['tag1', 'tag2']
        self.tag_handler.add_tags_from_list(self.user, hda, tags)
        self._check_tag_list(tags=hda.tags, expected_tags=tags)
        # Adding tags should keep previous tags
        self.tag_handler.add_tags_from_list(self.user, hda, ['tag3'])
        self._check_tag_list(hda.tags, expected_tags=['tag1', 'tag2', 'tag3'])

    def test_remove_tag_from_list(self):
        hda = self._create_vanilla_hda()
        tags = ['tag1', 'tag2', 'tag3']
        self.tag_handler.set_tags_from_list(self.user, hda, tags)
        self._check_tag_list(hda.tags, tags)
        self.tag_handler.remove_tags_from_list(self.user, hda,
                                               ['tag1', 'tag3'])
        self._check_tag_list(hda.tags, ['tag2'])

    def test_delete_item_tags(self):
        hda = self._create_vanilla_hda()
        tags = ['tag1']
        self.tag_handler.set_tags_from_list(self.user, hda, tags)
        self.tag_handler.delete_item_tags(user=self.user, item=hda)
        self.assertEqual(hda.tags, [])

    def test_item_has_tag(self):
        hda = self._create_vanilla_hda()
        tags = ['tag1']
        self.tag_handler.set_tags_from_list(self.user, hda, tags)
        self.assertTrue(
            self.tag_handler.item_has_tag(self.user, item=hda, tag='tag1'))
        # ItemTagAssociation
        self.assertTrue(
            self.tag_handler.item_has_tag(self.user, item=hda,
                                          tag=hda.tags[0]))
        # Tag
        self.assertTrue(
            self.tag_handler.item_has_tag(self.user,
                                          item=hda,
                                          tag=hda.tags[0].tag))
        self.assertFalse(
            self.tag_handler.item_has_tag(self.user, item=hda, tag='tag2'))
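
For reference, the comma/colon/NUL handling implied by the test_apply_item_tags cases above can be sketched in plain Python. This is only an illustration of the observable behaviour, not GalaxyTagHandler's actual implementation, and it deliberately ignores the dot-normalization cases ('...', '.test'):

def parse_tags_str(tags_str):
    """Rough approximation: commas separate tags, the first colon separates
    a tag name from its value, and NUL bytes are stripped."""
    parsed = []
    for raw in tags_str.split(','):
        raw = raw.replace('\x00', '').strip()
        if not raw:
            continue
        name, _, value = raw.partition(':')
        parsed.append((name, value or None))
    return parsed


assert parse_tags_str('tag1:\x00value1,tag2') == [('tag1', 'value1'), ('tag2', None)]
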
class DatasetManagerTestCase( BaseTestCase ):

    def set_up_managers( self ):
        super( DatasetManagerTestCase, self ).set_up_managers()
        self.dataset_manager = DatasetManager( self.app )

    def test_create( self ):
        self.log( "should be able to create a new Dataset" )
        dataset1 = self.dataset_manager.create()
        self.assertIsInstance( dataset1, model.Dataset )
        self.assertEqual( dataset1, self.trans.sa_session.query( model.Dataset ).get( dataset1.id ) )

    def test_base( self ):
        dataset1 = self.dataset_manager.create()
        dataset2 = self.dataset_manager.create()

        self.log( "should be able to query" )
        datasets = self.trans.sa_session.query( model.Dataset ).all()
        self.assertEqual( self.dataset_manager.list(), datasets )
        self.assertEqual( self.dataset_manager.one( filters=( model.Dataset.id == dataset1.id ) ), dataset1 )
        self.assertEqual( self.dataset_manager.by_id( dataset1.id ), dataset1 )
        self.assertEqual( self.dataset_manager.by_ids( [ dataset2.id, dataset1.id ] ), [ dataset2, dataset1 ] )

        self.log( "should be able to limit and offset" )
        self.assertEqual( self.dataset_manager.list( limit=1 ), datasets[0:1] )
        self.assertEqual( self.dataset_manager.list( offset=1 ), datasets[1:] )
        self.assertEqual( self.dataset_manager.list( limit=1, offset=1 ), datasets[1:2] )

        self.assertEqual( self.dataset_manager.list( limit=0 ), [] )
        self.assertEqual( self.dataset_manager.list( offset=3 ), [] )

        self.log( "should be able to order" )
        self.assertEqual( self.dataset_manager.list( order_by=sqlalchemy.desc( model.Dataset.create_time ) ),
            [ dataset2, dataset1 ] )

    def test_delete( self ):
        item1 = self.dataset_manager.create()

        self.log( "should be able to delete and undelete a dataset" )
        self.assertFalse( item1.deleted )
        self.assertEqual( self.dataset_manager.delete( item1 ), item1 )
        self.assertTrue( item1.deleted )
        self.assertEqual( self.dataset_manager.undelete( item1 ), item1 )
        self.assertFalse( item1.deleted )

    def test_purge_allowed( self ):
        self.trans.app.config.allow_user_dataset_purge = True
        item1 = self.dataset_manager.create()

        self.log( "should purge a dataset if config does allow" )
        self.assertFalse( item1.purged )
        self.assertEqual( self.dataset_manager.purge( item1 ), item1 )
        self.assertTrue( item1.purged )

        self.log( "should delete a dataset when purging" )
        self.assertTrue( item1.deleted )

    def test_purge_not_allowed( self ):
        self.trans.app.config.allow_user_dataset_purge = False
        item1 = self.dataset_manager.create()

        self.log( "should raise an error when purging a dataset if config does not allow" )
        self.assertFalse( item1.purged )
        self.assertRaises( exceptions.ConfigDoesNotAllowException, self.dataset_manager.purge, item1 )
        self.assertFalse( item1.purged )

    def test_create_with_no_permissions( self ):
        self.log( "should be able to create a new Dataset without any permissions" )
        dataset = self.dataset_manager.create()

        permissions = self.dataset_manager.permissions.get( dataset )
        self.assertIsInstance( permissions, tuple )
        self.assertEqual( len( permissions ), 2 )
        manage_permissions, access_permissions = permissions
        self.assertEqual( manage_permissions, [] )
        self.assertEqual( access_permissions, [] )

        user3 = self.user_manager.create( **user3_data )
        self.log( "a dataset without permissions shouldn't be manageable to just anyone" )
        self.assertFalse( self.dataset_manager.permissions.manage.is_permitted( dataset, user3 ) )
        self.log( "a dataset without permissions should be accessible" )
        self.assertTrue( self.dataset_manager.permissions.access.is_permitted( dataset, user3 ) )

    def test_create_public_dataset( self ):
        self.log( "should be able to create a new Dataset and give it some permissions that actually, you know, "
            "might work if there's any justice in this universe" )
        owner = self.user_manager.create( **user2_data )
        owner_private_role = self.user_manager.private_role( owner )
        dataset = self.dataset_manager.create( manage_roles=[ owner_private_role ] )

        permissions = self.dataset_manager.permissions.get( dataset )
        self.assertIsInstance( permissions, tuple )
        self.assertEqual( len( permissions ), 2 )
        manage_permissions, access_permissions = permissions
        self.assertIsInstance( manage_permissions, list )
        self.assertIsInstance( manage_permissions[0], model.DatasetPermissions )
        self.assertEqual( access_permissions, [] )

        user3 = self.user_manager.create( **user3_data )
        self.log( "a public dataset should be manageable to it's owner" )
        self.assertTrue( self.dataset_manager.permissions.manage.is_permitted( dataset, owner ) )
        self.log( "a public dataset shouldn't be manageable to just anyone" )
        self.assertFalse( self.dataset_manager.permissions.manage.is_permitted( dataset, user3 ) )
        self.log( "a public dataset should be accessible" )
        self.assertTrue( self.dataset_manager.permissions.access.is_permitted( dataset, user3 ) )

    def test_create_private_dataset( self ):
        self.log( "should be able to create a new Dataset and give it private permissions" )
        owner = self.user_manager.create( **user2_data )
        owner_private_role = self.user_manager.private_role( owner )
        dataset = self.dataset_manager.create(
            manage_roles=[ owner_private_role ], access_roles=[ owner_private_role ] )

        permissions = self.dataset_manager.permissions.get( dataset )
        self.assertIsInstance( permissions, tuple )
        self.assertEqual( len( permissions ), 2 )
        manage_permissions, access_permissions = permissions
        self.assertIsInstance( manage_permissions, list )
        self.assertIsInstance( manage_permissions[0], model.DatasetPermissions )
        self.assertIsInstance( access_permissions, list )
        self.assertIsInstance( access_permissions[0], model.DatasetPermissions )

        self.log( "a private dataset should be manageable by it's owner" )
        self.assertTrue( self.dataset_manager.permissions.manage.is_permitted( dataset, owner ) )
        self.log( "a private dataset should be accessible to it's owner" )
        self.assertTrue( self.dataset_manager.permissions.access.is_permitted( dataset, owner ) )

        user3 = self.user_manager.create( **user3_data )
        self.log( "a private dataset shouldn't be manageable to just anyone" )
        self.assertFalse( self.dataset_manager.permissions.manage.is_permitted( dataset, user3 ) )
        self.log( "a private dataset shouldn't be accessible to just anyone" )
        self.assertFalse( self.dataset_manager.permissions.access.is_permitted( dataset, user3 ) )
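
Outside the test harness, the permission calls exercised above could be used roughly as follows. This is a sketch only; app, user_manager and user2_data stand for the already-configured application, user manager and fixture data that BaseTestCase provides:

dataset_manager = DatasetManager(app)

owner = user_manager.create(**user2_data)
owner_private_role = user_manager.private_role(owner)

# Restricting both manage and access to the owner's private role yields a
# private dataset; leaving access_roles empty keeps it publicly accessible.
dataset = dataset_manager.create(
    manage_roles=[owner_private_role], access_roles=[owner_private_role])

manage_permissions, access_permissions = dataset_manager.permissions.get(dataset)
assert dataset_manager.permissions.manage.is_permitted(dataset, owner)
assert dataset_manager.permissions.access.is_permitted(dataset, owner)
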
Example #10
def __init__(self, app):
    super(JobController, self).__init__(app)
    self.dataset_manager = DatasetManager(app)
    self.job_search = JobSearch(app)
class DatasetManagerTestCase( BaseTestCase ):

    def set_up_managers( self ):
        super( DatasetManagerTestCase, self ).set_up_managers()
        self.dataset_mgr = DatasetManager( self.app )

    def test_create( self ):
        self.log( "should be able to create a new Dataset" )
        dataset1 = self.dataset_mgr.create( self.trans )
        self.assertIsInstance( dataset1, model.Dataset )
        self.assertEqual( dataset1, self.trans.sa_session.query( model.Dataset ).get( dataset1.id ) )

    def test_base( self ):
        dataset1 = self.dataset_mgr.create( self.trans )
        dataset2 = self.dataset_mgr.create( self.trans )

        self.log( "should be able to query" )
        datasets = self.trans.sa_session.query( model.Dataset ).all()
        self.assertEqual( self.dataset_mgr.list( self.trans ), datasets )
        self.assertEqual( self.dataset_mgr.one( self.trans, filters=( model.Dataset.id == dataset1.id ) ), dataset1 )
        self.assertEqual( self.dataset_mgr.by_id( self.trans, dataset1.id ), dataset1 )
        self.assertEqual( self.dataset_mgr.by_ids( self.trans, [ dataset2.id, dataset1.id ] ), [ dataset2, dataset1 ] )

        self.log( "should be able to limit and offset" )
        self.assertEqual( self.dataset_mgr.list( self.trans, limit=1 ), datasets[0:1] )
        self.assertEqual( self.dataset_mgr.list( self.trans, offset=1 ), datasets[1:] )
        self.assertEqual( self.dataset_mgr.list( self.trans, limit=1, offset=1 ), datasets[1:2] )

        self.assertEqual( self.dataset_mgr.list( self.trans, limit=0 ), [] )
        self.assertEqual( self.dataset_mgr.list( self.trans, offset=3 ), [] )

        self.log( "should be able to order" )
        self.assertEqual( self.dataset_mgr.list( self.trans, order_by=sqlalchemy.desc( model.Dataset.create_time ) ),
            [ dataset2, dataset1 ] )

    def test_delete( self ):
        item1 = self.dataset_mgr.create( self.trans )

        self.log( "should be able to delete and undelete an hda" )
        self.assertFalse( item1.deleted )
        self.assertEqual( self.dataset_mgr.delete( self.trans, item1 ), item1 )
        self.assertTrue( item1.deleted )
        self.assertEqual( self.dataset_mgr.undelete( self.trans, item1 ), item1 )
        self.assertFalse( item1.deleted )

    def test_purge_allowed( self ):
        self.trans.app.config.allow_user_dataset_purge = True
        item1 = self.dataset_mgr.create( self.trans )

        self.log( "should purge an hda if config does allow" )
        self.assertFalse( item1.purged )
        self.assertEqual( self.dataset_mgr.purge( self.trans, item1 ), item1 )
        self.assertTrue( item1.purged )

    def test_purge_not_allowed( self ):
        self.trans.app.config.allow_user_dataset_purge = False
        item1 = self.dataset_mgr.create( self.trans )

        self.log( "should raise an error when purging an hda if config does not allow" )
        self.assertFalse( item1.purged )
        self.assertRaises( exceptions.ConfigDoesNotAllowException, self.dataset_mgr.purge, self.trans, item1 )
        self.assertFalse( item1.purged )

    ##TODO: I'm unclear as to how these work, so I'm kicking this down the road a bit....
    #def test_access_permission( self ):
    #    owner = self.user_mgr.create( self.trans, **user2_data )
    #    dataset = self.dataset_mgr.create( self.trans )
    #    # giving one user access permission makes it non-public, removing access for anyone else
    #    self.dataset_mgr.give_access_permission( self.trans, dataset, owner )
    #
    #    user3 = self.user_mgr.create( self.trans, **user3_data )
    #    user4 = self.user_mgr.create( self.trans,
    #        email='[email protected]', username='******', password=default_password )
    #
    #    self.assertTrue( self.dataset_mgr.has_access_permission( self.trans, dataset, owner ) )
    #    self.assertFalse( self.dataset_mgr.has_access_permission( self.trans, dataset, user3 ) )
    #    self.assertFalse( self.dataset_mgr.has_access_permission( self.trans, dataset, user4 ) )
    #
    #    # should be able to progressively add more roles without removing the previous
    #    self.dataset_mgr.give_access_permission( self.trans, dataset, user3 )
    #    self.assertTrue( self.dataset_mgr.has_access_permission( self.trans, dataset, user3 ) )
    #    self.assertTrue( self.dataset_mgr.has_access_permission( self.trans, dataset, owner ) )
    #    self.assertFalse( self.dataset_mgr.has_access_permission( self.trans, dataset, user4 ) )
    #
    #    #self.assertTrue( self.dataset_mgr.is_accessible( self.trans, dataset, owner ) )
    #    #self.assertFalse( self.dataset_mgr.is_accessible( self.trans, dataset, non_owner ) )

    def test_accessible( self ):
        owner = self.user_mgr.create( self.trans, **user2_data )
        non_owner = self.user_mgr.create( self.trans, **user3_data )

        dataset = self.dataset_mgr.create( self.trans )

        self.log( "(by default, dataset permissions are lax) should be accessible to all" )
        for user in self.user_mgr.list( self.trans ):
            self.assertTrue( self.dataset_mgr.is_accessible( self.trans, dataset, user ) )
Example #12
class DatasetSerializerTestCase(BaseTestCase):
    def set_up_managers(self):
        super(DatasetSerializerTestCase, self).set_up_managers()
        self.dataset_manager = DatasetManager(self.app)
        self.dataset_serializer = DatasetSerializer(self.app)

    def test_views(self):
        dataset = self.dataset_manager.create()

        self.log('should have a summary view')
        summary_view = self.dataset_serializer.serialize_to_view(
            dataset, view='summary')
        self.assertKeys(summary_view, self.dataset_serializer.views['summary'])

        self.log('should have the summary view as default view')
        default_view = self.dataset_serializer.serialize_to_view(
            dataset, default_view='summary')
        self.assertKeys(default_view, self.dataset_serializer.views['summary'])

        self.log('should have a serializer for all serializable keys')
        for key in self.dataset_serializer.serializable_keyset:
            instantiated_attribute = getattr(dataset, key, None)
            if not ((key in self.dataset_serializer.serializers) or
                    (isinstance(instantiated_attribute,
                                self.TYPES_NEEDING_NO_SERIALIZERS))):
                self.fail('no serializer for: %s (%s)' %
                          (key, instantiated_attribute))
        else:
            self.assertTrue(True, 'all serializable keys have a serializer')

    def test_views_and_keys(self):
        dataset = self.dataset_manager.create()

        self.log('should be able to use keys with views')
        serialized = self.dataset_serializer.serialize_to_view(
            dataset, view='summary', keys=['permissions'])
        self.assertKeys(
            serialized,
            self.dataset_serializer.views['summary'] + ['permissions'])

        self.log('should be able to use keys on their own')
        serialized = self.dataset_serializer.serialize_to_view(
            dataset, keys=['purgable', 'file_size'])
        self.assertKeys(serialized, ['purgable', 'file_size'])

    def test_serialize_permissions(self):
        dataset = self.dataset_manager.create()
        self.log('serialized permissions should be well formed')

    def test_serializers(self):
        user2 = self.user_manager.create(**user2_data)
        dataset = self.dataset_manager.create()
        all_keys = list(self.dataset_serializer.serializable_keyset)
        serialized = self.dataset_serializer.serialize(dataset, all_keys)

        self.log('everything serialized should be of the proper type')
        self.assertEncodedId(serialized['id'])
        self.assertDate(serialized['create_time'])
        self.assertDate(serialized['update_time'])

        self.assertUUID(serialized['uuid'])
        self.assertIsInstance(serialized['state'], basestring)
        self.assertIsInstance(serialized['deleted'], bool)
        self.assertIsInstance(serialized['purged'], bool)
        self.assertIsInstance(serialized['purgable'], bool)

        # # TODO: no great way to do these with mocked dataset
        # self.assertIsInstance( serialized[ 'file_size' ], int )
        # self.assertIsInstance( serialized[ 'total_size' ], int )

        self.log('serialized should jsonify well')
        self.assertIsJsonifyable(serialized)
class DatasetCollectionManagerTestCase( BaseTestCase ):

    def set_up_managers( self ):
        super( DatasetCollectionManagerTestCase, self ).set_up_managers()
        self.dataset_manager = DatasetManager( self.app )
        self.hda_manager = HDAManager( self.app )
        self.history_manager = HistoryManager( self.app )
        self.collection_manager = DatasetCollectionManager( self.app )

    def build_element_identifiers( self, elements ):
        identifier_list = []
        for element in elements:
            src = 'hda'
            # if isinstance( element, model.DatasetCollection ):
            #    src = 'collection'#?
            # elif isinstance( element, model.LibraryDatasetDatasetAssociation ):
            #    src = 'ldda'#?
            encoded_id = self.trans.security.encode_id( element.id )
            identifier_list.append( dict( src=src, name=element.name, id=encoded_id ) )
        return identifier_list

    def test_create_simple_list( self ):
        owner = self.user_manager.create( **user2_data )

        history = self.history_manager.create( name='history1', user=owner )

        hda1 = self.hda_manager.create( name='one',
            history=history, dataset=self.dataset_manager.create() )
        hda2 = self.hda_manager.create( name='two',
            history=history, dataset=self.dataset_manager.create() )
        hda3 = self.hda_manager.create( name='three',
            history=history, dataset=self.dataset_manager.create() )

        self.log( "should be able to create a new Collection via ids" )
        element_identifiers = self.build_element_identifiers( [ hda1, hda2, hda3 ] )
        hdca = self.collection_manager.create( self.trans, history, 'test collection', 'list',
                                           element_identifiers=element_identifiers )
        self.assertIsInstance( hdca, model.HistoryDatasetCollectionAssociation )
        self.assertEqual( hdca.name, 'test collection' )
        self.assertEqual( hdca.hid, 4 )
        self.assertFalse( hdca.deleted )
        self.assertTrue( hdca.visible )

        # print 'hdca dir:'
        # for k in dir( hdca ):
        #     print k, getattr( hdca, k, '(?)' )

        self.log( "should contain an underlying, well-formed DatasetCollection" )
        self.assertIsInstance( hdca.collection, model.DatasetCollection )
        collection = hdca.collection
        self.assertEqual( collection.collection_type, 'list' )
        self.assertEqual( collection.state, 'ok' )
        self.assertEqual( len( collection.dataset_instances ), 3 )
        self.assertEqual( len( collection.elements ), 3 )

        # print 'hdca.collection dir:'
        # for k in dir( hdca.collection ):
        #     print k, getattr( hdca.collection, k, '(?)' )

        # elements = collection.elements
        # print 'hdca.collection element dir:'
        # for k in dir( elements[0] ):
        #     print k, getattr( elements[0], k, '(?)' )

        self.log( "and that collection should have three well-formed Elements" )
        self.assertIsInstance( collection.elements[0], model.DatasetCollectionElement )
        self.assertEqual( collection.elements[0].element_identifier, 'one' )
        self.assertEqual( collection.elements[0].element_index, 0 )
        self.assertEqual( collection.elements[0].element_type, 'hda' )
        self.assertEqual( collection.elements[0].element_object, hda1 )

        self.assertIsInstance( collection.elements[1], model.DatasetCollectionElement )
        self.assertEqual( collection.elements[1].element_identifier, 'two' )
        self.assertEqual( collection.elements[1].element_index, 1 )
        self.assertEqual( collection.elements[1].element_type, 'hda' )
        self.assertEqual( collection.elements[1].element_object, hda2 )

        self.assertIsInstance( collection.elements[2], model.DatasetCollectionElement )
        self.assertEqual( collection.elements[2].element_identifier, 'three' )
        self.assertEqual( collection.elements[2].element_index, 2 )
        self.assertEqual( collection.elements[2].element_type, 'hda' )
        self.assertEqual( collection.elements[2].element_object, hda3 )

        self.log( "should be able to create a new Collection via objects" )
        elements = dict( one=hda1, two=hda2, three=hda3 )
        hdca2 = self.collection_manager.create( self.trans, history, 'test collection 2', 'list', elements=elements )
        self.assertIsInstance( hdca2, model.HistoryDatasetCollectionAssociation )

    def test_update_from_dict( self ):
        owner = self.user_manager.create( **user2_data )

        history = self.history_manager.create( name='history1', user=owner )

        hda1 = self.hda_manager.create( name='one',
            history=history, dataset=self.dataset_manager.create() )
        hda2 = self.hda_manager.create( name='two',
            history=history, dataset=self.dataset_manager.create() )
        hda3 = self.hda_manager.create( name='three',
            history=history, dataset=self.dataset_manager.create() )

        elements = dict( one=hda1, two=hda2, three=hda3 )
        hdca = self.collection_manager.create( self.trans, history, 'test collection', 'list', elements=elements )

        self.log( "should be set from a dictionary" )
        self.collection_manager._set_from_dict( self.trans, hdca, {
            'deleted'   : True,
            'visible'   : False,
            'name'      : 'New Name',
            # TODO: doesn't work
            # 'tags'      : [ 'one', 'two', 'three' ]
            # 'annotations'      : [?]
        })
        self.assertEqual( hdca.name, 'New Name' )
        self.assertTrue( hdca.deleted )
        self.assertFalse( hdca.visible )
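
As the test above shows, there are two equivalent ways to hand elements to the collection manager's create call. A condensed sketch (collection_manager, trans, history and hda1 as set up in the test; only the 'hda' src is used here):

# 1. via encoded element identifiers
hdca = collection_manager.create(
    trans, history, 'test collection', 'list',
    element_identifiers=[
        {'src': 'hda', 'name': hda1.name, 'id': trans.security.encode_id(hda1.id)},
    ])

# 2. directly from model objects keyed by element identifier
hdca2 = collection_manager.create(
    trans, history, 'test collection 2', 'list',
    elements={'one': hda1})
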
Example #14
class HDAManagerTestCase( BaseTestCase ):

    def set_up_managers( self ):
        super( HDAManagerTestCase, self ).set_up_managers()
        self.history_mgr = HistoryManager( self.app )
        self.dataset_mgr = DatasetManager( self.app )
        self.hda_mgr = HDAManager( self.app )

    def test_base( self ):
        hda_model = model.HistoryDatasetAssociation
        owner = self.user_mgr.create( self.trans, **user2_data )
        history1 = self.history_mgr.create( self.trans, name='history1', user=owner )
        hda1 = self.hda_mgr.create( self.trans, history=history1, hid=1 )
        hda2 = self.hda_mgr.create( self.trans, history=history1, hid=2 )
        hda3 = self.hda_mgr.create( self.trans, history=history1, hid=3 )

        self.log( "should be able to query" )
        hdas = self.trans.sa_session.query( hda_model ).all()
        self.assertEqual( self.hda_mgr.list( self.trans ), hdas )
        self.assertEqual( self.hda_mgr.one( self.trans, filters=( hda_model.id == hda1.id ) ), hda1 )
        self.assertEqual( self.hda_mgr.by_id( self.trans, hda1.id ), hda1 )
        self.assertEqual( self.hda_mgr.by_ids( self.trans, [ hda2.id, hda1.id ] ), [ hda2, hda1 ] )

        self.log( "should be able to limit and offset" )
        self.assertEqual( self.hda_mgr.list( self.trans, limit=1 ), hdas[0:1] )
        self.assertEqual( self.hda_mgr.list( self.trans, offset=1 ), hdas[1:] )
        self.assertEqual( self.hda_mgr.list( self.trans, limit=1, offset=1 ), hdas[1:2] )

        self.assertEqual( self.hda_mgr.list( self.trans, limit=0 ), [] )
        self.assertEqual( self.hda_mgr.list( self.trans, offset=3 ), [] )

        self.log( "should be able to order" )
        self.assertEqual( self.hda_mgr.list( self.trans, order_by=sqlalchemy.desc( hda_model.create_time ) ),
            [ hda3, hda2, hda1 ] )

    def test_create( self ):
        owner = self.user_mgr.create( self.trans, **user2_data )
        non_owner = self.user_mgr.create( self.trans, **user3_data )

        history1 = self.history_mgr.create( self.trans, name='history1', user=owner )
        dataset1 = self.dataset_mgr.create( self.trans )

        self.log( "should be able to create a new HDA with a specified history and dataset" )
        hda1 = self.hda_mgr.create( self.trans, history=history1, dataset=dataset1 )
        self.assertIsInstance( hda1, model.HistoryDatasetAssociation )
        self.assertEqual( hda1, self.trans.sa_session.query( model.HistoryDatasetAssociation ).get( hda1.id ) )
        self.assertEqual( hda1.history, history1 )
        self.assertEqual( hda1.dataset, dataset1 )
        self.assertEqual( hda1.hid, 1 )

        self.log( "should be able to create a new HDA with only a specified history and no dataset" )
        hda2 = self.hda_mgr.create( self.trans, history=history1 )
        self.assertIsInstance( hda2, model.HistoryDatasetAssociation )
        self.assertIsInstance( hda2.dataset, model.Dataset )
        self.assertEqual( hda2.history, history1 )
        self.assertEqual( hda2.hid, 2 )

        self.log( "should be able to create a new HDA with no history and no dataset" )
        hda3 = self.hda_mgr.create( self.trans, hid=None )
        self.assertIsInstance( hda3, model.HistoryDatasetAssociation )
        self.assertIsInstance( hda3.dataset, model.Dataset, msg="dataset will be auto created" )
        self.assertIsNone( hda3.history, msg="history will be None" )
        self.assertEqual( hda3.hid, None, msg="should allow setting hid to None (or any other value)" )

    def test_copy_from_hda( self ):
        owner = self.user_mgr.create( self.trans, **user2_data )
        history1 = self.history_mgr.create( self.trans, name='history1', user=owner )
        dataset1 = self.dataset_mgr.create( self.trans )
        hda1 = self.hda_mgr.create( self.trans, history=history1, dataset=dataset1 )

        self.log( "should be able to copy an HDA" )
        hda2 = self.hda_mgr.copy( self.trans, hda1, history=history1 )
        self.assertIsInstance( hda2, model.HistoryDatasetAssociation )
        self.assertEqual( hda2, self.trans.sa_session.query( model.HistoryDatasetAssociation ).get( hda2.id ) )
        self.assertEqual( hda2.name, hda1.name )
        self.assertEqual( hda2.history, hda1.history )
        self.assertEqual( hda2.dataset, hda1.dataset )
        self.assertNotEqual( hda2, hda1 )

    #def test_copy_from_ldda( self ):
    #    owner = self.user_mgr.create( self.trans, **user2_data )
    #    history1 = self.history_mgr.create( self.trans, name='history1', user=owner )
    #
    #    self.log( "should be able to copy an HDA" )
    #    hda2 = self.hda_mgr.copy_ldda( self.trans, history1, hda1 )

    def test_delete( self ):
        owner = self.user_mgr.create( self.trans, **user2_data )
        history1 = self.history_mgr.create( self.trans, name='history1', user=owner )
        dataset1 = self.dataset_mgr.create( self.trans )
        item1 = self.hda_mgr.create( self.trans, history=history1, dataset=dataset1 )

        self.log( "should be able to delete and undelete an hda" )
        self.assertFalse( item1.deleted )
        self.assertEqual( self.hda_mgr.delete( self.trans, item1 ), item1 )
        self.assertTrue( item1.deleted )
        self.assertEqual( self.hda_mgr.undelete( self.trans, item1 ), item1 )
        self.assertFalse( item1.deleted )

    def test_purge_allowed( self ):
        self.trans.app.config.allow_user_dataset_purge = True

        owner = self.user_mgr.create( self.trans, **user2_data )
        history1 = self.history_mgr.create( self.trans, name='history1', user=owner )
        dataset1 = self.dataset_mgr.create( self.trans )
        item1 = self.hda_mgr.create( self.trans, history=history1, dataset=dataset1 )

        self.log( "should purge an hda if config does allow" )
        self.assertFalse( item1.purged )
        self.assertEqual( self.hda_mgr.purge( self.trans, item1 ), item1 )
        self.assertTrue( item1.purged )

    def test_purge_not_allowed( self ):
        self.trans.app.config.allow_user_dataset_purge = False

        owner = self.user_mgr.create( self.trans, **user2_data )
        history1 = self.history_mgr.create( self.trans, name='history1', user=owner )
        dataset1 = self.dataset_mgr.create( self.trans )
        item1 = self.hda_mgr.create( self.trans, history=history1, dataset=dataset1 )

        self.log( "should raise an error when purging an hda if config does not allow" )
        self.assertFalse( item1.purged )
        self.assertRaises( exceptions.ConfigDoesNotAllowException, self.hda_mgr.purge, self.trans, item1 )
        self.assertFalse( item1.purged )

    def test_ownable( self ):
        owner = self.user_mgr.create( self.trans, **user2_data )
        non_owner = self.user_mgr.create( self.trans, **user3_data )

        history1 = self.history_mgr.create( self.trans, name='history1', user=owner )
        dataset1 = self.dataset_mgr.create( self.trans )
        item1 = self.hda_mgr.create( self.trans, history1, dataset1 )

        self.log( "should be able to poll whether a given user owns an item" )
        self.assertTrue(  self.hda_mgr.is_owner( self.trans, item1, owner ) )
        self.assertFalse( self.hda_mgr.is_owner( self.trans, item1, non_owner ) )

        self.log( "should raise an error when checking ownership with non-owner" )
        self.assertRaises( exceptions.ItemOwnershipException,
            self.hda_mgr.error_unless_owner, self.trans, item1, non_owner )

        self.log( "should raise an error when checking ownership with anonymous" )
        self.assertRaises( exceptions.ItemOwnershipException,
            self.hda_mgr.error_unless_owner, self.trans, item1, None )

        self.log( "should not raise an error when checking ownership with owner" )
        self.assertEqual( self.hda_mgr.error_unless_owner( self.trans, item1, owner ), item1 )

        self.log( "should not raise an error when checking ownership with admin" )
        self.assertEqual( self.hda_mgr.error_unless_owner( self.trans, item1, self.admin_user ), item1 )

    def test_accessible( self ):
        owner = self.user_mgr.create( self.trans, **user2_data )
        non_owner = self.user_mgr.create( self.trans, **user3_data )

        history1 = self.history_mgr.create( self.trans, name='history1', user=owner )
        dataset1 = self.dataset_mgr.create( self.trans )
        item1 = self.hda_mgr.create( self.trans, history1, dataset1 )

        self.log( "(by default, dataset permissions are lax) should be accessible to all" )
        for user in self.user_mgr.list( self.trans ):
            self.assertTrue( self.hda_mgr.is_accessible( self.trans, item1, user ) )

        #TODO: set perms on underlying dataset and then test accessible

    def test_anon( self ):
        anon_user = None
        self.trans.set_user( anon_user )

        history1 = self.history_mgr.create( self.trans, name='anon_history', user=anon_user )
        self.trans.set_history( history1 )
        dataset1 = self.dataset_mgr.create( self.trans )
        item1 = self.hda_mgr.create( self.trans, history1, dataset1 )

        self.log( "should not raise an error when checking ownership/access on anonymous' own dataset" )
        self.assertTrue( self.hda_mgr.is_accessible( self.trans, item1, anon_user ) )
        self.assertEqual( self.hda_mgr.error_unless_owner( self.trans, item1, anon_user ), item1 )

        self.log( "should raise an error when checking ownership on anonymous' dataset with other user" )
        non_owner = self.user_mgr.create( self.trans, **user3_data )
        self.assertRaises( exceptions.ItemOwnershipException,
            self.hda_mgr.error_unless_owner, self.trans, item1, non_owner )
Example #15
def __init__(self, app: StructuredApp):
    self.app = app
    self.dataset_manager = DatasetManager(app)
Example #16
def set_up_managers(self):
    super().set_up_managers()
    self.dataset_manager = DatasetManager(self.app)
    self.dataset_serializer = DatasetSerializer(self.app,
                                                self.user_manager)
    self.role_manager = RoleManager(self.app)
Example #17
def set_up_managers(self):
    super().set_up_managers()
    self.dataset_manager = DatasetManager(self.app)

def set_up_managers( self ):
    super( DatasetCollectionManagerTestCase, self ).set_up_managers()
    self.dataset_manager = DatasetManager( self.app )
    self.hda_manager = HDAManager( self.app )
    self.history_manager = HistoryManager( self.app )
    self.collection_manager = DatasetCollectionManager( self.app )
class DatasetSerializerTestCase( BaseTestCase ):

    def set_up_managers( self ):
        super( DatasetSerializerTestCase, self ).set_up_managers()
        self.dataset_manager = DatasetManager( self.app )
        self.dataset_serializer = DatasetSerializer( self.app )
        self.role_manager = RoleManager( self.app )

    def test_views( self ):
        dataset = self.dataset_manager.create()

        self.log( 'should have a summary view' )
        summary_view = self.dataset_serializer.serialize_to_view( dataset, view='summary' )
        self.assertKeys( summary_view, self.dataset_serializer.views[ 'summary' ] )

        self.log( 'should have the summary view as default view' )
        default_view = self.dataset_serializer.serialize_to_view( dataset, default_view='summary' )
        self.assertKeys( default_view, self.dataset_serializer.views[ 'summary' ] )

        self.log( 'should have a serializer for all serializable keys' )
        for key in self.dataset_serializer.serializable_keyset:
            instantiated_attribute = getattr( dataset, key, None )
            if not ( ( key in self.dataset_serializer.serializers ) or
                     ( isinstance( instantiated_attribute, self.TYPES_NEEDING_NO_SERIALIZERS ) ) ):
                self.fail( 'no serializer for: %s (%s)' % ( key, instantiated_attribute ) )
        else:
            self.assertTrue( True, 'all serializable keys have a serializer' )

    def test_views_and_keys( self ):
        dataset = self.dataset_manager.create()

        self.log( 'should be able to use keys with views' )
        serialized = self.dataset_serializer.serialize_to_view( dataset,
            # file_name is exposed using app.config.expose_dataset_path = True
            view='summary', keys=[ 'file_name' ] )
        self.assertKeys( serialized,
            self.dataset_serializer.views[ 'summary' ] + [ 'file_name' ] )

        self.log( 'should be able to use keys on their own' )
        serialized = self.dataset_serializer.serialize_to_view( dataset,
            keys=[ 'purgable', 'file_size' ] )
        self.assertKeys( serialized, [ 'purgable', 'file_size' ] )

    def test_serialize_permissions( self ):
        dataset = self.dataset_manager.create()
        who_manages = self.user_manager.create( **user2_data )
        self.dataset_manager.permissions.manage.grant( dataset, who_manages )

        self.log( 'serialized permissions should be returned for the user who can manage and be well formed' )
        permissions = self.dataset_serializer.serialize_permissions( dataset, 'perms', user=who_manages )
        self.assertIsInstance( permissions, dict )
        self.assertKeys( permissions, [ 'manage', 'access' ] )
        self.assertIsInstance( permissions[ 'manage' ], list )
        self.assertIsInstance( permissions[ 'access' ], list )

        manage_perms = permissions[ 'manage' ]
        self.assertTrue( len( manage_perms ) == 1 )
        role_id = manage_perms[0]
        self.assertEncodedId( role_id )
        role_id = self.app.security.decode_id( role_id )
        role = self.role_manager.get( self.trans, role_id )
        self.assertTrue( who_manages in [ user_role.user for user_role in role.users ])
        # wat

        self.log( 'permissions should not be returned for non-managing users' )
        not_my_supervisor = self.user_manager.create( **user3_data )
        self.assertRaises( SkipAttribute, self.dataset_serializer.serialize_permissions,
            dataset, 'perms', user=not_my_supervisor )

        self.log( 'permissions should not be returned for anon users' )
        self.assertRaises( SkipAttribute, self.dataset_serializer.serialize_permissions,
            dataset, 'perms', user=None )

    def test_serializers( self ):
        # self.user_manager.create( **user2_data )
        dataset = self.dataset_manager.create()
        all_keys = list( self.dataset_serializer.serializable_keyset )
        serialized = self.dataset_serializer.serialize( dataset, all_keys )

        self.log( 'everything serialized should be of the proper type' )
        self.assertEncodedId( serialized[ 'id' ] )
        self.assertDate( serialized[ 'create_time' ] )
        self.assertDate( serialized[ 'update_time' ] )

        self.assertUUID( serialized[ 'uuid' ] )
        self.assertIsInstance( serialized[ 'state' ], basestring )
        self.assertIsInstance( serialized[ 'deleted' ], bool )
        self.assertIsInstance( serialized[ 'purged' ], bool )
        self.assertIsInstance( serialized[ 'purgable' ], bool )

        # # TODO: no great way to do these with mocked dataset
        # self.assertIsInstance( serialized[ 'file_size' ], int )
        # self.assertIsInstance( serialized[ 'total_size' ], int )

        self.log( 'serialized should jsonify well' )
        self.assertIsJsonifyable( serialized )
Example #20
def set_up_managers( self ):
    super( DatasetManagerTestCase, self ).set_up_managers()
    self.dataset_mgr = DatasetManager( self.app )
class DatasetDeserializerTestCase( BaseTestCase ):

    def set_up_managers( self ):
        super( DatasetDeserializerTestCase, self ).set_up_managers()
        self.dataset_manager = DatasetManager( self.app )
        self.dataset_serializer = DatasetSerializer( self.app )
        self.dataset_deserializer = DatasetDeserializer( self.app )
        self.role_manager = RoleManager( self.app )

    def test_deserialize_delete( self ):
        dataset = self.dataset_manager.create()

        self.log( 'should raise when deserializing deleted from non-bool' )
        self.assertFalse( dataset.deleted )
        self.assertRaises( exceptions.RequestParameterInvalidException,
            self.dataset_deserializer.deserialize, dataset, data={ 'deleted': None } )
        self.assertFalse( dataset.deleted )
        self.log( 'should be able to deserialize deleted from True' )
        self.dataset_deserializer.deserialize( dataset, data={ 'deleted': True } )
        self.assertTrue( dataset.deleted )
        self.log( 'should be able to reverse by deserializing deleted from False' )
        self.dataset_deserializer.deserialize( dataset, data={ 'deleted': False } )
        self.assertFalse( dataset.deleted )

    def test_deserialize_purge( self ):
        dataset = self.dataset_manager.create()

        self.log( 'should raise when deserializing purged from non-bool' )
        self.assertRaises( exceptions.RequestParameterInvalidException,
            self.dataset_deserializer.deserialize, dataset, data={ 'purged': None } )
        self.assertFalse( dataset.purged )
        self.log( 'should be able to deserialize purged from True' )
        self.dataset_deserializer.deserialize( dataset, data={ 'purged': True } )
        self.assertTrue( dataset.purged )
        # TODO: should this raise an error?
        self.log( 'should NOT be able to deserialize purged from False (will remain True)' )
        self.dataset_deserializer.deserialize( dataset, data={ 'purged': False } )
        self.assertTrue( dataset.purged )

    def test_deserialize_permissions( self ):
        dataset = self.dataset_manager.create()
        who_manages = self.user_manager.create( **user2_data )
        self.dataset_manager.permissions.manage.grant( dataset, who_manages )
        existing_permissions = self.dataset_serializer.serialize_permissions( dataset, 'permissions', user=who_manages )
        existing_manage_permissions = existing_permissions[ 'manage' ]

        user3 = self.user_manager.create( **user3_data )

        self.log( 'deserializing permissions from a non-dictionary should error' )
        not_a_dict = []
        self.assertRaises( exceptions.RequestParameterInvalidException, self.dataset_deserializer.deserialize,
            dataset, user=who_manages, data={ 'permissions': not_a_dict })

        self.log( 'deserializing permissions from a malformed dictionary should error' )
        self.assertRaises( exceptions.RequestParameterInvalidException, self.dataset_deserializer.deserialize,
            dataset, user=who_manages, data={ 'permissions': dict( nope=[], access=[] ) })

        self.log( 'deserializing permissions with no manage roles should error' )
        self.assertRaises( exceptions.RequestParameterInvalidException, self.dataset_deserializer.deserialize,
            dataset, user=who_manages, data={ 'permissions': dict( manage=[], access=[] ) })

        self.log( 'deserializing permissions using a non-managing user should error' )
        self.assertRaises( rbac_secured.DatasetManagePermissionFailedException, self.dataset_deserializer.deserialize,
            dataset, user=user3, data={ 'permissions': existing_permissions })

        self.log( 'deserializing permissions with a single access should make the dataset private' )
        private_role = self.user_manager.private_role( who_manages )
        private_role = private_role.to_dict( value_mapper={ 'id' : self.app.security.encode_id } )
        permissions = dict( manage=existing_manage_permissions, access=[ private_role[ 'id' ] ] )
        self.dataset_deserializer.deserialize( dataset, user=who_manages, data={
            'permissions': permissions
        })
        self.assertFalse( self.dataset_manager.is_accessible( dataset, user=user3 ) )

        self.log( 'deserializing a new manage role should make the permissions serializable for that user' )
        self.assertRaises( SkipAttribute, self.dataset_serializer.serialize_permissions,
            dataset, 'perms', user=user3 )
        # now, have who_manages give a manage permission to user3
        private_role = self.user_manager.private_role( user3 )
        new_manage_permissions = existing_manage_permissions + [ self.app.security.encode_id( private_role.id ) ]
        permissions = dict( manage=new_manage_permissions, access=[] )
        self.dataset_deserializer.deserialize( dataset, user=who_manages, data={
            'permissions': permissions
        })

        # deserializing for user3 shouldn't throw a skip bc they can manage
        permissions = self.dataset_serializer.serialize_permissions( dataset, 'perms', user=who_manages )
        self.assertEqual( new_manage_permissions, permissions[ 'manage' ] )
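For reference, the permissions payload deserialized above is a plain dictionary with 'manage' and 'access' keys, each holding a list of encoded role ids. A small sketch of assembling such a payload from role objects, mirroring the encode_id usage in the test (the helper name here is illustrative):

def build_permissions_payload( security, manage_roles, access_roles ):
    # Both keys are expected by the deserializer; each value is a list of
    # encoded role ids produced by the application's id encoder.
    return {
        'manage': [ security.encode_id( role.id ) for role in manage_roles ],
        'access': [ security.encode_id( role.id ) for role in access_roles ],
    }

# e.g. build_permissions_payload( self.app.security, [ private_role ], [] )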
Example #22
class JobController(BaseAPIController, UsesLibraryMixinItems):

    def __init__(self, app):
        super(JobController, self).__init__(app)
        self.dataset_manager = DatasetManager(app)
        self.job_search = JobSearch(app)

    @expose_api
    def index(self, trans, **kwd):
        """
        index( trans, state=None, tool_id=None, history_id=None, date_range_min=None, date_range_max=None, user_details=False )
        * GET /api/jobs:
            return jobs for current user

            If the user is an admin and user_details is True, then jobs are
                returned for all Galaxy users, subject to the requested filters (this is an extended service).

        :type   state: string or list
        :param  state: limit listing of jobs to those that match one of the included states. If none, all are returned.
        Valid Galaxy job states include:
                'new', 'upload', 'waiting', 'queued', 'running', 'ok', 'error', 'paused', 'deleted', 'deleted_new'

        :type   tool_id: string or list
        :param  tool_id: limit listing of jobs to those that match one of the included tool_ids. If none, all are returned.

        :type   user_details: boolean
        :param  user_details: if true, and requestor is an admin, will return external job id and user email.

        :type   date_range_min: string '2014-01-01'
        :param  date_range_min: limit the listing of jobs to those updated on or after requested date

        :type   date_range_max: string '2014-12-31'
        :param  date_range_max: limit the listing of jobs to those updated on or before requested date

        :type   history_id: string
        :param  history_id: limit listing of jobs to those that match the history_id. If none, all are returned.

        :rtype:     list
        :returns:   list of dictionaries containing summary job information
        """
        state = kwd.get('state', None)
        is_admin = trans.user_is_admin()
        user_details = kwd.get('user_details', False)

        if is_admin:
            query = trans.sa_session.query(trans.app.model.Job)
        else:
            query = trans.sa_session.query(trans.app.model.Job).filter(trans.app.model.Job.user == trans.user)

        def build_and_apply_filters(query, objects, filter_func):
            if objects is not None:
                if isinstance(objects, string_types):
                    query = query.filter(filter_func(objects))
                elif isinstance(objects, list):
                    t = []
                    for obj in objects:
                        t.append(filter_func(obj))
                    query = query.filter(or_(*t))
            return query

        query = build_and_apply_filters(query, state, lambda s: trans.app.model.Job.state == s)

        query = build_and_apply_filters(query, kwd.get('tool_id', None), lambda t: trans.app.model.Job.tool_id == t)
        query = build_and_apply_filters(query, kwd.get('tool_id_like', None), lambda t: trans.app.model.Job.tool_id.like(t))

        query = build_and_apply_filters(query, kwd.get('date_range_min', None), lambda dmin: trans.app.model.Job.table.c.update_time >= dmin)
        query = build_and_apply_filters(query, kwd.get('date_range_max', None), lambda dmax: trans.app.model.Job.table.c.update_time <= dmax)

        history_id = kwd.get('history_id', None)
        if history_id is not None:
            try:
                decoded_history_id = self.decode_id(history_id)
                query = query.filter(trans.app.model.Job.history_id == decoded_history_id)
            except Exception:
                raise exceptions.ObjectAttributeInvalidException()

        out = []
        if kwd.get('order_by') == 'create_time':
            order_by = trans.app.model.Job.create_time.desc()
        else:
            order_by = trans.app.model.Job.update_time.desc()
        for job in query.order_by(order_by).all():
            job_dict = job.to_dict('collection', system_details=is_admin)
            j = self.encode_all_ids(trans, job_dict, True)
            if user_details:
                j['user_email'] = job.user.email
            out.append(j)

        return out

    @expose_api_anonymous
    def show(self, trans, id, **kwd):
        """
        show( trans, id )
        * GET /api/jobs/{id}:
            return information about the specified job

        :type   id: string
        :param  id: Specific job id

        :type   full: boolean
        :param  full: whether to return extra information

        :rtype:     dictionary
        :returns:   dictionary containing full description of job data
        """
        job = self.__get_job(trans, id)
        is_admin = trans.user_is_admin()
        job_dict = self.encode_all_ids(trans, job.to_dict('element', system_details=is_admin), True)
        full_output = util.asbool(kwd.get('full', 'false'))
        if full_output:
            job_dict.update(dict(stderr=job.stderr, stdout=job.stdout))
            if is_admin:
                if job.user:
                    job_dict['user_email'] = job.user.email
                else:
                    job_dict['user_email'] = None

                def metric_to_dict(metric):
                    metric_name = metric.metric_name
                    metric_value = metric.metric_value
                    metric_plugin = metric.plugin
                    title, value = trans.app.job_metrics.format(metric_plugin, metric_name, metric_value)
                    return dict(
                        title=title,
                        value=value,
                        plugin=metric_plugin,
                        name=metric_name,
                        raw_value=str(metric_value),
                    )

                job_dict['job_metrics'] = [metric_to_dict(metric) for metric in job.metrics]
        return job_dict

    @expose_api
    def inputs(self, trans, id, **kwd):
        """
        inputs( trans, id )
        * GET /api/jobs/{id}/inputs
            returns the input datasets consumed by the job

        :type   id: string
        :param  id: Encoded job id

        :rtype:     list
        :returns:   list of dictionaries describing the job's input dataset associations
        """
        job = self.__get_job(trans, id)
        return self.__dictify_associations(trans, job.input_datasets, job.input_library_datasets)

    @expose_api
    def outputs(self, trans, id, **kwd):
        """
        outputs( trans, id )
        * GET /api/jobs/{id}/outputs
            returns output datasets created by job

        :type   id: string
        :param  id: Encoded job id

        :rtype:     list
        :returns:   list of dictionaries describing the job's output dataset associations
        """
        job = self.__get_job(trans, id)
        return self.__dictify_associations(trans, job.output_datasets, job.output_library_datasets)

    @expose_api
    def delete(self, trans, id, **kwd):
        """
        delete( trans, id )
        * DELETE /api/jobs/{id}
            cancels specified job

        :type   id: string
        :param  id: Encoded job id
        """
        job = self.__get_job(trans, id)
        if not job.finished:
            job.mark_deleted(self.app.config.track_jobs_in_database)
            trans.sa_session.flush()
            self.app.job_manager.job_stop_queue.put(job.id)
            return True
        else:
            return False

    @expose_api_anonymous
    def build_for_rerun(self, trans, id, **kwd):
        """
        * GET /api/jobs/{id}/build_for_rerun
            returns a tool input/param template prepopulated with this job's
            information, suitable for rerunning or rendering parameters of the
            job.

        :type   id: string
        :param  id: Encoded job id

        :rtype:     dictionary
        :returns:   dictionary representing the tool form, prepopulated with the job's parameter values
        """

        job = self.__get_job(trans, id)
        if not job:
            raise exceptions.ObjectNotFound("Could not access job with id '%s'" % id)
        tool = self.app.toolbox.get_tool(job.tool_id, kwd.get('tool_version') or job.tool_version)
        if tool is None:
            raise exceptions.ObjectNotFound("Requested tool not found")
        if not tool.is_workflow_compatible:
            raise exceptions.ConfigDoesNotAllowException("Tool '%s' cannot be rerun." % (job.tool_id))
        return tool.to_json(trans, {}, job=job)

    def __dictify_associations(self, trans, *association_lists):
        rval = []
        for association_list in association_lists:
            rval.extend(self.__dictify_association(trans, a) for a in association_list)
        return rval

    def __dictify_association(self, trans, job_dataset_association):
        dataset_dict = None
        dataset = job_dataset_association.dataset
        if dataset:
            if isinstance(dataset, model.HistoryDatasetAssociation):
                dataset_dict = dict(src="hda", id=trans.security.encode_id(dataset.id))
            else:
                dataset_dict = dict(src="ldda", id=trans.security.encode_id(dataset.id))
        return dict(name=job_dataset_association.name, dataset=dataset_dict)

    def __get_job(self, trans, id):
        try:
            decoded_job_id = self.decode_id(id)
        except Exception:
            raise exceptions.MalformedId()
        job = trans.sa_session.query(trans.app.model.Job).filter(trans.app.model.Job.id == decoded_job_id).first()
        if job is None:
            raise exceptions.ObjectNotFound()
        belongs_to_user = (job.user == trans.user) if job.user else (job.session_id == trans.get_galaxy_session().id)
        if not trans.user_is_admin() and not belongs_to_user:
            # Check access granted via output datasets.
            if not job.output_datasets:
                raise exceptions.ItemAccessibilityException("Job has no output datasets.")
            for data_assoc in job.output_datasets:
                if not self.dataset_manager.is_accessible(data_assoc.dataset.dataset, trans.user):
                    raise exceptions.ItemAccessibilityException("You are not allowed to rerun this job.")
        return job

    @expose_api
    def create(self, trans, payload, **kwd):
        """ See the create method in tools.py in order to submit a job. """
        raise exceptions.NotImplemented('Please POST to /api/tools instead.')

    @expose_api
    def search(self, trans, payload, **kwd):
        """
        search( trans, payload )
        * POST /api/jobs/search:
            return jobs for current user

        :type   payload: dict
        :param  payload: Dictionary containing description of requested job. This is in the same format as
            a request to POST /api/tools would take to initiate a job

        :rtype:     list
        :returns:   list of dictionaries containing summary job information of the jobs that match the requested job run

        This method is designed to scan the list of previously run jobs and find records of jobs that had
        the exact same input parameters and datasets. This can be used to minimize the amount of repeated work
        by simply recycling the old results.
        """
        tool_id = payload.get('tool_id')
        if tool_id is None:
            raise exceptions.ObjectAttributeMissingException("No tool id")
        tool = trans.app.toolbox.get_tool(tool_id)
        if tool is None:
            raise exceptions.ObjectNotFound("Requested tool not found")
        if 'inputs' not in payload:
            raise exceptions.ObjectAttributeMissingException("No inputs defined")
        inputs = payload.get('inputs', {})
        # Find files coming in as multipart file data and add to inputs.
        for k, v in payload.items():
            if k.startswith('files_') or k.startswith('__files_'):
                inputs[k] = v
        request_context = WorkRequestContext(app=trans.app, user=trans.user, history=trans.history)
        all_params, all_errors, _, _ = tool.expand_incoming(trans=trans, incoming=inputs, request_context=request_context)
        if any(all_errors):
            return []
        params_dump = [tool.params_to_strings(param, self.app, nested=True) for param in all_params]
        jobs = []
        for param_dump, param in zip(params_dump, all_params):
            job = self.job_search.by_tool_input(trans=trans,
                                                tool_id=tool_id,
                                                tool_version=tool.version,
                                                param=param,
                                                param_dump=param_dump,
                                                job_state=payload.get('state'))
            if job:
                jobs.append(job)
        return [self.encode_all_ids(trans, single_job.to_dict('element'), True) for single_job in jobs]

    @expose_api
    def error(self, trans, id, **kwd):
        """
        error( trans, id )
        * POST /api/jobs/{id}/error
            submits a bug report via the API.

        :type   id: string
        :param  id: Encoded job id

        :rtype:     dictionary
        :returns:   dictionary containing information regarding where the error report was sent.
        """
        # Get dataset on which this error was triggered
        try:
            decoded_dataset_id = self.decode_id(kwd['dataset_id'])
        except Exception:
            raise exceptions.MalformedId()
        dataset = trans.sa_session.query(trans.app.model.HistoryDatasetAssociation).get(decoded_dataset_id)

        # Get job
        job = self.__get_job(trans, id)
        tool = trans.app.toolbox.get_tool(job.tool_id, tool_version=job.tool_version) or None
        messages = trans.app.error_reports.default_error_plugin.submit_report(
            dataset, job, tool, user_submission=True, user=trans.user,
            email=kwd.get('email', trans.user.email),
            message=kwd.get('message', None)
        )

        return {'messages': messages}
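The routes documented above are ordinary Galaxy API endpoints; the sketch below shows one way a client might call them with the requests library. The base URL and API key are placeholders, and sending the key as a 'key' query parameter is an assumption about how the target instance authenticates API requests.

import requests

GALAXY_URL = 'https://galaxy.example.org'      # placeholder instance URL
API_KEY = 'REPLACE_WITH_YOUR_API_KEY'          # placeholder credential

# List the current user's queued or running jobs (GET /api/jobs).
jobs = requests.get(
    '%s/api/jobs' % GALAXY_URL,
    params={'key': API_KEY, 'state': ['queued', 'running']},
).json()

for job_summary in jobs:
    # Fetch the full record for each job, including stdout/stderr when permitted
    # (GET /api/jobs/{id}?full=true).
    detail = requests.get(
        '%s/api/jobs/%s' % (GALAXY_URL, job_summary['id']),
        params={'key': API_KEY, 'full': 'true'},
    ).json()
    print('%s: %s' % (job_summary['id'], detail.get('state')))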
Example #24
class DatasetCollectionManagerTestCase(BaseTestCase, CreatesCollectionsMixin):

    def set_up_managers(self):
        super(DatasetCollectionManagerTestCase, self).set_up_managers()
        self.dataset_manager = DatasetManager(self.app)
        self.hda_manager = HDAManager(self.app)
        self.history_manager = HistoryManager(self.app)
        self.collection_manager = DatasetCollectionManager(self.app)

    def test_create_simple_list(self):
        owner = self.user_manager.create(**user2_data)

        history = self.history_manager.create(name='history1', user=owner)

        hda1 = self.hda_manager.create(name='one',
            history=history, dataset=self.dataset_manager.create())
        hda2 = self.hda_manager.create(name='two',
            history=history, dataset=self.dataset_manager.create())
        hda3 = self.hda_manager.create(name='three',
            history=history, dataset=self.dataset_manager.create())

        self.log("should be able to create a new Collection via ids")
        element_identifiers = self.build_element_identifiers([hda1, hda2, hda3])
        hdca = self.collection_manager.create(self.trans, history, 'test collection', 'list',
                                           element_identifiers=element_identifiers)
        self.assertIsInstance(hdca, model.HistoryDatasetCollectionAssociation)
        self.assertEqual(hdca.name, 'test collection')
        self.assertEqual(hdca.hid, 4)
        self.assertFalse(hdca.deleted)
        self.assertTrue(hdca.visible)

        self.log("should contain an underlying, well-formed DatasetCollection")
        self.assertIsInstance(hdca.collection, model.DatasetCollection)
        collection = hdca.collection
        self.assertEqual(collection.collection_type, 'list')
        self.assertEqual(collection.state, 'ok')
        self.assertEqual(len(collection.dataset_instances), 3)
        self.assertEqual(len(collection.elements), 3)

        self.log("and that collection should have three well-formed Elements")
        self.assertIsInstance(collection.elements[0], model.DatasetCollectionElement)
        self.assertEqual(collection.elements[0].element_identifier, 'one')
        self.assertEqual(collection.elements[0].element_index, 0)
        self.assertEqual(collection.elements[0].element_type, 'hda')
        self.assertEqual(collection.elements[0].element_object, hda1)

        self.assertIsInstance(collection.elements[1], model.DatasetCollectionElement)
        self.assertEqual(collection.elements[1].element_identifier, 'two')
        self.assertEqual(collection.elements[1].element_index, 1)
        self.assertEqual(collection.elements[1].element_type, 'hda')
        self.assertEqual(collection.elements[1].element_object, hda2)

        self.assertIsInstance(collection.elements[2], model.DatasetCollectionElement)
        self.assertEqual(collection.elements[2].element_identifier, 'three')
        self.assertEqual(collection.elements[2].element_index, 2)
        self.assertEqual(collection.elements[2].element_type, 'hda')
        self.assertEqual(collection.elements[2].element_object, hda3)

        self.log("should be able to create a new Collection via objects")
        elements = dict(one=hda1, two=hda2, three=hda3)
        hdca2 = self.collection_manager.create(self.trans, history, 'test collection 2', 'list', elements=elements)
        self.assertIsInstance(hdca2, model.HistoryDatasetCollectionAssociation)

    def test_update_from_dict(self):
        owner = self.user_manager.create(**user2_data)

        history = self.history_manager.create(name='history1', user=owner)

        hda1 = self.hda_manager.create(name='one',
            history=history, dataset=self.dataset_manager.create())
        hda2 = self.hda_manager.create(name='two',
            history=history, dataset=self.dataset_manager.create())
        hda3 = self.hda_manager.create(name='three',
            history=history, dataset=self.dataset_manager.create())

        elements = dict(one=hda1, two=hda2, three=hda3)
        hdca = self.collection_manager.create(self.trans, history, 'test collection', 'list', elements=elements)

        self.log("should be set from a dictionary")
        self.collection_manager._set_from_dict(self.trans, hdca, {
            'deleted': True,
            'visible': False,
            'name': 'New Name',
            # TODO: doesn't work
            # 'tags'      : [ 'one', 'two', 'three' ]
            # 'annotations'      : [?]
        })
        self.assertEqual(hdca.name, 'New Name')
        self.assertTrue(hdca.deleted)
        self.assertFalse(hdca.visible)
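The element_identifiers passed to collection_manager.create in test_create_simple_list above come from CreatesCollectionsMixin.build_element_identifiers; a sketch of the list-of-dicts structure that call presumably produces for a flat 'list' collection (the name/src/id keys follow the dataset-collection API convention and are an assumption here, not a quote of the mixin):

def build_element_identifiers_sketch(security, hdas):
    # One entry per element: the element name plus a reference to the source
    # HDA by encoded id; list order becomes element_index in the collection.
    return [
        {
            'name': hda.name,
            'src': 'hda',
            'id': security.encode_id(hda.id),
        }
        for hda in hdas
    ]

# e.g. build_element_identifiers_sketch(self.app.security, [hda1, hda2, hda3])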