Example #1
0
def _setup_mapping_and_user():
    """Yield (test_config, object_store, model, history) backed by a fresh
    in-memory database populated with one user and one history."""
    with TestConfig(DISK_TEST_CONFIG) as (test_config, object_store):
        # Stand up the in-memory database and wire the model mapping to it.
        model = mapping.init(
            "/tmp",
            "sqlite:///:memory:",
            create_tables=True,
            object_store=object_store,
            slow_query_log_threshold=SLOW_QUERY_LOG_THRESHOLD,
            thread_local_log=THREAD_LOCAL_LOG,
        )

        user = model.User(email="*****@*****.**", password="******")
        history = model.History(name="HistoryCopyHistory1", user=user)
        model.context.add_all([user, history])
        model.context.flush()
        yield test_config, object_store, model, history
    def test_default_disk_usage(self):
        """Adjusting a fresh user's disk usage persists across an expunge."""
        model = self.model

        user = model.User(email="*****@*****.**", password="******")
        self.persist(user)
        user.adjust_total_disk_usage(1)
        user_id = user.id
        self.expunge()
        # Reload from the database to prove the adjustment was flushed.
        reloaded = model.session.query(model.User).get(user_id)
        assert reloaded.disk_usage == 1
Example #3
0
def _run_jihaw_cleanup(archive_dir, app=None):
    """Persist a job + import-archive pair, run the archive wrapper's cleanup,
    and return (app, cleanup_result)."""
    app = app or _mock_app()
    job = model.Job()
    job.user = model.User(email="*****@*****.**", password='******')
    job.tool_stderr = ''
    archive = model.JobImportHistoryArchive(job=job, archive_dir=archive_dir)
    session = app.model.context.current
    session.add_all([job, archive])
    app.model.context.flush()
    wrapper = JobImportHistoryArchiveWrapper(app, job.id)  # yeehaw!
    return app, wrapper.cleanup_after_job()
Example #4
0
    def test_tasks(self):
        """A persisted Task exposes its prepare_files_cmd as prepare_input_files_cmd."""
        model = self.model
        user = model.User(email="*****@*****.**", password="******")
        job = model.Job()
        job.user = user
        task = model.Task(job=job, working_directory="/tmp", prepare_files_cmd="split.sh")
        self.persist(user, job, task)

        # Query the task back via its job relationship.
        reloaded = model.session.query(model.Task).filter(model.Task.job == job).first()
        assert reloaded.prepare_input_files_cmd == "split.sh"
Example #5
0
    def test_interactive_environ_plugin_load(self):
        """Load an 'ipython' interactive-environment plugin from a mock plugin
        directory and render its template through the registry.

        Verifies plugin discovery, config parsing, and that fill_template
        combines the (mocked) IE request factory output with a freshly
        generated 32-hex-char API key for the user.
        """
        mock_app_dir = galaxy_mock.MockDir({
            'plugins': {
                'ipython': {
                    'config': {
                        'ipython.xml': ipython_config
                    },
                    'templates': {
                        'ipython.mako': ipython_template
                    }
                },
            }
        })
        mock_app = galaxy_mock.MockApp(root=mock_app_dir.root_path)
        plugin_mgr = VisualizationsRegistry(
            mock_app,
            directories_setting='plugins',
            template_cache_dir=mock_app_dir.root_path)
        # use a mock request factory - this will be written into the filled template to show it was used
        plugin_mgr.IE_REQUEST_FACTORY = lambda t, p: 'mock_ie'

        expected_plugins_path = os.path.join(mock_app_dir.root_path, 'plugins')
        expected_plugin_names = ['ipython']

        self.assertEqual(plugin_mgr.base_url, 'visualizations')
        self.assertItemsEqual(plugin_mgr.directories, [expected_plugins_path])
        self.assertItemsEqual(plugin_mgr.plugins.keys(), expected_plugin_names)

        ipython_ie = plugin_mgr.plugins['ipython']
        config = ipython_ie.get('config')

        self.assertEqual(ipython_ie.name, 'ipython')
        self.assertEqual(config.get('plugin_type'), 'interactive_environment')

        # get_api_key needs a user, fill_template a trans
        user = model.User(email="*****@*****.**",
                          password="******")
        trans = galaxy_mock.MockTrans(user=user)

        # should return the (new) api key for the above user (see the template above)
        response = plugin_mgr.fill_template(trans, ipython_ie, 'ipython.mako')
        # BUG FIX: str.strip() returns a new string; its result was previously
        # discarded, leaving any surrounding whitespace on `response`.
        response = response.strip()
        self.assertIsInstance(response, basestring)
        self.assertTrue('-' in response)
        ie_request, api_key = response.split('-')

        self.assertEqual(ie_request, 'mock_ie')

        # The API key must be a 32-character lowercase-hex token at offset 0.
        match = re.match(r'[a-f0-9]{32}', api_key)
        self.assertIsNotNone(match)
        self.assertEqual(match.span(), (0, 32))

        mock_app_dir.remove()
Example #6
0
    def test_basic(self):
        """Smoke-test core mapping: create a user, two histories, and an HDA;
        verify persistence, relationships, metadata, and an update round-trip.
        """
        model = self.model

        # Snapshot the user count so assertions tolerate pre-existing rows.
        original_user_count = len(model.session.query(model.User).all())

        # Make some changes and commit them
        u = model.User(email="*****@*****.**", password="******")
        # gs = model.GalaxySession()
        h1 = model.History(name="History 1", user=u)
        # h1.queries.append( model.Query( "h1->q1" ) )
        # h1.queries.append( model.Query( "h1->q2" ) )
        # NOTE(review): the name is built 1024 chars long but asserted below at
        # 255 — presumably the History.name column truncates to 255; confirm
        # against the model definition.
        h2 = model.History(name=("H" * 1024))
        self.persist(u, h1, h2)
        # q1 = model.Query( "h2->q1" )
        metadata = dict(chromCol=1, startCol=2, endCol=3)
        d1 = model.HistoryDatasetAssociation(extension="interval",
                                             metadata=metadata,
                                             history=h2,
                                             create_dataset=True,
                                             sa_session=model.session)
        # h2.queries.append( q1 )
        # h2.queries.append( model.Query( "h2->q2" ) )
        self.persist(d1)

        # Check
        users = model.session.query(model.User).all()
        assert len(users) == original_user_count + 1
        user = [user for user in users if user.email == "*****@*****.**"][0]
        assert user.email == "*****@*****.**"
        # NOTE(review): literals in this block look redacted ("******"); the
        # assertion references the original plain value — verify upstream.
        assert user.password == "password"
        assert len(user.histories) == 1
        assert user.histories[0].name == "History 1"
        hists = model.session.query(model.History).all()
        hist0 = [history for history in hists
                 if history.name == "History 1"][0]
        hist1 = [history for history in hists if history.name == "H" * 255][0]
        assert hist0.name == "History 1"
        assert hist1.name == ("H" * 255)
        assert hist0.user == user
        # h2 was created without a user, so it must remain anonymous.
        assert hist1.user is None
        assert hist1.datasets[0].metadata.chromCol == 1
        # The filename test has moved to objectstore
        # id = hist1.datasets[0].id
        # assert hist1.datasets[0].file_name == os.path.join( "/tmp", *directory_hash_id( id ) ) + ( "/dataset_%d.dat" % id )
        # Do an update and check
        hist1.name = "History 2b"
        self.expunge()
        hists = model.session.query(model.History).all()
        hist0 = [history for history in hists
                 if history.name == "History 1"][0]
        hist1 = [history for history in hists
                 if history.name == "History 2b"][0]
        assert hist0.name == "History 1"
        assert hist1.name == "History 2b"
Example #7
0
    def test_jobs(self):
        """A Job persisted with a user and tool_id can be queried back by user."""
        model = self.model
        user = model.User(email="*****@*****.**", password="******")
        job = model.Job()
        job.user = user
        job.tool_id = "cat1"

        self.persist(user, job)

        # Look the job up through its user relationship.
        reloaded = model.session.query(model.Job).filter(model.Job.user == user).first()
        assert reloaded.tool_id == "cat1"
Example #8
0
def test_import_export_composite_datasets():
    """Export a composite (extra-files) dataset to a directory model store and
    verify its extra-files tree survives an import into a fresh history.
    """
    app = _mock_app()
    sa_session = app.model.context

    u = model.User(email="*****@*****.**", password="******")
    h = model.History(name="Test History", user=u)

    d1 = _create_datasets(sa_session, h, 1, extension="html")[0]
    d1.dataset.create_extra_files_path()
    sa_session.add_all((h, d1))
    sa_session.flush()

    # Primary dataset payload, written into the object store.
    primary = NamedTemporaryFile("w")
    primary.write("cool primary file")
    primary.flush()
    app.object_store.update_from_file(d1.dataset,
                                      file_name=primary.name,
                                      create=True,
                                      preserve_symlinks=True)

    # Composite child file, stored under parent_dir/ in the dataset's
    # extra-files directory.
    composite1 = NamedTemporaryFile("w")
    composite1.write("cool composite file")
    composite1.flush()

    app.object_store.update_from_file(d1.dataset,
                                      extra_dir=os.path.normpath(
                                          os.path.join(d1.extra_files_path,
                                                       "parent_dir")),
                                      alt_name="child_file",
                                      file_name=composite1.name,
                                      create=True,
                                      preserve_symlinks=True)

    # Export with file copies ("copy"), then import into a brand-new history.
    temp_directory = mkdtemp()
    with store.DirectoryModelExportStore(temp_directory,
                                         app=app,
                                         export_files="copy") as export_store:
        export_store.add_dataset(d1)

    import_history = model.History(name="Test History for Import", user=u)
    sa_session.add(import_history)
    sa_session.flush()
    _perform_import_from_directory(temp_directory, app, u, import_history)
    assert len(import_history.datasets) == 1
    import_dataset = import_history.datasets[0]
    # The extra-files tree (parent_dir/child_file) must be recreated intact,
    # including the child file's contents.
    root_extra_files_path = import_dataset.extra_files_path
    assert len(os.listdir(root_extra_files_path)) == 1
    assert os.listdir(root_extra_files_path)[0] == "parent_dir"
    composite_sub_dir = os.path.join(root_extra_files_path, "parent_dir")
    child_files = os.listdir(composite_sub_dir)
    assert len(child_files) == 1
    with open(os.path.join(composite_sub_dir, child_files[0]), "r") as f:
        contents = f.read()
        assert contents == "cool composite file"
Example #9
0
 def test_collection_get_interface(self):
     """DatasetCollection supports integer indexing over its elements."""
     model = self.model
     user = model.User(email="*****@*****.**", password="******")
     history = model.History(name="History 1", user=user)
     hda = model.HistoryDatasetAssociation(extension="txt", history=history, create_dataset=True, sa_session=model.session)
     collection = model.DatasetCollection(collection_type="list")
     element_count = 100
     created = [
         model.DatasetCollectionElement(collection=collection, element=hda, element_identifier=f"{index}", element_index=index)
         for index in range(element_count)
     ]
     self.persist(user, history, hda, collection, *created, flush=False, expunge=False)
     model.session.flush()
     # Each index must resolve to the element created at that position.
     for index, element in enumerate(created):
         assert collection[index] == element
Example #10
0
def __setup_fixtures(app):
    """Populate the mock app with the users and jobs the rule-helper tests use.

    user1: 3 queued + 2 running jobs on cluster1, 1 queued + 1 running on local.
    user2: 1 queued + 1 running job on cluster1.
    user3: no jobs.
    """
    user1 = model.User(email=USER_EMAIL_1, password="******")
    user2 = model.User(email=USER_EMAIL_2, password="******")
    # BUG FIX: user3 previously reused USER_EMAIL_2 (copy-paste error), which
    # made per-email job counts for user2/user3 ambiguous; per the docstring
    # user3 must be a distinct user with no jobs.
    user3 = model.User(email=USER_EMAIL_3, password="******")

    app.add(user1, user2, user3)

    # user1's cluster1 jobs: three queued, two running.
    for state in ("queued", "queued", "queued", "running", "running"):
        app.add(__new_job(user=user1, destination_id="cluster1", state=state))

    app.add(__new_job(user=user1, destination_id="local", state="queued"))
    app.add(__new_job(user=user1, destination_id="local", state="running"))

    app.add(__new_job(user=user2, destination_id="cluster1", state="queued"))
    app.add(__new_job(user=user2, destination_id="cluster1", state="running"))
Example #11
0
 def test_populated_optimized_ok(self):
     """A fully-defined paired collection reports both populated flags True."""
     model = self.model
     user = model.User(email="*****@*****.**", password="******")
     history = model.History(name="History 1", user=user)
     forward = model.HistoryDatasetAssociation(extension="txt", history=history, create_dataset=True, sa_session=model.session)
     reverse = model.HistoryDatasetAssociation(extension="txt", history=history, create_dataset=True, sa_session=model.session)
     pair = model.DatasetCollection(collection_type='paired')
     element_f = model.DatasetCollectionElement(collection=pair, element=forward, element_identifier="forward", element_index=0)
     element_r = model.DatasetCollectionElement(collection=pair, element=reverse, element_identifier="reverse", element_index=1)
     model.session.add_all([forward, reverse, pair, element_f, element_r])
     model.session.flush()
     assert pair.populated
     assert pair.populated_optimized
Example #12
0
 def __test_workflow(self):
     """Create and persist a StoredWorkflow (with latest Workflow and owning
     user) and return the stored workflow."""
     stored_workflow = model.StoredWorkflow()
     workflow = model.Workflow()
     # Wire the bidirectional stored-workflow <-> workflow relationship.
     workflow.stored_workflow = stored_workflow
     stored_workflow.latest_workflow = workflow
     user = model.User()
     user.email = "*****@*****.**"
     user.password = "******"
     stored_workflow.user = user
     session = self.app.model.context
     session.add(workflow)
     session.add(stored_workflow)
     session.flush()
     return stored_workflow
Example #13
0
def test_job_context_discover_outputs_flushes_once(mocker):
    """Discovering and populating collection outputs must not flush the
    SQLAlchemy session per element — only the single explicit flush at the end.
    """
    app = _mock_app()
    sa_session = app.model.context
    # mocker is a pytest-mock fixture

    u = model.User(email="*****@*****.**", password="******")
    h = model.History(name="Test History", user=u)

    tool = Tool(app)
    tool_provided_metadata = None
    job = model.Job()
    job.history = h
    sa_session.add(job)
    sa_session.flush()
    # Fake job working directory with discoverable output files (see setup_data).
    job_working_directory = tempfile.mkdtemp()
    setup_data(job_working_directory)
    permission_provider = PermissionProvider()
    metadata_source_provider = MetadataSourceProvider()
    object_store = app.object_store
    input_dbkey = '?'
    final_job_state = 'ok'
    # Discover outputs whose element identifier comes from the file name.
    collection_description = FilePatternDatasetCollectionDescription(
        pattern="__name__")
    collection = model.DatasetCollection(collection_type='list',
                                         populated=False)
    sa_session.add(collection)
    job_context = JobContext(tool, tool_provided_metadata, job,
                             job_working_directory, permission_provider,
                             metadata_source_provider, input_dbkey,
                             object_store, final_job_state)
    collection_builder = builder.BoundCollectionBuilder(collection)
    dataset_collectors = [dataset_collector(collection_description)]
    output_name = 'output'
    filenames = job_context.find_files(output_name, collection,
                                       dataset_collectors)
    assert len(filenames) == 10
    # Attach the spy only now, so setup flushes above don't count.
    spy = mocker.spy(sa_session, 'flush')
    job_context.populate_collection_elements(
        collection,
        collection_builder,
        filenames,
        name=output_name,
        metadata_source_name='',
        final_job_state=job_context.final_job_state,
    )
    collection_builder.populate()
    # Core assertion: population itself triggered zero flushes.
    assert spy.call_count == 0
    sa_session.flush()
    assert len(collection.dataset_instances) == 10
    assert collection.dataset_instances[0].dataset.file_size == 1
Example #14
0
 def test_dataset_dbkeys_and_extensions_summary(self):
     """An HDCA summarizes member datasets' dbkeys and extensions as sets."""
     model = self.model
     user = model.User(email="*****@*****.**", password="******")
     history = model.History(name="History 1", user=user)
     forward = model.HistoryDatasetAssociation(extension="bam", dbkey="hg19", history=history, create_dataset=True, sa_session=model.session)
     reverse = model.HistoryDatasetAssociation(extension="txt", dbkey="hg19", history=history, create_dataset=True, sa_session=model.session)
     pair = model.DatasetCollection(collection_type='paired')
     element_f = model.DatasetCollectionElement(collection=pair, element=forward, element_identifier="forward", element_index=0)
     element_r = model.DatasetCollectionElement(collection=pair, element=reverse, element_identifier="reverse", element_index=1)
     hdca = model.HistoryDatasetCollectionAssociation(collection=pair, history=history)
     model.session.add_all([forward, reverse, pair, element_f, element_r, hdca])
     model.session.flush()
     summary = hdca.dataset_dbkeys_and_extensions_summary
     # Both datasets share one dbkey but have distinct extensions.
     assert summary[0] == {"hg19"}
     assert summary[1] == {"bam", "txt"}
Example #15
0
    def test_ratings(self):
        """Every rateable item type persists a rating association that
        round-trips the rating value and the rating user.
        """
        model = self.model

        u = model.User(email="*****@*****.**", password="******")
        self.persist(u)

        def persist_and_check_rating(rating_class, **kwds):
            # Build a rating association via attribute assignment (the kwds
            # carry the item-specific relationship, e.g. stored_workflow=sw).
            rating_association = rating_class()
            rating_association.rating = 5
            rating_association.user = u
            for key, value in kwds.items():
                setattr(rating_association, key, value)
            self.persist(rating_association)
            self.expunge()
            # Reload from the database to prove the association was flushed.
            stored_annotation = self.query(rating_class).all()[0]
            assert stored_annotation.rating == 5
            assert stored_annotation.user.email == "*****@*****.**"

        sw = model.StoredWorkflow()
        sw.user = u
        self.persist(sw)
        persist_and_check_rating(model.StoredWorkflowRatingAssociation, stored_workflow=sw)

        h = model.History(name="History for Rating", user=u)
        self.persist(h)
        persist_and_check_rating(model.HistoryRatingAssociation, history=h)

        d1 = model.HistoryDatasetAssociation(extension="txt", history=h, create_dataset=True, sa_session=model.session)
        self.persist(d1)
        persist_and_check_rating(model.HistoryDatasetAssociationRatingAssociation, hda=d1)

        page = model.Page()
        page.user = u
        self.persist(page)
        persist_and_check_rating(model.PageRatingAssociation, page=page)

        visualization = model.Visualization()
        visualization.user = u
        self.persist(visualization)
        persist_and_check_rating(model.VisualizationRatingAssociation, visualization=visualization)

        # Both collection association types share the same underlying collection.
        dataset_collection = model.DatasetCollection(collection_type="paired")
        history_dataset_collection = model.HistoryDatasetCollectionAssociation(collection=dataset_collection)
        self.persist(history_dataset_collection)
        persist_and_check_rating(model.HistoryDatasetCollectionRatingAssociation, history_dataset_collection=history_dataset_collection)

        library_dataset_collection = model.LibraryDatasetCollectionAssociation(collection=dataset_collection)
        self.persist(library_dataset_collection)
        persist_and_check_rating(model.LibraryDatasetCollectionRatingAssociation, library_dataset_collection=library_dataset_collection)
Example #16
0
    def test_collections_in_library_folders(self):
        """Dataset collections built from LDDAs persist alongside a library
        and its root folder."""
        model = self.model

        user = model.User(email="*****@*****.**", password="******")
        root_folder = model.LibraryFolder(name="RootFolder")
        library = model.Library(name="Library1", root_folder=root_folder)
        library_dataset_1 = model.LibraryDataset()
        library_dataset_2 = model.LibraryDataset()

        # NOTE(review): both LDDAs point at the first library dataset —
        # presumably intentional, but confirm ld2 was not meant for ldda2.
        ldda_1 = model.LibraryDatasetDatasetAssociation(extension="txt", library_dataset=library_dataset_1)
        ldda_2 = model.LibraryDatasetDatasetAssociation(extension="txt", library_dataset=library_dataset_1)

        pair = model.DatasetCollection(collection_type="pair")
        element_1 = model.DatasetCollectionElement(collection=pair, element=ldda_1)
        element_2 = model.DatasetCollectionElement(collection=pair, element=ldda_2)
        self.persist(user, library, root_folder, library_dataset_1, library_dataset_2, pair, ldda_1, ldda_2, element_1, element_2)
Example #17
0
def test_expunge_all(transaction):
    """After expunge_all, changes to the now-detached instance are not flushed."""
    # NOTE(review): the password literals here appear redacted ("******");
    # the assertions reference the original plain values — verify upstream.
    user = model.User("foo", "bar1")
    transaction.sa_session.add(user)

    user.password = "******"
    transaction.sa_session.flush()

    assert transaction.sa_session.query(model.User).first().password == "bar2"

    # Detach every instance from the session.
    transaction.sa_session.expunge_all()

    user.password = "******"
    transaction.sa_session.flush()

    # Password unchanged because the instance is not attached to the session/context.
    assert transaction.sa_session.query(model.User).first().password == "bar2"
Example #18
0
    def test_ratings(self):
        """Every rateable item type persists a rating association (constructed
        positionally as rating_class(user, item, rating)) that round-trips the
        rating value and the rating user.
        """
        model = self.model

        user_email = "*****@*****.**"
        u = model.User(email=user_email, password="******")
        self.persist(u)

        def persist_and_check_rating(rating_class, item):
            # Construct, persist, expunge, and reload to prove the rating
            # and its user survive a database round-trip.
            rating = 5
            rating_association = rating_class(u, item, rating)
            self.persist(rating_association)
            self.expunge()
            stored_rating = self.query(rating_class).all()[0]
            assert stored_rating.rating == rating
            assert stored_rating.user.email == user_email

        sw = model.StoredWorkflow()
        sw.user = u
        self.persist(sw)
        persist_and_check_rating(model.StoredWorkflowRatingAssociation, sw)

        h = model.History(name="History for Rating", user=u)
        self.persist(h)
        persist_and_check_rating(model.HistoryRatingAssociation, h)

        d1 = model.HistoryDatasetAssociation(extension="txt", history=h, create_dataset=True, sa_session=model.session)
        self.persist(d1)
        persist_and_check_rating(model.HistoryDatasetAssociationRatingAssociation, d1)

        page = model.Page()
        page.user = u
        self.persist(page)
        persist_and_check_rating(model.PageRatingAssociation, page)

        visualization = model.Visualization()
        visualization.user = u
        self.persist(visualization)
        persist_and_check_rating(model.VisualizationRatingAssociation, visualization)

        # Both collection association types share the same underlying collection.
        dataset_collection = model.DatasetCollection(collection_type="paired")
        history_dataset_collection = model.HistoryDatasetCollectionAssociation(collection=dataset_collection)
        self.persist(history_dataset_collection)
        persist_and_check_rating(model.HistoryDatasetCollectionRatingAssociation, history_dataset_collection)

        library_dataset_collection = model.LibraryDatasetCollectionAssociation(collection=dataset_collection)
        self.persist(library_dataset_collection)
        persist_and_check_rating(model.LibraryDatasetCollectionRatingAssociation, library_dataset_collection)
Example #19
0
 def test_nested_collection_attributes(self):
     """Exercise _get_nested_collection_attributes and the convenience
     properties built on it, across nested (list:paired, list:list:paired)
     collections, including metadata-file reporting for a bam dataset.
     """
     model = self.model
     u = model.User(email="*****@*****.**", password="******")
     h1 = model.History(name="History 1", user=u)
     d1 = model.HistoryDatasetAssociation(extension="bam", history=h1, create_dataset=True, sa_session=model.session)
     # Attach bam index metadata files so metadata-file reporting has content.
     index = NamedTemporaryFile("w")
     index.write("cool bam index")
     index2 = NamedTemporaryFile("w")
     index2.write("cool bam index 2")
     metadata_dict = {"bam_index": MetadataTempFile.from_JSON({"kwds": {}, "filename": index.name}), "bam_csi_index": MetadataTempFile.from_JSON({"kwds": {}, "filename": index2.name})}
     d1.metadata.from_JSON_dict(json_dict=metadata_dict)
     assert d1.metadata.bam_index
     assert d1.metadata.bam_csi_index
     assert isinstance(d1.metadata.bam_index, model.MetadataFile)
     assert isinstance(d1.metadata.bam_csi_index, model.MetadataFile)
     d2 = model.HistoryDatasetAssociation(extension="txt", history=h1, create_dataset=True, sa_session=model.session)
     # c1: paired collection of the two HDAs.
     c1 = model.DatasetCollection(collection_type='paired')
     dce1 = model.DatasetCollectionElement(collection=c1, element=d1, element_identifier="forward", element_index=0)
     dce2 = model.DatasetCollectionElement(collection=c1, element=d2, element_identifier="reverse", element_index=1)
     # c2: list wrapping c1; c3: empty list:list; c4: list wrapping c2.
     c2 = model.DatasetCollection(collection_type="list:paired")
     dce3 = model.DatasetCollectionElement(collection=c2, element=c1, element_identifier="inner_list", element_index=0)
     c3 = model.DatasetCollection(collection_type="list:list")
     c4 = model.DatasetCollection(collection_type="list:list:paired")
     dce4 = model.DatasetCollectionElement(collection=c4, element=c2, element_identifier="outer_list", element_index=0)
     model.session.add_all([d1, d2, c1, dce1, dce2, c2, dce3, c3, c4, dce4])
     model.session.flush()
     # One row per leaf dataset; element identifiers are emitted per nesting level.
     q = c2._get_nested_collection_attributes(element_attributes=('element_identifier',), hda_attributes=('extension',), dataset_attributes=('state',))
     assert [(r.keys()) for r in q] == [['element_identifier_0', 'element_identifier_1', 'extension', 'state'], ['element_identifier_0', 'element_identifier_1', 'extension', 'state']]
     assert q.all() == [('inner_list', 'forward', 'bam', 'new'), ('inner_list', 'reverse', 'txt', 'new')]
     q = c2._get_nested_collection_attributes(return_entities=(model.HistoryDatasetAssociation,))
     assert q.all() == [d1, d2]
     q = c2._get_nested_collection_attributes(return_entities=(model.HistoryDatasetAssociation, model.Dataset))
     assert q.all() == [(d1, d1.dataset), (d2, d2.dataset)]
     # Assert properties that use _get_nested_collection_attributes return correct content
     assert c2.dataset_instances == [d1, d2]
     assert c2.dataset_elements == [dce1, dce2]
     assert c2.dataset_action_tuples == []
     assert c2.populated_optimized
     assert c2.dataset_states_and_extensions_summary == ({'new'}, {'txt', 'bam'})
     assert c2.element_identifiers_extensions_paths_and_metadata_files == [[('inner_list', 'forward'), 'bam', 'mock_dataset_14.dat', [('bai', 'mock_dataset_14.dat'), ('bam.csi', 'mock_dataset_14.dat')]], [('inner_list', 'reverse'), 'txt', 'mock_dataset_14.dat', []]]
     # c3 has no elements at all.
     assert c3.dataset_instances == []
     assert c3.dataset_elements == []
     assert c3.dataset_states_and_extensions_summary == (set(), set())
     q = c4._get_nested_collection_attributes(element_attributes=('element_identifier',))
     assert q.all() == [('outer_list', 'inner_list', 'forward'), ('outer_list', 'inner_list', 'reverse')]
     assert c4.dataset_elements == [dce1, dce2]
     assert c4.element_identifiers_extensions_and_paths == [(('outer_list', 'inner_list', 'forward'), 'bam', 'mock_dataset_14.dat'), (('outer_list', 'inner_list', 'reverse'), 'txt', 'mock_dataset_14.dat')]
Example #20
0
def __workflow_fixure( trans ):
    """Build and persist a four-step workflow (two data inputs feeding two
    cat1 tool steps) and return it freshly reloaded from the database."""
    user = model.User(
        email="*****@*****.**",
        password="******"
    )
    stored_workflow = model.StoredWorkflow()
    stored_workflow.user = user
    workflow = model.Workflow()
    workflow.stored_workflow = stored_workflow

    def add_step( **kwds ):
        # Append a WorkflowStep configured from the given attributes.
        workflow_step = model.WorkflowStep()
        # BUG FIX: dict.iteritems() is Python 2 only; items() works on 2 and 3.
        for key, value in kwds.items():
            setattr(workflow_step, key, value)
        workflow.steps.append( workflow_step )

    trans.app.model.context.add(
        workflow,
    )

    add_step(
        type="data_input",
        order_index=0,
        tool_inputs={"name": "input1"}
    )
    add_step(
        type="data_input",
        order_index=1,
        tool_inputs={"name": "input2"}
    )
    add_step(
        type="tool",
        tool_id="cat1",
        order_index=2,
    )
    # NOTE(review): order_index jumps from 2 to 4 — presumably intentional
    # (sparse ordering); confirm before "fixing".
    add_step(
        type="tool",
        tool_id="cat1",
        order_index=4,
    )
    trans.app.model.context.flush()
    # Expunge and reload to ensure step state is as expected from database.
    workflow_id = workflow.id
    trans.app.model.context.expunge_all()

    return trans.app.model.context.query( model.Workflow ).get( workflow_id )
Example #21
0
    def test_quota(self):
        """Exercise quota resolution across default, user, and group quotas,
        covering the '+', '-', and '=' operations, quota deletion, and the
        unlimited (-1) amount."""
        user = model.User(email="*****@*****.**", password="******")
        self.persist(user)

        # No quotas defined yet: usage is unlimited.
        self._assert_user_quota_is(user, None)

        # Default quota for registered users provides the base amount.
        registered_default = model.Quota(name="default registered", amount=20)
        self.quota_agent.set_default_quota(
            model.DefaultQuotaAssociation.types.REGISTERED,
            registered_default,
        )
        self._assert_user_quota_is(user, 20)

        # '+' adds to the current total.
        self._add_user_quota(user, model.Quota(name="user quota add", amount=30, operation="+"))
        self._assert_user_quota_is(user, 50)

        # '=' replaces the base; earlier additions still apply on top.
        self._add_user_quota(user, model.Quota(name="user quota bigger base", amount=70, operation="="))
        self._assert_user_quota_is(user, 100)

        # '-' subtracts from the total.
        self._add_user_quota(user, model.Quota(name="user quota del", amount=10, operation="-"))
        self._assert_user_quota_is(user, 90)

        # Group quotas combine with user quotas the same way.
        self._add_group_quota(user, model.Quota(name="group quota add", amount=7, operation="+"))
        self._assert_user_quota_is(user, 97)

        group_base = model.Quota(name="group quota bigger base", amount=100, operation="=")
        self._add_group_quota(user, group_base)
        self._assert_user_quota_is(user, 127)

        # Deleted quotas stop contributing.
        group_base.deleted = True
        self.persist(group_base)
        self._assert_user_quota_is(user, 97)

        # amount=-1 with '=' means unlimited.
        self._add_group_quota(user, model.Quota(name="group quota unlimited", amount=-1, operation="="))
        self._assert_user_quota_is(user, None)
Example #22
0
def _import_library_target(target, work_directory):
    """Round-trip a library target through a directory model export/import and
    return the session the import ran against."""
    app = _mock_app(store_by="uuid")
    export_directory = mkdtemp()
    # Serialize the target (including dataset objects) into a directory store.
    with store.DirectoryModelExportStore(
            export_directory, app=app,
            serialize_dataset_objects=True) as export_store:
        persist_target_to_export_store(target, export_store, app.object_store,
                                       work_directory)

    user = model.User(email="*****@*****.**", password="******")

    # Re-import with library creation and dataset edits allowed.
    options = store.ImportOptions(allow_dataset_object_edit=True,
                                  allow_library_creation=True)
    importer = store.get_import_model_store_for_directory(
        export_directory, app=app, user=user, import_options=options)
    importer.perform_import()

    return app.model.context
Example #23
0
def _import_directory_to_history(app, target, work_directory):
    """Import the model-store directory *target* into a brand-new history.

    Creates a throwaway user and an empty history, imports *target* with
    that history as the default target, and returns the populated history.

    ``work_directory`` is accepted for signature parity with sibling
    helpers but is not used here.
    """
    sa_session = app.model.context

    u = model.User(email="*****@*****.**", password="******")
    import_history = model.History(name="Test History for Import", user=u)

    sa_session.add_all([u, import_history])
    sa_session.flush()

    # Sanity check: the freshly created target history starts out empty.
    assert len(import_history.datasets) == 0

    import_options = store.ImportOptions(allow_dataset_object_edit=True)
    import_model_store = store.get_import_model_store_for_directory(
        target, app=app, user=u, import_options=import_options)
    with import_model_store.target_history(default_history=import_history):
        import_model_store.perform_import(import_history)

    return import_history
Example #24
0
    def test_tags(self):
        """Every taggable model class can be tagged through its dedicated
        tag-association class, and the association becomes visible through
        the corresponding backref on the Tag object."""
        model = self.model

        my_tag = model.Tag(name="Test Tag")
        u = model.User(email="*****@*****.**", password="******")
        self.persist(my_tag, u)

        def tag_and_test(taggable_object, tag_association_class, backref_name):
            # Before tagging: the Tag's backref for this object type is empty.
            assert len(getattr(self.query(model.Tag).filter(model.Tag.name == "Test Tag").all()[0], backref_name)) == 0

            tag_association = tag_association_class()
            tag_association.tag = my_tag
            taggable_object.tags = [tag_association]
            self.persist(tag_association, taggable_object)

            # After persisting: exactly one association shows up via the backref.
            assert len(getattr(self.query(model.Tag).filter(model.Tag.name == "Test Tag").all()[0], backref_name)) == 1

        sw = model.StoredWorkflow()
        sw.user = u
        tag_and_test(sw, model.StoredWorkflowTagAssociation, "tagged_workflows")

        h = model.History(name="History for Tagging", user=u)
        tag_and_test(h, model.HistoryTagAssociation, "tagged_histories")

        d1 = model.HistoryDatasetAssociation(extension="txt", history=h, create_dataset=True, sa_session=model.session)
        tag_and_test(d1, model.HistoryDatasetAssociationTagAssociation, "tagged_history_dataset_associations")

        page = model.Page()
        page.user = u
        tag_and_test(page, model.PageTagAssociation, "tagged_pages")

        visualization = model.Visualization()
        visualization.user = u
        tag_and_test(visualization, model.VisualizationTagAssociation, "tagged_visualizations")

        # Both collection association flavors share the same underlying collection.
        dataset_collection = model.DatasetCollection(collection_type="paired")
        history_dataset_collection = model.HistoryDatasetCollectionAssociation(collection=dataset_collection)
        tag_and_test(history_dataset_collection, model.HistoryDatasetCollectionTagAssociation, "tagged_history_dataset_collections")

        library_dataset_collection = model.LibraryDatasetCollectionAssociation(collection=dataset_collection)
        tag_and_test(library_dataset_collection, model.LibraryDatasetCollectionTagAssociation, "tagged_library_dataset_collections")
Example #25
0
    def test_job_metrics(self):
        """Job and task metrics persist, and oversized text values are
        truncated to the metric column's capacity."""
        model = self.model

        user = model.User(email="*****@*****.**", password="******")
        job = model.Job()
        job.user = user
        job.tool_id = "cat1"

        job.add_metric("gx", "galaxy_slots", 5)
        job.add_metric("system", "system_name", "localhost")

        self.persist(user, job)

        task = model.Task(job=job, working_directory="/tmp", prepare_files_cmd="split.sh")
        task.add_metric("gx", "galaxy_slots", 5)
        task.add_metric("system", "system_name", "localhost")

        # A value far larger than the column should be stored truncated.
        oversized_value = ":".join("%d" % i for i in range(2000))
        task.add_metric("env", "BIG_PATH", oversized_value)
        self.persist(task)
        assert len(task.text_metrics[1].metric_value) <= 1023
Example #26
0
    def test_collections_in_histories(self):
        """A paired dataset collection attached to a history round-trips
        through the database with both elements addressable by identifier."""
        model = self.model

        user = model.User(email="*****@*****.**", password="******")
        history = model.History(name="History 1", user=user)
        left_hda = model.HistoryDatasetAssociation(extension="txt", history=history, create_dataset=True, sa_session=model.session)
        right_hda = model.HistoryDatasetAssociation(extension="txt", history=history, create_dataset=True, sa_session=model.session)

        pair = model.DatasetCollection(collection_type="pair")
        hdca = model.HistoryDatasetCollectionAssociation(history=history, collection=pair, name="HistoryCollectionTest1")

        left_element = model.DatasetCollectionElement(collection=pair, element=left_hda, element_identifier="left")
        right_element = model.DatasetCollectionElement(collection=pair, element=right_hda, element_identifier="right")

        self.persist(user, history, left_hda, right_hda, pair, hdca, left_element, right_element)

        # Reload by name and verify structure survived the round trip.
        hdca_query = self.query(model.HistoryDatasetCollectionAssociation)
        loaded_collection = hdca_query.filter(model.HistoryDatasetCollectionAssociation.name == "HistoryCollectionTest1").first().collection
        self.assertEqual(len(loaded_collection.elements), 2)
        assert loaded_collection.collection_type == "pair"
        assert loaded_collection["left"] == left_element
        assert loaded_collection["right"] == right_element
Example #27
0
    def test_history_audit(self):
        """History updates accumulate HistoryAudit rows, and
        HistoryAudit.prune() keeps only the newest row per history."""
        model = self.model
        u = model.User(email="*****@*****.**", password="******")
        h1 = model.History(name="HistoryAuditHistory", user=u)
        h2 = model.History(name="HistoryAuditHistory", user=u)

        def get_audit_table_entries(history):
            # All audit rows recorded for the given history.
            return self.session().query(model.HistoryAudit.table).filter(
                model.HistoryAudit.table.c.history_id == history.id).all()

        def get_latest_entry(entries):
            # key ensures result is correct if new columns are added
            return max(entries, key=lambda x: x.update_time)

        # Creating each history yields exactly one audit row.
        self.persist(u, h1, h2, expunge=False)
        assert len(get_audit_table_entries(h1)) == 1
        assert len(get_audit_table_entries(h2)) == 1

        self.new_hda(h1, name="1")
        self.new_hda(h2, name="2")
        self.session().flush()
        # db_next_hid modifies history, plus trigger on HDA means 2 additional audit rows per history

        h1_audits = get_audit_table_entries(h1)
        h2_audits = get_audit_table_entries(h2)
        assert len(h1_audits) == 3
        assert len(h2_audits) == 3

        h1_latest = get_latest_entry(h1_audits)
        h2_latest = get_latest_entry(h2_audits)

        # Pruning collapses each history's audit trail to its latest entry.
        model.HistoryAudit.prune(self.session())

        h1_audits = get_audit_table_entries(h1)
        h2_audits = get_audit_table_entries(h2)
        assert len(h1_audits) == 1
        assert len(h2_audits) == 1
        assert h1_audits[0] == h1_latest
        assert h2_audits[0] == h2_latest
Example #28
0
def _setup_simple_cat_job(app, state='ok'):
    """Build a user, a history, two datasets, and a ``cat1`` job wired
    input1 -> out_file1, all in the given *state*.

    Real file contents are pushed into the object store for both datasets.
    Returns the tuple ``(user, history, input_dataset, output_dataset, job)``.
    """
    sa_session = app.model.context

    user = model.User(email="*****@*****.**", password="******")
    history = model.History(name="Test History", user=user)

    input_dataset, output_dataset = _create_datasets(sa_session, history, 2)
    input_dataset.state = state
    output_dataset.state = state

    job = model.Job()
    job.user = user
    job.tool_id = "cat1"
    job.state = state
    job.add_input_dataset("input1", input_dataset)
    job.add_output_dataset("out_file1", output_dataset)

    sa_session.add_all((input_dataset, output_dataset, history, job))
    sa_session.flush()

    # Back each dataset with actual file content in the object store.
    app.object_store.update_from_file(input_dataset, file_name=TEST_PATH_1, create=True)
    app.object_store.update_from_file(output_dataset, file_name=TEST_PATH_2, create=True)

    return user, history, input_dataset, output_dataset, job
Example #29
0
 def user(self):
     """Return a lazily created, cached test user for this object."""
     if self._user is None:
         self._user = model.User(email="*****@*****.**", password="******")
     return self._user
Example #30
0
    def test_workflows(self):
        """Workflows with subworkflow steps, step annotations, and
        invocations (including subworkflow invocations and request dataset
        associations) persist and reload correctly; copying a workflow
        carries step annotations along."""
        model = self.model
        user = model.User(
            email="*****@*****.**",
            password="******"
        )

        def workflow_from_steps(steps):
            # Wrap the given steps in a Workflow owned (via StoredWorkflow) by `user`.
            stored_workflow = model.StoredWorkflow()
            stored_workflow.user = user
            workflow = model.Workflow()
            workflow.steps = steps
            workflow.stored_workflow = stored_workflow
            return workflow

        # An empty workflow to serve as the subworkflow of step 2.
        child_workflow = workflow_from_steps([])
        self.persist(child_workflow)

        workflow_step_1 = model.WorkflowStep()
        workflow_step_1.order_index = 0
        workflow_step_1.type = "data_input"
        workflow_step_2 = model.WorkflowStep()
        workflow_step_2.order_index = 1
        workflow_step_2.type = "subworkflow"
        workflow_step_2.subworkflow = child_workflow

        workflow_step_1.get_or_add_input("moo1")
        workflow_step_1.get_or_add_input("moo2")
        workflow_step_2.get_or_add_input("moo")
        # Connect step 1's "foo" output to step 2's "cow" input.
        workflow_step_1.add_connection("foo", "cow", workflow_step_2)

        workflow = workflow_from_steps([workflow_step_1, workflow_step_2])
        self.persist(workflow)
        workflow_id = workflow.id

        annotation = model.WorkflowStepAnnotationAssociation()
        annotation.annotation = "Test Step Annotation"
        annotation.user = user
        annotation.workflow_step = workflow_step_1
        self.persist(annotation)

        assert workflow_step_1.id is not None
        h1 = model.History(name="WorkflowHistory1", user=user)

        invocation_uuid = uuid.uuid1()

        workflow_invocation = model.WorkflowInvocation()
        workflow_invocation.uuid = invocation_uuid
        workflow_invocation.history = h1

        workflow_invocation_step1 = model.WorkflowInvocationStep()
        workflow_invocation_step1.workflow_invocation = workflow_invocation
        workflow_invocation_step1.workflow_step = workflow_step_1

        # The subworkflow step gets its own invocation attached to the parent's.
        subworkflow_invocation = model.WorkflowInvocation()
        workflow_invocation.attach_subworkflow_invocation_for_step(workflow_step_2, subworkflow_invocation)

        workflow_invocation_step2 = model.WorkflowInvocationStep()
        workflow_invocation_step2.workflow_invocation = workflow_invocation
        workflow_invocation_step2.workflow_step = workflow_step_2

        workflow_invocation.workflow = workflow

        # Associate a request input dataset with the invocation's first step.
        d1 = self.new_hda(h1, name="1")
        workflow_request_dataset = model.WorkflowRequestToInputDatasetAssociation()
        workflow_request_dataset.workflow_invocation = workflow_invocation
        workflow_request_dataset.workflow_step = workflow_step_1
        workflow_request_dataset.dataset = d1
        self.persist(workflow_invocation)
        assert workflow_request_dataset is not None
        assert workflow_invocation.id is not None

        history_id = h1.id
        self.expunge()

        # Reload from the database and verify everything survived.
        loaded_invocation = self.query(model.WorkflowInvocation).get(workflow_invocation.id)
        assert loaded_invocation.uuid == invocation_uuid, "%s != %s" % (loaded_invocation.uuid, invocation_uuid)
        assert loaded_invocation
        assert loaded_invocation.history.id == history_id

        step_1, step_2 = loaded_invocation.workflow.steps

        assert not step_1.subworkflow
        assert step_2.subworkflow
        assert len(loaded_invocation.steps) == 2

        subworkflow_invocation_assoc = loaded_invocation.get_subworkflow_invocation_association_for_step(step_2)
        assert subworkflow_invocation_assoc is not None
        assert isinstance(subworkflow_invocation_assoc.subworkflow_invocation, model.WorkflowInvocation)
        assert isinstance(subworkflow_invocation_assoc.parent_workflow_invocation, model.WorkflowInvocation)

        assert subworkflow_invocation_assoc.subworkflow_invocation.history.id == history_id

        # Copying a workflow must preserve step annotations.
        loaded_workflow = self.query(model.Workflow).get(workflow_id)
        assert len(loaded_workflow.steps[0].annotations) == 1
        copied_workflow = loaded_workflow.copy(user=user)
        annotations = copied_workflow.steps[0].annotations
        assert len(annotations) == 1