def __init__(self, implicit_output_name=None, job=None, hid=1):
    self.id = 124
    self.copied_from_history_dataset_collection_association = None
    self.history_content_type = "dataset_collection"
    self.implicit_output_name = implicit_output_name
    self.hid = hid  # honor the constructor argument instead of hard-coding 1
    self.collection = model.DatasetCollection()
    self.creating_job_associations = []
    element = model.DatasetCollectionElement(
        collection=self.collection,
        element=model.HistoryDatasetAssociation(),
        element_index=0,
        element_identifier="moocow",
    )
    element.dataset_instance.dataset = model.Dataset()
    element.dataset_instance.dataset.state = "ok"
    creating = model.JobToOutputDatasetAssociation(
        implicit_output_name,
        element.dataset_instance,
    )
    creating.job = job
    element.dataset_instance.creating_job_associations = [creating]
    self.collection.elements = [element]
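# A minimal sketch (assumed, not from the original source) of consuming the
# mock collection built above: the single "moocow" element carries an HDA
# whose creating job is the one passed to the constructor.
# `MockHistoryDatasetCollectionAssociation` is an assumed class name.
def _example_mock_collection_usage(job):
    hdca = MockHistoryDatasetCollectionAssociation(implicit_output_name="out1", job=job)
    element = hdca.collection.elements[0]
    assert element.element_identifier == "moocow"
    assert element.dataset_instance.creating_job_associations[0].job is job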
def _new_hda(self):
    hda = model.HistoryDatasetAssociation()
    hda.visible = True
    hda.dataset = model.Dataset()
    self.app.model.context.add(hda)
    self.app.model.context.flush()
    return hda
def __add_dataset(self, state='ok'):
    hda = model.HistoryDatasetAssociation()
    hda.dataset = model.Dataset()
    hda.dataset.state = state  # use the caller-supplied state instead of hard-coding 'ok'
    hda.dataset.external_filename = "/tmp/datasets/dataset_001.dat"
    self.history.add_dataset(hda)
    self.app.model.context.flush()
    return hda
def test_dataset_job_relationship(self):
    model = self.model
    dataset = model.Dataset()
    job = model.Job()
    dataset.job = job
    self.persist(job, dataset)
    loaded_dataset = model.session.query(model.Dataset).filter(model.Dataset.id == dataset.id).one()
    assert loaded_dataset.job_id == job.id
def _setup_test_output(self):
    dataset = model.Dataset()
    dataset.external_filename = "example_output"  # this way object store isn't asked about size
    self.hda = model.HistoryDatasetAssociation(name="test", dataset=dataset)
    job = model.Job()
    job.add_output_dataset(DEFAULT_TOOL_OUTPUT, self.hda)
    self.app.model.context.add(job)
    self.job = job
    self.history = self._new_history(hdas=[self.hda])
    self.outputs = {DEFAULT_TOOL_OUTPUT: self.hda}
def create(self, trans, flush=True, **kwargs):
    """
    Create and return a new Dataset object.
    """
    # default to NEW state on new datasets
    kwargs.update(dict(state=kwargs.get('state', model.Dataset.states.NEW)))
    dataset = model.Dataset(**kwargs)
    self.app.model.context.add(dataset)
    if flush:
        self.app.model.context.flush()
    return dataset
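# A minimal usage sketch (not from the original source): it assumes a manager
# instance exposing the create() above and a `trans` object; it only
# illustrates that an omitted state defaults to NEW and that flush=False
# defers the commit to the caller.
def _example_create_usage(manager, trans):
    created = manager.create(trans)                # state defaults to NEW
    assert created.state == model.Dataset.states.NEW
    deferred = manager.create(trans, flush=False)  # caller controls the flush
    manager.app.model.context.flush()              # persist both in one flush
    return created, deferred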
def _run_jihaw_cleanup(history_archive, msg):
    app = MockApp()
    job = model.Job()
    job.stderr = ''
    jiha = model.JobImportHistoryArchive(job=job, archive_dir=history_archive.arc_directory)
    app.model.context.current.add_all([job, jiha])
    app.model.context.flush()
    jihaw = JobImportHistoryArchiveWrapper(app, 1)  # yeehaw!
    try:
        jihaw.cleanup_after_job()
        data = app.object_store.get_data(model.Dataset(1))
        assert data != 'insecure', msg
    except MalformedContents:
        pass
def create(self, manage_roles=None, access_roles=None, flush=True, **kwargs):
    """
    Create and return a new Dataset object.
    """
    # default to NEW state on new datasets
    kwargs.update(dict(state=kwargs.get('state', model.Dataset.states.NEW)))
    dataset = model.Dataset(**kwargs)
    self.session().add(dataset)
    self.permissions.set(dataset, manage_roles, access_roles, flush=False)
    if flush:
        self.session().flush()
    return dataset
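# A hedged sketch of how the role-aware create() above might be exercised;
# `manager` and `private_role` are assumed fixtures, not part of the original
# code. Note that permissions are applied before the single flush.
def _example_create_with_roles(manager, private_role):
    dataset = manager.create(
        manage_roles=[private_role],
        access_roles=[private_role],
    )
    assert dataset.state == model.Dataset.states.NEW
    return dataset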
def main():
    options, args = doc_optparse.parse(__doc__)
    try:
        extension = options.ext
    except Exception:
        doc_optparse.exception()
    # create datatype
    data = model.Dataset(extension=extension, id=int(args[0]))
    data.file_path = "/home/ian/trunk/database/files/"
    if options.metadata:
        data.metadata = util.string_to_object(options.metadata)
    errors = data.datatype.validate(data)
    print(util.object_to_string(errors))
def __init__(self, test_dataset=None, name="Test Dataset", id=1):
    if not test_dataset:
        test_dataset = model.Dataset()
        test_dataset.state = model.Dataset.states.OK
    self.states = model.HistoryDatasetAssociation.states
    self.deleted = False
    self.dataset = test_dataset
    self.visible = True
    self.conversion_destination = (None, None)
    self.extension = "txt"
    self.dbkey = "hg19"
    self.implicitly_converted_parent_datasets = False
    self.name = name
    self.hid = id
    self.id = id
    self.children = []
    self.tags = []
def __init__(self, test_dataset=None, name="Test Dataset", id=1):
    if not test_dataset:
        test_dataset = model.Dataset()
    self.states = model.HistoryDatasetAssociation.states
    self.deleted = False
    self.dataset = test_dataset
    self.visible = True
    self.datatype_matches = True
    self.conversion_destination = (None, None)
    self.datatype = bunch.Bunch(
        matches_any=lambda formats: self.datatype_matches,
    )
    self.dbkey = "hg19"
    self.implicitly_converted_parent_datasets = False
    self.name = name
    self.hid = id
    self.id = id
    self.children = []
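# A minimal sketch (assumed, not from the source) showing how the mock HDA
# above could stand in for a real HistoryDatasetAssociation in a datatype-
# matching test; `MockHistoryDatasetAssociation` is the assumed class name.
def _example_mock_hda_usage():
    hda = MockHistoryDatasetAssociation(name="query.fastq", id=7)
    assert hda.datatype.matches_any(["fastq"])       # datatype_matches defaults to True
    hda.datatype_matches = False                     # flip the toggle the lambda closes over
    assert not hda.datatype.matches_any(["fastq"])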
def _import_export(app, h, dest_export=None):
    if dest_export is None:
        dest_parent = mkdtemp()
        dest_export = os.path.join(dest_parent, "moo.tgz")
    dataset = model.Dataset(id=100)
    jeha = model.JobExportHistoryArchive(job=model.Job(), history=h, dataset=dataset, compressed=True)
    wrapper = JobExportHistoryArchiveWrapper(app, 1)
    wrapper.setup_job(jeha)
    from galaxy.tools.imp_exp import export_history
    ret = export_history.main(["--gzip", jeha.temp_directory, dest_export])
    assert ret == 0, ret
    _, imported_history = import_archive(dest_export, app=app)
    assert imported_history
    return imported_history
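# A hedged usage sketch: assuming a MockApp-style `app` fixture and a helper
# that builds a populated history, this shows the round-trip assertion the
# helper above enables. `_history_with_one_hda` is a hypothetical fixture.
def _example_round_trip(app):
    history = _history_with_one_hda(app)   # hypothetical: history with one dataset
    imported = _import_export(app, history)  # export to a temp archive, re-import
    assert imported.name is not None
    assert len(imported.datasets) == len(history.datasets)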