def setUp(self):
    """Create temporary FASTQ files, filepath metadata, and a second study."""
    # mkstemp returns an open file descriptor that must be closed explicitly.
    fd, self.seqs_fp = mkstemp(suffix='_seqs.fastq')
    close(fd)
    fd, self.barcodes_fp = mkstemp(suffix='_barcodes.fastq')
    close(fd)
    # Numeric filetype / filepath-type ids as stored in the DB -- presumably
    # 1 = raw sequences, 2 = raw barcodes; TODO confirm against the
    # filepath_type table.
    self.filetype = 2
    self.filepaths = [(self.seqs_fp, 1), (self.barcodes_fp, 2)]
    self.studies = [Study(1)]
    _, self.db_test_raw_dir = get_mountpoint('raw_data')[0]

    # Write minimal (single newline) contents so the files exist on disk.
    with open(self.seqs_fp, "w") as f:
        f.write("\n")
    with open(self.barcodes_fp, "w") as f:
        f.write("\n")
    # Paths queued here are removed by tearDown.
    self._clean_up_files = []

    # Create a new study
    info = {
        "timeseries_type_id": 1,
        "metadata_complete": True,
        "mixs_compliant": True,
        "number_samples_collected": 25,
        "number_samples_promised": 28,
        "portal_type_id": 3,
        "study_alias": "FCM",
        "study_description": "Microbiome of people who eat nothing but "
                             "fried chicken",
        "study_abstract": "Exploring how a high fat diet changes the "
                          "gut microbiome",
        "emp_person_id": StudyPerson(2),
        "principal_investigator_id": StudyPerson(3),
        "lab_person_id": StudyPerson(1)
    }
    Study.create(User("*****@*****.**"), "Test study 2", [1], info)
def test_delete_study_empty_study(self):
    """A study with no attached data is deleted by the delete_study job."""
    # Note: ids/booleans are given as strings ('1', 'true'); Study.create
    # evidently accepts them in this loosely-typed form as well.
    info = {
        "timeseries_type_id": '1',
        "metadata_complete": 'true',
        "mixs_compliant": 'true',
        "number_samples_collected": 25,
        "number_samples_promised": 28,
        "study_alias": "FCM",
        "study_description": "Microbiome of people who eat nothing but "
                             "fried chicken",
        "study_abstract": "Exploring how a high fat diet changes the "
                          "gut microbiome",
        "emp_person_id": StudyPerson(2),
        "principal_investigator_id": StudyPerson(3),
        "lab_person_id": StudyPerson(1)
    }
    # Unique title (time()) so repeated test runs do not collide.
    new_study = Study.create(User('*****@*****.**'),
                             "Fried Chicken Microbiome %s" % time(), info)
    job = self._create_job('delete_study', {'study': new_study.id})
    private_task(job.id)
    self.assertEqual(job.status, 'success')

    # making sure the study doesn't exist
    with self.assertRaises(QiitaDBUnknownIDError):
        Study(new_study.id)
def _create_study(self, study_title):
    """Create a brand-new study owned by the standard test user.

    Parameters
    ----------
    study_title : str
        The title of the new study

    Returns
    -------
    qiita_db.study.Study
        The newly created study
    """
    owner = User('*****@*****.**')
    # Minimal metadata required by Study.create.
    study_info = {
        "timeseries_type_id": 1,
        "metadata_complete": True,
        "mixs_compliant": True,
        "number_samples_collected": 25,
        "number_samples_promised": 28,
        "study_alias": "ALIAS",
        "study_description": "DESC",
        "study_abstract": "ABS",
        "principal_investigator_id": StudyPerson(3),
        "lab_person_id": StudyPerson(1),
    }
    return Study.create(owner, study_title, study_info)
def _build_study_info(studytype, user=None):
    """Build a list of namedtuples, one per study, for a study listing.

    ``studytype`` selects the listing: 'private' and 'shared' come from the
    given user, 'public' from the global public-status query.
    """
    # Pick the collection of study ids matching the requested listing.
    if studytype == "private":
        study_ids = user.user_studies
    elif studytype == "shared":
        study_ids = user.shared_studies
    elif studytype == "public":
        study_ids = Study.get_by_status('public')
    else:
        raise IncompetentQiitaDeveloperError("Must use private, shared, "
                                             "or public!")

    StudyTuple = namedtuple(
        'StudyInfo', 'id title meta_complete '
        'num_samples_collected shared num_raw_data pi '
        'pmids owner status')

    listing = []
    for sid in study_ids:
        study = Study(sid)
        info = study.info
        # Just passing the email address as the name here, since
        # name is not a required field in qiita.qiita_user
        owner = study_person_linkifier((study.owner, study.owner))
        pi_person = StudyPerson(info['principal_investigator_id'])
        pi = study_person_linkifier((pi_person.email, pi_person.name))
        pmids = ", ".join(pubmed_linkifier([pmid])
                          for pmid in study.pmids)
        listing.append(StudyTuple(
            study.id, study.title, info["metadata_complete"],
            info["number_samples_collected"],
            _get_shared_links_for_study(study),
            len(study.raw_data()), pi, pmids, owner, study.status))
    return listing
def setUp(self):
    """Create a fresh, empty study used by the tests in this class."""
    info = {
        "timeseries_type_id": 1,
        "metadata_complete": True,
        "mixs_compliant": True,
        "study_alias": "FCM",
        "study_description": "Microbiome of people who eat nothing but "
                             "fried chicken",
        "study_abstract": "Exploring how a high fat diet changes the "
                          "gut microbiome",
        "principal_investigator_id": StudyPerson(3),
        "lab_person_id": StudyPerson(1)
    }
    self.new_study = Study.create(User('*****@*****.**'),
                                  "Fried Chicken Microbiome", info)
    # Paths queued here are removed by tearDown.
    self._clean_up_files = []
def test_patch_no_sample_template(self):
    """PATCHing samples of a study without sample information returns 404."""
    info = {
        "timeseries_type_id": 1,
        "metadata_complete": True,
        "mixs_compliant": True,
        "number_samples_collected": 25,
        "number_samples_promised": 28,
        "study_alias": "FCM",
        "study_description": "DESC",
        "study_abstract": "ABS",
        "principal_investigator_id": StudyPerson(3),
        'first_contact': datetime(2015, 5, 19, 16, 10),
        'most_recent_contact': datetime(2015, 5, 19, 16, 11),
    }
    new_study = Study.create(User('*****@*****.**'),
                             "Some New Study for test jr", info)

    # Arbitrary sample/category payload; its content is irrelevant because
    # the new study has no sample information at all.
    body = {
        'sampleid1': {
            'category_a': 'value_a'
        },
        'sampleid2': {
            'category_b': 'value_b'
        }
    }
    exp = {'message': 'No sample information found'}
    response = self.patch('/api/v1/study/%d/samples' % new_study.id,
                          headers=self.headers, data=body, asjson=True)
    self.assertEqual(response.code, 404)
    obs = json_decode(response.body)
    self.assertEqual(obs, exp)
def test_get_no_samples(self):
    """GETting sample categories of a study without sample info returns 404."""
    # /api/v1/study/%d/samples/info -> {'number-of-samples':<int>,
    # 'categories': [<str>]}
    info = {
        "timeseries_type_id": 1,
        "metadata_complete": True,
        "mixs_compliant": True,
        "number_samples_collected": 25,
        "number_samples_promised": 28,
        "study_alias": "FCM",
        "study_description": "DESC",
        "study_abstract": "ABS",
        "principal_investigator_id": StudyPerson(3),
        'first_contact': datetime(2015, 5, 19, 16, 10),
        'most_recent_contact': datetime(2015, 5, 19, 16, 11),
    }
    new_study = Study.create(User('*****@*****.**'),
                             "Some New Study for test", info)
    exp = {'message': 'Study does not have sample information'}
    # NOTE(review): the path looks like it may be missing a '?' before
    # 'categories=foo' -- confirm against the handler's route definition.
    response = self.get('/api/v1/study/%d/samples/categories=foo'
                        % new_study.id, headers=self.headers)
    self.assertEqual(response.code, 404)
    obs = json_decode(response.body)
    self.assertEqual(obs, exp)
def _build_single_study_info(study, info, study_proc, proc_samples):
    """Clean up and extend one study's info dict for HTML display.

    Parameters
    ----------
    study : Study object
        The study to build information for
    info : dict
        Information from Study.get_info; mutated in place and returned
    study_proc : dict of dict of lists
        Dictionary keyed on study_id that lists all processed data
        associated with that study, keyed by data type
    proc_samples : dict of lists
        Dictionary keyed on proc_data_id that lists all samples
        associated with that processed data

    Returns
    -------
    dict
        The same ``info`` dict, augmented and slightly HTML formatted
    """
    pi_person = StudyPerson(info['principal_investigator_id'])

    # Linkify publications when present, otherwise leave the fields blank.
    dois = info['publication_doi']
    if dois is None:
        info['publication_doi'] = ""
        info['pmid'] = ""
    else:
        pmids = get_pubmed_ids_from_dois(dois).values()
        info['pmid'] = ", ".join([pubmed_linkifier([p]) for p in pmids])
        info['publication_doi'] = ", ".join(
            [doi_linkifier([d]) for d in dois])

    if info["number_samples_collected"] is None:
        info["number_samples_collected"] = 0

    info["shared"] = _get_shared_links_for_study(study)
    # raw data is any artifact that is not Demultiplexed or BIOM
    info["num_raw_data"] = sum(
        1 for a in study.artifacts()
        if a.artifact_type not in ['Demultiplexed', 'BIOM'])
    info["status"] = study.status
    info["study_id"] = study.id
    info["pi"] = study_person_linkifier((pi_person.email, pi_person.name))
    del info["principal_investigator_id"]
    del info["email"]

    # Build the proc data info list for the child row in datatable
    info["proc_data_info"] = []
    for data_type, proc_datas in viewitems(study_proc[study.id]):
        for pd_id in proc_datas:
            info["proc_data_info"].append(
                _build_single_proc_data_info(pd_id, data_type,
                                             proc_samples[pd_id]))
    return info
def setUp(self):
    """Store sample-template contents and create a study to attach it to."""
    # Create a sample template file
    self.st_contents = SAMPLE_TEMPLATE

    # create a new study to attach the sample template
    info = {
        "timeseries_type_id": 1,
        "metadata_complete": True,
        "mixs_compliant": True,
        "number_samples_collected": 4,
        "number_samples_promised": 4,
        "portal_type_id": 3,
        "study_alias": "TestStudy",
        "study_description": "Description of a test study",
        "study_abstract": "No abstract right now...",
        "emp_person_id": StudyPerson(2),
        "principal_investigator_id": StudyPerson(3),
        "lab_person_id": StudyPerson(1)
    }
    self.study = Study.create(User('*****@*****.**'),
                              "Test study", [1], info)
def test_delete_sample_template(self):
    """Deleting a template errors with preps attached, succeeds otherwise."""
    # Error case
    job = self._create_job('delete_sample_template', {'study': 1})
    private_task(job.id)
    self.assertEqual(job.status, 'error')
    self.assertIn(
        "Sample template cannot be erased because there are "
        "prep templates associated", job.log.msg)

    # Success case
    info = {
        "timeseries_type_id": '1',
        "metadata_complete": 'true',
        "mixs_compliant": 'true',
        "number_samples_collected": 25,
        "number_samples_promised": 28,
        "study_alias": "TDST",
        "study_description": "Test delete sample template",
        "study_abstract": "Test delete sample template",
        "principal_investigator_id": StudyPerson(1)
    }
    study = Study.create(User('*****@*****.**'),
                         "Delete Sample Template test", info)
    # Single-sample template; dtype=str keeps every column textual.
    metadata = pd.DataFrame.from_dict(
        {
            'Sample1': {
                'physical_specimen_location': 'location1',
                'physical_specimen_remaining': 'true',
                'dna_extracted': 'true',
                'sample_type': 'type1',
                'collection_timestamp': '2014-05-29 12:24:15',
                'host_subject_id': 'NotIdentified',
                'Description': 'Test Sample 1',
                'latitude': '42.42',
                'longitude': '41.41',
                'taxon_id': '9606',
                'scientific_name': 'h**o sapiens'
            }
        },
        orient='index', dtype=str)
    SampleTemplate.create(metadata, study)
    job = self._create_job('delete_sample_template', {'study': study.id})
    private_task(job.id)
    self.assertEqual(job.status, 'success')
    self.assertFalse(SampleTemplate.exists(study.id))
def test_create_sample_template(self):
    """Creating a template errors if one exists, else succeeds with warning."""
    # Test error
    job = self._create_job(
        'create_sample_template', {
            'fp': self.fp,
            'study_id': 1,
            'is_mapping_file': False,
            'data_type': None
        })
    private_task(job.id)
    self.assertEqual(job.status, 'error')
    self.assertIn(
        "The 'SampleTemplate' object with attributes (id: 1) "
        "already exists.", job.log.msg)

    # Test success with a warning
    info = {
        "timeseries_type_id": '1',
        "metadata_complete": 'true',
        "mixs_compliant": 'true',
        "study_alias": "TDST",
        "study_description": "Test create sample template",
        "study_abstract": "Test create sample template",
        "principal_investigator_id": StudyPerson(1)
    }
    study = Study.create(User('*****@*****.**'),
                         "Create Sample Template test", info)
    job = self._create_job(
        'create_sample_template', {
            'fp': self.fp,
            'study_id': study.id,
            'is_mapping_file': False,
            'data_type': None
        })
    private_task(job.id)
    self.assertEqual(job.status, 'success')
    # The warning is published to redis under a well-known key.
    obs = r_client.get("sample_template_%d" % study.id)
    self.assertIsNotNone(obs)
    obs = loads(obs)
    self.assertCountEqual(obs, ['job_id', 'alert_type', 'alert_msg'])
    self.assertEqual(obs['job_id'], job.id)
    self.assertEqual(obs['alert_type'], 'warning')
    self.assertIn(
        'Some functionality will be disabled due to missing columns:',
        obs['alert_msg'])
    # making sure that the error name is not in the messages
    self.assertNotIn('QiitaDBWarning', obs['alert_msg'])
def render(self, study):
    """Render the study-information tab for the given study."""
    study_info = study.info

    pmids = ", ".join(pubmed_linkifier([pmid]) for pmid in study.pmids)
    pi = StudyPerson(study_info['principal_investigator_id'])
    pi_link = study_person_linkifier((pi.email, pi.name))

    # Uploaded files the user may pick a sample template from.
    files = [f for _, f in get_files_from_uploads_folders(str(study.id))]

    # All filepaths of the sample template when one exists, else empty.
    if SampleTemplate.exists(study.id):
        sample_templates = SampleTemplate(study.id).get_filepaths()
    else:
        sample_templates = []

    # Sample template selection is allowed only for sandboxed studies
    # or for admin users.
    show_select_sample = (study.status == 'sandbox'
                          or self.current_user.level == 'admin')

    return self.render_string(
        "study_description_templates/study_information_tab.html",
        abstract=study_info['study_abstract'],
        description=study_info['study_description'],
        pmids=pmids,
        principal_investigator=pi_link,
        number_samples_promised=study_info['number_samples_promised'],
        number_samples_collected=study_info['number_samples_collected'],
        metadata_complete=study_info['metadata_complete'],
        show_select_sample=show_select_sample,
        files=files,
        study_id=study.id,
        sample_templates=sample_templates,
        is_local_request=self._is_local())
def test_set_info(self):
    """Set info in a study"""
    newinfo = {
        "timeseries_type_id": 2,
        "metadata_complete": False,
        "number_samples_collected": 28,
        "lab_person_id": StudyPerson(2),
        "vamps_id": 'MBE_111222',
        "first_contact": "June 11, 2014"
    }
    new = Study.create(
        User('*****@*****.**'), 'NOT Identification of the '
        'Microbiomes for Cannabis Soils', [1], self.info)
    self.infoexp.update(newinfo)
    new.info = newinfo
    # add missing table cols
    self.infoexp["funding"] = None
    self.infoexp["spatial_series"] = None
    self.infoexp["most_recent_contact"] = None
    self.infoexp["reprocess"] = False
    # lab_person_id comes back from the DB as the plain person id,
    # not the StudyPerson object that was set above.
    self.infoexp["lab_person_id"] = 2
    self.assertEqual(new.info, self.infoexp)
def test_get_study_no_samples(self):
    """GETting the samples of a study with no sample info returns 200 + []."""
    info = {
        "timeseries_type_id": 1,
        "metadata_complete": True,
        "mixs_compliant": True,
        "study_alias": "FCM",
        "study_description": "DESC",
        "study_abstract": "ABS",
        "principal_investigator_id": StudyPerson(3),
        'first_contact': datetime(2015, 5, 19, 16, 10),
        'most_recent_contact': datetime(2015, 5, 19, 16, 11),
    }
    new_study = Study.create(User('*****@*****.**'),
                             "Some New Study for test", info)
    exp = []
    response = self.get('/api/v1/study/%d/samples' % new_study.id,
                        headers=self.headers)
    self.assertEqual(response.code, 200)
    obs = json_decode(response.body)
    self.assertEqual(obs, exp)
def test_post(self):
    """POSTing the study-create form adds the new person and the study."""
    person_count_before = get_count('qiita.study_person')
    study_count_before = get_count('qiita.study')

    post_data = {
        'new_people_names': ['Adam', 'Ethan'],
        'new_people_emails': ['*****@*****.**', '*****@*****.**'],
        'new_people_affiliations': ['CU Boulder', 'NYU'],
        'new_people_addresses': ['Some St., Boulder, CO 80305', ''],
        'new_people_phones': ['', ''],
        'study_title': 'dummy title',
        'study_alias': 'dummy alias',
        'pubmed_id': 'dummy pmid',
        'environmental_packages': ['air'],
        'timeseries': '1',
        'study_abstract': "dummy abstract",
        'study_description': 'dummy description',
        'principal_investigator': '-2',
        'notes': '',
        'lab_person': '1'
    }
    self.post('/study/create/', post_data)

    # Check that the new person was created
    # NOTE(review): only one person appears to be newly created even though
    # two are posted (presumably 'Adam' already exists) -- confirm.
    expected_id = person_count_before + 1
    self.assertTrue(check_count('qiita.study_person', expected_id))
    new_person = StudyPerson(expected_id)
    # assertEqual / assertIsNone instead of assertTrue(x == y) /
    # assertTrue(x is None): same checks, but informative failure messages.
    self.assertEqual(new_person.name, 'Ethan')
    self.assertEqual(new_person.email, '*****@*****.**')
    self.assertEqual(new_person.affiliation, 'NYU')
    self.assertIsNone(new_person.address)
    self.assertIsNone(new_person.phone)

    # Check the study was created
    expected_id = study_count_before + 1
    self.assertTrue(check_count('qiita.study', expected_id))
def generate_new_study_with_preprocessed_data(self):
    """Creates a new study up to the processed data for testing

    Builds a study, its sample template, a 16S prep template, and a
    Demultiplexed artifact backed by a demux HDF5 file.
    """
    info = {
        "timeseries_type_id": 1,
        "metadata_complete": True,
        "mixs_compliant": True,
        "number_samples_collected": 3,
        "number_samples_promised": 3,
        "study_alias": "Test EBI",
        "study_description": "Study for testing EBI",
        "study_abstract": "Study for testing EBI",
        "emp_person_id": StudyPerson(2),
        "principal_investigator_id": StudyPerson(3),
        "lab_person_id": StudyPerson(1)
    }
    study = Study.create(User('*****@*****.**'), "Test EBI study", info)
    # Three-sample sample template.
    metadata_dict = {
        'Sample1': {
            'collection_timestamp': datetime(2015, 6, 1, 7, 0, 0),
            'physical_specimen_location': 'location1',
            'taxon_id': 9606,
            'scientific_name': 'h**o sapiens',
            'Description': 'Test Sample 1'
        },
        'Sample2': {
            'collection_timestamp': datetime(2015, 6, 2, 7, 0, 0),
            'physical_specimen_location': 'location1',
            'taxon_id': 9606,
            'scientific_name': 'h**o sapiens',
            'Description': 'Test Sample 2'
        },
        'Sample3': {
            'collection_timestamp': datetime(2015, 6, 3, 7, 0, 0),
            'physical_specimen_location': 'location1',
            'taxon_id': 9606,
            'scientific_name': 'h**o sapiens',
            'Description': 'Test Sample 3'
        }
    }
    metadata = pd.DataFrame.from_dict(metadata_dict, orient='index',
                                      dtype=str)
    SampleTemplate.create(metadata, study)
    # Matching prep template (per-sample barcodes differ in the last chars).
    metadata_dict = {
        'Sample1': {
            'primer': 'GTGCCAGCMGCCGCGGTAA',
            'barcode': 'CGTAGAGCTCTC',
            'center_name': 'KnightLab',
            'platform': 'ILLUMINA',
            'instrument_model': 'Illumina MiSeq',
            'library_construction_protocol': 'Protocol ABC',
            'experiment_design_description': "Random value 1"
        },
        'Sample2': {
            'primer': 'GTGCCAGCMGCCGCGGTAA',
            'barcode': 'CGTAGAGCTCTA',
            'center_name': 'KnightLab',
            'platform': 'ILLUMINA',
            'instrument_model': 'Illumina MiSeq',
            'library_construction_protocol': 'Protocol ABC',
            'experiment_design_description': "Random value 2"
        },
        'Sample3': {
            'primer': 'GTGCCAGCMGCCGCGGTAA',
            'barcode': 'CGTAGAGCTCTT',
            'center_name': 'KnightLab',
            'platform': 'ILLUMINA',
            'instrument_model': 'Illumina MiSeq',
            'library_construction_protocol': 'Protocol ABC',
            'experiment_design_description': "Random value 3"
        },
    }
    metadata = pd.DataFrame.from_dict(metadata_dict, orient='index',
                                      dtype=str)
    pt = PrepTemplate.create(metadata, study, "16S", 'Metagenomics')
    # Write the FASTA and convert it to a demux HDF5 file, then register
    # both as a Demultiplexed artifact (6 = demux filepath type;
    # TODO confirm against the filepath_type table).
    fna_fp = join(self.temp_dir, 'seqs.fna')
    demux_fp = join(self.temp_dir, 'demux.seqs')
    with open(fna_fp, 'w') as f:
        f.write(FASTA_EXAMPLE_2.format(study.id))
    with File(demux_fp, 'w') as f:
        to_hdf5(fna_fp, f)
    ppd = Artifact.create([(demux_fp, 6)], "Demultiplexed",
                          prep_template=pt)
    return ppd
def test_retrieve_dropped_samples(self):
    """dropped_samples reports samples excluded when building biom tables."""
    # Create and populate second study to do test with
    info = {
        "timeseries_type_id": 1,
        "metadata_complete": True,
        "mixs_compliant": True,
        "number_samples_collected": 25,
        "number_samples_promised": 28,
        "portal_type_id": 3,
        "study_alias": "FCM",
        "study_description": "Microbiome of people who eat nothing but "
                             "fried chicken",
        "study_abstract": "Exploring how a high fat diet changes the "
                          "gut microbiome",
        "emp_person_id": StudyPerson(2),
        "principal_investigator_id": StudyPerson(3),
        "lab_person_id": StudyPerson(1)
    }
    metadata_dict = {
        'SKB8.640193': {
            'physical_location': 'location1',
            'has_physical_specimen': True,
            'has_extracted_data': True,
            'sample_type': 'type1',
            'required_sample_info_status': 'received',
            'collection_timestamp': datetime(2014, 5, 29, 12, 24, 51),
            'host_subject_id': 'NotIdentified',
            'Description': 'Test Sample 1',
            'str_column': 'Value for sample 1',
            'latitude': 42.42,
            'longitude': 41.41
        },
        'SKD8.640184': {
            'physical_location': 'location1',
            'has_physical_specimen': True,
            'has_extracted_data': True,
            'sample_type': 'type1',
            'required_sample_info_status': 'received',
            'collection_timestamp': datetime(2014, 5, 29, 12, 24, 51),
            'host_subject_id': 'NotIdentified',
            'Description': 'Test Sample 2',
            'str_column': 'Value for sample 2',
            'latitude': 4.2,
            'longitude': 1.1
        },
        'SKB7.640196': {
            'physical_location': 'location1',
            'has_physical_specimen': True,
            'has_extracted_data': True,
            'sample_type': 'type1',
            'required_sample_info_status': 'received',
            'collection_timestamp': datetime(2014, 5, 29, 12, 24, 51),
            'host_subject_id': 'NotIdentified',
            'Description': 'Test Sample 3',
            'str_column': 'Value for sample 3',
            'latitude': 4.8,
            'longitude': 4.41
        },
    }
    metadata = pd.DataFrame.from_dict(metadata_dict, orient='index')
    Study.create(User("*****@*****.**"), "Test study 2", [1], info)
    SampleTemplate.create(metadata, Study(2))
    # Register an existing biom table as processed data for the new study.
    mp = get_mountpoint("processed_data")[0][1]
    study_fp = join(mp, "2_study_1001_closed_reference_otu_table.biom")
    ProcessedData.create("processed_params_uclust", 1, [(study_fp, 6)],
                         study=Study(2), data_type="16S")
    # Attach three of the new study's samples to analysis 1.
    self.conn_handler.execute(
        "INSERT INTO qiita.analysis_sample (analysis_id, "
        "processed_data_id, sample_id) VALUES "
        "(1,2,'2.SKB8.640193'), (1,2,'2.SKD8.640184'), "
        "(1,2,'2.SKB7.640196')")
    # Request only a subset of the attached samples; the remainder should
    # show up as dropped.
    samples = {
        1: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'],
        2: ['2.SKB8.640193', '2.SKD8.640184']
    }
    self.analysis._build_biom_tables(samples, 10000,
                                     conn_handler=self.conn_handler)
    exp = {1: {'1.SKM4.640180', '1.SKM9.640192'},
           2: {'2.SKB7.640196'}}
    self.assertEqual(self.analysis.dropped_samples, exp)
def setUp(self):
    """Build sample metadata, a fresh study, and references to Study(1)."""
    # Three-sample metadata used by the tests of this class.
    metadata_dict = {
        'Sample1': {
            'physical_location': 'location1',
            'has_physical_specimen': True,
            'has_extracted_data': True,
            'sample_type': 'type1',
            'required_sample_info_status_id': 1,
            'collection_timestamp': datetime(2014, 5, 29, 12, 24, 51),
            'host_subject_id': 'NotIdentified',
            'Description': 'Test Sample 1',
            'str_column': 'Value for sample 1',
            'latitude': 42.42,
            'longitude': 41.41
        },
        'Sample2': {
            'physical_location': 'location1',
            'has_physical_specimen': True,
            'has_extracted_data': True,
            'sample_type': 'type1',
            'required_sample_info_status_id': 1,
            'collection_timestamp': datetime(2014, 5, 29, 12, 24, 51),
            'host_subject_id': 'NotIdentified',
            'Description': 'Test Sample 2',
            'str_column': 'Value for sample 2',
            'latitude': 4.2,
            'longitude': 1.1
        },
        'Sample3': {
            'physical_location': 'location1',
            'has_physical_specimen': True,
            'has_extracted_data': True,
            'sample_type': 'type1',
            'required_sample_info_status_id': 1,
            'collection_timestamp': datetime(2014, 5, 29, 12, 24, 51),
            'host_subject_id': 'NotIdentified',
            'Description': 'Test Sample 3',
            'str_column': 'Value for sample 3',
            'latitude': 4.8,
            'longitude': 4.41
        },
    }
    self.metadata = pd.DataFrame.from_dict(metadata_dict, orient='index')

    # The pre-populated test study plus a brand-new empty one.
    self.test_study = Study(1)
    info = {
        "timeseries_type_id": 1,
        "metadata_complete": True,
        "mixs_compliant": True,
        "number_samples_collected": 25,
        "number_samples_promised": 28,
        "portal_type_id": 3,
        "study_alias": "FCM",
        "study_description": "Microbiome of people who eat nothing but "
                             "fried chicken",
        "study_abstract": "Exploring how a high fat diet changes the "
                          "gut microbiome",
        "emp_person_id": StudyPerson(2),
        "principal_investigator_id": StudyPerson(3),
        "lab_person_id": StudyPerson(1)
    }
    self.new_study = Study.create(User('*****@*****.**'),
                                  "Fried Chicken Microbiome", [1], info)
    self.tester = SampleTemplate(1)
    # All 27 sample ids expected in Study(1)'s sample template.
    self.exp_sample_ids = {
        'SKB1.640202', 'SKB2.640194', 'SKB3.640195', 'SKB4.640189',
        'SKB5.640181', 'SKB6.640176', 'SKB7.640196', 'SKB8.640193',
        'SKB9.640200', 'SKD1.640179', 'SKD2.640178', 'SKD3.640198',
        'SKD4.640185', 'SKD5.640186', 'SKD6.640190', 'SKD7.640191',
        'SKD8.640184', 'SKD9.640182', 'SKM1.640183', 'SKM2.640199',
        'SKM3.640197', 'SKM4.640180', 'SKM5.640177', 'SKM6.640187',
        'SKM7.640188', 'SKM8.640201', 'SKM9.640192'
    }
    # Paths queued here are removed by tearDown.
    self._clean_up_files = []
def test_retrieve_phone_null(self):
    """A person with no stored phone number returns None for .phone."""
    person = StudyPerson(3)
    # assertIsNone is the idiomatic None check and gives a clearer
    # failure message than assertEqual(..., None).
    self.assertIsNone(person.phone)
def render(self, study):
    """Render the study-information tab (publications, PI, files, EBI)."""
    study_info = study.info
    # NOTE(review): 'id' shadows the builtin; kept as-is since the
    # template expects that keyword.
    id = study.id
    abstract = study_info['study_abstract']
    description = study_info['study_description']

    # Each publication may carry a DOI, a PubMed id, or both; linkify
    # whichever parts are present.
    publications = []
    for doi, pmid in study.publications:
        if doi is not None:
            publications.append(doi_linkifier([doi]))
        if pmid is not None:
            publications.append(pubmed_linkifier([pmid]))
    publications = ", ".join(publications)

    princ_inv = StudyPerson(study_info['principal_investigator_id'])
    pi_link = study_person_linkifier((princ_inv.email, princ_inv.name))
    number_samples_promised = study_info['number_samples_promised']
    number_samples_collected = study_info['number_samples_collected']
    metadata_complete = study_info['metadata_complete']
    # Data types sorted by their name (element 1 of each (id, name) pair).
    data_types = sorted(viewitems(get_data_types()), key=itemgetter(1))

    # Retrieve the files from the uploads folder, so the user can choose
    # the sample template of the study. Filter them to only include the
    # ones that ends with 'txt' or 'tsv'.
    files = [
        f for _, f in get_files_from_uploads_folders(str(study.id))
        if f.endswith(('txt', 'tsv'))
    ]

    # If the sample template exists, retrieve all its filepaths
    if SampleTemplate.exists(study.id):
        sample_templates = SampleTemplate(study.id).get_filepaths()
    else:
        # If the sample template does not exist, just pass an empty list
        sample_templates = []

    # Check if the request came from a local source
    is_local_request = is_localhost(self.request.headers['host'])

    # The user can choose the sample template only if the study is
    # sandboxed or the current user is an admin
    show_select_sample = (study.status == 'sandbox'
                          or self.current_user.level == 'admin')

    # EBI information
    ebi_status = study.ebi_submission_status
    ebi_accession = study.ebi_study_accession
    if ebi_accession:
        ebi_accession = (EBI_LINKIFIER.format(ebi_accession))

    return self.render_string(
        "study_description_templates/study_information_tab.html",
        abstract=abstract,
        description=description,
        id=id,
        publications=publications,
        principal_investigator=pi_link,
        number_samples_promised=number_samples_promised,
        number_samples_collected=number_samples_collected,
        metadata_complete=metadata_complete,
        show_select_sample=show_select_sample,
        files=files,
        study_id=study.id,
        sample_templates=sample_templates,
        is_local_request=is_local_request,
        data_types=data_types,
        ebi_status=ebi_status,
        ebi_accession=ebi_accession)
def test_retrieve_address_null(self):
    """A person with no stored address returns None for .address."""
    person = StudyPerson(2)
    # assertIsNone is the idiomatic None check and gives a clearer
    # failure message than assertEqual(..., None).
    self.assertIsNone(person.address)
def setUp(self):
    """Shared fixture: the study person with id 1 from the test database."""
    self.studyperson = StudyPerson(1)
def setUp(self):
    """Define the input info dict, its expected stored form, and the
    expected info of the pre-existing test study (Study(1))."""
    self.study = Study(1)
    # Info as passed to Study.create: person fields are StudyPerson objects.
    self.info = {
        "timeseries_type_id": 1,
        "metadata_complete": True,
        "mixs_compliant": True,
        "number_samples_collected": 25,
        "number_samples_promised": 28,
        "portal_type_id": 3,
        "study_alias": "FCM",
        "study_description": "Microbiome of people who eat nothing but "
                             "fried chicken",
        "study_abstract": "Exploring how a high fat diet changes the "
                          "gut microbiome",
        "emp_person_id": StudyPerson(2),
        "principal_investigator_id": StudyPerson(3),
        "lab_person_id": StudyPerson(1)
    }
    # Same info as it is expected back from the DB: person fields are
    # plain integer ids.
    self.infoexp = {
        "timeseries_type_id": 1,
        "metadata_complete": True,
        "mixs_compliant": True,
        "number_samples_collected": 25,
        "number_samples_promised": 28,
        "portal_type_id": 3,
        "study_alias": "FCM",
        "study_description": "Microbiome of people who eat nothing but "
                             "fried chicken",
        "study_abstract": "Exploring how a high fat diet changes the "
                          "gut microbiome",
        "emp_person_id": 2,
        "principal_investigator_id": 3,
        "lab_person_id": 1
    }
    # Expected info of the pre-populated test study.
    self.existingexp = {
        'mixs_compliant': True,
        'metadata_complete': True,
        'reprocess': False,
        'number_samples_promised': 27,
        'emp_person_id': StudyPerson(2),
        'funding': None,
        'vamps_id': None,
        'first_contact': datetime(2014, 5, 19, 16, 10),
        'principal_investigator_id': StudyPerson(3),
        'timeseries_type_id': 1,
        'study_abstract':
            "This is a preliminary study to examine the "
            "microbiota associated with the Cannabis plant. Soils samples "
            "from the bulk soil, soil associated with the roots, and the "
            "rhizosphere were extracted and the DNA sequenced. Roots "
            "from three independent plants of different strains were "
            "examined. These roots were obtained November 11, 2011 from "
            "plants that had been harvested in the summer. Future "
            "studies will attempt to analyze the soils and rhizospheres "
            "from the same location at different time points in the plant "
            "lifecycle.",
        'spatial_series': False,
        'study_description': 'Analysis of the Cannabis Plant Microbiome',
        'portal_type_id': 2,
        'study_alias': 'Cannabis Soils',
        # BUG FIX: 'most_recent_contact' was previously listed twice (a
        # string literal, then this datetime); in a dict literal the last
        # entry wins, so the dead string entry was removed.
        'most_recent_contact': datetime(2014, 5, 19, 16, 11),
        'lab_person_id': StudyPerson(1),
        'number_samples_collected': 27
    }
def setUp(self):
    """Create complete and incomplete split-libraries output directories
    plus a study that has no preprocessed data."""
    # Create a directory with the test split libraries output
    self.test_slo = mkdtemp(prefix='test_slo_')
    path_builder = partial(join, self.test_slo)
    fna_fp = path_builder('seqs.fna')
    fastq_fp = path_builder('seqs.fastq')
    log_fp = path_builder('split_library_log.txt')
    demux_fp = path_builder('seqs.demux')
    with open(fna_fp, 'w') as f:
        f.write(FASTA_SEQS)
    with open(fastq_fp, 'w') as f:
        f.write(FASTQ_SEQS)
    with open(log_fp, 'w') as f:
        f.write("Test log\n")
    generate_demux_file(self.test_slo)
    self._filepaths_to_remove = [fna_fp, fastq_fp, demux_fp, log_fp]
    self._dirpaths_to_remove = [self.test_slo]

    # Generate a directory with test split libraries output missing files
    self.missing_slo = mkdtemp(prefix='test_missing_')
    # BUG FIX: this previously rebuilt paths under self.test_slo, so the
    # "missing files" fixture was written into the complete directory and
    # self.missing_slo stayed empty.
    path_builder = partial(join, self.missing_slo)
    fna_fp = path_builder('seqs.fna')
    fastq_fp = path_builder('seqs.fastq')
    with open(fna_fp, 'w') as f:
        f.write(FASTA_SEQS)
    with open(fastq_fp, 'w') as f:
        f.write(FASTQ_SEQS)
    self._filepaths_to_remove.append(fna_fp)
    self._filepaths_to_remove.append(fastq_fp)
    self._dirpaths_to_remove.append(self.missing_slo)

    # Create a study with no preprocessed data
    info = {
        "timeseries_type_id": 1,
        "metadata_complete": True,
        "mixs_compliant": True,
        "number_samples_collected": 25,
        "number_samples_promised": 28,
        "portal_type_id": 3,
        "study_alias": "FCM",
        "study_description": "Microbiome of people who eat nothing but "
                             "fried chicken",
        "study_abstract": "Exploring how a high fat diet changes the "
                          "gut microbiome",
        "emp_person_id": StudyPerson(2),
        "principal_investigator_id": StudyPerson(3),
        "lab_person_id": StudyPerson(1)
    }
    self.no_ppd_study = Study.create(User('*****@*****.**'),
                                     "Test study", [1], info)

    # Get the directory where the preprocessed data is usually copied.
    _, self.db_ppd_dir = get_mountpoint('preprocessed_data')[0]
def test_delete_study(self):
    """delete_study errors for EBI-submitted data, succeeds for a new study."""
    # as samples have been submitted to EBI, this will fail
    job = self._create_job('delete_study', {'study': 1})
    private_task(job.id)
    self.assertEqual(job.status, 'error')
    self.assertIn("Artifact 2 has been submitted to EBI", job.log.msg)
    # making sure the analysis, first thing to delete, still exists
    self.assertTrue(Analysis.exists(1))

    info = {
        "timeseries_type_id": '1',
        "metadata_complete": 'true',
        "mixs_compliant": 'true',
        "study_alias": "FCM",
        "study_description": "Microbiome of people who eat nothing but "
                             "fried chicken",
        "study_abstract": "Exploring how a high fat diet changes the "
                          "gut microbiome",
        "principal_investigator_id": StudyPerson(3),
        "lab_person_id": StudyPerson(1)
    }
    # Unique title (time()) so repeated test runs do not collide.
    new_study = Study.create(User('*****@*****.**'),
                             "Fried Chicken Microbiome %s" % time(), info)
    # adding tags
    new_study.update_tags(User('*****@*****.**'), ['my new tag!'])
    # creating a sample information file
    metadata = pd.DataFrame.from_dict(
        {
            'Sample1': {
                'physical_specimen_location': 'location1',
                'taxon_id': '9606',
                'scientific_name': 'h**o sapiens'
            },
            'Sample2': {
                'physical_specimen_location': 'location1',
                'taxon_id': '9606',
                'scientific_name': 'h**o sapiens'
            },
            'Sample3': {
                'physical_specimen_location': 'location1',
                'taxon_id': '9606',
                'scientific_name': 'h**o sapiens'
            }
        }, orient='index')
    SampleTemplate.create(metadata, new_study)
    # creating a preparation information file
    metadata = pd.DataFrame.from_dict(
        {
            'Sample1': {
                'center_name': 'ANL',
                'target_subfragment': 'V4',
                'center_project_name': 'Test Project'
            }
        }, orient='index', dtype=str)
    PrepTemplate.create(metadata, new_study, '16S')

    job = self._create_job('delete_study', {'study': new_study.id})
    private_task(job.id)
    self.assertEqual(job.status, 'success')

    # making sure the study doesn't exist
    with self.assertRaises(QiitaDBUnknownIDError):
        Study(new_study.id)