def setUp(self):
    # Fixture: three experiments, two subjects and four samples wired into
    # a small object graph via their private relationship attributes.
    self.experiment1 = Experiment(orig_study_id=1)
    self.experiment2 = Experiment(orig_study_id=2)
    self.experiment3 = Experiment(orig_study_id=3)
    self.subject1 = Subject(orig_subject_id=1)
    self.subject2 = Subject(orig_subject_id=2)
    self.sample1 = Sample(orig_sample_id=1)
    self.sample2 = Sample(orig_sample_id=2)
    self.sample3 = Sample(orig_sample_id=3)
    self.sample4 = Sample(orig_sample_id=4)  # TODO: Delete?

    # Set up relationships
    # subject <-> sample links (sample4 is deliberately left unlinked).
    self.subject1._samples = {self.sample1, self.sample2}
    self.subject2._samples = {self.sample3}
    self.sample1._subject = self.subject1
    self.sample2._subject = self.subject1
    self.sample3._subject = self.subject2
    # experiment -> subject/sample links.
    self.experiment1._samples = {self.sample1, self.sample2}
    self.experiment1._subjects = {self.subject1}
    self.experiment2._samples = {self.sample1}
    self.experiment2._subjects = {self.subject1}
    self.experiment3._samples = {self.sample1, self.sample3}
    self.experiment3._subjects = {self.subject1, self.subject2}
    # Reverse links from subjects/samples back to their experiments.
    self.subject1._experiments = {
        self.experiment1, self.experiment2, self.experiment3
    }
    self.subject2._experiments = {self.experiment3}
    self.sample1._experiments = {
        self.experiment1, self.experiment2, self.experiment3
    }
    self.sample2._experiments = {self.experiment1}
    self.sample3._experiments = {self.experiment3}
def setUp(self):
    # Fixture: one experiment, two subjects and four samples; only the
    # subject <-> sample relationships are established here.
    self.experiment1 = Experiment()
    self.subject1 = Subject(orig_subject_id=1)
    self.subject2 = Subject(orig_subject_id=2)
    self.sample1 = Sample(orig_sample_id=1)
    self.sample2 = Sample(orig_sample_id=2)
    self.sample3 = Sample(orig_sample_id=3)
    self.sample4 = Sample(orig_sample_id=4)  # left unlinked below

    # Set up relationships
    self.subject1._samples = {self.sample1, self.sample2}
    self.subject2._samples = {self.sample3}
    self.sample1._subject = self.subject1
    self.sample2._subject = self.subject1
    self.sample3._subject = self.subject2
def parse_subject(row, source=None):
    """Parse a row into a Subject.

    Parameters
    ----------
    row : dict-like
        Object whose keys are column headings and values are the row values.
    source : model.Source, optional
        Source for the returned subject.  When omitted (or falsy), a source
        is parsed from ``row`` itself.

    Returns
    -------
    Subject

    Raises
    ------
    TypeError
        If ``source`` is given but is not a ``Source`` instance.
    """
    subject = Subject()
    subject.sex = get_sex(row)
    subject.country = get_country(row)
    subject.race = get_race(row)
    subject.csection = get_csection(row)
    subject.disease = get_disease(row)
    subject.dob = get_dob(row)
    # Initialize equality attrs
    if not source:
        subject.source = parse_source(row)
    elif isinstance(source, Source):
        subject.source = source
    else:
        # {Source!r} renders the same text as {type(Source())!r} without
        # constructing a throwaway Source instance just for its type.
        raise TypeError(f'Given source was not of type {Source!r}.')
    subject.orig_study_id = get_study_id(row)
    subject.orig_subject_id = get_subject_id(row)
    return subject
def subject(id):
    """Admin view: create, edit or delete a single Subject.

    ``id`` is either the literal string ``'new'`` or an integer primary key.
    (Parameter name shadows the builtin ``id`` but is part of the route
    interface, so it is kept.)
    """
    # Only administrators may manage subjects.
    if current_user.role_name != 'AdminUser':
        return render_error(403)

    if id == 'new':
        subj = Subject()
    else:
        try:
            id = int(id)
        except ValueError:
            return render_error(400)
        subj = db.session.query(Subject).filter(Subject.id == id).one_or_none()
        if subj is None:
            return render_error(404)

    form = SubjectForm(request.form if request.method == 'POST' else None,
                       obj=subj)

    if form.button_delete.data:
        form.validate()
        # Refuse to delete a subject that curriculum units still reference.
        unit_count = db.session.query(CurriculumUnit).filter(
            CurriculumUnit.subject_id == subj.id).count()
        if unit_count > 0:
            form.button_delete.errors.append(
                'Невозможно удалить предмет, к которому привязаны единицы учебного плана')
        if not form.button_delete.errors:
            db.session.delete(subj)
            db.session.commit()
            db.session.flush()
            return redirect(url_for('subjects'))

    if form.button_save.data and form.validate():
        form.populate_obj(subj)
        db.session.add(subj)
        db.session.commit()
        if id == 'new':
            # Redirect to the canonical URL of the freshly created subject.
            db.session.flush()
            return redirect(url_for('subject', id=subj.id))

    return render_template('subject.html', subject=subj, form=form)
def set_up_domains():
    """Build the list of Class objects to schedule.

    For every entry in the module-level ``subjects`` list this creates one
    lecture Class (group number 0, taught by the subject's lector) plus one
    practice Class per group, each taught by a randomly chosen qualified
    teacher.

    Returns
    -------
    list of Class
        All lecture and practice classes, with no timeslot assigned (None).
    """
    classes = []
    for subj in subjects:
        # Teachers qualified to lecture this subject.
        lectors = [t for t in teachers if subj["name"] in t["lector"]]
        # Teachers qualified to run practices (lectors qualify as well).
        practice_pool = [
            t for t in teachers
            if subj["name"] in t["lector"] or subj["name"] in t["practices"]
        ]
        # One lecture for the whole cohort; group number 0 marks a lecture.
        classes.append(
            Class(
                Subject(lectors[0]["name"], subj["name"], subj["students"],
                        0, subj["groups"]),
                None))
        # One practice class per group with a uniformly random teacher.
        for group in range(subj["groups"]):
            teacher = random.choice(practice_pool)
            classes.append(
                Class(
                    Subject(teacher["name"], subj["name"],
                            subj["students"] // subj["groups"],
                            group + 1, subj["groups"]),
                    None))
    return classes
def category_handler(subject_dict, book_object, subject_list, source):
    """Takes in categories returned from OpenLib or Google Books.

    Normalizes category names & saves subjects to dictionary.

    Parameters
    ----------
    subject_dict : dict
        Maps book_id -> set of normalized category strings; mutated in place.
        Values are assumed to be sets (this function only ever stores sets).
    book_object : Book
        The book whose categories are being recorded.
    subject_list : iterable of str
        Raw category names from the external API.
    source : str
        Provenance tag stored on newly created Subject rows.

    Returns
    -------
    dict
        The (mutated) ``subject_dict``.
    """
    for raw in subject_list:
        category = raw.lower()
        # Truncate at the first occurrence of any delimiter; only the text
        # before the delimiter is kept.
        for sep in (" in fiction", "(", ", ", "=", "/"):
            category = category.partition(sep)[0]
        # Create the Subject row only if it does not already exist.
        if Subject.query.filter_by(subject=category).first() is None:
            db.session.add(Subject(subject=category, source=source))
        # Record the category against this book (creating the set on first use).
        subject_dict.setdefault(book_object.book_id, set()).add(category)
    return subject_dict
def load_subjects(soup, poem):
    """loads subjects from poem meta tags"""
    poem_id = poem.poem_id
    subjects = Parse.parse_subjects(soup)
    if subjects:
        for subject in subjects:
            try:
                # Look up an existing Subject row by exact name match.
                subject_id = Subject.query.filter(
                    Subject.subject_name == subject).one().subject_id
            except NoResultFound:
                # NOTE(review): `f` is not defined anywhere in this scope —
                # this call will raise NameError whenever a subject is
                # missing; confirm the intended argument (a filename?).
                log_err('subject', f, subject)
                # Create the missing Subject; flush so its autogenerated
                # subject_id becomes available before linking.
                s = Subject(subject_name=subject)
                db.session.add(s)
                db.session.flush()
                subject_id = s.subject_id
            # Link poem to subject through the association table.
            poemsubject = PoemSubject(poem_id=poem_id, subject_id=subject_id)
            db.session.add(poemsubject)
            db.session.flush()
def parse_subject(row):
    """Parse a row into a Subject.

    Currently a stub: ``row`` is not yet inspected and a blank Subject is
    returned unconditionally.
    """
    return Subject()
def parse_subject(row, attr_map):
    """Build a Subject from a row using a column-index -> attribute map.

    ``attr_map`` maps a key/index into ``row`` to the name of the Subject
    attribute that should receive that row value.
    """
    subject = Subject()
    for column, attr_name in attr_map.items():
        setattr(subject, attr_name, row[column])
    return subject
class SampleParserTest(unittest.TestCase):
    """Tests for the sample-metadata parsers (parse_objects, parse_sample,
    parse_subject, parse_processings) against a small QIITA-style fixture.
    """

    # Path to the tab-separated sample-metadata fixture.
    sample_test_file = './data/test_data/samp_metadata/sample1.txt'

    # One raw metadata row exactly as a parser receives it (column -> value).
    row = OrderedDict([
        ('sample_name', '317.F10'),
        ('age', '22'),
        ('age_unit', 'years'),
        ('altitude', '0'),
        ('anatomical_body_site', 'FMA:Palm'),
        ('anonymized_name', 'F10'),
        ('body_habitat', 'UBERON:skin'),
        ('body_product', 'UBERON:sebum'),
        ('body_site', 'UBERON:zone of skin of hand'),
        ('collection_date', '11/12/2006'),
        ('country', 'GAZ:United States of America'),
        ('depth', '0'),
        ('description', 'human skin metagenome'),
        ('dna_extracted', 'true'),
        ('dominant_hand', ''),
        ('elevation', '1591.99'),
        ('env_biome', 'ENVO:human-associated habitat'),
        ('env_feature', 'ENVO:human-associated habitat'),
        ('host_common_name', 'human'),
        ('host_subject_id', 'F1'),
        ('host_taxid', '9606'),
        ('latitude', '40'),
        ('longitude', '-105'),
        ('palm_size', ''),
        ('physical_specimen_remaining', 'false'),
        ('public', 'true'),
        ('qiita_study_id', '317'),
        ('sample_type', 'XXQIITAXX'),
        ('sex', 'female'),
        ('time_since_last_wash', '0'),
        ('title',
         'The influence of sex handedness and washing on the diversity of hand surface bacteriaS1_V160'
         )
    ])

    # collection_date is parsed month-first for this fixture.
    dayfirst_dict = {'collection_date': False}

    # TODO Update details of source (when necessary)
    source1 = Source(name='qiita',
                     type_='Database (Public)',
                     url='https://qiita.ucsd.edu/study/description/0')

    # Expected objects that parsing the fixture should produce.
    experiment1 = Experiment(source=source1, orig_study_id='317')
    subject1 = Subject(
        source=source1,
        orig_study_id='317',
        orig_subject_id='F1',
        sex='female',
        country='United States of America',
        race=None,
        csection=None,
        disease=None,
        dob=None,
    )
    subject2 = Subject(
        source=source1,
        orig_study_id='317',
        orig_subject_id='F2',
        sex='female',
        country='United States of America',
        race=None,
        csection=None,
        disease=None,
        dob=None,
    )
    sampling_site = SamplingSite(
        uberon_habitat_term='UBERON:skin',
        uberon_product_term='UBERON:sebum',
        uberon_site_term='UBERON:zone of skin of hand',
        env_biom_term='ENVO:human-associated habitat',
        env_feature_term='ENVO:human-associated habitat')
    sampling_time = Time(timestamp=datetime.datetime(2006, 11, 12),
                         uncertainty=None,
                         date=datetime.date(2006, 11, 12),
                         time=None,
                         year=2006,
                         month=11,
                         day=12,
                         hour=None,
                         minute=None,
                         second=None,
                         season='autumn')
    sample1 = Sample(source=source1,
                     orig_study_id='317',
                     orig_subject_id='F1',
                     orig_sample_id='317.F10',
                     age_units=ureg.years,
                     age=22.0,
                     latitude=40.0,
                     longitude=-105.0,
                     elevation=1591.99,
                     height_units=ureg.metres,
                     height=None,
                     weight_units=ureg.kilograms,
                     weight=None,
                     bmi=None,
                     sample_date=datetime.date(2006, 11, 12),
                     sample_time=None,
                     sampling_site=sampling_site,
                     sampling_time=sampling_time)
    sample2 = Sample(source=source1,
                     orig_study_id='317',
                     orig_subject_id='F1',
                     orig_sample_id='317.F12',
                     age_units=ureg.years,
                     age=22.0,
                     latitude=40.0,
                     longitude=-105.0,
                     elevation=1591.99,
                     height_units=ureg.metres,
                     height=None,
                     weight_units=ureg.kilograms,
                     weight=None,
                     bmi=None,
                     sample_date=datetime.date(2006, 11, 12),
                     sample_time=None,
                     sampling_site=sampling_site,
                     sampling_time=sampling_time)
    sample3 = Sample(source=source1,
                     orig_study_id='317',
                     orig_subject_id='F2',
                     orig_sample_id='317.F20',
                     age_units=ureg.years,
                     age=None,
                     latitude=40.0,
                     longitude=-105.0,
                     elevation=1591.99,
                     height_units=ureg.metres,
                     height=None,
                     weight_units=ureg.kilograms,
                     weight=None,
                     bmi=None,
                     sample_date=datetime.date(2006, 11, 12),
                     sample_time=None,
                     sampling_site=sampling_site,
                     sampling_time=sampling_time)

    # Not necessary to establish these relationships for purpose of
    # test_parse_objects:
    sample1._subject = subject1
    sample2._subject = subject1
    sample3._subject = subject2
    subject1._samples = {sample1, sample2}
    subject2._samples = {sample3}
    experiment1._subjects = {subject1, subject2}
    experiment1._samples = {sample1, sample2, sample3}

    def test_parse_objects(self):
        # Parsing the fixture file should yield study '317' with the full
        # expected experiment/subject/sample graph.
        experiment_ids = parse_objects(self.sample_test_file)
        self.assertIn('317', experiment_ids)
        experiment = experiment_ids['317']
        self.assertEqual(self.experiment1, experiment)
        self.assertIn(self.subject1, experiment.subjects)
        self.assertIn(self.subject2, experiment.subjects)
        self.assertIn(self.sample1, experiment.samples)
        self.assertIn(self.sample2, experiment.samples)
        self.assertIn(self.sample3, experiment.samples)

    # TODO: We will have to test without the source keyword at some point.
    def test_parse_sample(self):
        self.maxDiff = None
        # Attributes excluded from the dict comparison: ORM/identity state
        # and relationship attributes checked separately below.
        blacklist_attrs = [
            '_sa_instance_state', 'source', 'counts', '_experiments',
            '_subject', '_preparations'
        ]
        sample = parse_sample(self.row, self.dayfirst_dict,
                              source=self.source1)
        sample_attrs = set(
            (key, value) for key, value in sample.__dict__.items()
            if key not in blacklist_attrs)
        expected_attrs = set(
            (key, value) for key, value in self.sample1.__dict__.items()
            if key not in blacklist_attrs)
        self.assertEqual(sample_attrs, expected_attrs)
        self.assertEqual(sample.source, self.source1)
        self.assertEqual(sample.counts, self.sample1.counts)
        # When sample is parsed, it is not yet associated with subject/experiments
        self.assertEqual(sample._subject, None)
        self.assertEqual(sample._experiments, set())
        self.assertEqual(sample._preparations, set())

    def test_parse_subject(self):
        self.maxDiff = None
        # Same comparison strategy as test_parse_sample, with the subject's
        # relationship attributes excluded and verified separately.
        blacklist_attrs = [
            '_sa_instance_state', 'source', 'counts', 'perturbation_facts',
            '_experiments', '_samples', '_perturbations'
        ]
        subject = parse_subject(self.row, source=self.source1)
        subject_attrs = set(
            (key, value) for key, value in subject.__dict__.items()
            if key not in blacklist_attrs)
        expected_attrs = set(
            (key, value) for key, value in self.subject1.__dict__.items()
            if key not in blacklist_attrs)
        self.assertEqual(subject_attrs, expected_attrs)
        self.assertEqual(subject.source, self.source1)
        self.assertEqual(subject.counts, self.subject1.counts)
        self.assertEqual(subject.perturbation_facts,
                         self.subject1.perturbation_facts)
        # When subject is parsed, it is not yet associated with samples/experiments
        self.assertEqual(subject._experiments, set())
        self.assertEqual(subject._samples, set())
        self.assertEqual(subject._perturbations, set())

    def test_parse_processing(self):
        self.maxDiff = None
        # Expected three-stage processing chain: raw data -> split libraries
        # -> closed-reference OTU picking, linked via `parent`.
        processing1 = Processing(parent=None,
                                 parameter_values='{}',
                                 orig_prep_id='577',
                                 orig_proc_id='2593')
        processing2 = Processing(parent=processing1,
                                 parameter_values='{'
                                 '"barcode_type":"golay_12",'
                                 '"command":"Split libraries (QIIMEq2 1.9.1)",'
                                 '"disable_bc_correction":"False",'
                                 '"disable_primers":"False",'
                                 '"generated on":"2016-01-14 17:01",'
                                 '"input_data":"2593",'
                                 '"max_ambig":"6",'
                                 '"max_barcode_errors":"1.5",'
                                 '"max_homopolymer":"6",'
                                 '"max_primer_mismatch":"0",'
                                 '"max_seq_len":"1000",'
                                 '"min_qual_score":"25",'
                                 '"min_seq_len":"200",'
                                 '"qual_score_window":"0",'
                                 '"reverse_primer_mismatches":"0",'
                                 '"reverse_primers":"disable",'
                                 '"trim_seq_length":"False",'
                                 '"truncate_ambi_bases":"False"'
                                 '}',
                                 orig_prep_id='577',
                                 orig_proc_id='310')
        processing3 = Processing(
            parent=processing2,
            parameter_values='{'
            '"command":"Pick closed-reference OTUs (QIIMEq2 1.9.1)",'
            '"generated on":"2015-06-30 14:06",'
            '"input_data":"310",'
            '"reference-seq":"/databases/gg/13_8/rep_set/97_otus.fasta",'
            '"reference-tax":"/databases/gg/13_8/taxonomy/97_otu_taxonomy.txt",'
            '"similarity":"0.97",'
            '"sortmerna_coverage":"0.97",'
            '"sortmerna_e_value":"1",'
            '"sortmerna_max_pos":"10000",'
            '"threads":"1"'
            '}',
            orig_prep_id='577',
            orig_proc_id='2594')
        expected_processings = {
            '2593': processing1,
            '310': processing2,
            '2594': processing3
        }
        processings = parse_processings('./data/test_data/proc1.json')
        # TODO: Implement workflows and parents as mocks?
        blacklist_attrs = ['_sa_instance_state', 'workflows', 'parent']
        for proc_id, processing in processings.items():
            self.assertIn(proc_id, expected_processings)
            processing_attrs = set(
                (key, value) for key, value in processing.__dict__.items()
                if key not in blacklist_attrs)
            expected_attrs = set(
                (key, value)
                for key, value in expected_processings[proc_id].__dict__.items()
                if key not in blacklist_attrs)
            self.assertEqual(processing_attrs, expected_attrs)