def test_colleague_model_search_result_dict_with_urls(self):
    """to_search_results_dict exposes contact fields plus lab/research URLs."""
    factory.SourceFactory()
    person = factory.ColleagueFactory()
    rows = DBSession.query(Colleague).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(person, rows[0])
    research_url = factory.ColleagueUrlFactory(colleague_id=person.colleague_id)
    lab_url = factory.ColleagueUrlFactory(colleague_id=person.colleague_id, url_type="Lab")
    rows = DBSession.query(Colleague).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(person, rows[0])
    expected = {
        'format_name': person.format_name,
        'first_name': person.first_name,
        'last_name': person.last_name,
        'organization': person.institution,
        'work_phone': person.work_phone,
        'fax': person.fax,
        'email': person.email,
        'webpages': {
            'lab_url': lab_url.obj_url,
            'research_summary_url': research_url.obj_url
        }
    }
    self.assertEqual(person.to_search_results_dict(), expected)
def ask_competition(self, update, context):
    """Show the selected dog's participation in its club's competition.

    Sends (or edits) a Telegram message describing the competition and the
    dog's reward, with a single "Back" button, then stays in the
    ASK_COMPETITION conversation state.
    """
    user = context.user_data['user']
    club = self.parent.selected_object(context)
    # Only the first competition of the club is considered.
    competition = DBSession.query(Competition).filter(
        Competition.club_id == club.id).first()
    dog = self.selected_object(context)
    buttons = []
    # Association row keyed by (dog, competition) — None when the dog
    # never participated.
    dog_competition_aosc = DBSession.query(DogCompetitions).get(
        (dog.id, competition.id))
    if dog_competition_aosc:
        # NOTE(review): "Название" (name) is rendered from competition.date —
        # looks like a copy/paste bug; presumably it should use the
        # competition's name attribute. Confirm before changing.
        message_text = "Участие в сореввновании" + '\n'
        message_text += f"Название: {competition.date.strftime('%d.%m.%Y ')}" + '\n'
        message_text += f"Дата проведения: {competition.date.strftime('%d.%m.%Y ')}" + '\n'
        # NOTE(review): attribute is spelled "contributiont" — verify this
        # matches the Competition model column before "fixing" the spelling.
        message_text += f"Взнос: {competition.contributiont}" + '\n'
        message_text += f"Кол-во зрителей: {competition.viewers_quantity}" + '\n'
        message_text += f"Награда: {dog_competition_aosc.reward}" + '\n'
    else:
        message_text = "Данные о соревнованиях отсутствуют!"
    buttons.append(
        [InlineKeyboardButton("🔙 Back", callback_data=f'back_to_dog')])
    send_or_edit(context,
                 chat_id=user.chat_id,
                 text=message_text,
                 reply_markup=InlineKeyboardMarkup(buttons,
                                                   resize_keyboard=True))
    return self.States.ASK_COMPETITION
def load_locus():
    """Load gene lookup keys (aliases, SGDID, names, ids) into Redis."""
    print("Loading genes into Redis...")
    genes = DBSession.query(Locusdbentity).all()
    aliases = DBSession.query(
        LocusAlias.locus_id, LocusAlias.display_name).filter(
            LocusAlias.alias_type.in_(['Uniform', 'Non-uniform'])).all()
    ids_to_aliases = {}
    for alias in aliases:
        ids_to_aliases.setdefault(alias.locus_id, []).append(alias.display_name)
    # in case of collisions, later writes overwrite earlier ones, so each
    # name class is written in its own full pass to keep the priority order
    # (aliases lowest, dbentity_id highest)
    for gene in genes:
        for alias_name in ids_to_aliases.get(gene.dbentity_id, []):
            table_set(str(alias_name).upper(), gene.dbentity_id, "locus")
    key_passes = (
        lambda g: g.sgdid.upper(),
        lambda g: g.systematic_name.upper(),
        lambda g: g.display_name.upper(),
        lambda g: g.dbentity_id,
    )
    for key_of in key_passes:
        for gene in genes:
            table_set(str(key_of(gene)), gene.dbentity_id, "locus")
def index_reserved_names():
    """Index reserved gene names into Elasticsearch."""
    # only index reservednames that do not have a locus associated with them
    reserved_names = DBSession.query(Reservedname).all()
    print("Indexing " + str(len(reserved_names)) + " reserved names")
    for rn in reserved_names:
        display = rn.display_name
        link = rn.obj_url
        search_keys = [rn.display_name.lower()]
        if rn.locus_id:
            # A reserved name tied to a locus is displayed together with its
            # ORF and points at the locus page instead; it gets no search keys.
            locus = DBSession.query(Locusdbentity).filter(
                Locusdbentity.dbentity_id == rn.locus_id).one_or_none()
            display = display + ' / ' + locus.systematic_name
            link = locus.obj_url
            search_keys = []
        es.index(
            index=INDEX_NAME,
            doc_type=DOC_TYPE,
            body={
                "name": display,
                "href": link,
                "description": rn.name_description,
                "category": "reserved_name",
                "keys": search_keys
            },
            id="reserved_" + rn.format_name)
def index_strains():
    """Index strains into Elasticsearch, one document per strain."""
    strains = DBSession.query(Straindbentity).all()
    print("Indexing " + str(len(strains)) + " strains")
    for strain in strains:
        keys = {value.lower()
                for value in (strain.display_name, strain.format_name,
                              strain.genbank_id)
                if value is not None}
        summary = DBSession.query(Strainsummary.text).filter_by(
            strain_id=strain.dbentity_id).one_or_none()
        # NOTE(review): `description` is computed here but never used below —
        # the document description comes from strain.headline. Possibly the
        # summary text was meant to be the description; confirm intent.
        description = None
        if summary:
            description = summary[0]
        es.index(
            index=INDEX_NAME,
            doc_type=DOC_TYPE,
            body={
                "name": strain.display_name,
                "href": strain.obj_url,
                "description": strain.headline,
                "category": "strain",
                "keys": list(keys)
            },
            id="strain_" + str(strain.dbentity_id))
def ask_payment(self, update, context):
    """Show payments registered for the selected employee, then stay in
    the ASK_PAYMENT conversation state."""
    user = context.user_data['user']
    service = self.parent.selected_object(context)
    # Only the first payment of the taxation service is considered.
    payment = DBSession.query(Payment).filter(
        Payment.taxation_service_id == service.id).first()
    employee = self.selected_object(context)
    link = DBSession.query(EmployeePayment).get((employee.id, payment.id))
    if link:
        parts = [
            "Оформленные платежи",
            f"Дата: {payment.date.strftime('%d.%m.%Y ')}",
            f"Сумма: {payment.amount}",
            f"Тип: {payment.type.to_str()}",
        ]
        message_text = "\n".join(parts) + "\n"
    else:
        message_text = "Данные о оформленных платежах отсутствуют!"
    buttons = [[
        InlineKeyboardButton("🔙 Back", callback_data='back_to_employee')
    ]]
    send_or_edit(context,
                 chat_id=user.chat_id,
                 text=message_text,
                 reply_markup=InlineKeyboardMarkup(buttons,
                                                   resize_keyboard=True))
    return self.States.ASK_PAYMENT
def get_phenotypes_condition(cls, condition_str=None):
    '''
    Get join between phenotypeannotation and phenotype condition

    :param cls: not required
    :param condition_str: optional condition_class filter; when None, all
        condition rows are returned
    :return: dict mapping phenotype_id -> list of
        (Phenotypeannotation, PhenotypeannotationCond) tuples

    Fix: both branches repeated the same query and grouping loop; only the
    extra filter differs, so it is applied conditionally.
    '''
    query = DBSession.query(
        Phenotypeannotation, PhenotypeannotationCond).filter(
            Phenotypeannotation.annotation_id ==
            PhenotypeannotationCond.annotation_id)
    if condition_str is not None:
        query = query.filter(
            PhenotypeannotationCond.condition_class == condition_str)
    obj = {}
    for item in query.all():
        # Group joined rows by the annotation's phenotype id.
        obj.setdefault(item[0].phenotype_id, []).append(item)
    return obj
def add_pmids(file_name, file_pmids, src_id, uname):
    """ add pmids """
    # Links a file (looked up by display_name) to references (looked up by
    # PMID) via ReferenceFile rows. Best-effort: any failure is logged and
    # swallowed, never propagated to the caller.
    try:
        if len(file_pmids) > 0:
            existing = DBSession.query(Filedbentity).filter(
                Filedbentity.display_name == file_name).one_or_none()
            pmid_list = file_pmids
            if existing:
                for pmid in pmid_list:
                    pmid = int(pmid.strip())
                    # NOTE(review): this lookup filters only on file_id, so as
                    # soon as the file has ANY ReferenceFile row no further
                    # PMIDs are ever linked; presumably it should also filter
                    # on reference_id. It is also loop-invariant as written.
                    # Confirm intent before changing.
                    existing_ref_file = DBSession.query(ReferenceFile).filter(
                        ReferenceFile.file_id == existing.dbentity_id).one_or_none()
                    ref = DBSession.query(Referencedbentity).filter(
                        Referencedbentity.pmid == pmid).one_or_none()
                    if ref and not existing_ref_file:
                        new_ref_file = ReferenceFile(
                            created_by=uname,
                            file_id=existing.dbentity_id,
                            reference_id=ref.dbentity_id,
                            source_id=src_id)
                        DBSession.add(new_ref_file)
    except Exception as e:
        logging.error("Exception occurred", exc_info=True)
def add_path_entries(file_name, file_path, src_id, uname):
    """ add paths to file_path table """
    # Creates a FilePath link between an existing Filedbentity (by
    # display_name) and an existing Path (by path string), unless the link
    # already exists. Failures are logged, never raised.
    try:
        file_row = DBSession.query(Filedbentity).filter(
            Filedbentity.display_name == file_name).one_or_none()
        if not file_row:
            logging.error('error with ' + file_name)
        path_row = DBSession.query(Path).filter_by(path=file_path).one_or_none()
        if path_row is None:
            logging.warning('Could not find path ')
        elif file_row:
            link = DBSession.query(FilePath).filter(
                and_(FilePath.file_id == file_row.dbentity_id,
                     FilePath.path_id == path_row.path_id)).one_or_none()
            if not link:
                DBSession.add(
                    FilePath(file_id=file_row.dbentity_id,
                             path_id=path_row.path_id,
                             source_id=src_id,
                             created_by=uname))
    except Exception as e:
        logging.error("Exception occurred", exc_info=True)
def get_entities(self):
    """Fetch all active S288C genes, retrying transient DB errors.

    Retries up to 3 times on SQLAlchemy StatementError, rolling back the
    session between attempts.

    Returns:
        list of Locusdbentity rows with dbentity_status 'Active' that are
        annotated on taxonomy 274901 (S288C).

    Fix: `all_genes` was unbound when every attempt failed, so the final
    `return` raised NameError/UnboundLocalError; it now defaults to [].
    """
    self.logger.info('getting genes')
    all_genes = []  # fix: previously unbound if all attempts failed
    attempts = 0
    while attempts < 3:
        try:
            # get S288C genes (taxonomy_id 274901)
            gene_ids_so = DBSession.query(
                Dnasequenceannotation.dbentity_id,
                Dnasequenceannotation.so_id).filter(
                    Dnasequenceannotation.taxonomy_id == 274901).all()
            dbentity_ids_to_so = {}
            dbentity_ids = set([])
            so_ids = set([])
            for gis in gene_ids_so:
                dbentity_ids.add(gis[0])
                so_ids.add(gis[1])
                dbentity_ids_to_so[gis[0]] = gis[1]
            all_genes = DBSession.query(Locusdbentity).filter(
                Locusdbentity.dbentity_id.in_(list(dbentity_ids)),
                Locusdbentity.dbentity_status == 'Active').all()
            break
        except StatementError:
            traceback.print_exc()
            log.info(
                'DB error corrected. Rollingback previous error in db connection'
            )
            DBSession.rollback()
            attempts += 1
    return all_genes
def main(): """""" _ = lambda text: text """-----------------User-----------------""" Permission.create('start_menu_access', _('Access to "Start" menu')) """-----------------Admin-----------------""" # menus Permission.create('admin_menu_access', _('Access to "Admin" menu')) Permission.create('distribution_menu_access', _('Access to "Distribution" admin menu')) Permission.create('settings_menu_access', _('Access to "Settings" admin menu')) # permissions menu Permission.create(Permission.view_permission, _('Access to "Permissions" menu')) Permission.create(Permission.add_permission, _('Allow add permission')) Permission.create(Permission.delete_permission, _('Allow remove permission')) Permission.create('superuser', _('Superuser')) DBSession.commit() users = DBSession.query(User).filter( User.chat_id.in_(SUPERUSER_ACCOUNTS)).all() permission = DBSession.query(Permission).get('superuser') for user in users: if not user.has_permission(permission.code): user.permissions.append(permission) DBSession.add(user) DBSession.commit()
def get_referals(self, user_id):
    """Return referral counts for levels 1-4 below the given user."""
    # Level 1 = direct referrals; each further level is the union of the
    # previous level's referrals.
    levels = [
        DBSession.query(User).filter(
            User.parent_referal_id == user_id).all()
    ]
    for _ in range(3):
        next_level = []
        for parent in levels[-1]:
            next_level += DBSession.query(User).filter(
                User.parent_referal_id == parent.chat_id).all()
        levels.append(next_level)
    return [len(level) for level in levels]
def index_colleagues():
    """Bulk-index all colleagues into Elasticsearch.

    Each colleague yields one document carrying contact info, position
    (PI -> "Head of Lab"), associated locus names, and helper-added keywords.
    Documents are flushed in bulk batches of 500 (1000 bulk entries).

    Fix: replaced the Python 2 `print` statement with the print() function
    used by the other index_* functions in this file.
    """
    colleagues = DBSession.query(Colleague).all()
    print("Indexing " + str(len(colleagues)) + " colleagues")
    bulk_data = []
    for c in colleagues:
        description_fields = []
        for field in [c.institution, c.country]:
            if field:
                description_fields.append(field)
        description = ", ".join(description_fields)
        position = "Lab Member"
        if c.is_pi == 1:
            position = "Head of Lab"
        locus = set()
        locus_ids = DBSession.query(ColleagueLocus.locus_id).filter(
            ColleagueLocus.colleague_id == c.colleague_id).all()
        if len(locus_ids) > 0:
            ids_query = [k[0] for k in locus_ids]
            locus_names = (
                DBSession.query(Locusdbentity.gene_name,
                                Locusdbentity.systematic_name)
                .filter(Locusdbentity.dbentity_id.in_(ids_query))
                .all()
            )
            for l in locus_names:
                if l[0]:
                    locus.add(l[0])
                if l[1]:
                    locus.add(l[1])
        obj = {
            "name": c.last_name + ", " + c.first_name,
            "category": "colleague",
            "href": "/colleague/" + c.format_name + "/overview",
            "description": description,
            "first_name": c.first_name,
            "last_name": c.last_name,
            "institution": c.institution,
            "position": position,
            "country": c.country,
            "state": c.state,
            "colleague_loci": sorted(list(locus)),
        }
        c._include_keywords_to_dict(obj)  # adds 'keywords' to obj
        bulk_data.append({"index": {"_index": INDEX_NAME, "_type": DOC_TYPE, "_id": c.format_name}})
        bulk_data.append(obj)
        if len(bulk_data) == 1000:
            es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
            bulk_data = []
    if len(bulk_data) > 0:
        es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def test_dbuser_model(self):
    """Dbuser table is empty until the factory creates exactly one row."""
    rows = DBSession.query(Dbuser).all()
    self.assertEqual(0, len(rows))
    created = factory.DbuserFactory()
    rows = DBSession.query(Dbuser).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(created, rows[0])
def test_source_model(self):
    """Source table is empty until the factory creates exactly one row."""
    rows = DBSession.query(Source).all()
    self.assertEqual(0, len(rows))
    created = factory.SourceFactory()
    rows = DBSession.query(Source).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(created, rows[0])
def index_disease_terms():
    """Bulk-index Disease Ontology (DO) terms into Elasticsearch."""
    dos = DBSession.query(Disease).all()
    print(("Indexing " + str(len(dos)) + " DO terms"))
    bulk_data = []
    for do in dos:
        synonyms = DBSession.query(DiseaseAlias.display_name).filter_by(
            disease_id=do.disease_id).all()
        references = set([])
        disease_loci = set([])
        annotations = DBSession.query(Diseaseannotation).filter_by(
            disease_id=do.disease_id).all()
        for annotation in annotations:
            # Negative ("NOT") annotations are excluded from the loci and
            # reference lists, though they still count toward
            # number_annotations below.
            if annotation.disease_qualifier != "NOT":
                disease_loci.add(annotation.dbentity.display_name)
                references.add(annotation.reference.display_name)
        # NOTE(review): 'derives_from' appears to be a sentinel doid for rows
        # that are not real DO terms; those rows are skipped — confirm.
        if do.doid != 'derives_from':
            numerical_id = do.doid.split(":")[1]
            # Search keys: full DOID plus zero-stripped numeric variants.
            key_values = [
                do.doid, "DO:" + str(int(numerical_id)), numerical_id,
                str(int(numerical_id))
            ]
            keys = set([])
            for k in key_values:
                if k is not None:
                    keys.add(k.lower())
            obj = {
                "name": do.display_name,
                "category": "disease",
                "href": do.obj_url,
                "description": do.description,
                "synonyms": [s[0] for s in synonyms],
                "doid": do.doid,
                "disease_loci": sorted(list(disease_loci)),
                "number_annotations": len(annotations),
                "references": list(references),
                "keys": list(keys)
            }
            bulk_data.append({
                "index": {
                    "_index": INDEX_NAME,
                    "_type": DOC_TYPE,
                    "_id": str(uuid.uuid4())
                }
            })
            bulk_data.append(obj)
            # Flush every 400 documents (two bulk entries per document).
            if len(bulk_data) == 800:
                es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
                bulk_data = []
    if len(bulk_data) > 0:
        es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_phenotypes():
    """Bulk-index phenotypes into Elasticsearch.

    Each phenotype document aggregates its annotations' references, loci,
    mutant types and chemical conditions; batches flush at 500 bulk entries.

    Fix: replaced the Python 2 `print` statement with the print() function
    used by the other index_* functions in this file.
    """
    phenotypes = DBSession.query(Phenotype).all()
    bulk_data = []
    print("Indexing " + str(len(phenotypes)) + " phenotypes")
    for phenotype in phenotypes:
        annotations = DBSession.query(Phenotypeannotation).filter_by(
            phenotype_id=phenotype.phenotype_id).all()
        references = set([])
        loci = set([])
        chemicals = set([])
        mutant = set([])
        for annotation in annotations:
            references.add(annotation.reference.display_name)
            loci.add(annotation.dbentity.display_name)
            mutant.add(annotation.mutant.display_name)
            annotation_conds = (
                DBSession.query(PhenotypeannotationCond)
                .filter_by(annotation_id=annotation.annotation_id,
                           condition_class="chemical")
                .all()
            )
            for annotation_cond in annotation_conds:
                chemicals.add(annotation_cond.condition_name)
        qualifier = None
        if phenotype.qualifier:
            qualifier = phenotype.qualifier.display_name
        obj = {
            "name": phenotype.display_name,
            "href": phenotype.obj_url,
            "description": phenotype.description,
            "observable": phenotype.observable.display_name,
            "qualifier": qualifier,
            "references": list(references),
            "phenotype_loci": list(loci),
            "number_annotations": len(list(loci)),
            "chemical": list(chemicals),
            "mutant_type": list(mutant),
            "category": "phenotype",
            "keys": [],
        }
        bulk_data.append({"index": {"_index": INDEX_NAME, "_type": DOC_TYPE, "_id": phenotype.format_name}})
        bulk_data.append(obj)
        if len(bulk_data) == 500:
            es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
            bulk_data = []
    if len(bulk_data) > 0:
        es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def test_reporter_model(self):
    """Reporter table gains exactly one row via its factory."""
    factory.SourceFactory()
    rows = DBSession.query(Reporter).all()
    self.assertEqual(0, len(rows))
    created = factory.ReporterFactory()
    rows = DBSession.query(Reporter).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(created, rows[0])
def test_allele_model(self):
    """Allele table gains exactly one row via its factory."""
    factory.SourceFactory()
    rows = DBSession.query(Allele).all()
    self.assertEqual(0, len(rows))
    created = factory.AlleleFactory()
    rows = DBSession.query(Allele).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(created, rows[0])
def test_apo_model(self):
    """Apo table gains exactly one row via its factory."""
    factory.SourceFactory()
    rows = DBSession.query(Apo).all()
    self.assertEqual(0, len(rows))
    created = factory.ApoFactory()
    rows = DBSession.query(Apo).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(created, rows[0])
def test_taxonomy_model(self):
    """Taxonomy table gains exactly one row via its factory."""
    factory.SourceFactory()
    rows = DBSession.query(Taxonomy).all()
    self.assertEqual(0, len(rows))
    created = factory.TaxonomyFactory()
    rows = DBSession.query(Taxonomy).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(created, rows[0])
def test_locusdbentity_model(self):
    """Locusdbentity table gains exactly one row via its factory."""
    factory.SourceFactory()
    rows = DBSession.query(Locusdbentity).all()
    self.assertEqual(0, len(rows))
    created = factory.LocusdbentityFactory()
    rows = DBSession.query(Locusdbentity).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(created, rows[0])
def test_obi_model(self):
    """Obi table gains exactly one row via its factory."""
    factory.SourceFactory()
    rows = DBSession.query(Obi).all()
    self.assertEqual(0, len(rows))
    created = factory.ObiFactory()
    rows = DBSession.query(Obi).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(created, rows[0])
def test_filepath_model(self):
    """Filepath table gains exactly one row via its factory."""
    factory.SourceFactory()
    rows = DBSession.query(Filepath).all()
    self.assertEqual(0, len(rows))
    created = factory.FilepathFactory()
    rows = DBSession.query(Filepath).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(created, rows[0])
def test_journal_model(self):
    """Journal table gains exactly one row via its factory."""
    factory.SourceFactory()
    rows = DBSession.query(Journal).all()
    self.assertEqual(0, len(rows))
    created = factory.JournalFactory()
    rows = DBSession.query(Journal).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(created, rows[0])
def test_edam_model(self):
    """Edam table gains exactly one row via its factory."""
    factory.SourceFactory()
    rows = DBSession.query(Edam).all()
    self.assertEqual(0, len(rows))
    created = factory.EdamFactory()
    rows = DBSession.query(Edam).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(created, rows[0])
def test_book_model(self):
    """Book table gains exactly one row via its factory."""
    factory.SourceFactory()
    rows = DBSession.query(Book).all()
    self.assertEqual(0, len(rows))
    created = factory.BookFactory()
    rows = DBSession.query(Book).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(created, rows[0])
def test_keywords_model(self):
    """Keyword table gains exactly one row via its factory."""
    factory.SourceFactory()
    rows = DBSession.query(Keyword).all()
    self.assertEqual(0, len(rows))
    created = factory.KeywordFactory()
    rows = DBSession.query(Keyword).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(created, rows[0])
def entry(self, bot, update, user_data, args=None):
    """Handle /start: register or refresh the user, then show the main menu.

    `args` may carry a referral chat id from a deep link ("/start <id>").
    Returns the ACTION state of the start-menu conversation, or the
    conversation fallback if persisting the user fails.
    """
    self.delete_interface(user_data)
    tuser = update.effective_user
    user = DBSession.query(User).filter(User.chat_id == tuser.id).first()
    data = get_settings_file(SETTINGS_FILE)
    silent_days = timedelta_from_str(data['silence_mode'])
    if user is None:
        # First contact: create the account; expiration is the join date
        # plus the configured silence period.
        user = User()
        user.join_date = date.today()
        user.expiration_date = user.join_date + silent_days
        user.chat_id = tuser.id
        user.name = tuser.full_name
        user.username = tuser.username
        user.active = True
        if args and args != [] and len(args) == 1:
            referal_id = args[0]
            if len(referal_id) and referal_id.isdigit():
                # NOTE(review): referal_id is a str while user.chat_id is the
                # numeric Telegram id, so this inequality is always true —
                # self-referral is not actually prevented. Also parent_user
                # may be None when the referral id is unknown, which would
                # raise on referal_count. Confirm and fix.
                if referal_id != user.chat_id and not user.parent_referal_id:
                    parent_user = DBSession.query(User).filter(
                        User.chat_id == referal_id).first()
                    parent_user.referal_count += 1
                    user.parent_referal_id = referal_id
                    if not add_to_db([user, parent_user], session=DBSession):
                        return self.conv_fallback(user_data)
            else:
                self.logger.warning("Referal link is not valid.")
        else:
            if not add_to_db(user, session=DBSession):
                return self.conv_fallback(user_data)
    else:
        # Returning user: refresh profile fields and reactivate.
        user.chat_id = tuser.id
        user.name = tuser.full_name
        user.username = tuser.username
        user.active = True
        if not add_to_db(user, session=DBSession):
            return self.conv_fallback(user_data)
    user_data['user'] = user
    _ = user_data['_'] = generate_underscore(user)
    buttons = [[KeyboardButton('Баланс')],
               [KeyboardButton('Пригласить партнера')],
               [KeyboardButton('Чат')]]
    reply_markup = ReplyKeyboardMarkup(buttons, resize_keyboard=True)
    bot.send_message(text='Главное меню',
                     chat_id=user.chat_id,
                     reply_markup=reply_markup)
    return StartMenu.States.ACTION
def test_chebiurl_model(self):
    """ChebiUrl table gains exactly one row via its factory."""
    factory.SourceFactory()
    factory.ChebiFactory()
    rows = DBSession.query(ChebiUrl).all()
    self.assertEqual(0, len(rows))
    created = factory.ChebiUrlFactory()
    rows = DBSession.query(ChebiUrl).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(created, rows[0])
def test_phenotype_model(self):
    """Phenotype table gains exactly one row via its factory."""
    factory.SourceFactory()
    factory.ApoFactory()
    rows = DBSession.query(Phenotype).all()
    self.assertEqual(0, len(rows))
    created = factory.PhenotypeFactory()
    rows = DBSession.query(Phenotype).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(created, rows[0])
def test_colleague_model(self):
    """Colleague factory creates one row linked to the created source."""
    rows = DBSession.query(Colleague).all()
    self.assertEqual(0, len(rows))
    src = factory.SourceFactory()
    created = factory.ColleagueFactory()
    rows = DBSession.query(Colleague).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(created, rows[0])
    self.assertEqual(created.source, src)
def get_chebi_annotations(cls, chebi_data):
    '''
    Get a join between chebi and phenotypeannotationcondition

    Matches ChEBI records against phenotype-annotation conditions (by
    display name) and against Goextension / Interactor rows (by format
    name); returns the matched ChEBI objects keyed by chebi_id.

    Fixes: removed the unused `duplicate` locals and the redundant
    `len(temp) > 0` tests inside the iteration (an empty list never
    iterates anyway).
    '''
    obj = {}
    _dict_chebi = {}
    chebi_names = list(set([x.display_name for x in chebi_data]))
    chebi_format_names = list(set([x.format_name for x in chebi_data]))
    chebi_format_to_display_names = {}
    for chebi in chebi_data:
        chebi_format_to_display_names[
            chebi.format_name] = chebi.display_name
    for chebi_item in chebi_data:
        _dict_chebi.setdefault(chebi_item.display_name, []).append(chebi_item)
    if len(chebi_names) > 0:
        # 1) phenotype-annotation conditions matched by display name
        _conditions = DBSession.query(
            Phenotypeannotation, PhenotypeannotationCond).join(
                PhenotypeannotationCond,
                Phenotypeannotation.annotation_id ==
                PhenotypeannotationCond.annotation_id).filter(
                    PhenotypeannotationCond.condition_name.in_(
                        chebi_names)).all()
        for item_cond in _conditions:
            temp = _dict_chebi.get(item_cond[1].condition_name)
            if temp is not None:
                for item in temp:
                    obj[item.chebi_id] = item
        # 2) GO extensions matched by format name (dbxref)
        _conditions = DBSession.query(Goextension).filter(
            Goextension.dbxref_id.in_(chebi_format_names)).all()
        for item_cond in _conditions:
            temp = _dict_chebi.get(
                chebi_format_to_display_names[item_cond.dbxref_id])
            if temp is not None:
                for item in temp:
                    obj[item.chebi_id] = item
        # 3) interactors matched by format name
        _conditions = DBSession.query(Interactor).filter(
            Interactor.format_name.in_(chebi_format_names)).all()
        for item_cond in _conditions:
            temp = _dict_chebi.get(
                chebi_format_to_display_names[item_cond.format_name])
            if temp is not None:
                for item in temp:
                    obj[item.chebi_id] = item
    return obj
def get_journal_id(record, created_by):
    """Resolve (or create) the Journal row for a Medline record.

    Lookup order: by print ISSN, then by Medline abbreviation (TA); if
    neither matches, a new Journal attributed to PubMed is created.

    Returns a tuple (journal_id, med_abbr, journal_full_name, issn_print);
    (None, '', '', '') when the record has no abbreviation to key on.
    """
    journal_abbr = record.get('TA', '')
    journal_full_name = record.get('JT', '')
    # The IS field packs multiple ISSNs separated by ') ', e.g.:
    # 1469-221X (Print) 1469-221X (Linking)
    # 1573-6881 (Electronic) 0145-479X (Linking)
    issn_list = record.get('IS', '').split(') ')
    issn_print = ''
    issn_electronic = ''
    for issn in issn_list:
        if "Print" in issn or "Linking" in issn:
            issn_print = issn.split(' ')[0]
        if "Electronic" in issn:
            issn_electronic = issn.split(' ')[0]
    if issn_print:
        journals = DBSession.query(Journal).filter_by(
            issn_print=issn_print).all()
        if len(journals) > 0:
            return journals[0].journal_id, journals[
                0].med_abbr, journal_full_name, issn_print
    if journal_abbr == '':
        return None, '', '', ''
    if journal_abbr:
        journals = DBSession.query(Journal).filter_by(
            med_abbr=journal_abbr).all()
        if len(journals) > 0:
            return journals[0].journal_id, journals[
                0].med_abbr, journal_full_name, issn_print
    # No match: create a new Journal row attributed to PubMed.
    source_id = 824  # 'PubMed'
    # Truncate to fit column limits (display/title 200 chars, format 100).
    shortened_full_name = (
        journal_full_name[:197] +
        '...') if len(journal_full_name) > 200 else journal_full_name
    format_name = journal_full_name.replace(' ', '_') + journal_abbr.replace(
        ' ', '_')
    j = Journal(issn_print=issn_print,
                issn_electronic=issn_electronic,
                display_name=shortened_full_name,
                format_name=(format_name[:97] +
                             '...') if len(format_name) > 100 else format_name,
                title=shortened_full_name,
                med_abbr=journal_abbr,
                source_id=source_id,
                obj_url='/journal/' + format_name,
                created_by=created_by)
    DBSession.add(j)
    DBSession.flush()
    DBSession.refresh(j)
    return j.journal_id, j.med_abbr, journal_full_name, issn_print
def index_go_terms():
    """Bulk-index GO terms (minus blacklisted ids) into Elasticsearch.

    Fixes: replaced the Python 2 `print` statement with print(), and
    serialized `keys` as a list — a raw set is not JSON-serializable and is
    inconsistent with the other index_* functions in this file.
    """
    go_id_blacklist = load_go_id_blacklist("scripts/go_id_blacklist.lst")
    gos = DBSession.query(Go).all()
    print("Indexing " + str(len(gos) - len(go_id_blacklist)) + " GO terms")
    bulk_data = []
    for go in gos:
        if go.goid in go_id_blacklist:
            continue
        synonyms = DBSession.query(GoAlias.display_name).filter_by(
            go_id=go.go_id).all()
        references = set([])
        go_loci = set([])
        annotations = DBSession.query(Goannotation).filter_by(
            go_id=go.go_id).all()
        for annotation in annotations:
            # Negative ("NOT") annotations are excluded from loci/references.
            if annotation.go_qualifier != "NOT":
                go_loci.add(annotation.dbentity.display_name)
                references.add(annotation.reference.display_name)
        numerical_id = go.goid.split(":")[1]
        # Search keys: full GOID plus zero-stripped numeric variants.
        key_values = [
            go.goid, "GO:" + str(int(numerical_id)), numerical_id,
            str(int(numerical_id))
        ]
        keys = set([])
        for k in key_values:
            if k is not None:
                keys.add(k.lower())
        obj = {
            "name": go.display_name,
            "href": go.obj_url,
            "description": go.description,
            "synonyms": [s[0] for s in synonyms],
            "go_id": go.goid,
            "go_loci": sorted(list(go_loci)),
            "number_annotations": len(annotations),
            "references": list(references),
            "category": go.go_namespace.replace(" ", "_"),
            "keys": list(keys),  # fix: was a raw set
        }
        bulk_data.append({"index": {"_index": INDEX_NAME, "_type": DOC_TYPE, "_id": go.goid}})
        bulk_data.append(obj)
        if len(bulk_data) == 800:
            es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
            bulk_data = []
    if len(bulk_data) > 0:
        es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def get_statistics(self, bot, update, user_data):
    """Send the admin a count of active vs. inactive bot users."""
    user = user_data['user']

    def count_users(flag):
        # User.active is stored as the strings 'True'/'False'.
        return DBSession.query(func.count(User.id)).filter(
            User.active == flag).scalar()

    active_users = count_users('True')
    inactive_users = count_users('False')
    stats_message = (
        'Активные пользователи бота: {0}\n'
        'Неактивные пользователи: {1}\n\n'
        'Всего: {2}').format(active_users, inactive_users,
                             active_users + inactive_users)
    bot.send_message(text=stats_message, chat_id=user.chat_id)
    return AdminMenu.States.ACTION
def test_colleague_association_model(self):
    """ColleagueAssociation links two colleagues created up front."""
    factory.SourceFactory()
    factory.ColleagueFactory()
    factory.ColleagueFactory(colleague_id=113699)
    rows = DBSession.query(ColleagueAssociation).all()
    self.assertEqual(0, len(rows))
    created = factory.ColleagueAssociationFactory()
    rows = DBSession.query(ColleagueAssociation).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(created, rows[0])
def test_colleague_keywords_model(self):
    """ColleagueKeyword table gains exactly one row via its factory."""
    factory.SourceFactory()
    factory.ColleagueFactory()
    factory.KeywordFactory()
    rows = DBSession.query(ColleagueKeyword).all()
    self.assertEqual(0, len(rows))
    created = factory.ColleagueKeywordFactory()
    rows = DBSession.query(ColleagueKeyword).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(created, rows[0])
def test_filekeyword_model(self):
    """FileKeyword table gains exactly one row via its factory."""
    factory.SourceFactory()
    factory.FiledbentityFactory()
    factory.FilepathFactory()
    factory.EdamFactory()
    factory.KeywordFactory()
    rows = DBSession.query(FileKeyword).all()
    self.assertEqual(0, len(rows))
    created = factory.FileKeywordFactory()
    rows = DBSession.query(FileKeyword).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(created, rows[0])
def test_reference_document_model(self):
    """ReferenceDocument table gains exactly one row via its factory."""
    factory.SourceFactory()
    factory.JournalFactory()
    factory.BookFactory()
    factory.ReferencedbentityFactory()
    rows = DBSession.query(ReferenceDocument).all()
    self.assertEqual(0, len(rows))
    created = factory.ReferenceDocumentFactory()
    rows = DBSession.query(ReferenceDocument).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(created, rows[0])
def get_all_collegue_locus_by_id(self, id):
    """ Get all colleague_locus data filter by colleague_id """
    return DBSession.query(ColleagueLocus).filter(
        ColleagueLocus.colleague_id == id).all()
def get_all_colleague_data_by_id(self, id):
    """ Get all colleague associated data filter by colleague_id

    Returns the first matching Colleague row, or None.

    Fix: the query chained `.filte(...)` — a typo that raises
    AttributeError at runtime; corrected to `.filter(...)`.
    """
    colleague = DBSession.query(Colleague).filter(
        Colleague.colleague_id == id).first()
    return colleague
def main():
    """Re-initialize permissions for every active user, then commit."""
    active_users = DBSession.query(User).filter(User.is_active == True).all()
    for account in active_users:
        account.init_permissions()
        DBSession.add(account)
    DBSession.commit()
def index_observables():
    """Bulk-index APO observables into Elasticsearch (batches of 300 entries).

    Fix: replaced the Python 2 `print` statement with the print() function
    used by the other index_* functions in this file.
    """
    observables = DBSession.query(Apo).filter_by(apo_namespace="observable").all()
    print("Indexing " + str(len(observables)) + " observables")
    bulk_data = []
    for observable in observables:
        obj = {
            "name": observable.display_name,
            "href": observable.obj_url,
            "description": observable.description,
            "category": "observable",
            "keys": [],
        }
        bulk_data.append(
            {"index": {"_index": INDEX_NAME, "_type": DOC_TYPE, "_id": "observable_" + str(observable.apo_id)}}
        )
        bulk_data.append(obj)
        if len(bulk_data) == 300:
            es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
            bulk_data = []
    if len(bulk_data) > 0:
        es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def get_chebi_annotations(cls, chebi_data):
    '''
    Get a join between chebi and phenotypeannotationcondition

    Matches ChEBI records against phenotype-annotation conditions by
    display name and returns the matched ChEBI objects keyed by chebi_id.

    Fix: dropped the redundant `len(temp) > 0` test inside the loop — an
    empty list never iterates anyway.
    '''
    obj = {}
    _dict_chebi = {}
    chebi_names = list(set([x.display_name for x in chebi_data]))
    for chebi_item in chebi_data:
        _dict_chebi.setdefault(chebi_item.display_name, []).append(chebi_item)
    if len(chebi_names) > 0:
        _conditions = DBSession.query(
            Phenotypeannotation, PhenotypeannotationCond).join(
                PhenotypeannotationCond,
                Phenotypeannotation.annotation_id ==
                PhenotypeannotationCond.annotation_id).filter(
                    PhenotypeannotationCond.condition_name.in_(
                        chebi_names)).all()
        for item_cond in _conditions:
            matches = _dict_chebi.get(item_cond[1].condition_name)
            if matches is not None:
                for item in matches:
                    obj[item.chebi_id] = item
    return obj
def index_observables():
    """Bulk-index APO observables into Elasticsearch (batches of 300 entries)."""
    observables = DBSession.query(Apo).filter_by(
        apo_namespace="observable").all()
    print("Indexing " + str(len(observables)) + " observables")
    bulk_data = []
    for observable in observables:
        bulk_data.append({
            'index': {
                '_index': INDEX_NAME,
                '_type': DOC_TYPE,
                '_id': 'observable_' + str(observable.apo_id)
            }
        })
        bulk_data.append({
            "name": observable.display_name,
            "href": observable.obj_url,
            "description": observable.description,
            "category": "observable",
            "keys": []
        })
        if len(bulk_data) == 300:
            es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
            bulk_data = []
    if len(bulk_data) > 0:
        es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_chemicals():
    """Index ChEBI chemicals that have annotation matches (batches of 300)."""
    all_chebi_data = DBSession.query(Chebi).all()
    matched = IndexESHelper.get_chebi_annotations(all_chebi_data)
    bulk_data = []
    print("Indexing " + str(len(all_chebi_data)) + " chemicals")
    for chebi_id, chemical in matched.items():
        if chemical is None:
            continue
        bulk_data.append({
            'index': {
                '_index': INDEX_NAME,
                '_type': DOC_TYPE,
                '_id': 'chemical_' + str(chebi_id)
            }
        })
        bulk_data.append({
            "name": chemical.display_name,
            "href": chemical.obj_url,
            "description": chemical.description,
            "category": "chemical",
            "keys": []
        })
        if len(bulk_data) == 300:
            es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
            bulk_data = []
    if len(bulk_data) > 0:
        es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def get_all_colleague_relation_by_id(self, id):
    """ Get all colleague_relation data filter by colleague_id """
    return DBSession.query(ColleagueRelation).filter(
        ColleagueRelation.colleague_id == id).all()
def test_colleague_model_search_results_doesnt_send_email_if_required(self):
    """Email is omitted from search results when display_email is off."""
    factory.SourceFactory()
    person = factory.ColleagueFactory(display_email=0)
    rows = DBSession.query(Colleague).all()
    self.assertEqual(1, len(rows))
    self.assertEqual(person, rows[0])
    self.assertNotIn('email', person.to_search_results_dict())
def index_observables():
    """Index all APO 'observable' terms, assigning each document a random
    UUID id, in bulk batches."""
    observables = DBSession.query(Apo).filter_by(
        apo_namespace="observable").all()
    print(("Indexing " + str(len(observables)) + " observables"))
    payload = []
    for term in observables:
        payload.append({
            "index": {
                "_index": INDEX_NAME,
                "_id": str(uuid.uuid4())
            }
        })
        payload.append({
            "name": term.display_name,
            "observable_name": term.display_name,
            "href": term.obj_url,
            "description": term.description,
            "category": "observable",
            "keys": []
        })
        # 300 bulk lines == 150 documents per request; flush and restart.
        if len(payload) == 300:
            es.bulk(index=INDEX_NAME, body=payload, refresh=True)
            payload = []
    if payload:
        es.bulk(index=INDEX_NAME, body=payload, refresh=True)
def test_colleague_model_info_dict_doesnt_send_email_if_required(self):
    """to_info_dict must contain the full contact payload but omit the
    email field when display_email is 0."""
    factory.SourceFactory()
    colleague = factory.ColleagueFactory(display_email=0)
    DBSession.query(Colleague).all()
    research_url = factory.ColleagueUrlFactory(
        colleague_id=colleague.colleague_id)
    lab_url = factory.ColleagueUrlFactory(
        colleague_id=colleague.colleague_id, url_type="Lab")
    expected = {
        'orcid': colleague.orcid,
        'first_name': colleague.first_name,
        'last_name': colleague.last_name,
        'position': colleague.job_title,
        'profession': colleague.profession,
        'organization': colleague.institution,
        'address': [colleague.address1],
        'city': colleague.city,
        'state': colleague.state,
        'country': colleague.country,
        'postal_code': colleague.postal_code,
        'work_phone': colleague.work_phone,
        'fax': colleague.fax,
        'webpages': {
            'lab_url': lab_url.obj_url,
            'research_summary_url': research_url.obj_url
        },
        'research_interests': colleague.research_interest,
        'last_update': str(colleague.date_last_modified)
    }
    self.assertEqual(colleague.to_info_dict(), expected)
def upload_test_gene():
    """Upload a single known gene (RAD54, dbentity_id 1268789) and log
    how long the upload took."""
    started = time.time()
    gene = DBSession.query(Locusdbentity).filter(
        Locusdbentity.dbentity_id == 1268789).one_or_none()
    upload_gene(gene)
    elapsed = time.time() - started
    log.info('RAD54 done in ' + str(elapsed) + ' seconds')
def _protein_alignment_scores(s288c_seq, strain_to_seq, strain_to_id):
    # Helper: score every strain (iterated in ascending strain-id order)
    # against the S288C reference alignment; None when a strain has no
    # aligned sequence for this locus.
    scores = []
    for strain in sorted(strain_to_id, key=strain_to_id.get):
        if strain in strain_to_seq:
            scores.append(
                calculate_score(s288c_seq, strain_to_seq[strain],
                                len(s288c_seq)))
        else:
            scores.append(None)
    return scores


def get_protein_scores(locus_id_list, strain_to_id):
    """Compute per-strain protein alignment scores for each locus.

    Proteinsequencealignment rows are streamed in locus_id order; within
    each locus group, the row whose display_name ends with 'S288C' is the
    reference sequence the other strains are scored against.

    Args:
        locus_id_list: locus ids to restrict to; an empty list means all loci.
        strain_to_id: maps strain name -> numeric id; the score lists in
            the result follow ascending id order.

    Returns:
        dict: locus_id -> list of scores (value from calculate_score, or
        None for strains without an alignment).
    """
    # NOTE: `rows` replaces the original local name `all`, which shadowed
    # the builtin.
    if len(locus_id_list) == 0:
        rows = DBSession.query(Proteinsequencealignment).order_by(
            Proteinsequencealignment.locus_id).all()
    else:
        rows = DBSession.query(Proteinsequencealignment).filter(
            Proteinsequencealignment.locus_id.in_(locus_id_list)).order_by(
                Proteinsequencealignment.locus_id).all()
    S288C_seq = None
    strain_to_seq = {}
    locus_id = None
    locus_id_to_protein_scores = {}
    for x in rows:
        # locus_id changed: the previous locus's group is complete, so
        # flush its scores before accumulating the new group. (locus_id is
        # only set once the S288C reference row has been seen.)
        if x.locus_id != locus_id and locus_id is not None:
            locus_id_to_protein_scores[locus_id] = _protein_alignment_scores(
                S288C_seq, strain_to_seq, strain_to_id)
            locus_id = None
            S288C_seq = None
            strain_to_seq = {}
        if x.display_name.endswith('S288C'):
            S288C_seq = x.aligned_sequence
            locus_id = x.locus_id
        # display_name is '<name>_<strain>'; the strain suffix keys the
        # per-strain sequence map.
        [name, strain] = x.display_name.split('_')
        strain_to_seq[strain] = x.aligned_sequence
    # Flush the final group — no following row triggers the flush above.
    if locus_id is not None:
        locus_id_to_protein_scores[locus_id] = _protein_alignment_scores(
            S288C_seq, strain_to_seq, strain_to_id)
    return locus_id_to_protein_scores
def index_downloads():
    """Index public downloadable files — those with an S3 URL and an
    attached README — into the search index in bulk batches.

    Only files whose dbentity_status is 'Active' or 'Archived' carry that
    status through to the document; anything else indexes as ''.
    """
    bulk_data = []
    dbentity_file_obj = IndexESHelper.get_file_dbentity_keyword()
    files = DBSession.query(Filedbentity).filter(
        Filedbentity.is_public == True, Filedbentity.s3_url != None,
        Filedbentity.readme_file_id != None).all()
    print('indexing ' + str(len(files)) + ' download files')
    for x in files:
        # Keywords come from the precomputed dbentity_id -> keywords map;
        # fall back to an empty list when the file has none.
        keyword = dbentity_file_obj.get(x.dbentity_id) or []
        # Original code re-tested the status inside a nested if only to
        # assign the same value back; a membership test is equivalent.
        status = (x.dbentity_status
                  if x.dbentity_status in ("Active", "Archived") else '')
        obj = {
            'name': x.display_name,
            'href': x.s3_url,
            'category': 'download',
            'description': x.description,
            'keyword': keyword,
            'format': str(x.format.display_name),
            'status': str(status),
            'file_size': str(IndexESHelper.convertBytes(x.file_size))
            if x.file_size is not None else x.file_size,
            'year': str(x.year),
            'readme_url': x.readme_file[0].s3_url,
            'topic': x.topic.display_name,
            'data': x.data.display_name,
            'path_id': x.get_path_id()
        }
        bulk_data.append({
            'index': {
                '_index': INDEX_NAME,
                '_type': DOC_TYPE,
                '_id': x.sgdid
            }
        })
        bulk_data.append(obj)
        # Flush every 25 documents (50 bulk lines) — these docs are larger
        # than the other index types, so the batch is smaller.
        if len(bulk_data) == 50:
            es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
            bulk_data = []
    if len(bulk_data) > 0:
        es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def test_keyword_model_to_dict(self):
    """Keyword.to_dict exposes exactly its id and display name."""
    factory.SourceFactory()
    self.assertEqual(0, len(DBSession.query(Keyword).all()))
    keyword = factory.KeywordFactory()
    expected = {'id': keyword.keyword_id, 'name': keyword.display_name}
    self.assertEqual(keyword.to_dict(), expected)
def test_edam_model_to_dict(self):
    """Edam.to_dict exposes exactly its id and format name."""
    factory.SourceFactory()
    self.assertEqual(0, len(DBSession.query(Edam).all()))
    edam = factory.EdamFactory()
    expected = {'id': edam.edam_id, 'name': edam.format_name}
    self.assertEqual(edam.to_dict(), expected)
def load_keyword():
    """Cache every Keyword row in Redis under both its id and its
    upper-cased format name."""
    print("Loading Keywords into Redis...")
    for kw in DBSession.query(Keyword).all():
        table_set(kw.keyword_id, kw.keyword_id, "keyword")
        table_set(kw.format_name.upper(), kw.keyword_id, "keyword")
def load_dataset():
    """Cache every Dataset row in Redis under both its upper-cased format
    name and its id."""
    print("Loading datasets into Redis...")
    for ds in DBSession.query(Dataset).all():
        table_set(ds.format_name.upper(), ds.dataset_id, "dataset")
        table_set(ds.dataset_id, ds.dataset_id, "dataset")
def load_contigs():
    """Cache every Contig row in Redis under both its upper-cased format
    name and its id."""
    print("Loading contigs into Redis...")
    for ctg in DBSession.query(Contig).all():
        table_set(ctg.format_name.upper(), ctg.contig_id, "contig")
        table_set(ctg.contig_id, ctg.contig_id, "contig")
def test_colleague_model_should_include_urls_in_dict(self):
    """_include_urls_to_dict must add a 'webpages' entry that maps each
    url type to its obj_url."""
    factory.SourceFactory()
    colleague = factory.ColleagueFactory()
    DBSession.query(Colleague).all()
    research_url = factory.ColleagueUrlFactory(
        colleague_id=colleague.colleague_id)
    lab_url = factory.ColleagueUrlFactory(
        colleague_id=colleague.colleague_id, url_type="Lab")
    result = {}
    colleague._include_urls_to_dict(result)
    self.assertEqual(
        result,
        {'webpages': {'lab_url': lab_url.obj_url,
                      'research_summary_url': research_url.obj_url}})