def obs_hist():
    '''
    generate histogram for data

    Returns
    -------
    html: histogram
    '''
    dbi, obs_table, file_table, log_table = db_objs()
    with dbi.session_scope() as s:
        all_query = s.query(obs_table, func.count(obs_table))\
                     .group_by(func.substr(obs_table.date, 1, 7))
        all_query = tuple((int(float(q.date)), count)
                          for q, count in all_query.all())
        all_days, all_counts = zip(*all_query)
        obs_query = s.query(obs_table, func.count(obs_table))\
                     .filter(obs_table.status == 'COMPLETE')\
                     .group_by(func.substr(obs_table.date, 1, 7))
        obs_query = ((int(float(q.date)), count) for q, count in obs_query.all())
        obs_counts = [count if day in all_days else 0
                      for day, count in obs_query]
        #obs_days, obs_counts = zip(*obs_query)
        obs_days = []

    return render_template('obs_hist.html',
                           obs_days=obs_days, obs_counts=obs_counts,
                           all_days=all_days, all_counts=all_counts)
def getMealByDateByRecipe(cls, date, user, recipe):
    meal = db.session.query(Meals).filter(Meals.user_fk == user).\
        filter(Meals.recipe_fk == recipe).\
        filter(func.substr(Meals.date_planned, 0, 11) == func.substr(date, 0, 11)).\
        all()
    print("\n\n\n\nELEMENT ALREADY EXISTS:", meal)
    return meal
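# Note (assumption: the Meals snippets in this collection run on SQLite): substr()
# here starts at index 0, which SQLite treats as the position *before* the first
# character, so substr(x, 0, 11) yields only the first 10 characters -- the
# 'YYYY-MM-DD' date part of an ISO timestamp. A minimal runnable sketch:
import sqlite3

conn = sqlite3.connect(':memory:')
# substr(X, 0, 11) returns characters 1..10, i.e. the date part only
print(conn.execute(
    "SELECT substr('2023-05-17 08:30:00', 0, 11)").fetchone()[0])  # 2023-05-17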
def stream_plot():
    '''
    generate streaming data

    Returns
    -------
    '''
    start_utc, end_utc, pol, era_type, host, filetype = page_form()
    dbi, obs_table, file_table, log_table = db_objs()
    with dbi.session_scope() as s:
        file_query = s.query(file_table, func.count(file_table))\
                      .join(obs_table)
        file_query = obs_filter(file_query, obs_table,
                                start_utc, end_utc, pol, era_type)
        file_query = file_filter(file_query, file_table, host, filetype)
        file_query = file_query.group_by(func.substr(obs_table.date, 1, 7))\
                               .order_by(func.substr(obs_table.date, 1, 7).asc())\
                               .limit(1)
        file_count = [count for q, count in file_query]

    return jsonify({'count': file_count})
def getMealsByDate(cls, date, user):
    mealsPlanned = db.session.query(Meals).filter(Meals.user_fk == user).\
        filter(func.substr(Meals.date_planned, 0, 11) == func.substr(date, 0, 11)).\
        all()
    return mealsPlanned
def get_industry_all(level=1, adjust=True):
    """Fetch the full CITICS industry classification table.

    adjust {bool} -- whether to remap industries for compatibility with older
        code, since CITICS changed its industry classification (default: {True})
    """
    clss = WIND_DB.ASHAREINDUSTRIESCLASSCITICS
    ind_code = WIND_DB.ASHAREINDUSTRIESCODE
    df = WIND_DB.query(
        clss.S_INFO_WINDCODE, clss.ENTRY_DT, clss.REMOVE_DT,
        ind_code.INDUSTRIESNAME).filter(ind_code.LEVELNUM == (level + 1))
    try:
        df = df.filter(
            func.substring(clss.CITICS_IND_CODE, 1, 2 + 2 * level) ==
            func.substring(ind_code.INDUSTRIESCODE, 1, 2 + 2 * level)).to_df()
    except Exception:
        # fall back to substr() on backends without substring()
        df = df.filter(
            func.substr(clss.CITICS_IND_CODE, 1, 2 + 2 * level) ==
            func.substr(ind_code.INDUSTRIESCODE, 1, 2 + 2 * level)).to_df()
    df.columns = ['sid', 'entry_dt', 'out_dt', 'ind']
    # strip non-Chinese characters (e.g. Roman numerals) from industry names
    p = re.compile(r"[^\u4e00-\u9fa5]")
    df.ind = df.ind.str.replace(p, '', regex=True)
    # fold 综合金融 (comprehensive finance) into 非银行金融 (non-bank finance)
    if adjust:
        def ind_map(x):
            if x == '综合金融':
                return '非银行金融'
            elif x in ('多领域控股', '资产管理', '新兴金融服务'):
                return '多元金融'
            elif x in ('全国性股份制银行', '区域性银行'):
                return '股份制与城商行'
            else:
                return x
        df.ind = df.ind.map(ind_map)
    return df
def get_totals(q_date):
    query_date = q_date.strftime('%Y-%m-%d')
    query_month = query_date[0:7]
    query_year = query_date[0:4]
    totals = {'day': 0.0, 'month': 0.0, 'year': 0.0}
    with session_scope() as session:
        day_total = session.query(func.sum(Expenses.value)) \
            .filter(Expenses.date == query_date).group_by(Expenses.date).first()
        if day_total is not None:
            totals['day'] = day_total[0]
        month_query = session.query(
            func.substr(Expenses.date, 1, 7).label('month'),
            func.sum(Expenses.value).label('value_sum')) \
            .group_by('month')
        month_subquery = month_query.subquery()
        month_total = session.query(month_subquery.c.value_sum)\
            .filter(month_subquery.c.month == query_month).first()
        if month_total is not None:
            totals['month'] = month_total[0]
        year_query = session.query(
            func.substr(Expenses.date, 1, 4).label('year'),
            func.sum(Expenses.value).label('value_sum')) \
            .group_by('year')
        year_subquery = year_query.subquery()
        year_total = session.query(year_subquery.c.value_sum)\
            .filter(year_subquery.c.year == query_year).first()
        if year_total is not None:
            totals['year'] = year_total[0]
    return totals
def day_summary_table():
    '''
    summary of data in main databases

    Returns
    -------
    html: day summary table
    '''
    start_utc, end_utc, pol, era_type, host, filetype = page_form()
    dbi, obs_table, file_table, log_table = db_objs()
    with dbi.session_scope() as s:
        pol_query = s.query(func.substr(obs_table.date, 1, 7),
                            obs_table.pol, func.count(obs_table))
        pol_query = obs_filter(pol_query, obs_table,
                               start_utc, end_utc, pol, era_type)
        pol_query = pol_query.group_by(func.substr(obs_table.date, 1, 7),
                                       obs_table.pol)\
                             .order_by(func.substr(obs_table.date, 1, 7).asc())\
                             .all()
        pol_map = tuple((julian_day, pol, count)
                        for julian_day, pol, count in pol_query)

    return render_template('day_summary_table.html', pol_map=pol_map)
def getCoreUser():
    y = request.form.get('Y')
    m = request.form.get('M')
    d = request.form.get('D')
    print(y)
    print(m)
    print(d)
    Ymd = y + '-' + m + '-' + d
    t_unix = int(time.mktime(time.strptime(Ymd, '%Y-%m-%d')))
    from ..models.coreUser import CoreUser
    from sqlalchemy import func
    coreInfo = db.session.query(
        func.sum(CoreUser.num),
        func.substr(CoreUser.groupid, 1, 4),
        CoreUser.created).filter(CoreUser.created >= t_unix).group_by(
            func.substr(CoreUser.groupid, 1, 4),
            CoreUser.created).order_by(func.substr(CoreUser.groupid, 1, 4),
                                       CoreUser.created).all()
    arr = []
    for row in coreInfo:  # renamed from `list`, which shadowed the builtin
        tmp = (str(row[0]), row[1] + '0000', row[2])
        arr.append(tmp)
    print(arr)
    return json.dumps(arr)
def datetime_end_access(cls):
    # rebuild an ISO 'YYYY-MM-DD HH:MM:SS' string from what appears to be a
    # 'DD/MM/YYYY HH:MM:SS'-style text column (inferred from the offsets)
    dt_column = (func.substr(cls.dt_end_access, 7, 4) + "-" +
                 func.substr(cls.dt_end_access, 4, 2) + "-" +
                 func.substr(cls.dt_end_access, 1, 2) + ' ' +
                 func.substr(cls.dt_end_access, 12))
    dt_column = func.datetime(dt_column)
    return dt_column
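# A minimal runnable sketch of what the expression above computes, assuming the
# stored format really is 'DD/MM/YYYY HH:MM:SS' (an inference, not confirmed by
# the source) and a SQLite backend:
import sqlite3

conn = sqlite3.connect(':memory:')
row = conn.execute(
    "SELECT datetime(substr(:s, 7, 4) || '-' || substr(:s, 4, 2) || '-' || "
    "substr(:s, 1, 2) || ' ' || substr(:s, 12))",
    {'s': '17/05/2023 08:30:15'}).fetchone()
print(row[0])  # 2023-05-17 08:30:15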
def obs_hist():
    '''
    generate histogram for data

    Returns
    -------
    html: histogram
    '''
    dbi, obs_table, file_table, log_table = db_objs()
    with dbi.session_scope() as s:
        obs_query = s.query(obs_table, func.count(obs_table))\
                     .filter(obs_table.status == 'COMPLETE')\
                     .group_by(func.substr(obs_table.date, 1, 7))
        obs_query = ((int(float(q.date)), count) for q, count in obs_query.all())
        obs_days, obs_counts = zip(*obs_query)
        all_query = s.query(obs_table, func.count(obs_table))\
                     .group_by(func.substr(obs_table.date, 1, 7))
        all_query = ((int(float(q.date)), count) for q, count in all_query.all())
        all_days, all_counts = zip(*all_query)

    return render_template('obs_hist.html',
                           obs_days=obs_days, obs_counts=obs_counts,
                           all_days=all_days, all_counts=all_counts)
def module_query(self, sessionid, optional_modules):
    if optional_modules:
        case_clause = case([
            (sql.and_(
                model.SurveyTreeItem.profile_index != -1,
                model.SurveyTreeItem.zodb_path.in_(optional_modules)),
             func.substr(model.SurveyTreeItem.path, 1, 6)),
            (sql.and_(
                model.SurveyTreeItem.profile_index == -1,
                model.SurveyTreeItem.zodb_path.in_(optional_modules)),
             func.substr(model.SurveyTreeItem.path, 1, 3) + '000-profile'),
            (sql.and_(
                model.SurveyTreeItem.profile_index != -1,
                model.SurveyTreeItem.depth < 2),
             func.substr(model.SurveyTreeItem.path, 1, 3)),
        ])
    else:
        case_clause = case([
            (sql.and_(
                model.SurveyTreeItem.profile_index != -1,
                model.SurveyTreeItem.depth < 2),
             func.substr(model.SurveyTreeItem.path, 1, 3)),
        ])
    query = self.sql_session.query(case_clause.label('module')).filter(
        sql.and_(model.SurveyTreeItem.session_id == sessionid,
                 model.SurveyTreeItem.type == 'module')).group_by(
        'module').order_by('module')
    return query
def get_read_years(db: Session):
    year = func.substr(models.Read.date, 1, 4)
    reads = (
        db.query(year.label("year"), func.count(year).label("num"))
        .group_by(year)
        .order_by(year)
        .all()
    )
    read_map = {read[0]: read[1] for read in reads}
    year = func.substr(models.UnmatchedRead.date, 1, 4)
    unmatched = (
        db.query(year.label("year"), func.count(year).label("num"))
        .group_by(year)
        .order_by(year)
        .all()
    )
    for read in unmatched:
        if read[0] in read_map.keys():
            read_map[read[0]] += read[1]
        else:
            read_map[read[0]] = read[1]
    return [{"year": year, "num": read_map[year]}
            for year in sorted(read_map.keys())]
def book_query(self):
    session = self.util.get_scoped_session()
    root_section = self.util.get_root_section(session)
    digits = self.util.get_section_digits()
    padding = digits * '0'
    full_number = func.substr(padding + db.Section.number.cast(String),
                              -digits, digits)
    icteq = session.query(
        db.Section.id,
        full_number.label('section_full_number'),
    ).filter_by(
        parent=root_section
    ).cte()
    rcteq = session.query(
        db.Section.id,
        icteq.c.section_full_number.concat('-').concat(full_number),
    ).join(
        icteq, icteq.c.id == db.Section.parent_id
    )
    cteq = icteq.union(rcteq)
    digits = self.util.get_book_digits()
    padding = digits * '0'
    full_number = cteq.c.section_full_number.concat('-').concat(
        func.substr(padding + db.Book.number.cast(String), -digits, digits))
    q = session.query(db.Book).join(cteq).order_by(
        cteq.c.section_full_number, db.Book.number)
    return q
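# The zero-padding trick above (also used in the `call` snippets further down):
# prepend `digits` zeros, then take the last `digits` characters with a negative
# substr() start, yielding fixed-width numbers that sort correctly as text.
# A minimal sketch on SQLite:
import sqlite3

conn = sqlite3.connect(':memory:')
digits, number = 3, 7
padding = digits * '0'
padded = conn.execute("SELECT substr(?, ?, ?)",
                      (padding + str(number), -digits, digits)).fetchone()[0]
print(padded)  # '007' -- '7' sorts after '12' as text, but '007' sorts before '012'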
def inv_getlist():
    """
    API result shape:
    {
        'A': {
            '01A': [],
            '02A': [
                {code: '001', name: '电池12', acount: 999},
                {code: '003', name: '电池120', acount: 3000}
            ]
        }, ...
    }
    Query result:
    [('A01A', '1', '电池20', 2000, 'A', '01A'),
     ('A02A', '1', '电池20', 5000, 'A', '02A'),
     ('A01A', '1', '电池20', 1000, 'A', '01A')]
    """
    inv = db.session.query(
        Inventory.allocation,
        Inventory.code,
        Inventory.name,
        Inventory.inventory_acount,
        func.substr(Inventory.allocation, 1, 1),
        func.substr(Inventory.allocation, 2, 3)).filter(
            Inventory.inventory_acount != 0).all()
    result = {}
    for i in inv:
        key1 = i[4]
        key2 = i[5]
        if key1 not in result:
            result[key1] = {}
        if key2 not in result[key1]:
            result[key1][key2] = []
        result[key1][key2].append({'code': i[1], 'name': i[2], 'acount': i[3]})
    return jsonify(result)
def index():
    para = yrh.reqs(['page:1', 'url:index', 'title:最新发布'])
    sql_para = {
        'pagesize': g.pagesize,
        'nowcolumn': g.pagesize * (para['page'] - 1)
    }
    # recommended articles
    recommands = db.session.query(
        Article.id, Article.title,
        (func.substr(Article.body, 1, 100) + '...').label('description')) \
        .filter(Article.recommand == 'Y') \
        .order_by(Article.order_id.desc(), Article.id.desc())
    # all articles
    articles = db.session.query(
        Article.id, Article.title, Article.visited, Article.create_date,
        Catalog.catalog, Catalog.catalog_eng, Article.photo, Article.order_id,
        func.count(Comment.id).label('counts'),
        (func.substr(Article.body, 1, 100) + '...').label('description')) \
        .outerjoin(Comment) \
        .outerjoin(Catalog) \
        .filter(Article.hidden == None) \
        .group_by(Article.id) \
        .order_by(Article.order_id.desc(), Article.id.desc()) \
        .limit(sql_para['pagesize'])
    # row count for the next page
    rows_left = len(articles.offset(g.pagesize * para['page']).all())
    return render_template('index.html',
                           articles=articles.offset(sql_para['nowcolumn']),
                           recommands=recommands, para=para,
                           hots=g.hot_list, links=g.links, left=rows_left)
def get_borders():
    expense_dict = {
        'MEDay': {'date': '1900-01-01', 'value': 0.0},
        'LEDay': {'date': '1900-01-01', 'value': 0.0},
        'LEMonth': {'date': '1900-01', 'value': 0.0},
        'MEMonth': {'date': '1900-01', 'value': 0.0}
    }
    with session_scope() as session:
        expense = session.query(Expenses.date,
                                func.sum(Expenses.value).label('value_sum'))\
            .having(text("value_sum > 0"))\
            .group_by(Expenses.date).order_by(desc('value_sum')).first()
        if expense is not None:
            expense_dict['MEDay'] = {'date': expense[0], 'value': expense[1]}
        expense = session.query(Expenses.date,
                                func.sum(Expenses.value).label('value_sum')) \
            .having(text("value_sum > 0")) \
            .group_by(Expenses.date).order_by('value_sum').first()
        if expense is not None:
            expense_dict['LEDay'] = {'date': expense[0], 'value': expense[1]}
        expense = session.query(func.substr(Expenses.date, 1, 7).label('month'),
                                func.sum(Expenses.value).label('value_sum')) \
            .having(text("value_sum > 0")) \
            .group_by('month').order_by('value_sum').first()
        if expense is not None:
            expense_dict['LEMonth'] = {'date': expense[0], 'value': expense[1]}
        expense = session.query(func.substr(Expenses.date, 1, 7).label('month'),
                                func.sum(Expenses.value).label('value_sum')) \
            .having(text("value_sum > 0")) \
            .group_by('month').order_by(desc('value_sum')).first()
        if expense is not None:
            expense_dict['MEMonth'] = {'date': expense[0], 'value': expense[1]}
    return expense_dict
def deleteMeal(cls, date, user, meal_type, recipe):
    db.session.query(Meals).\
        filter(func.substr(Meals.date_planned, 0, 11) == func.substr(date, 0, 11)).\
        filter(Meals.user_fk == user).\
        filter(Meals.meal_type == meal_type).\
        filter(Meals.recipe_fk == recipe).\
        delete(synchronize_session=False)
def call(self, number, offset):
    session = self.util.get_scoped_session()
    root_section = self.util.get_root_section(session)
    digits = self.util.get_section_digits()
    padding = digits * '0'
    full_number = func.substr(padding + db.Section.number.cast(String),
                              -digits, digits)
    icteq = session.query(
        db.Section.id,
        full_number.label('number'),
        db.Section.name,
        db.Section.abbreviation).filter_by(parent=root_section).cte()
    rcteq = session.query(
        db.Section.id,
        icteq.c.number.concat('-').concat(full_number),
        db.Section.name,
        db.Section.abbreviation).join(
            icteq, icteq.c.id == db.Section.parent_id)
    cteq = icteq.union(rcteq)
    subq = session.query(cteq).subquery()
    digits = self.util.get_book_digits()
    padding = digits * '0'
    book_number = func.substr(padding + db.Book.number.cast(String),
                              -digits, digits)
    q = session.query(
        db.Book,
        func.count(db.Book.id),
        subq.c.number.concat('-').concat(book_number)).join(
            subq, subq.c.id == db.Book.section_id).order_by(
            subq.c.number, book_number).group_by(
            db.Book.number, db.Book.section_id).offset(offset).limit(number)
    for c, copies, number in q:
        if c.library is not None:
            print(c.library.name)
        print(number, c.title, '({} copies)'.format(copies), c.language)
        print('tags:', *c.tags)
        print('authors:', *c.authors, sep=', ')
        publishing_info = []
        if c.publisher is not None:
            publishing_info.append(c.publisher.name)
        if c.publication_year is not None:
            publishing_info.append(str(c.publication_year))
        if publishing_info:
            print(' '.join(publishing_info))
        if c.is_electronic:
            print('electronic')
        else:
            print('paper')
        print()
def getMealsByFutureDate(cls, user):
    print("TODAY", datetime.today())
    meals_list = db.session.query(Meals).join(Recipe).join(RecipeIngredient).\
        join(Ingredient).\
        filter(func.substr(Meals.date_planned, 0, 11) >=
               func.substr(datetime.today(), 0, 11)).\
        filter(Meals.recipe_fk == Recipe.recipe_id).\
        filter(Recipe.recipe_id == RecipeIngredient.recipe_fk).\
        filter(RecipeIngredient.ingredient_name == Ingredient.name).\
        filter(Meals.user_fk == user).\
        order_by(Meals.date_planned).all()
    return meals_list
def data_hist():
    '''
    generate histogram for data

    Returns
    -------
    html: histogram
    '''
    start_utc, end_utc, pol, era_type, host, filetype = page_form()
    dbi, obs_table, file_table, log_table = db_objs()
    with dbi.session_scope() as s:
        days = list(range(int(start_utc), int(end_utc) + 1))
        # get julian_day, count for files, split by raw/compressed
        file_query = s.query(file_table, func.count(file_table))\
                      .join(obs_table)
        file_query = obs_filter(file_query, obs_table,
                                start_utc, end_utc, pol, era_type)
        file_query = file_filter(file_query, file_table, host, filetype)
        file_query = file_query.group_by(func.substr(obs_table.date, 1, 7))\
                               .order_by(func.substr(obs_table.date, 1, 7).asc())\
                               .all()
        file_query = ((int(float(q.observation.date)), count)
                      for q, count in file_query)
        try:
            f_days, f_day_counts = zip(*file_query)
        except ValueError:  # no rows: unpacking an empty zip fails
            f_days = days
            f_day_counts = [0] * len(days)
        # get julian_day, count for observation
        obs_query = s.query(func.substr(obs_table.date, 1, 7),
                            func.count(obs_table))
        obs_query = obs_filter(obs_query, obs_table,
                               start_utc, end_utc, pol, era_type)
        obs_query = obs_query.group_by(func.substr(obs_table.date, 1, 7))\
                             .order_by(func.substr(obs_table.date, 1, 7).asc())\
                             .all()
        try:
            j_days, j_day_counts = zip(*obs_query)
        except ValueError:
            j_days = days
            j_day_counts = [0] * len(days)

    return render_template('data_hist.html',
                           f_days=f_days, f_day_counts=f_day_counts,
                           j_days=j_days, j_day_counts=j_day_counts)
def mapper_details(concept_id):
    # subquery for searching for mapped concepts and returning concept details
    concept_info = db.session.query(VConcepts)\
        .filter(VConcepts.concept_id == concept_id)\
        .subquery()
    # mapping history
    hist = db.session.query(VMapping)\
        .filter((VMapping.source_concept_id == concept_id) &
                (VMapping.valid == True))\
        .subquery()
    concept_history = db.session.query(hist)\
        .with_entities(
            hist.c.valid,
            func.concat(hist.c.last_name, ', ',
                        func.substr(hist.c.first_name, 1, 1),
                        ' (', hist.c.organisation_name, ')').label('mapper'),
            hist.c.destination_code_text,
            hist.c.destination_term_text,
            hist.c.event_type_name,
            func.coalesce(hist.c.comment, '').label('comment'),
            func.to_char(hist.c.insert_ts,
                         'YYYY-mm-dd HH24:MI').label('insert_ts'))\
        .order_by(hist.c.insert_ts.desc())\
        .all()
    # other source concepts mapped to the same destination concept
    other_concepts = db.session.query(VMapping)\
        .filter((VMapping.destination_concept_id == hist.c.destination_concept_id) &
                (VMapping.valid == True))\
        .with_entities(
            VMapping.valid,
            func.concat(VMapping.last_name, ', ',
                        func.substr(VMapping.first_name, 1, 1),
                        ' (', VMapping.organisation_name, ')').label('mapper'),
            VMapping.source_code_text,
            VMapping.source_term_text,
            VMapping.event_type_name,
            func.coalesce(VMapping.comment, '').label('comment'),
            func.to_char(VMapping.insert_ts,
                         'YYYY-mm-dd HH24:MI').label('insert_ts'))\
        .order_by(VMapping.insert_ts.desc())\
        .all()
    # concept details for the front-end (details view)
    concept_decoded = db.session.query(concept_info)\
        .filter(VConcepts.concept_id == concept_id)\
        .with_entities(VConcepts.code_text, VConcepts.term_text,
                       VConcepts.obs_number)\
        .all()
    return render_template('home/details.html', history=concept_history,
                           samedest=other_concepts, info=concept_decoded,
                           target=session['target_system'],
                           user_org_name=session['user_organisation_name'])
def get(self):
    arg_type = statistics_form.parse_args()
    # slice created_at to 'YYYY', 'YYYY-MM' or 'YYYY-MM-DD' depending on the
    # requested granularity
    start, end = (1, 4) if arg_type['type'] == 'year' else (
        1, 7) if arg_type['type'] == 'month' else (1, 10)
    orders = db.session.query(
        func.substr(Order.created_at, start, end).label('date'),
        func.sum(Order.total).label('total'),
        func.sum(Order.origin_total).label('origin_total')
    ).select_from(Order). \
        filter(Order.is_payed == True). \
        order_by(Order.created_at.desc()). \
        group_by(func.substr(Order.created_at, start, end)).all()
    cancel_orders = db.session.query(
        func.substr(Order.created_at, start, end).label('date'),
        func.sum(Order.origin_total).label('origin_total')
    ).select_from(Order). \
        filter(Order.is_payed == True,
               Order.status == Order.STATUS_BUYER_CANCEL). \
        order_by(Order.created_at.desc()). \
        group_by(func.substr(Order.created_at, start, end)).all()
    data, days = [{
        'name': u'销售总额',
        'data': []
    }, {
        'name': u'支付总额',
        'data': []
    }, {
        'name': u'退货总额',
        'data': []
    }], []
    for order in orders:
        days_key = order.date[:end]
        flag = 0
        for i in cancel_orders:
            if days_key == i.date:
                flag = i.origin_total
        data[0]['data'].append(order.origin_total)
        data[1]['data'].append(order.total)
        data[2]['data'].append(flag)
        days.append(days_key)
    data = {
        "status": True,
        "message": '',
        "data": {
            "data": data,
            "days": days
        }
    }
    return data
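# A minimal sketch of the grouping idea above: substr() over an ISO timestamp
# yields year/month/day buckets directly, so one query serves all three
# granularities. Pure-SQL demo on SQLite (table and column names hypothetical):
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute("CREATE TABLE orders (created_at TEXT, total REAL)")
conn.executemany("INSERT INTO orders VALUES (?, ?)", [
    ('2023-05-17 08:30:00', 10.0),
    ('2023-05-20 09:00:00', 5.0),
    ('2023-06-01 12:00:00', 7.5),
])
start, end = 1, 7  # 'month' granularity -> 'YYYY-MM'
for row in conn.execute(
        "SELECT substr(created_at, ?, ?) AS d, sum(total) "
        "FROM orders GROUP BY d ORDER BY d", (start, end)):
    print(row)  # ('2023-05', 15.0), then ('2023-06', 7.5)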
def sequence(cls):
    """Required joins: Protein"""
    from .protein import Protein
    # SQL is 1 based
    left = cls.position - 8
    sequence = func.substr(
        Protein.sequence,
        greatest(cls.position - 7, 1),
        least(15 + least(left, 0), func.length(Protein.sequence) - left))
    left_padding = func.substr('-------', 1, greatest(-left, 0))
    right_padding = func.substr(
        '-------', 1,
        greatest(cls.position + 8 - func.length(Protein.sequence), 0))
    return left_padding.concat(sequence).concat(right_padding)
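# A hedged pure-Python rendering of the window arithmetic above: a roughly
# 15-character window around `position` (1-based), '-'-padded where it overhangs
# the start of the sequence. Useful for sanity-checking the greatest()/least()
# bounds; `window15` is a hypothetical helper, not part of the original model:
def window15(seq: str, position: int) -> str:
    left = position - 8
    start = max(position - 7, 1)
    length = min(15 + min(left, 0), len(seq) - left)
    body = seq[start - 1:start - 1 + length]
    left_pad = '-' * max(-left, 0)
    right_pad = '-' * max(position + 8 - len(seq), 0)
    return left_pad + body + right_pad

# window overhangs the left edge by 5, so 5 dashes are prepended
assert window15('ACDEFGHIKLMNPQRSTVWY', 3) == '-----ACDEFGHIKL'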
def get_statuses(args=None):
    if not args:
        args = request.args
    last_consensus = Consensus.query.order_by(Consensus.valid_after.desc())\
                                    .first()
    lookup = args['lookup'] if 'lookup' in args else None
    if not lookup or len(lookup) != Fingerprint.FP_LEN:
        return None, None
    q = StatusEntry.query.filter(
        func.substr(StatusEntry.fingerprint, 0, Fingerprint.FP_SUBSTR_LEN)
        == lookup[:Fingerprint.FP_SUBSTR_LEN - 1])
    q = q.order_by(StatusEntry.validafter.desc())
    q = from_to(q)
    entries = offset_limit(q, args=args)
    if OUTPUT_TIME:
        t1 = time.time()
    entries = entries.all()
    if OUTPUT_TIME:
        t2 = time.time()
        debug_logger.info(str(datetime.datetime.now()) +
                          ' - statusentry query finished, took: %s', t2 - t1)
    return last_consensus, entries
def save_sn_segment_list(self):
    query_sn_seg = self.session.query(
        func.substr(Bike.sn, 1, 6).label("sn_seg")).group_by("sn_seg")
    with open(self._get_stats_full_path("bike_sn_segments.csv"), "w") as fout:
        fout.write("sn_segment,count,per_subtype")
        fout.write("\n")
        for sn_seg in query_sn_seg.all():
            sn_seg = sn_seg[0]
            query = self.session.query(
                BikeSubtype.id,
                func.count(Bike.id).label("count")).join(BikeSubtype.bikes)
            stmt = query.filter(Bike.sn.like(sn_seg + "%")).group_by(
                BikeSubtype.id).subquery()
            query = self.session.query(BikeSubtype.name, stmt.c.count).join(
                stmt, BikeSubtype.id == stmt.c.id)
            query = query.order_by(BikeSubtype.name)
            total_count, per_subtype_str = 0, ""
            for subtype_stat in query.all():
                total_count += subtype_stat[1]
                per_subtype_str += "{:s}({:d}),".format(
                    subtype_stat[0], subtype_stat[1])
            fout.write("|{:s}|,{:d},{:s}".format(sn_seg, total_count,
                                                 per_subtype_str))
            fout.write("\n")
def on_enable_button_clicked(self):
    if not self.begin_edit_date.date() < self.end_edit_date.date():
        PluginUtils.show_message(self, self.tr("Date Validate"),
                                 self.tr("End date must be after the begin date!"))
        return
    new_user_number = '01'
    message_box = QMessageBox()
    message_box.setText(self.tr("Do you want to enable user?"))
    yes_button = message_box.addButton(self.tr("Yes"), QMessageBox.ActionRole)
    message_box.addButton(self.tr("Cancel"), QMessageBox.ActionRole)
    message_box.exec_()
    if not message_box.clickedButton() == yes_button:
        return
    try:
        count = self.session.query(SetRole) \
            .filter(SetRole.user_name == self.username) \
            .order_by(func.substr(SetRole.user_name_real, 11, 12).desc()).count()
    except SQLAlchemyError as e:
        PluginUtils.show_error(
            self, self.tr("File Error"),
            self.tr("Error in line {0}: {1}").format(
                currentframe().f_lineno, str(e)))
        return
def search_around(timestamp: T.number,):
    get_logger().info('/search_around %s', timestamp)
    utc_timestamp = timestamp  # old 'timestamp' name is legacy

    # TODO meh. use count/pagination instead?
    delta_back = timedelta(hours=3).total_seconds()
    delta_front = timedelta(minutes=2).total_seconds()
    # TODO not sure about delta_front.. but it also serves as a quick hack to
    # accommodate for all the truncations etc
    return search_common(
        url='http://dummy.org',  # NOTE: not used in the where query (below).. perhaps need to get rid of this
        where=lambda table, url: between(
            func.strftime(
                '%s',  # NOTE: it's tz aware, e.g. would distinguish +05:00 vs -03:00
                # this is a bit fragile, relies on cachew internal timestamp format, e.g.
                # 2020-11-10T06:13:03.196376+00:00 Europe/London
                func.substr(
                    table.c.dt,
                    1,  # substr is 1-indexed
                    # instr finds the first match, but if not found it defaults
                    # to 0.. which we hack around by concatenating with ' '
                    func.instr(
                        func.cast(table.c.dt, types.Unicode).op('||')(' '),
                        ' ') - 1,
                    # the cast is necessary, otherwise it tries to treat ' ' as
                    # a datetime
                ),
            ) - literal(utc_timestamp),
            literal(-delta_back),
            literal(delta_front),
        ),
    )
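# A minimal sketch of the truncation trick above: keep everything before the
# first space, using the "concat a sentinel space" hack so instr() never returns
# 0 when there is no space. Demonstrated directly on SQLite:
import sqlite3

conn = sqlite3.connect(':memory:')
for dt in ('2020-11-10T06:13:03.196376+00:00 Europe/London',
           '2020-11-10T06:13:03+00:00'):  # second one has no trailing zone name
    trimmed = conn.execute(
        "SELECT substr(:dt, 1, instr(:dt || ' ', ' ') - 1)",
        {'dt': dt}).fetchone()[0]
    print(trimmed)  # ISO timestamp with any trailing zone name stripped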
def clusters_to_fasta(clusters):
    '''Convert model.BiosyntheticGeneCluster into FASTA'''
    query = db.session.query(
        Bgc, Locus.start_pos, Locus.end_pos,
        DnaSequence.acc, DnaSequence.version,
        func.substr(DnaSequence.dna, Locus.start_pos + 1,
                    Locus.end_pos - Locus.start_pos).label('sequence'),
        Taxa.tax_id, Taxa.genus, Taxa.species, Taxa.strain)
    query = query.options(joinedload('bgc_types'))
    query = query.join(Locus).join(DnaSequence).join(Genome).join(Taxa)
    query = query.filter(Bgc.bgc_id.in_(
        map(lambda x: x.bgc_id, clusters))).order_by(Bgc.bgc_id)
    search = ''
    if g.verbose:
        search = "|{}".format(g.search_str)
    for cluster in query:
        seq = break_lines(cluster.sequence)
        compiled_type = '-'.join(
            sorted([t.term for t in cluster.BiosyntheticGeneCluster.bgc_types],
                   key=str.casefold))
        fasta = '>{c.acc}.{c.version}|Cluster {cluster_number}|' \
                '{compiled_type}|{c.start_pos}-{c.end_pos}|' \
                '{c.genus} {c.species} {c.strain}{search}\n{seq}' \
                .format(c=cluster,
                        cluster_number=cluster.BiosyntheticGeneCluster.cluster_number,
                        compiled_type=compiled_type, search=search, seq=seq)
        yield fasta
def connhash_tree_lines(self, lines, mincount):
    length = 1 + lines * 4
    othercount = 0
    ret = {}
    dbres = self.session.query(
        func.count(Connection.id),
        func.substr(Connection.connhash, 0, length).label("c"),
        Connection.stream,
        Connection.id).group_by("c").all()
    for c in dbres:
        count = c[0]
        connhash = c[1]
        if count > mincount:
            # materialize as a list so len() works (filter() returns an
            # iterator on Python 3)
            ev_in = [ev for ev in json.loads(c[2]) if ev["in"]]
            if len(ev_in) >= lines:
                ret[connhash] = {
                    "count": c[0],
                    "connhash": connhash,
                    "text": ev_in[lines - 1]["data"],
                    "childs": [],
                    "sample_id": c[3]
                }
        else:
            othercount += count
    return ret
def call(self, number, offset):
    session = self.util.get_scoped_session()
    root_section = self.util.get_root_section(session)
    digits = self.util.get_section_digits()
    padding = digits * '0'
    full_number = func.substr(padding + db.Section.number.cast(String),
                              -digits, digits)
    icteq = session.query(
        db.Section.id,
        full_number.label('number'),
        db.Section.name,
        db.Section.abbreviation,
    ).filter_by(parent=root_section).cte()
    rcteq = session.query(
        db.Section.id,
        icteq.c.number.concat('-').concat(full_number),
        db.Section.name,
        db.Section.abbreviation,
    ).join(icteq, icteq.c.id == db.Section.parent_id)
    cteq = icteq.union(rcteq)
    q = session.query(cteq).order_by('number').offset(offset)
    if number > 0:
        q = q.limit(number)
    for pk, full_number, name, abbr in q:
        print(full_number, name, abbr)
def format_fasta(domains):
    '''Generate DNA FASTA records for a list of domains'''
    query = db.session.query(
        AsDomain.as_domain_id, AsDomainProfile.name, Gene.locus_tag,
        Locus.start_pos, Locus.end_pos, Locus.strand,
        func.substr(DnaSequence.dna, Locus.start_pos + 1,
                    Locus.end_pos - Locus.start_pos).label('sequence'),
        DnaSequence.acc, DnaSequence.version)
    query = query.join(AsDomainProfile).join(
        Locus, AsDomain.locus_id == Locus.locus_id).join(DnaSequence).join(
        Gene, AsDomain.gene_id == Gene.gene_id)
    query = query.filter(
        AsDomain.as_domain_id.in_(
            map(lambda x: x.as_domain_id, domains))).order_by(
        AsDomain.as_domain_id)
    fasta_records = []
    for domain in query:
        sequence = break_lines(
            calculate_sequence(domain.strand, domain.sequence))
        record = '>{d.locus_tag}|{d.name}|{d.acc}.{d.version}|' \
                 '{d.start_pos}-{d.end_pos}({d.strand})\n' \
                 '{sequence}'.format(d=domain, sequence=sequence)
        fasta_records.append(record)
    return fasta_records
def aset_kibd_act(self):
    ses = self.request.session
    req = self.request
    params = req.params
    url_dict = req.matchdict
    pk_id = 'id' in params and int(params['id']) or 0
    if url_dict['act'] == 'grid':
        # defining columns
        columns = []
        columns.append(ColumnDT('id'))
        columns.append(ColumnDT('units.kode'))
        columns.append(ColumnDT('units.nama'))
        columns.append(ColumnDT('kats.kode'))
        columns.append(ColumnDT('no_register'))
        #columns.append(ColumnDT('uraian'))
        columns.append(ColumnDT('kats.uraian'))
        #columns.append(ColumnDT('tahun'))
        columns.append(ColumnDT('tgl_perolehan', filter=self._DTstrftime))
        columns.append(ColumnDT('th_beli'))
        columns.append(ColumnDT('harga'))
        columns.append(ColumnDT('kondisi'))
        query = DBSession.query(AsetKib).\
            join(AsetKategori, Unit).\
            filter(AsetKib.unit_id == Unit.id,
                   #AsetKib.unit_id == ses['unit_id'],
                   AsetKib.kategori_id == AsetKategori.id,
                   AsetKib.kib == 'D',
                   func.substr(Unit.kode, 1,
                               func.length(ses['unit_kd'])) == ses['unit_kd'],
                   or_(AsetKib.disabled == '0', AsetKib.disabled == None))
        rowTable = DataTables(req, AsetKib, query, columns)
        return rowTable.output_result()
def get_scatter_data(start_year, end_year, plants):
    #-----------------------------------------------------
    # Named SQL backing bp.sp_data_module's get_scatter_data.
    # :param: start_year  first year
    # :param: end_year    last year
    # :param: plants      plant list, e.g. ["PFA1", "PFA2", ...]
    # :return: list
    # :sql: select upper(substr(asset_no,1,2)), date_format(o_warehouse_date, '%Y-%m') as y_m, count(1), sum(amount)
    #       from tm_spare_part_all
    #       where year(o_warehouse_date) >= @start_year and year(o_warehouse_date) <= @end_year
    #         and upper(substr(asset_no,1,2)) in @plants
    #       group by upper(substr(asset_no,1,2)), date_format(o_warehouse_date, '%Y-%m')
    #-----------------------------------------------------
    tspa = models.TmSparePartAll
    temp = db.session.query(
        func.upper(func.substr(tspa.asset_no, 1, 2)).label('plant'),
        func.date_format(tspa.o_warehouse_date, '%Y-%m').label('year_month'),
        func.count(tspa.sno),
        func.sum(tspa.amount)).\
        filter(func.upper(func.substr(tspa.asset_no, 1, 2)).in_(plants),
               func.year(tspa.o_warehouse_date) >= start_year,
               func.year(tspa.o_warehouse_date) <= end_year).\
        group_by(func.upper(func.substr(tspa.asset_no, 1, 2)),
                 func.date_format(tspa.o_warehouse_date, '%Y-%m')).\
        order_by('plant', 'year_month').all()
    return temp
def getExpencesByStore(cls, user, month):
    expences = db.session.query(Expence.store, func.sum(Expence.total)).\
        filter(cast(func.substr(Expence.date_of_purchase, 6, 2),
                    Numeric(10, 4)) >= month).\
        filter(Expence.user_fk == user).group_by(Expence.store).all()
    print("Expences by store", expences)
    return expences
def getExpencesGroupedByDate(cls, user, month):
    expences = db.session.query(Expence).\
        filter(cast(func.substr(Expence.date_of_purchase, 6, 2),
                    Numeric(10, 4)) >= month).\
        filter(Expence.user_fk == user).\
        order_by(Expence.date_of_purchase).all()  # %m month num.
    return expences
def getListIngr(cls):
    """
    Creates a list of ingredients for the recipes in the meal planner
    that have a planned date greater than today's date
    """
    # meals = Meals.getMealsByFutureDate(user=session['User'])
    list_ingr = db.session.query(RecipeIngredient).join(Recipe).join(Meals).\
        join(Ingredient).\
        filter(func.substr(Meals.date_planned, 0, 11) >=
               func.substr(datetime.today(), 0, 11)).\
        filter(Meals.recipe_fk == Recipe.recipe_id).\
        filter(Recipe.recipe_id == RecipeIngredient.recipe_fk).\
        filter(RecipeIngredient.ingredient_name == Ingredient.name).\
        filter(Meals.user_fk == session['User']).\
        order_by(Meals.date_planned).all()
    return list_ingr
def filtered_query(self):
    if self.first_letter:
        return self.family_query.filter(
            func.lower(tcg_tables.CardFamily.names_table.name).like(
                '{}%'.format(self.first_letter)))
    else:
        return self.family_query.filter(
            ~func.lower(func.substr(
                tcg_tables.CardFamily.names_table.name, 1, 1)).in_(
                    string.ascii_lowercase))
def created_a(area_lcode, product_type_lcode):
    # appears to be a nested helper: `self` and `areas` come from the
    # enclosing scope
    area_id = self.request.params.get('area_id')
    if area_lcode:
        a = select([areas.c.id, areas.c.name, areas.c.lcode]).where(
            areas.c.parent_id == area_id)
        lcode_len = len(area_lcode) + 3
    else:
        a = select([areas.c.id, areas.c.name, areas.c.lcode]).where(
            areas.c.level_of == 0)
        lcode_len = 3
    return a.alias('a'), func.substr(areas.c.lcode, 0, lcode_len + 1)
def getListIngrName(cls, user):
    """
    Creates a dictionary of ingredients for the recipes in the meal planner
    that have a planned date greater than today's date.
    list_ingr = {aisle: [ingr1, ingr2, ...], ...}
    """
    # meals = Meals.getMealsByFutureDate(user=session['User'])
    list_ingr = db.session.query(RecipeIngredient).join(Recipe).join(Meals).\
        join(Ingredient).\
        filter(func.substr(Meals.date_planned, 0, 11) >=
               func.substr(datetime.today(), 0, 11)).\
        filter(Meals.recipe_fk == Recipe.recipe_id).\
        filter(Recipe.recipe_id == RecipeIngredient.recipe_fk).\
        filter(RecipeIngredient.ingredient_name == Ingredient.name).\
        filter(Meals.user_fk == user).\
        order_by(Ingredient.aisle).all()
        # order_by(Meals.date_planned).all()
    print("LIST INGREDIENT", list_ingr)
    return list_ingr
def query(self):
    tables = self.left.from_clause + self.right.from_clause
    left_lt = self.config.linktab.alias('__left_linktab')
    right_lt = self.config.linktab.alias('__right_linktab')
    tables += [left_lt, right_lt]
    columns = []
    score_length = func.greatest(func.length(self.left.key),
                                 func.length(self.right.key))
    score_leven = func.levenshtein(self.left.key, self.right.key)
    score_leven = cast(score_leven, Float)
    score = 1 - (score_leven / score_length)
    columns.append(score.label("score"))
    for field in self.left.fields:
        columns.append(field.column.label(field.column_ref))
    for field in self.right.fields:
        columns.append(field.column.label(field.column_ref))
    q = select(columns=columns, from_obj=tables)
    q = self.left.apply_filters(q)
    q = self.right.apply_filters(q)
    q = q.where(left_lt.c.key == self.left.key)
    q = q.where(left_lt.c.view == self.left.name)
    q = q.where(right_lt.c.key == self.right.key)
    q = q.where(right_lt.c.view == self.right.name)
    if self.config.levenshtein <= 0:
        q = q.where(right_lt.c.fingerprint == left_lt.c.fingerprint)
    else:
        right_fp = func.substr(right_lt.c.fingerprint, 0, 255)
        left_fp = func.substr(left_lt.c.fingerprint, 0, 255)
        dist = func.levenshtein(right_fp, left_fp)
        q = q.where(dist <= self.config.levenshtein)
    q = q.limit(self.config.cutoff + 1)
    q = q.order_by(score.desc())
    q = q.distinct()
    # print q
    return q
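# A hedged pure-Python rendering of the similarity score above (the query itself
# relies on the database's levenshtein(), e.g. PostgreSQL's fuzzystrmatch; the
# local edit-distance helper here is just a textbook stand-in):
# score = 1 - levenshtein(a, b) / max(len(a), len(b)), so 1.0 means identical.
def similarity(a: str, b: str) -> float:
    def levenshtein(s: str, t: str) -> int:
        # standard dynamic-programming edit distance, row by row
        prev = list(range(len(t) + 1))
        for i, cs in enumerate(s, 1):
            cur = [i]
            for j, ct in enumerate(t, 1):
                cur.append(min(prev[j] + 1, cur[j - 1] + 1,
                               prev[j - 1] + (cs != ct)))
            prev = cur
        return prev[-1]

    return 1 - levenshtein(a, b) / max(len(a), len(b))

assert similarity('kitten', 'kitten') == 1.0
assert 0 < similarity('kitten', 'sitting') < 1  # distance 3, max length 7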
def get(self, r_id):
    user = g.user
    searchstr = request.args.get('data')
    searchtype = request.args.get('type')
    if searchstr is None:
        recipes = g.user.recipes
    else:
        if searchtype == 'name':
            recipes = db.session.query(Recipe).join(User.recipes).filter(
                func.substr(func.lower(Recipe.name), 1, len(searchstr)) ==
                func.lower(searchstr)).all()
        else:
            searchstr = searchstr.lower()
            #recipes = db.session.query(Recipe).filter(Recipe.user_id == g.user.id).filter(Recipe.id == RecipeIngredient.parent_id).join(Recipe.ingredients).filter(func.lower(RecipeIngredient.name) == searchstr).all()
            recipes = db.session.query(Recipe).join(User.recipes).filter(
                Recipe.user_id == g.user.id).join(Recipe.ingredients).filter(
                func.lower(RecipeIngredient.name) == searchstr).all()
            #recipes = db.session.query(User, Recipe).filter(User.id == g.user.id).join(Recipe.ingredients).filter(RecipeIngredient.name==searchstr).all()
            #recipes = db.session.query(Recipe).join(User.recipes, User.id==g.user.id).join(Recipe.ingredients).filter(RecipeIngredient.name==searchstr).all()
    return jsonify(items=[recipe.to_json() for recipe in recipes])
def export_csv(self):
    request = self.request
    ses = self.request.session
    query = DBSession.query(
        Unit.kode.label('Kode_Unit'),
        Unit.nama.label('Nama_Unit'),
        AsetKategori.kode.label('Kode_Kategori'),
        AsetKib.no_register.label('No_Register'),
        AsetKategori.uraian.label('Nama_Kategori'),
        AsetPemilik.uraian.label('Pemilik'),
        AsetKib.keterangan.label('Keterangan'),
        AsetKib.tgl_perolehan.label('Tgl_Perolehan'),
        AsetKib.asal_usul.label('Asal_Usul'),
        AsetKib.harga.label('Harga'),
        AsetKib.jumlah.label('Jumlah'),
        AsetKib.kondisi.label('Kondisi'),
        AsetKib.kib.label('Kib'),
        AsetKib.masa_manfaat.label('Masa_Manfaat'),
        AsetKib.d_konstruksi.label('Konstruksi'),
        AsetKib.d_panjang.label('Panjang'),
        AsetKib.d_lebar.label('Lebar'),
        AsetKib.d_luas.label('Luas'),
        AsetKib.d_lokasi.label('Lokasi'),
        AsetKib.d_dokumen_tanggal.label('Tgl_Dokumen'),
        AsetKib.d_dokumen_nomor.label('No_Dokumen'),
        AsetKib.d_kode_tanah.label('Kode_Tanah'),
        AsetKib.d_status_tanah.label('Status_Tanah')
    #).outerjoin(AsetRuang, and_(AsetRuang.id==AsetKib.b_kd_ruang, AsetRuang.unit_id==AsetKib.unit_id)
    ).filter(AsetKib.unit_id == Unit.id,
             AsetKib.pemilik_id == AsetPemilik.id,
             AsetKib.kategori_id == AsetKategori.id,
             AsetKib.kib == 'D',
             func.substr(Unit.kode, 1,
                         func.length(ses['unit_kd'])) == ses['unit_kd'],
             or_(AsetKib.disabled == '0', AsetKib.disabled == None)
    ).order_by(Unit.kode, AsetKategori.kode, AsetKib.no_register)
    r = query.first()
    if not r:
        request.session.flash('Data tidak ada')
        return self.route_list()
    header = r.keys()
    query = query.all()
    rows = []
    for item in query:
        rows.append(list(item))
    # override attributes of response
    filename = 'KIB_D%s.csv' % datetime.now().strftime('%Y%m%d%H%M%S')
    self.request.response.content_disposition = 'attachment;filename=' + filename
    return {
        'header': header,
        'rows': rows,
    }
def format_fasta(genes):
    '''Generate DNA FASTA records for a list of genes'''
    query = db.session.query(
        Cds.cds_id, Cds.locus_tag,
        Locus.start_pos, Locus.end_pos, Locus.strand,
        DnaSequence.acc, DnaSequence.version,
        func.substr(DnaSequence.dna, Locus.start_pos + 1,
                    Locus.end_pos - Locus.start_pos).label('sequence'))
    query = query.join(Locus).join(DnaSequence)
    query = query.filter(Cds.cds_id.in_(
        map(lambda x: x.cds_id, genes))).order_by(Cds.cds_id)
    search = ''
    if g.verbose:
        search = "|{}".format(g.search_str)
    fasta_records = []
    for gene in query:
        sequence = break_lines(calculate_sequence(gene.strand, gene.sequence))
        record = '>{g.locus_tag}|{g.acc}.{g.version}|' \
                 '{g.start_pos}-{g.end_pos}({g.strand}){search}\n' \
                 '{sequence}'.format(g=gene, search=search, sequence=sequence)
        fasta_records.append(record)
    return fasta_records
def format_fasta(domains):
    '''Generate DNA FASTA records for a list of domains'''
    query = db.session.query(
        AsDomain.as_domain_id, AsDomainProfile.name, Cds.locus_tag,
        Locus.start_pos, Locus.end_pos, Locus.strand,
        func.substr(DnaSequence.dna, Locus.start_pos + 1,
                    Locus.end_pos - Locus.start_pos).label('sequence'),
        DnaSequence.acc, DnaSequence.version)
    query = query.join(AsDomainProfile).join(
        Locus, AsDomain.locus_id == Locus.locus_id).join(DnaSequence).join(
        Cds, AsDomain.cds_id == Cds.cds_id)
    query = query.filter(
        AsDomain.as_domain_id.in_(
            map(lambda x: x.as_domain_id, domains))).order_by(
        AsDomain.as_domain_id)
    search = ''
    if g.verbose:
        search = "|{}".format(g.search_str)
    fasta_records = []
    for domain in query:
        sequence = break_lines(
            calculate_sequence(domain.strand, domain.sequence))
        record = '>{d.locus_tag}|{d.name}|{d.acc}.{d.version}|' \
                 '{d.start_pos}-{d.end_pos}({d.strand}){search}\n' \
                 '{sequence}'.format(d=domain, search=search, sequence=sequence)
        fasta_records.append(record)
    return fasta_records
def export_csv(self):
    request = self.request
    ses = self.request.session
    query = DBSession.query(
        Unit.kode.label('Kode_Unit'),
        Unit.nama.label('Nama_Unit'),
        AsetKategori.kode.label('Kode_Kategori'),
        AsetKib.no_register.label('No_Register'),
        AsetKategori.uraian.label('Nama_Kategori'),
        AsetPemilik.uraian.label('Pemilik'),
        AsetKib.keterangan.label('Keterangan'),
        AsetKib.tgl_perolehan.label('Tgl_Perolehan'),
        AsetKib.asal_usul.label('Asal_Usul'),
        AsetKib.harga.label('Harga'),
        AsetKib.jumlah.label('Jumlah'),
        AsetKib.kondisi.label('Kondisi'),
        AsetKib.kib.label('Kib'),
        AsetKib.masa_manfaat.label('Masa_Manfaat'),
        AsetKib.e_judul.label('Judul'),
        AsetKib.e_jenis.label('Jenis'),
        AsetKib.e_spek.label('Spek'),
        AsetKib.e_ukuran.label('Ukuran'),
        AsetKib.e_bahan.label('Bahan'),
        AsetKib.e_pencipta.label('Pencipta'),
        AsetKib.e_asal.label('Asal')
    ).filter(AsetKib.unit_id == Unit.id,
             AsetKib.pemilik_id == AsetPemilik.id,
             AsetKib.kategori_id == AsetKategori.id,
             AsetKib.kib == 'E',
             func.substr(Unit.kode, 1,
                         func.length(ses['unit_kd'])) == ses['unit_kd'],
             or_(AsetKib.disabled == '0', AsetKib.disabled == None)
    ).order_by(Unit.kode, AsetKategori.kode, AsetKib.no_register)
    r = query.first()
    if not r:
        request.session.flash('Data tidak ada')
        return self.route_list()
    header = r.keys()
    query = query.all()
    rows = []
    for item in query:
        rows.append(list(item))
    # override attributes of response
    filename = 'KIB_E%s.csv' % datetime.now().strftime('%Y%m%d%H%M%S')
    self.request.response.content_disposition = 'attachment;filename=' + filename
    return {
        'header': header,
        'rows': rows,
    }
def convert_multipart_messages(cls):
    """
    Convert multipart messages into a single row in the table
    """
    # looking for distinct UDHs
    udhs = db.session.query(func.substr(cls.udh, 1, 10)) \
        .filter(cls.udh.like("%01")) \
        .filter(cls.processed == False) \
        .all()
    udhs = [udh[0] for udh in udhs]
    # iterating over UDHs, looking for messages and merging them into a
    # single row
    for udh in udhs:
        messages = cls.query.filter(cls.udh.like("%s%%" % udh)) \
            .filter(cls.processed == False) \
            .order_by(cls.udh.asc()) \
            .all()
        if not len(messages):
            break
        # creating one single message to replace the multiple parts
        inbox = cls(text="".join([m.text for m in messages]),
                    textEncoded="".join([m.textEncoded for m in messages]),
                    recipient=messages[0].recipient,
                    senderNumber=messages[0].senderNumber,
                    smscNumber=messages[0].smscNumber,
                    processed=False,
                    udh="",
                    klass=messages[0].klass,
                    received=messages[0].received)
        db.session.add(inbox)
        # delete the multipart messages
        for message in messages:
            db.session.delete(message)
def birthday_year(cls):
    return func.substr(cls.birthday, 1, 4)
def dummy(cls):
    """Create a dummy expression."""
    return func.substr(cls.name, 0, 3)
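# A hedged sketch of how class-level expressions like birthday_year/dummy above
# are typically wired up and used. The Person model and its data are
# hypothetical; assumes SQLAlchemy 1.4+:
from sqlalchemy import Column, Integer, String, create_engine, func
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Person(Base):
    __tablename__ = 'person'
    id = Column(Integer, primary_key=True)
    birthday = Column(String)  # 'YYYY-MM-DD'

    @hybrid_property
    def birthday_year(self):
        return self.birthday[:4]          # Python-side accessor

    @birthday_year.expression
    def birthday_year(cls):
        return func.substr(cls.birthday, 1, 4)  # SQL-side expression

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as s:
    s.add(Person(birthday='1990-06-01'))
    s.commit()
    # the hybrid works in filters, rendered as substr() in SQL
    print(s.query(Person).filter(Person.birthday_year == '1990').count())  # 1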
def command_hash(arguments, engine=None, schema='main'):
    dispose = False
    if engine is None:
        engine = create(arguments['DATABASE'])
        dispose = True
    create_schema(engine, schema)
    metadata = MetaData(schema=schema)
    Files = Table('Files', metadata, autoload=True, autoload_with=engine)
    with engine_dispose(engine, dispose):
        conn = engine.connect()
        try:
            for input in arguments['INPUTS']:
                input = os.path.realpath(input)
                for inputRoot, inputFolders, inputFiles in walk(
                        input, followlinks=False, skiplinks=True):
                    badFiles = set()
                    for inputFile in inputFiles:
                        try:
                            stat = inputFile.stat(follow_symlinks=False)
                        except Exception:
                            badFiles.add(inputFile)
                            continue
                        if not S_ISREG(stat.st_mode):
                            badFiles.add(inputFile)
                            continue
                        size = stat.st_size
                        time = stat.st_mtime_ns
                        file = conn.execute(Files.select().where(
                            Files.c.path == inputFile.path)).fetchone()
                        if file is None or size != file.size or time != file.time \
                                or (arguments['--quick'] and file.hash_quick is None) \
                                or (arguments['--full'] and file.hash_total is None):
                            # calculate changes
                            path = inputFile.path
                            name = inputFile.name
                            extension = os.path.splitext(name)[1]
                            hash_quick = None
                            hash_total = None
                            if size == 0:
                                hash_quick = HASH_ZERO
                                hash_total = HASH_ZERO
                            elif arguments['--quick'] or arguments['--full']:
                                hash_quick, hash_total = hashfile(
                                    path, stat, not arguments['--full'])
                                if hash_quick is None and hash_total is None:
                                    badFiles.add(inputFile)
                                    continue
                            if file is None:
                                upsert = Files.insert().values(
                                    path=path, name=name, extension=extension)
                            else:
                                upsert = Files.update().where(Files.c.path == path)
                            conn.execute(upsert.values(
                                size=size,
                                time=time,
                                hash_quick=hash_quick,
                                hash_total=hash_total
                            ))
                    # Are we only updating multiple files?
                    # Note: When a single file update is manually requested,
                    # don't do deletes
                    if not (len(inputFiles) == 1 and inputFiles[0].path == input):
                        # Delete everything under root which wasn't found by
                        # the walk
                        basePath = os.path.join(inputRoot, '')
                        sql = Files.delete()
                        sql = sql.where(
                            func.substr(Files.c.path, 1, len(basePath)) == basePath)
                        # delete any folders not found by the walk
                        if len(inputFolders) != 0:
                            sql = sql.where(and_(*(
                                func.substr(Files.c.path, 1,
                                            len(os.path.join(inputFolder.path, ''))) !=
                                os.path.join(inputFolder.path, '')
                                for inputFolder in inputFolders
                            )))
                        # delete any files not found by the walk AND delete
                        # any files we couldn't hash
                        wantedFiles = [file for file in inputFiles
                                       if file not in badFiles]
                        if len(wantedFiles) != 0:
                            sql = sql.where(Files.c.path.notin_(
                                [unwantedFile.path for unwantedFile in wantedFiles]))
                        conn.execute(sql)
                    elif len(badFiles) != 0:
                        conn.execute(Files.delete().where(Files.c.path == input))
        finally:
            conn.close()
diff_exp_chip_peak = ome.query(diff_exp.c.id.label('diff_exp_id'),
                               chip_peak.c.id.label('chip_peak_id'),
                               chip_peak.c.chip_peak_group,
                               chip_peak.c.target,
                               chip_peak.c.antibody,
                               diff_exp.c.strain1,
                               diff_exp.c.strain2,
                               diff_exp.c.environment_id,
                               diff_exp.c.carbon_source,
                               diff_exp.c.nitrogen_source,
                               diff_exp.c.electron_acceptor,
                               diff_exp.c.supplements,
                               diff_exp.c.expression_type).\
    join(chip_peak,
         and_(chip_peak.c.environment_id == diff_exp.c.environment_id,
              func.lower(chip_peak.c.target) ==
              func.substr(diff_exp.c.strain1, 7,
                          func.length(diff_exp.c.strain1)),
              diff_exp.c.strain2 == 'wt',
              chip_peak.c.strain == 'wt')).subquery()

GenomeRegion2 = aliased(GenomeRegion)

chip_peak_gene = ome.query(ChIPPeakData.value.label('peak_value'),
                           ChIPPeakData.genome_region_id.label('peak_genome_region_id'),
                           GenomeRegion.leftpos.label('leftpos'),
                           GenomeRegion.rightpos.label('rightpos'),
                           GenomeRegion.strand.label('strand'),
                           chip_peak.c.target.label('target'),
                 'zaa': z.zaa,
                 'zam': z.zam,
                 'zna': z.zna,
                 'ile_ou_pays': z.ile_ou_pays,
                 'surtaxe_pcv': z.surtaxe_pcv}

# Prefixes sorted from longest to shortest, so each destination matches the
# most specific prefix
zones = sorted(zones_data.keys(), key=len, reverse=True)

# Subscription plan definition(s)
optimum_soc = Optimum(500, '1')

nouveau = erreur = 0
# For every billed outgoing call whose cost has not yet been computed
for cdr in DBSession.query(CDR). \
        filter(func.substr(CDR.dstchannel, 0, 4).in_(trunks)). \
        filter(CDR.billsec > 0). \
        filter(CDR.ht == None). \
        order_by(CDR.calldate):
    nouveau += 1
    user, dept = c2ud.get(cdr.src, (None, None))
    typ = u'Optimum bâtiment'
    ttc, ht = optimum_soc(cdr)
    if ht is None:
        erreur += 1
        continue