def filtered_spectrum_group_stream(self, spec_filter=None):
    """Yield lists of spectra grouped by (smiles, instrument_type,
    precursor_type, author, num_decimal_places).

    Spectrum ids of each group are concatenated in SQL via
    ``group_concat``, resolved one by one through
    ``get_spectrum_by_id`` and kept only when *spec_filter* (a
    predicate over a spectrum object; defaults to accept-all)
    returns a truthy value.  Ids whose spectra fail to parse
    (``SyntaxError``) are reported and skipped.
    """
    if not spec_filter:
        def _accept_all(arg):
            return True
        spec_filter = _accept_all

    group_columns = (
        SpecInfo.smiles,
        SpecInfo.instrument_type,
        SpecInfo.precursor_type,
        SpecInfo.author,
        SpecInfo.num_decimal_places,
    )
    grouped = self.session.query(
        *group_columns,
        func.group_concat(SpecInfo.spectrum_id).label("group_id"),
    ).group_by(*group_columns)

    for row in grouped.all():
        kept = []
        for spec_id in row.group_id.split(","):
            try:
                candidate = self.get_spectrum_by_id(spec_id)
            except SyntaxError:
                # Malformed stored spectrum data raises on parse.
                print("SyntaxError occurred while processing {!r}".format(
                    spec_id))
            else:
                if spec_filter(candidate):
                    kept.append(candidate)
        yield kept
def get_grouped_spectrum_by_inchikey_stream(self, noise_mod='dynamic', list_length_min=3):
    """Yield spectrum groups keyed by (inchikey, instrument_type,
    prec_type, collision_energy).

    Only spectra recorded at collision energies 10/20/40/60/80 are
    considered.  A spectrum is kept when it still has more than 4
    peaks after noise removal with *noise_mod*, and a group is
    yielded only when it contains at least *list_length_min* kept
    spectra.

    Yields
    ------
    tuple
        (inchikey, instrument_type, prec_type, collision_energy,
        list of spectrum objects)
    """
    query = (
        self.session.query(
            SpectrumInfo.inchikey,
            SpectrumInfo.instrument_type,
            SpectrumInfo.prec_type,
            SpectrumInfo.collision_energy,
            func.group_concat(SpectrumInfo.spec_id).label("group_id"),
        )
        .filter(
            SpectrumInfo.collision_energy.in_([10, 20, 40, 60, 80])
        )
        .group_by(
            SpectrumInfo.inchikey,
            SpectrumInfo.instrument_type,
            SpectrumInfo.prec_type,
            SpectrumInfo.collision_energy,
        )
    )
    for result in query.all():
        spectrum_list = []
        for spectrum_id in result.group_id.split(","):
            temp_spectrum = self.get(spec_id=spectrum_id)
            if len(temp_spectrum.peaks_cut_noise(noise_mod)) > 4:
                # FIX: reuse the spectrum already fetched above instead
                # of issuing a second, identical database lookup.
                spectrum_list.append(temp_spectrum)
        if len(spectrum_list) >= list_length_min:
            yield (result.inchikey, result.instrument_type,
                   result.prec_type, result.collision_energy,
                   spectrum_list)
def __query_database(self, search=None, page=0, page_size=0, order_by=None, order_dir=None, host_filter=None):
    """Fetch hosts bundled with their metadata, concatenated interface
    ids and vulnerability/open-service counts.

    Applies search, column filtering, sorting and (optionally)
    pagination.  Returns ``(results, count)`` where *count* is the
    total number of matching rows before pagination.
    """
    # FIX: the original used a mutable default argument (host_filter={}),
    # which is shared across calls; normalize None to a fresh dict.
    if host_filter is None:
        host_filter = {}

    host_bundle = Bundle('host', Host.id, Host.name, Host.os, Host.description, Host.owned,
                         Host.default_gateway_ip, Host.default_gateway_mac, EntityMetadata.couchdb_id,
                         EntityMetadata.revision, EntityMetadata.update_time, EntityMetadata.update_user,
                         EntityMetadata.update_action, EntityMetadata.creator, EntityMetadata.create_time,
                         EntityMetadata.update_controller_action, EntityMetadata.owner, EntityMetadata.command_id,
                         func.group_concat(distinct(Interface.id)).label('interfaces'),
                         func.count(distinct(Vulnerability.id)).label('vuln_count'),
                         func.count(distinct(Service.id)).label('open_services_count'))

    # Outer joins keep hosts that have no metadata/interfaces/vulns/services;
    # only services in an "open" state contribute to open_services_count.
    query = self._session.query(host_bundle)\
        .outerjoin(EntityMetadata, EntityMetadata.id == Host.entity_metadata_id)\
        .outerjoin(Interface, Host.id == Interface.host_id)\
        .outerjoin(Vulnerability, Host.id == Vulnerability.host_id)\
        .outerjoin(Service, (Host.id == Service.host_id) & (Service.status.in_(('open', 'running', 'opened'))))\
        .group_by(Host.id)

    # Apply pagination, sorting and filtering options to the query
    query = sort_results(query, self.COLUMNS_MAP, order_by, order_dir, default=Host.id)
    query = apply_search_filter(query, self.COLUMNS_MAP, search, host_filter, self.STRICT_FILTERING)
    count = get_count(query, count_col=Host.id)

    if page_size:
        query = paginate(query, page, page_size)

    results = query.all()

    return results, count
def get_grouped_spectrum_by_smiles_stream(self, smiles, instrument_type, precursor_type, tags):
    """Yield non-empty lists of spectra matching the given smiles,
    instrument type, precursor type and tags, grouped by
    (smiles, instrument_type, precursor_type, author,
    num_decimal_places).

    Ids whose spectra fail to load with SyntaxError or ValueError
    are reported to stdout and skipped.
    """
    grouping = (
        SpecInfo.smiles,
        SpecInfo.instrument_type,
        SpecInfo.precursor_type,
        SpecInfo.author,
        SpecInfo.num_decimal_places,
    )
    query = (
        self.session.query(
            *grouping,
            func.group_concat(SpecInfo.spectrum_id).label("group_id"),
        )
        .filter(
            SpecInfo.smiles == smiles,
            SpecInfo.instrument_type == instrument_type,
            SpecInfo.precursor_type == precursor_type,
            SpecInfo.tags == tags,
        )
        .group_by(*grouping)
    )
    for row in query.all():
        group = []
        for spectrum_id in row.group_id.split(","):
            try:
                group.append(self.get_spectrum_by_id(spectrum_id))
            except SyntaxError:
                print("SyntaxError occurred while processing {!r}".format(
                    spectrum_id))
            except ValueError:
                print("ValueError occurred while processing {!r}".format(
                    spectrum_id))
        if group:
            yield group
def synsets(session, pos=None):
    """Query for synsets, concatenated ids and lemmas of their LUs.

    Parameters
    ----------
    session : orm.session.Session
    pos : list
        Parts of speech to select (default [2])
    """
    pos = pos or [2]
    query = session.query(
        Synset.id_,
        Synset.definition,
        label('lex_ids', func.group_concat(UnitSynset.lex_id)),
        label('unitindexes', func.group_concat(UnitSynset.unitindex)),
    )
    query = query.join(UnitSynset).join(LexicalUnit)
    query = query.filter(LexicalUnit.pos.in_(pos))
    return query.order_by(Synset.id_).group_by(Synset.id_)
def get_vote_path(self, gal_id):
    """For each classification of asset *gal_id*, return the ordered
    concatenation of its non-null answer ids together with the
    classification id and the classifying user's zooniverse id."""
    annotations = self.BC.annotations
    asset_cls = self.BC.asset_classifications
    classifications = self.BC.classifications

    # Answer ids concatenated in annotation-id order within each group.
    ordered_answers = func.group_concat(
        annotations.answer_id.op('ORDER BY')(annotations.id.asc()))

    query = (
        self.session.query(ordered_answers,
                           asset_cls.classification_id,
                           classifications.zooniverse_user_id)
        .join(asset_cls, annotations.classification_id == asset_cls.classification_id)
        .join(classifications, classifications.id == asset_cls.classification_id)
        .filter(asset_cls.asset_id == gal_id)
        .filter(annotations.answer_id != None)  # noqa: E711 (SQL IS NOT NULL)
        .group_by(asset_cls.classification_id)
    )
    return query.all()
def __query_database(self, search=None, page=0, page_size=0, order_by=None, order_dir=None, vuln_filter=None):
    """Fetch vulnerabilities with their service/host context and the
    host's concatenated interface hostnames.

    Applies search, column filtering, specialized sorting and
    (optionally) pagination.  Returns ``(results, count)`` where
    *count* is the total matching row count before pagination.
    """
    # FIX: the original used a mutable default argument (vuln_filter={}),
    # which is shared across calls; normalize None to a fresh dict.
    if vuln_filter is None:
        vuln_filter = {}

    # Instead of using SQLAlchemy ORM facilities to fetch rows, we bundle involved columns for
    # organizational and MAINLY performance reasons. Doing it this way, we improve retrieving
    # times from large workspaces almost 2x.
    vuln_bundle = Bundle('vuln', Vulnerability.id.label('server_id'), Vulnerability.name.label('v_name'),
                         Vulnerability.confirmed, Vulnerability.data,
                         Vulnerability.description, Vulnerability.easeofresolution, Vulnerability.impact_accountability,
                         Vulnerability.impact_availability, Vulnerability.impact_confidentiality, Vulnerability.impact_integrity,
                         Vulnerability.refs, Vulnerability.resolution, Vulnerability.severity, Vulnerability.owned, Vulnerability.status,
                         Vulnerability.website, Vulnerability.path, Vulnerability.request, Vulnerability.response,
                         Vulnerability.method, Vulnerability.params, Vulnerability.pname, Vulnerability.query,
                         EntityMetadata.couchdb_id, EntityMetadata.revision, EntityMetadata.create_time, EntityMetadata.creator,
                         EntityMetadata.owner, EntityMetadata.update_action, EntityMetadata.update_controller_action,
                         EntityMetadata.update_time, EntityMetadata.update_user, EntityMetadata.document_type, EntityMetadata.command_id,
                         Vulnerability.attachments, Vulnerability.policyviolations)
    service_bundle = Bundle('service', Service.name.label('s_name'), Service.ports, Service.protocol, Service.id)
    host_bundle = Bundle('host', Host.name)

    # IMPORTANT: OUTER JOINS on those tables is IMPERATIVE. Changing them could result in loss of
    # data. For example, on vulnerabilities not associated with any service and instead to its host
    # directly.
    # NOTE(review): Interface below is an INNER join, so vulnerabilities on
    # hosts with no interfaces are excluded — confirm this is intended.
    query = self._session.query(vuln_bundle, service_bundle, host_bundle, func.group_concat(Interface.hostnames))\
        .group_by(Vulnerability.id)\
        .outerjoin(EntityMetadata, EntityMetadata.id == Vulnerability.entity_metadata_id)\
        .outerjoin(Service, Service.id == Vulnerability.service_id)\
        .outerjoin(Host, Host.id == Vulnerability.host_id)\
        .join(Interface, Interface.host_id == Host.id)

    # Apply pagination, sorting and filtering options to the query
    query = self.__specialized_sort(query, order_by, order_dir)
    query = apply_search_filter(query, self.COLUMNS_MAP, search, vuln_filter, self.STRICT_FILTERING)
    count = get_count(query, count_col=Vulnerability.id)

    if page_size:
        query = paginate(query, page, page_size)

    results = query.all()

    return results, count
def __query_database(self, search=None, page=0, page_size=0, order_by=None, order_dir=None, vuln_filter=None):
    """Fetch vulnerabilities with their service/host context and the
    host's concatenated interface hostnames.

    Applies search, column filtering, specialized sorting and
    (optionally) pagination.  Returns ``(results, count)`` where
    *count* is the total matching row count before pagination.
    """
    # FIX: the original used a mutable default argument (vuln_filter={}),
    # which is shared across calls; normalize None to a fresh dict.
    if vuln_filter is None:
        vuln_filter = {}

    # Instead of using SQLAlchemy ORM facilities to fetch rows, we bundle involved columns for
    # organizational and MAINLY performance reasons. Doing it this way, we improve retrieving
    # times from large workspaces almost 2x.
    vuln_bundle = Bundle('vuln', Vulnerability.id.label('server_id'), Vulnerability.name.label('v_name'),
                         Vulnerability.confirmed, Vulnerability.data,
                         Vulnerability.description, Vulnerability.easeofresolution, Vulnerability.impact_accountability,
                         Vulnerability.impact_availability, Vulnerability.impact_confidentiality, Vulnerability.impact_integrity,
                         Vulnerability.refs, Vulnerability.resolution, Vulnerability.severity, Vulnerability.owned, Vulnerability.status,
                         Vulnerability.website, Vulnerability.path, Vulnerability.request, Vulnerability.response,
                         Vulnerability.method, Vulnerability.params, Vulnerability.pname, Vulnerability.query,
                         EntityMetadata.couchdb_id, EntityMetadata.revision, EntityMetadata.create_time, EntityMetadata.creator,
                         EntityMetadata.owner, EntityMetadata.update_action, EntityMetadata.update_controller_action,
                         EntityMetadata.update_time, EntityMetadata.update_user, EntityMetadata.document_type, EntityMetadata.command_id,
                         Vulnerability.attachments, Vulnerability.policyviolations)
    service_bundle = Bundle('service', Service.name.label('s_name'), Service.ports, Service.protocol, Service.id)
    host_bundle = Bundle('host', Host.name)

    # IMPORTANT: OUTER JOINS on those tables is IMPERATIVE. Changing them could result in loss of
    # data. For example, on vulnerabilities not associated with any service and instead to its host
    # directly.
    # NOTE(review): Interface below is an INNER join, so vulnerabilities on
    # hosts with no interfaces are excluded — confirm this is intended.
    query = self._session.query(vuln_bundle, service_bundle, host_bundle, func.group_concat(Interface.hostnames))\
        .group_by(Vulnerability.id)\
        .outerjoin(EntityMetadata, EntityMetadata.id == Vulnerability.entity_metadata_id)\
        .outerjoin(Service, Service.id == Vulnerability.service_id)\
        .outerjoin(Host, Host.id == Vulnerability.host_id)\
        .join(Interface, Interface.host_id == Host.id)

    # Apply pagination, sorting and filtering options to the query
    query = self.__specialized_sort(query, order_by, order_dir)
    query = apply_search_filter(query, self.COLUMNS_MAP, search, vuln_filter, self.STRICT_FILTERING)
    # NOTE(review): a near-identical sibling implementation passes
    # count_col=Vulnerability.id here; left unchanged because get_count's
    # exact semantics with GROUP BY are not visible from this file.
    count = get_count(query)

    if page_size:
        query = paginate(query, page, page_size)

    results = query.all()

    return results, count
def metadata_trend(num_days):
    """Decrement ``Repo.worth`` for repos whose mean metric dropped.

    Looks at RepoMean rows from the last *num_days* days and, per repo,
    takes the two most recent values (newest first, produced by
    ``group_concat`` ordered by ``created_at`` desc, trimmed with
    ``substring_index``).  When ``is_worth_decreased(curr, prev)``
    reports a drop, the repo's worth is decreased by one.
    """
    cutoff = datetime.now() - timedelta(days=num_days)
    results = db.session.query(
        RepoMean.repo_id,
        # "newest,second-newest": substring_index keeps the first two
        # comma-separated elements of the ordered value list.
        func.substring_index(
            func.group_concat(
                RepoMean.value.op('ORDER BY')(expression.desc(RepoMean.created_at))
            ),
            ',', 2)
    )\
        .filter(RepoMean.created_at >= cutoff)\
        .group_by(RepoMean.repo_id)\
        .all()

    # Repos with a single data point contain no comma and are skipped.
    for result in filter(lambda x: ',' in x[1], results):
        curr, prev = map(float, result[1].split(','))
        if is_worth_decreased(curr, prev):
            log.info(
                'Mean value of {0} is {1}, previous was {2}. The "worth" has been decreased by 1'
                .format(result[0], curr, prev))
            db.session.query(Repo)\
                .filter(Repo.id == result[0])\
                .update({Repo.worth: Repo.worth - 1})
    db.session.commit()
def tag_statistic(cls, author=None):
    """Collect per-tag statistics for *author*'s published articles.

    Returns an OrderedDict mapping each tag to a dict with the number
    of published articles carrying the tag ("nums") and one article
    uuid ("tag_uuid" column value, "uuid" key), e.g.
    ``{"python": {"nums": 2, "uuid": ...}, "flask": {"nums": 3, ...}}``.
    """
    rv_string = db.session.query(func.group_concat(Article.tags).label("tag_string")).\
        filter(Article.author == author, Article.status == "PUBLISHED").all()
    od = OrderedDict()
    tag_string = rv_string[0].tag_string
    # FIX: group_concat returns NULL when the author has no published
    # articles; the original crashed on .split() in that case.
    if not tag_string:
        return od
    for r in tag_string.split(","):
        # FIX: a tag appearing on several articles used to trigger a
        # redundant, identical query per occurrence; count it once.
        if r in od:
            continue
        rs = db.session.query(func.count(Article.id).label("tag_num"),
                              Article.uuid.label("tag_uuid")).\
            filter(Article.author == author,
                   Article.status == "PUBLISHED",
                   # Substring match against the comma-joined tags column
                   # (clearer than the original "%.%".replace(".", r)).
                   Article.tags.like("%{}%".format(r))
                   ).all()
        od[r] = {
            "nums": rs[0].tag_num,
            "uuid": rs[0].tag_uuid
        }
    return od
def analytics_recommendations():
    """Recommend artists to the user given in the ``user`` request arg:
    artists scrobbled by the users listed in the ``users`` request arg
    that the target user has never scrobbled, with the listening
    friends and play counts."""
    user = db.session.query(User).get(request.args.get("user"))
    # Column headers (Russian): Artist, Friends, Friend count, Play count.
    table_header = ["Исполнитель", "Друзья", "Друзей", "Прослушиваний"]
    table_body = db.session.query(
        # Artist cell rendered as a last.fm link.
        func.concat('<a href="http://last.fm/music/', Scrobble.artist, '">', Scrobble.artist, '</a>'),
        # '", "'-separated list of distinct friends who played the artist.
        func.group_concat(distinct(User.username).op("SEPARATOR")(literal_column('", "'))),
        func.count(distinct(User.username)),
        func.count(Scrobble.id)
    ).\
        join(User).\
        filter(
            Scrobble.user_id.in_(request.args.getlist("users")),
            # Exclude artists the target user already listens to.
            ~Scrobble.artist.in_([a[0] for a in db.session.query(distinct(Scrobble.artist)).filter_by(user=user).all()])
        ).\
        group_by(Scrobble.artist).\
        order_by(-func.count(Scrobble.id) if request.args.get("target") == "scrobbles" else -func.count(distinct(User.username))).\
        all()[0:1000]
    return dict(title="Рекомендации для %s" % user.username, table_header=table_header, table_body=table_body)
def metadata_trend(num_days):
    """Decrement ``Repo.worth`` for repos whose mean metric dropped.

    Looks at RepoMean rows from the last *num_days* days and, per repo,
    compares the two most recent values (newest first, produced by
    ``group_concat`` ordered by ``created_at`` desc and trimmed with
    ``substring_index``).  When the current mean is below the previous
    one, the repo's worth is decreased by one.
    """
    results = db.session.query(
        RepoMean.repo_id,
        # "newest,second-newest": substring_index keeps the first two
        # comma-separated elements of the ordered value list.
        func.substring_index(
            func.group_concat(
                RepoMean.value.op('ORDER BY')(expression.desc(RepoMean.created_at))
            ),
            ',', 2)
    )\
        .filter(RepoMean.created_at >= datetime.now() - timedelta(days=num_days))\
        .group_by(RepoMean.repo_id)\
        .all()

    # Repos with a single data point contain no comma and are skipped.
    for result in filter(lambda x: ',' in x[1], results):
        # BUG FIX: split() yields strings, and the original compared them
        # lexicographically ("10" < "9" is True). Convert to float so the
        # comparison is numeric, matching the sibling implementation.
        curr, prev = map(float, result[1].split(','))
        if curr < prev:
            app.logger.info(
                'Mean value of {0} is {1}, previous was {2}. The "worth" has been decreased by 1'
                .format(result[0], curr, prev)
            )
            db.session.query(Repo)\
                .filter(Repo.id == result[0])\
                .update({Repo.worth: Repo.worth - 1})
    db.session.commit()
def account_details(id):
    """Render/edit the details page for the account with primary key *id*.

    GET: fetches the account row (with household, billing group, fee
    structure, fee-location account and concatenated split ids) plus
    the id/text lookup lists used by the page's select widgets, then
    renders the template.  POST (JSON body): updates the account's fee
    structure, fee location, billing group, splits and payment source.

    NOTE(review): the parameter name shadows the ``id`` builtin.
    """
    # Self-join alias: the account that fees are relocated to.
    Account_Fee_Location = aliased(Account)
    account_query = db.session.query(Account.id.label('id'), Account.name.label('account_name'), Account.account_number.label('account_number'),
                                     Account.opening_date.label('opening_date'), Account.balance.label('balance'), Account.custodian.label('custodian'), Household.name.label('household'),
                                     Billing_Group.id.label('billing_group_id'), Fee_Structure.id.label('fee_structure_id'), Account.payment_source.label('payment_source'),
                                     Account_Fee_Location.id.label('fee_location_id'), func.group_concat(Split.id, ",").label('split_ids')).outerjoin(Household, Account.household_id == Household.id) \
        .outerjoin(Billing_Group, Account.billing_group_id == Billing_Group.id).outerjoin(Fee_Structure, Account.fee_id == Fee_Structure.id) \
        .outerjoin(Account_Split).outerjoin(Split).filter(Account.id == id).outerjoin(Account_Fee_Location, Account.fee_location).group_by(Account.id)
    # id/text pairs feeding the page's dropdown widgets.
    accounts_query = db.session.query(Account.id.label('id'), Account.name.label('text'))
    fee_structure_query = db.session.query(Fee_Structure.id.label('id'), Fee_Structure.name.label('text'))
    billing_group_query = db.session.query(Billing_Group.id.label('id'), Billing_Group.name.label('text'))
    split_query = db.session.query(Split.id.label('id'), Split.name.label('text'))
    fee_structures = fee_structure_query.all()
    billing_groups = billing_group_query.all()
    splits = split_query.all()
    account = account_query.first()
    # Sibling accounts in the same billing group, excluding this one.
    accounts = accounts_query.filter(Account.billing_group_id == account.billing_group_id).filter(Account.id != account.id).all()
    fee_structure_keys = fee_structures[0].keys()
    billing_group_keys = billing_groups[0].keys()
    split_keys = splits[0].keys()
    accounts_keys = accounts[0].keys()
    account_keys = account.keys()
    # Convert row tuples into dicts keyed by their query labels.
    accounts_json = [dict(zip([key for key in accounts_keys], row)) for row in accounts]
    fee_structures_json = [dict(zip([key for key in fee_structure_keys], row)) for row in fee_structures]
    billing_groups_json = [dict(zip([key for key in billing_group_keys], row)) for row in billing_groups]
    splits_json = [dict(zip([key for key in split_keys], row)) for row in splits]
    account_json = json.dumps([dict(zip([key for key in account_keys], account))], default=alchemyencoder)
    payment_sources_json = [{'id': 1, 'text': 'Custodian Billed'}, {'id': 2, 'text': 'Directly Billed'}]
    if request.method == "POST" and request.json:
        data = request.json
        edit_account = db.session.query(Account).filter(Account.id == id).first()
        fee_structure = db.session.query(Fee_Structure).filter(Fee_Structure.id == data["fee_structure"]).first()
        billing_group = db.session.query(Billing_Group).filter(Billing_Group.id == data["billing_group"]).first()
        fee_location = db.session.query(Account).filter(Account.id == data["fee_location"]).first()
        splits = db.session.query(Split).filter(Split.id.in_(data["splits"])).all()
        edit_account.fee_structure = fee_structure
        edit_account.fee_location = fee_location
        edit_account.billing_group = billing_group
        edit_account.splits = splits
        edit_account.payment_source = data['payment_source']
        try:
            db.session.commit()
        except exc.IntegrityError:
            db.session.rollback()
            return redirect(url_for('account_details'))  # Not Redirecting!! Have to do it in javascript. Not Ideal!
        return redirect(url_for('account'))
    if account:
        return render_template('account_details.html', splits=splits_json, payment_sources=payment_sources_json, account_json=account_json, account=account, accounts=accounts_json, fee_structures=fee_structures_json, billing_groups=billing_groups_json, page_link=url_for('account'))
    return redirect(url_for('account'))
def account():
    """List all accounts with their billing metadata.

    GET renders the account table page.  POST (JSON body) bulk-assigns
    either a fee structure or a billing group to the accounts listed
    in ``data['accounts']``.
    """
    # Self-join alias: the account that fees are relocated to.
    Account_Fee_Location = aliased(Account)
    accounts_query = db.session.query(Account.id.label('id'), Account.name.label('Account Name'), Account.account_number.label('Account #'),
                                      Account.opening_date.label('Opening Date'), Account.balance.label('Balance'), Account.custodian.label('Custodian'), Household.name.label('Household'), Billing_Group.name.label('Billing Group'),
                                      Fee_Structure.name.label('Fee Structure'), Account.payment_source.label('Payment Source'), Account_Fee_Location.name.label('Fee Relocation'), func.group_concat(Split.name, "; ").label('Splits')) \
        .outerjoin(Household, Account.household_id == Household.id).outerjoin(Billing_Group, Account.billing_group_id == Billing_Group.id) \
        .outerjoin(Fee_Structure, Account.fee_id == Fee_Structure.id).outerjoin(Account_Split).outerjoin(Split).group_by(Account.id) \
        .outerjoin(Account_Fee_Location, Account.fee_location)
    # id/text pairs feeding the page's dropdown widgets.
    fee_structure_query = db.session.query(Fee_Structure.id.label('id'), Fee_Structure.name.label('text'))
    billing_group_query = db.session.query(Billing_Group.id.label('id'), Billing_Group.name.label('text'))
    fee_structures = fee_structure_query.all()
    billing_groups = billing_group_query.all()
    accounts = accounts_query.all()
    account_keys = accounts[0].keys()
    fee_structure_keys = fee_structures[0].keys()
    billing_group_keys = billing_groups[0].keys()
    fee_structures_json = [dict(zip([key for key in fee_structure_keys], row)) for row in fee_structures]
    billing_groups_json = [dict(zip([key for key in billing_group_keys], row)) for row in billing_groups]
    # Column definitions for the client-side table, from the query labels.
    columns = []
    for account_key in account_keys:
        columns.append({'data': account_key, 'name': account_key})
    if request.method == "POST" and request.json:
        data = request.json
        assigned_accounts = data['accounts']
        if 'fee_structure' in data.keys():
            assigned_fee_structure = data['fee_structure'][0]
            db.session.query(Account).filter(Account.id.in_(assigned_accounts)).update({Account.fee_id: assigned_fee_structure}, synchronize_session=False)
            try:
                db.session.commit()
            except exc.IntegrityError:
                db.session.rollback()
        elif 'billing_group' in data.keys():
            assigned_billing_group = data['billing_group'][0]
            db.session.query(Account).filter(Account.id.in_(assigned_accounts)).update({Account.billing_group_id: assigned_billing_group}, synchronize_session=False)
            try:
                db.session.commit()
            except exc.IntegrityError:
                db.session.rollback()
    return render_template('account_display.html', fee_structures=fee_structures_json, billing_groups=billing_groups_json, data_link=url_for('account_data'), page_link=url_for('account'), columns=columns, title='Accounts')
def account_data():
    """Return the accounts table as JSON for the table data endpoint.

    Produces ``{"data": [...]}`` where each element maps the query's
    column labels to the corresponding row values.
    """
    # Self-join alias: the account that fees are relocated to.
    Account_Fee_Location = aliased(Account)
    accounts_query = db.session.query(Account.id.label('id'), Account.name.label('Account Name'), Account.account_number.label('Account #'),
                                      Account.opening_date.label('Opening Date'), Account.balance.label('Balance'), Account.custodian.label('Custodian'), Household.name.label('Household'), Billing_Group.name.label('Billing Group'),
                                      Fee_Structure.name.label('Fee Structure'), Account.payment_source.label('Payment Source'), Account_Fee_Location.name.label('Fee Relocation'), func.group_concat(Split.name, "; ").label('Splits')) \
        .outerjoin(Household, Account.household_id == Household.id).outerjoin(Billing_Group, Account.billing_group_id == Billing_Group.id) \
        .outerjoin(Fee_Structure, Account.fee_id == Fee_Structure.id).outerjoin(Account_Split).outerjoin(Split).group_by(Account.id) \
        .outerjoin(Account_Fee_Location, Account.fee_location)
    accounts = accounts_query.all()
    # FIX: the original crashed with IndexError on accounts[0].keys()
    # when the table is empty; return an empty data set instead.
    if not accounts:
        return json.dumps({'data': []})
    keys = accounts[0].keys()
    data = [dict(zip([key for key in keys], row)) for row in accounts]
    data = json.dumps({'data': data}, default=alchemyencoder)
    return data
for nutrition, tags in itertools.groupby(items, key=keyfunc): print(nutrition) #print (list(tags)) ingkey_tags = ((x[2], x[3]) for x in tags) ingkey_list, tags_list = zip(*ingkey_tags) tags_list = sorted(tags_list) print(" ", ", ".join(tags_list)) print(" ", ", ".join(ingkey_list)) food_tags = FoodNutritionTags(fn_id=nutrition[0], tags=",".join(tags_list)) session.add(food_tags) session.commit() items_fd = session.query(func.group_concat(FoodTag.name) .label("food_tag_names"), FoodNutritionTags.tags) \ .join(FoodTagItem) \ .filter(FoodTagItem.fn_id==FoodNutritionTags.fn_id) \ .group_by(FoodTagItem.fn_id) \ .order_by("food_tag_names", FoodNutritionTags.tags) #print (tabulate(items_fd, headers=["Food tags", "tags"])) def make_food_tags(ingkeys, tag_names, descs): food_tags = set() one_tag = [ "Sadje", "Čokolada", "Keksi", "Pecivo", "Stročnice", "Sladoled", "Slani prigrizek" ] desc_search = [("rižot", "rižota"), ("hash", "hash"), ("palačink", "palačinke"), ("šmorn", "šmorn"),
def send_json():
    '''
    Return all public publications (with their authors, documents and
    categories) as JSON, applying the generic request-argument filters
    described below.

    api: year - filter by year: year=2016 or year=2016,2018,..
    type - filter by type: type=techreport or type=article. Possible types are: Incollection Inbook Inproceedings Article PhDThesis Book "" Misc Proceedings Conference Techreport <null>
    author - filter by one or many authors: author=703 or author=703,709
    categories - filter by category ID(s): categories=62 or categories=62,72
    kops - if kops=true one will see all publications which are listed in kops. The DB contains <null> and "" values for Publications which are not listed in kops!
    keywords - filter by keyword ID(s):
    '''
    # default is 1024 and too small for this query
    db.session.execute("SET SESSION group_concat_max_len = 1000000")
    # Subquery linking categories to publications; referenced below via
    # cat_pub_subq.c.<column>.
    cat_pub_subq = db.session.query(Categories.id.label("cat_id"), Categories.name.label("cat_name"), Categories.parent_id, Categories_publications.publication_id) \
        .join(Categories_publications, Categories.id == Categories_publications.category_id) \
        .join(Publications, Categories_publications.publication_id == Publications.id) \
        .subquery()
    '''
    You can dynamically construct the "OR" and "AND" --> see .filter in the query. Because of this feature one can define generic filters.
    '''
    filters = request.args.to_dict()
    # filters['keywords'] = (filters['keywords'].split(','))
    # print(filters['keywords'])
    result = {
        "publications": {i: {**r[0].to_dict(),
                             # Author columns arrive as comma-joined,
                             # position-ordered strings; split them back
                             # into one dict per author.
                             'authors': uniquifySorted([
                                 {'id': a,
                                  'forename': r[2].split(',')[j],
                                  'surname': r[3].split(',')[j],
                                  'cleanname': r[4].split(',')[j]
                                  } for j, a in enumerate(r[1].split(','))]),
                             'documents': checkForEmtyDocument(r),
                             'categories': ceckForEmtyCategories(r)
                             } for i, r in enumerate(db.session.query(
                                 Publications,
                                 # Author fields, ordered by author position.
                                 func.group_concat(func.ifnull(Authors.id, '').op("ORDER BY")(Authors_publications.position)),
                                 func.group_concat(func.ifnull(Authors.forename, '').op("ORDER BY")(Authors_publications.position)),
                                 func.group_concat(func.ifnull(Authors.surname, '').op("ORDER BY")(Authors_publications.position)),
                                 func.group_concat(func.ifnull(Authors.cleanname, '').op("ORDER BY")(Authors_publications.position)),
                                 # Document and category fields, ';'-separated.
                                 func.group_concat(func.ifnull(Documents.id, '').op("SEPARATOR")(literal_column('\';\''))),
                                 func.group_concat(func.ifnull(Documents.publication_id, '').op("SEPARATOR")(literal_column('\';\''))),
                                 func.group_concat(func.ifnull(Documents.visible, '').op("SEPARATOR")(literal_column('\';\''))),
                                 func.group_concat(func.ifnull(Documents.remote, '').op("SEPARATOR")(literal_column('\';\''))),
                                 func.group_concat(func.ifnull(Documents.filename, '').op("SEPARATOR")(literal_column('\';\''))),
                                 func.group_concat(func.ifnull(cat_pub_subq.c.cat_id, '').op("SEPARATOR")(literal_column('\';\''))),
                                 func.group_concat(func.ifnull(cat_pub_subq.c.cat_name, '').op("SEPARATOR")(literal_column('\';\''))),
                                 func.group_concat(func.ifnull(cat_pub_subq.c.parent_id, '').op("SEPARATOR")(literal_column('\';\''))),
                             )
                                 .outerjoin(Documents, Publications.id == Documents.publication_id)
                                 .outerjoin(cat_pub_subq, Publications.id == cat_pub_subq.c.publication_id)
                                 .filter(Publications.id == Authors_publications.publication_id)
                                 .filter(Authors.id == Authors_publications.author_id)
                                 # Each filter/having below collapses to
                                 # true() when its request arg is absent.
                                 .filter(Publications.year.in_(filters['year'].split(',')) if 'year' in filters else true())
                                 .filter(Publications.type.in_([filters['type']]) if 'type' in filters else true())
                                 .having(or_((func.group_concat(Authors.id).op('regexp')('(^|,)' + str(a) + '(,|$)') for a in filters['author'].split(',')) if 'author' in filters else true()))
                                 .having(and_((func.group_concat(cat_pub_subq.c.cat_id).op('regexp')('(^|,)' + str(a) + '(,|$)') for a in filters['category'].split(',')) if 'category' in filters else true()))
                                 .filter((and_(Publications.kops != "", Publications.kops != None) if filters['kops'] == 'true' else or_(Publications.kops == None, Publications.kops == "")) if 'kops' in filters else true())
                                 .filter((Publications.id.in_(pubIDusesAllKeywords(filters['keywords']))) if 'keywords' in filters else true())
                                 .filter(Publications.public == 1)
                                 .group_by(Publications.id)
                                 .order_by(Publications.year.desc(), Publications.id.desc())
                                 .all())},
        "file_dir": "uploadedFiles"
    }
    return jsonify(result)
def analytics_hitparade():
    """Multi-step, form-driven yearly "hit parade" report for a user.

    GET renders step 1 (top artists of the year and their tracks);
    POST steps 2-4 progressively refine the selection: tracks per
    chosen artist, approximate track lengths, and the final parade
    aggregated per (artist, album) and sorted by the requested metric.
    """
    user = db.session.query(User).get(request.args.get("user"))
    year = int(request.args.get("year"))
    # Title (Russian): "Hit parade of <user> for <year>".
    title = "Хит-парад %(user)s за %(year)s год" % {
        "user": user.username,
        "year": year,
    }
    # Unix-timestamp bounds of the selected year.
    year_start_uts = time.mktime(datetime(year=year, month=1, day=1).timetuple())
    year_end_uts = time.mktime(datetime(year=year, month=12, day=31, hour=23, minute=59, second=59).timetuple())
    # SQL expression concatenating a track's distinct album names with a
    # sentinel separator, and the Python helper that splits them back.
    track_albums_raw = func.group_concat(distinct(Scrobble.album).op("SEPARATOR")(literal_column('"@@@@@@"')))
    track_albums = lambda track_albums_raw: sorted(filter(None, track_albums_raw.split("@@@@@@")))
    if request.method != "POST":
        # Step 1: top-N artists of the year; for each, the tracks whose
        # first-ever scrobble falls within the year.
        return render_template("analytics/hitparade.html", **{
            "title": title + ": шаг 1",
            "step": 1,
            "year": year,
            "artists": [
                (artist, scrobble_count,
                 [(track, track_albums(albums_raw))
                  for track, albums_raw in db.session.query(
                      Scrobble.track, track_albums_raw
                  ).
                  filter(
                      Scrobble.user_id == user.id,
                      Scrobble.artist == artist,
                      Scrobble.track.in_(map(operator.itemgetter(0), db.session.query(Scrobble.track).
                                             filter(
                                                 Scrobble.artist == artist
                                             ).
                                             group_by(Scrobble.track).
                                             having(
                                                 func.min(Scrobble.uts) >= year_start_uts
                                             ).
                                             all()))
                  ).
                  group_by(Scrobble.track).
                  order_by(func.min(Scrobble.uts)).
                  all()])
                for artist, scrobble_count in db.session.query(Scrobble.artist, func.count(Scrobble.id)).
                filter(
                    Scrobble.user_id == user.id,
                    Scrobble.uts >= year_start_uts,
                    Scrobble.uts <= year_end_uts,
                ).
                group_by(Scrobble.artist).
                order_by(-func.count(Scrobble.id))
                [:request.args.get("n")]
            ]
        })
    else:
        if request.form.get("step") == "2":
            # Step 2: for each chosen artist, all tracks scrobbled during
            # the year, flagging whether the year's first scrobble is the
            # track's first scrobble ever.
            return render_template("analytics/hitparade.html", **{
                "title": title + ": шаг 2",
                "step": 2,
                "year": year,
                "artist_tracks": [
                    (artist, [
                        (track, track_albums(albums_raw), this_year_first_scrobble == db.session.query(func.min(Scrobble.uts)).
                         filter(
                             Scrobble.user_id == user.id,
                             Scrobble.artist == artist,
                             Scrobble.track == track
                         ).
                         scalar())
                        for track, albums_raw, this_year_first_scrobble in db.session.query(
                            Scrobble.track, track_albums_raw, func.min(Scrobble.uts)
                        ).
                        filter(
                            Scrobble.user_id == user.id,
                            Scrobble.artist == artist,
                            Scrobble.uts >= year_start_uts,
                            Scrobble.uts <= year_end_uts
                        ).
                        group_by(Scrobble.track).
                        order_by(func.min(Scrobble.uts))
                    ])
                    for artist in request.form.getlist("artist")
                ]
            })
        if request.form.get("step") == "3":
            # Step 3: look up any stored approximate length for each
            # selected "<artist> - <track>" form entry.
            return render_template("analytics/hitparade.html", **{
                "title": title + ": шаг 3",
                "step": 3,
                "year": year,
                "artist_tracks": [
                    (artist, [
                        # track[len(artist) + 3:] strips "<artist> - ".
                        (track[len(artist) + 3:], request.form[track], db.session.query(ApproximateTrackLength).
                         filter(
                             ApproximateTrackLength.artist == artist,
                             ApproximateTrackLength.track == track.replace(artist + " - ", "")
                         ).
                         first())
                        for track in request.form.getlist("track")
                        if track.startswith(artist + " - ")
                    ])
                    for artist in request.form.getlist("artist")
                ]
            })
        if request.form.get("step") == "4":
            # Step 4: final parade per (artist, album); each entry is
            # (artist, album, tracks, total scrobbles, total seconds,
            # scrobbles/day, seconds/day), sortable via the "sort" arg.
            return render_template("analytics/hitparade.html", **{
                "title": title,
                "step": 4,
                "sort": request.args.get("sort"),
                # lol "let" analogue for python
                "parade": sorted([
                    [
                        (artist, album, tracks,
                         sum([track[1] for track in tracks]),
                         sum([track[3] for track in tracks]),
                         sum([track[1] for track in tracks]) / float(year_end_uts - min([track[4] for track in tracks])) * 86400,
                         sum([track[3] for track in tracks]) / float(year_end_uts - min([track[4] for track in tracks])) * 86400)
                        for artist, album, tracks in [(artist, album, sorted([
                            [
                                # (track, scrobbles, length, scrobbles*length,
                                # first scrobble uts, last scrobble uts)
                                (track, scrobbles, length, scrobbles * length, first_scrobble, last_scrobble)
                                for track, scrobbles, first_scrobble, last_scrobble, length in [(request.form.getlist("track")[i].replace(artist + " - ", ""),) + db.session.query(func.count(Scrobble), func.min(Scrobble.uts), func.max(Scrobble.uts)).filter(
                                    Scrobble.user_id == user.id,
                                    Scrobble.artist == artist,
                                    Scrobble.track == request.form.getlist("track")[i].replace(artist + " - ", ""),
                                    Scrobble.uts >= year_start_uts,
                                    Scrobble.uts <= year_end_uts
                                )[0] + (int(request.form.getlist("length_m")[i]) * 60 + int(request.form.getlist("length_s")[i]),)]
                            ][0]
                            for i in range(0, len(request.form.getlist("track")))
                            if request.form.getlist("track")[i].startswith(artist + " - ") and
                            request.form.getlist("album")[i] == album
                        ], key=lambda track_scrobbles_length_total: -track_scrobbles_length_total[3]))]
                    ][0]
                    for artist, album in itertools.chain(*[
                        [
                            (artist, album)
                            for album in set([v for i, v in enumerate(request.form.getlist("album")) if request.form.getlist("track")[i].startswith(artist + " - ")])
                        ]
                        for artist in request.form.getlist("artist")
                    ])
                ], key={
                    "scrobbles": lambda artist_album_tracks_scrobbles_length_scrobblesADay_lengthADay: -artist_album_tracks_scrobbles_length_scrobblesADay_lengthADay[3],
                    "scrobbles_a_day": lambda artist_album_tracks_scrobbles_length_scrobblesADay_lengthADay: -artist_album_tracks_scrobbles_length_scrobblesADay_lengthADay[5],
                    "length": lambda artist_album_tracks_scrobbles_length_scrobblesADay_lengthADay: -artist_album_tracks_scrobbles_length_scrobblesADay_lengthADay[4],
                    "length_a_day": lambda artist_album_tracks_scrobbles_length_scrobblesADay_lengthADay: -artist_album_tracks_scrobbles_length_scrobblesADay_lengthADay[6],
                }[request.args.get("sort")])
            })