def craft_list(bot: Bot, update: Update, groups: tuple) -> None:
    """Callback handler: edit the message with a filtered, ordered item list.

    ``groups[0]`` is the filter keyword captured from the callback data
    (all/basic/complex/armour/weapon/recipe/fragment/potion/herb).
    """
    logger.debug("Entering: craft_list with args %s", groups)
    chat = update.effective_chat  # type: Chat
    msg = update.effective_message  # type: Message
    usr = update.effective_user  # type: User
    # Acknowledge the callback so the client-side spinner stops.
    update.callback_query.answer(text='Filtering...')
    kb_markup = InlineKeyboardMarkup(item_filter_kb)
    item_filter = groups[0]
    # Potions and herbs use /brew instead of /craft (set in their branches).
    craft_cmd = 'craft'
    if item_filter == 'all':
        items = dbItem.select(lambda i: i).order_by(lambda i: i.id)
    elif item_filter == 'basic':
        items = dbItem.select(lambda i: not i.complex).order_by(lambda i: i.id)
    elif item_filter == 'complex':
        items = dbItem.select(lambda i: i.complex).order_by(lambda i: i.id)
    elif item_filter == 'armour':
        # Item ids carry a category prefix letter (a/w/r/k/p below).
        items = dbItem.select(lambda i: i.id.startswith('a')).order_by(
            lambda i: i.id)
    elif item_filter == 'weapon':
        items = dbItem.select(lambda i: i.id.startswith('w')).order_by(
            lambda i: i.id)
    elif item_filter == 'recipe':
        items = dbItem.select(lambda i: i.id.startswith('r')).order_by(
            lambda i: i.id)
    elif item_filter == 'fragment':
        items = dbItem.select(lambda i: i.id.startswith('k')).order_by(
            lambda i: i.id)
    elif item_filter == 'potion':
        items = dbItem.select(lambda i: i.id.startswith('p')).order_by(
            lambda i: i.id)
        craft_cmd = 'brew'
    elif item_filter == 'herb':
        # Herbs have purely numeric ids between 39 and 69, matched in SQL:
        # SQL> select * from item WHERE id ~ E'^\\d+$' and id::integer between 39 and 69;
        items = dbItem.select(
            lambda i: orm.raw_sql(r"i.id ~ E'^\\d+$$'") and orm.between(
                orm.raw_sql("i.id::integer"), 39, 69)).order_by(lambda i: i.id)
        craft_cmd = 'brew'
    else:
        # Unknown filter keyword: render an empty list rather than failing.
        items = list()
    items_list = '<b>{} items</b>\n'.format(item_filter.title())
    for item in items:
        items_list += '<code>{:>3}</code> - {}'.format(item.id, item.name)
        # Only complex items get the inline /craft_x (or /brew_x) command.
        items_list += ' (/{}_{})\n'.format(craft_cmd, item.id) if item.complex else '\n'
    msg.edit_text(items_list, reply_markup=kb_markup, parse_mode='HTML')
    logger.debug("Exiting: craft_list")
    return
def get_value():
    """
    Gets latest value they decided on, and the most recent measured value of each node.
    Only considers events in the last 10 seconds.

    > {"summary": 3.456, "1": 2.345, "2": 3.456, "3": 4.567, "4": 5.678}}
    :return: JSON response mapping "summary" to the newest vote value and
             each node id (as string) to that node's latest measured value.
    """
    # Newest vote inside the 10-second window; the window is computed
    # server-side in SQL (Postgres interval arithmetic).
    latest_vote = orm.select(
        m for m in DBVoteMessage
        if m.date > orm.raw_sql("NOW() - '10 seconds'::INTERVAL")).order_by(
            orm.desc(DBVoteMessage.date)).first()
    if not latest_vote:
        # No recent vote at all: answer with an empty object.
        return jsonify({}, allow_all_origin=True)
    # end if
    assert isinstance(latest_vote, DBVoteMessage)
    # Latest INIT message per node: DISTINCT ON (m.node) keeps the first
    # row of each node after sorting by date DESC.
    latest_values = DBMessage.select_by_sql("""
        SELECT DISTINCT ON (m.node) * FROM (
            SELECT * FROM DBmessage WHERE type = $INIT AND date >= NOW() - '10 seconds'::INTERVAL
        ) as m ORDER BY m.node, m.date DESC
    """)
    data = {"summary": latest_vote.value}
    for msg in latest_values:
        assert isinstance(msg, DBInitMessage)
        data[str(msg.node)] = msg.value
    # end for
    return jsonify(data, allow_all_origin=True)
def format_similarity(field: str, search_string: str, table='s') -> raw_sql:
    """Build a similarity WHERE fragment for one column.

    The ``$search_string`` placeholder is resolved by the ORM by name at
    query-translation time; ``%%`` presumably escapes to the SQL ``%``
    similarity operator (pg_trgm) — confirm against the provider.

    :param table: table alias of the field
    :param field: field to perform the similarity search in
    :param search_string: value to search in the database
    """
    clause = f'"{table}"."{field}" %% $search_string'
    return raw_sql(clause)
def search_keyword(cls, query, lim=100):
    """Full-text keyword search ranked by BM25.

    Requires the FTS5 table "FtsIndex" to be generated and populated;
    the FTS table is maintained automatically by SQL triggers, and the
    BM25 ranking function is embedded in FTS5.
    """
    # Reject empty and match-all queries up front.
    if not query or query == "*":
        return []
    matching_rowids = raw_sql(
        'SELECT rowid FROM FtsIndex WHERE FtsIndex MATCH $query ORDER BY bm25(FtsIndex) LIMIT $lim')
    return cls.select(lambda g: g.rowid in matching_rowids)
def get_channel_with_dirname(cls, dirname):
    """Find the single channel whose public key starts with *dirname*.

    BLOB columns cannot be matched with LIKE, so the hex prefix is turned
    into a [lower, upper) range comparison on raw hex literals.
    """
    def pad_to_key_width(hex_text):
        # Right-pad with zeroes up to the full public-key hex width.
        return hex_text + "0" * (PUBLIC_KEY_LEN * 2 - CHANNEL_DIR_NAME_LENGTH)

    lower_literal = "x'" + pad_to_key_width(dirname) + "'"
    # Numeric successor of the prefix, re-padded to the original width.
    successor = ("%X" % (int(dirname, 16) + 1)).zfill(len(dirname))
    upper_literal = "x'" + pad_to_key_width(successor) + "'"
    sql = f"g.public_key >= {lower_literal} AND g.public_key < {upper_literal}"
    return orm.get(g for g in cls if raw_sql(sql))
def search_keyword(cls, query, lim=100):
    """Return entities matching *query* via the FTS5 index, BM25-ranked.

    Requires the FTS5 table "FtsIndex" to be generated and populated;
    it is maintained automatically by SQL triggers.
    """
    if not query or query == "*":  # nothing sensible to match
        return []
    rowid_subquery = raw_sql(
        'SELECT rowid FROM FtsIndex WHERE FtsIndex MATCH $query ORDER BY bm25(FtsIndex) LIMIT $lim'
    )
    return cls.select(lambda entry: entry.rowid in rowid_subquery)
def search_keyword(self, query, lim=100):
    """Full-text keyword search, deduplicated by infohash.

    Requires the FTS5 table "FtsIndex" to be generated and populated;
    the FTS table is maintained automatically by SQL triggers, and BM25
    ranking is embedded in FTS5.  Rows sharing an infohash collapse to
    one result; rows without an infohash group by their own rowid.
    """
    # Sanitize FTS query: empty or match-all queries return nothing.
    if not query or query == "*":
        return []
    fts_ids = raw_sql(
        """SELECT rowid FROM ChannelNode WHERE rowid IN (SELECT rowid FROM FtsIndex WHERE FtsIndex MATCH $query ORDER BY bm25(FtsIndex) LIMIT $lim) GROUP BY coalesce(infohash, rowid)"""
    )
    return left_join(g for g in self.MetadataNode if g.rowid in fts_ids)  # pylint: disable=E1135
def search_keyword(cls, query, lim=100):
    """Full-text keyword search over the FTS5 index, BM25-ranked.

    Requires the FTS5 table "FtsIndex" to be generated and populated;
    the FTS table is maintained automatically by SQL triggers.

    :param query: FTS MATCH expression; empty or "*" yields no results
    :param lim: maximum number of FTS matches to consider
    :return: Pony query over entities whose rowid matched
    """
    # Sanitize FTS query
    if not query or query == "*":
        return []
    # TODO: optimize this query by removing unnecessary select nests (including Pony-manages selects)
    # Fix for the old "GROUP BY infohash" FIXME: plain GROUP BY infohash
    # merged every NULL-infohash entry into a single group.  Grouping by
    # coalesce(infohash, rowid) still deduplicates entries that share an
    # infohash while keeping each infohash-less entry as its own result.
    fts_ids = raw_sql(
        """SELECT rowid FROM ChannelNode WHERE rowid IN (SELECT rowid FROM FtsIndex WHERE FtsIndex MATCH $query ORDER BY bm25(FtsIndex) LIMIT $lim) GROUP BY coalesce(infohash, rowid)""")
    return cls.select(lambda g: g.rowid in fts_ids)
def search_keyword(cls, query, lim=100):
    """Full-text keyword search via FTS5, deduplicated by infohash.

    Requires the FTS5 table "FtsIndex" to be generated and populated;
    it is maintained automatically by SQL triggers.  BM25 ranking is
    embedded in FTS5.  Entries sharing an infohash collapse to a single
    result; entries without one group by their own rowid.
    """
    # Sanitize FTS query
    if not query or query == "*":
        return []
    # TODO: optimize this query by removing unnecessary select nests (including Pony-manages selects)
    fts_ids = raw_sql(
        """SELECT rowid FROM ChannelNode WHERE rowid IN (SELECT rowid FROM FtsIndex WHERE FtsIndex MATCH $query ORDER BY bm25(FtsIndex) LIMIT $lim) GROUP BY coalesce(infohash, rowid)"""
    )
    return left_join(g for g in cls if g.rowid in fts_ids)  # pylint: disable=E1135
def search_keyword(cls, query, entry_type=None, lim=100):
    """Full-text keyword search restricted to one metadata type.

    Requires the FTS5 table "FtsIndex" to be generated and populated;
    the FTS table is maintained automatically by SQL triggers, and BM25
    ranking is embedded in FTS5.

    :param query: user-supplied search string (quoted into an FTS token)
    :param entry_type: metadata type to match; defaults to cls._discriminator_
    :param lim: maximum number of FTS matches to consider
    """
    # Sanitize FTS query.  The "*" guard matches the sibling
    # search_keyword implementations; a lone "*" would otherwise be
    # quoted into the malformed FTS expression '""*'.
    if not query or query == "*":
        return []
    # Quote the query as a single FTS5 string token, doubling any embedded
    # double quotes -- FTS5 escapes a literal " inside a quoted string as "".
    # Without this, a query containing '"' breaks out of the quoted token
    # (FTS syntax error / query injection).
    if query.endswith("*"):
        query = "\"" + query[:-1].replace('"', '""') + "\"" + "*"
    else:
        query = "\"" + query.replace('"', '""') + "\""
    metadata_type = entry_type or cls._discriminator_
    sql_search_fts = "metadata_type = %d AND rowid IN (SELECT rowid FROM FtsIndex WHERE " \
                     "FtsIndex MATCH $query ORDER BY bm25(FtsIndex) LIMIT %d)" % (metadata_type, lim)
    return cls.select(lambda x: orm.raw_sql(sql_search_fts))[:]
def get_channel_with_dirname(cls, dirname):
    """Look up a channel by its on-disk directory name.

    The dirname is a public-key hex prefix followed by
    CHANNEL_DIR_NAME_ID_LENGTH hex digits of the channel id.  BLOB
    columns cannot be matched with LIKE, so the key prefix becomes a
    [lower, upper) range comparison on raw hex literals.
    """
    pk_prefix = dirname[:-CHANNEL_DIR_NAME_ID_LENGTH]

    def pad_to_key_width(hex_text):
        # Right-pad with zeroes up to the full public-key hex width.
        return hex_text + "0" * (PUBLIC_KEY_LEN * 2 - CHANNEL_DIR_NAME_LENGTH)

    lower_literal = "x'" + pad_to_key_width(pk_prefix) + "'"
    next_prefix = f"{int(pk_prefix, 16) + 1:X}".zfill(len(pk_prefix))
    upper_literal = "x'" + pad_to_key_width(next_prefix) + "'"
    sql = f"g.public_key >= {lower_literal} AND g.public_key < {upper_literal}"

    # Channel id: the trailing hex digits of the dirname.
    id_ = int(dirname[-CHANNEL_DIR_NAME_ID_LENGTH:], 16)
    return orm.select(g for g in cls if g.id_ == id_ and raw_sql(sql)).first()
def get_value_v2():
    """
    Gets latest value they decided on, and the most recent measured value of each node.
    Only considers events in the last 10 seconds.

    {
        "summary": None,
        "leader": 1,  # done later via observing latest LeaderChange events.
        "nodes": []
    }
    :return: JSON response in the structured v2 shape above.
    """
    # Newest vote inside the 10-second window (interval computed in SQL).
    latest_vote = orm.select(
        m for m in DBVoteMessage
        if m.date > orm.raw_sql("NOW() - '10 seconds'::INTERVAL")).order_by(
            orm.desc(DBVoteMessage.date)).first()
    # Latest INIT message per node (DISTINCT ON keeps the first row of
    # each node after ordering by date DESC).
    latest_values = DBMessage.select_by_sql("""
        SELECT DISTINCT ON (m.node) * FROM (
            SELECT * FROM DBmessage WHERE type = $INIT AND date >= NOW() - '10 seconds'::INTERVAL
        ) as m ORDER BY m.node, m.date DESC
    """)
    data = {
        "summary": None,
        "leader": 1,  # done later via observing latest LeaderChange events.
        "nodes": []
    }
    # Unlike v1, a missing vote still produces a full response skeleton.
    if latest_vote:
        assert isinstance(latest_vote, DBVoteMessage)
        data["summary"] = {"value": latest_vote.value}
    # end if
    for msg in latest_values:
        assert isinstance(msg, DBInitMessage)
        data["nodes"].append({"node": str(msg.node), "value": msg.value})
    # end for
    return jsonify(data, allow_all_origin=True)
def parse(condition, column="data"):
    """Translate a shorthand key condition into a Pony filter fragment.

    Supports boolean composition ('a || b', 'a && b'), match-all '*'
    (returns None), SQL LIKE patterns ('%'), and the key shorthands
    '(x)' contains, ')x(' not-contains, '~x' not-equal, '^x$' equal,
    '^x' prefix, 'x$' suffix.  Anything containing comparison characters
    falls through to parse_filter() as a JSON-field condition.
    """
    if '||' in condition:
        left, right = condition.split('||', 1)
        return ab_parse(left.strip(), right.strip(), 'or', column=column)
    if '&&' in condition:
        left, right = condition.split('&&', 1)
        return ab_parse(left.strip(), right.strip(), 'and', column=column)
    # --- primary-key shorthands ---
    if condition == '*':
        return None
    if '%' in condition:
        # SQL LIKE pattern (noted upstream: not working on MySQL right now)
        return raw_sql(f'e.key like "{condition}"')
    head, tail = condition[0], condition[-1]
    inner = condition[1:-1]
    if head == '(' and tail == ')':
        return f'"{inner}" in e.key'
    if head == ')' and tail == '(':
        return f'"{inner}" not in e.key'
    if head == '~':
        return f'e.key != "{condition[1:]}"'
    if head == '^' and tail == '$':
        return f'e.key == "{inner}"'
    if head == '^':
        return f'e.key.startswith("{condition[1:]}")'
    if tail == '$':
        return f'e.key.endswith("{condition[:-1]}")'
    if not any(ch in condition for ch in '=><!:.?'):
        # Bare word: exact key match.
        return f'e.key == "{condition}"'
    # json key condition
    return parse_filter(condition, column=column)
def get(limit, padding, orderby: list, search=None, lastInterpretation=None,
        interpretationNumber=None, score=None, showlights=None, vocals=None,
        odlc=None, arrangements=None):
    """Search endpoint: filtered, ordered, paginated song list.

    Returns a (payload, status) pair for the web framework.  The range
    filters treat [0, 100] as the "no restriction" sentinel, and an
    upper bound >= 100 means "no maximum" (per the comments below).
    Note: when ``search`` is given, the caller's ``orderby`` list is
    mutated in place ('-similarity' is prepended).
    """
    search_results = orm.select(s for s in Song)
    # fuzzy search
    if search:
        search_results = search_results.where(
            orm.raw_sql('similarity("s"."fts_col", $search) > .1'))
        # add similarity to the order by array
        orderby.insert(0, '-similarity')
    # does the song has showlights
    if showlights:
        search_results = search_results.where(
            lambda s: s.showlights == showlights)
    # does the song display lyrics
    if vocals:
        search_results = search_results.where(lambda s: s.vocals == vocals)
    # is the song a odlc or a cdlc
    if odlc is not None:
        search_results = search_results.where(lambda s: s.official == odlc)
    # --- arrangement specific fields ---
    # does the song have certain arrangements
    if arrangements:
        # Build "arrangements[0] in s.arrangements.type or ..." then strip
        # the leading "or " from the first clause.
        filter_function = ''
        for i in range(0, len(arrangements)):
            filter_function += 'or arrangements[{}] in s.arrangements.type '.format(
                i)
        filter_function = filter_function.split('or ', 1)[1]
        search_results = search_results.where(filter_function)
    # --- interpretation specific fields ---
    # how many times does the song was played
    if interpretationNumber != [0, 100]:
        lower_bound = min(interpretationNumber[0], interpretationNumber[1])
        upper_bound = max(interpretationNumber[0], interpretationNumber[1])
        search_results = search_results.where(
            lambda s: lower_bound <= orm.count(s.interpretations) and
            (orm.count(s.interpretations) <= upper_bound or upper_bound >= 100
             ))
    if lastInterpretation != [0, 100]:
        # higher bound in days to allow no maximum calculation when >=100
        upper_bound = max(lastInterpretation[0], lastInterpretation[1])
        # datetime bounds to be used in where clause
        older_bound = datetime.now() - timedelta(
            days=max(lastInterpretation[0], lastInterpretation[1]))
        youger_bound = datetime.now() - timedelta(
            days=min(lastInterpretation[0], lastInterpretation[1]))
        search_results = search_results.where(
            lambda s: youger_bound > orm.max(s.interpretations.date) and
            (orm.max(s.interpretations.date) <= older_bound or
             upper_bound >= 100))
    if score != [0, 100]:
        lower_bound = Decimal(min(score[0], score[1]))
        upper_bound = Decimal(max(score[0], score[1]))
        search_results = search_results.where(
            lambda s: lower_bound <= orm.max(s.interpretations.score) and
            orm.max(s.interpretations.score) <= upper_bound)
    # apply order by, limit and padding
    search_results = search_results \
        .order_by(format_order_by(orderby)) \
        .limit(limit=limit, offset=padding)
    return {'data': [s.serialize() for s in search_results]}, 200
def _size(self):
    # Length of this record's molecules array, computed in SQL via
    # array_length() instead of loading the array into Python.
    return select(
        raw_sql('array_length(x.molecules, 1)')
        for x in self.__class__ if x.id == self.id).first()
def get_branches(cls):
    """Return the last '/'-separated segment of every ref (e.g. the branch
    name), extracted in SQL via SUBSTRING_INDEX (MySQL)."""
    return select(raw_sql("SUBSTRING_INDEX(ref, '/', -1)") for b in cls)
def suggest_artist(search):
    """Suggest up to five distinct artist names for *search*, ranked by
    the SQL similarity of the artist column to the search string."""
    suggestions = orm.select(
        (s.artist, orm.raw_sql('similarity("s"."artist", $search)'))
        for s in Song)
    suggestions = suggestions.distinct()
    suggestions = suggestions.where(format_similarity('artist', search))
    suggestions = suggestions.order_by(
        format_order_by(['-similarity', 'artist'], similarity_col='artist'))
    return suggestions.limit(limit=5)
def suggest_song(search, lastInterpretation=None, interpretationNumber=None,
                 score=None, showlights=None, vocals=None, odlc=None,
                 arrangements=None):
    """Suggest up to five (name, album, artist, similarity) tuples matching
    *search*, applying the same filters as the full search endpoint.

    Range filters treat [0, 100] as the "no restriction" sentinel; an
    upper bound >= 100 means "no maximum" (per the comments below).
    """
    # fuzzy search
    song_results = orm \
        .select((s.name, s.album, s.artist,
                 orm.raw_sql('similarity("s"."fts_col", $search)')) for s in Song) \
        .distinct() \
        .where(format_similarity('fts_col', search))
    # does the song has showlights
    if showlights:
        song_results = song_results.where(lambda s: s.showlights == showlights)
    # does the song display lyrics
    if vocals:
        song_results = song_results.where(lambda s: s.vocals == vocals)
    # is the song a odlc or a cdlc
    if odlc is not None:
        song_results = song_results.where(lambda s: s.official == odlc)
    # --- arrangement specific fields ---
    # does the song have certain arrangements
    if arrangements:
        # Build "arrangements[0] in s.arrangements.type or ..." and strip
        # the leading "or " from the first clause.
        filter_function = ''
        for i in range(0, len(arrangements)):
            filter_function += 'or arrangements[{}] in s.arrangements.type '.format(
                i)
        filter_function = filter_function.split('or ', 1)[1]
        song_results = song_results.where(filter_function)
    # --- interpretation specific fields ---
    # how many times does the song was played
    if interpretationNumber != [0, 100]:
        lower_bound = min(interpretationNumber[0], interpretationNumber[1])
        upper_bound = max(interpretationNumber[0], interpretationNumber[1])
        song_results = song_results.where(lambda s: lower_bound <= orm.count(
            s.interpretations) and (orm.count(s.interpretations) <= upper_bound
                                    or upper_bound >= 100))
    if lastInterpretation != [0, 100]:
        # higher bound in days to allow no maximum calculation when >=100
        upper_bound = max(lastInterpretation[0], lastInterpretation[1])
        # datetime bounds to be used in where clause
        older_bound = datetime.now() - timedelta(
            days=max(lastInterpretation[0], lastInterpretation[1]))
        youger_bound = datetime.now() - timedelta(
            days=min(lastInterpretation[0], lastInterpretation[1]))
        song_results = song_results.where(lambda s: youger_bound > orm.max(
            s.interpretations.date) and (orm.max(s.interpretations.date) <=
                                         older_bound or upper_bound >= 100))
    if score != [0, 100]:
        lower_bound = Decimal(min(score[0], score[1]))
        upper_bound = Decimal(max(score[0], score[1]))
        song_results = song_results.where(
            lambda s: lower_bound <= orm.max(s.interpretations.score) and orm.
            max(s.interpretations.score) <= upper_bound)
    # apply order by, limit and padding
    return song_results \
        .order_by(format_order_by(['-similarity', 'name', 'album', 'artist'])) \
        .limit(limit=5)
def get_entries_query(
    self,
    metadata_type=None,
    channel_pk=None,
    exclude_deleted=False,
    hide_xxx=False,
    exclude_legacy=False,
    origin_id=None,
    sort_by=None,
    sort_desc=True,
    max_rowid=None,
    txt_filter=None,
    subscribed=None,
    category=None,
    attribute_ranges=None,
    infohash=None,
    infohash_set=None,
    id_=None,
    complete_channel=None,
    self_checked_torrent=None,
    cls=None,
    health_checked_after=None,
    popular=None,
):
    """
    This method implements REST-friendly way to get entries from the database.
    Each keyword argument adds one optional filter or sort clause on top of
    a base query (an FTS search when txt_filter is given, otherwise a
    left_join over all rows of cls).
    :return: PonyORM query object corresponding to the given params.
    """
    # Warning! For Pony magic to work, iteration variable name (e.g. 'g') should be the same everywhere!
    if cls is None:
        cls = self.ChannelNode
    pony_query = self.search_keyword(txt_filter, lim=1000) if txt_filter else left_join(g for g in cls)
    # A single infohash is folded into the set form so both are handled alike.
    infohash_set = infohash_set or ({infohash} if infohash else None)
    if popular:
        if metadata_type != REGULAR_TORRENT:
            raise TypeError('With `popular=True`, only `metadata_type=REGULAR_TORRENT` is allowed')
        # Only torrents whose health was checked recently and that have
        # at least one seeder or leecher count as "popular".
        t = time() - POPULAR_TORRENTS_FRESHNESS_PERIOD
        health_list = list(
            select(
                health
                for health in self.TorrentState
                if health.last_check >= t and (health.seeders > 0 or health.leechers > 0)
            ).order_by(
                lambda health: (desc(health.seeders), desc(health.leechers), desc(health.last_check))
            )[:POPULAR_TORRENTS_COUNT]
        )
        pony_query = pony_query.where(lambda g: g.health in health_list)
    if max_rowid is not None:
        pony_query = pony_query.where(lambda g: g.rowid <= max_rowid)
    if metadata_type is not None:
        # metadata_type may be a single value or an iterable of values;
        # the non-iterable case raises TypeError on `in` and falls back to ==.
        try:
            pony_query = pony_query.where(lambda g: g.metadata_type in metadata_type)
        except TypeError:
            pony_query = pony_query.where(lambda g: g.metadata_type == metadata_type)
    pony_query = (
        pony_query.where(public_key=(b"" if channel_pk == NULL_KEY_SUBST else channel_pk))
        if channel_pk is not None
        else pony_query
    )
    if attribute_ranges is not None:
        for attr, left, right in attribute_ranges:
            if (
                self.ChannelNode._adict_.get(attr)  # pylint: disable=W0212
                or self.ChannelNode._subclass_adict_.get(attr)  # pylint: disable=W0212
            ) is None:  # Check against code injection
                raise AttributeError("Tried to query for non-existent attribute")
            if left is not None:
                pony_query = pony_query.where(f"g.{attr} >= left")
            if right is not None:
                pony_query = pony_query.where(f"g.{attr} < right")
    # origin_id can be zero, for e.g. root channel
    pony_query = pony_query.where(id_=id_) if id_ is not None else pony_query
    pony_query = pony_query.where(origin_id=origin_id) if origin_id is not None else pony_query
    # NOTE(review): this filters on g.subscribed being truthy whenever
    # `subscribed` is not None -- the value of `subscribed` itself is never
    # compared.  Confirm that subscribed=False is not expected to select
    # unsubscribed entries.
    pony_query = pony_query.where(lambda g: g.subscribed) if subscribed is not None else pony_query
    pony_query = pony_query.where(lambda g: g.tags == category) if category else pony_query
    pony_query = pony_query.where(lambda g: g.status != TODELETE) if exclude_deleted else pony_query
    pony_query = pony_query.where(lambda g: g.xxx == 0) if hide_xxx else pony_query
    pony_query = pony_query.where(lambda g: g.status != LEGACY_ENTRY) if exclude_legacy else pony_query
    pony_query = pony_query.where(lambda g: g.infohash in infohash_set) if infohash_set else pony_query
    pony_query = (
        pony_query.where(lambda g: g.health.self_checked == self_checked_torrent)
        if self_checked_torrent is not None
        else pony_query
    )
    # ACHTUNG! Setting complete_channel to True forces the metadata type to Channels only!
    pony_query = (
        pony_query.where(lambda g: g.metadata_type == CHANNEL_TORRENT and g.timestamp == g.local_version)
        if complete_channel
        else pony_query
    )
    if health_checked_after is not None:
        pony_query = pony_query.where(lambda g: g.health.last_check >= health_checked_after)
    # Sort the query
    pony_query = pony_query.sort_by("desc(g.rowid)" if sort_desc else "g.rowid")
    if sort_by == "HEALTH":
        pony_query = pony_query.sort_by(
            "(desc(g.health.seeders), desc(g.health.leechers))"
            if sort_desc
            else "(g.health.seeders, g.health.leechers)"
        )
    elif sort_by == "size" and not issubclass(cls, self.ChannelMetadata):
        # Remark: this can be optimized to skip cases where size field does not matter
        # When querying for mixed channels / torrents lists, channels should have priority over torrents
        sort_expression = "desc(g.num_entries), desc(g.size)" if sort_desc else "g.num_entries, g.size"
        pony_query = pony_query.sort_by(sort_expression)
    elif sort_by:
        sort_expression = raw_sql(f"g.{sort_by} COLLATE NOCASE" + (" DESC" if sort_desc else ""))
        pony_query = pony_query.sort_by(sort_expression)
    if sort_by is None:
        if txt_filter:
            # Text searches rank channels first, then collections, then the
            # rest, with healthier entries earlier inside each group.
            pony_query = pony_query.sort_by(
                f"""
                (1 if g.metadata_type == {CHANNEL_TORRENT} else 2 if g.metadata_type == {COLLECTION_NODE} else 3),
                desc(g.health.seeders), desc(g.health.leechers)
                """
            )
        elif popular:
            pony_query = pony_query.sort_by('(desc(g.health.seeders), desc(g.health.leechers))')
    return pony_query
def search_multi(self, conditions, on='data', for_update=False, fuzzy=True,
                 debug=False, mode='normal', order='desc', order_by=None,
                 begin=None, end=None, force=False):
    """Search the store with a list of AND-groups combined by OR.

    *conditions* is a list of dicts; the key/value pairs of each dict are
    ANDed together and the dicts themselves are ORed.  SQL generation
    differs between the MySQL (json_extract/json_search) and the
    PostgreSQL (jsonb_path_exists) providers.

    NOTE(review): keys and values are interpolated into raw SQL without
    escaping -- injection risk if *conditions* comes from untrusted input.
    """
    if on not in ['data', 'meta']:
        raise StoreException('on invalid')
    elems = select(e for e in self.store)
    or_sqls = []
    for condition in conditions:
        and_sqls = []
        for key, value in condition.items():
            if isinstance(value, bool):
                if self.provider == 'mysql':
                    if value == True:
                        sql = f'json_extract(`e`.`{on}`, "$$.{key}") = true'
                    else:
                        sql = f'json_extract(`e`.`{on}`, "$$.{key}") = false'
                    # elems = elems.filter(lambda e: raw_sql(sql))
                else:
                    if value == True:
                        sql = f'jsonb_path_exists("e"."{on}", \'$$.{key} ? (@ == true)\')'
                    else:
                        sql = f'jsonb_path_exists("e"."{on}", \'$$.{key} ? (@ == false)\')'
                    # elems = elems.filter(lambda e: raw_sql(sql))
            elif isinstance(value, int) or isinstance(value, float):
                if self.provider == 'mysql':
                    sql = f'json_extract(`e`.`{on}`, "$$.{key}") = {value}'
                    # elems = elems.filter(lambda e: raw_sql(sql))
                else:
                    sql = f'jsonb_path_exists("e"."{on}", \'$$.{key} ? (@ == {value})\')'
                    # elems = elems.filter(lambda e: raw_sql(sql))
            elif isinstance(value, str):
                # Strings match case-insensitively / as substring when fuzzy.
                if fuzzy:
                    if self.provider == 'mysql':
                        sql = f'json_search(`e`.`{on}`, "all", "%%{value}%%", NULL, "$$.{key}")'
                        # elems = elems.filter(lambda e: raw_sql(sql))
                    else:
                        sql = f'jsonb_path_exists("e"."{on}", \'$$.{key} ? (@ like_regex "{value}" flag "i")\')'
                        # elems = elems.filter(lambda e: raw_sql(sql))
                else:
                    if self.provider == 'mysql':
                        sql = f'json_extract(`e`.`{on}`, "$$.{key}") = "{value}"'
                        # elems = elems.filter(lambda e: raw_sql(sql))
                    else:
                        sql = f'jsonb_path_exists("e"."{on}", \'$$.{key} ? (@ == "{value}")\')'
                        # elems = elems.filter(lambda e: raw_sql(sql))
            else:
                raise StoreException('value type not support')
            and_sqls.append(sql)
        and_sql = ' AND '.join(and_sqls)
        or_sqls.append(and_sql)
    # AND binds tighter than OR in SQL, so no extra parentheses are added.
    or_sql = ' OR '.join(or_sqls)
    elems = elems.filter(lambda e: raw_sql(or_sql))
    return self.search_return(elems, mode=mode, order_by=order_by, order=order,
                              for_update=for_update, begin=begin, end=end,
                              force=force, debug=debug)
def search(self, condition, on='data', for_update=False, fuzzy=True, debug=False,
           mode='normal', order='desc', order_by=None, begin=None, end=None,
           force=False):
    """Search the store by primary key or by JSON field condition.

    ``on='key'`` matches primary keys exactly (str) or by membership (list).
    ``on='data'``/``'meta'`` filters on the JSON column; each key/value pair
    of *condition* adds one filter.  Generated SQL differs between the
    MySQL (json_extract/json_contains/json_search) and the PostgreSQL
    (jsonb_path_exists / '#>' / '?') providers.

    NOTE(review): keys and values are interpolated into raw SQL without
    escaping -- injection risk if *condition* comes from untrusted input.
    """
    if on == 'key':
        if isinstance(condition, str):
            elems = select(e for e in self.store if e.key == condition)
            return self.search_return(elems, mode=mode, order_by=order_by, order=order,
                                      for_update=for_update, begin=begin, end=end,
                                      force=force, debug=debug)
        elif isinstance(condition, list):
            elems = select(e for e in self.store if e.key in condition)
            return self.search_return(elems, mode=mode, order_by=order_by, order=order,
                                      for_update=for_update, begin=begin, end=end,
                                      force=force, debug=debug)
        raise StoreException('on key invalid')
    if on not in ['data', 'meta']:
        raise StoreException('on invalid')
    elems = select(e for e in self.store)
    if condition:
        for key, value in condition.items():
            if isinstance(value, list):
                # list value: IN-style membership test on a (possibly nested) path.
                if '.' in key:
                    keys = key.split('.')
                else:
                    keys = [key]
                if self.provider == 'mysql':
                    for i, k in enumerate(keys):
                        if i == 0:
                            sql = f'e.{on}["{k}"]'
                        else:
                            sql += f'["{k}"]'
                    sql += f' in {value}'
                    elems = elems.filter(sql)
                else:
                    # Build a Postgres "->/->>'k'" path, then cast the text
                    # result when the list elements are non-string.
                    sql = f'e.data'
                    for i, k in enumerate(keys):
                        if i == len(keys) - 1:
                            sql += '->>'
                        else:
                            sql += '->'
                        sql += f"'{k}'"
                    v = []
                    cast = None
                    for e in value:
                        if isinstance(e, bool):
                            cast = 'boolean'
                            if e == True:
                                v.append("true")
                            elif e == False:
                                v.append("false")
                        elif isinstance(e, float):
                            ee = f'{e}'
                            v.append(ee)
                            cast = 'float'
                        elif isinstance(e, int):
                            ee = f'{e}'
                            v.append(ee)
                            cast = 'integer'
                        elif isinstance(e, str):
                            ee = f"'{e}'"
                            v.append(ee)
                        else:
                            raise StoreException('k invalid')
                    value_str = ', '.join(v)
                    if cast:
                        sql = f'cast({sql} as {cast}) in ({value_str})'
                    else:
                        sql += f' in ({value_str})'
                    elems = elems.filter(lambda e: raw_sql(sql))
            elif isinstance(value, dict):
                # dict value: explicit {'op'/'operator', 'val'/'value'} form.
                op = value.get('operator') or value.get('op')
                val = value.get('value') or value.get('val')
                if op is None or val is None:
                    raise StoreException('operator and value not found')
                if op == 'in' or op == 'any_in':
                    # any_in: match if the JSON array contains ANY of the values.
                    if isinstance(val, list):
                        if self.provider == 'mysql':
                            sqls = []
                            for v in val:
                                # sql = f'(json_contains(`e`.`data`, \'["{v}"]\', \'$$.{key}\') or json_contains_path(`e`.`data`, \'one\', \'$$.{key}.{v}\'))'
                                sql = f'json_contains(`e`.`{on}`, \'["{v}"]\', \'$$.{key}\')'
                                sqls.append(sql)
                            sql = ' OR '.join(sqls)
                            elems = elems.filter(lambda e: raw_sql(sql))
                        else:
                            if '.' in key:
                                key = key.replace('.', ',')
                                # raise StoreException('jsonpath not support for in operator')
                            sqls = []
                            for v in val:
                                sql = f'("e"."{on}" #> \'{{ {key} }}\' ? \'{v}\')'
                                sqls.append(sql)
                            sql = ' OR '.join(sqls)
                            elems = elems.filter(lambda e: raw_sql(sql))
                    else:
                        if self.provider == 'mysql':
                            # sql = f'(json_contains(`e`.`data`, \'["{val}"]\', \'$$.{key}\') or json_contains_path(`e`.`data`, \'one\', \'$$.{key}.{val}\'))'
                            sql = f'json_contains(`e`.`{on}`, \'["{val}"]\', \'$$.{key}\')'  # or json_contains_path(`e`.`data`, \'one\', \'$$.{key}.{val}\'))'
                            elems = elems.filter(lambda e: raw_sql(sql))
                        else:
                            if '.' in key:
                                key = key.replace('.', ',')
                            sql = f'("e"."{on}" #> \'{{ {key} }}\' ? \'{val}\')'
                            elems = elems.filter(lambda e: raw_sql(sql))
                elif op == 'ain' or op == 'all_in':
                    # all_in: match only if the JSON array contains ALL values.
                    if isinstance(val, list):
                        if self.provider == 'mysql':
                            sql = f'json_contains(`e`.`{on}`, \'{json.dumps(val)}\', \'$$.{key}\')'
                            elems = elems.filter(lambda e: raw_sql(sql))
                        else:
                            if '.' in key:
                                key = key.replace('.', ',')
                                # raise StoreException('jsonpath not support for in operator')
                            # NOTE(review): filters are stacked here with
                            # `lambda e: raw_sql(sql)` while `sql` is rebound
                            # each iteration -- confirm Pony captures the value
                            # at filter() time, otherwise every stacked filter
                            # would see the last sql value.
                            for v in val:
                                sql = f'("e"."{on}" #> \'{{ {key} }}\' ? \'{v}\')'
                                elems = elems.filter(
                                    lambda e: raw_sql(sql))
                    else:
                        if self.provider == 'mysql':
                            # sql = f'(json_contains(`e`.`data`, \'["{val}"]\', \'$$.{key}\') or json_contains_path(`e`.`data`, \'one\', \'$$.{key}.{val}\'))'
                            sql = f'json_contains(`e`.`{on}`, \'["{val}"]\', \'$$.{key}\')'  # or json_contains_path(`e`.`data`, \'one\', \'$$.{key}.{val}\'))'
                            elems = elems.filter(lambda e: raw_sql(sql))
                        else:
                            if '.' in key:
                                key = key.replace('.', ',')
                                # raise StoreException('jsonpath not support for in operator')
                            sql = f'("e"."{on}" #> \'{{ {key} }}\' ? \'{val}\')'
                            elems = elems.filter(lambda e: raw_sql(sql))
                else:
                    # Generic comparison operator; normalized per provider
                    # ('==' for jsonpath, '=' for MySQL).
                    if self.provider == 'mysql':
                        if op == '==':
                            op = '='
                        sql = None
                        if isinstance(val, bool):
                            if val == True:
                                sql = f'json_extract(`e`.`{on}`, "$$.{key}") {op} true'
                            else:
                                sql = f'json_extract(`e`.`{on}`, "$$.{key}") {op} false'
                        elif isinstance(val, int) or isinstance(val, float):
                            sql = f'json_extract(`e`.`{on}`, "$$.{key}") {op} {val}'
                        elif isinstance(val, str):
                            sql = f'json_extract(`e`.`{on}`, "$$.{key}") {op} "{val}"'
                        else:
                            detail = f'val {val} type {type(val)} invalid'
                            raise StoreException(detail)
                        if sql:
                            elems = elems.filter(lambda e: raw_sql(sql))
                    else:
                        if op == '=':
                            op = '=='
                        sql = None
                        if isinstance(val, bool):
                            if val == True:
                                sql = f'jsonb_path_exists("e"."{on}", \'$$.{key} ? (@ == true)\')'
                            else:
                                sql = f'jsonb_path_exists("e"."{on}", \'$$.{key} ? (@ == false)\')'
                        elif isinstance(val, int) or isinstance(val, float):
                            sql = f'jsonb_path_exists("e"."{on}", \'$$.{key} ? (@ {op} {val})\')'
                        elif isinstance(val, str):
                            sql = f'jsonb_path_exists("e"."{on}", \'$$.{key} ? (@ {op} "{val}")\')'
                        else:
                            detail = f'val {val} type {type(val)} invalid'
                            raise StoreException(detail)
                        if sql:
                            elems = elems.filter(lambda e: raw_sql(sql))
            elif isinstance(value, bool):
                if self.provider == 'mysql':
                    if value == True:
                        sql = f'json_extract(`e`.`{on}`, "$$.{key}") = true'
                    else:
                        sql = f'json_extract(`e`.`{on}`, "$$.{key}") = false'
                    elems = elems.filter(lambda e: raw_sql(sql))
                else:
                    if value == True:
                        sql = f'jsonb_path_exists("e"."{on}", \'$$.{key} ? (@ == true)\')'
                    else:
                        sql = f'jsonb_path_exists("e"."{on}", \'$$.{key} ? (@ == false)\')'
                    elems = elems.filter(lambda e: raw_sql(sql))
            elif isinstance(value, int) or isinstance(value, float):
                if self.provider == 'mysql':
                    sql = f'json_extract(`e`.`{on}`, "$$.{key}") = {value}'
                    elems = elems.filter(lambda e: raw_sql(sql))
                else:
                    sql = f'jsonb_path_exists("e"."{on}", \'$$.{key} ? (@ == {value})\')'
                    elems = elems.filter(lambda e: raw_sql(sql))
            elif isinstance(value, str):
                # Strings match as substring / case-insensitive regex when fuzzy.
                if fuzzy:
                    if self.provider == 'mysql':
                        sql = f'json_search(`e`.`{on}`, "all", "%%{value}%%", NULL, "$$.{key}")'
                        elems = elems.filter(lambda e: raw_sql(sql))
                    else:
                        sql = f'jsonb_path_exists("e"."{on}", \'$$.{key} ? (@ like_regex "{value}" flag "i")\')'
                        elems = elems.filter(lambda e: raw_sql(sql))
                else:
                    if self.provider == 'mysql':
                        sql = f'json_extract(`e`.`{on}`, "$$.{key}") = "{value}"'
                        elems = elems.filter(lambda e: raw_sql(sql))
                    else:
                        sql = f'jsonb_path_exists("e"."{on}", \'$$.{key} ? (@ == "{value}")\')'
                        elems = elems.filter(lambda e: raw_sql(sql))
            else:
                raise StoreException('value type not support')
    return self.search_return(elems, mode=mode, order_by=order_by, order=order,
                              for_update=for_update, begin=begin, end=end,
                              force=force, debug=debug)