def to_sql(self, schema, not_null=False, boolean=False):
    """
    Render a NULL-safe equality test between lhs and rhs as boolean SQL.

    Both sides are rendered per nested column; for each pair, each storage
    type is compared.  The letters in "bsnj" are the per-type SQL slots
    (presumably boolean/string/number/json — confirm against the sql dict
    producer).
    """
    lhs = self.lhs.to_sql(schema)
    rhs = self.rhs.to_sql(schema)
    acc = []
    if len(lhs) != len(rhs):
        Log.error("lhs and rhs have different dimensionality!?")
    for l, r in zip(lhs, rhs):
        for t in "bsnj":
            if l.sql[t] == None:
                if r.sql[t] == None:
                    # neither side has this type: nothing to compare
                    pass
                else:
                    # only rhs has this type: equal only when rhs IS NULL
                    acc.append(sql_iso(r.sql[t]) + " IS " + SQL_NULL)
            else:
                if r.sql[t] == None:
                    # only lhs has this type: equal only when lhs IS NULL
                    acc.append(sql_iso(l.sql[t]) + " IS " + SQL_NULL)
                else:
                    # both present: equal values, OR both NULL
                    acc.append("(" + sql_iso(l.sql[t]) + " = " + sql_iso(r.sql[t]) + " OR (" + sql_iso(l.sql[t]) + " IS" + SQL_NULL + SQL_AND + "(" + r.sql[t] + ") IS NULL))")
    if not acc:
        # no comparable parts at all
        return FALSE.to_sql(schema)
    else:
        return wrap([{"name": ".", "sql": {"b": SQL_OR.join(acc)}}])
def _db_insert_column(self, column):
    """
    Persist one column record into the metadata table.

    `db_table_name` and `all_columns` are module-level SQL fragments
    (presumably carrying their own spacing — confirm in module scope).
    `nested_path` and `partitions` hold structures, so they are stored
    as JSON text rather than raw values.
    """
    try:
        self.db.execute(
            "INSERT INTO" + db_table_name + sql_iso(all_columns) + "VALUES" + sql_iso(
                sql_list(
                    [
                        quote_value(column[c.name])
                        if c.name not in ("nested_path", "partitions")
                        else quote_value(value2json(column[c.name]))
                        for c in METADATA_COLUMNS
                    ]
                )
            )
        )
    except Exception as e:
        e = Except.wrap(e)
        # `in e` tests the exception text for these substrings
        if "UNIQUE constraint failed" in e or " are not unique" in e:
            # THIS CAN HAPPEN BECAUSE todo HAS OLD COLUMN DATA
            self.todo.add((UPDATE, column), force=True)
        else:
            Log.error("do not know how to handle", cause=e)
def to_sql(self, schema, not_null=False, boolean=False):
    """
    Render an equality test as boolean SQL, coercing 'T'/'F' literals to
    booleans when the other side is boolean-typed.
    """
    lhs = SQLang[self.lhs].partial_eval()
    rhs = SQLang[self.rhs].partial_eval()
    lhs_sql = lhs.to_sql(schema, not_null=True)
    rhs_sql = rhs.to_sql(schema, not_null=True)
    # WHEN ONE SIDE IS BOOLEAN AND THE OTHER IS THE LITERAL 'T' OR 'F',
    # COMPARE AS BOOLEANS RATHER THAN STRINGS
    if is_literal(rhs) and lhs_sql[0].sql.b != None and rhs.value in ('T', 'F'):
        rhs_sql = BooleanOp(rhs).to_sql(schema)
    if is_literal(lhs) and rhs_sql[0].sql.b != None and lhs.value in ('T', 'F'):
        lhs_sql = BooleanOp(lhs).to_sql(schema)

    if len(lhs_sql) != len(rhs_sql):
        Log.error("lhs and rhs have different dimensionality!?")
    acc = []
    for l, r in zip(lhs_sql, rhs_sql):
        # "bsnj" are the per-type SQL slots of each rendered expression
        for t in "bsnj":
            if r.sql[t] == None:
                if l.sql[t] == None:
                    # neither side has this type
                    pass
                else:
                    # only lhs present: equal when lhs IS NULL
                    acc.append(ConcatSQL((l.sql[t], SQL_IS_NULL)))
            elif l.sql[t] == None:
                # only rhs present: equal when rhs IS NULL
                acc.append(ConcatSQL((r.sql[t], SQL_IS_NULL)))
            else:
                acc.append(
                    ConcatSQL(
                        (sql_iso(l.sql[t]), SQL_EQ, sql_iso(r.sql[t]))))
    if not acc:
        return FALSE.to_sql(schema)
    else:
        return wrap([{"name": ".", "sql": {"b": JoinSQL(SQL_OR, acc)}}])
def _db_create(self):
    """Create the metadata table and seed it with the column definitions."""
    column_defs = [
        quote_column(c.name) + " " + json_type_to_sqlite_type[c.jx_type]
        for c in METADATA_COLUMNS
    ]
    key_def = "PRIMARY KEY" + sql_iso(
        sql_list(map(quote_column, ["es_index", "es_column"]))
    )

    with self._db_transaction():
        self.db.execute(
            "CREATE TABLE " + db_table_name + sql_iso(sql_list(column_defs + [key_def]))
        )
        # register, then persist, each metadata column
        for c in METADATA_COLUMNS:
            self._add(c)
            self._db_insert_column(c)
def _build_list_sql(self, db, first, batch_size):
    """
    Build the paging query for the next batch of fact rows.

    :param db: connection, used for quoting values
    :param first: key values of the last row seen (None/empty for the first batch)
    :param batch_size: LIMIT for this batch
    :return: SQL selecting the next `batch_size` rows after `first`,
             ordered by the extract key columns
    """
    # TODO: ENSURE THE LAST COLUMN IS THE id
    if first:
        dim = len(self._extract.field)
        # lexicographic "greater than `first`" over the compound key:
        # one disjunct per prefix length i; `ineq` picks =/>/>= per position
        where = SQL_OR.join(
            sql_iso(
                sql_and(
                    quote_column(f) + ineq(i, e, dim) + db.quote_value(Date(v) if t == "time" else v)
                    for e, (f, v, t) in enumerate(
                        zip(self._extract.field[0:i + 1:], first, self._extract.type[0:i + 1:])))) 
            for i in range(dim))
    else:
        where = SQL_TRUE

    selects = []
    for t, f in zip(self._extract.type, self._extract.field):
        if t == "time":
            # normalize time columns to microsecond DATETIME
            selects.append(
                "CAST" + sql_iso(sql_alias(quote_column(f), SQL("DATETIME(6)"))))
        else:
            selects.append(quote_column(f))
    sql = (SQL_SELECT + sql_list(selects) + SQL_FROM +
           self.settings.snowflake.fact_table + SQL_WHERE + where +
           SQL_ORDERBY +
           sql_list(quote_column(f) for f in self._extract.field) +
           SQL_LIMIT + db.quote_value(batch_size))
    return sql
def create_fact(self, uid=UID):
    """
    MAKE NEW TABLE WITH GIVEN guid
    :param uid: name, or list of names, for the GUID
    :return: None
    """
    self.add_table_to_schema(["."])
    uid = listwrap(uid)
    new_columns = []
    for u in uid:
        if u == UID:
            # the default UID column is implicit; no schema entry needed
            pass
        else:
            c = Column(
                names={".": u},
                type="string",
                es_column=typed_column(u, "string"),
                es_index=self.fact)
            self.add_column_to_schema(c)
            new_columns.append(c)

    # GUID + UID + all schema columns, with a compound primary key
    command = ("CREATE TABLE " + quote_column(self.fact) + sql_iso(
        sql_list([quoted_GUID + " TEXT "] + [quoted_UID + " INTEGER"] + [
            quote_column(c.es_column) + " " + sql_types[c.type]
            for c in self.tables["."].schema.columns
        ] + [
            "PRIMARY KEY " + sql_iso(
                sql_list([quoted_GUID] + [quoted_UID] + [
                    quote_column(c.es_column)
                    for c in self.tables["."].schema.columns
                ]))
        ])))
    self.db.execute(command)
def sql_create(table, properties, primary_key=None, unique=None):
    """
    Compose a CREATE TABLE statement.

    :param table: NAME OF THE TABLE TO CREATE
    :param properties: DICT WITH {name: type} PAIRS (type can be plain text)
    :param primary_key: COLUMNS THAT MAKE UP THE PRIMARY KEY
    :param unique: COLUMNS THAT SHOULD BE UNIQUE
    :return: composed SQL
    """
    acc = [
        SQL_CREATE,
        quote_column(table),
        SQL_OP,
        sql_list([quote_column(k) + SQL(v) for k, v in properties.items()]),
    ]
    # FIX: the original append statements ended with stray commas, each
    # building and discarding a one-element tuple; behavior was unchanged
    # but the commas were misleading noise
    if primary_key:
        acc.append(SQL_COMMA)
        acc.append(SQL(" PRIMARY KEY "))
        acc.append(sql_iso(sql_list([quote_column(c) for c in listwrap(primary_key)])))
    if unique:
        acc.append(SQL_COMMA)
        acc.append(SQL(" UNIQUE "))
        acc.append(sql_iso(sql_list([quote_column(c) for c in listwrap(unique)])))
    acc.append(SQL_CP)

    return ConcatSQL(acc)
def to_sql(self, schema, not_null=False, boolean=False):
    """Render a plain (NULL-blind) equality over the first available SQL type of each side."""
    left = self.lhs.partial_eval().to_sql(schema)[0].sql.values()[0]
    right = self.rhs.partial_eval().to_sql(schema)[0].sql.values()[0]
    comparison = sql_iso(left) + "=" + sql_iso(right)
    return wrap([{"name": ".", "sql": {"b": comparison}}])
def insert_list(self, table_name, records):
    """
    Upsert `records` (list of dicts) into `table_name`: delete rows with
    matching _id, then insert all records in one statement.  The column set
    is the union of all record keys; absent keys insert NULL.
    """
    if not records:
        return
    columns = set()
    for r in records:
        columns |= set(r.keys())
    columns = jx.sort(columns)
    try:
        self.execute(
            "DELETE FROM " + self.quote_column(table_name) + SQL_WHERE +
            "_id IN {{ids}}",
            {"ids": self.quote_column([r["_id"] for r in records])})

        command = (
            SQL_INSERT + self.quote_column(table_name) +
            sql_iso(sql_list(self.quote_column(k) for k in columns)) +
            SQL_VALUES +
            # FIX: emit one parenthesized tuple per record; the original
            # flattened every value of every record (column-major) into a
            # single tuple, producing invalid multi-row INSERT SQL
            sql_list(
                sql_iso(sql_list(self.quote_value(r.get(k)) for k in columns))
                for r in records))
        self.execute(command)
    except Exception as e:
        Log.error("problem with insert", e)
def insert_new(self, table_name, candidate_key, new_record):
    """
    Insert `new_record` only if no existing row matches `candidate_key`.

    Uses INSERT ... SELECT with a LEFT JOIN against the table: the values
    row is kept only when the join finds no match (exist IS NULL).
    NOTE(review): uses DUAL, so this targets MySQL-style databases — confirm.
    """
    candidate_key = listwrap(candidate_key)

    # NULL-aware match on each key column
    condition = SQL_AND.join([
        quote_column(k) + "=" + quote_value(new_record[k])
        if new_record[k] != None
        else quote_column(k) + SQL_IS_NULL
        for k in candidate_key
    ])
    command = (
        "INSERT INTO " + quote_column(table_name) +
        sql_iso(sql_list(
            quote_column(k) for k in new_record.keys()
        )) +
        SQL_SELECT + "a.*" + SQL_FROM +
        sql_iso(
            SQL_SELECT + sql_list([quote_value(v) + " " + quote_column(k) for k, v in new_record.items()]) +
            SQL_FROM + "DUAL"
        ) + " a" +
        SQL_LEFT_JOIN +
        sql_iso(
            SQL_SELECT + "'dummy' exist " + SQL_FROM + quote_column(table_name) +
            SQL_WHERE + condition + SQL_LIMIT + SQL_ONE
        ) + " b ON " + SQL_TRUE +
        SQL_WHERE + " exist " + SQL_IS_NULL
    )
    self.execute(command, {})
def _make_range_domain(self, domain, column_name):
    """
    Build a SELECT generating all values of a numeric range domain by
    cross-joining the __digits__ helper table (one alias per decimal digit).
    """
    width = (domain.max - domain.min) / domain.interval
    # number of extra digit tables needed to cover `width` values
    digits = mo_math.floor(mo_math.log10(width - 1))
    if digits == 0:
        value = "a.value"
    else:
        # a.value + 10*b.value + 100*c.value + ... composes the ordinal
        value = SQL("+").join("1" + ("0" * j) + "*" +
                              text_type(chr(ord(b'a') + j)) + ".value"
                              for j in range(digits + 1))

    if domain.interval == 1:
        if domain.min == 0:
            domain = (
                SQL_SELECT + value + column_name + SQL_FROM + "__digits__ a")
        else:
            domain = (SQL_SELECT + sql_iso(value) + " + " +
                      quote_value(domain.min) + column_name + SQL_FROM +
                      "__digits__ a")
    else:
        if domain.min == 0:
            domain = (SQL_SELECT + value + " * " +
                      quote_value(domain.interval) + column_name + SQL_FROM +
                      "__digits__ a")
        else:
            domain = (
                SQL_SELECT + sql_iso(value + " * " +
                                     quote_value(domain.interval)) + " + " +
                quote_value(domain.min) + column_name + SQL_FROM +
                "__digits__ a")

    # cross join one __digits__ alias per additional digit (b, c, ...)
    for j in range(digits):
        domain += SQL_INNER_JOIN + "__digits__" + text_type(
            chr(ord(b'a') + j + 1)) + " ON " + SQL_TRUE
    # trim the overshoot beyond the requested width
    domain += SQL_WHERE + value + " < " + quote_value(width)
    return domain
def _insert(self, collection):
    """
    Insert rows into each (nested) table of `collection`.

    :param collection: maps nested_path -> details, where details carries
                       the active columns and the row dicts to insert
    """
    for nested_path, details in collection.items():
        active_columns = wrap(list(details.active_columns))
        rows = details.rows
        table_name = concat_field(self.name, nested_path)

        if table_name == self.name:
            # DO NOT REQUIRE PARENT OR ORDER COLUMNS
            meta_columns = [GUID, UID]
        else:
            meta_columns = [UID, PARENT, ORDER]

        all_columns = meta_columns + active_columns.es_column  # ONLY THE PRIMITIVE VALUE COLUMNS
        # FIX: removed unused local `num_rows = len(rows)`
        command = ConcatSQL([
            SQL_INSERT,
            quote_column(table_name),
            sql_iso(sql_list(map(quote_column, all_columns))),
            SQL_VALUES,
            sql_list(
                sql_iso(
                    sql_list(quote_value(row.get(c)) for c in all_columns))
                for row in unwrap(rows))
        ])

        with self.db.transaction() as t:
            t.execute(command)
def _nest_column(self, column, new_path):
    """
    Move `column` into a (possibly new) nested table at `new_path`,
    creating the child table when needed and dropping the moved columns
    from the parent table via the SQLite rename/recreate dance.
    """
    destination_table = concat_field(self.fact_name, new_path[0])
    existing_table = concat_field(self.fact_name, column.nested_path[0])

    # FIND THE INNER COLUMNS WE WILL BE MOVING
    moving_columns = []
    for c in self.columns:
        if destination_table != column.es_index and column.es_column == c.es_column:
            moving_columns.append(c)
            c.nested_path = new_path

    # TODO: IF THERE ARE CHILD TABLES, WE MUST UPDATE THEIR RELATIONS TOO?

    # DEFINE A NEW TABLE?
    # LOAD THE COLUMNS
    details = self.namespace.db.about(destination_table)
    if not details.data:
        command = (
            SQL_CREATE + quote_column(destination_table) + sql_iso(
                sql_list([
                    quoted_UID + "INTEGER",
                    quoted_PARENT + "INTEGER",
                    quoted_ORDER + "INTEGER",
                    "PRIMARY KEY " + sql_iso(quoted_UID),
                    "FOREIGN KEY " + sql_iso(quoted_PARENT) + " REFERENCES " +
                    quote_column(existing_table) + sql_iso(quoted_UID)
                ])))
        with self.namespace.db.transaction() as t:
            t.execute(command)
            self.add_table(new_path)

    # TEST IF THERE IS ANY DATA IN THE NEW NESTED ARRAY
    if not moving_columns:
        return

    column.es_index = destination_table
    with self.namespace.db.transaction() as t:
        t.execute("ALTER TABLE " + quote_column(destination_table) +
                  " ADD COLUMN " + quote_column(column.es_column) + " " +
                  column.es_type)

        # Deleting parent columns
        # NOTE(review): `column` is rebound here to a column NAME, shadowing
        # the parameter — intentional-looking but worth confirming
        for col in moving_columns:
            column = col.es_column
            tmp_table = "tmp_" + existing_table
            # zero-row SELECT just to read the header (column names)
            columns = list(
                map(
                    text,
                    t.query(SQL_SELECT + SQL_STAR + SQL_FROM +
                            quote_column(existing_table) + SQL_LIMIT +
                            SQL_ZERO).header))
            t.execute("ALTER TABLE " + quote_column(existing_table) +
                      " RENAME TO " + quote_column(tmp_table))
            # recreate the parent without the moved column
            t.execute(
                SQL_CREATE + quote_column(existing_table) + SQL_AS +
                SQL_SELECT +
                sql_list([quote_column(c) for c in columns if c != column]) +
                SQL_FROM + quote_column(tmp_table))
            t.execute("DROP TABLE " + quote_column(tmp_table))
def to_sql(self, schema, not_null=False, boolean=False):
    """
    Render SUBSTR(value, start[, length]); the length argument is omitted
    when self.length is NULL.
    """
    value = self.value.to_sql(schema, not_null=True)[0].sql.s
    start = self.start.to_sql(schema, not_null=True)[0].sql.n
    if self.length is NULL:
        sql = "SUBSTR" + sql_iso(sql_list([value, start]))
    else:
        length = self.length.to_sql(schema, not_null=True)[0].sql.n
        sql = "SUBSTR" + sql_iso(sql_list([value, start, length]))
    # NOTE(review): "sql" here is the raw expression, not a {type: sql}
    # map as sibling to_sql methods return — confirm callers accept this
    return wrap([{"name": ".", "sql": sql}])
def to_sql(self, schema, not_null=False, boolean=False):
    """Render this numeric binary operation as `(lhs) <op> (rhs)`."""
    op, zero = _sql_operators[self.op]
    left = self.lhs.to_sql(schema)[0].sql.n
    right = self.rhs.to_sql(schema)[0].sql.n
    expression = sql_iso(left) + " " + op + " " + sql_iso(right)
    return wrap([{"name": ".", "sql": {"n": expression}}])
def sql_insert(table, records):
    """
    Compose a multi-row INSERT for `records` (dict or list of dicts).
    The column set is the union of all record keys.
    """
    records = listwrap(records)
    keys = list({k for r in records for k in r.keys()})
    column_clause = sql_iso(sql_list(map(quote_column, keys)))
    value_rows = [
        sql_iso(sql_list([quote_value(r[k]) for k in keys]))
        for r in records
    ]
    return ConcatSQL([
        SQL_INSERT,
        quote_column(table),
        column_clause,
        SQL_VALUES,
        sql_list(value_rows),
    ])
def insert(self, table_name, record):
    """Insert a single record (dict of column -> value) into `table_name`."""
    keys = list(record.keys())
    try:
        names = sql_list([quote_column(k) for k in keys])
        values = sql_list([quote_value(record[k]) for k in keys])
        command = (
            "INSERT INTO " + quote_column(table_name)
            + sql_iso(names)
            + " VALUES "
            + sql_iso(values)
        )
        self.execute(command)
    except Exception as e:
        Log.error("problem with record: {{record}}", record=record, cause=e)
def to_sql(self, schema, not_null=False, boolean=False):
    """
    Render string concatenation of self.terms joined by self.separator,
    skipping missing terms; falls back to self.default when all are missing.
    """
    defult = self.default.to_sql(schema)
    if len(self.terms) == 0:
        # nothing to concatenate: just the default
        return defult
    defult = coalesce(defult[0].sql, SQL_NULL)
    sep = self.separator.to_sql(schema)[0].sql.s
    acc = []
    for t in self.terms:
        missing = t.missing().partial_eval()

        term = t.to_sql(schema, not_null=True)[0].sql
        # coerce each term to a string expression
        if term.s:
            term_sql = term.s
        elif term.n:
            term_sql = "cast(" + term.n + " as text)"
        else:
            term_sql = (
                SQL_CASE + SQL_WHEN + term.b + SQL_THEN + quote_value("true") +
                SQL_ELSE + quote_value("false") + SQL_END)

        if isinstance(missing, TrueOp):
            # always missing: contributes nothing
            acc.append(SQL_EMPTY_STRING)
        elif missing:
            # conditionally missing: emit '' when missing, sep+term otherwise
            acc.append(
                SQL_CASE + SQL_WHEN +
                sql_iso(missing.to_sql(schema, boolean=True)[0].sql.b) +
                SQL_THEN + SQL_EMPTY_STRING + SQL_ELSE +
                sql_iso(sql_concat([sep, term_sql])) + SQL_END)
        else:
            acc.append(sql_concat([sep, term_sql]))

    # every fragment is prefixed with the separator; strip the leading one
    expr_ = ("substr(" + sql_concat(acc) + ", " +
             LengthOp(None, self.separator).to_sql(schema)[0].sql.n + "+1)")

    missing = self.missing()
    if not missing:
        return wrap([{"name": ".", "sql": {"s": expr_}}])
    else:
        # guard the whole expression: default when everything is missing
        return wrap([{
            "name": ".",
            "sql": {
                "s": SQL_CASE + SQL_WHEN + "(" +
                     missing.to_sql(schema, boolean=True)[0].sql.b + ")" +
                     SQL_THEN + "(" + defult + ")" + SQL_ELSE + "(" + expr_ +
                     ")" + SQL_END
            },
        }])
def to_sql(self, schema, not_null=False, boolean=False):
    """
    Render an inequality (lt/lte/gt/gte) as boolean SQL, handling operands
    that exist in multiple storage types ("b", "n", "s").
    """
    lhs = self.lhs.to_sql(schema, not_null=True)[0].sql
    rhs = self.rhs.to_sql(schema, not_null=True)[0].sql
    lhs_exists = self.lhs.exists().to_sql(schema)[0].sql
    rhs_exists = self.rhs.exists().to_sql(schema)[0].sql

    if len(lhs) == 1 and len(rhs) == 1:
        # single-typed on both sides: direct comparison
        return wrap([{"name": ".", "sql": {
            "b": sql_iso(lhs.values()[0]) + " " + InequalityOp.operators[self.op] + " " + sql_iso(rhs.values()[0])
        }}])

    # multi-typed: compare within each type, and use the cross-type sort
    # order of the type letters themselves for mixed comparisons
    ors = []
    for l in "bns":
        ll = lhs[l]
        if not ll:
            continue
        for r in "bns":
            rr = rhs[r]
            if not rr:
                continue
            elif r == l:
                # same type: both must exist and the comparison must hold
                ors.append(
                    sql_iso(lhs_exists[l]) + SQL_AND + sql_iso(rhs_exists[r]) + SQL_AND + sql_iso(lhs[l]) + " " + InequalityOp.operators[self.op] + " " + sql_iso(rhs[r])
                )
            elif (l > r and self.op in ["gte", "gt"]) or (l < r and self.op in ["lte", "lt"]):
                # differing types: ordering is decided by type alone
                ors.append(
                    sql_iso(lhs_exists[l]) + SQL_AND + sql_iso(rhs_exists[r])
                )
    sql = sql_iso(SQL_OR.join(sql_iso(o) for o in ors))
    return wrap([{"name": ".", "sql": {"b": sql}}])
def insert(self, table_name, record):
    """Insert one record (dict of column -> value) into `table_name`."""
    keys = list(record.keys())
    try:
        column_clause = sql_iso(sql_list([quote_column(k) for k in keys]))
        value_clause = sql_iso(sql_list([quote_value(record[k]) for k in keys]))
        self.execute(
            SQL_INSERT + quote_column(table_name) + column_clause + SQL_VALUES + value_clause
        )
    except Exception as e:
        Log.error("problem with record: {{record}}", record=record, cause=e)
def _inequality_to_sql(self, schema, not_null=False, boolean=False, many=True):
    """Render a numeric inequality as a BOOLEAN SQLScript; missing when either side is missing."""
    op, identity = _sql_operators[self.op]
    left = NumberOp(self.lhs).partial_eval().to_sql(schema, not_null=True)[0].sql.n
    right = NumberOp(self.rhs).partial_eval().to_sql(schema, not_null=True)[0].sql.n
    return SQLScript(
        data_type=BOOLEAN,
        expr=sql_iso(left) + op + sql_iso(right),
        frum=self,
        miss=OrOp([self.lhs.missing(), self.rhs.missing()]),
        schema=schema,
    )
def _binaryop_to_sql(self, schema, not_null=False, boolean=False, many=True):
    """Render a numeric binary op; result is NULL when either operand is missing."""
    op, identity = _sql_operators[self.op]
    left = NumberOp(self.lhs).partial_eval().to_sql(schema, not_null=True)[0].sql.n
    right = NumberOp(self.rhs).partial_eval().to_sql(schema, not_null=True)[0].sql.n
    script = sql_iso(left) + op + sql_iso(right)

    missing = OrOp([self.lhs.missing(), self.rhs.missing()]).partial_eval()
    if missing is FALSE:
        # neither side can be missing: no NULL guard needed
        sql = script
    else:
        guard = missing.to_sql(schema, boolean=True)[0].sql.b
        sql = "CASE WHEN " + guard + " THEN NULL ELSE " + script + " END"
    return wrap([{"name": ".", "sql": {"n": sql}}])
def test_transactions2(service):
    """Writes inside a transaction are visible to it, while a concurrent connection-level query is refused."""
    pending = [('testing_transaction2_1', '1'), ('testing_transaction2_2', '2')]

    with service.conn.transaction() as t:
        # Make a change
        t.execute(
            "INSERT OR REPLACE INTO latestFileMod (file, revision) VALUES "
            + sql_list(sql_iso(sql_list(map(quote_value, row))) for row in pending))

        # Query for one change: must be refused while the transaction is open
        try:
            service.conn.get(
                "SELECT revision FROM latestFileMod WHERE file=?",
                ('testing_transaction2_1', ))
            assert False
        except Exception as e:
            assert DOUBLE_TRANSACTION_ERROR in e

        # Query for the other change
        result = t.get("SELECT revision FROM latestFileMod WHERE file=?",
                       ('testing_transaction2_2', ))
        assert result[0][0] == '2'
def _load_functions(self):
    """
    Attempt to load the libsqlitefunctions extension (extra math/string
    functions); warn once, and continue without it, on failure.
    """
    global _load_extension_warning_sent
    library_loc = File.new_instance(sys.modules[__name__].__file__, "../..")
    full_path = File.new_instance(
        library_loc, "vendor/sqlite/libsqlitefunctions.so").abspath
    try:
        trace = extract_stack(0)[0]
        if self.upgrade:
            # NOTE(review): the extension only loads when self.upgrade is
            # set — confirm that is intended, not an indentation slip
            if os.name == 'nt':
                file = File.new_instance(
                    trace["file"],
                    "../../vendor/sqlite/libsqlitefunctions.so")
            else:
                file = File.new_instance(
                    trace["file"], "../../vendor/sqlite/libsqlitefunctions")
            full_path = file.abspath
            self.db.enable_load_extension(True)
            self.db.execute(SQL_SELECT + "load_extension" +
                            sql_iso(quote_value(full_path)))
    except Exception as e:
        # warn only once per process
        if not _load_extension_warning_sent:
            _load_extension_warning_sent = True
            Log.warning(
                "Could not load {{file}}, doing without. (no SQRT for you!)",
                file=full_path,
                cause=e)
def to_sql(self, schema, not_null=False, boolean=False):
    """
    Count, per row, how many terms have a value: the sum of one 0/1
    CASE expression per term.
    """
    acc = []
    for term in self.terms:
        sqls = SQLang[term].to_sql(schema)
        if len(sqls) > 1:
            # multi-column term: always counts as existing
            acc.append(SQL_TRUE)
        else:
            for t, v in sqls[0].sql.items():
                if t in ["b", "s", "n"]:
                    # 0 when NULL, 1 otherwise
                    acc.append(
                        ConcatSQL((
                            SQL_CASE,
                            SQL_WHEN,
                            sql_iso(v),
                            SQL_IS_NULL,
                            SQL_THEN,
                            SQL_ZERO,
                            SQL_ELSE,
                            SQL_ONE,
                            SQL_END,
                        )))
                else:
                    # non-primitive slot: counts as existing
                    acc.append(SQL_TRUE)

    if not acc:
        return wrap([{}])
    else:
        # FIX: the key was misspelled "nanme"; every sibling to_sql returns
        # results keyed "name", and downstream consumers read "name"
        return wrap([{"name": ".", "sql": {"n": SQL("+").join(acc)}}])
def _insert(self, collection):
    """Insert rows into each (nested) table of `collection`, one INSERT ... SELECT ... UNION ALL per table."""
    for nested_path, details in collection.items():
        columns = wrap(list(details.active_columns))
        rows = details.rows
        fact_name = self.facts.snowflake.fact_name
        table_name = concat_field(fact_name, nested_path)

        if table_name == fact_name:
            # DO NOT REQUIRE PARENT OR ORDER COLUMNS
            meta_columns = [GUID, UID]
        else:
            meta_columns = [UID, PARENT, ORDER]
        all_columns = meta_columns + columns.es_column

        prefix = (
            "INSERT INTO " + quote_column(table_name)
            + sql_iso(sql_list(map(quote_column, all_columns)))
        )

        # BUILD THE RECORDS
        records = SQL_UNION_ALL.join(
            SQL_SELECT + sql_list(quote_value(row.get(c)) for c in all_columns)
            for row in unwrap(rows)
        )

        with self.db.transaction() as t:
            t.execute(prefix + records)
def to_sql(self, schema, not_null=False, boolean=False):
    """Render as INSTR(value, find); the result is numeric (1-based position, 0 when absent)."""
    haystack = self.value.to_sql(schema)[0].sql.s
    needle = self.find.to_sql(schema)[0].sql.s
    expression = "INSTR" + sql_iso(sql_list([haystack, needle]))
    return wrap([{"name": ".", "sql": {"n": expression}}])
def to_sql(self, schema, not_null=False, boolean=False):
    """Boolean test that expr starts with prefix, via INSTR(...)==1; trivially TRUE when there is no expr."""
    if not self.expr:
        return wrap([{"name": ".", "sql": {"b": SQL_TRUE}}])

    expr_sql = self.expr.to_sql(schema)[0].sql.s
    prefix_sql = self.prefix.to_sql(schema)[0].sql.s
    test = "INSTR" + sql_iso(expr_sql + ", " + prefix_sql) + "==1"
    return wrap([{"name": ".", "sql": {"b": test}}])
def insert_new(self, table_name, candidate_key, new_record):
    """Insert `new_record` only when no existing row matches `candidate_key` (INSERT ... SELECT with anti-join)."""
    candidate_key = listwrap(candidate_key)
    condition = sql_eq(**{k: new_record[k] for k in candidate_key})

    # the candidate values, aliased to their column names
    values_source = sql_iso(
        SQL_SELECT
        + sql_list([
            quote_value(v) + " " + quote_column(k)
            for k, v in new_record.items()
        ])
        + SQL_FROM + "DUAL"
    ) + " a"

    # probe for an existing match; `exist` is non-NULL when one is found
    match_probe = sql_iso(
        SQL_SELECT + "'dummy' exist " + SQL_FROM + quote_column(table_name)
        + SQL_WHERE + condition + SQL_LIMIT + SQL_ONE
    ) + " b ON " + SQL_TRUE

    command = (
        SQL_INSERT + quote_column(table_name)
        + sql_iso(sql_list(quote_column(k) for k in new_record.keys()))
        + SQL_SELECT + "a.*" + SQL_FROM + values_source
        + SQL_LEFT_JOIN + match_probe
        + SQL_WHERE + " exist " + SQL_IS_NULL
    )
    self.execute(command, {})
def to_sql(self, schema, not_null=False, boolean=False):
    """Render the numeric (epoch seconds) operand wrapped in FROM_UNIXTIME."""
    inner = self.value.to_sql(schema)[0].sql
    expression = "FROM_UNIXTIME" + sql_iso(inner.n)
    return wrap([{"name": ".", "sql": {"n": expression}}])
def about(self, table_name):
    """
    :param table_name: TABLE OF INTEREST
    :return: SOME INFORMATION ABOUT THE TABLE
        (cid, name, dtype, notnull, dflt_value, pk) tuples
    """
    return self.query("PRAGMA table_info" + sql_iso(quote_column(table_name))).data
def basic_multiop_to_sql(self, schema, not_null=False, boolean=False, many=False):
    """Join the numeric SQL of every term with this op's operator (op name carries a "basic." prefix)."""
    op, identity = _sql_operators[self.op.split("basic.")[1]]
    rendered_terms = [sql_iso(t.to_sql(schema)[0].sql.n) for t in self.terms]
    return wrap([{"name": ".", "sql": {"n": op.join(rendered_terms)}}])
def insert_list(self, table_name, records):
    """Insert many records (dicts) with one multi-row INSERT; the column set is the union of all keys."""
    if not records:
        return

    keys = set()
    for r in records:
        keys |= set(r.keys())
    keys = jx.sort(keys)

    try:
        column_clause = sql_iso(sql_list([quote_column(k) for k in keys]))
        rows_clause = sql_list([
            sql_iso(sql_list([quote_value(r[k]) for k in keys]))
            for r in records
        ])
        self.execute(
            "INSERT INTO " + quote_column(table_name)
            + column_clause
            + " VALUES "
            + rows_clause
        )
    except Exception as e:
        Log.error("problem with record: {{record}}", record=records, cause=e)
def _work(name, db, sigs, please_stop):
    """
    Worker thread used to interleave transactions: each step waits on a
    `begin` signal and reports completion through the matching `done`
    signal, so the test can control the ordering across threads.
    """
    try:
        sigs[0].begin.wait()
        with db.transaction() as t:
            sigs[0].done.go()
            sigs[1].begin.wait()
            t.execute("INSERT INTO my_table VALUES " +
                      sql_iso(quote_value(name)))
            sigs[1].done.go()
            sigs[2].begin.wait()
            # our own write must be visible within our transaction
            result = t.query("SELECT * FROM my_table WHERE value=" +
                             quote_value(name))
            assert len(result.data) == 1
            assert result.data[0][0] == name
            sigs[2].done.go()
    finally:
        # RELEASE ALL SIGNALS, THIS IS ENDING BADLY
        for s in sigs:
            s.done.go()
def _esfilter2sqlwhere(db, esfilter):
    """
    CONVERT ElassticSearch FILTER TO SQL FILTER
    db - REQUIRED TO PROPERLY QUOTE VALUES AND COLUMN NAMES

    NOTE(review): recursion goes through the module-level
    `esfilter2sqlwhere` (presumably a wrapper around this function) —
    confirm it exists at module scope.
    """
    esfilter = wrap(esfilter)

    if esfilter is True:
        return SQL_TRUE
    elif esfilter["and"]:
        return sql_iso(SQL_AND.join([esfilter2sqlwhere(db, a) for a in esfilter["and"]]))
    elif esfilter["or"]:
        return sql_iso(SQL_OR.join([esfilter2sqlwhere(db, a) for a in esfilter["or"]]))
    elif esfilter["not"]:
        return SQL_NOT + sql_iso(esfilter2sqlwhere(db, esfilter["not"]))
    elif esfilter.term:
        return sql_iso(SQL_AND.join([
            quote_column(col) + SQL("=") + quote_value(val)
            for col, val in esfilter.term.items()
        ]))
    elif esfilter.terms:
        # NOTE(review): returns on the FIRST column of `terms`; multiple
        # columns in one terms-clause are not combined — confirm intended
        for col, v in esfilter.terms.items():
            if len(v) == 0:
                return "FALSE"
            try:
                int_list = convert.value2intlist(v)
                has_null = False
                for vv in v:
                    if vv == None:
                        has_null = True
                        break
                if int_list:
                    # pack dense integer lists into range filters
                    filter = int_list_packer(col, int_list)
                    if has_null:
                        return esfilter2sqlwhere(db, {"or": [{"missing": col}, filter]})
                    elif 'terms' in filter and set(filter['terms'].get(col, [])) == set(int_list):
                        # packing did not help: emit a plain IN list
                        return quote_column(col) + " in " + quote_list(int_list)
                    else:
                        return esfilter2sqlwhere(db, filter)
                else:
                    if has_null:
                        return esfilter2sqlwhere(db, {"missing": col})
                    else:
                        return "false"
            except Exception as e:
                # NOTE(review): failure of the integer fast-path is silently
                # swallowed; falls through to the generic IN list below
                e = Except.wrap(e)
                pass
            return quote_column(col) + " in " + quote_list(v)
    elif esfilter.script:
        return sql_iso(esfilter.script)
    elif esfilter.range:
        name2sign = {
            "gt": SQL(">"),
            "gte": SQL(">="),
            "lte": SQL("<="),
            "lt": SQL("<")
        }

        def single(col, r):
            # one column's range constraints; r maps sign -> bound
            min = coalesce(r["gte"], r[">="])
            max = coalesce(r["lte"], r["<="])
            if min != None and max != None:
                # SPECIAL CASE (BETWEEN)
                sql = quote_column(col) + SQL(" BETWEEN ") + quote_value(min) + SQL_AND + quote_value(max)
            else:
                sql = SQL_AND.join(
                    quote_column(col) + name2sign[sign] + quote_value(value)
                    for sign, value in r.items()
                )
            return sql

        terms = [single(col, ranges) for col, ranges in esfilter.range.items()]
        if len(terms) == 1:
            output = terms[0]
        else:
            output = sql_iso(SQL_AND.join(terms))
        return output
    elif esfilter.missing:
        # accepts either a bare field name or {"field": name}
        if isinstance(esfilter.missing, text_type):
            return sql_iso(quote_column(esfilter.missing) + SQL_IS_NULL)
        else:
            return sql_iso(quote_column(esfilter.missing.field) + SQL_IS_NULL)
    elif esfilter.exists:
        # accepts either a bare field name or {"field": name}
        if isinstance(esfilter.exists, text_type):
            return sql_iso(quote_column(esfilter.exists) + SQL_IS_NOT_NULL)
        else:
            return sql_iso(quote_column(esfilter.exists.field) + SQL_IS_NOT_NULL)
    elif esfilter.match_all:
        return SQL_TRUE
    elif esfilter.instr:
        return sql_iso(SQL_AND.join([
            "instr" + sql_iso(quote_column(col) + ", " + quote_value(val)) + ">0"
            for col, val in esfilter.instr.items()
        ]))
    else:
        Log.error("Can not convert esfilter to SQL: {{esfilter}}", esfilter=esfilter)
def quote_list(values):
    """Render an SQL tuple: each value quoted, comma-separated, parenthesized."""
    return sql_iso(sql_list([quote_value(v) for v in values]))