def append_query(self, query_path, es_query):
    """Build the terms aggregation for this edge, plus the _missing bucket."""
    edge_value = self.edge.value
    if is_op(edge_value, FirstOp) and is_op(edge_value.term, Variable):
        # ES USES THE FIRST TERM FOR {"terms": } AGGREGATION
        edge_value = self.edge.value = edge_value.term

    if is_op(edge_value, Variable):
        spec = {
            "field": first(self.schema.leaves(edge_value.var)).es_column,
            "size": self.domain.limit,
            "order": self.es_order
        }
    else:
        # general expression: aggregate over a painless script
        spec = {
            "script": {"lang": "painless", "inline": self.script.expr},
            "size": self.domain.limit,
            "order": self.es_order
        }
    match = TermsAggs("_match", spec, self)

    result = Aggs()
    result.add(FilterAggs("_filter", self.exists, None).add(match.add(es_query)))
    result.add(FilterAggs("_missing", self.missing, self).add(es_query))
    return result
def select(self, select):
    """
    Project self.data through the given select clause(s).
    Returns self for the identity select, otherwise a new ListContainer
    whose data is a (lazy) map over the rows.
    """
    selects = listwrap(select)

    if len(selects) == 1 and is_op(selects[0].value, Variable) and selects[0].value.var == ".":
        new_schema = self.schema
        if selects[0].name == ".":
            # identity select: nothing to compute
            return self
    else:
        new_schema = None

    if is_list(select):
        if all(
            is_op(s.value, Variable) and s.name == s.value.var
            for s in select
        ):
            # plain column picks: new schema is just the named subset of columns
            names = set(s.value.var for s in select)
            new_schema = Schema(".", [c for c in self.schema.columns if c.name in names])

        # (name, extractor) pairs; extractor pulls the value out of a wrapped row
        push_and_pull = [(s.name, jx_expression_to_function(s.value)) for s in selects]

        def selector(d):
            output = Data()
            for n, p in push_and_pull:
                output[n] = unwraplist(p(wrap(d)))
            return unwrap(output)

        new_data = map(selector, self.data)
    else:
        select_value = jx_expression_to_function(select.value)
        new_data = map(select_value, self.data)

        if is_op(select.value, Variable):
            # single-column select: carry the column into the new schema, renamed to "."
            column = copy(first(c for c in self.schema.columns if c.name == select.value.var))
            column.name = '.'
            new_schema = Schema("from " + self.name, [column])

    return ListContainer("from "+self.name, data=new_data, schema=new_schema)
def to_es_script(self, schema, not_null=False, boolean=False, many=True):
    """Render FirstOp (first value of a possibly-multivalued term) as painless."""
    if is_op(self.term, Variable_):
        columns = schema.values(self.term.var)
        if len(columns) == 0:
            return null_script
        if len(columns) == 1:
            # exactly one column: delegate, asking for a scalar (many=False)
            return self.term.to_es_script(schema, many=False)

    term = self.term.to_es_script(schema)

    if is_op(term.frum, CoalesceOp_):
        # push FirstOp into each coalesce branch
        return CoalesceOp(
            [
                FirstOp(t.partial_eval().to_es_script(schema))
                for t in term.frum.terms
            ]
        ).to_es_script(schema)

    if term.many:
        # list-valued expression: take element [0]
        return EsScript(
            miss=term.miss,
            type=term.type,
            expr="(" + term.expr + ")[0]",
            frum=term.frum,
            schema=schema,
        ).to_es_script(schema)
    else:
        return term
def convert(self, expr):
    """
    EXPAND INSTANCES OF name TO value
    (branch order matters: numbers/"."/variable-names are tested before
    the generic text error, and data before many)
    """
    if expr is True or expr == None or expr is False:
        return expr
    elif is_number(expr):
        return expr
    elif expr == ".":
        return "."
    elif is_variable_name(expr):
        # replace known dimension names with their definition
        return coalesce(self.dimensions[expr], expr)
    elif is_text(expr):
        Log.error("{{name|quote}} is not a valid variable name", name=expr)
    elif isinstance(expr, Date):
        return expr
    elif is_op(expr, QueryOp):
        return self._convert_query(expr)
    elif is_data(expr):
        if expr["from"]:
            return self._convert_query(expr)
        elif len(expr) >= 2:
            # ASSUME WE HAVE A NAMED STRUCTURE, NOT AN EXPRESSION
            return wrap({name: self.convert(value) for name, value in expr.leaves()})
        else:
            # ASSUME SINGLE-CLAUSE EXPRESSION
            k, v = expr.items()[0]
            return converter_map.get(k, self._convert_bop)(self, k, v)
    elif is_many(expr):
        return wrap([self.convert(value) for value in expr])
    else:
        return expr
def jx_sort_to_es_sort(sort, schema):
    """Translate a normalized jx sort spec into an ES sort clause list."""
    if not sort:
        return []

    es_sort = []
    for clause in sort:
        if is_op(clause.value, Variable):
            leaves = schema.leaves(clause.value.var)
            # scan type buckets in an order that depends on sort direction
            if clause.sort == -1:
                type_order = OBJECT, STRING, NUMBER, BOOLEAN
            else:
                type_order = BOOLEAN, NUMBER, STRING, OBJECT

            for jx_type in type_order:
                for col in leaves:
                    if col.jx_type is jx_type:
                        if clause.sort == -1:
                            es_sort.append({col.es_column: "desc"})
                        else:
                            es_sort.append(col.es_column)
        else:
            from mo_logs import Log
            Log.error("do not know how to handle")

    return es_sort
def to_es_script(self, schema, not_null=False, boolean=False, many=True):
    """Render this missing-test as a painless boolean script."""
    expr = self.expr
    if is_op(expr, Variable_):
        if expr.var == "_id":
            # the _id field always exists
            return EsScript(type=BOOLEAN, expr="false", frum=self, schema=schema)
        # missing only when every backing column is empty
        empty_checks = [
            EsScript(
                type=BOOLEAN,
                expr="doc[" + quote(c.es_column) + "].empty",
                frum=self,
                schema=schema,
            )
            for c in schema.leaves(expr.var)
        ]
        return AndOp(empty_checks).partial_eval().to_es_script(schema)
    if is_literal(expr):
        return expr.missing().to_es_script(schema)
    return expr.missing().partial_eval().to_es_script(schema)
def query(self, _query):
    """Run a jx query against this ES container, dispatching on query shape."""
    try:
        query = QueryOp.wrap(_query, container=self, namespace=self.namespace)

        # reject unknown aggregates up front
        for s in listwrap(query.select):
            if s.aggregate != None and not aggregates.get(s.aggregate):
                Log.error(
                    "ES can not aggregate {{name}} because {{aggregate|quote}} is not a recognized aggregate",
                    name=s.name,
                    aggregate=s.aggregate
                )

        frum = query["from"]
        if is_op(frum, QueryOp):
            # sub-query: run it first, then run the outer query over its result
            result = self.query(frum)
            q2 = query.copy()
            q2.frum = result
            return jx.run(q2)

        if is_deepop(self.es, query):
            return es_deepop(self.es, query)
        if is_aggsop(self.es, query):
            return es_aggsop(self.es, frum, query)
        if is_setop(self.es, query):
            return es_setop(self.es, query)
        Log.error("Can not handle")
    except Exception as e:
        e = Except.wrap(e)
        if "Data too large, data for" in e:
            # ES circuit breaker tripped; clear the cache and report
            http.post(self.es.cluster.url / "_cache/clear")
            Log.error("Problem (Tried to clear Elasticsearch cache)", e)
        Log.error("problem", e)
def to_es_script(self, schema, not_null=False, boolean=False, many=True):
    """Render NumberOp (cast to number) as a painless script, per source type."""
    term = FirstOp(self.term).partial_eval()
    value = term.to_es_script(schema)

    if is_op(value.frum, CoalesceOp_):
        # push the numeric cast into each coalesce branch
        return CoalesceOp(
            [
                NumberOp(t).partial_eval().to_es_script(schema)
                for t in value.frum.terms
            ]
        ).to_es_script(schema)

    if value.type == BOOLEAN:
        # NOTE(review): value.expr is not parenthesized before "? 1 : 0";
        # assumes the rendered expression binds tighter than ?: — confirm
        return EsScript(
            miss=term.missing().partial_eval(),
            type=NUMBER,
            expr=value.expr + " ? 1 : 0",
            frum=self,
            schema=schema,
        )
    elif value.type == INTEGER:
        return EsScript(
            miss=term.missing().partial_eval(),
            type=NUMBER,
            expr=value.expr,
            frum=self,
            schema=schema,
        )
    elif value.type == NUMBER:
        return EsScript(
            miss=term.missing().partial_eval(),
            type=NUMBER,
            expr=value.expr,
            frum=self,
            schema=schema,
        )
    elif value.type == STRING:
        return EsScript(
            miss=term.missing().partial_eval(),
            type=NUMBER,
            expr="Double.parseDouble(" + value.expr + ")",
            frum=self,
            schema=schema,
        )
    elif value.type == OBJECT:
        # runtime type check: parse strings, pass other values through
        return EsScript(
            miss=term.missing().partial_eval(),
            type=NUMBER,
            expr="((" + value.expr + ") instanceof String) ? Double.parseDouble(" + value.expr + ") : (" + value.expr + ")",
            frum=self,
            schema=schema,
        )
    # NOTE(review): any other value.type falls through and returns None —
    # confirm callers never reach here with e.g. IS_NULL or TIME
def append_query(self, query_path, es_query):
    """Build the terms aggregation for an edge with explicit partitions,
    restricted to the partition values, plus missing buckets per nested level."""
    domain = self.domain
    domain_key = domain.key
    value = Painless[self.edge.value]
    cnv = pull_functions[value.type]
    # the set of partition keys we are allowed to match
    include = tuple(cnv(p[domain_key]) for p in domain.partitions)

    exists = Painless[AndOp([
        InOp([value, Literal(include)])
    ])].partial_eval()

    limit = coalesce(self.limit, len(domain.partitions))

    if is_op(value, Variable):
        es_field = first(self.query.frum.schema.leaves(value.var)).es_column  # ALREADY CHECKED THERE IS ONLY ONE
        match = TermsAggs(
            "_match",
            {
                "field": es_field,
                "size": limit,
                "order": {"_term": self.sorted} if self.sorted else None
            },
            self
        )
    else:
        # general expression: aggregate over a painless script
        match = TermsAggs(
            "_match",
            {
                "script": text_type(value.to_es_script(self.schema)),
                "size": limit
            },
            self
        )
    output = Aggs().add(FilterAggs("_filter", exists, None).add(match.add(es_query)))

    if self.edge.allowNulls:
        # FIND NULLS AT EACH NESTED LEVEL
        for p in self.schema.query_path:
            if p == query_path:
                # MISSING AT THE QUERY DEPTH
                output.add(
                    NestedAggs(p).add(FilterAggs("_missing0", NotOp(exists), self).add(es_query))
                )
            else:
                # PARENT HAS NO CHILDREN, SO MISSING
                column = first(self.schema.values(query_path, (OBJECT, EXISTS)))
                output.add(
                    NestedAggs(column.nested_path[0]).add(
                        FilterAggs(
                            "_missing1",
                            NotOp(ExistsOp(Variable(column.es_column.replace(NESTED_TYPE, EXISTS_TYPE)))),
                            self
                        ).add(es_query)
                    )
                )
    return output
def jx_expression_to_function(expr):
    """
    RETURN FUNCTION THAT REQUIRES PARAMETERS (row, rownum=None, rows=None):
    """
    if is_expression(expr):
        # a ScriptOp holding a real (non-text) script is already callable
        if is_op(expr, ScriptOp) and not is_text(expr.script):
            return expr.script
        return compile_expression(Python[expr].to_python())

    already_callable = (
        expr != None
        and not is_data(expr)
        and not is_list(expr)
        and hasattr(expr, "__call__")
    )
    if already_callable:
        return expr

    # plain json expression: parse, then compile
    return compile_expression(Python[jx_expression(expr)].to_python())
def wrap(query, container, namespace):
    """
    NORMALIZE QUERY SO IT CAN STILL BE JSON
    Returns a QueryOp with select/edges/groupby/where/sort/window normalized.
    """
    if is_op(query, QueryOp) or query == None:
        return query

    # NOTE(review): this calls the module-level (mo_dots) wrap, not this
    # method — relies on this def being inside a class body; confirm
    query = wrap(query)
    table = container.get_table(query['from'])
    schema = table.schema
    output = QueryOp(
        frum=table,
        format=query.format,
        limit=mo_math.min(MAX_LIMIT, coalesce(query.limit, DEFAULT_LIMIT))
    )

    if query.select or isinstance(query.select, (Mapping, list)):
        output.select = _normalize_selects(query.select, query.frum, schema=schema)
    else:
        if query.edges or query.groupby:
            output.select = DEFAULT_SELECT
        else:
            output.select = _normalize_selects(".", query.frum)

    if query.groupby and query.edges:
        Log.error("You can not use both the `groupby` and `edges` clauses in the same query!")
    elif query.edges:
        output.edges = _normalize_edges(query.edges, limit=output.limit, schema=schema)
        output.groupby = Null
    elif query.groupby:
        output.edges = Null
        output.groupby = _normalize_groupby(query.groupby, limit=output.limit, schema=schema)
    else:
        output.edges = Null
        output.groupby = Null

    output.where = _normalize_where(query.where, schema=schema)
    output.window = [_normalize_window(w) for w in listwrap(query.window)]
    output.having = None
    output.sort = _normalize_sort(query.sort)
    if not mo_math.is_integer(output.limit) or output.limit < 0:
        Log.error("Expecting limit >= 0")

    output.isLean = query.isLean
    return output
def to_es_script(self, schema, not_null=False, boolean=False, many=True):
    """Render StringOp (cast to string) as a painless script, per source type."""
    term = FirstOp(self.term).partial_eval()
    value = term.to_es_script(schema)

    if is_op(value.frum, CoalesceOp_):
        # push the string cast into each coalesce branch
        return CoalesceOp(
            [StringOp(t).partial_eval() for t in value.frum.terms]
        ).to_es_script(schema)

    if value.miss is TRUE or value.type is IS_NULL:
        return empty_string_script
    elif value.type == BOOLEAN:
        return EsScript(
            miss=self.term.missing().partial_eval(),
            type=STRING,
            expr=value.expr + ' ? "T" : "F"',
            frum=self,
            schema=schema,
        )
    elif value.type == INTEGER:
        return EsScript(
            miss=self.term.missing().partial_eval(),
            type=STRING,
            expr="String.valueOf(" + value.expr + ")",
            frum=self,
            schema=schema,
        )
    elif value.type == NUMBER:
        return EsScript(
            miss=self.term.missing().partial_eval(),
            type=STRING,
            expr=expand_template(NUMBER_TO_STRING, {"expr": value.expr}),
            frum=self,
            schema=schema,
        )
    elif value.type == STRING:
        return value
    else:
        # fallback reuses the numeric template for any other type
        return EsScript(
            miss=self.term.missing().partial_eval(),
            type=STRING,
            expr=expand_template(NUMBER_TO_STRING, {"expr": value.expr}),
            frum=self,
            schema=schema,
        )
def format_cube(aggs, es_query, query, decoders, all_selects):
    """Assemble ES aggregation results into a Cube (one Matrix per select)."""
    new_edges = count_dim(aggs, es_query, decoders)

    dims = []
    for e in new_edges:
        if is_op(e.value, TupleOp):
            e.allowNulls = False
        # one extra slot holds the null/missing part, when nulls are allowed
        extra = 0 if e.allowNulls is False else 1
        dims.append(len(e.domain.partitions) + extra)
    dims = tuple(dims)

    if any(s.default != canonical_aggregates[s.aggregate].default for s in all_selects):
        # UNUSUAL DEFAULT VALUES MESS THE union() FUNCTION
        is_default = Matrix(dims=dims, zeros=True)
        matricies = {s.name: Matrix(dims=dims) for s in all_selects}
        for row, coord, agg, selects in aggs_iterator(aggs, es_query, decoders):
            for select in selects:
                m = matricies[select.name]
                v = select.pull(agg)
                if v == None:
                    continue
                is_default[coord] = False
                union(m, coord, v, select.aggregate)

        # FILL THE DEFAULT VALUES
        for c, v in is_default:
            if v:
                for s in all_selects:
                    matricies[s.name][c] = s.default
    else:
        # standard defaults: matrices start pre-filled with them
        matricies = {s.name: Matrix(dims=dims, zeros=s.default) for s in all_selects}
        for row, coord, agg, selects in aggs_iterator(aggs, es_query, decoders):
            for select in selects:
                m = matricies[select.name]
                v = select.pull(agg)
                union(m, coord, v, select.aggregate)

    cube = Cube(
        query.select,
        sort_using_key(new_edges, key=lambda e: e.dim),  # ENSURE EDGES ARE IN SAME ORDER AS QUERY
        matricies
    )
    cube.frum = query
    return cube
def __init__(self, edge, query, limit):
    """Decoder for an edge backed by a simple list of leaf fields."""
    AggsDecoder.__init__(self, edge, query, limit)

    if is_op(edge.value, LeavesOp):
        prefix = edge.value.term.var

        def flatter(k):
            return literal_field(relative_field(k, prefix))
    else:
        prefix = edge.value.var

        def flatter(k):
            return relative_field(k, prefix)

    # (display name, es column) for every leaf under the prefix
    pairs = [
        (flatter(untype_path(c.name)), c.es_column)
        for c in query.frum.schema.leaves(prefix)
    ]
    self.put, self.fields = transpose(*pairs)

    self.domain = self.edge.domain = wrap({"dimension": {"fields": self.fields}})
    self.domain.limit = mo_math.min(coalesce(self.domain.limit, query.limit, 10), MAX_LIMIT)
    self.parts = list()
    self.key2index = {}
    self.computed_domain = False
def format_table(T, select, query=None):
    """Format rows T as a table: {"header": [...], "data": [[...], ...]}."""
    num_columns = MAX(select.put.index) + 1

    data = []
    for row in T:
        record = [None] * num_columns
        for s in select:
            value = unwraplist(s.pull(row))
            if value == None:
                continue
            index, child = s.put.index, s.put.child
            if child == ".":
                record[index] = value
            else:
                # nested property: accumulate into a Data at this column
                if record[index] is None:
                    record[index] = Data()
                record[index][child] = value
        data.append(record)

    header = [None] * num_columns
    if is_data(query.select) and not is_op(query.select.value, LeavesOp):
        for s in select:
            header[s.put.index] = s.name
    else:
        for s in select:
            # first select wins for each column slot
            if not header[s.put.index]:
                header[s.put.index] = s.name

    return Data(
        meta={"format": "table"},
        header=header,
        data=data
    )
def format_list(T, select, query=None):
    """Format rows T as a list: {"meta": {...}, "data": [...]}."""
    data = []
    if is_list(query.select):
        # multiple selects: build a Data record per row
        for row in T:
            record = Data()
            for s in select:
                val = unwraplist(s.pull(row))
                if val is not None:
                    try:
                        record[s.put.name][s.put.child] = val
                    except Exception as e:
                        Log.error("what's happening here?")
            data.append(record if record else None)
    elif is_op(query.select.value, LeavesOp):
        # leaves select: assign every pulled value, even nulls
        for row in T:
            record = Data()
            for s in select:
                record[s.put.name][s.put.child] = unwraplist(s.pull(row))
            data.append(record if record else None)
    else:
        # single select: a bare value, or a Data of children
        for row in T:
            record = None
            for s in select:
                val = unwraplist(s.pull(row))
                if val is None:
                    continue
                if s.put.child == ".":
                    record = val
                else:
                    if record is None:
                        record = Data()
                    record[s.put.child] = val
            data.append(record)

    return Data(
        meta={"format": "list"},
        data=data
    )
def _range_composer(self, edge, domain, es_query, to_float, schema):
    # USE RANGES
    _min = coalesce(domain.min, MIN(domain.partitions.min))
    _max = coalesce(domain.max, MAX(domain.partitions.max))

    output = Aggs()
    if edge.allowNulls:
        # docs with no value, or a value outside [_min, _max), go to "_missing"
        output.add(FilterAggs(
            "_missing",
            NotOp(AndOp([
                edge.value.exists(),
                GteOp([edge.value, Literal(to_float(_min))]),
                LtOp([edge.value, Literal(to_float(_max))])
            ]).partial_eval()),
            self
        ).add(es_query))

    if is_op(edge.value, Variable):
        calc = {"field": first(schema.leaves(edge.value.var)).es_column}
    else:
        # general expression: aggregate over a painless script
        calc = {"script": text_type(Painless[edge.value].to_es_script(schema))}
    calc['ranges'] = [{"from": to_float(p.min), "to": to_float(p.max)} for p in domain.partitions]

    return output.add(RangeAggs("_match", calc, self).add(es_query))
def __new__(cls, terms):
    """Simplify `var IN <single literal>` to an EqOp; otherwise construct normally."""
    lhs_is_var = is_op(terms[0], Variable)
    rhs_is_lit = is_op(terms[1], Literal)
    if lhs_is_var and rhs_is_lit:
        name, value = terms
        if not is_many(value.value):
            # membership in a one-element set is just equality
            return EqOp([name, Literal([value.value])])
    return object.__new__(cls)
def _normalize_select(select, frum, schema=None):
    """
    :param select: ONE SELECT COLUMN
    :param frum: TABLE TO get_columns()
    :param schema: SCHEMA TO LOOKUP NAMES FOR DEFINITIONS
    :return: AN ARRAY OF SELECT COLUMNS
    """
    if not _Column:
        _late_import()

    if is_text(select):
        canonical = select = Data(value=select)
    else:
        select = wrap(select)
        canonical = select.copy()

    canonical.aggregate = coalesce(canonical_aggregates[select.aggregate].name, select.aggregate, "none")
    canonical.default = coalesce(select.default, canonical_aggregates[canonical.aggregate].default)

    if hasattr(unwrap(frum), "_normalize_select"):
        # container knows how to normalize its own selects
        return frum._normalize_select(canonical)

    output = []

    if not select.value or select.value == ".":
        # select everything: one clause per leaf
        output.extend([
            set_default(
                {
                    "name": c.name,
                    "value": jx_expression(c.name, schema=schema)
                },
                canonical
            )
            for c in frum.get_leaves()
        ])
    elif is_text(select.value):
        if select.value.endswith(".*"):
            canonical.name = coalesce(select.name, ".")
            # BUG FIX: strip ".*" from the value STRING (was `select[:-2]`,
            # which sliced the Data object instead of the value)
            value = jx_expression(select.value[:-2], schema=schema)
            if not is_op(value, Variable):
                Log.error("`*` over general expression not supported yet")
            # BUG FIX: extend (was append), so output stays a flat list of selects
            output.extend([
                set_default(
                    {
                        "value": LeavesOp(value, prefix=select.prefix),
                        "format": "dict"  # MARKUP FOR DECODING
                    },
                    canonical
                )
                for c in frum.get_columns()
                if c.jx_type not in STRUCT
            ])
        else:
            Log.error("do not know what to do")
    else:
        canonical.name = coalesce(select.name, select.value, select.aggregate)
        canonical.value = jx_expression(select.value, schema=schema)
        output.append(canonical)

    output = wrap(output)
    if any(n == None for n in output.name):
        Log.error("expecting select to have a name: {{select}}", select=select)
    return output
def es_deepop(es, query):
    """Run a jx query over nested ('deep') documents: build the ES query with
    inner_hits, optionally issue a second query for parents with no nested
    docs, then stream the flattened hits through the requested formatter."""
    schema = query.frum.schema
    query_path = schema.query_path[0]

    # TODO: FIX THE GREAT SADNESS CAUSED BY EXECUTING post_expressions
    # THE EXPRESSIONS SHOULD BE PUSHED TO THE CONTAINER:  ES ALLOWS
    # {"inner_hit":{"script_fields":[{"script":""}...]}}, BUT THEN YOU
    # LOOSE "_source" BUT GAIN "fields", FORCING ALL FIELDS TO BE EXPLICIT
    post_expressions = {}
    es_query, es_filters = es_query_template(query_path)

    # SPLIT WHERE CLAUSE BY DEPTH
    wheres = split_expression_by_depth(query.where, schema)
    for f, w in zip_longest(es_filters, wheres):
        script = ES52[AndOp(w)].partial_eval().to_esfilter(schema)
        set_default(f, script)

    if not wheres[1]:
        # INCLUDE DOCS WITH NO NESTED DOCS
        more_filter = {
            "bool": {
                "filter": [AndOp(wheres[0]).partial_eval().to_esfilter(schema)],
                "must_not": {
                    "nested": {
                        "path": query_path,
                        "query": {
                            "match_all": {}
                        }
                    }
                }
            }
        }
    else:
        more_filter = None

    es_query.size = coalesce(query.limit, DEFAULT_LIMIT)

    # es_query.sort = jx_sort_to_es_sort(query.sort)
    map_to_es_columns = schema.map_to_es()
    # {c.name: c.es_column for c in schema.leaves(".")}
    query_for_es = query.map(map_to_es_columns)
    es_query.sort = jx_sort_to_es_sort(query_for_es.sort, schema)

    es_query.stored_fields = []

    is_list = is_list_(query.select)
    selects = wrap([unwrap(s.copy()) for s in listwrap(query.select)])
    new_select = FlatList()

    put_index = 0
    for select in selects:
        if is_op(select.value, LeavesOp) and is_op(select.value.term, Variable):
            # IF THERE IS A *, THEN INSERT THE EXTRA COLUMNS
            leaves = schema.leaves(select.value.term.var)
            col_names = set()
            for c in leaves:
                if c.nested_path[0] == ".":
                    if c.jx_type == NESTED:
                        continue
                    es_query.stored_fields += [c.es_column]
                c_name = untype_path(relative_field(c.name, query_path))
                col_names.add(c_name)
                new_select.append({
                    "name": concat_field(select.name, c_name),
                    "nested_path": c.nested_path[0],
                    "put": {"name": concat_field(select.name, literal_field(c_name)), "index": put_index, "child": "."},
                    "pull": get_pull_function(c)
                })
                put_index += 1

            # REMOVE DOTS IN PREFIX IF NAME NOT AMBIGUOUS
            for n in new_select:
                if n.name.startswith("..") and n.name.lstrip(".") not in col_names:
                    n.put.name = n.name = n.name.lstrip(".")
                    col_names.add(n.name)
        elif is_op(select.value, Variable):
            net_columns = schema.leaves(select.value.var)
            if not net_columns:
                # column not found: select null
                new_select.append({
                    "name": select.name,
                    "nested_path": ".",
                    "put": {"name": select.name, "index": put_index, "child": "."},
                    "pull": NULL
                })
            else:
                for n in net_columns:
                    pull = get_pull_function(n)
                    if n.nested_path[0] == ".":
                        if n.jx_type == NESTED:
                            continue
                        es_query.stored_fields += [n.es_column]

                    # WE MUST FIGURE OUT WHICH NAMESSPACE s.value.var IS USING SO WE CAN EXTRACT THE child
                    for np in n.nested_path:
                        c_name = untype_path(relative_field(n.name, np))
                        if startswith_field(c_name, select.value.var):
                            child = relative_field(c_name, select.value.var)
                            break
                    else:
                        continue
                        # REMOVED BECAUSE SELECTING INNER PROPERTIES IS NOT ALLOWED
                        # child = relative_field(untype_path(relative_field(n.name, n.nested_path[0])), s.value.var)

                    new_select.append({
                        "name": select.name,
                        "pull": pull,
                        "nested_path": n.nested_path[0],
                        "put": {
                            "name": select.name,
                            "index": put_index,
                            "child": child
                        }
                    })
            put_index += 1
        else:
            # general expression: evaluate client-side after the fetch
            expr = select.value
            for v in expr.vars():
                for c in schema[v.var]:
                    if c.nested_path[0] == ".":
                        es_query.stored_fields += [c.es_column]
                    # else:
                    #     Log.error("deep field not expected")

            pull_name = EXPRESSION_PREFIX + select.name
            map_to_local = MapToLocal(schema)
            pull = jx_expression_to_function(pull_name)
            post_expressions[pull_name] = jx_expression_to_function(expr.map(map_to_local))

            new_select.append({
                "name": select.name if is_list else ".",
                "pull": pull,
                "value": expr.__data__(),
                "put": {"name": select.name, "index": put_index, "child": "."}
            })
            put_index += 1

    # <COMPLICATED> ES needs two calls to get all documents
    more = []

    def get_more(please_stop):
        more.append(es_post(
            es,
            Data(
                query=more_filter,
                stored_fields=es_query.stored_fields
            ),
            query.limit
        ))

    if more_filter:
        # fetch the no-nested-docs parents concurrently
        need_more = Thread.run("get more", target=get_more)

    with Timer("call to ES") as call_timer:
        data = es_post(es, es_query, query.limit)

    # EACH A HIT IS RETURNED MULTIPLE TIMES FOR EACH INNER HIT, WITH INNER HIT INCLUDED
    def inners():
        for t in data.hits.hits:
            for i in t.inner_hits[literal_field(query_path)].hits.hits:
                t._inner = i._source
                for k, e in post_expressions.items():
                    t[k] = e(t)
                yield t
        if more_filter:
            Thread.join(need_more)
            for t in more[0].hits.hits:
                yield t
    # </COMPLICATED>

    try:
        formatter, groupby_formatter, mime_type = format_dispatch[query.format]

        output = formatter(inners(), new_select, query)
        output.meta.timing.es = call_timer.duration
        output.meta.content_type = mime_type
        output.meta.es_query = es_query
        return output
    except Exception as e:
        Log.error("problem formatting", e)
def __eq__(self, other):
    """Equal when `other` names the same variable (as Variable or raw text)."""
    if is_op(other, Variable):
        return self.var == other.var
    if is_text(other):
        return self.var == other
    return False
def __eq__(self, other):
    """Structural equality: another EqOp with matching sides."""
    if not is_op(other, EqOp):
        return False
    return self.lhs == other.lhs and self.rhs == other.rhs
def _accumulate_nested(rows, row, nested_doc_details, parent_doc_id, parent_id_coord):
    """
    :param rows: REVERSED STACK OF ROWS (WITH push() AND pop())
    :param row: CURRENT ROW BEING EXTRACTED
    :param nested_doc_details: {
            "nested_path": wrap_nested_path(nested_path),
            "index_to_column": map from column number to column details
            "children": all possible direct decedents' nested_doc_details
         }
    :param parent_doc_id: the id of the parent doc (for detecting when to step out of loop)
    :param parent_id_coord: the column number for the parent id (so we ca extract from each row)
    :return: the nested property (usually an array)
    """
    previous_doc_id = None
    doc = Null
    output = []
    id_coord = nested_doc_details['id_coord']

    while True:
        doc_id = row[id_coord]

        if doc_id == None or (parent_id_coord is not None and row[parent_id_coord] != parent_doc_id):
            rows.append(row)  # UNDO PREVIOUS POP (RECORD IS NOT A NESTED RECORD OF parent_doc)
            return output

        if doc_id != previous_doc_id:
            previous_doc_id = doc_id
            doc = Null
            curr_nested_path = nested_doc_details['nested_path'][0]
            index_to_column = nested_doc_details['index_to_column'].items()
            for i, c in index_to_column:
                value = row[i]
                # BUG FIX: local was named `relative_field`, shadowing the
                # module-level relative_field() function and making the call
                # below a TypeError (calling a string); renamed to rel_field
                if is_list(query.select) or is_op(query.select.value, LeavesOp):
                    # ASSIGN INNER PROPERTIES
                    rel_field = concat_field(c.push_name, c.push_child)
                else:
                    # FACT IS EXPECTED TO BE A SINGLE VALUE, NOT AN OBJECT
                    rel_field = c.push_child

                if rel_field == ".":
                    if exists(value):
                        doc = value
                elif exists(value):
                    if doc is Null:
                        doc = Data()
                    doc[rel_field] = value

        for child_details in nested_doc_details['children']:
            # EACH NESTED TABLE MUST BE ASSEMBLED INTO A LIST OF OBJECTS
            child_id = row[child_details['id_coord']]
            if child_id is not None:
                nested_value = _accumulate_nested(rows, row, child_details, doc_id, id_coord)
                if nested_value != None:
                    push_name = child_details['nested_path'][0]
                    if is_list(query.select) or is_op(query.select.value, LeavesOp):
                        # ASSIGN INNER PROPERTIES
                        rel_field = relative_field(push_name, curr_nested_path)
                    else:
                        # FACT IS EXPECTED TO BE A SINGLE VALUE, NOT AN OBJECT
                        rel_field = "."

                    if rel_field == ".":
                        doc = unwraplist(nested_value)
                    else:
                        doc[rel_field] = unwraplist(nested_value)

        output.append(doc)

        try:
            row = rows.pop()
        except IndexError:
            return output
def to_esfilter(self, schema):
    """Render as an ES filter; a plain variable becomes a term filter on True."""
    if not is_op(self.term, Variable_):
        # fall back to a painless script filter
        return self.to_es_script(schema).to_esfilter(schema)
    return {"term": {self.term.var: True}}
def to_esfilter(self, schema):
    """
    Render `lhs != rhs` as an ES filter.

    Only a plain variable compared to a literal can use a native
    negated term filter; anything else falls back to a painless script.
    """
    if not is_op(self.lhs, Variable_) or not is_literal(self.rhs):
        return self.to_es_script(schema).to_esfilter(schema)
    # BUG FIX: the term filter must compare against the literal's VALUE;
    # it previously embedded self.rhs.to_esfilter(schema) (a filter dict)
    return es_not({"term": {self.lhs.var: self.rhs.value}})
def partial_eval(self):
    """Simplify this missing-test; keep reducing unless it stays a MissingOp."""
    missing = self.lang[self.expr].partial_eval().missing()
    if is_op(missing, MissingOp):
        return missing
    return missing.partial_eval()
def run(query, container=Null):
    """
    THIS FUNCTION IS SIMPLY SWITCHING BASED ON THE query["from"] CONTAINER,
    BUT IT IS ALSO PROCESSING A list CONTAINER; SEPARATE TO A ListContainer
    """
    if container == None:
        # no container given: pull it from the query itself
        container = wrap(query)["from"]
        query_op = QueryOp.wrap(query, container=container, namespace=container.schema)
    else:
        query_op = QueryOp.wrap(query, container, container.namespace)

    if container == None:
        from jx_python.containers.list_usingPythonList import DUAL

        return DUAL.query(query_op)
    elif isinstance(container, Container):
        return container.query(query_op)
    elif is_many(container):
        # plain iterable of rows: materialize and fall through to list processing
        container = wrap(list(container))
    elif isinstance(container, Cube):
        if is_aggs(query_op):
            return cube_aggs(container, query_op)
    elif is_op(container, QueryOp):
        # the source is itself a query: run it first
        container = run(container)
    elif is_data(container):
        # the "container" is a whole query in data form
        query = container
        container = query["from"]
        container = run(QueryOp.wrap(query, container, container.namespace), container)
    else:
        Log.error(
            "Do not know how to handle {{type}}",
            type=container.__class__.__name__
        )

    if is_aggs(query_op):
        container = list_aggs(container, query_op)
    else:  # SETOP
        # NOTE: filter/sort/select here are the jx list-processing helpers
        if query_op.where is not TRUE:
            container = filter(container, query_op.where)
        if query_op.sort:
            container = sort(container, query_op.sort, already_normalized=True)
        if query_op.select:
            container = select(container, query_op.select)

    if query_op.window:
        if isinstance(container, Cube):
            container = list(container.values())
        for param in query_op.window:
            window(container, param)

    # AT THIS POINT frum IS IN LIST FORMAT, NOW PACKAGE RESULT
    if query_op.format == "cube":
        container = convert.list2cube(container)
    elif query_op.format == "table":
        container = convert.list2table(container)
        container.meta.format = "table"
    else:
        container = wrap({"meta": {"format": "list"}, "data": container})

    return container
def __eq__(self, other):
    """Equal to another MissingOp over the same expression."""
    return is_op(other, MissingOp) and self.expr == other.expr
def __eq__(self, other):
    """Equal to another OrOp with pairwise-equal terms, in order."""
    if not is_op(other, OrOp):
        return False
    if len(self.terms) != len(other.terms):
        return False
    return all(mine == theirs for mine, theirs in zip(self.terms, other.terms))
def __new__(cls, e=None, query=None, *args, **kwargs):
    """Factory: pick the concrete decoder subclass for edge `e`, based on the
    edge's value expression and domain type; may also normalize e.domain."""
    e.allowNulls = coalesce(e.allowNulls, True)

    if e.value and e.domain.type == "default":
        # if query.groupby:
        #     return object.__new__(DefaultDecoder, e)

        if is_text(e.value):
            Log.error("Expecting Variable or Expression, not plain string")

        if is_op(e.value, LeavesOp):
            return object.__new__(ObjectDecoder)
        elif is_op(e.value, TupleOp):
            # THIS domain IS FROM A dimension THAT IS A SIMPLE LIST OF fields
            # JUST PULL THE FIELDS
            if not all(is_op(t, Variable) for t in e.value.terms):
                Log.error("Can only handle variables in tuples")
            e.domain = Data(dimension={"fields": e.value.terms})
            return object.__new__(DimFieldListDecoder)
        elif is_op(e.value, Variable):
            schema = query.frum.schema
            cols = schema.leaves(e.value.var)
            if not cols:
                return object.__new__(DefaultDecoder)
            if len(cols) != 1:
                # multiple columns share this name: treat as object
                return object.__new__(ObjectDecoder)
            col = first(cols)
            limit = coalesce(e.domain.limit, query.limit, DEFAULT_LIMIT)

            if col.cardinality == None:
                # column metadata not collected yet
                DEBUG and Log.warning(
                    "metadata for column {{name|quote}} (id={{id}}) is not ready",
                    name=concat_field(col.es_index, col.es_column),
                    id=id(col)
                )
                if unnest_path(e.value.var) in KNOWN_MULTITYPES:
                    Log.warning("{{var}} is not multivalued", var=e.value.var)
                    return object.__new__(MultivalueDecoder)
                e.domain = set_default(DefaultDomain(limit=limit), e.domain.__data__())
                return object.__new__(DefaultDecoder)
            elif col.multi <= 1 and col.partitions == None:
                if unnest_path(e.value.var) in KNOWN_MULTITYPES:
                    Log.warning("{{var}} is not multivalued", var=e.value.var)
                    return object.__new__(MultivalueDecoder)
                e.domain = set_default(DefaultDomain(limit=limit), e.domain.__data__())
                return object.__new__(DefaultDecoder)
            else:
                DEBUG and Log.note("id={{id}} has parts!!!", id=id(col))
                if col.multi > 1:
                    return object.__new__(MultivalueDecoder)

                # known partitions: build an explicit set domain
                partitions = col.partitions[:limit:]
                if e.domain.sort == -1:
                    partitions = list(reversed(sorted(partitions)))
                else:
                    partitions = sorted(partitions)
                e.domain = SimpleSetDomain(partitions=partitions, limit=limit)
        else:
            return object.__new__(DefaultDecoder)

    if e.value and e.domain.type in PARTITION:
        return object.__new__(SetDecoder)
    if isinstance(e.domain.dimension, Dimension):
        e.domain = e.domain.dimension.getDomain()
        return object.__new__(SetDecoder)
    if e.value and e.domain.type == "time":
        return object.__new__(TimeDecoder)
    if e.range:
        return object.__new__(GeneralRangeDecoder)
    if e.value and e.domain.type == "duration":
        return object.__new__(DurationDecoder)
    elif e.value and e.domain.type == "range":
        return object.__new__(RangeDecoder)
    elif not e.value and e.domain.dimension.fields:
        # THIS domain IS FROM A dimension THAT IS A SIMPLE LIST OF fields
        # JUST PULL THE FIELDS
        fields = e.domain.dimension.fields
        if is_data(fields):
            Log.error("No longer allowed: All objects are expressions")
        else:
            return object.__new__(DimFieldListDecoder)
    elif not e.value and all(e.domain.partitions.where):
        return object.__new__(GeneralSetDecoder)
    else:
        Log.error("domain type of {{type}} is not supported yet", type=e.domain.type)
def __data__(self):
    """JSON-serializable form; compact {var: length} when both are simple."""
    if is_op(self.value, Variable) and is_literal(self.length):
        return {"left": {self.value.var: self.length.value}}
    return {"left": [self.value.__data__(), self.length.__data__()]}
sql_name = "e" + text(edge_index) + "c" + text(column_index) edge_names.append(sql_name) num_sql_columns = len(index_to_column) if not query_edge.value and any(query_edge.domain.partitions.where): def __(parts, num_sql_columns): def _get(row): return parts[row[num_sql_columns]].name return _get pull = __(query_edge.domain.partitions, num_sql_columns) else: pull = get_column(num_sql_columns) if is_op(query_edge.value, TupleOp): query_edge.allowNulls = False push_child = column_index num_push_columns = len(query_edge.value.terms) else: push_child = "." num_push_columns = None index_to_column[num_sql_columns] = ColumnMapping( is_edge=True, push_name=query_edge.name, push_column_name=query_edge.name, push_column=edge_index, num_push_columns=num_push_columns, push_child=push_child, # CAN NOT HANDLE TUPLES IN COLUMN pull=pull,
def __eq__(self, other):
    """Equal to another BasicEqOp with matching sides."""
    if not is_op(other, BasicEqOp):
        return False
    return self.lhs == other.lhs and self.rhs == other.rhs
def _set_op(self, query):
    """
    Translate a jx set-operation (plain SELECT, no grouping) into SQL over the
    snowflake of tables, execute it, then reassemble the flat result rows into
    (possibly nested) documents and format them as cube/table/list.

    :param query: jx query with .select, .where, .sort, .limit, .format
    :return: Data() holding the formatted result
    """
    # GET LIST OF SELECTED COLUMNS
    vars_ = UNION([
        v.var
        for select in listwrap(query.select)
        for v in select.value.vars()
    ])
    schema = self.schema
    known_vars = schema.keys()

    # GROUP THE SELECTED COLUMNS BY THE NESTED PATH (TABLE) THEY LIVE IN
    active_columns = {".": set()}
    for v in vars_:
        for c in schema.leaves(v):
            nest = c.nested_path[0]
            active_columns.setdefault(nest, set()).add(c)

    # ANY VARS MENTIONED WITH NO COLUMNS?
    # INVENT A NULL COLUMN SO THE SELECT STILL PRODUCES A (NULL) VALUE
    for v in vars_:
        if not any(startswith_field(cname, v) for cname in known_vars):
            active_columns["."].add(Column(
                name=v,
                jx_type=IS_NULL,
                es_column=".",
                es_index=".",
                es_type='NULL',
                nested_path=["."],
                last_updated=Date.now()
            ))

    # EVERY COLUMN, AND THE INDEX IT TAKES UP
    index_to_column = {}  # MAP FROM INDEX TO COLUMN (OR SELECT CLAUSE)
    index_to_uid = {}  # FROM NESTED PATH TO THE INDEX OF UID
    sql_selects = []  # EVERY SELECT CLAUSE (NOT TO BE USED ON ALL TABLES, OF COURSE)
    # ONE SHORT SQL ALIAS (__a__, __b__, ...) PER NESTED PATH
    nest_to_alias = {
        nested_path[0]: "__" + unichr(ord('a') + i) + "__"
        for i, nested_path in enumerate(self.snowflake.query_paths)
    }

    sorts = []
    if query.sort:
        for select in query.sort:
            col = SQLang[select.value].to_sql(schema)[0]
            for t, sql in col.sql.items():
                json_type = sql_type_to_json_type[t]
                if json_type in STRUCT:
                    continue
                column_number = len(sql_selects)
                # SQL HAS ABS TABLE REFERENCE
                column_alias = _make_column_name(column_number)
                sql_selects.append(sql_alias(sql, column_alias))
                if select.sort == -1:
                    # NULLS SORT FIRST IN EITHER DIRECTION
                    sorts.append(quote_column(column_alias) + SQL_IS_NULL)
                    sorts.append(quote_column(column_alias) + " DESC")
                else:
                    sorts.append(quote_column(column_alias) + SQL_IS_NULL)
                    sorts.append(quote_column(column_alias))

    primary_doc_details = Data()
    # EVERY SELECT STATEMENT THAT WILL BE REQUIRED, NO MATTER THE DEPTH
    # WE WILL CREATE THEM ACCORDING TO THE DEPTH REQUIRED
    nested_path = []
    for step, sub_table in self.snowflake.tables:
        nested_path.insert(0, step)
        nested_doc_details = {
            "sub_table": sub_table,
            "children": [],
            "index_to_column": {},
            "nested_path": nested_path
        }

        # INSERT INTO TREE
        if not primary_doc_details:
            primary_doc_details = nested_doc_details
        else:
            def place(parent_doc_details):
                # RECURSIVELY FIND THE DEEPEST PARENT THAT PREFIXES step
                if startswith_field(step, parent_doc_details['nested_path'][0]):
                    for c in parent_doc_details['children']:
                        if place(c):
                            return True
                    parent_doc_details['children'].append(nested_doc_details)

            place(primary_doc_details)

        alias = nested_doc_details['alias'] = nest_to_alias[step]

        # WE ALWAYS ADD THE UID
        column_number = index_to_uid[step] = nested_doc_details['id_coord'] = len(sql_selects)
        sql_select = quote_column(alias, UID)
        sql_selects.append(sql_alias(sql_select, _make_column_name(column_number)))
        if step != ".":
            # ID AND ORDER FOR CHILD TABLES
            index_to_column[column_number] = ColumnMapping(
                sql=sql_select,
                type="number",
                nested_path=nested_path,
                column_alias=_make_column_name(column_number)
            )
            column_number = len(sql_selects)
            sql_select = quote_column(alias, ORDER)
            sql_selects.append(sql_alias(sql_select, _make_column_name(column_number)))
            index_to_column[column_number] = ColumnMapping(
                sql=sql_select,
                type="number",
                nested_path=nested_path,
                column_alias=_make_column_name(column_number)
            )

        # WE DO NOT NEED DATA FROM TABLES WE REQUEST NOTHING FROM
        if step not in active_columns:
            continue

        # ADD SQL SELECT COLUMNS FOR EACH jx SELECT CLAUSE
        si = 0
        for select in listwrap(query.select):
            try:
                column_number = len(sql_selects)
                select.pull = get_column(column_number)
                db_columns = SQLang[select.value].partial_eval().to_sql(schema)

                for column in db_columns:
                    for t, unsorted_sql in column.sql.items():
                        json_type = sql_type_to_json_type[t]
                        if json_type in STRUCT:
                            continue
                        column_number = len(sql_selects)
                        column_alias = _make_column_name(column_number)
                        sql_selects.append(sql_alias(unsorted_sql, column_alias))
                        if startswith_field(schema.path, step) and is_op(select.value, LeavesOp):
                            # ONLY FLATTEN primary_nested_path AND PARENTS, NOT CHILDREN
                            index_to_column[column_number] = nested_doc_details['index_to_column'][column_number] = ColumnMapping(
                                push_name=literal_field(get_property_name(concat_field(select.name, column.name))),
                                push_child=".",
                                push_column_name=get_property_name(concat_field(select.name, column.name)),
                                push_column=si,
                                pull=get_column(column_number),
                                sql=unsorted_sql,
                                type=json_type,
                                column_alias=column_alias,
                                nested_path=nested_path
                            )
                            si += 1
                        else:
                            index_to_column[column_number] = nested_doc_details['index_to_column'][column_number] = ColumnMapping(
                                push_name=select.name,
                                push_child=column.name,
                                push_column_name=select.name,
                                push_column=si,
                                pull=get_column(column_number),
                                sql=unsorted_sql,
                                type=json_type,
                                column_alias=column_alias,
                                nested_path=nested_path
                            )
            finally:
                # si ADVANCES ONCE PER SELECT CLAUSE, EVEN ON ERROR
                si += 1

    where_clause = BooleanOp(query.where).partial_eval().to_sql(schema, boolean=True)[0].sql.b
    unsorted_sql = self._make_sql_for_one_nest_in_set_op(".", sql_selects, where_clause, active_columns, index_to_column)

    # PARENT ROWS MUST COME BEFORE THEIR CHILD ROWS: SORT BY EVERY UID
    for n, _ in self.snowflake.tables:
        sorts.append(quote_column(COLUMN + text(index_to_uid[n])))

    ordered_sql = ConcatSQL(
        SQL_SELECT, SQL_STAR,
        SQL_FROM, sql_iso(unsorted_sql),
        SQL_ORDERBY, sql_list(sorts),
        SQL_LIMIT, quote_value(query.limit)
    )
    result = self.db.query(ordered_sql)

    def _accumulate_nested(rows, row, nested_doc_details, parent_doc_id, parent_id_coord):
        """
        :param rows: REVERSED STACK OF ROWS (WITH push() AND pop())
        :param row: CURRENT ROW BEING EXTRACTED
        :param nested_doc_details: {
            "nested_path": wrap_nested_path(nested_path),
            "index_to_column": map from column number to column details
            "children": all possible direct decedents' nested_doc_details
        }
        :param parent_doc_id: the id of the parent doc (for detecting when to step out of loop)
        :param parent_id_coord: the column number for the parent id (so we ca extract from each row)
        :return: the nested property (usually an array)
        """
        previous_doc_id = None
        doc = Null
        output = []
        id_coord = nested_doc_details['id_coord']

        while True:
            doc_id = row[id_coord]

            if doc_id == None or (parent_id_coord is not None and row[parent_id_coord] != parent_doc_id):
                rows.append(row)  # UNDO PREVIOUS POP (RECORD IS NOT A NESTED RECORD OF parent_doc)
                return output

            if doc_id != previous_doc_id:
                previous_doc_id = doc_id
                doc = Null
                curr_nested_path = nested_doc_details['nested_path'][0]
                index_to_column = nested_doc_details['index_to_column'].items()
                for i, c in index_to_column:
                    value = row[i]
                    if is_list(query.select) or is_op(query.select.value, LeavesOp):
                        # ASSIGN INNER PROPERTIES
                        relative_field = concat_field(c.push_name, c.push_child)
                    else:
                        # FACT IS EXPECTED TO BE A SINGLE VALUE, NOT AN OBJECT
                        relative_field = c.push_child

                    if relative_field == ".":
                        if exists(value):
                            doc = value
                    elif exists(value):
                        if doc is Null:
                            doc = Data()
                        doc[relative_field] = value

            for child_details in nested_doc_details['children']:
                # EACH NESTED TABLE MUST BE ASSEMBLED INTO A LIST OF OBJECTS
                child_id = row[child_details['id_coord']]
                if child_id is not None:
                    nested_value = _accumulate_nested(rows, row, child_details, doc_id, id_coord)
                    if nested_value != None:
                        push_name = child_details['nested_path'][0]
                        if is_list(query.select) or is_op(query.select.value, LeavesOp):
                            # ASSIGN INNER PROPERTIES
                            # NOTE(review): `relative_field` was bound to a STRING above, so this
                            # call looks like it would raise TypeError; presumably the module-level
                            # relative_field() function was intended — confirm before relying on
                            # this branch (multi-select over nested children).
                            relative_field = relative_field(push_name, curr_nested_path)
                        else:
                            # FACT IS EXPECTED TO BE A SINGLE VALUE, NOT AN OBJECT
                            relative_field = "."

                        if relative_field == ".":
                            doc = unwraplist(nested_value)
                        else:
                            doc[relative_field] = unwraplist(nested_value)

            output.append(doc)

            try:
                row = rows.pop()
            except IndexError:
                return output

    cols = tuple([i for i in index_to_column.values() if i.push_name != None])
    rows = list(reversed(unwrap(result.data)))
    if rows:
        row = rows.pop()
        data = _accumulate_nested(rows, row, primary_doc_details, None, None)
    else:
        data = result.data

    if query.format == "cube":
        # NOTE(review): a large block of commented-out legacy cube-formatting code
        # (per-table pull()-based assembly) was removed here; see VCS history.
        if is_list(query.select) or is_op(query.select.value, LeavesOp):
            # ONE COLUMN PER SELECTED NAME, EACH A num_rows ARRAY
            num_rows = len(data)
            temp_data = {c.push_column_name: [None] * num_rows for c in cols}
            for rownum, d in enumerate(data):
                for c in cols:
                    temp_data[c.push_column_name][rownum] = d[c.push_name]
            return Data(
                meta={"format": "cube"},
                data=temp_data,
                edges=[{
                    "name": "rownum",
                    "domain": {"type": "rownum", "min": 0, "max": num_rows, "interval": 1}
                }]
            )
        else:
            # SINGLE-VALUE FACT: THE WHOLE data LIST IS THE ONE COLUMN
            num_rows = len(data)
            map_index_to_name = {c.push_column: c.push_column_name for c in cols}
            temp_data = [data]
            return Data(
                meta={"format": "cube"},
                data={n: temp_data[c] for c, n in map_index_to_name.items()},
                edges=[{
                    "name": "rownum",
                    "domain": {"type": "rownum", "min": 0, "max": num_rows, "interval": 1}
                }]
            )
    elif query.format == "table":
        # NOTE(review): a large block of commented-out legacy table-formatting code
        # was removed here; see VCS history.
        if is_list(query.select) or is_op(query.select.value, LeavesOp):
            column_names = [None] * (max(c.push_column for c in cols) + 1)
            for c in cols:
                column_names[c.push_column] = c.push_column_name
            temp_data = []
            for rownum, d in enumerate(data):
                row = [None] * len(column_names)
                for c in cols:
                    row[c.push_column] = d[c.push_name]
                temp_data.append(row)
            return Data(meta={"format": "table"}, header=column_names, data=temp_data)
        else:
            column_names = listwrap(query.select).name
            return Data(meta={"format": "table"}, header=column_names, data=[[d] for d in data])
    else:
        # DEFAULT "list" FORMAT
        # NOTE(review): a large block of commented-out legacy list-formatting code
        # was removed here; see VCS history.
        if is_list(query.select) or is_op(query.select.value, LeavesOp):
            temp_data = []
            for rownum, d in enumerate(data):
                row = {}
                for c in cols:
                    row[c.push_column_name] = d[c.push_name]
                temp_data.append(row)
            return Data(meta={"format": "list"}, data=temp_data)
        else:
            return Data(meta={"format": "list"}, data=data)
def __eq__(self, other):
    """
    Equal when ``other`` is a CoalesceOp with pairwise-equal terms.

    Always returns a bool: the original implicitly returned None when
    ``other`` was not a CoalesceOp, which is a poor __eq__ contract and
    inconsistent with the sibling operators' __eq__ methods.
    """
    if not is_op(other, CoalesceOp):
        return False
    if len(self.terms) != len(other.terms):
        return False
    return all(s == o for s, o in zip(self.terms, other.terms))
def get_selects(query):
    """
    Split the jx select clauses into per-nested-path ES select operations and
    build the machinery to pull values back out of ES responses.

    :param query: jx query whose frum.schema describes the nested paths
    :return: (new_select, split_select, row_iterator) where
             new_select - FlatList of {"name", "value", "put", "pull"} clauses
             split_select - OrderedDict mapping nested path -> ESSelectOp
             row_iterator - generator factory over the ES hits (see inners())
    """
    schema = query.frum.schema
    query_level = len(schema.query_path)
    query_path = schema.query_path[0]
    # SPLIT select INTO ES_SELECT AND RESULTSET SELECT
    split_select = OrderedDict((p, ESSelectOp(p)) for p in schema.query_path)

    def expand_split_select(c_nested_path):
        # GET (OR PREPEND) THE ESSelectOp FOR A NESTED PATH, KEEPING
        # split_select ORDERED DEEPEST-FIRST
        es_select = split_select.get(c_nested_path)
        if not es_select:
            temp = [(k, v) for k, v in split_select.items()]
            split_select.clear()
            split_select.update({c_nested_path: ESSelectOp(c_nested_path)})
            split_select.update(temp)
        return split_select[c_nested_path]

    new_select = FlatList()
    post_expressions = {}

    selects = list_to_data([unwrap(s.copy()) for s in listwrap(query.select)])

    # WHAT PATH IS _source USED, IF ANY?
    for select in selects:
        # IF THERE IS A *, THEN INSERT THE EXTRA COLUMNS
        if is_op(select.value, LeavesOp) and is_op(select.value.term, Variable):
            term = select.value.term
            leaves = schema.leaves(term.var)
            if any(c.jx_type == NESTED for c in leaves):
                split_select["."].source_path = "."
        elif is_op(select.value, Variable):
            for selected_column in schema.values(select.value.var, exclude_type=(OBJECT, EXISTS)):
                if selected_column.jx_type == NESTED:
                    expand_split_select(selected_column.es_column).source_path = selected_column.es_column
                    continue
                leaves = schema.leaves(selected_column.es_column)
                for c in leaves:
                    if c.jx_type == NESTED:
                        split_select[c.es_column].source_path = c.es_column

    # IF WE GET THE SOURCE FOR PARENT, WE ASSUME WE GOT SOURCE FOR CHILD
    source_path = None
    source_level = 0
    for level, es_select in enumerate(reversed(list(split_select.values()))):
        if source_path:
            es_select.source_path = source_path
        elif es_select.source_path:
            source_level = level + 1
            source_path = es_select.source_path

    def get_pull_source(c):
        # BUILD A FUNCTION THAT EXTRACTS COLUMN c FROM AN ASSEMBLED ROW DICT
        # (ROWS ARE KEYED BY NESTING DEPTH AS TEXT: "0", "1", ...)
        nested_path = c.nested_path
        nested_level = len(nested_path)
        pos = text(nested_level)
        if nested_level <= query_level:
            if not source_level or nested_level < source_level:
                field = join_field([pos, "fields", c.es_column])
                return jx_expression_to_function(field)
            elif nested_level == source_level:
                field = relative_field(c.es_column, nested_path[0])

                def pull_source(row):
                    return untyped(row.get(pos, Null)._source[field])
                return pull_source
            else:
                field = relative_field(c.es_column, nested_path[0])

                def pull_property(row):
                    return untyped(row.get(pos, Null)[field])
                return pull_property
        else:
            pos = text(query_level)
            if not source_level or nested_level < source_level:
                # PULL FIELDS AND THEN AGGREGATE THEM
                value = jx_expression_to_function(join_field(["fields", c.es_column]))
                name = literal_field(nested_path[0])
                index = jx_expression_to_function("_nested.offset")

                def pull_nested_field(doc):
                    hits = doc.get(pos, Null).inner_hits[name].hits.hits
                    if not hits:
                        return []
                    temp = [(index(h), value(h)) for h in hits]
                    acc = [None] * len(temp)
                    for i, v in temp:
                        acc[i] = unwraplist(v)
                    return acc
                return pull_nested_field
            else:
                # PULL SOURCES
                value = jx_expression_to_function(concat_field("_source", relative_field(c.es_column, nested_path[0])))
                name = literal_field(nested_path[0])
                index = jx_expression_to_function(join_field(["_nested"] * (len(c.nested_path) - 1) + ["offset"]))

                def pull_nested_source(doc):
                    hits = doc.get(pos, Null).inner_hits[name].hits.hits
                    if not hits:
                        return []
                    temp = [(index(h), value(h)) for h in hits]
                    acc = [None] * len(temp)
                    for i, v in temp:
                        acc[i] = untyped(v)
                    return acc
                return pull_nested_source

    put_index = 0
    for select in selects:
        # IF THERE IS A *, THEN INSERT THE EXTRA COLUMNS
        if is_op(select.value, LeavesOp) and is_op(select.value.term, Variable):
            term = select.value.term
            leaves = schema.leaves(term.var)
            for c in leaves:
                c_nested_path = c.nested_path[0]
                simple_name = relative_field(c.es_column, query_path).lstrip(".")
                name = concat_field(select.name, untype_path(simple_name))
                put_name = concat_field(select.name, literal_field(untype_path(simple_name)))
                split_select[c_nested_path].fields.append(c.es_column)
                new_select.append({
                    "name": name,
                    "value": Variable(c.es_column),
                    "put": {"name": put_name, "index": put_index, "child": "."},
                    "pull": get_pull_source(c),
                })
                put_index += 1
        elif is_op(select.value, Variable):
            if select.value.var == ".":
                # PULL ALL SOURCE
                new_select.append({
                    "name": select.name,
                    "value": select.value,
                    "put": {"name": select.name, "index": put_index, "child": "."},
                    "pull": get_pull_source(Data(es_column=query_path, nested_path=schema.query_path)),
                })
                continue
            for selected_column in schema.values(select.value.var, exclude_type=(EXISTS, OBJECT)):
                if selected_column.jx_type == NESTED:
                    new_select.append({
                        "name": select.name,
                        "value": select.value,
                        "put": {"name": select.name, "index": put_index, "child": "."},
                        "pull": get_pull_source(Data(
                            es_column=selected_column.es_column,
                            nested_path=(selected_column.es_column,) + selected_column.nested_path,
                        )),
                    })
                    continue
                leaves = schema.leaves(selected_column.es_column, exclude_type=INTERNAL)  # LEAVES OF OBJECT
                if leaves:
                    for c in leaves:
                        if c.es_column == "_id":
                            new_select.append({
                                "name": select.name,
                                "value": Variable(c.es_column),
                                "put": {"name": select.name, "index": put_index, "child": "."},
                                "pull": pull_id,
                            })
                            continue
                        c_nested_path = c.nested_path[0]
                        expand_split_select(c_nested_path).fields.append(c.es_column)
                        child = untype_path(relative_field(c.es_column, selected_column.es_column))
                        new_select.append({
                            "name": select.name,
                            "value": Variable(c.es_column),
                            "put": {"name": select.name, "index": put_index, "child": child},
                            "pull": get_pull_source(c),
                        })
                else:
                    # NO LEAVES: SELECT RESOLVES TO NULL
                    new_select.append({
                        "name": select.name,
                        "value": NULL,
                        "put": {"name": select.name, "index": put_index, "child": "."},
                    })
            put_index += 1
        else:
            # GENERAL EXPRESSION: COMPILE TO PAINLESS, SPLIT BY NESTED PATH
            op, split_scripts = split_expression_by_path(select.value, schema, lang=Painless)
            for p, script in split_scripts.items():
                es_select = split_select[p]
                es_select.scripts[select.name] = {
                    "script": text(Painless[script].partial_eval().to_es_script(schema))
                }
                new_select.append({
                    "name": select.name,
                    "pull": jx_expression_to_function(join_field([text(p), "fields", select.name])),
                    "put": {"name": select.name, "index": put_index, "child": "."},
                })
                put_index += 1

    def inners(query_path, parent_pos):
        """
        :param query_path:
        :return: ITERATOR OVER TUPLES ROWS AS TUPLES, WHERE
                 row[len(nested_path)] HAS INNER HITS
                 AND row[0] HAS post_expressions
        """
        pos = text(int(parent_pos) + 1)
        if not query_path:
            def base_case(row):
                extra = {}
                for k, e in post_expressions.items():
                    extra[k] = e(row)
                row["0"] = extra
                yield row
            return base_case

        if pos == "1":
            more = inners(query_path[:-1], "1")

            def first_case(results):
                for result in results:
                    for hit in result.hits.hits:
                        seed = {"0": None, pos: hit}
                        for row in more(seed):
                            yield row
            return first_case
        else:
            more = inners(query_path[:-1], pos)
            if source_path and source_path < query_path[-1]:
                # CHILD ROWS COME FROM THE PARENT'S _source, NOT inner_hits
                rel_path = relative_field(query_path[-1], source_path)

                def source(acc):
                    for inner_row in acc[parent_pos][rel_path]:
                        acc[pos] = inner_row
                        for tt in more(acc):
                            yield tt
                return source
            else:
                path = literal_field(query_path[-1])

                def recurse(acc):
                    hits = acc[parent_pos].inner_hits[path].hits.hits
                    if hits:
                        for inner_row in hits:
                            acc[pos] = inner_row
                            for tt in more(acc):
                                yield tt
                    else:
                        for tt in more(acc):
                            yield tt
                return recurse

    return new_select, split_select, inners(schema.query_path, "0")
def es_deepop(es, query):
    """
    Execute a jx query over nested ("deep") documents in Elasticsearch and
    format the result.

    :param es: ES cluster/index handle passed to es_post
    :param query: jx query (select/where/sort/limit/format)
    :return: formatted Data() per format_dispatch[query.format]
    :raises: Log.error on formatting problems
    """
    schema = query.frum.schema
    query_path = schema.query_path[0]

    # TODO: FIX THE GREAT SADNESS CAUSED BY EXECUTING post_expressions
    # THE EXPRESSIONS SHOULD BE PUSHED TO THE CONTAINER:  ES ALLOWS
    # {"inner_hit":{"script_fields":[{"script":""}...]}}, BUT THEN YOU
    # LOOSE "_source" BUT GAIN "fields", FORCING ALL FIELDS TO BE EXPLICIT
    post_expressions = {}
    es_query, es_filters = es_query_template(query_path)

    # SPLIT WHERE CLAUSE BY DEPTH
    wheres = split_expression_by_depth(query.where, schema)
    for f, w in zip_longest(es_filters, wheres):
        script = ES52[AndOp(w)].partial_eval().to_esfilter(schema)
        set_default(f, script)

    if not wheres[1]:
        # INCLUDE DOCS WITH NO NESTED DOCS
        more_filter = {
            "bool": {
                "filter": [AndOp(wheres[0]).partial_eval().to_esfilter(schema)],
                "must_not": {
                    "nested": {
                        "path": query_path,
                        "query": MATCH_ALL
                    }
                }
            }
        }
    else:
        more_filter = None

    es_query.size = coalesce(query.limit, DEFAULT_LIMIT)

    # es_query.sort = jx_sort_to_es_sort(query.sort)
    map_to_es_columns = schema.map_to_es()
    # {c.name: c.es_column for c in schema.leaves(".")}
    query_for_es = query.map(map_to_es_columns)
    es_query.sort = jx_sort_to_es_sort(query_for_es.sort, schema)

    es_query.stored_fields = []

    is_list = is_list_(query.select)
    selects = wrap([unwrap(s.copy()) for s in listwrap(query.select)])
    new_select = FlatList()

    put_index = 0
    for select in selects:
        if is_op(select.value, LeavesOp) and is_op(select.value.term, Variable):
            # IF THERE IS A *, THEN INSERT THE EXTRA COLUMNS
            leaves = schema.leaves(select.value.term.var)
            col_names = set()
            for c in leaves:
                if c.nested_path[0] == ".":
                    if c.jx_type == NESTED:
                        continue
                    es_query.stored_fields += [c.es_column]
                c_name = untype_path(relative_field(c.name, query_path))
                col_names.add(c_name)
                new_select.append({
                    "name": concat_field(select.name, c_name),
                    "nested_path": c.nested_path[0],
                    "put": {
                        "name": concat_field(select.name, literal_field(c_name)),
                        "index": put_index,
                        "child": "."
                    },
                    "pull": get_pull_function(c)
                })
                put_index += 1

            # REMOVE DOTS IN PREFIX IF NAME NOT AMBIGUOUS
            for n in new_select:
                if n.name.startswith("..") and n.name.lstrip(".") not in col_names:
                    n.put.name = n.name = n.name.lstrip(".")
                    col_names.add(n.name)
        elif is_op(select.value, Variable):
            net_columns = schema.leaves(select.value.var)
            if not net_columns:
                # VARIABLE HAS NO COLUMNS: SELECT IS NULL
                new_select.append({
                    "name": select.name,
                    "nested_path": ".",
                    "put": {"name": select.name, "index": put_index, "child": "."},
                    "pull": NULL
                })
            else:
                for n in net_columns:
                    pull = get_pull_function(n)
                    if n.nested_path[0] == ".":
                        if n.jx_type == NESTED:
                            continue
                        es_query.stored_fields += [n.es_column]

                    # WE MUST FIGURE OUT WHICH NAMESSPACE s.value.var IS USING SO WE CAN EXTRACT THE child
                    for np in n.nested_path:
                        c_name = untype_path(relative_field(n.name, np))
                        if startswith_field(c_name, select.value.var):
                            child = relative_field(c_name, select.value.var)
                            break
                    else:
                        continue
                        # REMOVED BECAUSE SELECTING INNER PROPERTIES IS NOT ALLOWED
                        # child = relative_field(untype_path(relative_field(n.name, n.nested_path[0])), s.value.var)
                    new_select.append({
                        "name": select.name,
                        "pull": pull,
                        "nested_path": n.nested_path[0],
                        "put": {"name": select.name, "index": put_index, "child": child}
                    })
            put_index += 1
        else:
            # GENERAL EXPRESSION: EVALUATED CLIENT-SIDE AS A post_expression
            expr = select.value
            for v in expr.vars():
                for c in schema[v.var]:
                    if c.nested_path[0] == ".":
                        es_query.stored_fields += [c.es_column]
                    # else:
                    #     Log.error("deep field not expected")

            pull_name = EXPRESSION_PREFIX + select.name
            map_to_local = MapToLocal(schema)
            pull = jx_expression_to_function(pull_name)
            post_expressions[pull_name] = jx_expression_to_function(expr.map(map_to_local))

            new_select.append({
                "name": select.name if is_list else ".",
                "pull": pull,
                "value": expr.__data__(),
                "put": {"name": select.name, "index": put_index, "child": "."}
            })
            put_index += 1

    # <COMPLICATED> ES needs two calls to get all documents
    more = []

    def get_more(please_stop):
        # SECOND CALL: DOCS WITH NO NESTED CHILDREN (runs on a worker thread)
        more.append(es_post(
            es,
            Data(query=more_filter, stored_fields=es_query.stored_fields),
            query.limit
        ))
    if more_filter:
        need_more = Thread.run("get more", target=get_more)

    with Timer("call to ES") as call_timer:
        data = es_post(es, es_query, query.limit)

    # EACH A HIT IS RETURNED MULTIPLE TIMES FOR EACH INNER HIT, WITH INNER HIT INCLUDED
    def inners():
        for t in data.hits.hits:
            for i in t.inner_hits[literal_field(query_path)].hits.hits:
                t._inner = i._source
                for k, e in post_expressions.items():
                    t[k] = e(t)
                yield t
        if more_filter:
            Thread.join(need_more)
            for t in more[0].hits.hits:
                yield t
    # </COMPLICATED>

    try:
        formatter, groupby_formatter, mime_type = format_dispatch[query.format]

        output = formatter(inners(), new_select, query)
        output.meta.timing.es = call_timer.duration
        output.meta.content_type = mime_type
        output.meta.es_query = es_query
        return output
    except Exception as e:
        Log.error("problem formatting", e)
def __new__(cls, e=None, query=None, *args, **kwargs):
    """
    Factory: pick the concrete Decoder subclass for edge ``e`` based on its
    value expression and domain type.  May mutate e.domain (filling in a
    SimpleSetDomain/DefaultDomain) as a side effect before dispatching.

    :param e: the edge being decoded
    :param query: the enclosing query (for schema and limit)
    :return: an uninitialized instance of the chosen Decoder subclass
    :raises: Log.error for unsupported edge/domain combinations
    """
    e.allowNulls = coalesce(e.allowNulls, True)

    if e.value and e.domain.type == "default":
        # if query.groupby:
        #     return object.__new__(DefaultDecoder, e)

        if is_text(e.value):
            Log.error("Expecting Variable or Expression, not plain string")

        if is_op(e.value, LeavesOp):
            return object.__new__(ObjectDecoder)
        elif is_op(e.value, TupleOp):
            # THIS domain IS FROM A dimension THAT IS A SIMPLE LIST OF fields
            # JUST PULL THE FIELDS
            if not all(is_op(t, Variable) for t in e.value.terms):
                Log.error("Can only handle variables in tuples")
            e.domain = Data(dimension={"fields": e.value.terms})
            return object.__new__(DimFieldListDecoder)
        elif is_op(e.value, Variable):
            schema = query.frum.schema
            cols = schema.leaves(e.value.var)
            if not cols:
                return object.__new__(DefaultDecoder)
            if len(cols) != 1:
                return object.__new__(ObjectDecoder)
            col = first(cols)
            limit = coalesce(e.domain.limit, query.limit, DEFAULT_LIMIT)

            if col.cardinality == None:
                # COLUMN METADATA NOT YET COLLECTED; FALL BACK TO DEFAULT DOMAIN
                DEBUG and Log.warning(
                    "metadata for column {{name|quote}} (id={{id}}) is not ready",
                    name=concat_field(col.es_index, col.es_column),
                    id=id(col)
                )
                e.domain = set_default(DefaultDomain(limit=limit), e.domain.__data__())
                return object.__new__(DefaultDecoder)
            elif col.partitions == None:
                e.domain = set_default(DefaultDomain(limit=limit), e.domain.__data__())
                return object.__new__(DefaultDecoder)
            else:
                DEBUG and Log.note("id={{id}} has parts!!!", id=id(col))
                if col.multi > 1 and len(col.partitions) < 10:
                    return object.__new__(MultivalueDecoder)

                # KNOWN PARTITIONS: BUILD A SIMPLE SET DOMAIN, THEN FALL
                # THROUGH TO THE PARTITION DISPATCH BELOW
                partitions = col.partitions[:limit:]
                if e.domain.sort == -1:
                    partitions = list(reversed(sorted(partitions)))
                else:
                    partitions = sorted(partitions)
                e.domain = SimpleSetDomain(partitions=partitions, limit=limit)
        else:
            return object.__new__(DefaultDecoder)

    if e.value and e.domain.type in PARTITION:
        return object.__new__(SetDecoder)
    if isinstance(e.domain.dimension, Dimension):
        e.domain = e.domain.dimension.getDomain()
        return object.__new__(SetDecoder)
    if e.value and e.domain.type == "time":
        return object.__new__(TimeDecoder)
    if e.range:
        return object.__new__(GeneralRangeDecoder)
    if e.value and e.domain.type == "duration":
        return object.__new__(DurationDecoder)
    elif e.value and e.domain.type == "range":
        return object.__new__(RangeDecoder)
    elif not e.value and e.domain.dimension.fields:
        # THIS domain IS FROM A dimension THAT IS A SIMPLE LIST OF fields
        # JUST PULL THE FIELDS
        fields = e.domain.dimension.fields
        if is_data(fields):
            Log.error("No longer allowed: All objects are expressions")
        else:
            return object.__new__(DimFieldListDecoder)
    elif not e.value and all(e.domain.partitions.where):
        return object.__new__(GeneralSetDecoder)
    else:
        Log.error("domain type of {{type}} is not supported yet", type=e.domain.type)
def __eq__(self, other):
    """
    Equal when ``other`` is a BasicStartsWithOp with the same value and prefix.

    Always returns a bool: the original fell through and returned None for
    non-BasicStartsWithOp operands, inconsistent with the sibling __eq__
    methods (BasicEqOp, BasicInOp, StringOp) which return False explicitly.
    """
    if is_op(other, BasicStartsWithOp):
        return self.value == other.value and self.prefix == other.prefix
    return False
def __eq__(self, other):
    """Equal when ``other`` is a NestedOp with identical path, select, where, sort, and limit."""
    if not is_op(other, NestedOp):
        return False
    if self.path != other.path:
        return False
    if self.select != other.select:
        return False
    if self.where != other.where:
        return False
    if self.sort != other.sort:
        return False
    return self.limit == other.limit
def __eq__(self, other):
    """Equal when ``other`` is an OuterJoinOp over the same frum and nests."""
    if not is_op(other, OuterJoinOp):
        return False
    return self.frum == other.frum and self.nests == other.nests
def __eq__(self, other):
    """Equal when ``other`` is a BasicInOp with the same value and superset."""
    return (
        is_op(other, BasicInOp)
        and self.value == other.value
        and self.superset == other.superset
    )
def __eq__(self, other):
    """Equal when ``other`` is a StringOp over an equal term."""
    return is_op(other, StringOp) and self.term == other.term
def __data__(self):
    """
    Serialize this "basic.in" operator to its JSON-friendly form.

    Emits the compact {var: superset} encoding when the value is a plain
    Variable and the superset is a literal; otherwise the general
    two-element list encoding.
    """
    compact = is_op(self.value, Variable) and is_literal(self.superset)
    if not compact:
        return {"basic.in": [self.value.__data__(), self.superset.__data__()]}
    return {"basic.in": {self.value.var: self.superset.value}}
def __data__(self):
    """
    Serialize this "split" operator to its JSON-friendly form.

    Fix: the compact branch used ``{self.value.var, self.find.value}`` — a
    two-element SET literal, not a mapping — which does not round-trip and is
    inconsistent with the other operators' __data__ methods (e.g. "left",
    "basic.in"), all of which emit ``{var: literal}``.  The comma is now a
    colon, producing the intended dict.
    """
    if is_op(self.value, Variable) and is_literal(self.find):
        return {"split": {self.value.var: self.find.value}}
    else:
        return {"split": [self.value.__data__(), self.find.__data__()]}
def es_aggsop(es, frum, query):
    """
    Translate a JX aggregation query into an ES "aggs" request, POST it, and
    format the response.

    :param es: ES endpoint wrapper handed to es_post
    :param frum: the table being queried (supplies the schema)
    :param query: JX query (select/edges/groupby/where/format); a copy is marked up
    :return: formatted output (shape depends on query.format)
    :raises: via Log.error on unsupported aggregates/formats or ES problems
    """
    query = query.copy()  # WE WILL MARK UP THIS QUERY
    schema = frum.schema
    query_path = schema.query_path[0]
    select = listwrap(query.select)

    # MAP FROM canonical_name (USED FOR NAMES IN QUERY) TO SELECT MAPPING
    new_select = Data()
    formula = []  # NON-TRIVIAL EXPRESSIONS, AGGREGATED VIA PAINLESS SCRIPTS
    for s in select:
        if is_op(s.value, Variable_):
            s.query_path = query_path
            if s.aggregate == "count":
                new_select["count_" + literal_field(s.value.var)] += [s]
            else:
                new_select[literal_field(s.value.var)] += [s]
        elif s.aggregate:
            split_select = split_expression_by_path(s.value, schema, lang=Painless)
            for si_key, si_value in split_select.items():
                if si_value:
                    if s.query_path:
                        Log.error("can not handle more than one depth per select")
                    s.query_path = si_key
            formula.append(s)

    acc = Aggs()
    # SIMPLE VARIABLE SELECTS: AGGREGATE DIRECTLY ON THE ES COLUMN(S)
    for _, many in new_select.items():
        for s in many:
            canonical_name = s.name
            if s.aggregate in ("value_count", "count"):
                columns = frum.schema.values(s.value.var, exclude_type=(OBJECT, NESTED))
            else:
                columns = frum.schema.values(s.value.var)

            if s.aggregate == "count":
                canonical_names = []
                for column in columns:
                    es_name = column.es_column + "_count"
                    if column.jx_type == EXISTS:
                        if column.nested_path[0] == query_path:
                            canonical_names.append("doc_count")
                            acc.add(NestedAggs(column.nested_path[0]).add(CountAggs(s)))
                    else:
                        canonical_names.append("value")
                        acc.add(NestedAggs(column.nested_path[0]).add(
                            ExprAggs(es_name, {"value_count": {"field": column.es_column}}, s)
                        ))
                if len(canonical_names) == 1:
                    s.pull = jx_expression_to_function(canonical_names[0])
                else:
                    # MULTIPLE TYPED COLUMNS FOR ONE NAME: SUM THE INDIVIDUAL COUNTS
                    s.pull = jx_expression_to_function({"add": canonical_names})
            elif s.aggregate == "median":
                if len(columns) > 1:
                    Log.error("Do not know how to count columns with more than one type (script probably)")
                # ES USES DIFFERENT METHOD FOR PERCENTILES
                key = canonical_name + " percentile"
                acc.add(ExprAggs(key, {"percentiles": {
                    "field": first(columns).es_column,
                    "percents": [50]
                }}, s))
                s.pull = jx_expression_to_function("values.50\\.0")
            elif s.aggregate == "percentile":
                if len(columns) > 1:
                    Log.error("Do not know how to count columns with more than one type (script probably)")
                # ES USES DIFFERENT METHOD FOR PERCENTILES
                key = canonical_name + " percentile"
                # BUGFIX: second test read `s.percetile` (typo), so negative
                # percentiles slipped past validation
                if is_text(s.percentile) or s.percentile < 0 or 1 < s.percentile:
                    Log.error("Expecting percentile to be a float from 0.0 to 1.0")
                percent = mo_math.round(s.percentile * 100, decimal=6)
                acc.add(ExprAggs(key, {"percentiles": {
                    "field": first(columns).es_column,
                    "percents": [percent],
                    "tdigest": {"compression": 2}
                }}, s))
                s.pull = jx_expression_to_function(join_field(["values", text_type(percent)]))
            elif s.aggregate == "cardinality":
                for column in columns:
                    path = column.es_column + "_cardinality"
                    acc.add(ExprAggs(path, {"cardinality": {"field": column.es_column}}, s))
                s.pull = jx_expression_to_function("value")
            elif s.aggregate == "stats":
                if len(columns) > 1:
                    Log.error("Do not know how to count columns with more than one type (script probably)")
                # REGULAR STATS
                acc.add(ExprAggs(canonical_name, {"extended_stats": {"field": first(columns).es_column}}, s))
                s.pull = get_pull_stats()

                # GET MEDIAN TOO!
                select_median = s.copy()
                select_median.pull = jx_expression_to_function(
                    {"select": [{"name": "median", "value": "values.50\\.0"}]}
                )
                acc.add(ExprAggs(canonical_name + "_percentile", {"percentiles": {
                    "field": first(columns).es_column,
                    "percents": [50]
                }}, select_median))
            elif s.aggregate == "union":
                for column in columns:
                    script = {"scripted_metric": {
                        'init_script': 'params._agg.terms = new HashSet()',
                        'map_script': 'for (v in doc[' + quote(column.es_column) + '].values) params._agg.terms.add(v);',
                        'combine_script': 'return params._agg.terms.toArray()',
                        'reduce_script': 'HashSet output = new HashSet(); for (a in params._aggs) { if (a!=null) for (v in a) {output.add(v)} } return output.toArray()',
                    }}
                    stats_name = column.es_column
                    acc.add(NestedAggs(column.nested_path[0]).add(ExprAggs(stats_name, script, s)))
                s.pull = jx_expression_to_function("value")
            elif s.aggregate == "count_values":
                # RETURN MAP FROM VALUE TO THE NUMBER OF TIMES FOUND IN THE DOCUMENTS
                # NOT A NESTED DOC, RATHER A MULTIVALUE FIELD
                for column in columns:
                    script = {"scripted_metric": {
                        'params': {"_agg": {}},
                        'init_script': 'params._agg.terms = new HashMap()',
                        'map_script': 'for (v in doc[' + quote(column.es_column) + '].values) params._agg.terms.put(v, Optional.ofNullable(params._agg.terms.get(v)).orElse(0)+1);',
                        'combine_script': 'return params._agg.terms',
                        'reduce_script': '''
                            HashMap output = new HashMap();
                            for (agg in params._aggs) {
                                if (agg!=null){
                                    for (e in agg.entrySet()) {
                                        String key = String.valueOf(e.getKey());
                                        output.put(key, e.getValue() + Optional.ofNullable(output.get(key)).orElse(0));
                                    }
                                }
                            }
                            return output;
                        '''
                    }}
                    stats_name = encode_property(column.es_column)
                    acc.add(NestedAggs(column.nested_path[0]).add(ExprAggs(stats_name, script, s)))
                s.pull = jx_expression_to_function("value")
            else:
                if not columns:
                    s.pull = jx_expression_to_function(NULL)
                else:
                    # DEFAULT: PULL THE REQUESTED STATISTIC OUT OF extended_stats
                    for c in columns:
                        acc.add(NestedAggs(c.nested_path[0]).add(
                            ExprAggs(canonical_name, {"extended_stats": {"field": c.es_column}}, s)
                        ))
                    s.pull = jx_expression_to_function(aggregates[s.aggregate])

    # EXPRESSION SELECTS: AGGREGATE ON A PAINLESS SCRIPT
    for s in formula:  # (was enumerate(); the index was never used)
        s_path = [
            k
            for k, v in split_expression_by_path(s.value, schema=schema, lang=Painless).items()
            if v
        ]
        if len(s_path) == 0:
            # FOR CONSTANTS
            nest = NestedAggs(query_path)
            acc.add(nest)
        elif len(s_path) == 1:
            nest = NestedAggs(first(s_path))
            acc.add(nest)
        else:
            Log.error("do not know how to handle")

        canonical_name = s.name
        if is_op(s.value, TupleOp):
            if s.aggregate == "count":
                # TUPLES ALWAYS EXIST, SO COUNTING THEM IS EASY
                s.pull = jx_expression_to_function("doc_count")
            elif s.aggregate in ('max', 'maximum', 'min', 'minimum'):
                # `sort_dir` was `dir` (shadowed the builtin)
                if s.aggregate in ('max', 'maximum'):
                    sort_dir = 1
                    op = "max"
                else:
                    sort_dir = -1
                    op = 'min'

                nully = Painless[TupleOp([NULL] * len(s.value.terms))].partial_eval().to_es_script(schema)
                selfy = text_type(Painless[s.value].partial_eval().to_es_script(schema))

                script = {"scripted_metric": {
                    'init_script': 'params._agg.best = ' + nully + ';',
                    'map_script': 'params._agg.best = ' + expand_template(
                        MAX_OF_TUPLE,
                        {"expr1": "params._agg.best", "expr2": selfy, "dir": sort_dir, "op": op}
                    ) + ";",
                    'combine_script': 'return params._agg.best',
                    'reduce_script': 'return params._aggs.stream().' + op + '(' + expand_template(
                        COMPARE_TUPLE,
                        {"dir": sort_dir, "op": op}
                    ) + ').get()',
                }}
                nest.add(NestedAggs(query_path).add(ExprAggs(canonical_name, script, s)))
                s.pull = jx_expression_to_function("value")
            else:
                Log.error("{{agg}} is not a supported aggregate over a tuple", agg=s.aggregate)
        elif s.aggregate == "count":
            nest.add(ExprAggs(canonical_name, {"value_count": {
                "script": text_type(Painless[s.value].partial_eval().to_es_script(schema))
            }}, s))
            s.pull = jx_expression_to_function("value")
        elif s.aggregate == "median":
            # ES USES DIFFERENT METHOD FOR PERCENTILES THAN FOR STATS AND COUNT
            key = literal_field(canonical_name + " percentile")
            nest.add(ExprAggs(key, {"percentiles": {
                "script": text_type(Painless[s.value].to_es_script(schema)),
                "percents": [50]
            }}, s))
            s.pull = jx_expression_to_function(join_field(["50.0"]))
        elif s.aggregate == "percentile":
            # ES USES DIFFERENT METHOD FOR PERCENTILES THAN FOR STATS AND COUNT
            key = literal_field(canonical_name + " percentile")
            percent = mo_math.round(s.percentile * 100, decimal=6)
            nest.add(ExprAggs(key, {"percentiles": {
                "script": text_type(Painless[s.value].to_es_script(schema)),
                "percents": [percent]
            }}, s))
            s.pull = jx_expression_to_function(join_field(["values", text_type(percent)]))
        elif s.aggregate == "cardinality":
            # ES USES DIFFERENT METHOD FOR CARDINALITY
            key = canonical_name + " cardinality"
            nest.add(ExprAggs(key, {"cardinality": {
                "script": text_type(Painless[s.value].to_es_script(schema))
            }}, s))
            s.pull = jx_expression_to_function("value")
        elif s.aggregate == "stats":
            # REGULAR STATS
            nest.add(ExprAggs(canonical_name, {"extended_stats": {
                "script": text_type(Painless[s.value].to_es_script(schema))
            }}, s))
            s.pull = get_pull_stats()

            # GET MEDIAN TOO!
            select_median = s.copy()
            select_median.pull = jx_expression_to_function(
                {"select": [{"name": "median", "value": "values.50\\.0"}]}
            )
            nest.add(ExprAggs(canonical_name + "_percentile", {"percentiles": {
                "script": text_type(Painless[s.value].to_es_script(schema)),
                "percents": [50]
            }}, select_median))
            s.pull = get_pull_stats()
        elif s.aggregate == "union":
            # USE TERMS AGGREGATE TO SIMULATE union
            nest.add(TermsAggs(canonical_name, {
                "script_field": text_type(Painless[s.value].to_es_script(schema))
            }, s))
            s.pull = jx_expression_to_function("key")
        else:
            # PULL VALUE OUT OF THE stats AGGREGATE
            s.pull = jx_expression_to_function(aggregates[s.aggregate])
            nest.add(ExprAggs(canonical_name, {"extended_stats": {
                "script": text_type(Painless[s.value].to_es_script(schema))
            }}, s))

    acc = NestedAggs(query_path).add(acc)
    split_decoders = get_decoders_by_path(query)
    split_wheres = split_expression_by_path(query.where, schema=frum.schema, lang=ES52)

    # WRAP THE METRIC AGGS IN THE EDGE/GROUPBY DECODERS AND FILTERS,
    # DEEPEST NESTED PATH FIRST
    start = 0
    decoders = [None] * (len(query.edges) + len(query.groupby))
    paths = list(reversed(sorted(split_wheres.keys() | split_decoders.keys())))
    for path in paths:
        literal_path = literal_field(path)
        decoder = split_decoders[literal_path]
        where = split_wheres[literal_path]

        for d in decoder:
            decoders[d.edge.dim] = d
            acc = d.append_query(path, acc)
            start += d.num_columns

        if where:
            acc = FilterAggs("_filter", AndOp(where), None).add(acc)
        acc = NestedAggs(path).add(acc)

    acc = NestedAggs('.').add(acc)
    acc = simplify(acc)
    es_query = wrap(acc.to_es(schema))
    es_query.size = 0  # AGGREGATES ONLY; NO HITS

    with Timer("ES query time", silent=not DEBUG) as es_duration:
        result = es_post(es, es_query, query.limit)

    try:
        format_time = Timer("formatting", silent=not DEBUG)
        with format_time:
            # result.aggregations.doc_count = coalesce(result.aggregations.doc_count, result.hits.total)  # IT APPEARS THE OLD doc_count IS GONE
            aggs = unwrap(result.aggregations)
            formatter, groupby_formatter, aggop_formatter, mime_type = format_dispatch[query.format]
            if query.edges:
                output = formatter(aggs, acc, query, decoders, select)
            elif query.groupby:
                output = groupby_formatter(aggs, acc, query, decoders, select)
            else:
                output = aggop_formatter(aggs, acc, query, decoders, select)

        output.meta.timing.formatting = format_time.duration
        output.meta.timing.es_search = es_duration.duration
        output.meta.content_type = mime_type
        output.meta.es_query = es_query
        return output
    except Exception as e:
        if query.format not in format_dispatch:
            Log.error("Format {{format|quote}} not supported yet", format=query.format, cause=e)
        Log.error("Some problem", cause=e)
def __data__(self):
    """JSON-encodable form: compact {variable: literal} map when possible, else a pair."""
    if is_op(self.lhs, Variable) and is_literal(self.rhs):
        # BUGFIX: was a SET literal {self.lhs.var, self.rhs.value}; the compact
        # form must be a mapping, matching the other __data__ implementations
        return {"eq": {self.lhs.var: self.rhs.value}}
    else:
        return {"eq": [self.lhs.__data__(), self.rhs.__data__()]}
def __eq__(self, other):
    """Equal when `other` is a CaseOp with the same sequence of when-clauses."""
    if not is_op(other, CaseOp):
        # BUGFIX: was an implicit `return None`; be explicit
        return False
    if len(self.whens) != len(other.whens):
        # BUGFIX: zip() truncates, so a CaseOp that is a strict prefix of
        # another previously compared equal
        return False
    return all(s == o for s, o in zip(self.whens, other.whens))
def es_setop(es, query):
    """
    Execute a JX set-operation (row-listing) query against ES.

    Builds per-nested-path ESSelect clauses from query.select, POSTs the
    search, and formats the hits per query.format.
    """
    schema = query.frum.schema
    query_path = schema.query_path[0]

    # ONE ESSelect PER NESTED PATH; "." IS THE DOCUMENT ROOT
    split_select = {".": ESSelect('.')}

    def get_select(path):
        # LAZILY CREATE THE ESSelect FOR path
        es_select = split_select.get(path)
        if not es_select:
            es_select = split_select[path] = ESSelect(path)
        return es_select

    selects = wrap([unwrap(s.copy()) for s in listwrap(query.select)])
    new_select = FlatList()
    put_index = 0
    for select in selects:
        # IF THERE IS A *, THEN INSERT THE EXTRA COLUMNS
        if is_op(select.value, LeavesOp) and is_op(select.value.term, Variable):
            term = select.value.term
            leaves = schema.leaves(term.var)
            for c in leaves:
                full_name = concat_field(select.name, relative_field(untype_path(c.name), term.var))
                if c.jx_type == NESTED:
                    # NESTED COLUMNS REQUIRE THE WHOLE _source
                    get_select('.').use_source = True
                    new_select.append({
                        "name": full_name,
                        "value": Variable(c.es_column),
                        "put": {"name": literal_field(full_name), "index": put_index, "child": "."},
                        "pull": get_pull_source(c.es_column)
                    })
                    put_index += 1
                else:
                    get_select(c.nested_path[0]).fields.append(c.es_column)
                    new_select.append({
                        "name": full_name,
                        "value": Variable(c.es_column),
                        "put": {"name": literal_field(full_name), "index": put_index, "child": "."}
                    })
                    put_index += 1
        elif is_op(select.value, Variable):
            s_column = select.value.var

            if s_column == ".":
                # PULL ALL SOURCE
                get_select('.').use_source = True
                new_select.append({
                    "name": select.name,
                    "value": select.value,
                    "put": {"name": select.name, "index": put_index, "child": "."},
                    "pull": get_pull_source(".")
                })
                continue

            leaves = schema.leaves(s_column)  # LEAVES OF OBJECT
            # nested_selects = {}
            if leaves:
                if any(c.jx_type == NESTED for c in leaves):
                    # PULL WHOLE NESTED ARRAYS
                    get_select('.').use_source = True
                    for c in leaves:
                        if len(c.nested_path) == 1:  # NESTED PROPERTIES ARE IGNORED, CAPTURED BY THESE FIRST LEVEL PROPERTIES
                            pre_child = join_field(decode_property(n) for n in split_field(c.name))
                            new_select.append({
                                "name": select.name,
                                "value": Variable(c.es_column),
                                "put": {"name": select.name, "index": put_index, "child": untype_path(relative_field(pre_child, s_column))},
                                "pull": get_pull_source(c.es_column)
                            })
                else:
                    # PULL ONLY WHAT'S NEEDED
                    for c in leaves:
                        c_nested_path = c.nested_path[0]
                        if c_nested_path == ".":
                            if c.es_column == "_id":
                                new_select.append({
                                    "name": select.name,
                                    "value": Variable(c.es_column),
                                    "put": {"name": select.name, "index": put_index, "child": "."},
                                    "pull": lambda row: row._id
                                })
                            elif c.jx_type == NESTED:
                                get_select('.').use_source = True
                                pre_child = join_field(decode_property(n) for n in split_field(c.name))
                                new_select.append({
                                    "name": select.name,
                                    "value": Variable(c.es_column),
                                    "put": {"name": select.name, "index": put_index, "child": untype_path(relative_field(pre_child, s_column))},
                                    "pull": get_pull_source(c.es_column)
                                })
                            else:
                                get_select(c_nested_path).fields.append(c.es_column)
                                pre_child = join_field(decode_property(n) for n in split_field(c.name))
                                new_select.append({
                                    "name": select.name,
                                    "value": Variable(c.es_column),
                                    "put": {"name": select.name, "index": put_index, "child": untype_path(relative_field(pre_child, s_column))}
                                })
                        else:
                            # COLUMN LIVES IN A DEEPER NESTED PATH: ACCUMULATE THE INNER DOCS
                            es_select = get_select(c_nested_path)
                            es_select.fields.append(c.es_column)

                            child = relative_field(untype_path(relative_field(c.name, schema.query_path[0])), s_column)
                            pull = accumulate_nested_doc(c_nested_path, Variable(relative_field(s_column, unnest_path(c_nested_path))))
                            new_select.append({
                                "name": select.name,
                                "value": select.value,
                                "put": {"name": select.name, "index": put_index, "child": child},
                                "pull": pull
                            })
            else:
                # UNKNOWN COLUMN: EMIT A PLACEHOLDER SO OUTPUT SHAPE IS STABLE
                new_select.append({
                    "name": select.name,
                    "value": Variable("$dummy"),
                    "put": {"name": select.name, "index": put_index, "child": "."}
                })
            put_index += 1
        else:
            # GENERAL EXPRESSION: COMPUTE VIA PAINLESS SCRIPT FIELD
            split_scripts = split_expression_by_path(select.value, schema, lang=Painless)
            for p, script in split_scripts.items():
                es_select = get_select(p)
                es_select.scripts[select.name] = {"script": text(Painless[first(script)].partial_eval().to_es_script(schema))}
                new_select.append({
                    "name": select.name,
                    "pull": jx_expression_to_function("fields." + literal_field(select.name)),
                    "put": {"name": select.name, "index": put_index, "child": "."}
                })
                put_index += 1

    # ASSIGN DEFAULT pull FUNCTIONS TO ANY SELECT STILL MISSING ONE
    for n in new_select:
        if n.pull:
            continue
        elif is_op(n.value, Variable):
            if get_select('.').use_source:
                n.pull = get_pull_source(n.value.var)
            elif n.value == "_id":
                n.pull = jx_expression_to_function("_id")
            else:
                n.pull = jx_expression_to_function(concat_field("fields", literal_field(n.value.var)))
        else:
            Log.error("Do not know what to do")

    split_wheres = split_expression_by_path(query.where, schema, lang=ES52)
    es_query = es_query_proto(query_path, split_select, split_wheres, schema)
    es_query.size = coalesce(query.limit, DEFAULT_LIMIT)
    es_query.sort = jx_sort_to_es_sort(query.sort, schema)

    # NOTE(review): other Timers in this file use silent=not DEBUG; this
    # `silent=DEBUG` looks inverted — confirm intent
    with Timer("call to ES", silent=DEBUG) as call_timer:
        data = es_post(es, es_query, query.limit)

    T = data.hits.hits

    # Log.note("{{output}}", output=T)
    try:
        formatter, groupby_formatter, mime_type = format_dispatch[query.format]

        with Timer("formatter", silent=True):
            output = formatter(T, new_select, query)
        output.meta.timing.es = call_timer.duration
        output.meta.content_type = mime_type
        output.meta.es_query = es_query
        return output
    except Exception as e:
        Log.error("problem formatting", e)
def es_setop(es, query):
    """
    Execute a JX set-operation (row-listing) query against ES.

    Builds per-nested-path ESSelect clauses from query.select, POSTs the
    search, and formats the hits per query.format.
    """
    schema = query.frum.schema
    query_path = schema.query_path[0]

    # ONE ESSelect PER NESTED PATH; "." IS THE DOCUMENT ROOT
    split_select = {".": ESSelect('.')}

    def get_select(path):
        # LAZILY CREATE THE ESSelect FOR path
        es_select = split_select.get(path)
        if not es_select:
            es_select = split_select[path] = ESSelect(path)
        return es_select

    selects = wrap([unwrap(s.copy()) for s in listwrap(query.select)])
    new_select = FlatList()
    put_index = 0
    for select in selects:
        # IF THERE IS A *, THEN INSERT THE EXTRA COLUMNS
        if is_op(select.value, LeavesOp) and is_op(select.value.term, Variable):
            term = select.value.term
            leaves = schema.leaves(term.var)
            for c in leaves:
                full_name = concat_field(select.name, relative_field(untype_path(c.name), term.var))
                if c.jx_type == NESTED:
                    # NESTED COLUMNS REQUIRE THE WHOLE _source
                    get_select('.').use_source = True
                    new_select.append({
                        "name": full_name,
                        "value": Variable(c.es_column),
                        "put": {"name": literal_field(full_name), "index": put_index, "child": "."},
                        "pull": get_pull_source(c.es_column)
                    })
                    put_index += 1
                else:
                    get_select(c.nested_path[0]).fields.append(c.es_column)
                    new_select.append({
                        "name": full_name,
                        "value": Variable(c.es_column),
                        "put": {"name": literal_field(full_name), "index": put_index, "child": "."}
                    })
                    put_index += 1
        elif is_op(select.value, Variable):
            s_column = select.value.var

            if s_column == ".":
                # PULL ALL SOURCE
                get_select('.').use_source = True
                new_select.append({
                    "name": select.name,
                    "value": select.value,
                    "put": {"name": select.name, "index": put_index, "child": "."},
                    "pull": get_pull_source(".")
                })
                continue

            leaves = schema.leaves(s_column)  # LEAVES OF OBJECT
            # nested_selects = {}
            if leaves:
                if any(c.jx_type == NESTED for c in leaves):
                    # PULL WHOLE NESTED ARRAYS
                    get_select('.').use_source = True
                    for c in leaves:
                        if len(c.nested_path) == 1:  # NESTED PROPERTIES ARE IGNORED, CAPTURED BY THESE FIRST LEVEL PROPERTIES
                            pre_child = join_field(decode_property(n) for n in split_field(c.name))
                            new_select.append({
                                "name": select.name,
                                "value": Variable(c.es_column),
                                "put": {"name": select.name, "index": put_index, "child": untype_path(relative_field(pre_child, s_column))},
                                "pull": get_pull_source(c.es_column)
                            })
                else:
                    # PULL ONLY WHAT'S NEEDED
                    for c in leaves:
                        c_nested_path = c.nested_path[0]
                        if c_nested_path == ".":
                            if c.es_column == "_id":
                                new_select.append({
                                    "name": select.name,
                                    "value": Variable(c.es_column),
                                    "put": {"name": select.name, "index": put_index, "child": "."},
                                    "pull": lambda row: row._id
                                })
                            elif c.jx_type == NESTED:
                                get_select('.').use_source = True
                                pre_child = join_field(decode_property(n) for n in split_field(c.name))
                                new_select.append({
                                    "name": select.name,
                                    "value": Variable(c.es_column),
                                    "put": {"name": select.name, "index": put_index, "child": untype_path(relative_field(pre_child, s_column))},
                                    "pull": get_pull_source(c.es_column)
                                })
                            else:
                                get_select(c_nested_path).fields.append(c.es_column)
                                pre_child = join_field(decode_property(n) for n in split_field(c.name))
                                new_select.append({
                                    "name": select.name,
                                    "value": Variable(c.es_column),
                                    "put": {"name": select.name, "index": put_index, "child": untype_path(relative_field(pre_child, s_column))}
                                })
                        else:
                            # COLUMN LIVES IN A DEEPER NESTED PATH: ACCUMULATE THE INNER DOCS
                            es_select = get_select(c_nested_path)
                            es_select.fields.append(c.es_column)

                            child = relative_field(untype_path(relative_field(c.name, schema.query_path[0])), s_column)
                            pull = accumulate_nested_doc(c_nested_path, Variable(relative_field(s_column, unnest_path(c_nested_path))))
                            new_select.append({
                                "name": select.name,
                                "value": select.value,
                                "put": {"name": select.name, "index": put_index, "child": child},
                                "pull": pull
                            })
            else:
                # UNKNOWN COLUMN: EMIT A PLACEHOLDER SO OUTPUT SHAPE IS STABLE
                new_select.append({
                    "name": select.name,
                    "value": Variable("$dummy"),
                    "put": {"name": select.name, "index": put_index, "child": "."}
                })
            put_index += 1
        else:
            # GENERAL EXPRESSION: COMPUTE VIA PAINLESS SCRIPT FIELD
            split_scripts = split_expression_by_path(select.value, schema, lang=Painless)
            for p, script in split_scripts.items():
                es_select = get_select(p)
                es_select.scripts[select.name] = {"script": text_type(Painless[first(script)].partial_eval().to_es_script(schema))}
                new_select.append({
                    "name": select.name,
                    "pull": jx_expression_to_function("fields." + literal_field(select.name)),
                    "put": {"name": select.name, "index": put_index, "child": "."}
                })
                put_index += 1

    # ASSIGN DEFAULT pull FUNCTIONS TO ANY SELECT STILL MISSING ONE
    for n in new_select:
        if n.pull:
            continue
        elif is_op(n.value, Variable):
            if get_select('.').use_source:
                n.pull = get_pull_source(n.value.var)
            elif n.value == "_id":
                n.pull = jx_expression_to_function("_id")
            else:
                n.pull = jx_expression_to_function(concat_field("fields", literal_field(n.value.var)))
        else:
            Log.error("Do not know what to do")

    split_wheres = split_expression_by_path(query.where, schema, lang=ES52)
    es_query = es_query_proto(query_path, split_select, split_wheres, schema)
    es_query.size = coalesce(query.limit, DEFAULT_LIMIT)
    es_query.sort = jx_sort_to_es_sort(query.sort, schema)

    with Timer("call to ES", silent=True) as call_timer:
        data = es_post(es, es_query, query.limit)

    T = data.hits.hits

    # Log.note("{{output}}", output=T)
    try:
        formatter, groupby_formatter, mime_type = format_dispatch[query.format]

        with Timer("formatter", silent=True):
            output = formatter(T, new_select, query)
        output.meta.timing.es = call_timer.duration
        output.meta.content_type = mime_type
        output.meta.es_query = es_query
        return output
    except Exception as e:
        Log.error("problem formatting", e)
def _normalize_select(select, frum, schema=None):
    """
    :param select: ONE SELECT COLUMN
    :param frum: TABLE TO get_columns()
    :param schema: SCHEMA TO LOOKUP NAMES FOR DEFINITIONS
    :return: AN ARRAY OF SELECT COLUMNS
    """
    if is_text(select):
        canonical = select = Data(value=select)
    else:
        select = to_data(select)
        canonical = select.copy()

    canonical.aggregate = coalesce(canonical_aggregates[select.aggregate].name, select.aggregate, "none")
    canonical.default = coalesce(select.default, canonical_aggregates[canonical.aggregate].default)

    # DELEGATE WHEN THE TABLE HAS ITS OWN NORMALIZATION
    if hasattr(unwrap(frum), "_normalize_select"):
        return frum._normalize_select(canonical)

    output = []

    if len(select) and not select.value:
        Log.error(BAD_SELECT, select=select)
    elif not select.value or select.value == ".":
        # SELECT EVERYTHING: ONE SELECT PER TOP-LEVEL LEAF COLUMN
        output.extend([
            set_default(
                {
                    "name": c.name,
                    "value": jx_expression(c.name, schema=schema)
                },
                canonical
            )
            for c in schema.leaves('.')
            # TOP LEVEL COLUMNS ONLY
            if len(c.nested_path) == 1
        ])
    elif is_text(select.value):
        if select.value.endswith(".*"):
            canonical.name = coalesce(select.name, ".")
            # BUGFIX: strip the ".*" suffix from the value TEXT; the old
            # `select[:-2]` tried to slice the select Data object itself
            value = jx_expression(select.value[:-2], schema=schema)
            if not is_op(value, Variable):
                Log.error("`*` over general expression not supported yet")
                # NOTE(review): Log.error raises, so this append is unreachable
                # as written — preserved pending confirmation of intent
                output.append([
                    set_default(
                        {
                            "value": LeavesOp(value, prefix=select.prefix),
                            "format": "dict"  # MARKUP FOR DECODING
                        },
                        canonical
                    )
                    for c in frum.get_columns()
                    if c.jx_type not in INTERNAL
                ])
            else:
                Log.error("do not know what to do")
        else:
            canonical.name = coalesce(select.name, select.value, select.aggregate)
            canonical.value = jx_expression(select.value, schema=schema)
            output.append(canonical)

    output = to_data(output)
    if any(n == None for n in output.name):
        Log.error("expecting select to have a name: {{select}}", select=select)
    return output