def _select(template, data, fields, depth):
    """
    Apply the select clauses in `fields` to every row in `data`, building one
    output record per row from a copy of `template`.  A clause that reaches
    into a nested property yields "children" rows, which are recursed into at
    depth + 1.

    :param template: prototype record; copied for each row of `data`
    :param data: iterable of rows (a bare Dict is a programmer error)
    :param fields: select clauses; each `f` carries a `value` path consumed by
        _select_deep (presumably a list of path segments -- TODO confirm)
    :param depth: current nesting depth, forwarded to _select_deep
    :return: DictList of selected records
    """
    output = DictList()
    deep_path = []
    # NOTE(review): UniqueIndex keyed on "name"; the `deep_fields[f]` lookup
    # below assumes `f` exposes a matching `name` attribute -- verify
    deep_fields = UniqueIndex(["name"])
    for d in data:
        if isinstance(d, Dict):
            Log.error("programmer error, _select can not handle Dict")
        record = template.copy()
        children = None
        for f in fields:
            # index: how far into f.value the nested (deep) part begins
            # c: child rows found under this field, if any
            index, c = _select_deep(d, f, depth, record)
            # keep the first non-None set of children encountered
            children = c if children is None else children
            if index:
                # the shallow prefix of the field's path
                path = f.value[0:index:]
                if not deep_fields[f]:
                    deep_fields.add(
                        f)  # KEEP TRACK OF WHICH FIELDS NEED DEEPER SELECT
                # only one nested branch may be selected at a time: the common
                # prefix of this path and the deepest path so far must agree
                short = MIN([len(deep_path), len(path)])
                if path[:short:] != deep_path[:short:]:
                    Log.error(
                        "Dangerous to select into more than one branch at time"
                    )
                if len(deep_path) < len(path):
                    deep_path = path
        if not children:
            output.append(record)
        else:
            # recurse into the child rows, with the partially-filled record as
            # the template for the next depth
            output.extend(_select(record, children, deep_fields, depth + 1))
    return output
def _select(template, data, fields, depth):
    """
    Select `fields` out of every row of `data`, using `template` as the
    prototype for each output record.  Fields that reach into a nested
    property produce child rows, which are selected recursively.
    """
    results = DictList()
    deepest_path = []
    nested_fields = UniqueIndex(["name"])

    for row in data:
        if isinstance(row, Dict):
            Log.error("programmer error, _select can not handle Dict")

        record = template.copy()
        nested_rows = None
        for field in fields:
            split_at, found = _select_deep(row, field, depth, record)
            # remember the first batch of child rows we encounter
            if nested_rows is None:
                nested_rows = found
            if split_at:
                branch = field.value[0:split_at:]
                if not nested_fields[field]:
                    # KEEP TRACK OF WHICH FIELDS NEED DEEPER SELECT
                    nested_fields.add(field)
                # selecting into two different nested branches is not allowed
                common = MIN(len(deepest_path), len(branch))
                if branch[:common:] != deepest_path[:common:]:
                    Log.error("Dangerous to select into more than one branch at time")
                if len(branch) > len(deepest_path):
                    deepest_path = branch

        if nested_rows:
            # descend into the child rows with the partially-filled record
            results.extend(_select(record, nested_rows, nested_fields, depth + 1))
        else:
            results.append(record)

    return results
def parse_properties(parent_index_name, parent_query_path, esProperties):
    """
    RETURN THE COLUMN DEFINITIONS IN THE GIVEN esProperties OBJECT

    Walks an Elasticsearch `properties` mapping recursively, emitting one
    Column per leaf property.  Nested mappings get `nested_path` markup;
    object mappings recurse without it.

    :param parent_index_name: index name propagated to every Column
    :param parent_query_path: dotted path of the enclosing property ("" at root)
    :param esProperties: mapping of property name -> property definition
    :return: DictList of Column
    """
    from pyLibrary.queries.meta import Column

    columns = DictList()
    for name, property in esProperties.items():
        # build the full dotted path for this property
        if parent_query_path:
            index_name, query_path = parent_index_name, join_field(split_field(parent_query_path) + [name])
        else:
            index_name, query_path = parent_index_name, name

        if property.type == "nested" and property.properties:
            # NESTED TYPE IS A NEW TYPE DEFINITION
            # MARKUP CHILD COLUMNS WITH THE EXTRA DEPTH
            self_columns = parse_properties(index_name, query_path, property.properties)
            for c in self_columns:
                # prepend this nested level to each child's nested_path
                c.nested_path = unwraplist([query_path] + listwrap(c.nested_path))
            columns.extend(self_columns)
            columns.append(Column(
                table=index_name,
                es_index=index_name,
                name=query_path,
                es_column=query_path,
                type="nested",
                nested_path=query_path
            ))
            continue

        if property.properties:
            # plain object: recurse, then record the object column itself
            child_columns = parse_properties(index_name, query_path, property.properties)
            columns.extend(child_columns)
            columns.append(Column(
                table=index_name,
                es_index=index_name,
                name=query_path,
                es_column=query_path,
                # enabled == False means the property is only in _source
                type="source" if property.enabled == False else "object"
            ))
        if property.dynamic:
            continue
        if not property.type:
            continue
        if property.type == "multi_field":
            property.type = property.fields[name].type  # PULL DEFAULT TYPE
            for i, (n, p) in enumerate(property.fields.items()):
                if n == name:
                    # DEFAULT
                    columns.append(Column(
                        table=index_name,
                        es_index=index_name,
                        name=query_path,
                        es_column=query_path,
                        type=p.type
                    ))
                else:
                    # secondary fields addressed as <path>\.<field>
                    columns.append(Column(
                        table=index_name,
                        es_index=index_name,
                        name=query_path + "\\." + n,
                        es_column=query_path + "\\." + n,
                        type=p.type
                    ))
            continue

        if property.type in ["string", "boolean", "integer", "date", "long", "double"]:
            columns.append(Column(
                table=index_name,
                es_index=index_name,
                name=query_path,
                es_column=query_path,
                type=property.type
            ))
            if property.index_name and name != property.index_name:
                # NOTE(review): this Column is identical to the one just
                # appended (same name/es_column); compare parse_columns, which
                # uses property.index_name as the name here -- confirm intent
                columns.append(Column(
                    table=index_name,
                    es_index=index_name,
                    es_column=query_path,
                    name=query_path,
                    type=property.type
                ))
        elif property.enabled == None or property.enabled == False:
            # NOTE(review): `== None` relied upon for pyLibrary Null semantics;
            # do not change to `is None`
            columns.append(Column(
                table=index_name,
                es_index=index_name,
                name=query_path,
                es_column=query_path,
                type="source" if property.enabled==False else "object"
            ))
        else:
            Log.warning("unknown type {{type}} for property {{path}}", type=property.type, path=query_path)

    return columns
def normalize_edges(self, edges):
    """
    Normalize every edge clause in `edges`; a single clause may expand to
    several normalized edges.  Returns a flat DictList.
    """
    normalized = DictList()
    for edge in listwrap(edges):
        normalized.extend(self._normalize_edge(edge))
    return normalized
def parse_properties(parent_index_name, parent_query_path, esProperties):
    """
    RETURN THE COLUMN DEFINITIONS IN THE GIVEN esProperties OBJECT

    Recursive walk over an Elasticsearch `properties` mapping; nested mappings
    get their `nested_path` marked, object mappings recurse plainly, and
    multi_field properties expand into one Column per sub-field.

    :param parent_index_name: index name stamped onto every Column
    :param parent_query_path: dotted path of the enclosing property ("" at root)
    :param esProperties: mapping of property name -> property definition
    :return: DictList of Column
    """
    from pyLibrary.queries.meta import Column

    columns = DictList()
    for name, property in esProperties.items():
        # compute this property's full dotted path
        if parent_query_path:
            index_name, query_path = parent_index_name, join_field(split_field(parent_query_path) + [name])
        else:
            index_name, query_path = parent_index_name, name

        if property.type == "nested" and property.properties:
            # NESTED TYPE IS A NEW TYPE DEFINITION
            # MARKUP CHILD COLUMNS WITH THE EXTRA DEPTH
            self_columns = parse_properties(index_name, query_path, property.properties)
            for c in self_columns:
                # each child gains this level at the front of its nested_path
                c.nested_path = unwraplist([query_path] + listwrap(c.nested_path))
            columns.extend(self_columns)
            columns.append(Column(
                table=index_name,
                es_index=index_name,
                name=query_path,
                es_column=query_path,
                type="nested",
                nested_path=query_path
            ))
            continue

        if property.properties:
            # plain object property: recurse, then record the object itself
            child_columns = parse_properties(index_name, query_path, property.properties)
            columns.extend(child_columns)
            columns.append(Column(
                table=index_name,
                es_index=index_name,
                name=query_path,
                es_column=query_path,
                # enabled == False => values only available from _source
                type="source" if property.enabled == False else "object"
            ))
        if property.dynamic:
            continue
        if not property.type:
            continue
        if property.type == "multi_field":
            property.type = property.fields[name].type  # PULL DEFAULT TYPE
            for i, (n, p) in enumerate(property.fields.items()):
                if n == name:
                    # DEFAULT
                    columns.append(Column(
                        table=index_name,
                        es_index=index_name,
                        name=query_path,
                        es_column=query_path,
                        type=p.type
                    ))
                else:
                    # secondary sub-fields addressed as <path>\.<field>
                    columns.append(Column(
                        table=index_name,
                        es_index=index_name,
                        name=query_path + "\\." + n,
                        es_column=query_path + "\\." + n,
                        type=p.type
                    ))
            continue

        if property.type in ["string", "boolean", "integer", "date", "long", "double"]:
            columns.append(Column(
                table=index_name,
                es_index=index_name,
                name=query_path,
                es_column=query_path,
                type=property.type
            ))
            if property.index_name and name != property.index_name:
                # NOTE(review): identical to the Column just appended; the
                # parse_columns variant uses property.index_name for the name
                # here instead -- confirm which is intended
                columns.append(Column(
                    table=index_name,
                    es_index=index_name,
                    es_column=query_path,
                    name=query_path,
                    type=property.type
                ))
        elif property.enabled == None or property.enabled == False:
            # NOTE(review): `== None` relied upon for pyLibrary Null
            # semantics; do not rewrite as `is None`
            columns.append(Column(
                table=index_name,
                es_index=index_name,
                name=query_path,
                es_column=query_path,
                type="source" if property.enabled==False else "object"
            ))
        else:
            Log.warning("unknown type {{type}} for property {{path}}", type=property.type, path=query_path)

    return columns
def normalize_edges(self, edges):
    """
    Expand each edge clause into its normalized form(s) and collect them
    into a single flat DictList.
    """
    out = DictList()
    for clause in listwrap(edges):
        out.extend(self._normalize_edge(clause))
    return out
def parse_columns(parent_path, esProperties):
    """
    RETURN THE COLUMN DEFINITIONS IN THE GIVEN esProperties OBJECT

    Recursive walk over an Elasticsearch `properties` mapping.  Nested
    mappings bump child `depth` and register themselves in INDEX_CACHE;
    object mappings recurse plainly; multi_field properties expand into
    one column per sub-field.

    :param parent_path: dotted path of the enclosing property ("" at root)
    :param esProperties: mapping of property name -> property definition
    :return: DictList of column descriptors ({"name", "type", "useSource"})
    """
    columns = DictList()
    for name, property in esProperties.items():
        # full dotted path for this property
        if parent_path:
            path = join_field(split_field(parent_path) + [name])
        else:
            path = name

        if property.type == "nested" and property.properties:
            # NESTED TYPE IS A NEW TYPE DEFINITION
            # MARKUP CHILD COLUMNS WITH THE EXTRA DEPTH
            child_columns = deepcopy(parse_columns(path, property.properties))
            self_columns = deepcopy(child_columns)
            for c in self_columns:
                c.depth += 1
            columns.extend(self_columns)
            columns.append({
                "name": join_field(split_field(path)[1::]),
                "type": "nested",
                "useSource": False
            })

            if path not in INDEX_CACHE:
                # seed the cache entry from the nearest cached ancestor
                pp = split_field(parent_path)
                for i in qb.reverse(range(len(pp))):
                    c = INDEX_CACHE.get(join_field(pp[:i + 1]), None)
                    if c:
                        INDEX_CACHE[path] = c.copy()
                        break
                else:
                    Log.error("Can not find parent")
                INDEX_CACHE[path].name = path
            INDEX_CACHE[path].columns = child_columns
            continue

        if property.properties:
            # plain object: recurse, then record the object column itself
            child_columns = parse_columns(path, property.properties)
            columns.extend(child_columns)
            columns.append({
                "name": join_field(split_field(path)[1::]),
                "type": "object",
                "useSource": False
            })

        if property.dynamic:
            continue
        if not property.type:
            continue
        if property.type == "multi_field":
            property.type = property.fields[name].type  # PULL DEFAULT TYPE
            # BUG FIX: original `for i, n, p in enumerate(property.fields)`
            # raises on unpack (enumerate yields 2-tuples); iterate the
            # (name, definition) pairs directly, as parse_properties does
            for n, p in property.fields.items():
                if n == name:
                    # DEFAULT
                    columns.append({
                        "name": join_field(split_field(path)[1::]),
                        "type": p.type,
                        "useSource": p.index == "no"
                    })
                else:
                    # secondary sub-fields addressed as <path>\.<field>
                    columns.append({
                        "name": join_field(split_field(path)[1::]) + "\\." + n,
                        "type": p.type,
                        "useSource": p.index == "no"
                    })
            continue

        if property.type in [
            "string",
            "boolean",
            "integer",
            "date",
            "long",
            "double"
        ]:
            columns.append({
                "name": join_field(split_field(path)[1::]),
                "type": property.type,
                "useSource": property.index == "no"
            })
            if property.index_name and name != property.index_name:
                columns.append({
                    "name": property.index_name,
                    "type": property.type,
                    "useSource": property.index == "no"
                })
        elif property.enabled == None or property.enabled == False:
            # NOTE: `== None` relied upon for pyLibrary Null semantics
            columns.append({
                "name": join_field(split_field(path)[1::]),
                "type": "object",
                "useSource": True
            })
        else:
            Log.warning("unknown type {{type}} for property {{path}}", type=property.type, path=path)

    return columns
def parse_columns(parent_path, esProperties):
    """
    RETURN THE COLUMN DEFINITIONS IN THE GIVEN esProperties OBJECT

    Walks an Elasticsearch `properties` mapping recursively.  Nested mappings
    increment child `depth` and register themselves in INDEX_CACHE; object
    mappings recurse plainly; multi_field properties expand into one column
    per sub-field.

    :param parent_path: dotted path of the enclosing property ("" at root)
    :param esProperties: mapping of property name -> property definition
    :return: DictList of column descriptors ({"name", "type", "useSource"})
    """
    columns = DictList()
    for name, property in esProperties.items():
        # full dotted path for this property
        if parent_path:
            path = join_field(split_field(parent_path) + [name])
        else:
            path = name

        if property.type == "nested" and property.properties:
            # NESTED TYPE IS A NEW TYPE DEFINITION
            # MARKUP CHILD COLUMNS WITH THE EXTRA DEPTH
            child_columns = deepcopy(parse_columns(path, property.properties))
            self_columns = deepcopy(child_columns)
            for c in self_columns:
                c.depth += 1
            columns.extend(self_columns)
            columns.append({
                "name": join_field(split_field(path)[1::]),
                "type": "nested",
                "useSource": False
            })

            if path not in INDEX_CACHE:
                # seed the cache entry from the nearest cached ancestor
                pp = split_field(parent_path)
                for i in qb.reverse(range(len(pp))):
                    c = INDEX_CACHE.get(join_field(pp[:i + 1]), None)
                    if c:
                        INDEX_CACHE[path] = c.copy()
                        break
                else:
                    Log.error("Can not find parent")
                INDEX_CACHE[path].name = path
            INDEX_CACHE[path].columns = child_columns
            continue

        if property.properties:
            # plain object: recurse, then record the object column itself
            child_columns = parse_columns(path, property.properties)
            columns.extend(child_columns)
            columns.append({
                "name": join_field(split_field(path)[1::]),
                "type": "object",
                "useSource": False
            })

        if property.dynamic:
            continue
        if not property.type:
            continue
        if property.type == "multi_field":
            property.type = property.fields[name].type  # PULL DEFAULT TYPE
            # BUG FIX: original `for i, n, p in enumerate(property.fields)`
            # raises on unpack (enumerate yields 2-tuples); iterate the
            # (name, definition) pairs directly, matching parse_properties
            for n, p in property.fields.items():
                if n == name:
                    # DEFAULT
                    columns.append({"name": join_field(split_field(path)[1::]), "type": p.type, "useSource": p.index == "no"})
                else:
                    # secondary sub-fields addressed as <path>\.<field>
                    columns.append({"name": join_field(split_field(path)[1::]) + "\\." + n, "type": p.type, "useSource": p.index == "no"})
            continue

        if property.type in ["string", "boolean", "integer", "date", "long", "double"]:
            columns.append({
                "name": join_field(split_field(path)[1::]),
                "type": property.type,
                "useSource": property.index == "no"
            })
            if property.index_name and name != property.index_name:
                columns.append({
                    "name": property.index_name,
                    "type": property.type,
                    "useSource": property.index == "no"
                })
        elif property.enabled == None or property.enabled == False:
            # NOTE: `== None` relied upon for pyLibrary Null semantics
            columns.append({
                "name": join_field(split_field(path)[1::]),
                "type": "object",
                "useSource": True
            })
        else:
            Log.warning("unknown type {{type}} for property {{path}}", type=property.type, path=path)

    return columns