def serialize_project(node, context):
    """Build a query that selects an already-inserted project by its name.

    Emits the IR equivalent of:
        SELECT <node.kind_name> FILTER .name = <node.name> LIMIT 1
    """
    name_matches = IR.filter(
        IR.attribute(None, "name"),
        IR.literal(node.name),
        "=",
    )
    return IR.select(node.kind_name, filters=name_matches, limit=1)
def aggregate_array(state):
    """Wrap the current match path in an ``array_agg`` call, ordered by index.

    If we are in a nested list search (e.g: Call(args=[Call(args=[Name()])]))
    we can't directly use `ORDER BY @index` since the EdgeDB can't quite infer
    which @index are we talking about, so the inner select is routed through a
    FOR loop bound to a compiler-internal target name.
    """
    # Idiom fix: truthiness test instead of `len(state.parents) >= 1`.
    if state.parents:
        # Nested search: disambiguate @index by iterating the parent's path
        # and selecting through a typed, compiler-internal loop variable.
        path = IR.attribute(
            IR.typed(IR.name(_COMPILER_WORKAROUND_FOR_TARGET), state.match),
            state.pointer,
        )
        body = IR.loop(
            IR.name(_COMPILER_WORKAROUND_FOR_TARGET),
            state.parents[-1].compute_path(allow_missing=True),
            IR.select(path, order=IR.property("index")),
        )
    else:
        # Top-level list search: @index is unambiguous, select directly.
        body = IR.select(state.compute_path(), order=IR.property("index"))
    return IR.call("array_agg", [body])
def serialize_ast(node, context):
    """Serialize an AST node and return an IR expression that references it."""
    if node.is_enum:
        # Enum members need no insertion, e.g. <ast::op>'Add'
        return IR.enum_member(node.base_name, node.kind_name)

    # Non-enum nodes are inserted first:
    # (INSERT ast::BinOp {.left := ..., ...})
    reference = apply_ast(node, context)
    context.new_reference(reference.id)

    # ... then re-selected by id:
    # (SELECT ast::expr FILTER .id = ... LIMIT 1)
    return IR.select(node.base_name, filters=IR.object_ref(reference), limit=1)
def serialize_sequence(sequence, context):
    """Serialize *sequence* into an IR set, adding @index for AST members."""
    members = [serialize(value, context) for value in sequence]
    ir_set = IR.set(members)

    if all(isinstance(item, _BASIC_SET_TYPES) for item in sequence):
        # Plain values serialize directly:
        # {1, 2, 3} / {<ast::op>'Add', <ast::op>'Sub', ...}
        return ir_set

    # Inserting a sequence of AST objects would require special
    # attention to calculate the index property, so enumerate the set
    # and attach each position as @index.
    target = IR.name("item")
    scope = IR.namespace({"items": ir_set})
    indexed_select = IR.select(
        IR.attribute(target, 1),
        selections=[IR.assign(IR.property("index"), IR.attribute(target, 0))],
    )
    loop = IR.loop(
        target,
        IR.call("enumerate", [IR.name("items")]),
        indexed_select,
    )
    return IR.add_namespace(scope, loop)
from reiz.serialization.statistics import Insertion, Statistics
from reiz.utilities import _available_cores, guarded, logger


# The guard converts a raised InternalDatabaseError into an Insertion.FAILED
# result instead of letting it propagate.
@guarded(Insertion.FAILED, ignored_exceptions=(InternalDatabaseError, ))
def insert_file(context):
    """Insert a single source file's AST into the database.

    Returns an Insertion status: CACHED when the file was already inserted,
    SKIPPED when it could not be parsed into an AST.
    """
    if context.is_cached():
        return Insertion.CACHED

    # Walrus: parse once, bail out when parsing yields nothing.
    if not (tree := context.as_ast()):
        return Insertion.SKIPPED

    # All inserts/updates for this file happen in one transaction.
    with context.connection.transaction():
        module = apply_ast(tree, context)
        # (SELECT <module kind> FILTER .id = <module id> LIMIT 1)
        module_select = IR.select(
            tree.kind_name, filters=IR.object_ref(module), limit=1
        )
        # FILTER .id IN array_unpack(<array<uuid>>$ids)
        update_filter = IR.filter(
            IR.attribute(None, "id"),
            IR.call(
                "array_unpack", [IR.cast("array<uuid>", IR.variable("ids"))]
            ),
            "IN",
        )
        # Back-link every module-annotated node to the inserted module.
        for base_type in Schema.module_annotated_types:
            update = IR.update(
                base_type.kind_name,
                filters=update_filter,
                assignments={"_module": module_select},
            )
            context.connection.query(IR.construct(update),
            # NOTE(review): the excerpt is truncated here — the remainder of
            # this call (and of the function) lies outside the visible chunk.
# NOTE(review): this chunk opens mid-function — the enclosing `def` (the
# grammar.Match handler, judging by the decorator style below) is above the
# visible excerpt, so the following lines are reconstructed at function-body
# indentation.
    # AND together the compiled filter of every non-ignored matcher field.
    filters = None
    for key, value in node.filters.items():
        if value is grammar.Ignore:
            continue
        # Walrus: only fold in fields that actually compiled to a filter.
        if right_filter := state.compile(key, value):
            filters = IR.combine_filters(filters, right_filter)

    if state.is_root:
        # Root matcher: close the scope, merge any filters/variables the
        # compilation state accumulated, and emit the final SELECT.
        state.scope.exit()
        if state_filters := IR.unpack_filters(state.filters):
            filters = IR.combine_filters(filters, state_filters)
        if state.variables:
            namespace = IR.namespace(state.variables)
            filters = IR.add_namespace(namespace, IR.select(filters))
        return IR.select(state.match, filters=filters)

    # Nested matcher with no field filters: fall back to a bare type check
    # (`<parent path> IS <matched type>`).
    if filters is None:
        filters = IR.filter(
            state.parents[-1].compute_path(), IR.wrap(state.match), "IS"
        )
    return filters


@codegen.register(grammar.MatchEnum)
def convert_match_enum(node, state):
    """Compile an enum matcher into an equality filter on the current path."""
    expr = IR.enum_member(node.base, node.name)
    return IR.filter(state.compute_path(), expr, "=")
# NOTE(review): this chunk opens mid-expression — the tail of a selection
# list whose opening bracket is above the visible excerpt.
    # Project the module's filename plus its project's git source/revision.
    IR.selection(
        "_module",
        [
            IR.selection("filename"),
            IR.selection(
                "project",
                [IR.selection("git_source"), IR.selection("git_revision")],
            ),
        ],
    ),
]

# Query that counts every statistics node kind and merges the counts into
# a single result set.
STATS_QUERY = IR.construct(
    IR.select(
        IR.merge(
            IR.call("count", [IR.wrap(name)]) for name in STATISTICS_NODES
        )
    )
)


class LocationNode(ast.AST):
    # Minimal AST stand-in that carries only source-location attributes.
    _attributes = ("lineno", "col_offset", "end_lineno", "end_col_offset")


def get_username(link):
    """Extract the user/organization segment from a repository URL.

    E.g. "https://github.com/user/repo" -> "user"; a trailing slash adds an
    empty segment to the split result, hence the larger offset.
    """
    if link.endswith("/"):
        index = 3
    else:
        index = 2
    return link.split("/")[-index]