Пример #1
0
def flatten_smlookups(comp):
    """Rewrite a comprehension so that demand-driven setmap lookups
    (e.g. for aggregates) are flattened out into their own clauses.
    """
    fresh_vars = L.NameGenerator('_av{}')
    lookup_replacer = LookupReplacer(fresh_vars)
    return L.rewrite_compclauses(comp, lookup_replacer.process)
Пример #2
0
    def test_replacer(self):
        # One plain lookup plus two demand-wrapped lookups; DEM1 occurs
        # twice in the tree so we can check that duplicate occurrences
        # share a single fresh variable.
        plain_lookup = L.pe('R.smlookup("bu", x)')
        demand_foo = L.pe('DEMQUERY(foo, [y], R.smlookup("bu", y))')
        demand_bar = L.pe('DEMQUERY(bar, [z], R.smlookup("bu", z))')

        tree = L.pe(
            'x + LOOK + DEM1 + DEM1 + DEM2',
            subst={'LOOK': plain_lookup,
                   'DEM1': demand_foo,
                   'DEM2': demand_bar})

        replacer = LookupReplacer(L.NameGenerator())
        tree, clauses = replacer.process(tree)

        exp_clauses = [
            L.Enumerator(L.sn('v1'), L.pe('{R.smlookup("bu", x)}')),
            L.Enumerator(L.sn('v2'),
                         L.pe('DEMQUERY(foo, [y], {R.smlookup("bu", y)})')),
            L.Enumerator(L.sn('v3'),
                         L.pe('DEMQUERY(bar, [z], {R.smlookup("bu", z)})')),
        ]
        exp_repls = {
            plain_lookup: 'v1',
            demand_foo: 'v2',
            demand_bar: 'v3',
        }

        self.assertEqual(tree, L.pe('x + v1 + v2 + v2 + v3'))
        self.assertEqual(clauses, exp_clauses)
        self.assertEqual(replacer.repls, exp_repls)
Пример #3
0
def inst_wildcards(vars):
    """Return a copy of the given tuple of variables in which each
    wildcard has been instantiated with a successive fresh variable
    _v1, ..., _vn.
    """
    fresh = L.NameGenerator('_v{}', 1)
    return apply_subst_tuple(vars, {'_': lambda _v: fresh.next()})
Пример #4
0
def flatten_tuples_comp(comp):
    """Eliminate nested tuples from a comprehension.

    Return a pair of the rewritten comprehension and an OrderedSet of
    the tuple relations that were introduced.
    """
    flattener = TupleFlattener(L.NameGenerator(fmt='_tup{}', counter=1))
    result = L.rewrite_compclauses(
        comp, flattener.process,
        after=True, enum_only=True, recursive=True)
    return result, flattener.trels
Пример #5
0
    def elim_wildcards(self):
        """Inverse of make_wildcards(): return a semantically
        equivalent join where every wildcard has been replaced by a
        fresh variable (_v1, _v2, ...).
        """
        namer = L.NameGenerator(fmt='_v{}', counter=1)
        wildcard_subst = {'_': lambda _: namer.next()}

        new_clauses = []
        for cl in self.clauses:
            # Only enumerator clauses have a LHS pattern to rewrite.
            if cl.kind is Clause.KIND_ENUM:
                cl = self.factory.rewrite_lhs(cl, wildcard_subst)
            new_clauses.append(cl)
        return self._replace(clauses=new_clauses)
Пример #6
0
    def __init__(self, namegen=None):
        """Initialize manager state for a transformation run.

        Args:
            namegen: optional L.NameGenerator for fresh variable
                names; a default-configured generator is created
                when None.
        """
        if namegen is None:
            namegen = L.NameGenerator()
        self.namegen = namegen
        """Unique variable identifier generator."""

        self.parser = L

        self.compnamegen = L.NameGenerator(fmt='Comp{}', counter=1)
        """Generator specifically for naming comprehension queries."""

        # Imported here rather than at module level — presumably to
        # avoid a circular import with incoq.compiler.aggr; confirm
        # before moving to the top of the file.
        from incoq.compiler.aggr import AGGR_PREFIX
        self.aggrnamegen = L.NameGenerator(fmt=AGGR_PREFIX + '{}', counter=1)
        """Generator specifically for naming aggregate queries."""

        self.options = OptionsManager()
        """Options for transformation."""

        self.header_comments = []
        """List of comments to emit at top of code."""

        self.vartypes = {}
        """Variable types. Keys are variable names, values are type terms."""

        self.stats = {
            'trans time': 0,  # transformation time (process time)
            'lines': 0,  # lines of code, excl. whitespace/comments
            'incr queries': 0,  # number of queries incrementalized
            'incr comps': 0,  # number of comps incrementalized
            'incr aggrs': 0,  # number of aggregates incrementalized
            'orig queries': 0,  # number of incr. queries that were
            # from the input program
            'orig updates': 0,  # number of updates to incr. queries
            # from the input program
            'dem structs': 0,  # number of tags/filters/inner-usets
            # created for filtered comps
            'comps expanded': 0,  # number of comps expanded as batch + maps
            'auxmaps': 0,  # number of auxmaps created
            'queries processed': 0,  # number of queries considered for
            # transformation (not necessarily actually
            # transformed)
            'queries skipped': 0,  # number of queries skipped for not
            # satisfying syntactic requirements
            # for transformation

            # The following are used for exporting transformation data
            # for later analysis.
            'costs': {},  # dictionary mapping from function name to
            # analyzed cost
            'domain_subst': {},  # domain constraint solutions
            'invariants': {},  # mapping from invariant name to spec obj
        }
        """Statistics about the transformation."""

        self.original_queryinvs = set()
        """Set of names of invariants corresponding to queries
        from the original program.
        """

        # Hackish: give the parser module back-references to this
        # manager and its options so parsing code can consult them.
        self.parser.manager = self
        self.parser.options = self.options

        # Still hackish.
        self.use_mset = False
        self.fields = []
        self.use_mapset = False

        self.invariants = {}
        """Map from name to IncComp/IncAggr object."""