def get_yield_values(self):
        for_parents = [(y,
                        tree.search_ancestor(y, 'for_stmt', 'funcdef',
                                             'while_stmt', 'if_stmt'))
                       for y in get_yield_exprs(self.evaluator, self.tree_node)
                       ]

        # Determine whether the yields are placed within the same for loop.
        yields_order = []
        last_for_stmt = None
        for yield_, for_stmt in for_parents:
            # For really simple for loops we can predict the order. Otherwise
            # we just ignore it.
            parent = for_stmt.parent
            if parent.type == 'suite':
                parent = parent.parent
            if for_stmt.type == 'for_stmt' and parent == self.tree_node \
                    and parser_utils.for_stmt_defines_one_name(for_stmt):  # Simplicity for now.
                if for_stmt == last_for_stmt:
                    yields_order[-1][1].append(yield_)
                else:
                    yields_order.append((for_stmt, [yield_]))
            elif for_stmt == self.tree_node:
                yields_order.append((None, [yield_]))
            else:
                types = self.get_return_values(check_yields=True)
                if types:
                    yield context.get_merged_lazy_context(list(types))
                return
            last_for_stmt = for_stmt

        evaluator = self.evaluator
        for for_stmt, yields in yields_order:
            if for_stmt is None:
                # No for_stmt, just normal yields.
                for yield_ in yields:
                    for result in self._eval_yield(yield_):
                        yield result
            else:
                input_node = for_stmt.get_testlist()
                cn = ContextualizedNode(self, input_node)
                ordered = iterable.py__iter__(evaluator, cn.infer(), cn)
                ordered = list(ordered)
                for lazy_context in ordered:
                    dct = {
                        str(for_stmt.children[1].value): lazy_context.infer()
                    }
                    with helpers.predefine_names(self, for_stmt, dct):
                        for yield_in_same_for_stmt in yields:
                            for result in self._eval_yield(
                                    yield_in_same_for_stmt):
                                yield result
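
A minimal sketch of the grouping idea above, in plain Python with no jedi APIs (the names here are illustrative only): yields that sit inside a simple for loop are expanded once per iterated element, with the loop variable bound to that element, while yields outside any loop are evaluated directly. This mirrors the (for_stmt, [yield_]) pairs collected in yields_order.

def expand_yields(yields_order, evaluate):
    # yields_order: list of (loop_values or None, [yield expressions]).
    for loop_values, yields in yields_order:
        if loop_values is None:
            # Plain yields outside any loop: evaluate each one directly.
            for y in yields:
                yield evaluate(y, binding=None)
        else:
            # Yields inside a simple for loop: evaluate every yield once per
            # iterated element, with the loop variable bound to that element.
            for value in loop_values:
                for y in yields:
                    yield evaluate(y, binding=value)

# "for x in (1, 2): yield x * 10" expands to 10, 20.
print(list(expand_yields([((1, 2), ['x * 10'])],
                         lambda expr, binding: eval(expr, {'x': binding}))))
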
Example #2
    def get_yield_values(self):
        for_parents = [(y, tree.search_ancestor(y, 'for_stmt', 'funcdef',
                                                'while_stmt', 'if_stmt'))
                       for y in self.tree_node.iter_yield_exprs()]

        # Determine whether the yields are placed within the same for loop.
        yields_order = []
        last_for_stmt = None
        for yield_, for_stmt in for_parents:
            # For really simple for loops we can predict the order. Otherwise
            # we just ignore it.
            parent = for_stmt.parent
            if parent.type == 'suite':
                parent = parent.parent
            if for_stmt.type == 'for_stmt' and parent == self.tree_node \
                    and parser_utils.for_stmt_defines_one_name(for_stmt):  # Simplicity for now.
                if for_stmt == last_for_stmt:
                    yields_order[-1][1].append(yield_)
                else:
                    yields_order.append((for_stmt, [yield_]))
            elif for_stmt == self.tree_node:
                yields_order.append((None, [yield_]))
            else:
                types = self.get_return_values(check_yields=True)
                if types:
                    yield context.get_merged_lazy_context(list(types))
                return
            last_for_stmt = for_stmt

        evaluator = self.evaluator
        for for_stmt, yields in yields_order:
            if for_stmt is None:
                # No for_stmt, just normal yields.
                for yield_ in yields:
                    for result in self._eval_yield(yield_):
                        yield result
            else:
                input_node = for_stmt.get_testlist()
                cn = ContextualizedNode(self, input_node)
                ordered = iterable.py__iter__(evaluator, cn.infer(), cn)
                ordered = list(ordered)
                for lazy_context in ordered:
                    dct = {str(for_stmt.children[1].value): lazy_context.infer()}
                    with helpers.predefine_names(self, for_stmt, dct):
                        for yield_in_same_for_stmt in yields:
                            for result in self._eval_yield(yield_in_same_for_stmt):
                                yield result
Example #3
def py__iter__(evaluator, types, node=None):
    debug.dbg('py__iter__')
    type_iters = []
    for typ in types:
        try:
            iter_method = typ.py__iter__
        except AttributeError:
            if node is not None:
                # TODO this context is probably not right.
                analysis.add(typ, 'type-error-not-iterable', node,
                             message="TypeError: '%s' object is not iterable" % typ)
        else:
            type_iters.append(iter_method())

    for lazy_contexts in zip_longest(*type_iters):
        yield context.get_merged_lazy_context(
            [l for l in lazy_contexts if l is not None]
        )
Example #4
def py__iter__(evaluator, types, contextualized_node=None):
    debug.dbg('py__iter__')
    type_iters = []
    for typ in types:
        try:
            iter_method = typ.py__iter__
        except AttributeError:
            if contextualized_node is not None:
                analysis.add(contextualized_node.context,
                             'type-error-not-iterable',
                             contextualized_node._node,
                             message="TypeError: '%s' object is not iterable" %
                             typ)
        else:
            type_iters.append(iter_method())

    for lazy_contexts in zip_longest(*type_iters):
        yield context.get_merged_lazy_context(
            [l for l in lazy_contexts if l is not None])
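
Both versions of py__iter__ rely on the same merge pattern: every possible type contributes its own iterator, zip_longest walks those iterators in lockstep, and the values sharing an index are merged, dropping the None placeholders that zip_longest produces for exhausted iterators. A minimal sketch with plain Python values, where a set union stands in for get_merged_lazy_context:

from itertools import zip_longest

def merged_iteration(iterables):
    iterators = [iter(it) for it in iterables]
    for values in zip_longest(*iterators):
        # Merge the values that share an index; shorter iterators contribute
        # None, which is filtered out before merging.
        yield {v for v in values if v is not None}

# Two candidate sequences of different lengths, merged position by position.
print(list(merged_iteration([[1, 2, 3], ['a', 'b']])))
# e.g. [{1, 'a'}, {2, 'b'}, {3}] (set ordering may differ)
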
Example #5
    def unpack(self, func=None):
        named_args = []
        for stars, el in self._split():
            if stars == 1:
                arrays = self.context.eval_node(el)
                iterators = [
                    _iterate_star_args(self.context, a, el, func)
                    for a in arrays
                ]
                iterators = list(iterators)
                for values in list(zip_longest(*iterators)):
                    # TODO zip_longest yields None; does that mean this would
                    # raise an exception?
                    yield None, context.get_merged_lazy_context(
                        [v for v in values if v is not None])
            elif stars == 2:
                arrays = self._evaluator.eval_element(self.context, el)
                for dct in arrays:
                    for key, values in _star_star_dict(self.context, dct, el,
                                                       func):
                        yield key, values
            else:
                if el.type == 'argument':
                    c = el.children
                    if len(c) == 3:  # Keyword argument.
                        named_args.append((
                            c[0].value,
                            context.LazyTreeContext(self.context, c[2]),
                        ))
                    else:  # Generator comprehension.
                        # Include the brackets with the parent.
                        comp = iterable.GeneratorComprehension(
                            self._evaluator, self.context,
                            self.argument_node.parent)
                        yield None, context.LazyKnownContext(comp)
                else:
                    yield None, context.LazyTreeContext(self.context, el)

        # Reordering var_args is necessary, because star args sometimes appear
        # after a named argument, but in the actual order they are prepended.
        for named_arg in named_args:
            yield named_arg
Example #6
    def unpack(self, funcdef=None):
        named_args = []
        for star_count, el in self._split():
            if star_count == 1:
                arrays = self.context.eval_node(el)
                iterators = [_iterate_star_args(self.context, a, el, funcdef)
                             for a in arrays]
                iterators = list(iterators)
                for values in list(zip_longest(*iterators)):
                    # TODO zip_longest yields None; does that mean this would
                    # raise an exception?
                    yield None, context.get_merged_lazy_context(
                        [v for v in values if v is not None]
                    )
            elif star_count == 2:
                arrays = self._evaluator.eval_element(self.context, el)
                for dct in arrays:
                    for key, values in _star_star_dict(self.context, dct, el, funcdef):
                        yield key, values
            else:
                if el.type == 'argument':
                    c = el.children
                    if len(c) == 3:  # Keyword argument.
                        named_args.append((c[0].value, context.LazyTreeContext(self.context, c[2]),))
                    else:  # Generator comprehension.
                        # Include the brackets with the parent.
                        comp = iterable.GeneratorComprehension(
                            self._evaluator, self.context, self.argument_node.parent)
                        yield None, context.LazyKnownContext(comp)
                else:
                    yield None, context.LazyTreeContext(self.context, el)

        # Reordering var_args is necessary, because star args sometimes appear
        # after a named argument, but in the actual order they are prepended.
        for named_arg in named_args:
            yield named_arg
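
A minimal sketch of the reordering that unpack performs, with plain Python values instead of jedi contexts (the helper name and input format are made up for illustration): *args and positional values are yielded in source order with a None key, **kwargs entries expand to (key, value) pairs, and plain keyword arguments are collected and only yielded at the very end.

def unpack_arguments(args):
    # args: (star_count, value) pairs in source order; value is an iterable
    # for star_count == 1, a dict for star_count == 2, a (name, value) tuple
    # for a keyword argument, and anything else for a positional argument.
    named = []
    for star_count, value in args:
        if star_count == 1:
            for v in value:                # *args: expand positionally.
                yield None, v
        elif star_count == 2:
            for k, v in value.items():     # **kwargs: expand to keyword pairs.
                yield k, v
        elif isinstance(value, tuple):     # A keyword argument.
            named.append(value)
        else:                              # A plain positional argument.
            yield None, value
    # Keyword arguments written before star args still come last.
    yield from named

print(list(unpack_arguments([(0, ('x', 1)), (1, [2, 3]), (0, 4), (2, {'y': 5})])))
# [(None, 2), (None, 3), (None, 4), ('y', 5), ('x', 1)]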