Code example #1
    def _parse_one(self, pos):
        if pos >= len(self.toks):
            return None

        tok = self.toks[pos]
        if tok.tid == lexing.TokenID.TEXT:
            return ast.AST(ASTID.TEXT, tok.text), pos + 1
        elif tok.tid == lexing.TokenID.TEXT_VERBATIM:
            return ast.AST(ASTID.TEXT_VERBATIM, tok.text), pos + 1
        elif tok.tid == lexing.TokenID.END:
            error.emit_token_error(tok, "unexpected }")
        elif tok.tid == lexing.TokenID.BEGIN:
            cmd = tok.text
            pos += 1
            args = []
            # Collect arguments until the matching END token.
            while pos < len(self.toks) and self.toks[pos].tid != lexing.TokenID.END:
                arg, pos = self._parse_one(pos)
                # Drop empty text nodes.
                if not (arg.aid == ASTID.TEXT and len(arg.text) == 0):
                    args.append(arg)
            # The loop only stops early on END, so running past the end of
            # the token list means the closing brace is missing.
            if pos >= len(self.toks):
                error.emit_token_error(self.toks[-1], "{ expects closing }")
            return ast.AST(ASTID.COMMAND, cmd, *args), pos + 1
        else:
            raise NotImplementedError("unsupported token ID")
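For orientation, here is a minimal self-contained sketch of the same recursive-descent pattern. Token, TokenID, and parse_one are hypothetical stand-ins: the lexing, error, and ast modules above are project-local, not the standard library.

from dataclasses import dataclass
from enum import Enum, auto

class TokenID(Enum):
    TEXT = auto()
    BEGIN = auto()
    END = auto()

@dataclass
class Token:
    tid: TokenID
    text: str = ""

def parse_one(toks, pos):
    # Returns (node, next_pos); nodes are plain tuples instead of ast.AST.
    tok = toks[pos]
    if tok.tid == TokenID.TEXT:
        return ("TEXT", tok.text), pos + 1
    if tok.tid == TokenID.BEGIN:  # { cmd arg ... }
        cmd, pos, args = tok.text, pos + 1, []
        while pos < len(toks) and toks[pos].tid != TokenID.END:
            arg, pos = parse_one(toks, pos)
            args.append(arg)
        return ("COMMAND", cmd, args), pos + 1
    raise SyntaxError("unexpected }")

toks = [Token(TokenID.BEGIN, "bold"), Token(TokenID.TEXT, "hi"), Token(TokenID.END)]
print(parse_one(toks, 0)[0])  # ('COMMAND', 'bold', [('TEXT', 'hi')])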
Code example #2
def p_returnmainvalue(p):
    '''
    returnmainvalue : RETURN NUMBER SEQ
    '''
    # Tag the return node with the concrete type of the numeric literal.
    if isinstance(p[2], int):
        p[0] = ast.AST('return', 'int')
    elif isinstance(p[2], float):
        p[0] = ast.AST('return', 'float')
    p[0].sons = [p[2], p.lineno(2)]
Code example #3
File: test_ast.py Project: life02/kbengine-cocos2dx
    def test_AST_objects(self):
        x = ast.AST()
        self.assertEqual(x._fields, ())

        with self.assertRaises(AttributeError):
            x.vararg

        with self.assertRaises(TypeError):
            # "_ast.AST constructor takes 0 positional arguments"
            ast.AST(2)
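The behaviour this test pins down is easy to check interactively; a quick standalone demonstration of the stdlib ast.AST base class:

import ast

x = ast.AST()
print(x._fields)  # () -- the base class declares no fields
x.foobar = 42     # arbitrary attributes can still be attached
print(x.foobar)   # 42
# ast.AST(2) raises TypeError: positional arguments are rejected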
Code example #4
def p_declaration(p):
    '''
    declaration : INT ID
                | FLOAT ID
    '''
    # p[1] is the type keyword itself, either 'int' or 'float'.
    p[0] = ast.AST('declaration', p[1])
    p[0].sons = [p[2], p.lineno(1), p.lineno(2)]
Code example #5
def p_expression(p):
    '''expression : NUMBER
                | ID
                | expression OPBIN expression'''
    if len(p) > 2:
        p[0] = ast.AST('OPBIN', p[2])
        p[0].sons = [p[1], p[3]]
    elif isinstance(p[1], str):
        p[0] = ast.AST('ID', p[1])
    else:
        p[0] = ast.AST('NUMBER', p[1])
Code example #6
def p_commande(p):
    '''commande : ID EQUAL expression
                | commande SEQ commande
                | WHILE LP expression RP LB commande RB
    '''
    if len(p) > 4:
        # WHILE LP expression RP LB commande RB
        p[0] = ast.AST('commande', 'while')
        p[0].sons = [p[3], p[6]]
    elif p[2] == '=':
        # ID EQUAL expression
        p[0] = ast.AST('commande', 'asgnt')
        p[0].sons = [p[1], p[3]]
    else:
        # commande SEQ commande
        p[0] = ast.AST('commande', 'seq')
        p[0].sons = [p[1], p[3]]
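Examples #2 and #4 through #6 are PLY (yacc) grammar actions. Roughly, they assume boilerplate like the following sketch, where mylexer is a hypothetical module defining the token list and the lexer:

import ply.yacc as yacc
import mylexer  # hypothetical: defines tokens such as ID, EQUAL, WHILE, LP, RP, LB, RB, SEQ

tokens = mylexer.tokens  # PLY looks up the token list in the grammar module

# ... the p_* rule functions shown above go here ...

parser = yacc.yacc()  # builds the LALR tables from every p_* function in this module
tree = parser.parse("x = 1 + 2", lexer=mylexer.lexer)  # returns the top-level ast.AST node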
Code example #7
    def analyzePrograma(self):
        tree = None
        if self.component is None:
            self.errored = True
        elif (self.component.cat == "PR"
              and self.component.valor == "PROGRAMA"):
            self.advance()
            ids = []
            v = None
            if (hasattr(self, 'component') and hasattr(self.component, 'cat')
                    and self.component.cat == "Identif"):
                ids = [self.component.valor]
                v = self.component.valor
            self.check(cat="Identif", sync=set([None, "PtoComa"]))
            self.check(cat="PtoComa",
                       sync=set([None, "PR"]),
                       spr=set(["VAR", "PROC", "FUNCION", "INICIO"]))
            decl_var = self.analyzeDeclVar(ids=ids)
            decl_subprg = self.analyzeDeclSubprg(ids=decl_var['ids'],
                                                 tipo_id=decl_var['tipo_id'])
            instrucciones = self.analyzeInstrucciones(
                ids=decl_subprg['ids'], tipo_id=decl_subprg['tipo_id'])
            self.check(cat="Punto", sync=set([None]), endEx=True)
            tree = ast.AST(v, instrucciones['nodos'])
        else:
            self.error(msg='PROGRAMA', sync=set([None]))
        return (tree, not self.errored)
Code example #8
def p_main(p):
    '''
    main : typeetmain LP listeparamsmain RP LB listedeclarations commande SEQ PRINT LP expression RP SEQ returnmainvalue RB
    '''

    p[0] = ast.AST('prog', 'main')
    p[0].sons = [p[1], p[3], p[6], p[7], p[11], p[14]]
Code example #9
    def __classifyReturnError(self, methodType, visibilityType, lineno,
                              colOffset):
        """
        Private method to classify and record a return annotation issue.
        
        @param methodType type of method/function the argument belongs to
        @type str
        @param visibilityType visibility of the function
        @type str
        @param lineno line number
        @type int
        @param colOffset column number
        @type int
        """
        # create a dummy AST node to report line and column
        node = ast.AST()
        node.lineno = lineno
        node.col_offset = colOffset

        # now classify the issue
        if methodType == "classmethod":
            self.issues.append((node, "A206"))
        elif methodType == "staticmethod":
            self.issues.append((node, "A205"))
        elif visibilityType == "special":
            self.issues.append((node, "A204"))
        elif visibilityType == "private":
            self.issues.append((node, "A203"))
        elif visibilityType == "protected":
            self.issues.append((node, "A202"))
        else:
            self.issues.append((node, "A201"))
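This dummy-node trick is handy whenever an API expects an AST node but only the source position matters. A standalone sketch (attributes are set one by one here because newer CPython releases have deprecated passing arbitrary keyword arguments to ast.AST, the one-step style used in examples #14, #24, and #26 below):

import ast

def location_node(lineno, col_offset):
    # A bare node carrying only the position information reporters need.
    node = ast.AST()
    node.lineno = lineno
    node.col_offset = col_offset
    return node

issue = (location_node(3, 7), "A201")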
Code example #10
    def build(self, lex_file, dict_dir):
        """
           Read the rule file and the dictionary files,
           then build the automaton and the matching engine.
        """
        ori_in = read_lex(lex_file)
        script = char_stream(ori_in)
        tokens = token_list(script).tokens
        ast_obj = ast.AST(stream(tokens))

        # Comma-separated list of dictionary directories; every .txt file in each directory is loaded
        all_slot_entity_files = []
        if dict_dir:
            dict_dirs = dict_dir.split(",")
            for dir_path in dict_dirs:
                all_slot_entity_files.extend(glob.glob(dir_path + "/*.txt"))
        print("词典文件:", all_slot_entity_files)

        # keywords that appear in the rules
        keywords = ast.extract_all_atoms(ast_obj)
        ac_machine = ac.AC()
        ac_machine.make(keywords, all_slot_entity_files)

        rule_graph = parse.RuleStructure(ast_obj)
        rule_trie, rule_info = rule_graph.build()
        self.searcher = search.Searcher(rule_trie, rule_info, ac_machine)
        self.rule_info = rule_info
Code example #11
    def test_AST_objects(self):
        if not support.check_impl_detail():
            # PyPy also provides a __dict__ to the ast.AST base class.
            return

        x = ast.AST()
        self.assertEqual(x._fields, ())
        x.foobar = 42
        self.assertEqual(x.foobar, 42)
        self.assertEqual(x.__dict__["foobar"], 42)

        with self.assertRaises(AttributeError):
            x.vararg

        with self.assertRaises(TypeError):
            # "_ast.AST constructor takes 0 positional arguments"
            ast.AST(2)
Code example #12
    def test_AST_garbage_collection(self):
        class X:
            pass
        a = ast.AST()
        a.x = X()
        a.x.a = a
        ref = weakref.ref(a.x)
        del a
        support.gc_collect()
        self.assertIsNone(ref())
Code example #13
    def test_prepare_message(self, valid_msg, source, lineno, col, lineno_exp,
                             col_exp):  # noqa
        node = ast.AST()
        node.lineno = 10
        msg = valid_msg.prepare_message(source=source,
                                        node=node,
                                        lineno=lineno,
                                        col=col)
        assert msg.line == lineno_exp
        assert msg.col == col_exp
        assert msg.source == source
Code example #14
    def attach_comments(curr, prev):
        """
        LOOK FOR CODE BETWEEN prev AND curr
        :return: wrapped node with the code
        """
        if hasattr(prev.node, "lineno"):
            start_line = i = prev.node.end_lineno - 1
            end_line = curr.node.lineno - 1
            start_col = prev.node.end_col_offset
            end_col = len(lines[start_line]) if start_line < end_line else curr.node.col_offset
            res = lines[start_line][start_col:end_col]
            clr = res.lstrip()

            if prev.node.end_col_offset:
                start_line += 1
                if clr.startswith("#"):
                    prev.line_comment = clr

            while i < end_line:
                if clr and not clr.startswith("#"):
                    break
                i += 1
                start_col = 0  # between-node code is below prev
                res = lines[i]
                clr = res.lstrip()
            else:
                # CHECK IF THERE IS CODE AHEAD OF node ON node's LINE
                res = res[:curr.node.col_offset]
                clr = res.lstrip()
                if not clr:
                    curr.before_comment = [l.strip() for l in lines[start_line:end_line]] or None
                    return

            # IDENTIFY THE CODE
            s = len(res) - len(clr) + start_col
            e = res.find("#")
            if e == -1:
                e = len(res)
            e += start_col
            before = Previous(
                code=lines[i][s:e],
                before_comment=[l.strip() for l in lines[start_line:i]] or None,
                node=ast.AST(
                    **{
                        "lineno": i + 1,
                        "col_offset": s + 1,
                        "end_lineno": i + 1,
                        "end_col_offset": e,
                    }
                ),
            )
            attach_comments(curr, before)
            curr.before = before
Code example #15
    def test_prepare_invalid_message_invalid_arg(self, valid_msg, args, desc,
                                                 exp_error):  # noqa
        node = ast.AST()
        node.lineno = 10
        valid_msg.desc = desc
        with pytest.raises(robocop.exceptions.InvalidRuleUsageError) as err:
            valid_msg.prepare_message(*args,
                                      source='file1.robot',
                                      node=node,
                                      lineno=None,
                                      col=None)
        assert rf"Fatal error: Rule '0101' failed to prepare message description with error: {exp_error}" in str(
            err)
Code example #16
File: base_parser.py Project: allofhercats/whiskey
    def parse_unary_right(self, rhs_rule, base_rule, op_mapping):
        if not self.more_tokens():
            return ParserResult(None, False)

        for i in op_mapping:
            if self.get_token().tid == i[0]:
                tok = self.eat_token()

                rhs = self.expect_parse(rhs_rule)
                if rhs.keep:
                    return ParserResult(ast.AST(tok, i[1], rhs=rhs.ast), True)
                else:
                    return ParserResult(None, False)

        return self.try_parse(base_rule)
Code example #17
    def test_prepare_message_with_jinja(self, kwargs, msg, exp):  # noqa
        node = ast.AST()
        node.lineno = 10
        rule = Rule(rule_id="0101", name="some-message", msg=msg, severity=RuleSeverity.WARNING)
        msg = rule.prepare_message(
            source="file1.robot",
            node=node,
            lineno=None,
            col=None,
            end_lineno=None,
            end_col=None,
            ext_disablers=None,
            **kwargs,
        )
        assert msg.desc == exp
Code example #18
    def test_prepare_message(self, valid_msg, source, range,
                             range_exp):  # noqa
        node = ast.AST()
        node.lineno = 10
        lineno, col, end_lineno, end_col = range
        lineno_exp, col_exp, end_lineno_exp, end_col_exp = range_exp
        msg = valid_msg.prepare_message(source=source,
                                        node=node,
                                        lineno=lineno,
                                        col=col,
                                        end_lineno=end_lineno,
                                        end_col=end_col)
        assert msg.line == lineno_exp
        assert msg.col == col_exp
        assert msg.end_line == end_lineno_exp
        assert msg.end_col == end_col_exp
        assert msg.source == source
Code example #19
File: base_parser.py Project: allofhercats/whiskey
    def parse_binary(self, lhs_rule, rhs_rule, op_mapping):
        lhs = self.try_parse(lhs_rule)
        if not lhs.keep:
            return ParserResult(None, False)

        if not self.more_tokens():
            return lhs

        tok = self.get_token()
        for i in op_mapping:
            if tok.tid == i[0]:
                self.eat_token()

                rhs = self.expect_parse(rhs_rule)

                return ParserResult(
                    ast.AST(tok, i[1], lhs=lhs.ast, rhs=rhs.ast), True)

        return lhs
Code example #20
File: base_parser.py Project: allofhercats/whiskey
    def parse_unary_left(self, lhs_rule, op_mapping):
        lhs = self.try_parse(lhs_rule)
        if not lhs.keep:
            return ParserResult(None, False)

        while self.more_tokens():
            tok = self.get_token()

            matched = False
            for i in op_mapping:
                if tok.tid == i[0]:
                    lhs.ast = ast.AST(tok, i[1], lhs=lhs.ast)
                    matched = True
                    break

            if matched:
                self.eat_token()
            else:
                break

        return lhs
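All three helpers above compare tok.tid against i[0] and tag the new node with i[1], so op_mapping is evidently a sequence of (token id, node kind) pairs. A hypothetical illustration (these names are invented, not taken from the whiskey project):

OP_MAPPING = [
    ("PLUS", "add"),   # token id -> AST node kind
    ("MINUS", "sub"),
]
# e.g. self.parse_binary(self.parse_term, self.parse_term, OP_MAPPING)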
Code example #21
    code = "def x(a): pass"
    s = setup_state(code, code)
    with pytest.raises(
        InstructorError,
        match=r"`check_call\(\)` can only be called on `check_function_def\(\)` or `check_lambda_function\(\)`\.",
    ):
        s.check_object("x").check_call("f(1)")


# Utility functions to make the above work ------------------------------------


@pytest.mark.parametrize(
    "element, no_error",
    [
        (ast.AST(), True),
        ([ast.AST()], True),
        ({"node": ast.AST()}, True),
        ({"node": [ast.AST()]}, True),
        (1, False),
        ([1, 2], False),
        ({"node": 1}, False),
        ({"node": [1, 2]}, False),
    ],
)
def test_assert_ast(element, no_error):
    s = setup_state()._state
    if no_error:
        assert_ast(s, element, {})
    else:
        with pytest.raises(InstructorError):
            assert_ast(s, element, {})
Code example #22
    s = setup_state(code, code)
    with pytest.raises(
            InstructorError,
            match=
            r"`check_call\(\)` can only be called on `check_function_def\(\)` or `check_lambda_function\(\)`\.",
    ):
        s.check_object("x").check_call("f(1)")


# Utility functions to make the above work ------------------------------------


@pytest.mark.parametrize(
    "element, no_error",
    [
        (ast.AST(), True),
        ([ast.AST()], True),
        ({
            "node": ast.AST()
        }, True),
        ({
            "node": [ast.AST()]
        }, True),
        (1, False),
        ([1, 2], False),
        ({
            "node": 1
        }, False),
        ({
            "node": [1, 2]
        }, False),
    ],
)
Code example #23
    s = setup_state(code, code)
    with helper.set_v2_only_env('1'):
        with pytest.raises(InstructorError, match=r"`has_equal_ast\(\)` should not be called on `check_function\(\)`\."):
            s.check_function('round').has_equal_ast()

def test_check_call_not_on_check_function_def():
    code = 'def x(a): pass'
    s = setup_state(code, code)
    with pytest.raises(InstructorError, match=r"`check_call\(\)` can only be called on `check_function_def\(\)` or `check_lambda_function\(\)`\."):
        s.check_object('x').check_call("f(1)")

# Utility functions to make the above work ------------------------------------

@pytest.mark.parametrize('element, no_error',
    [
        (ast.AST(), True),
        ([ast.AST()], True),
        ({'node': ast.AST()}, True),
        ({'node': [ast.AST()]}, True),
        (1, False),
        ([1, 2], False),
        ({'node': 1}, False),
        ({'node': [1, 2]}, False),
    ],
)
def test_assert_ast(element, no_error):
    s = setup_state()._state
    if no_error:
        assert_ast(s, element, {})
    else:
        with pytest.raises(InstructorError):
            assert_ast(s, element, {})
Code example #24
import ast
import textwrap
from argparse import ArgumentParser
from collections import defaultdict
from functools import partial
from pathlib import Path

import pyasdl

_BASE_CLASS = "AST"

_TYPING = ast.Name("typing", ast.Load())
_SYS_VERSION = ast.Attribute(ast.Name("sys"), "version_info", ast.Load())
_EMPTY_BODY = [ast.Constant(...)]
_DUMMY_CONDITION = ast.AST(test=ast.AST())


class StubGenerator(pyasdl.ASDLVisitor):
    def __init__(self):
        self.namespaces = defaultdict(list)

    def visit_Type(self, node):
        return self.visit(
            node.value,
            name=node.name,
            attributes=self.visit_all(node.value.attributes),
        )

    def visit_Sum(self, node, name, attributes):
        self._create_type(name,
                          base=_BASE_CLASS,
Code example #25
    for point in points:
        res = tree.eval({var: point})
        if res == "ERROR":
            return -1
        total += abs(res - target.eval({var: point}))  # reuse res instead of re-evaluating tree
    return total


POPULATION_SIZE = 10
MUTATION_RATE = 0.3
TARGET = t.SubNode(
    t.AddNode(t.MultNode(t.TerminalNode("x"), t.TerminalNode("x")),
              t.TerminalNode("x")), t.TerminalNode(6))
TERMINALS = [int(i) for i in range(-10, 11)] + ["x"]
FUNCTIONS = [t.AddNode, t.SubNode, t.MultNode]
GENERATOR = ast.AST(FUNCTIONS, TERMINALS)
print("=============================")
print("Symbolic Regression")
print("==============================")
#ga = GeneticAlgorithm(POPULATION_SIZE, fitness_fun_single_var, MUTATION_RATE,
#                      GENERATOR, TARGET, var="x", points=[i for i in range(-100, 101)])
#ga.run()
print("==============================")
print("=============================")
print("Symbolic Regression with division")
print("==============================")
FUNCTIONS = [t.AddNode, t.SubNode, t.MultNode, t.DivNode]
GENERATOR = ast.AST(FUNCTIONS, TERMINALS)
ga = GeneticAlgorithm(POPULATION_SIZE,
                      fitness_fun_single_var_div,
                      MUTATION_RATE,
                      GENERATOR,
                      TARGET,
                      var="x",
                      points=[i for i in range(-100, 101)])
ga.run()
Code example #26
    def add_comments(node, prev, parent):
        """
        ANNOTATE node WITH COMMENTS
        :return ANNOTATED node AND ORIGINAL prev
        :param node: THE NODE WE ARE ANNOTATING WITH COMMENTS
        :param prev: THE NODE BEFORE THIS ONE, MAYBE BELONGING TO SOME OTHER STRUCTURE, REQUIRED SO WE CAN ADD IT line_comment
        :param parent: THE PARENT OF node, JUST IN CASE WE WANT MORE CONTEXT
        """
        if not hasattr(node, "_fields"):
            return node, prev
        try:
            wrapper_class = lookup[node.__class__]
            output = wrapper_class(node=node)
        except KeyError:
            Log.error("Do not have a wrapper for class {{class_name}}", class_name=node.__class__.__name__)

        # DECORATORS ARE BEFORE FUNCTION/CLASS DEFINITION
        if "decorator_list" in node._fields:
            dec_list = output["decorator_list"] = []
            for d in node.decorator_list:
                dd, prev = add_comments(d, prev, output)
                dd.is_decorator = True
                dec_list.append(dd)

        # CAPTURE COMMENT LINES ABOVE NODE
        if hasattr(node, "lineno") and hasattr(prev.node, "end_lineno"):
            attach_comments(output, prev)
            first_child = latest_child = Sentinal(  # SENTINEL FOR BEGINNING OF TOKEN
                is_begin=True,
                node={
                    "lineno": node.lineno,
                    "col_offset": node.col_offset,
                    "end_lineno": node.lineno,
                    "end_col_offset": node.col_offset,
                },
            )
        else:
            first_child = latest_child = prev

        for f in node._fields:
            if f == "decorator_list":
                # DECORATORS ARE TREATED SPECIALLY, BEFORE
                continue
            if f == "ctx":
                # THESE "context" VARIABLES HAVE NO PLACE IN THE SOURCE CODE
                continue
            field_value = getattr(node, f)
            if not field_value:
                continue
            if isinstance(field_value, list):
                child_list = output[f] = []
                for c in field_value:
                    cc, latest_child = add_comments(c, latest_child, output)
                    if isinstance(c, ast.Expr):
                        cc.eol = CR
                    child_list.append(cc)
            else:
                value, latest_child = add_comments(
                    field_value, latest_child, output
                )
                output[f] = value
                if isinstance(field_value, ast.Constant) and lines[
                    field_value.lineno - 1
                ][field_value.col_offset :].startswith('"""'):
                    # DETECT MULTILINE STRING
                    value.is_multiline_string = True
                elif isinstance(field_value, ast.arguments) and not any(
                    getattr(field_value, f) for f in field_value._fields
                ):
                    # EMPTY ARGUMENTS HAVE NO LOCATION
                    # ASSUME ARGUMENTS START ON THIS LINE
                    argline = lines[node.lineno - 1]
                    found = re.search(r"\(\s*\)", argline)
                    if not found:
                        Log.error(
                            "expecting empty arguments on line {{line}}", line=argline
                        )
                    location = first(found.regs)
                    latest_child = Sentinal(
                        node={
                            "lineno": node.lineno,
                            "col_offset": location[0] + 1,
                            "end_lineno": node.lineno,
                            "end_col_offset": location[1],
                        }
                    )
                if is_data(value) and value.before:
                    pass
                pass

        prev = latest_child
        if prev is first_child:
            prev = output
        elif hasattr(node, "lineno"):
            # END OF NODE
            eon = ast.AST(
                **{
                    "is_end": True,
                    "lineno": output.node.end_lineno,
                    "col_offset": output.node.end_col_offset,
                    "end_lineno": output.node.end_lineno,
                    "end_col_offset": output.node.end_col_offset,
                }
            )
            eon, _ = add_comments(eon, prev, parent)
            if eon.before.before_comment or eon.before.line_comment:
                output.after = eon.before
            output.after_comment = eon.before_comment

            if not hasattr(prev.node, "lineno"):
                prev = output
            elif (
                (node.end_lineno, node.end_col_offset)
                > (prev.node.end_lineno, prev.node.end_col_offset)
                >= (node.lineno, node.col_offset)
            ):
                prev = output
            elif (
                # IF ALL ON ONE LINE, THEN GIVE COMMENT TO BIGGEST ast ON LINE
                node.lineno
                == node.end_lineno
                == prev.node.lineno
                == prev.node.end_lineno
            ):
                prev = output

        return output, prev
Code example #27
def p_main(p):
    '''
    main : MAIN LP listevariables RP LB commande SEQ PRINT LP expression RP SEQ RB
    '''
    p[0] = ast.AST('prog', 'main')
    p[0].sons = [p[3], p[6], p[10]]
Code example #28
def _parse_out_default_and_doc(
    _start_idx,
    start_rest_offset,
    default,
    line,
    rstrip_default,
    typ,
    default_end_offset,
    emit_default_doc,
):
    """
    Internal function to parse the default and extract out the doc iff `emit_default_doc is False`

    :param _start_idx: The start index to look from
    :type _start_idx: ```int```

    :param start_rest_offset: The start index to look from, for the rest that's appended
    :type start_rest_offset: ```int```

    :param default: The currently parsed out default, could be the end form, could parse into something more specific
    :type default: ```Any```

    :param line: Example - "dataset. Defaults to mnist"
    :type line: ```str```

    :param rstrip_default: Whether to rstrip whitespace, newlines, and '.' from the default
    :type rstrip_default: ```bool```

    :param typ: The type of the default value, useful to disambiguate `25` the int from `25` the float
    :type typ: ```Optional[str]```

    :param default_end_offset: Set to -1 if one parenthesis, -2 if )., and 0 if none
    :type default_end_offset: ```int```

    :param emit_default_doc: Whether help/docstring should include 'With default' text
    :type emit_default_doc: ```bool```

    :returns: Example - ("dataset. Defaults to mnist", "mnist") if emit_default_doc else ("dataset", "mnist")
    :rtype: Tuple[str, Optional[str]]
    """
    if typ is not None and typ in simple_types and default not in none_types:
        lit = (ast.AST() if typ != "str" and any(
            map(
                partial(contains, frozenset(
                    ("*", "^", "&", "|", "$", "@", "!"))),
                default,
            )) else literal_eval("({default})".format(default=default)))
        default = ("```{default}```".format(
            default=default) if isinstance(lit, ast.AST) else {
                "bool": bool,
                "int": int,
                "float": float,
                "complex": complex,
                "str": str,
            }[typ](lit))
    elif default.isdecimal():
        default = int(default)
    elif default in frozenset(("True", "False")):
        default = literal_eval(default)
    else:
        with suppress(ValueError):
            default = float(default)
    if emit_default_doc:
        return line, default
    else:
        stop_tokens = frozenset((" ", "\t", "\n", "."))
        extra_offset = int(line[:_start_idx - 1][-1] in frozenset((" ", "\t",
                                                                   "\n")))

        if rstrip_default:
            offset = count_iter_items(
                takewhile(
                    partial(contains, stop_tokens),
                    line[start_rest_offset:],
                ))
            start_rest_offset += offset

        fst = line[:_start_idx - 1 - extra_offset]
        rest = line[start_rest_offset:(
            -extra_offset if extra_offset > 0 else None
        ) if default_end_offset is None else default_end_offset]
        return (
            fst + rest,
            default,
        )
Code example #29
import ast
import os

obj = ast.AST()  # empty base node (unused below)
path = os.path.join("/home", "bigbasket", "Desktop",
                    "SqlMigrations_Automation", "helloworld.py")
with open(path, "r") as file:
    parsed_object = ast.parse(file.read())

print(parsed_object.body[0].body[1].body[1])
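For reference, ast.parse returns an ast.Module whose body attribute is the list of top-level statement nodes, which is why the chained body[...] indexing above descends through nested compound statements. A quick self-contained check:

import ast

mod = ast.parse("def f():\n    x = 1\n    if x:\n        y = 2\n")
print(type(mod).__name__)                          # Module
print(type(mod.body[0]).__name__)                  # FunctionDef
print(type(mod.body[0].body[1]).__name__)          # If
print(type(mod.body[0].body[1].body[0]).__name__)  # Assign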
Code example #30
    s = setup_state(code, code)
    with pytest.raises(
            InstructorError,
            match=
            r"`check_call\(\)` can only be called on `check_function_def\(\)` or `check_lambda_function\(\)`\."
    ):
        s.check_object('x').check_call("f(1)")


# Utility functions to make the above work ------------------------------------


@pytest.mark.parametrize(
    'element, no_error',
    [
        (ast.AST(), True),
        ([ast.AST()], True),
        ({
            'node': ast.AST()
        }, True),
        ({
            'node': [ast.AST()]
        }, True),
        (1, False),
        ([1, 2], False),
        ({
            'node': 1
        }, False),
        ({
            'node': [1, 2]
        }, False),