Code Example #1
def file_lines(stdin):
    """Group physical lines into logical lines, joining lines until their parentheses balance."""
    split_lines = [""]
    paren_depth = 0
    for line in stdin.split("\n"):
        line = line.strip()
        # skip comment lines
        if line.startswith("#"):
            continue
        tokens = tokenize(line)
        paren_depth += tokens.count("(") - tokens.count(")")
        if paren_depth == 0:
            # expression complete; close this logical line and start a new one
            split_lines[-1] += line
            split_lines.append("")
        else:
            split_lines[-1] += line
    return [x for x in split_lines if x]
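The helper above folds physical lines into logical lines by tracking unbalanced parentheses, dropping comment lines along the way. A minimal sketch of the behaviour, using a character-level stand-in for ergonomica's tokenize (the stub below is hypothetical and exists only to run the example outside the project):

def tokenize(line):
    # hypothetical stub: one "token" per character, enough for counting parens
    return list(line)

source = "\n".join([
    "# this comment line is skipped",
    "(map",
    "  (lambda x (* x 2))",
    "  (list 1 2 3))",
    "print done",
])

print(file_lines(source))
# ['(map(lambda x (* x 2))(list 1 2 3))', 'print done']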
Code Example #2
File: parser_types.py Project: codebam/ergonomica
    def make(self):
        """Return an (unevaluated) function that acts as an Ergonomica builtin function."""
        # append an EOF token to the end of the function body
        self.body.append(tokenize("\n")[0])
        self.body[-1].type = "EOF"

        return {self.name: make_function(self.evaluator, self)}
Code Example #3
File: interpreter.py Project: timgates42/ergonomica
def ergo(stdin, namespace=namespace):
    if stdin.strip() == "":
        return None
    try:
        return eval(parse(tokenize(stdin)), namespace, True)
    except Exception as e:
        if isinstance(e, (ErgonomicaError, DocoptException, KeyboardInterrupt)):
            print(e)
        else:
            traceback.print_exc()
Code Example #4
            def all_blocks_closed(ptk_buffer):
                """Return True when all Ergonomica code blocks are closed."""
                def_count = 0
                end_count = 0

                for token in tokenize(ptk_buffer.document.text):
                    if token.type == 'DEFINITION':
                        def_count += 1
                    if token.type == 'END':
                        end_count += 1

                return def_count == end_count
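A quick way to exercise the counting logic above is to feed it a hand-built token stream. The Token, FakeDocument, and FakeBuffer stubs below are hypothetical stand-ins for ergonomica's lexer tokens and prompt_toolkit's buffer object:

from collections import namedtuple

Token = namedtuple("Token", "type value")          # hypothetical lexer token
FakeDocument = namedtuple("FakeDocument", "text")  # mimics buffer.document.text
FakeBuffer = namedtuple("FakeBuffer", "document")

def tokenize(text):
    # toy tokenizer: only classifies the words "def" and "end"
    types = {"def": "DEFINITION", "end": "END"}
    return [Token(types.get(word, "LITERAL"), word) for word in text.split()]

buf = FakeBuffer(FakeDocument("def greet\n    print hello\nend"))
print(all_blocks_closed(buf))   # True: one DEFINITION token, one END token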
Code Example #5
    def _auto_newline(_buffer):
        r"""
        Insert \n at the cursor position. Also add necessary padding.
        """
        insert_text = _buffer.insert_text

        if _buffer.document.current_line_after_cursor:
            # When we are in the middle of a line. Always insert a newline.
            insert_text('\n')
        else:
            # Go to new line, but also add indentation.
            current_line = _buffer.document.current_line_before_cursor.rstrip()
            insert_text('\n')

            for character in current_line:
                if character.isspace():
                    insert_text(character)
                else:
                    break

            # Indent three extra spaces for each parenthesis left unclosed on the current line.
            insert_text('   ' * (tokenize(current_line).count("(") -
                                 tokenize(current_line).count(")")))
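The padding expression at the end adds three spaces per parenthesis left unclosed on the current line. That arithmetic can be checked in isolation (again with a hypothetical character-level stand-in for tokenize):

def tokenize(line):
    return list(line)   # hypothetical stub, one "token" per character

current_line = "(map (lambda x"
padding = '   ' * (tokenize(current_line).count("(") -
                   tokenize(current_line).count(")"))
print(repr(padding))    # '      ' -> two unclosed parens, six spaces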
Code Example #6
 def all_blocks_closed(ptk_buffer):
     """Return True when all Ergonomica code blocks are closed."""
     return tokenize(ptk_buffer.text).count("(") == tokenize(
         ptk_buffer.text).count(")")
Code Example #7
File: interpreter.py Project: timgates42/ergonomica
def _tokenize(string):
    return tokenize(string)
Code Example #8
File: ergo.py Project: codebam/ergonomica
def raw_eval_tokens(_tokens, namespace, log=False, silent=False):
    """Evaluate Ergonomica tokens."""

    new_command = True
    in_function = False
    in_lambda = False
    argspec = False

    function = Function(eval_tokens)

    command_function = False
    args = []
    skip = False
    depth = 0
    lambda_depth = 0
    _lambda = []
    eval_next_expression = False
    current_indent = 0

    pipe = Pipeline(ENV, namespace)
    pipe.operations = []
    pipe.args = []

    tokens = _tokens

    tokens.append(tokenize("\n")[0])
    tokens[-1].type = 'EOF'

    for index, token in enumerate(tokens):

        if log:
            print("--- [ERGONOMICA LOG] ---")
            print("CURRENT TOKEN: ", token)
            print("CURRENT args : ", args)
            print("F is         : ", command_function)
            print("NEW_COMMAND  : ", new_command)
            print("IN_LAMBDA    : ", in_lambda)
            print("------------------------\n")

        if not in_function:
            if token.type == 'EVAL':
                eval_next_expression = True
                continue

            if token.type == 'LBRACKET':
                lambda_depth += 1
                in_lambda = True
                continue

            elif in_lambda:
                if token.type == 'RBRACKET':
                    lambda_depth -= 1

                if lambda_depth != 0:
                    _lambda.append(token)
                    continue

                else:  # time to wrap up the function
                    token.type = 'LITERAL'

                    if eval_next_expression:
                        token.value = eval_tokens(_lambda,
                                                  namespace,
                                                  log=log,
                                                  silent=silent)[0]
                        eval_next_expression = False
                    else:
                        lambda_uuid = str(uuid.uuid1())
                        partial = [_lambda, namespace, log, silent]
                        namespace[lambda_uuid] = (
                            lambda blank, s=partial: eval_tokens(s[0], s[1], s[2], s[3]))
                        token.value = lambda_uuid

                    _lambda = []
                    in_lambda = False

        if (token.type == 'EOF') or \
           ((token.type == 'NEWLINE') and (tokens[index + 1].type != 'INDENT')):
            if in_function:
                in_function = False
                namespace.update(function.make())
            else:
                token.type = 'NEWLINE'

        # recognize commands as distinct from arguments
        if token.type == 'NEWLINE':

            argspec = False
            current_indent = 0
            skip = False

            if (len(tokens) > index + 1) and tokens[index + 1].type == 'INDENT':
                function.append_to_body(token)
                continue

            if in_function:
                function.append_to_body(token)
                continue

            if command_function:
                pipe.append_operation(Operation(command_function, args))
                args = []
                command_function = False
                stdout = pipe.stdout()
                if (stdout is not None) and (not silent):
                    yield stdout

                pipe = Pipeline(ENV, namespace)

            if skip:
                skip = False
                continue

            new_command = True
            continue

        if token.type == 'PIPE':
            try:
                pipe.append_operation(Operation(command_function, args))
            except KeyError:
                print("[ergo: CommandError]: Unknown command '%s'." %
                      command_function)

            command_function = False
            args = []
            new_command = True
            continue

        if token.type == "INDENT":
            if not current_indent:
                current_indent += 1
                continue
            current_indent += 1

        if in_function:
            if token.type == 'DEFINITION':
                depth += 1
                skip = True
                function.append_to_body(token)
                continue

            elif not function.name:
                function.set_name(token.value)
                argspec = True
                continue

            elif argspec:
                function.argspec += " " + token.value
                continue

            function.append_to_body(token)
            continue

        if token.type == 'VARIABLE':
            token.type = 'LITERAL'
            token.value = str(namespace[token.value])

        if token.type == 'DEFINITION':
            in_function = True
            function = Function(eval_tokens)
            depth += 1
            continue

        elif (not new_command) and in_function:
            if not function.name:
                function.set_name(token.value)
            else:
                function.append_to_body(token)

        if new_command and in_function:
            function.append_to_body(token)

        elif new_command and (not in_function):
            if not command_function:
                try:
                    command_function = namespace[token.value]
                except KeyError:
                    # try to resolve a three-letter abbreviation to a unique
                    # command name; otherwise fall back to the literal value
                    if len(token.value) == 3:
                        possible_matches = [
                            x for x in namespace if x.startswith(token.value)
                        ]
                        if len(possible_matches) == 1:
                            command_function = namespace[possible_matches[0]]
                    if not command_function:
                        command_function = token.value

                new_command = False
                continue

        elif (not new_command) and (not in_function):
            if eval_next_expression:
                args.append(eval(token.value, namespace))
            else:
                args.append(token.value)
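Because of the yield in the NEWLINE branch, this evaluator is a generator: no command runs until the caller iterates over the result, which is why the REPL in Code Example #10 loops over it with for item in stdout. A minimal consumption pattern, assuming tokenize and ENV come from the project as in the other examples (the command string is only illustrative):

for item in raw_eval_tokens(tokenize("echo hello\n"), ENV.ns):
    print(item)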
Code Example #9
File: ergo.py Project: codebam/ergonomica
def ergo(stdin, log=False):
    """Wrapper for Ergonomica tokenizer and evaluator."""
    return eval_tokens(tokenize(stdin + "\n"), ENV.ns, log=log)
Code Example #10
File: ergo.py Project: codebam/ergonomica
def main():
    """The main Ergonomica runtime."""

    # parse arguments through Docopt
    arguments = docopt(__doc__)

    # help already covered by docopt
    if arguments['--version']:
        print('[ergo]: Version 2.0.0')

    else:
        # whether we want devlog or not
        log = arguments['--log']

        if '--file' in arguments and arguments['--file']:
            with open(arguments['--file'], 'r') as script:
                ergo(script.read(), log=log)

        elif arguments['-m']:
            print(ergo(arguments['STRING'], log=log))

        else:
            # persistent namespace across all REPL loops
            namespace = ENV.ns

            # if run as login shell, run .ergo_profile
            if arguments['--login']:
                eval_tokens(tokenize(open(PROFILE_PATH).read() + "\n"),
                            namespace,
                            log=log,
                            silent=True)

            # REPL loop
            while ENV.run:
                try:
                    stdin = prompt(ENV, namespace)
                    try:
                        stdout = eval_tokens(tokenize(stdin + "\n"),
                                             namespace,
                                             log=log)

                        if stdout is not None:
                            for item in stdout:
                                if item != '':
                                    if isinstance(item, list):
                                        for x in item:
                                            print(x)
                                    else:
                                        print(item)

                    # disable this because the traceback is printed
                    # pylint: disable=broad-except
                    except Exception:
                        traceback.print_exc(file=sys.stdout)
                        continue

                # allow for interrupting functions. Ergonomica can still be
                # suspended from within Bash with C-z.
                except KeyboardInterrupt:
                    print("[ergo: KeyboardInterrupt]: Exited.")
Code Example #11
def get_arg_type(verbs, text):
    """
    Get the type of the current argument to complete,
    given the buffer text and the verbs dictionary.
    """
    
    if text[-1] == " ":
        text += "a"

    tokens = parse(tokenize(text))

    if text.endswith("(") or (len(tokens) == 0) or isinstance(tokens[-1], Symbol):
        return [("<function>", "")]

    for token in tokens[::-1]:
        if isinstance(token, Symbol):
            current_command = token

    argcount = 0
    for i in range(len(tokens))[::-1]:
        token = tokens[i]
        if (i == 0) or (isinstance(tokens[i - 1], list)):
            argcount = len(tokens) - i
            
    # lookup and get docstring
    try:
        # regexp match
        docstrings = re.search(r'(Usage|usage):\n\s.*', verbs[current_command].__doc__).group()

        # preprocess
        docstrings = [x.strip().split() for x in docstrings.split("\n")[1:]]

    except AttributeError:
        return [("<file/directory>", "")]
    except TypeError: # empty buffer
        return [("<file/directory>", "")]
    except KeyError: # no such command        
        if os.name != "nt":
            return [("<file/directory>", "")] + get_all_args_from_man(current_command)
        else:
            return [("<file/directory>", "")]

    # we .split() the docstring which splits it by spaces--but this needs to be corrected
    # for individual elements that contain spaces, e.g. (-a | --address)
    # parsed_docstring contains the corrected list of arguments.
    parsed_docstrings = []
    for docstring in docstrings:
        parsed_docstrings.append([])
        for item in docstring:
            if (parsed_docstrings[-1] == []) or \
                ((parsed_docstrings[-1][-1].count('(') == parsed_docstrings[-1][-1].count(')')) and \
                (parsed_docstrings[-1][-1].count('[') == parsed_docstrings[-1][-1].count(']'))):
                parsed_docstrings[-1].append(item)

            else:
                parsed_docstrings[-1][-1] += item
    
    out = []
    for parsed_docstring in parsed_docstrings:
        try:
            preset_arg = re.match(r'[a-z]+', parsed_docstring[argcount - 1])
            if preset_arg and (preset_arg.group() == parsed_docstring[argcount - 1]):
                out.append((parsed_docstring[argcount - 1], ""))
            else:
                try:
                    out.append((re.match(r'<[a-z]+?>', parsed_docstring[argcount - 1]).group(), ""))
                except AttributeError:
                    # current argument doesn't have a declared type
                    out.append(("<file/directory>", ""))
                except IndexError:
                    # no argument
                    pass
        except IndexError:
            pass

    return out
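The core of the lookup is the regular expression that pulls a verb's docopt-style Usage section out of its docstring (in practice only the first usage line, since the pattern stops at a newline). A standalone illustration with a hypothetical docstring:

import re

doc = """List directory contents.

Usage:
  ls [-a] <directory>
"""

usage = re.search(r'(Usage|usage):\n\s.*', doc).group()
print([x.strip().split() for x in usage.split("\n")[1:]])
# [['ls', '[-a]', '<directory>']]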
Code Example #12
def complete(verbs, text):
    """
    Return a completion for a command or directory.
    """

    verbs.update({"if": None,
                  "set": None,
                  "global": None,
                  "lambda": None,})
    
    fixed_text = text
    if text.endswith(" "):
        fixed_text += "a"

    last_word = tokenize(text)[-1]
        
    options = []
    meta = {}

    if True:#(["("] + tokenize(text))[-2] != "(":
        for argtype in get_arg_type(verbs, fixed_text):

            if argtype[1] != "":
                # aka there's a meta definition
                meta[argtype[0]] = argtype[1]
                
            if not (argtype[0].startswith("<") or argtype[0].endswith(">")):
                # then add it directly
                options.append(argtype[0])

            if argtype[0] == "<none>":
                # aka no more arguments to supply to function
                pass

            elif argtype[0] == "<variable>":
                options += [x for x in verbs.keys() if not hasattr(verbs[x], "__call__")] + ['def']

            elif argtype[0] in ["<file>", "<directory>", "<file/directory>"]:
                if os.path.basename(text) == text:
                    try:
                        options += os.listdir(".")
                    except OSError:
                        pass
                else:
                    dirname = os.path.dirname(text.split(" ")[1])
                    original_dirname = dirname

                    # process dirname
                    if not dirname.startswith("/"):
                        if dirname.startswith("~"):
                            dirname = os.path.expanduser(dirname)
                        else:
                            dirname = "./" + dirname
                    try:
                        options += [os.path.join(original_dirname, x) for x in os.listdir(dirname)]
                    except OSError:
                        pass

                if argtype[0] == "<file>":
                    options = [x for x in options if os.path.isfile(x)]
                elif argtype[0] == "<directory>":
                    options = [x for x in options if os.path.isdir(x)]

            elif argtype[0] == "<string>":
                options += [text.split(" ")[-1] + '"']        

            elif argtype[0] == "<function>":
                commands = [os.listdir(y) for y in os.environ['PATH'].split(os.pathsep)]
                flattened_commands = [x for l in commands for x in l]
                options = [x for x in verbs.keys() if hasattr(verbs[x], "__call__")] + flattened_commands

    if text.endswith(" "):
        last_word = ""

    if last_word == "(":
        last_word = ""
                
    if not text.endswith(" "):
        options = [i for i in options if i.startswith(last_word)]
        
    if options == []:
        if text.endswith("/"):
            try:
                options = os.listdir(last_word)
            except OSError:
                options = []
            return ([(len(last_word), option) for option in options], meta)
    if options != []:
        return ([(len(last_word), i) for i in options], meta)
    return ([], {})
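As the final return statements show, the completer hands back a pair: a list of (len(last_word), candidate) tuples, where the length presumably tells the caller how much of the current word each candidate replaces, plus the meta dictionary of per-option descriptions collected from get_arg_type; ([], {}) is returned when nothing matches. An illustrative (hypothetical) return value might look like ([(3, "cat"), (3, "calc")], {"<file>": "a path on disk"}).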