Example #1
File: __init__.py Project: nirs/hpy
import codecs
import token

import htokenize  # hpy's own Hebrew-aware tokenizer; import path assumed from the project layout


def printTokens(path):
    """ Print tokens in Hebrew Python module """
    readline = codecs.open(path, 'rU', 'utf-8').readline
    for (type, string, (srow, scol), (erow, ecol), line) \
        in htokenize.generate_tokens(readline):
        name = token.tok_name[type]
        print '%s (%d, %d): "%s"' % (name, scol, ecol, string.encode('utf-8'))
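A minimal usage sketch. Assumptions: the file name hello.hpy is a made-up placeholder, and htokenize tokenizes a UTF-8 Hebrew Python module as the snippet above implies.

# Hypothetical call; 'hello.hpy' stands in for any UTF-8-encoded
# Hebrew Python source file.
printTokens('hello.hpy')
# Each token prints as: <token name> (<start col>, <end col>): "<text>",
# per the format string used in printTokens above.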
Example #2
File: __init__.py Project: nirs/hpy
from StringIO import StringIO
import token

import htokenize  # hpy's own Hebrew-aware tokenizer; import path assumed from the project layout


def translate(readline, func):
    """ Translate HPython source to Python source """
    result = StringIO()
    position = 0
    indent = ''
    newline = True

    for (type, string, (srow, scol), (erow, ecol), line) \
        in htokenize.generate_tokens(readline):

        # Add missing whitespace before tokens
        result.write(u' ' * (scol - position))
        position = ecol

        # Handle indentation
        if type == token.NEWLINE:
            newline = True
            result.write(string)
            continue
        elif type == token.INDENT:
            newline = False
            indent = string
            result.write(indent)
            continue
        elif type == token.DEDENT:
            indent = u' ' * ecol
            continue
        elif newline:
            newline = False
            result.write(indent)

        # Handle other tokens
        if type == token.NAME:
            result.write(func(string))
        else:
            result.write(string)

    return result.getvalue()
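A minimal driver sketch for translate. Assumptions: hello.hpy and the identity translator are placeholders (the real project would pass a function mapping Hebrew names to their Python equivalents), and Python 2 is used, matching the snippets above.

import codecs

def identity(name):
    # Trivial NAME translator: every name is returned unchanged, so the
    # result is the source re-emitted through the tokenizer.
    return name

readline = codecs.open('hello.hpy', 'rU', 'utf-8').readline
print translate(readline, identity).encode('utf-8')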