Code example #1
File: format_latex.py  Project: balancededge/pyxam
 def def_prompt(token):
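     # Pop the question title off the front, collect any leading plain strings and
     # $/img/verbatim/emphasis tokens into a 'prompt' token, then re-insert the
     # prompt and the title at the head of the definition.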
     try:
         definition = token.definition[0].definition
         title, prompt = definition.pop(0), []
         while len(definition) > 0 and (
             not hasattr(definition[0], "name")
             or filters.has_name(definition[0], ["$", "img", "verbatim", "emphasis"])
         ):
             prompt.append(definition.pop(0))
         definition.insert(0, util.Map({"name": "prompt", "definition": prompt}))
         definition.insert(0, title)
         return token
     except AttributeError:
         raise (
             parser_composer.FormatError("Malformed question token definition:" + parser_composer.str_token(token))
         )
Code example #2
 def def_prompt(token):
     try:
         definition = token.definition[0].definition
         title, prompt = definition.pop(0), []
         while len(definition) > 0 and (
                 not hasattr(definition[0], 'name') or filters.has_name(
                     definition[0], ['$', 'img', 'verbatim', 'emphasis'])):
             prompt.append(definition.pop(0))
         definition.insert(
             0, util.Map({
                 'name': 'prompt',
                 'definition': prompt
             }))
         definition.insert(0, title)
         return token
     except AttributeError:
         raise (parser_composer.FormatError(
             'Malformed question token definition:' +
             parser_composer.str_token(token)))
Code example #3
File: format_moodle.py  Project: balancededge/pyxam
 def escape_html(token):
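     # Replace literal parentheses in plain-string entries with their HTML
     # character references (&#40; and &#41;).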
     for i in range(len(token.definition)):
         if not filters.has_name(token.definition[i]):
             token.definition[i] = token.definition[i].replace('(', '&#40;').replace(')', '&#41;')
     return token
Code example #4
File: format_moodle.py  Project: balancededge/pyxam
 def fix_solutions(token):
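     # For tokens without a name whose first definition entry starts with '<text>',
     # wrap the whole definition in an enclosing <text>...</text> pair.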
     if not filters.has_name(token) and token.definition[0].startswith('<text>'):
         token.definition = ['<text>'] + token.definition + ['</text>']
     return token
Code example #5
File: format_moodle.py  Project: flaviobarros/pyxam
 def escape_html(token):
     for i in range(len(token.definition)):
         if not filters.has_name(token.definition[i]):
             token.definition[i] = token.definition[i].replace(
                 '(', '&#40;').replace(')', '&#41;')
     return token
Code example #6
File: format_moodle.py  Project: flaviobarros/pyxam
 def fix_solutions(token):
     if not filters.has_name(token) and token.definition[0].startswith(
             '<text>'):
         token.definition = ['<text>'] + token.definition + ['</text>']
     return token
Code example #7
File: parser_composer.py  Project: flaviobarros/pyxam
def build_token(token, src, fmt):
    """
    Attempts to convert the source into a token.

    :param token: The token to build
    :param src: The source to build from
    :param fmt: The format to build to
    :return: The token if built or none
    """
    definition, unmatched, packing = [], src, False
    for symbol in token.definition[:-1]:
        if packing:
            matched, parentheses = '', 0
            while packing:
                # No match
                if not unmatched:
                    return None, src
                # If token we don't care what parentheses level we are in
                elif isinstance(symbol, list):
                    for sub_token in symbol:
                        child, unmatched = build_token(fmt['format'][sub_token], unmatched, fmt)
                        if child is not None:
                            definition += parse_tokens(matched, fmt)
                            definition.append(child)
                            packing = False
                            break
                    else:
                        matched, unmatched = increment(matched, unmatched)
                # Move down a parentheses level
                elif fmt['left_paren'] is not None and unmatched.startswith(fmt['left_paren']):
                    parentheses += 1
                    matched, unmatched = increment(matched, unmatched)
                # If nested move back up a parentheses level
                elif parentheses != 0 and unmatched.startswith(fmt['right_paren']):
                    parentheses -= 1
                    matched, unmatched = increment(matched, unmatched)
                # If parentheses are not balanced consume character
                elif parentheses != 0:
                    matched, unmatched = increment(matched, unmatched)
                # If at the end of content
                elif isinstance(symbol, str) and unmatched.startswith(symbol):
                    definition += [matched] if filters.has_name(token, ['$', 'verb', 'comment']) else parse_tokens(matched, fmt)
                    unmatched = unmatched[len(symbol):]
                    packing = False
                # If not at the end of content
                elif isinstance(symbol, str):
                    matched, unmatched = increment(matched, unmatched)
                # No match
                else:
                    return None, src
        # If str
        elif isinstance(symbol, str) and unmatched.startswith(symbol):
            unmatched = unmatched[len(symbol):]
        # If token
        elif isinstance(symbol, list):
            for sub_token in symbol:
                child, unmatched = build_token(fmt['format'][sub_token], unmatched, fmt)
                if child is not None:
                    definition.append(child)
                    break
            else:
                return None, src
        # If content
        elif isinstance(symbol, tuple):
            packing = True
        # No match
        else:
            return None, src
    # If exited before packing
    if packing:
        matched, parentheses = '', 0
        while packing:
            # End of string
            if len(unmatched) == 0:
                definition += [matched] if any(s in token.name for s in ('$', 'verbatim', 'comment')) else parse_tokens(matched, fmt)
                packing = False
            # Move down a parentheses level
            elif fmt['left_paren'] is not None and unmatched.startswith(fmt['left_paren']):
                parentheses += 1
                matched, unmatched = increment(matched, unmatched)
            # If nested move back up a parentheses level
            elif parentheses != 0 and unmatched.startswith(fmt['right_paren']):
                parentheses -= 1
                matched, unmatched = increment(matched, unmatched)
            # If parentheses are not balanced consume character
            elif parentheses != 0:
                matched, unmatched = increment(matched, unmatched)
            # If at the end of content
            elif re.match(r'^\s*(({})|$).*'.format(token.definition[-1]), unmatched, re.DOTALL):
                definition += [matched] if filters.has_name(token, ['$', 'verb', 'comment']) else parse_tokens(matched, fmt)
                packing = False
            # If not at the end of content
            else:
                matched, unmatched = increment(matched, unmatched)
    # Check if ending regex matches
    if unmatched == '' or re.match(r'^\s*(({})|$).*'.format(token.definition[-1]), unmatched, re.DOTALL):
        return util.Map({'name': token.name, 'definition': definition}), unmatched
    return None, src
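
A minimal usage sketch for build_token, not taken from the pyxam sources: it assumes the pyxam modules parser_composer and util are importable as plain modules, and it hand-builds a hypothetical '$' (inline math) token plus a format map with only the 'format', 'left_paren' and 'right_paren' keys that build_token reads; pyxam's real format tables may carry more structure.

import util
import parser_composer

# Hypothetical one-token grammar: $...$ produces a '$' (inline math) token.
# The tuple () marks the packed-content slot and the trailing '.' is the
# ending regex that build_token checks with re.match.
math = util.Map({
    'name': '$',
    'definition': ['$', (), '$', '.']
})
fmt = {
    'format': {'$': math},   # token lookup table used for nested sub-tokens
    'left_paren': None,      # no parenthesis tracking in this sketch
    'right_paren': None
}

token, remainder = parser_composer.build_token(math, '$x + y$ rest', fmt)
# If the assumptions hold, token.name is '$', token.definition is ['x + y'],
# and remainder holds the unconsumed ' rest'.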