Example #1
def containedSymbolPrototypes(source):
    lexer = GLSLLexer130.GLSLLexer130(source)
    token = lexer.token()
    symbolList = []
    
    while token is not None:
        if tokenIsDataType(token):
            token = lexer.token()
            if tokenIs(token, "IDENTIFIER"):
                symbolIdentifier = token.tokenData
                token = lexer.token()
                if tokenIs(token, "LPAREN"):
                    token = lexer.token()
                    if tokenIs(token, "RPAREN"):
                        token = lexer.token()
                        if tokenIs(token, "SEMICOLON"):
                            symbolList += [ symbolIdentifier ]
                    else:
                        # Skip the argument list; its contents do not matter here.
                        # Guard against EOF so a truncated source cannot loop forever.
                        while token is not None and not tokenIs(token, "RPAREN"):
                            token = lexer.token()
                        token = lexer.token()
                        if tokenIs(token, "SEMICOLON"):
                            symbolList += [ symbolIdentifier ]
        token = lexer.token()
    return symbolList
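These examples call helper predicates that are not shown here. A minimal sketch of how tokenIs and tokenIsDataType might look, assuming each token exposes the tokenName and tokenData attributes used above (the set of type tokens is an assumption inferred from the keyword lists in the later examples):

# Hypothetical helpers, inferred from how the examples use them.
GLSL_DATA_TYPES = {"VOID", "FLOAT", "INT", "VEC2", "VEC3", "VEC4",
                   "MAT2", "MAT3", "MAT4", "SAMPLER2D"}

def tokenIs(token, tokenName):
    # None-safe, since the scanners may reach the end of the token stream.
    return token is not None and token.tokenName == tokenName

def tokenIsDataType(token):
    return token is not None and token.tokenName in GLSL_DATA_TYPES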
Example #2
def compress(source):
    # return source  # pass-through disabled so the compressor below actually runs
    newcode = ""
    lexer = GLSLLexer130.GLSLLexer130(source)
    token = lexer.token()
    ids = []  # identifiers seen so far (collected here but not used further)
    while token is not None:
        if token.tokenName in ("DEFINE_DIRECTIVE", "ENFORCED_CRLF"):
            # Directives and enforced line breaks must restore a newline.
            newcode += '\n'
        if token.tokenName == "IDENTIFIER":
            ids += [token.tokenData]
        if (token.tokenName != "SINGLELINE_COMMENT") and (
                token.tokenName != "MULTILINE_COMMENT") and (token.tokenName !=
                                                             "ENFORCED_CRLF"):
            newcode += token.tokenData
        if token.tokenName in [
                "VOID", "FLOAT", "VEC2", "VEC3", "VEC4", "MAT2", "MAT3",
                "MAT4", "SAMPLER2D", "UNIFORM", "IN_QUALIFIER",
                "OUT_QUALIFIER", "INOUT_QUALIFIER",
                "VERSION_DIRECTIVE", "DEFINE_DIRECTIVE", "CONST", "INT",
                "ELSE", "RETURN"
        ]:
            newcode += " "
        token = lexer.token()
    return newcode
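A minimal driver sketch for compress, assuming the shader source is read from a file (the path is illustrative):

# Hypothetical usage; "shader.frag" is an illustrative path.
with open("shader.frag") as f:
    minified = compress(f.read())
print(minified)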
Example #3
def hasEntryPoint(source):
    lexer = GLSLLexer130.GLSLLexer130(source)
    token = lexer.token()
    while token is not None:
        if tokenIs(token, "VOID"):
            token = lexer.token()
            if tokenIs(token, "MAIN"):
                token = lexer.token()
                if tokenIs(token, "LPAREN"):
                    token = lexer.token()
                    if tokenIs(token, "RPAREN"):
                        return True
        token = lexer.token()
    return False
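A quick usage sketch, assuming the lexer tokenizes main as a dedicated MAIN token as the code above implies (the shader string is illustrative):

shader = "#version 130\nvoid main(){gl_FragColor=vec4(1.);}"
if not hasEntryPoint(shader):
    raise ValueError("shader defines no void main() entry point")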
Example #4
def compress(source):
    newcode = ""
    lexer = GLSLLexer130.GLSLLexer130(source)
    token = lexer.token()
    while token is not None:
        if (token.tokenName != "SINGLELINE_COMMENT") and (token.tokenName !=
                                                          "MULTILINE_COMMENT"):
            newcode += token.tokenData
        if token.tokenName in [
                "VOID", "FLOAT", "VEC2", "VEC3", "VEC4", "MAT2", "MAT3",
                "MAT4", "SAMPLER2D", "UNIFORM", "IN_QUALIFIER",
                "OUT_QUALIFIER", "INOUT_QUALIFIER",
                "VERSION_DIRECTIVE", "DEFINE_DIRECTIVE", "CONST", "INT", "ELSE"
        ]:
            newcode += " "
        token = lexer.token()
    return newcode
Example #5
def compressSource(source):
    lexer = GLSLLexer130.GLSLLexer130(source)
    token = lexer.token()
    smallerSource = ""
    lineHasPreprocessorDirective = False
    
    while token is not None:
        if tokenIsPreprocessorDirective(token):
            lineHasPreprocessorDirective = True
            # Appends the literal two-character escape "\n", presumably so the
            # minified output can be embedded in a C string literal.
            smallerSource += "\\n"
        if (not tokenIs(token, "SINGLELINE_COMMENT")) and (not tokenIs(token, "MULTILINE_COMMENT")):
            smallerSource += token.tokenData
            if tokenNeedsSpace(token):
                smallerSource += ' '
        if tokenIs(token, "CRLF"):
            if lineHasPreprocessorDirective:
                smallerSource += "\\n"
            lineHasPreprocessorDirective = False
        token = lexer.token()
    
    return smallerSource
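tokenNeedsSpace and tokenIsPreprocessorDirective are not shown; a plausible sketch, with the token-name sets assumed from the keyword lists in the other examples:

# Hypothetical helpers; the sets below are assumptions based on the
# keyword lists used by the compress() variants above.
TOKENS_NEEDING_SPACE = {
    "VOID", "FLOAT", "INT", "CONST", "UNIFORM", "IN_QUALIFIER",
    "OUT_QUALIFIER", "INOUT_QUALIFIER", "VEC2", "VEC3", "VEC4",
    "MAT2", "MAT3", "MAT4", "SAMPLER2D", "ELSE", "RETURN",
    "VERSION_DIRECTIVE", "DEFINE_DIRECTIVE",
}

def tokenNeedsSpace(token):
    # A keyword must stay separated from a following identifier.
    return token.tokenName in TOKENS_NEEDING_SPACE

def tokenIsPreprocessorDirective(token):
    # Directives must sit on a line of their own, hence the newline handling.
    return token.tokenName in {"VERSION_DIRECTIVE", "DEFINE_DIRECTIVE"}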
Example #6
def compress(source):
    #return source

    identifier_list = []
    small_identifiers = [chr(ord('a') + i) for i in range(26)]
    small_identifiers += [chr(ord('A') + i) for i in range(26)]
    small_identifiers += [
        a + b for a in small_identifiers for b in small_identifiers
    ]
    # print(small_identifiers)  # debug output

    newcode = ""
    lexer = GLSLLexer130.GLSLLexer130(source)
    token = lexer.token()
    while token is not None:
        if (token.tokenName != "SINGLELINE_COMMENT") and (
                token.tokenName != "MULTILINE_COMMENT") and (token.tokenName !=
                                                             "IDENTIFIER"):
            newcode += token.tokenData
        if token.tokenName in [
                "VOID", "FLOAT", "VEC2", "VEC3", "VEC4", "MAT2", "MAT3",
                "MAT4", "SAMPLER2D", "UNIFORM", "IN_QUALIFIER",
                "OUT_QUALIFIER", "INOUT_QUALIFIER",
                "VERSION_DIRECTIVE", "DEFINE_DIRECTIVE", "CONST", "INT",
                "ELSE", "RETURN"
        ]:
            newcode += " "
        # if token.tokenName == "IDENTIFIER":  # No minification
        #     newcode += token.tokenData
        if token.tokenName == "IDENTIFIER":
            identifier = token.tokenData
            if identifier not in identifier_list:
                identifier_list += [identifier]
            ind = identifier_list.index(identifier)
            # print("ID: ", token.tokenData, ind)  # debug output
            newcode += small_identifiers[ind]
        token = lexer.token()
    return newcode
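The renaming table above is base-52 counting over a-z and A-Z (52 one-letter names, then 52*52 two-letter names). A standalone sketch of the same mapping, without the lexer dependency:

import string

LETTERS = string.ascii_lowercase + string.ascii_uppercase  # 52 symbols

def short_name(index):
    # Matches the small_identifiers table built above: 0..51 map to one
    # letter, 52..2755 map to two letters ("aa", "ab", ...).
    if index < 52:
        return LETTERS[index]
    index -= 52
    return LETTERS[index // 52] + LETTERS[index % 52]

Note that this variant renames every identifier, including uniforms, which would break host-side lookups such as glGetUniformLocation; Example #7 below avoids that by excluding uniform names from the rename table.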
Example #7
def compressSource(source):
    lexer = GLSLLexer130.GLSLLexer130(source)
    token = lexer.token()
    smallerSource = ""
    lineHasPreprocessorDirective = False

    characters = {}
    numbers = {}
    for character in source:
        if character not in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
            if character in "0123456789":
                if character in numbers:
                    numbers[character] += 1
                else:
                    numbers[character] = 1
            continue
        if character in characters:
            characters[character] += 1
        else:
            characters[character] = 1
    # Sort both tables by descending frequency (most common first).
    characters = dict(
        reversed(sorted(characters.items(), key=lambda item: item[1])))
    numbers = dict(
        reversed(sorted(numbers.items(), key=lambda item: item[1])))

    isUniform = False
    uniforms = []
    ids = {}

    # Simple optimizations
    while token is not None:
        if (not tokenIs(token, "SINGLELINE_COMMENT")) and (not tokenIs(
                token, "MULTILINE_COMMENT")):
            smallerSource += token.tokenData
            if tokenNeedsSpace(token):
                smallerSource += ' '

        if tokenIs(token, "UNIFORM"):
            isUniform = True
        if tokenIs(token, "SEMICOLON"):
            isUniform = False
        if tokenIs(token, "IDENTIFIER"):
            if isUniform:
                uniforms += [token.tokenData]
            if token.tokenData not in uniforms:
                if token.tokenData in ids:
                    ids[token.tokenData] += 1
                else:
                    ids[token.tokenData] = 1
        token = lexer.token()

    # Sort the identifiers by descending frequency
    ids = dict(reversed(sorted(ids.items(), key=lambda item: item[1])))
    idList = list(ids.keys())

    dictionary = {}
    for i, ident in enumerate(idList):
        # "ident" rather than "id", which would shadow the builtin.
        dictionary[ident] = generateIdentifier(characters, numbers, i)

    # print(smallerSource)
    # f = open("smallerSource", "wt")
    # f.write(smallerSource)
    # f.close()

    # Context model optimizations
    smallestSource = ""
    lexer = GLSLLexer130.GLSLLexer130(smallerSource)
    token = lexer.token()
    while token is not None:
        if tokenIsPreprocessorDirective(token):
            lineHasPreprocessorDirective = True
            smallestSource += "\\n"
        if tokenIs(token, "CRLF"):
            if lineHasPreprocessorDirective:
                smallestSource += "\\n"
            lineHasPreprocessorDirective = False
        if tokenIs(token, "IDENTIFIER"):
            # Uniform names were excluded from the rename table above; fall
            # back to the original name to avoid a KeyError and to keep
            # uniform names stable for host-side lookups.
            smallestSource += dictionary.get(token.tokenData, token.tokenData)
        else:
            smallestSource += token.tokenData
            if tokenNeedsSpace(token):
                smallestSource += ' '
        # print(token.tokenData)
        token = lexer.token()

    # print(smallestSource)
    # ff = open("smallestSource", "wt")
    # ff.write(smallestSource)
    # ff.close()

    return smallestSource
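generateIdentifier is not shown. Its arguments suggest it builds replacement names from the characters that already occur most frequently in the source, so the minified text compresses better under a context-modelling packer. A speculative sketch under that assumption (the numbers table would allow digits in non-leading positions, which this sketch ignores; GLSL identifiers may not start with a digit):

def generateIdentifier(characters, numbers, index):
    # Speculative reconstruction: draw letters in descending frequency
    # order so frequent identifiers reuse frequent characters. Only
    # handles one- and two-letter names, enough for small shaders.
    letters = list(characters.keys())  # most frequent character first
    base = len(letters)
    if index < base:
        return letters[index]
    index -= base
    return letters[index // base] + letters[index % base]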