class AbstractSerializer:
    def __init__(self, ds, cs):
        # ds: default serializer; cs: custom serializer, wrapped in an adapter
        self.ds = ds
        self.cs = CustomSerializerAdapter(cs)
        self.output = OutputStream()
        self.input = InputStream()

    def toByte(self, obj):
        if obj is None:
            return None
        # Use the default serializer if it can handle the object, otherwise the custom one
        ts = self.ds if self.ds.isSuitable(obj) else self.cs
        self.output.writeByte(ts.getTypeId())
        ts.write(self.output, obj)
        data = list(self.output.buf)
        self.output.flush()
        return data

    def toObject(self, data):
        self.input.setData(data)
        try:
            # The leading byte identifies which serializer wrote the payload
            typeId = self.input.readByte()
            ts = self.ds if self.ds.getTypeId() == typeId else self.cs
            return ts.read(self.input)
        except Exception as e:
            print("serialization error while reading", e)
        finally:
            self.input.flush()
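A hedged round-trip sketch for AbstractSerializer; DefaultSerializer and MyCustomSerializer below are hypothetical stand-ins for whatever ds and cs are in the surrounding code base:

# Hypothetical usage: DefaultSerializer and MyCustomSerializer are assumptions,
# not classes shown in this example.
serializer = AbstractSerializer(DefaultSerializer(), MyCustomSerializer())
payload = serializer.toByte({"id": 7})    # list of bytes; None input returns None
restored = serializer.toObject(payload)   # dispatches on the leading type-id byte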
Example #2
def IO_Test():
    # Read an audio file, generate a sine tone, crossfade the two, and write the result
    wave1 = Input("samples/sanctuary.mp3")
    x = wave1.read_all()
    y = g.sine_t(500, 5, 440)
    z = op.crossfade_exp(x, y, 0.000001, 2)
    wave3 = out(z)
    wave3.write()
    wave1.close()
Example #3
def executeQuery():
    inputString = sqlEntry.get()

    if sqlEntry.index("end") != 0:
        sqlEntry.delete(0, 'end')

    if outputQuery.get("1.0", "end-1c") != "":

        outputQuery.configure(state='normal')
        outputQuery.delete(1.0, 'end')
        outputQuery.configure(state='disabled')

    if inputString == 'clear':
        os.system('clear')
    elif inputString == 'q':
        quit()

    elif not inputString.endswith(";"):
        print("semicolon missing")

    else:

        try:

            inputQ = InputStream(inputString)

            start_time = time.time()
            #tokenize the input with the lexer
            lexer = SQLiteLexer(inputQ)
            stream = CommonTokenStream(lexer)

            #initialize the parser and attach a custom error listener
            parser = SQLiteParser(stream)
            parser._listeners = [MyErrorListener()]

            #start parsing
            tree = parser.parse()

            parsed = sqlparse.parse(inputString)[0]
            print(parsed)

            #initialize array of tokens
            tokenArray = []
            for token in parsed.tokens:
                data = str(token)

                #skip whitespace tokens and the trailing semicolon
                if not data.isspace() and data != ';':
                    tokenArray.append(data)

            print(tokenArray)
            evaluateExpression.evaluateQuery(tokenArray)

            print("\n--- execution time in seconds : %s ---" %
                  (time.time() - start_time))

        except Exception as error:
            print("error : " + str(error))
Example #4
def generateRule(rules, expr, expectedForm, **kargs):
    dummy = Tokeniser.Tokeniser(None)
    reverse = False
    lexs = []
    exprArray = []
    # A leading "!!" marks the rule as reversed
    if expr[0:2] == "!!":
        reverse = True
        expr = expr[2:]

    # Tokenise the expression: "$" positions become None in the expected-token
    # list, and identifier lexemes are recorded in lexs (None everywhere else)
    dummy.stream = InputStream("", raw=expr)
    while not dummy.complete:
        t = dummy.getToken()
        if t.type == "doller":
            exprArray.append(None)
        else:
            exprArray.append(t.type)
        if t.type == "identifier":
            lexs.append(t.lexme)
        else:
            lexs.append(None)
    rules.append(Rule(exprArray, expectedForm, reverse=reverse, lexs=lexs))
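A hedged usage sketch for generateRule; it relies on the module's own Rule, Tokeniser, and InputStream, and the expression string and expected form below are hypothetical:

# Hypothetical: "int $;" tokenises to primType, space, a wildcard slot (None),
# and semicolon; the "declaration" expected form is an assumption as well.
rules = []
generateRule(rules, "int $;", "declaration")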
class Tokeniser():
	def __init__(self, filename):
		if filename == None:
			self.stream = None
		else:
			self.stream = InputStream(filename)
		self.complete = False
		self.LL0TokenMap = {
			# white space
			" ":("space",None,False),
			"	":("tab",None,False),
			"\n":("newLine",None,False),
			# braces/brackets
			"{":("openBrace",None,False),
			"}":("closeBrace",None,False),
			"(":("openBracket",None,False),
			")":("closeBracket",None,False),
			"[":("openSquareBracket",None,False),
			"]":("closeSquareBracket",None,False),
			# basic control
			";":("semicolon",None,False),
			",":("comma",None,False),
			# math
			"+":("mathSymbol",None,False),
			"-":("mathSymbol",None,False),
			"/":("mathSymbol",None,False),
			"*":("mathSymbol",None,False),
			">":("mathSymbol",None,False),
			"<":("mathSymbol",None,False),
			"=":("equals",None),
			"\"":("stringLiteral","\"",True),
			"'":("charLiteral","'",True),
			# special
			"$":("doller",None,False),
			"@":("at",None,False),
		}
		# token, lookahead -> token-type, terminating symbol, terminating symbol lookahead
		self.LL1TokenMap = {
			("/","/"):("comment","\n",None,False),
			("/","*"):("comment","*","/",True),
			("=","="):("mathSymbol",None,None,False),
			("if","("):("if",None,None,False),
			("if"," "):("if",None,None,False),
			("else","{"):("else",None,None,False),
			("else"," "):("else",None,None,False),
			("while","("):("while",None,None,False),
			("while"," "):("while",None,None,False),
			("for","("):("for",None,None,False),
			("for"," "):("for",None,None,False),
			("#include"," "):("include",'\n',None,False),
			("#define"," "):("define",'\n',None,False),
			("return"," "):("return",None,None,False),
			("void"," "):("primType",None,None,False),
			("char"," "):("primType",None,None,False),
			("short"," "):("primType",None,None,False),
			("int"," "):("primType",None,None,False),
			("long"," "):("primType",None,None,False),
			("float"," "):("primType",None,None,False),
			("double"," "):("primType",None,None,False),
			("signed"," "):("primType",None,None,False),
			("unsigned"," "):("primType",None,None,False),
			("struct"," "):("primType",None,None,False),
			("union"," "):("primType",None,None,False),
			("const"," "):("primType",None,None,False),
			("volatile"," "):("primType",None,None,False)
		}

	def LL1SkipUntil(self,stream,curr,s,inclTerminal):
		# Consume characters until the single-character terminator s is next,
		# optionally including the terminator itself in the returned lexeme
		while stream.peek() != s:
			curr += stream.getNext()
		if inclTerminal:
			curr += stream.getNext()
		return curr

	def LL2SkipUntil(self,stream,curr,s,n,inclTerminal):
		# Consume characters until the two-character terminator s,n is found,
		# optionally including it in the returned lexeme
		next = stream.getNext()
		while not (next == s and stream.peek() == n):
			curr += next
			next = stream.getNext()
		if inclTerminal:
			curr += next
			curr += stream.getNext()
		return curr

	def getToken(self):
		if self.stream.isLast():
			self.complete = True
		stream = self.stream
		curr = ""
		while stream.hasNext():
			# get next char
			curr += stream.getNext()
			currLookAhead = (curr.lower(),stream.peek())

			# LL1 tokens
			if currLookAhead[1] != None and currLookAhead in self.LL1TokenMap:
				tokenInfo = self.LL1TokenMap[currLookAhead]
				if tokenInfo[1] == None:
					pass
				elif tokenInfo[2] == None:
					curr = self.LL1SkipUntil(stream,curr,tokenInfo[1],tokenInfo[3])
				else:
					curr = self.LL2SkipUntil(stream,curr,tokenInfo[1],tokenInfo[2],tokenInfo[3])
				return Token(tokenInfo[0],curr,stream.currLine,stream.lineIndex)

			# LL0 tokens
			if curr.lower() in self.LL0TokenMap:
				prev = curr.lower()
				if self.LL0TokenMap[curr.lower()][1] != None:
					curr = self.LL1SkipUntil(stream,curr,self.LL0TokenMap[prev][1],self.LL0TokenMap[prev][2])
				return Token(self.LL0TokenMap[prev][0],curr,stream.currLine,stream.lineIndex)

			# general token cases
			if not self.stream.hasNext():
				self.complete = True

			# Number literals and identifiers
			if not stream.hasNext():
				# End of stream: return what has accumulated as an identifier
				return Token("identifier",curr,stream.currLine,stream.lineIndex)
			if re.match(r'\d+\.\d*',curr) and stream.peek() not in IDENTIFIER_CHARACTERS:
				# Float literal
				return Token("numberLiteral",curr,stream.currLine,stream.lineIndex)
			elif re.match(r'\d+',curr) and stream.peek() not in IDENTIFIER_CHARACTERS + ["."]:
				# Integer literal
				return Token("numberLiteral",curr,stream.currLine,stream.lineIndex)
			elif stream.peek() not in IDENTIFIER_CHARACTERS:
				# Identifier
				return Token("identifier",curr,stream.currLine,stream.lineIndex)

		raise Exception("'"+curr+"' was not recognised as a token")
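A hedged usage sketch for the Tokeniser; it assumes the InputStream(raw=...) form used in generateRule above, and the snippet being tokenised is hypothetical:

# Hypothetical driver: feed a small C-like snippet through the Tokeniser.
# The raw= form of InputStream is taken from generateRule above.
tok = Tokeniser(None)
tok.stream = InputStream("", raw="int x = 1;\n")
while not tok.complete:
    t = tok.getToken()
    print(t.type, repr(t.lexme))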
def generate(folder):
    # Open and initialise the four input data streams
    data_stream1 = InputStream(folder + "\\data1.qqq")
    data_stream2 = InputStream(folder + "\\data2.qqq")
    data_stream3 = InputStream(folder + "\\data3.qqq")
    data_stream4 = InputStream(folder + "\\data4.qqq")
    data_stream1.initialize()
    data_stream2.initialize()
    data_stream3.initialize()
    data_stream4.initialize()
    # Wire the input nodes; data_stream4 feeds two of them (node04 and node05)
    node01 = InputNode(100, data_stream1)
    node02 = InputNode(10, data_stream2)
    node03 = InputNode(10, data_stream3)
    node04 = InputNode(2, data_stream4)
    node05 = InputNode(100, data_stream4)
    # Build three generations of calculation nodes on top of the inputs
    gen1_calc_nodes = create_calculation_nodes1(
        [node01, node02, node03, node04, node05])
    gen2_calc_nodes = create_calculation_nodes1(gen1_calc_nodes)
    gen3_calc_nodes = create_calculation_nodes2(gen2_calc_nodes)
    limit = 1073741824  # 2^30
    node06 = OutputNode(0, limit)
    node07 = OutputNode(-limit, limit)
    # Split the last generation between the two output nodes;
    # the middle third is subscribed to both
    l = len(gen3_calc_nodes)
    for i in range(0, int(2.0 / 3.0 * l)):
        node06.subscribe(gen3_calc_nodes[i])
    for i in range(int(1.0 / 3.0 * l), l):
        node07.subscribe(gen3_calc_nodes[i])
    output1 = OutputStream(folder + "\\output1.qqq")
    output2 = OutputStream(folder + "\\output2.qqq")
    node06.counter_subscribe(output1)
    node07.counter_subscribe(output2)
    return data_stream1, data_stream2, data_stream3, data_stream4, output1, output2
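A brief hypothetical driver for generate(); the folder name is an assumption, and running the resulting node graph depends on APIs not shown in this example:

# Hypothetical driver: "data" is an assumed folder name.
if __name__ == "__main__":
    d1, d2, d3, d4, out1, out2 = generate("data")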