示例#1
0
文件: field_old.py 项目: onlyacan/fsp
    def __init__(self, domain, filename=None, faceValue=None, dim=3):
        """Initialise a vector field on *domain*.

        filename  -- optional field file to read initial values from
        faceValue -- truthy flag ('True'/'Yes'/...): also allocate and
                     fill per-face values
        dim       -- vector dimension (the reader below handles 3)

        Raises TypeError when the file's internalField is not a uniform
        vector, or when a boundary-condition type is unknown.
        """
        # allocate face storage first so the readers below can fill it
        if faceValue in [True, 'Yes', 'yes', 'True']:
            self.face = VectorArray(domain.faces.nb)
        if filename is not None:
            tk = token.token(domain.case.field_path + filename)
            len_tk = len(tk)
            # locate 'internalField' followed (two tokens later) by '('
            # or a digit — the head of the value list
            for i in range(len_tk):
                if tk[i] == 'internalField' and \
                    (tk[i+2] == '(' or tk[i+2][0].isdigit()):
                    break
            stopid = i

            if tk[stopid+2] == '(' and tk[stopid + 1] == 'uniform':
                value = Vec(float(tk[stopid+3]),
                            float(tk[stopid+4]),
                            float(tk[stopid+5]))

                self[:, 0] = value[0]
                self[:, 1] = value[1]
                self[:, 2] = value[2]
            else:
                # fixed: old message used the undefined name 'case' and
                # concatenated filename outside the %-formatting
                raise TypeError('Unknown type in file %s'
                                % (domain.case.field_path + filename))
            # tested, read internal field right
            if faceValue is not None:
                # boundary-face values are read from the bc dictionary
                # in the field file
                tk = token.token(domain.case.field_path + filename, skipstr=['uniform'])
                bc_dict = Dictionary()
                bc_dict.read(tk, 'boundaryField')
                self.bcValueDict = bc_dict

                for bc in domain.faces.bcs:
                    if bc_dict[bc.name]['type'] == 'fixedValue':
                        value = Vec(string=bc_dict[bc.name]['value'])
                        start = bc.startFace
                        end = start + bc.nFaces
                        self.face[start:end, 0] = value[0]
                        self.face[start:end, 1] = value[1]
                        self.face[start:end, 2] = value[2]
                # internal-face values are interpolated between the two
                # adjacent cell values (tested)
                for face in domain.faces:
                    if face.isIntern():
                        self.face[face.id] = self[face.on.id] * face.itp_fct + \
                                       self[face.nb.id] * (1 - face.itp_fct)
                    # boundary faces:
                    #    1. Dirichlet: face_value = C, already set above
                    #    2. Neumann: gradient = C, face_value = value_P + C/beta_f
                    elif bc_dict[face.bc.name]['type'] == 'fixedValue':
                        pass
                    elif bc_dict[face.bc.name]['type'] in ['zeroGradient', 'empty']:
                        self.face[face.id] = self[face.on.id]
                    elif bc_dict[face.bc.name]['type'] == 'fixedGradient':
                        gradient = Vec(string=bc_dict[face.bc.name]['value'])
                        self.face[face.id] = self[face.on.id] + gradient/face.df_fct
                    else:
                        raise TypeError('Unkown bc value type %s in file%s'
                                        % (bc_dict[face.bc.name]['type'],
                                           domain.case.field_path + filename))
示例#2
0
def letra(ele, i, tam, mapaReservadas):  #, palavrasReservadas):
    """Lex an identifier, reserved word, or array access starting at ele[i].

    Walks forward while the characters stay in the valid identifier set;
    a '[' switches to array-index handling via numero().  Reads and
    updates the module-level lexer state: nLinha, classificacao,
    indiceParada, erro.
    """
    global nLinha, caracteresValidos, classificacao, indiceParada, erro
    tkn = None

    # keep consuming while the sequence stays valid (a..Z, _, 0..9)
    for j in range(i + 1, tam):
        if ele[j] in caracteresValidos:
            pass

        elif ele[j] == '[':  # an array index follows
            tkn = numero(ele, j, tam)

            if tkn.classificacao == 'integer' and ele[indiceParada] == ']':
                # indiceParada was advanced inside numero(); step past ']'
                indiceParada += 1

                sIdentificador = ele[i:indiceParada]
                classificacao = 'array'
                return token(sIdentificador, classificacao, str(nLinha))

            else:
                # malformed index: resynchronise just after the closing ']'
                for z in range(indiceParada, tam):
                    if ele[z] == ']':
                        indiceParada = z + 1

                erro = True
                sIdentificador = ele[i:indiceParada]
                classificacao = 'ERRO na indexação de array'
                return token(sIdentificador, classificacao, str(nLinha))

        else:
            indiceParada = j
            break

    # NOTE(review): if the loop runs off the end of the line without a
    # break, indiceParada keeps its previous global value — confirm the
    # caller guarantees a terminating character
    sIdentificador = ele[i:indiceParada]

    # fixed: a bare 'except:' (which swallowed every error) replaced by a
    # dict lookup with a default — reserved word, else plain identifier
    classificacao = mapaReservadas.get(sIdentificador, 'identificador')

    return token(sIdentificador, classificacao, str(nLinha))
	def abstractCond(self, condNode):
		"""Abstract a COND / COND-END subtree into a 'Cond' node.

		The children are, in order: the condition expression (absent for
		a final 'end' branch), the instruction list, and the abstracted
		else/elsif part (also absent for a final 'end' branch).
		"""
		if condNode.value.name == "INSTRUCT":
			condNode = condNode.children[0]
			assert condNode.value.name == "COND"
		else:
			assert condNode.value.name == "COND-END"

		abstractNode = parseTreeNode(token.token("Cond"))
		assert len(condNode.children) > 3

		if condNode.children[0].value.name != "CLOSE-COND":
			# not a final 'end': there must be a condition expression
			try:
				ifNode = condNode.findToken("EXP", maxDepth=1).next()
			except StopIteration:
				raise Exception("Symbol 'EXP' was not found in COND Node : \n" + str(condNode))
			abstractNode.giveNodeChild(self.abstractExp(ifNode))

		try:
			thenNode = condNode.findToken("INSTRUCT-LIST", maxDepth=1).next()
		except StopIteration:
			raise Exception("Symbol 'INSTRUCT-LIST' was not found in COND Node : \n" + str(condNode))
		abstractNode.giveNodeChild(self.abstractInstrList(thenNode))

		if condNode.children[0].value.name != "CLOSE-COND":
			# recurse into the elsif/else chain
			try:
				elseNode = condNode.findToken("COND-END", maxDepth=1).next()
			except StopIteration:
				raise Exception("Symbol 'COND-END' was not found in COND Node : \n" + str(condNode))
			abstractNode.giveNodeChild(self.abstractCond(elseNode))

		return abstractNode
示例#4
0
def leTokens():
    """Read the 'tokens' file and rebuild the token list.

    Each line holds "identifier classification lineNumber"; a
    classification made of two words yields a four-field line.
    Returns the list of reconstructed token objects.
    """
    # context manager guarantees the file is closed even on error
    with open('tokens', 'r') as arquivo:
        linhas = arquivo.readlines()

    tokens = []

    for l in linhas:
        aux = l.split(' ')

        if len(aux) == 3:
            identificador = aux[0]
            classificacao = aux[1]
            nLinha = aux[2]
        else:
            identificador = aux[0]
            classificacao = aux[1] + ' ' + aux[2]
            nLinha = aux[3]

        # fixed: the old nLinha[0:nLinha.find('\n')] chopped the last
        # character whenever the final line had no trailing newline
        # (find() returns -1 in that case)
        nLinha = nLinha.rstrip('\n')

        tokens.append(token(identificador, classificacao, nLinha))

    return tokens
	def abstractInstrList(self, inputInstrListNode):
		"""Flatten an INSTRUCT-LIST chain into one abstract 'Instr-List' node."""
		result = parseTreeNode(token.token("Instr-List"))
		current = inputInstrListNode
		# abstractInstr returns the abstracted node plus the next link in
		# the chain (None when the list is exhausted)
		while current is not None:
			child, current = self.abstractInstr(current)
			result.giveNodeChild(child)
		return result
示例#6
0
def upload(filepath):
	if not filepath.endswith('.xpi'):
		print 'Refusing to upload a non-xpi'
		exit(1)

	guid, version = xpifile.get_guid_and_version(filepath)

	method = 'PUT'
	path = 'https://addons.mozilla.org/api/v3/addons/%s/versions/%s/' % (guid, version)
	headers = {
		'Authorization': 'JWT %s' % token.token()
	}
	fields = {
		'upload': (os.path.basename(filepath), open(filepath, 'rb').read())
	}

	# print method
	# print path
	# print headers
	# return

	response = http.request_encode_body(method, path, headers=headers, fields=fields)

	print response.status
	# print response.getheaders()
	print response.data
	def abstractReturn(self, instrNode):
		"""Abstract a 'return EXP' instruction into a 'return' node."""
		assert instrNode.value.name == "INSTRUCT"
		assert instrNode.children[0].value.name == "RET"
		expNode = instrNode.children[1]
		assert expNode.value.name == "EXP", "2nd children is not EXP : " + str(instrNode)
		# the single child is the abstracted returned expression
		returnNode = parseTreeNode(token.token("return"))
		returnNode.giveNodeChild(self.abstractExp(expNode))
		return returnNode
	def abstractAssign(self, instrNode):
		"""Abstract a 'VARIABLE = EXP' instruction into an 'Assign' node."""
		assert instrNode.value.name == "INSTRUCT"
		varNode = instrNode.children[0]
		expNode = instrNode.children[2]
		assert varNode.value.name == "VARIABLE"
		assert instrNode.children[1].value.name == "EQUAL"
		assert expNode.value.name == "EXP"
		# the assign node carries the variable name; its child is the
		# abstracted right-hand side
		assignNode = parseTreeNode(token.token("Assign", value=varNode.value.value))
		assignNode.giveNodeChild(self.abstractExp(expNode))
		return assignNode
示例#9
0
def check_status(filepath):
	"""Ask addons.mozilla.org for the review status of the add-on version
	identified by the .xpi at *filepath*; returns the HTTP response."""
	guid, version = xpifile.get_guid_and_version(filepath)

	url = 'https://addons.mozilla.org/api/v3/addons/%s/versions/%s/' % (guid, version)
	auth_headers = {
		'Authorization': 'JWT %s' % token.token()
	}

	return http.request('GET', url, headers=auth_headers)
	def abstractSimpleExp(self, simpleExpNode):
		"""Abstract a SIMPLE-EXP: literal/variable leaf, function call,
		or parenthesised sub-expression."""
		typeNode = simpleExpNode.children[0]
		kind = typeNode.value.name
		if kind in ("INT", "STRING", "VARIABLE"):
			# a leaf: keep the token kind and its value
			return parseTreeNode(token.token(kind, value=typeNode.value.value))
		if kind == "FUNCT-CALL":
			return self.abstractFctCall(typeNode)
		if kind == "OPEN-PAR":
			# '(' EXP ')': abstract the inner expression
			return self.abstractExp(simpleExpNode.children[1])
		raise Exception("Unknown Simple Expression Type : " + str(kind))
示例#11
0
	def parse(self, inputText):
		"""Run the LL(1) parse over *inputText* (a list of symbols).

		Resets the parser state, builds a fresh parse tree rooted at the
		grammar's start symbol, and returns the production trace.
		"""
		self.input = inputText  # list of symbols
		self.output = []
		self.success = False
		self.error = False

		root = parseTreeNode(token.token(self.grammar.startSymbol))
		self.parseTree = root
		self.currentNode = root

		self.parse_recursiveCall()
		return self.output
	def abstractFct(self, inputFctNode):
		"""Abstract a FUNCT subtree into a 'Funct' node: name, one 'arg'
		child per argument variable, then the abstracted body."""
		def firstToken(symbolName):
			# first direct child token *symbolName*, with a clear error
			# when the symbol is missing
			try:
				return inputFctNode.findToken(symbolName, maxDepth=1).next()
			except StopIteration:
				raise Exception("Symbol '" + symbolName + "' was not found in function Node : \n" + str(inputFctNode))

		idNode = firstToken("ID")
		fctNode = parseTreeNode(token.token("Funct", value=idNode.value.value))

		argRoot = firstToken("ARG-LIST")
		for varNode in argRoot.findToken("VARIABLE"):
			fctNode.giveChild(token.token("arg", value=varNode.value.value))

		instrRoot = firstToken("INSTRUCT-LIST")
		fctNode.giveNodeChild(self.abstractInstrList(instrRoot))

		return fctNode
示例#13
0
	def produce(self, i):
		"""Apply grammar production *i* at the current parse-tree node.

		Records "P<i>" in the output trace, then for every non-empty
		symbol on the rule's right-hand side: grows a child node, moves
		the cursor into it, recursively continues the parse, and
		restores the cursor before handling the next symbol.
		"""
		if (self.verbose):
			print "produce", i
		# trace entry for this production
		self.output.append("P" + str(i))
		# remember the cursor so it can be restored after each descent
		saved_current = self.currentNode
		for produced in self.grammar.rules[i][1:]:
			if (produced != self.grammar.emptySymbol):
				self.currentNode.giveChild(token.token(produced))
				self.currentNode = self.currentNode.children[-1]
				self.parse_recursiveCall()
				self.currentNode = saved_current
		# also restored when the rule produced nothing (empty symbol)
		self.currentNode = saved_current
示例#14
0
文件: field_old.py 项目: onlyacan/fsp
    def __init__(self, domain, filename=None, faceValue=None, old=None):
        """Initialise a scalar field on *domain*.

        filename  -- optional field file to read the initial value from
        faceValue -- truthy flag ('True'/'Yes'/...): also allocate and
                     fill per-face values
        old       -- truthy flag: keep a copy of the initial state in
                     self.old

        Raises TypeError when the file's internalField is not uniform.
        """
        self.case = domain.case
        self.domain = domain
        # allocate face storage when requested
        if faceValue in [True, 'Yes', 'yes', 'True']:
            self.face = ScalarArray(domain.faces.nb)
        if filename is not None:
            self.filename = filename
            tk = token.token(domain.case.field_path + filename)
            len_tk = len(tk)
            # find the position of the head of the value list in the
            # token list
            for i in range(len_tk):
                if tk[i] == 'internalField' and \
                    (tk[i+2] == '(' or tk[i+2][0].isdigit()):
                    break
            stopid = i
            # uniform scalar field
            if tk[stopid+1] == 'uniform':
                value = float(tk[stopid + 2])
                self.fill(value)
            else:
                # fixed: old statement used the py2-only raise syntax,
                # the undefined name 'case', and concatenated filename
                # outside the %-formatting
                raise TypeError('Unknown type in file %s'
                                % (domain.case.field_path + filename))

            tk = token.token(domain.case.field_path + filename, skipstr=['uniform'])
            bc_dict = Dictionary()
            bc_dict.read(tk, 'boundaryField')
            self.bcValueDict = bc_dict

            if faceValue is not None:
                for bc in domain.faces.bcs:
                    if bc_dict[bc.name]['type'] == 'fixedValue':
                        value = float(bc_dict[bc.name]['value'])
                        start = bc.startFace
                        end = start + bc.nFaces
                        self.face[start:end] = value

        # after the field is initialised, optionally keep a snapshot
        if old in [True, 'Yes', 'yes', 'True']:
            self.old = self.copy()
示例#15
0
class translator:
    """Interactive THUNDER-flask interpreter: lex, syntax-check, and
    semantically execute one instruction per prompt line.

    NOTE(review): the pipeline components below are class-level (shared)
    attributes — presumably only one translator instance ever exists;
    confirm before creating several.
    """
    INSTRUCTION = ""
    tokenify = t.token()
    tablify = tab.table()
    syntaxify = s.syntaxis()
    stackify = tab.table()
    semanticfy = sem.semantic()
    IS_INTERPRETING = True

    def start(self):
        """Print the startup banner."""
        print(Fore.GREEN + "    ___(                     )\n" +
              "   (      THUNDER-flask      _)\n" +
              "  (_                       __)\n" +
              "    ((                _____)\n" + "      (_________)----'\n" +
              "         _/  /\n" + "        /  _/\n" + "      _/  /\n" +
              "     / __/\n" + "   _/ /\n" + "  /__/\n" + " //\n" + "/'\n")

    def listen(self):
        """REPL loop: read an instruction, tokenize, syntax-check, and
        run the semantic phase until it signals termination."""
        while self.IS_INTERPRETING:
            self.INSTRUCTION = raw_input(Fore.MAGENTA + 'thunder>')
            self.INSTRUCTION = self.INSTRUCTION.strip()  # drop surrounding spaces
            # lex the raw instruction
            tokens = self.tokenify.tokenize(self.INSTRUCTION)
            # add the tokens to the table, reporting lexing errors
            ok = True  # error flag
            for token in tokens:
                if (token[1] == "error"):
                    print(Fore.CYAN + "(X_X)Invalid Token: " + token[2] +
                          " in index " + str(token[0]))
                    ok = False
                else:
                    self.tablify.add(token)
            # only continue to syntax analysis when lexing succeeded
            if ok:
                # take the top instruction
                tab = self.tablify.top()  #to improve
                # fixed: syntaxize() was previously called twice for the
                # same input; evaluate once and reuse the result
                syntax = self.syntaxify.syntaxize(tab)
                if syntax is not None:
                    self.stackify.data = syntax
                    # run the semantic phase on the syntactic stack
                    status = self.semanticfy.semanticize(self.stackify.data)
                    self.IS_INTERPRETING = status[0]
                    print(status[1])
                else:
                    print(Fore.RED + "(X_X) invalid comand: " +
                          ' '.join(str(x[2]) for x in tab) +
                          '\n(X_X) unknown syntaxis: ' +
                          ' '.join(str(x[1]) for x in tab))
示例#16
0
def operador(ele, i, tam, mapaOperadores):
    """Classify the operator starting at ele[i].

    Recognises the two-character operators := <= >= <> first; anything
    else is looked up as a single character.  Updates the module-level
    state classificacao and (for two-character operators) indiceParada.
    """
    global nLinha, classificacao, indiceParada

    j = i + 1
    # fixed: guard j < tam so an operator that is the last character of
    # the line can no longer index past the end (old code read ele[j]
    # unconditionally for : < > and could raise IndexError)
    two_char_ops = (':=', '<=', '>=', '<>')
    if j < tam and ele[i] + ele[j] in two_char_ops:
        sIdentificador = ele[i] + ele[j]
        classificacao = mapaOperadores[sIdentificador]
        indiceParada = j + 1  # skip the second character
    else:
        # single-character operator
        sIdentificador = ele[i]
        classificacao = mapaOperadores[ele[i]]

    return token(sIdentificador, classificacao, str(nLinha))
	def abstract(self):
		"""Abstract the whole parse tree: the optional function list
		first, then the top-level instruction list."""
		root = self.currentInputNode
		assert root.value.name == "S"
		assert len(root.children) == 2

		hasFctList = False
		# for loop: the input tree can contain 1 or 0 "funct-list"
		for fctListInputNode in root.findToken("FUNCT-LIST", maxDepth=2):
			hasFctList = True
			fctListAbstractNode = parseTreeNode(token.token("Funct-List"))
			for fctNode in fctListInputNode.findToken("FUNCT"):
				fctListAbstractNode.giveNodeChild(self.abstractFct(fctNode))
			self.currentAbstractNode.giveNodeChild(fctListAbstractNode)

		# the instruction list sits one level deeper when a function
		# list was present
		instrDepth = 3 if hasFctList else 2
		for instrListRoot in root.findToken("INSTRUCT-LIST", maxDepth=instrDepth):
			self.currentAbstractNode.giveNodeChild(self.abstractInstrList(instrListRoot))
示例#18
0
def download(filepath):
	response = check_status(filepath)
	if response.status != httplib.OK:
		print response.status
		print response.data
		exit(1)

	method = 'GET'
	path = json.loads(response.data)['files'][0]['download_url']
	headers = {
		'Authorization': 'JWT %s' % token.token()
	}

	response = http.request(method, path, headers=headers)
	print response.status
	print response.getheaders()
	with open(os.path.basename(path).replace('?src=api', ''), 'wb') as f:
		f.write(response.data)
示例#19
0
def test_3():
	"""Parse `a * ( a + b ) $` with grammar g3 and check the parse tree."""
	from grammars_examples import g3
	ll1_parser = parser.LL1Parser(g3)

	# token stream for: a * ( a + b ) $
	token_specs = [("id", "a"), ("*",), ("(",), ("id", "a"),
	               ("+",), ("id", "b"), (")",), ("$",)]
	inputTokens = [token.token(*spec) for spec in token_specs]
	out = ll1_parser.parse(inputTokens)

	assert out[-1] == 'A'
	out_tree = ll1_parser.parseTree
	assert (out_tree.value.name == 'S')
	assert (out_tree.children[0].value.name == 'E')
	assert (out_tree.children[0].children[0].children[0].children[0].value.value == 'a')
	def abstractExpLevel(self, thisLevelExpNode, thisLevelName, nextLevelName, nextLevelFct, operators):  # this method abstract methods abstractExp, abstractExp2 and abstractExp3
		"""Abstract one precedence level of an expression.

		An expression node always has two children: the next-level
		expression and a TAIL.  An empty TAIL means no operator at this
		level; a two-child TAIL carries the operator and its right-hand
		operand.
		"""
		nextNode = thisLevelExpNode.children[0]
		tailNode = thisLevelExpNode.children[1]
		assert nextNode.value.name == nextLevelName, str(nextNode.value.name) + " == " + str(nextLevelName)
		assert tailNode.value.name == thisLevelName + "-TAIL"

		tailSize = len(tailNode.children)
		if tailSize == 0:
			# no operator at this level: delegate to the next level
			return nextLevelFct(nextNode)
		if tailSize == 2:
			tailNext = tailNode.children[1]
			assert tailNext.value.name == nextLevelName
			expType = tailNode.children[0].value.name
			assert expType in operators, str(expType + " is not in the list of accepted operators : " + str(operators))
			opNode = parseTreeNode(token.token("OPERATOR", value=expType))
			opNode.giveNodeChild(nextLevelFct(nextNode))
			opNode.giveNodeChild(nextLevelFct(tailNext))
			return opNode
		raise Exception("Misformed expression node (should have 2 or 0 children) :\n" + str(thisLevelExpNode))
示例#21
0
	def scans(self, pathFile):
		try:
			Perlfile = open(pathFile, "r")
			tokenList = list()
			line = ""
			for line2 in Perlfile:
				line = line + line2

			while line != "":
				tok, line = self.getNextToken(line)
				if tok.name != "":
					tokenList.append(tok)
					if (self.verbose):
						print tok
			tokenList.append(token.token('END-SYMBOL'))
		except Exception as e:
			raise Exception("Le fichier ne respecte pas la syntaxe PERL", e)
			tokenList = list()
		Perlfile.close()
		return tokenList
	def abstractInstr(self, inputInstrListNode):
		"""Abstract the first instruction of an INSTRUCT-LIST.

		Returns (abstractNode, nextListNode) where nextListNode is the
		following INSTRUCT-LIST link, or None at the end of the chain.
		"""
		assert inputInstrListNode.value.name == "INSTRUCT-LIST"
		# placeholder, replaced when a real instruction is found
		result = parseTreeNode(token.token("Instr", value="END"))
		nextInstruction = None  # ditto
		for instrNode in inputInstrListNode.findToken("INSTRUCT", maxDepth=1):
			typeNode = instrNode.children[0]
			kind = typeNode.value.name
			if (kind == "FUNCT-CALL"):
				result = self.abstractFctCall(typeNode)
			elif (kind == "VARIABLE"):
				result = self.abstractAssign(instrNode)
			elif (kind == "COND"):
				result = self.abstractCond(instrNode)
			elif (kind == "RET"):
				result = self.abstractReturn(instrNode)
			else:
				raise Exception("Instruction of unknown type : " + str(kind))
			# locate the continuation of the list, if any
			for candidate in inputInstrListNode.findToken("INSTRUCT-LIST", maxDepth=1):
				nextInstruction = candidate
		return result, nextInstruction
示例#23
0
def lex(characters, token_exprs, filename):
    """Tokenize *characters* with the (pattern, tag) pairs in token_exprs.

    Returns the token list, or [] after reporting an error via throw().
    Tracks line and column for diagnostics.
    """
    # fixed: the patterns were re.compile()d inside the scanning loop,
    # once per position per pattern; compile each exactly once up front
    compiled_exprs = [(re.compile(pattern), tag) for pattern, tag in token_exprs]

    line = 0
    pos = 0
    char = 0
    tokens = []
    while (pos < len(characters)):
        if (characters[pos] == "\n"):
            char = 0
            line += 1
        match = None
        for regex, tag in compiled_exprs:
            match = regex.match(characters, pos)
            if (match):
                text = match.group(0)
                if (tag):  # tagless patterns (e.g. whitespace) are skipped
                    current_token = token(text, tag, char, line)  # (text, tag)
                    tokens.append(current_token)
                break
        if (not match):
            # no pattern matched: diagnose the offending character
            if (characters[pos] == "”" or characters[pos] == "“"):
                throw(f"detected unicode quotations", char, line, filename,
                      characters[pos])
                return []
            elif (characters[pos] == "'"):
                throw("please use '\"' for quotations marks only", char, line,
                      filename, characters[pos])
                return []
            elif (characters[pos] == "#"):
                throw(f"illegal character, did you mean '//'?", char, line,
                      filename, characters[pos])
                return []
            else:
                throw(f"illegal character", char, line, filename,
                      characters[pos])
                return []
        else:
            pos = match.end(0)
        # NOTE(review): this advances once per *token*, not per
        # character, so 'char' is really a token column — confirm
        # throw()/token() expect that
        char += 1
    return tokens
	def abstractFctCall(self, fctCallNode):
		"""Abstract a FUNCT-CALL subtree into a 'Fct-Call' node whose
		children are the abstracted argument expressions."""
		assert fctCallNode.value.name == "FUNCT-CALL"
		assert len(fctCallNode.children) == 4
		nameNode = fctCallNode.children[0]
		name = nameNode.value.name
		if (name == "FUNCT-NAME"):  # user-defined function: real name is in the value
			name = nameNode.value.value
		callNode = parseTreeNode(token.token("Fct-Call", value=name))
		# collect the arguments: the first one, then each chained one
		for argRoot in fctCallNode.findToken("FUNCT-CALL-ARG", maxDepth=2):
			for firstArgNode in argRoot.findToken("FUNCT-CALL-ARG-BEG"):
				expNode = firstArgNode.children[0]
				assert expNode.value.name == "EXP"
				callNode.giveNodeChild(self.abstractExp(expNode))
			for nextArgNode in argRoot.findToken("FUNCT-CALL-ARG-END"):
				if len(nextArgNode.children) > 0:
					expNode = nextArgNode.children[1]
					assert expNode.value.name == "EXP"
					callNode.giveNodeChild(self.abstractExp(expNode))
		return callNode
示例#25
0
	def getNextToken(slef, line):
		line = line.lstrip()

		while re.match("\n", line):
			line = line[2:]
			print "ligne vide"

		if line == "":
			return token.token("", ""), line
		else:
			# On cherche d'abord les operateurs "non string"
			if line[0] == "-":
				line = line[1:]
				return token.token("MINUS", ""), line
			if line[0] == "+":
				line = line[1:]
				return token.token("ADD", ""), line
			if line[0] == ">":
				if len(line) > 2 and line[1] == "=":
					line = line[2:]
					return token.token("GE", ""), line
				else:
					line = line[1:]
					return token.token("GT", ""), line
			if line[0] == "<":
				if len(line) > 2 and line[1] == "=":
					line = line[2:]
					return token.token("LE", ""), line
				else:
					line = line[1:]
					return token.token("LT", ""), line
			if line[0] == "/":
				line = line[1:]
				return token.token("DIV", ""), line
			if line[0] == "*":
				line = line[1:]
				return token.token("MUL", ""), line
			if line[0] == "}":
				line = line[1:]
				return token.token("CLOSE-BRAC", ""), line
			if line[0] == "{":
				line = line[1:]
				return token.token("OPEN-BRAC", ""), line
			if line[0] == ")":
				line = line[1:]
				return token.token("CLOSE-PAR", ""), line
			if line[0] == "(":
				line = line[1:]
				return token.token("OPEN-PAR", ""), line
			if line[0] == ",":
				line = line[1:]
				return token.token("COMA", ""), line
			if line[0] == ";":
				line = line[1:]
				return token.token("SEMICOLON", ""), line
			if line[0] == ".":
				line = line[1:]
				return token.token("DOT", ""), line
			if line[0] == "=":
				if len(line) > 2 and line[1] == "=":
					line = line[2:]
					return token.token("EQUIV", ""), line
				else:
					line = line[1:]
					return token.token("EQUAL", ""), line
			if line[0] == "!":
				if len(line) > 2 and line[1] == "=":
					line = line[2:]
					return token.token("DIF", ""), line
				else:
					line = line[1:]
					return token.token("FAC", ""), line
			if re.match("\|\|", line):
				line = line[2:]
				return token.token("OR", ""), line
			if re.match("\&\&", line):
				line = line[2:]
				return token.token("AND", ""), line
			if re.match("''", line):
				line = line[2:]
				return token.token("BOOL", "false"), line

			# On cherche ensuite les operateurs "strings"
			if line[0] == "n":
				if re.match("not[^a-zA-Z0-9_-]", line):
					line = line[3:]
					return token.token("NOT", ""), line
				elif re.match("ne[^a-zA-Z0-9__-]", line):
					line = line[2:]
					return token.token("NE-S", ""), line
			if re.match("true[^a-zA-Z0-9_-]", line):
				line = line[4:]
				return token.token("BOOL", "true"), line
			if re.match("false[^a-zA-Z0-9_-]", line):
				line = line[5:]
				return token.token("BOOL", "false"), line
			if line[0] == "l":
				if re.match("lt[^a-zA-Z0-9__-]", line):
					line = line[2:]
					return token.token("LT-S", ""), line
				if re.match("le[^a-zA-Z0-9__-]", line):
					line = line[2:]
					return token.token("LE-S", ""), line
				if re.match("length[^a-zA-Z0-9_-]", line):
					line = line[6:]
					return token.token("PERL-LENG", ""), line
			if line[0] == "g":
				if re.match("gt[^a-zA-Z0-9__-]", line):
					line = line[2:]
					return token.token("GT-S", ""), line
				if re.match("ge[^a-zA-Z0-9__-]", line):
					line = line[2:]
					return token.token("GE-S", ""), line
			if line[0] == "i":
				if re.match("if[^a-zA-Z0-9_-]", line):
					line = line[2:]
					return token.token("OPEN-COND", ""), line
				if re.match("int[^a-zA-Z0-9_-]", line):
					line = line[3:]
					return token.token("PERL-INT", ""), line
			if line[0] == "e":
				if re.match("eq[^a-zA-Z0-9__-]", line):
					line = line[2:]
					return token.token("EQ-S", ""), line
				if re.match("elsif[^a-zA-Z0-9_-]", line):
					line = line[5:]
					return token.token("ADD-COND", ""), line
				if re.match("else[^a-zA-Z0-9_-]", line):
					line = line[4:]
					return token.token("CLOSE-COND", ""), line
			if re.match("defined[^a-zA-Z0-9_-]", line):
				line = line[7:]
				return token.token("PERL-DEF", ""), line
			if re.match("unless[^a-zA-Z0-9_-]", line):
				line = line[6:]
				return token.token("NEG-COND", ""), line
			if re.match("print[^a-zA-Z0-9_-]", line):
				line = line[5:]
				return token.token("PERL-PRIN", ""), line
			if re.match("return[^a-zA-Z0-9_-]", line):
				line = line[6:]
				return token.token("RET", ""), line
			if line[0] == "s":
				if re.match("sub[^a-zA-Z0-9_-]", line):
					line = line[3:]
					return token.token("FUNCT-DEF", ""), line
				if re.match("substr[^a-zA-Z0-9_-]", line):
					line = line[6:]
					return token.token("PERL-SUBS", ""), line
				if re.match("scalar[^a-zA-Z0-9_-]", line):
					line = line[6:]
					return token.token("PERL-SCAL", ""), line

			# On cherche ensuite les nombres (float et int)
			if re.match("[0-9]", line):
				floatNumber = re.match("([0-9])+\.([0-9])+", line)
				intNumber = re.match("([0-9])+", line)  # On sait deja qu il n y a pas de point apres puisqu on teste les float d abord
				if floatNumber:
					# On a un float
					line = line[len(floatNumber.group()):]
					return token.token("FLOAT", floatNumber.group()), line
				if intNumber:
					# on a un entier
					line = line[len(intNumber.group()):]
					return token.token("INT", intNumber.group()), line

			# On cherche ensuite les variables, fonctions et strings (tout ce qui necessite une boucle)
			if line[0] == "&":
				func = re.match("&([A-Za-z])+([A-Za-z0-9_-])*", line)
				if func:
					# On a un appel de fonction (& suivi d'un string)
					line = line[len(func.group()):]
					return token.token("FUNCT-NAME", func.group()[1:]), line

			if line[0] == "'":
				string = re.match("'([^'])*'", line)
				if string:
					# On a un string (' suivi d'un string et termine par un autre ')
					line = line[len(string.group()):]
					return token.token("STRING", string.group()[1:-1]), line

			if line[0] == "#":
				com = re.match("#(.)*\n", line)
				if com:
					# On a un commentaire (# suivi d'un string et termine par un autre \n)
					line = line[len(com.group()):]
					return token.token("", ""), line

			if line[0] == "$":
				var = re.match("[$]([A-Za-z])+([A-Za-z0-9_-])*", line)
				if var:
					# On a une variable ($ suivi d'un string)
					line = line[len(var.group()):]
					return token.token("VARIABLE", var.group()[1:]), line
			# tout ce qui reste est alors une variable
			if re.match("([A-Za-z])", line):
				var = re.match("([A-Za-z])+([A-Za-z0-9_-])*", line)
				if var:
					# On a un ID (un string)
					line = line[len(var.group()):]
					return token.token("ID", var.group()), line
			# Si on arrive ici c est qu il y a un probleme avec la syntaxe du fichier
			return None
	def __init__(self, completeParseTree):
		"""Prepare abstraction state: keep the input parse tree and start
		a fresh abstract tree rooted at AST-ROOT."""
		self.inputTree = completeParseTree
		self.currentInputNode = completeParseTree
		astRoot = parseTreeNode(token.token("AST-ROOT"))
		self.ast = astRoot
		self.currentAbstractNode = astRoot
示例#27
0
def execucao(programa, palavrasReservadas, automato):
    """Run the lexer automaton over ``programa`` (a list of source lines).

    Walks the transition tables of the module-level states q0..q25,
    classifies each recognized lexeme into a ``token`` and collects it.
    The classified tokens are also persisted via ``cio.salvaTokens``.
    Returns the list of ``token`` objects (error tokens included).
    """

    global q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12, q13, q14, q15, q16, q17, q18, q19, q20, q21, q22, q23, q24, q25

    estadoAtual = q0
    estadoAnterior = None  # last accepting state seen for the current lexeme
    indiceCaractere = 0  # index of the character being examined

    temTransicao = False  # whether the current character triggers a transition
    transicao = ''  # dict key naming the state that transition leads to

    nLinha = 1  # current line number (recorded in each token)
    tokens = []  # classified tokens

    encerra = False  # set when an invalid symbol forces the scan to stop
    comentario = False  # True while a '{' comment is still open

    # State name -> state object, so a transition key resolves to a state.
    dictEstados = {
        'q0': q0,
        'q1': q1,
        'q2': q2,
        'q3': q3,
        'q4': q4,
        'q5': q5,
        'q6': q6,
        'q7': q7,
        'q8': q8,
        'q9': q9,
        'q10': q10,
        'q11': q11,
        'q12': q12,
        'q13': q13,
        'q14': q14,
        'q15': q15,
        'q16': q16,
        'q17': q17,
        'q18': q18,
        'q19': q19,
        'q20': q20,
        'q21': q21,
        'q22': q22,
        'q23': q23,
        'q24': q24,
        'q25': q25
    }

    # One pass per program line.
    for linha in programa:

        # Walk every character of the line.
        while (indiceCaractere < len(linha)):

            # At the initial state a new token begins at this position.
            if estadoAtual.nome == automato.estadoInicial:
                inicio = indiceCaractere

            # Check the current state's transitions for this character.
            for t in estadoAtual.transicoes.keys():

                # Track '{' / '}' so an unterminated comment is detectable
                # after the scan finishes.
                if linha[indiceCaractere] == '{':
                    comentario = True

                if linha[indiceCaractere] == '}':
                    comentario = False

                if linha[indiceCaractere] in estadoAtual.transicoes[t]:
                    temTransicao = True
                    transicao = t  # key of the destination state

            # A transition consumes the character.
            if temTransicao:

                # A transition into q22 means "ignore the rest of the line".
                if transicao == 'q22':
                    estadoAtual = dictEstados[automato.estadoInicial]
                    temTransicao = False
                    break
                else:

                    # Remember the last accepting state so a dead end can
                    # fall back to it when classifying.
                    if estadoAtual.nome in automato.estadosFinais:
                        estadoAnterior = estadoAtual

                    # Move to the state this transition points to.
                    estadoAtual = dictEstados[transicao]

                    # Advance to the next character.
                    indiceCaractere += 1

            else:  # no transition for this character

                # Dead end at the initial state: the symbol itself is
                # not part of the language.
                if estadoAtual.nome == automato.estadoInicial:
                    sIdentificador = linha[indiceCaractere]
                    classificacao = "ERROR - Símbolo inválido"
                    tkn = token(sIdentificador, classificacao, str(nLinha))
                    tokens.append(tkn)
                    encerra = True
                    break

                # Dead end at an accepting state: emit the lexeme.
                if estadoAtual.nome in automato.estadosFinais:

                    # The lexeme runs from its start to the current position.
                    sIdentificador = linha[inicio:indiceCaractere]

                    # q2 accepts several classes (reserved word, identifier,
                    # additive/multiplicative operator): pick by lexeme.
                    if estadoAtual.nome == 'q2':
                        if sIdentificador in palavrasReservadas:
                            classificacao = 'palavra reservada'
                            tkn = token(sIdentificador, classificacao,
                                        str(nLinha))
                        elif sIdentificador == 'and':
                            classificacao = 'operador multiplicativo'
                            tkn = token(sIdentificador, classificacao,
                                        str(nLinha))
                        elif sIdentificador == 'or':
                            classificacao = 'operador aditivo'
                            tkn = token(sIdentificador, classificacao,
                                        str(nLinha))
                        else:
                            tkn = token(sIdentificador,
                                        estadoAtual.classificacao, str(nLinha))

                    # Any other accepting state carries its own class.
                    else:
                        tkn = token(sIdentificador, estadoAtual.classificacao,
                                    str(nLinha))

                    tokens.append(tkn)

                # Dead end at a NON-accepting state: classify using the
                # last accepting state we passed through.
                else:

                    sIdentificador = linha[inicio:indiceCaractere]

                    if estadoAnterior.nome == 'q2':
                        if sIdentificador in palavrasReservadas:
                            # FIX: this label used to contain a literal TAB
                            # ('palavra\treservada'), inconsistent with the
                            # q2 branch above; normalized to a plain space.
                            classificacao = 'palavra reservada'
                            tkn = token(sIdentificador, classificacao,
                                        str(nLinha))
                        elif sIdentificador == 'and':
                            classificacao = 'operador multiplicativo'
                            tkn = token(sIdentificador, classificacao,
                                        str(nLinha))
                        elif sIdentificador == 'or':
                            classificacao = 'operador aditivo'
                            tkn = token(sIdentificador, classificacao,
                                        str(nLinha))

                        else:
                            tkn = token(sIdentificador,
                                        estadoAnterior.classificacao,
                                        str(nLinha))
                    else:
                        # NOTE(review): this reads estadoAtual.classificacao
                        # even though estadoAtual is a dead, non-accepting
                        # state here; estadoAnterior.classificacao looks like
                        # the intended fallback — TODO confirm before changing.
                        tkn = token(sIdentificador, estadoAtual.classificacao,
                                    str(nLinha))

                    tokens.append(tkn)

                # A token was emitted: restart from the initial state.
                estadoAtual = dictEstados[automato.estadoInicial]

            # Reset the flag before examining the next character.
            temTransicao = False

        # End of line: bump the line counter and rewind the cursors.
        nLinha += 1
        indiceCaractere = 0
        inicio = 0

        # An invalid symbol aborts the whole analysis.
        if encerra:
            break

    # Finishing with an open '{' comment is an error.
    if comentario:
        sIdentificador = ''
        classificacao = "ERROR - Símbolo '}' não encontrado"
        tkn = token(sIdentificador, classificacao, '')
        tokens.append(tkn)

    # Persist one getTokenInfo() line per classified token.
    tokensSalvar = []
    for t in tokens:
        tokensSalvar.append(t.getTokenInfo() + '\n')

    cio.salvaTokens(tokensSalvar)

    return tokens
示例#28
0
 def send_cur_token(self):
     """Build a token from the buffered kind/text, clear the text
     buffer, and return the token."""
     kind, text = self.cur_kind, self.cur_token
     self.cur_token = ""
     return token.token(kind, text)
示例#29
0
文件: main.py 项目: ShawSumma/code
import token
import tree

# Tokenize the source, dump the token stream for inspection, then
# generate and write the output program.  Context managers replace the
# original bare open()/read()/write() calls, which never closed their
# file handles (last write was not guaranteed to be flushed).
with open('main.ion') as src:
    data = src.read()

tokens = token.token(data)

with open('tree.txt', 'w') as dbg:
    dbg.write(str(tokens))

out = tree.make(tokens)

with open('out.py', 'w') as dst:
    dst.write(out)
示例#30
0
def test_1():
	"""Parse tests for grammar g1.

	Covers one valid program, one program with an unknown symbol '*',
	and one with a mismatched parenthesis.  The original version let the
	error cases pass silently when parse() did NOT raise; each try block
	now fails explicitly in that situation.
	"""
	from grammars_examples import g1
	ll1_parser = parser.LL1Parser(g1)

	def make_tokens(specs):
		# Build token.token objects from (name,) / (name, value) tuples.
		return [token.token(*spec) for spec in specs]

	# Valid program: the parse must reduce to the start symbol 'A'.
	inputTokens = make_tokens([
		("begin",), ("Id", "a"), (":=",), ("Id", "b"), ("+",), ("Nb", "4"),
		(";",), ("write",), ("(",), ("Id", "a"), ("+",), ("(",),
		("Nb", "2"), ("-",), ("Id", "a"), (")",), (")",), (";",),
		("end",), ("$",),
	])
	out = ll1_parser.parse(inputTokens)
	assert out[-1] == 'A'

	# Unknown symbol (*): the parser must raise ParseError.
	inputTokens = make_tokens([
		("begin",), ("Id",), (":=",), ("Id",), ("*",), ("Nb",), (";",),
		("write",), ("(",), ("Id",), ("+",), ("(",), ("Nb",), ("-",),
		("Id",), (")",), (")",), (";",), ("end",), ("$",),
	])
	try:
		ll1_parser.parse(inputTokens)
	except parser.ParseError as parse_e:
		assert parse_e.errorType == "Unknown symbol", "Got " + repr(parse_e.errorType)
		assert parse_e.symbol == "*", "Got " + repr(parse_e.symbol)
	else:
		raise AssertionError("expected ParseError for unknown symbol '*'")

	# Mismatched parenthesis: the parser must raise ParseError.
	inputTokens = make_tokens([
		("begin",), ("Id",), (":=",), ("Id",), ("+",), ("Nb",), (";",),
		("write",), ("(",), ("Id",), ("+",), ("(",), ("Nb",), ("-",),
		("Id",), (")",), ("(",), (")",), (";",), ("end",), ("$",),
	])
	try:
		ll1_parser.parse(inputTokens)
	except parser.ParseError as parse_e:
		assert parse_e.errorType == "Misplaced symbol", "Got " + repr(parse_e.errorType)
		assert parse_e.symbol == "(", "Got " + repr(parse_e.symbol)
	else:
		raise AssertionError("expected ParseError for mismatched parenthesis")
示例#31
0
def test_2():
	"""Parse tests for grammar g2: one valid input, one misplaced '('.

	The original version let the error case pass silently when parse()
	did NOT raise; the try block now fails explicitly in that situation.
	"""
	from grammars_examples import g2
	ll1_parser = parser.LL1Parser(g2)

	def make_tokens(names):
		# Build token.token objects from plain token-name strings.
		return [token.token(name) for name in names]

	# Valid input: the parse must reduce to the start symbol 'A'.
	inputTokens = make_tokens(["ID", "-", "(", "ID", ")", "$"])
	out = ll1_parser.parse(inputTokens)
	assert out[-1] == 'A'

	# Extra '(' before the closing parenthesis: must raise ParseError.
	inputTokens = make_tokens(["ID", "-", "(", "ID", "(", ")", "$"])
	try:
		ll1_parser.parse(inputTokens)
	except parser.ParseError as parse_e:
		assert parse_e.errorType == "Misplaced symbol", "Got " + repr(parse_e.errorType)
		assert parse_e.symbol == ")", "Got " + repr(parse_e.symbol)
	else:
		raise AssertionError("expected ParseError for misplaced '('")
示例#32
0
def numero(ele, i, tam):
    """Scan the numeric literal starting at ``ele[i]`` and return its token.

    ``ele`` is the current line, ``i`` the index of the first digit and
    ``tam`` is ``len(ele)``.  Sets the module-level ``indiceParada`` so the
    caller (``exe``) skips the consumed characters, and ``classificacao``
    to 'integer' or 'real'.

    Fixes two crashes in the original: ``tkn`` (and ``z``) were left
    unbound — raising at ``return`` — when the literal ran to the end of
    the line, or when a trailing '.' was the line's last character.
    """
    global nLinha, numeros, classificacao, indiceParada

    # Consume the integer part.
    j = i + 1
    while j < tam and ele[j] in numeros:
        j += 1

    if j < tam and ele[j] == '.':
        # A dot promotes the literal to a real; consume the decimal part.
        classificacao = 'real'
        z = j + 1
        while z < tam and ele[z] in numeros:
            z += 1
        fim = z
    else:
        classificacao = 'integer'
        fim = j

    # Resume scanning at the first character after the literal.
    indiceParada = fim
    sIdentificador = ele[i:fim]
    return token(sIdentificador, classificacao, str(nLinha))
示例#33
0
def exe(programa, mapaReservadas, mapaOperadores):  #, palavrasReservadas):
    """Lexer automaton.

    Scans ``programa`` (a list of source lines) character by character,
    dispatching to the sub-scanners ``comentario``/``numero``/``letra``/
    ``operador``, and returns the list of classification strings — one
    ``getTokenInfo()`` line per recognized token (error tokens included).
    """

    global minusculas, maiusculas, letras, numeros, caracteresValidos, nLinha, classificacao, sIdentificador, indiceParada, estaComentado

    tokens = []
    erro = False  # once an error token is emitted, lexing stops
    nLinhaAbreComentario = -1  # line where a still-open '{' comment started

    for ele in programa:  # one iteration per source line

        tam = len(ele)

        for i in range(0, tam):  # initial state of the automaton

            # Skip characters already consumed by a sub-scanner
            # (they advance the module-level ``indiceParada``).
            if i < indiceParada:
                pass

            else:

                if ele[i] == '\n':  # newline — must be tested first
                    nLinha += 1

                elif ele[i] == '\t' or ele[
                        i] == ' ':  # whitespace: nothing to do
                    pass

                elif ele[i] == '{' or estaComentado:  # block comment
                    if nLinhaAbreComentario == -1:
                        nLinhaAbreComentario = nLinha

                    # ``comentario`` toggles ``estaComentado``: False once a
                    # '}' is found, True if the line ends still inside it.
                    comentario(ele, i, tam)

                    # Still open: the rest of this line is comment text.
                    if estaComentado:
                        break

                # Line comment.  FIX: guard i + 1 < tam — the original
                # indexed ele[i + 1] and raised IndexError when '/' was
                # the last character of a line.
                elif ele[i] == '/' and i + 1 < tam and ele[i + 1] == '/':
                    nLinha += 1
                    break

                elif ele[i] in numeros:  # digit starts a numeric literal
                    tkn = numero(ele, i, tam)
                    tokens.append(tkn)

                elif ele[i] in letras:  # letter starts an identifier/keyword
                    tkn = letra(ele, i, tam,
                                mapaReservadas)  #, palavrasReservadas)
                    tokens.append(tkn)

                elif ele[i] in list(mapaOperadores.keys()):  # operator
                    tkn = operador(ele, i, tam, mapaOperadores)
                    tokens.append(tkn)

                else:  # character not in the language
                    sIdentificador = ele[i]
                    classificacao = 'ERRO: SÍMBOLO NÃO RECONHECIDO'
                    tkn = token(sIdentificador, classificacao, str(nLinha))
                    tokens.append(tkn)
                    erro = True  # abort the lexical analysis
                    break

        if erro:
            break  # invalid symbol: stop scanning the whole program

        indiceParada = 0  # reset the skip index for the next line

    # End of the main scan loop.

    # Finishing with a '{' comment still open is an error.
    if estaComentado:

        sIdentificador = '{'
        classificacao = 'ERRO: COMENTÁRIO ABERTO NA LINHA ' + str(
            nLinhaAbreComentario)

        tkn = token(sIdentificador, classificacao, str(nLinha))
        tokens.append(tkn)
        erro = True

    # Render one getTokenInfo() line per classified token.
    classificacaoTokens = []
    for t in tokens:
        classificacaoTokens.append(t.getTokenInfo() + '\n')

    return classificacaoTokens
示例#34
0
import discord
import token
import asyncio

# Read the bot credential once at startup from the project-local
# ``token`` module.  Renamed from ``token`` so the variable no longer
# shadows (rebinds) the module it was just read from.
bot_token = token.token()

client = discord.Client()

@client.event
async def on_ready():
  # Fires once the gateway connection is established.
  print("Bot Online!")

@client.event
async def on_member_join(member):
  # Greet new members in the configured channel.
  # NOTE(review): "***" is a placeholder channel id — must be filled in.
  channel = client.get_channel("***")
  msg = "Welcome {}".format(member.mention)
  await client.send_message(channel, msg)

@client.event
async def on_member_remove(member):
  # Say goodbye when a member leaves.
  channel = client.get_channel("***")
  msg = "Goodbye {}".format(member.mention)
  await client.send_message(channel, msg)

client.run(bot_token)
示例#35
0
# Bot bootstrap: create the telegram.Bot instance, configure logging,
# and cache the bot's own identity.

import telegram 
from threading import Timer 
import sys
import random
#from messaging import sendMessage, sendChat
import logging
# Make the parent directory importable so the project-local ``token``
# module (which supplies the bot credential) can be found.
sys.path.insert(0,'..')
import token

#formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Log to ../bot.log and mirror every record to stderr as well.
logging.basicConfig(format='%(asctime)s:%(levelname)s -%(message)s', filename='../bot.log', level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler())

# NOTE(review): this writes the bot token (a secret) into the log file
# and to stderr — confirm that is intended before shipping.
logging.debug(token.token())

bot = telegram.Bot(token=token.token())
# Configuration constants; their units/semantics are defined by the
# consumers elsewhere in the bot (not visible here — TODO confirm).
startTimer=1
nightTimer=10
polling_timeout=60

# Fetch the bot's own account once at startup.
bot_me=bot.getMe()

bot_firstname=bot_me.first_name
bot_username=bot_me.username
def logit(message):
	"""Log *message* at INFO level, unless it is the empty string."""
	if message == '':
		return
	logging.info(message)
示例#36
0
文件: __main__.py 项目: sscst/arber
#!/usr/bin/env python  
# -*- coding: utf-8 -*- 
from utensil import *
from utensil_function import *
from token import token
from token_list import token_list
import threading 


token_container = token(token_list) 
lock = threading.Lock() 
restart(token_container,lock)
print '''        欢迎来到微博备份工具! 
        'a'后跟微博昵称,可进行微博备份
	'working'可查询正在工作的昵称
	'finish'可查询已经完成的昵称
	'down'后跟微博昵称,可将数据从数据库中导成txt文件,文件将会存放在result文件夹,后面可以跟 'top' + 数字,表示导出热度最大的N条微博
     '''
while True :
    o = raw_input("your choice : ")
    if o[0] == 'a':
        name = [x for x in o.split(' ') if x]
        if check_screen_name(name[1],token_container):
            thread = MyThread(name[1],token_container,lock)
            thread.setDaemon(True)
            thread.start()
        else :
            print "好像没有这个昵称哦,看看你有没有弄错?"
    elif o == 'working':
        show('0')
    elif o == 'finish':