Example No. 1
def analize(self, grammars):
		self.analized_data = []
	
		for i, grammar in enumerate(grammars):
		
			if grammar == '':
				continue
			self.analized_data.append([])
				
			for unit in grammar:
				# if "unit" is already generated in Token, 
				if Token.is_generated(unit):
					rtn = Token.return_token(unit)
				# if already generated in Non-Token, 
				elif nonToken.is_generated(unit):
					rtn = nonToken.return_token(unit)
				elif unit == nonToken.repete_start_symbol.get_sign():
					rtn = nonToken.repete_start_symbol
				elif unit == nonToken.repete_end_symbol.get_sign():
					rtn = nonToken.repete_end_symbol
				elif unit == nonToken.epsilon_symbol.get_sign():
					rtn = nonToken.epsilon_symbol
				else:
					print u"Error! Token or Non-Token expected!"
					print unit
					continue

				self.analized_data[-1].append(rtn)
Example No. 2
class TokenRingThread(threading.Thread):
    """ A worker thread that takes directory names from a queue, finds all
        files in them recursively and reports the result.

        Input is done by placing directory names (as strings) into the
        Queue passed in dir_q.

        Output is done by placing tuples into the Queue passed in result_q.
        Each tuple is (thread name, dirname, [list of files]).

        Ask the thread to stop by calling its join() method.
    """
    def __init__(self, token_nodes, wait=2):
        super(TokenRingThread, self).__init__()
        self.__token_nodes = token_nodes
        self.__stoprequest = threading.Event()
        self.__token = Token()
        self.__wait = wait

    def set_wait_time(self, wait):
        self.__wait = wait
        if self.__wait < 1:
            self.__wait = 1

    def grant_token(self, node):
        node.receive_token(self.__token)

    def retrieve_token(self, node):
        self.__token = node.return_token()

    def run(self):
        # As long as we weren't asked to stop, try to take new tasks from the
        # queue. The tasks are taken with a blocking 'get', so no CPU
        # cycles are wasted while waiting.
        # Also, 'get' is given a timeout, so __stoprequest is always checked,
        # even if there's nothing in the queue.
        while not self.__stoprequest.isSet():
            try:
                it = iter(self.__token_nodes)

                for i in it:
                    print "Token Ring Grant MSS %d" % i.get_name()
                    self.grant_token(i)
                    time.sleep(self.__wait)
                    self.retrieve_token(i)

                self.__token.increment_counter()
                print "token counter = %d" % self.__token.get_counter()

            except Exception:
                # keep the ring running even if a node misbehaves
                continue

    def join(self, timeout=None):
        self.__stoprequest.set()
        super(TokenRingThread, self).join(timeout)
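
A minimal usage sketch (not from the source) may help: DummyNode below is a hypothetical stand-in exposing the get_name(), receive_token() and return_token() methods the thread expects, with integer names since the thread formats them with %d.

import time


class DummyNode(object):
    """Hypothetical stand-in for a token-ring node."""
    def __init__(self, name):
        self._name = name
        self._token = None

    def get_name(self):
        return self._name  # an int: the thread prints it with %d

    def receive_token(self, token):
        self._token = token

    def return_token(self):
        token, self._token = self._token, None
        return token

ring = TokenRingThread([DummyNode(1), DummyNode(2)], wait=1)
ring.start()
time.sleep(5)
ring.join()  # sets the stop request and waits for the thread to finish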
Example No. 3
	def def_code_generate(self):
		generated_code = []
		generated_code.append("def "+self.sign+"()")
		generated_code.append("\tcase @token")

		epsilon_flag = False
		# one "when" branch per analysed grammar row, keyed on the row's first set
		for row in self.analized_data:
			
			if row[0] != nonToken.epsilon_symbol:
				frst = []
				if row[0] != nonToken.repete_start_symbol:
					row[0].get_first(frst)
				else:
					row[1].get_first(frst)

				code = "\twhen " + ",".join(frst)
				generated_code.append(code)
	
				for i, col in enumerate(row):
					if Token.is_generated(col.get_sign()):
						code = "\t\tchecktoken(\"" + self.sign + "\", " + col.get_entity() + ")"
					elif nonToken.is_generated(col.get_sign()):
						code = "\t\t" + col.get_sign() + "()"
					elif col == nonToken.repete_start_symbol:
			
						if Token.is_generated(row[i+1].get_sign()):
							code = "\t\twhile @token == " + row[i+1].get_entity() + " do"
						elif nonToken.is_generated(row[i+1].get_sign()):
			
							frst = []
							row[i+1].get_first(frst)
							code = "\t\twhile "
							for ft in frst:
								code += "@token == " + ft + " || "
							code = code[:-3] + "do"
							 
					elif col == nonToken.repete_end_symbol:
						code = "\t\tend"
					generated_code.append(code)
			else:
				epsilon_flag = True
	
		if epsilon_flag:
			generated_code.append("\telse")
		else:
			generated_code.append("\telse")
			generated_code.append("\t\tputs \"error\"")
			generated_code.append("\t\texit(1)")
				
		generated_code.append("\tend")
		generated_code.append("end")
		
		return generated_code
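
To make the output shape concrete: for a hypothetical rule named expr whose single row starts with a token NUMBER, the method would assemble roughly the lines below (the rule name, first set and entity name are assumptions, not taken from the source).

# Hypothetical sketch of the Ruby-like lines the generator assembles.
expected = [
    "def expr()",
    "\tcase @token",
    "\twhen NUMBER",
    "\t\tchecktoken(\"expr\", NUMBER)",
    "\telse",
    "\t\tputs \"error\"",
    "\t\texit(1)",
    "\tend",
    "end",
]
print("\n".join(expected))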
Example No. 4
 def __init__(self):
     Token.__init__(self)
     
     params = urllib.urlencode({})
     headers = { "X-Auth-Token":self.apitoken, "Content-type":"application/json" }
     self.conn = httplib.HTTPConnection(self.apiurlt[1])
     self.conn.request("GET", "%s/images/detail?" % self.apiurlt[2], params, headers)
 
     self.response = self.conn.getresponse()
     self.image_metadata = self.response.read()
     self.image_metadata = json.loads(self.image_metadata)
     self.conn.close()
Example No. 5
    def testSimulationSimpleConflictsPetrinet(self):
        """ Test token priority: on one place has a token two transition choices. We impose one choice
        """
        pn = build_simple_conflicts()
        tok0 = Token(name='tok')
        p0 = pn.getPlace('p0')
        tok0.addPriority(p0, pn.getTransition('t0'))
        pn.addToken(p0, tok0)

        pn.simulation(show=False)
        self.assertIn('tok', map(lambda tok: tok.name, pn.getPlace('p1').token))
        self.assertFalse(pn.getPlace('p0').token)
        self.assertFalse(pn.getPlace('p2').token)
Example No. 6
    def token(self):
        if self._reload:
            self.initialize()

        from Token import Token
        token = Token()

        token.tid = self.tid
        token.date = self.date
        token.locator = self.locator.encode(self.tid, self.date)

        self._info.log("issued token: %s" % token)
        self.tid += 1

        return token
Example No. 7
 def generate_number(self):
     points = 0
     number = ''
     while self.current_char != None and (self.current_char == '.' or self.current_char.isnumeric()):
         if self.current_char == '.':
             points += 1
             if points > 1:
                 print(f"Too many decimal points in one number: '{points}'")
                 exit()
         number += self.current_char
         self.advance()
     if points > 0:
         if number.startswith('.'):
            number = '0' + number
         return Token("NUMBER", float(number))
     return Token("NUMBER", int(number))
Example No. 8
    def Retrieve(self, customer):
        """ Will return all cards by customer with masked account/card number.

        :param customer: Customer unique name
        :return: all cards by customer
        """

        r = requests.get(
            url='https://sandbox.payfabric.com/rest/v1/api/wallet/get/' +
            customer,
            headers={
                'Content-Type': 'application/json; charset=utf-8',
                'authorization': Token().Create()
            })

        print r.status_code, r.text

        #
        # Sample response
        # ------------------------------------------------------
        # Response text is an array of card object with json format
        # Go to https://github.com/PayFabric/APIs/wiki/API-Objects#card for more details about card object.
        # ------------------------------------------------------
        #
        return r.json()
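
The same request can be sketched without the enclosing class; the customer name below is a placeholder and the authorization value stands in for whatever Token().Create() returns.

import requests

r = requests.get(
    url='https://sandbox.payfabric.com/rest/v1/api/wallet/get/ARRONFIT0003',
    headers={
        'Content-Type': 'application/json; charset=utf-8',
        'authorization': '<value returned by Token().Create()>'
    })
print(r.status_code)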
Example No. 9
    def Remove(self, cardId):
        """ Removed card is not recoverable

        :param cardId: Card guid
        :return: removing result
        """

        r = requests.get(
            url='https://sandbox.payfabric.com/rest/v1/api/wallet/delete/' +
            cardId,
            headers={
                'Content-Type': 'application/json; charset=utf-8',
                'authorization': Token().Create()
            })

        print r.status_code, r.text

        # Sample response
        # ------------------------------------------------------
        #{
        #     "Result":"true"
        #}
        # ------------------------------------------------------
        #
        return r.json()
Example No. 10
 def line2tokens(self, line):
     tokens = []
     #
     # long length operator
     #
     for op in long_operators:
         line = line.replace(op, long_operators[op])
     #
     # add space around operators
     #
     for token in space_tokens:
         line = line.replace(token, ' ' + token + ' ')
     #
     # split line into tokens
     #
     divided_line = line.strip().split()
     #
     # work with token
     #
     for item in divided_line:
         if item in token_type:
             # special token
             token_kind = token_type[item]
         else:
             if item.isdigit():
                 # integer constant
                 token_kind = 'integerConstant'
             else:
                 token_kind = 'identifier'
         token = Token(token_kind, item)
         tokens.append(token)
     return tokens
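
The pad-and-split idea at the heart of line2tokens can be sketched standalone; the operator table below is a minimal assumption, not the source's.

space_tokens = ['+', '=', ';']  # hypothetical: the real table is not shown

def split_line(line):
    # pad every operator with spaces, then split on whitespace
    for token in space_tokens:
        line = line.replace(token, ' ' + token + ' ')
    return line.strip().split()

print(split_line("let x=x+1;"))  # ['let', 'x', '=', 'x', '+', '1', ';']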
Example No. 11
def createInstanceObjectList(processed_dataset):
    '''
    Creates a list of Instance objects from the tokenized input + label
    '''
    print('Reading instances...')
    instanceObjects = []

    #Els: read in tokenised lines
    #processed_data = []
    for item in processed_dataset:
        tokenized = []
        content = ''  # guard: stays empty when the line is blank
        line = item[1]
        data = line.strip('\n')
        if data:
            all_words = word_tokenize(data)
            content = ' '.join([str(elem) for elem in all_words])
        label = item[2]
        #processed_data.append(tokenized + '\t' + str(label))
        instanceObject = Instance(content, label)
        for i, token in enumerate(content.split()):
            instanceObject.tokenDictionary[i + 1] = Token(token)
        if FeatureSelection.getInstance(featureFile).normalizeInstances:
            instanceObject.tokenDictionary = instanceObject.normalizeTokens()
        instanceObjects.append(instanceObject)
    return instanceObjects
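
The expected shape of processed_dataset follows from the indexing above: item[1] is the raw line and item[2] the label. A hypothetical two-row input (index 0 is a guess; it is never read):

processed_dataset = [
    (0, "This film was great\n", 1),
    (1, "Dull and far too long\n", 0),
]
instances = createInstanceObjectList(processed_dataset)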
Example No. 12
	def __word(self):
		value = u""
		self.file_readable = False

		while self.curr_char.isalnum():
			value += self.curr_char
			self.__set_curr_char()

		if value == "null":
			return Token(Token.Token_Type.NULL, value)
		elif value == "true" or value == "false":
			return Token(Token.Token_Type.BOOL, value)
		elif value.isnumeric():
			return Token(Token.Token_Type.INT, value)
		else:
			return Token(Token.Token_Type.COMMAND, value)
Example No. 13
    def __init__(self, InputFile):

        self._Tokens = []
        self._CurrentIndex = 0

        # read in the text from the input file
        jackText = InputFile.read()

        # remove comments
        jackText = re.sub("(//.*\n)|(/\*(.|\n)*?\*/)", "", jackText).strip()

        jackText = re.sub("\"", "\"\n", jackText)

        # get quoteGroups
        quoteGroups = re.findall(r"\"\n.*\"", jackText)
        i = 0
        while i < len(quoteGroups):
            quoteGroups[i] = re.sub("\n", "", quoteGroups[i])
            quoteGroups[i] = re.sub("\"", "", quoteGroups[i])
            i = i + 1

        # add space around symbols
        for symbol in self._Symbols:
            jackText = re.sub("\\" + symbol, " " + symbol + " ", jackText)

        # replace '\'
        jackText = re.sub(r"\\", " \ ", jackText)

        #tokenize the text and store in a temp list
        tempTokens = jackText.split()

        i = 0
        j = 0
        # find type of token in order to make new token
        while i < len(tempTokens):
            if tempTokens[i] == "\"":
                del tempTokens[i]
                while (tempTokens[i] != "\""):
                    del tempTokens[i]
                tempTokens[i] = quoteGroups[j]
                j = j + 1
                temptokentype = Token.StringConstant
            elif tempTokens[i] in self._Keywords:
                temptokentype = Token.Keyword
            elif tempTokens[i] in self._Symbols:
                temptokentype = Token.Symbol
            elif re.match("\d+", tempTokens[i]):
                temptokentype = Token.IntegerConstant
            elif re.match("[a-zA-Z_]+[a-zA-Z0-9_]*", tempTokens[i]):
                temptokentype = Token.Identifier
            else:
                try:
                    raise Exception("Unknown token type: " + tempTokens[i] +
                                    "\n")
                except Exception, err:
                    sys.stderr.write(str(err))
                    return
            # create Tokens of type token and store in _Tokens
            self._Tokens.append(Token(temptokentype, tempTokens[i]))
            i = i + 1
Example No. 14
	def execute(self):
		token = Token.get()
		apiResult = self.translate(token)

		result = self.parseXml(apiResult)

		return result
Example No. 15
    def load_data(self, data_packet: dict):
        """Loads tokens, grouped_tokens, regions, tokens_by_block_and_line using a data packet. Raises an error if data is already populated"""
        existing_data = [
            self.tokens,
            self.grouped_tokens,
            self.regions,
            self.tokens_by_block_and_line,
        ]

        if any(existing_data):  # If any of the data already exists in the invoicePage
            raise Exception(
                "InvoicePage data loading error: Data already exists in InvoicePage object. Data can only be loaded onto a fresh InvoicePage"
            )

        if not all(data for key, data in data_packet.items()):
            # Not all data in data_packet is present: we probably didn't do
            # OCR for this page previously, so just return
            return

        create_tokens_from_dict = lambda dictionary: Token(**dictionary)

        self.tokens = list(map(create_tokens_from_dict, data_packet["tokens"]))
        self.grouped_tokens = list(
            map(create_tokens_from_dict, data_packet["grouped_tokens"]))
        self.regions = list(
            map(create_tokens_from_dict, data_packet["regions"]))
        self.tokens_by_block_and_line = {
            block_num: {
                line_num: list(map(create_tokens_from_dict, line_tokens))
                for line_num, line_tokens in block_data.items()
            }
            for block_num, block_data in
            data_packet["tokens_by_block_and_line"].items()
        }
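
The packet keys follow from the reads above. Note the all() guard: a packet with any empty value (e.g. a page that was never OCRed) is skipped silently. A hypothetical packet exercising that early-return path:

data_packet = {
    "tokens": [],            # would hold dicts of Token keyword arguments
    "grouped_tokens": [],
    "regions": [],
    "tokens_by_block_and_line": {},  # block_num -> {line_num: [token dicts]}
}
page.load_data(data_packet)  # `page` is a hypothetical InvoicePage; returns early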
Example No. 16
    def get_instances(self, folder):

        instances = []
        labels = set()
        for author in os.listdir(folder):
            path = folder + "/" + author + "/agree-sent/"
            path_pos = folder + "/" + author + "/pos/"
            if os.path.exists(path) and os.path.exists(path_pos):
                for af in os.listdir(path):
                    current = os.path.join(path, af)
                    current_pos = os.path.join(
                        path_pos,
                        af.split('.')[0] + '.sent.okpuncs.props.pos')
                    if os.path.isfile(current) and os.path.isfile(current_pos):
                        agree_data = open(current, "rb")
                        pos_data = open(current_pos, "rb").readlines()
                        for x in agree_data:
                            x = x.strip()
                            id = int(x.split("@")[0])
                            y = pos_data[id].strip()
                            label = int(x.split("@")[1])
                            text = x.split("@")[2]
                            inst = Instance(text, label)
                            for tagtoken in y.split("):("):
                                tag = tagtoken.split(" ")[0].lstrip("(")
                                token = tagtoken.split(" ")[1]
                                token = Token(token, tag)
                                inst.add_token(token)
                            instances.append(inst)
                            labels.add(label)

        return instances, labels
Example No. 17
    def test_fit_known_tokens_create_token_to_the_right_and_left_between_2_tokens(
            self):
        # Create a token that starts inside the first word and ends inside the second
        text = 'xto par'  # spans 'Te-xto' and 'par-a'
        init = self._text.find(text)
        end = init + len(text) - 1
        known_token = Token(text, init, end, "teste")

        # Only the first sentence will be used
        text = self._st1._text

        pipeline = NerCorpusPipeline(text, [known_token])
        pipeline.apply_processing_rules()

        word_tokens_after = pipeline.word_tokens

        # Ensure the words have a valid structure
        for token in word_tokens_after:
            self.assertTrue(self._text[token._init_index:token._end_index +
                                       1] == token._text)

        # Ensure the known tokens have a valid structure
        for token in pipeline.known_tokens:
            self.assertTrue(self._text[token._init_index:token._end_index +
                                       1] == token._text)

        #self.assertTrue(len(word_tokens_after) == 4)
        self.assertTrue(
            set(['Te', 'xto', 'par', 'a', 'teste', '.']) == set([
                t._text for t in utils.sort_tokens(pipeline.known_tokens +
                                                   word_tokens_after)
            ]))
Example No. 18
    def test_merge_2_sentences(self):
        # Create a token positioned between the first and second sentence
        text = 'teste. Este'
        init = self._text.find(text)
        end = init + len(text) - 1
        known_token = Token(text, init, end, "teste")

        pipeline = NerCorpusPipeline(self._text, [known_token])
        pipeline.apply_processing_rules()

        sentence_tokens = pipeline.sentences_tokens

        # Ensure the sentences have a valid structure
        for token in sentence_tokens:
            self.assertTrue(self._text[token._init_index:token._end_index +
                                       1] == token._text)

        # Ensure sentence was correctly merged
        self.assertTrue(len(sentence_tokens) == 2)
        # 1 merged
        self.assertTrue([
            t for t in sentence_tokens if t._text ==
            self._text[self._st1._init_index:self._st2._end_index + 1]
        ][0])
        # 1 untouched
        self.assertTrue(
            [t for t in sentence_tokens if t._text == self._st3._text][0])
Example No. 19
    def program(self):
        """program -> assign | if_condition"""
        program_token = Token('program', Types.Program, 'program', 0, 0)
        program = AstNode(program_token)
        while True:
            future_token = self._future_token(0)
            if future_token is None:
                break
            elif future_token.value == 'if':
                tree = self.if_condition()
            elif future_token.value == 'while':
                tree = self.while_cycle()
            elif future_token.value == 'for':
                tree = self.for_cycle()
            elif future_token.value in types:
                if self._future_token(2).value == '(':
                    tree = self.assign_function()
                elif self._future_token(2).value == ';':
                    tree = self.assign()
                else:
                    tree = self.assign_with_init()

            else:
                if self._future_token(1).value == '=':
                    tree = self.init()
                else:
                    tree = self.expression()
            program.add_child(tree)
        return program
Example No. 20
def get_page(url, user_use=False):
    web = urllib.request.urlopen(url)
    data = str(web.read())
    data = data[2:-1]
    if user_use:
        return Token(data, STRING)
    return data
Example No. 21
    def advanced_variable_declaration(self):
        type = self.current_token.get_type()
        type_node = Type(self.current_token)
        self.eat(self.current_token.get_type())

        id = self.current_token
        var_node = Identifier(self.current_token)
        self.eat(TokenType.IDENTIFIER)

        if (self.current_token.get_type() == TokenType.EQUAL):
            token = self.current_token
            self.eat(TokenType.EQUAL)

            if (type == TokenType.LIST):
                self.eat(TokenType.LEFT_BRACKET)
                right = self.recognize_list()
                self.eat(TokenType.RIGHT_BRACKET)

                assign_node = Assign(var_node, token, right)
                var_decl = VarDecl(var_node, type_node, assign_node)
                return Variable([var_decl])

        # They are not assigning so we have to initialize an empty list
        if (type == TokenType.LIST):
            token = Token(TokenType.EQUAL, TokenType.EQUAL,
                          self.current_token.get_line(),
                          self.current_token.get_line())
            right = List([])
            assign_node = Assign(var_node, token, right)
            return Variable([VarDecl(var_node, type_node, assign_node)])
Example No. 22
    def assign_function(self):
        """function -> assign '(' args ')' block"""
        function_token = Token('function', Types.Function, 'function',
                               self._curr_token().start_pos,
                               self._curr_token().num_line)

        type_token = self._curr_token()
        id = self._future_token(1)

        assign = self.assign(semicolon=False)
        self.parenthesis('(')
        args = self.args()
        self.parenthesis(')')

        func = self.Function(type_token.value, id.value, args)
        self.functions.append(func)

        block = self.block(_return=(type_token.value != 'void'))

        function = AstNode(function_token)
        function.add_child(assign)
        function.add_child(args)
        function.add_childs(block)

        return function
Example No. 23
    def Process(self, transactionKey):
        """ Process a pre-saved PayFabric transaction

        :param transactionKey: PayFabric transaction which is ready to process
        :return: process result (JSON object)
        """

        r = requests.get(
            url='https://sandbox.payfabric.com/rest/v1/api/transaction/process/'
            + transactionKey,
            headers={
                'Content-Type': 'application/json; charset=utf-8',
                'authorization': Token().Create()
            })

        print r.status_code, r.text

        # "result" of HttpRequest is a JSON text similar with following format.
        #
        # {
        #    "AVSAddressResponse":"Y",
        #    "AVSZipResponse":"Y",
        #    "AuthCode":"010010",
        #    "CVV2Response":"Y",
        #    "IAVSAddressResponse":"Y",
        #    "Message":"APPROVED",
        #    "OriginationID":"987220999",
        #    "RespTrxTag":"",
        #    "ResultCode":"0",
        #    "Status":"Approved",
        #    "TrxDate":"",
        #    "TrxKey":"140500229001"
        #}
        #
        return r.json()
Example No. 24
	def make_number(self):
		num_string = ''
		dot_count = 0
		position_start = self.position.copy()

		while self.current_char != None and self.current_char in TokenType.DIGITS + '.':
			if self.current_char == '.':
				if dot_count == 1: break
				dot_count += 1
			num_string += self.current_char
			self.advance()

		if dot_count == 0:
			return Token(TokenType.INTEGER, int(num_string), position_start, self.position)
		else:
			return Token(TokenType.FLOAT, float(num_string), position_start, self.position)
Example No. 25
 def get_instances(self, folder):
     # happiness vs. joy: the "hp" label is mapped onto "joy"
     labels_dict = {
         "hp": "joy",
         "sd": "sadness",
         "ag": "anger",
         "dg": "disgust",
         "sp": "surprise",
         "fr": "fear"
     }
     instances = []
     labels = set()
     tagger = PerceptronTagger()  # load nltk perceptron just once to speed up tagging
     with open(folder) as f:
         for line in f:
             label, id, text = line.strip().split(" ", 2)  # split on the first two spaces only
             if label == "ne":  # ignore no emotion
                 continue
             inst = Instance(text, labels_dict[label])
             inst_tokenized = word_tokenize(text)
             inst_tagged = tagger.tag(inst_tokenized)
             for tokentag in inst_tagged:
                 token = Token(tokentag[0], tokentag[1])
                 inst.add_token(token)
             instances.append(inst)
             labels.add(label)
     return instances, labels
Example No. 26
class Server(WebSocketHandler):

    users = set()  # container holding the users currently online
    scanner = Scanner()
    token_cls = Token()
    RestartServer = RestartServer()

    def open(self):
        logger.info("client " + self.request.remote_ip + " connected")
        self.users.add(self)  # add the user to the container once the connection is established

    def on_message(self, message):
        logger.info("Recive message: " + message)
        message = message.split('?s=')
        msg_type = message[0]
        if len(message) > 1:
            msg_info = message[1]

        if msg_type == "GET_DATA":
            data = self.scanner.run()

        if msg_type == "GET_TOKEN":
            data = self.token_cls.get_token(msg_info)

        if msg_type == "RESTART":
            data = self.RestartServer.run(msg_info, self)

        self.write_message(data)

    def on_close(self):
        logger.info("client " + self.request.remote_ip + " cloed")
        self.users.remove(self)  # 用户关闭连接后从容器中移除用户

    def check_origin(self, origin):
        return True  # allow cross-origin WebSocket requests
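
On the wire, messages are plain strings split on '?s='. A hedged client sketch using the websocket-client package (the host and port are assumptions):

import websocket  # pip install websocket-client

ws = websocket.create_connection("ws://localhost:8888/")
ws.send("GET_TOKEN?s=some_user")  # msg_type 'GET_TOKEN', msg_info 'some_user'
print(ws.recv())
ws.close()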
Example No. 27
 def get_instances(self, label_file, xml_file):
     instances = []
     labels_final = set()
     tagger = PerceptronTagger()  # load nltk perceptron just once to speed up tagging
     labels_dict = {
         0: "anger",
         1: "disgust",
         2: "fear",
         3: "joy",
         4: "sadness",
         5: "surprise"
     }
     tree = ET.parse(xml_file)
     root = tree.getroot()
     with open(label_file) as f:
         for sent, line in izip(root, f):
             id_xml = sent.attrib.values()[0]
             id_labels = line.rstrip().split()
             id_file = id_labels[0]
             if id_xml == id_file:
                 for i in sent.itertext():
                     text = i
                 labels = id_labels[1:]
                 label = labels.index(
                     str(max([int(label) for label in labels])))
                 inst = Instance(text, labels_dict[label])
                 inst_tokenized = word_tokenize(text)
                 inst_tagged = tagger.tag(inst_tokenized)
                 for tokentag in inst_tagged:
                     token = Token(tokentag[0], tokentag[1])
                     inst.add_token(token)
                 instances.append(inst)
                 labels_final.add(label)
         return instances, labels_final
Example No. 28
 def addToken(self, tipo, valor):
     #print("|"+valor+"|")
     nuevo = Token(tipo, valor)
     self.lista_tokens.append(nuevo)
     self.caracterActual = ""
     self.estado = 0
     self.lexema = ""
Example No. 29
    def Retrieve(self, gatewayAccountId):
        """ Retrieve a gateway account profile. Gateway account guid is generated by PayFabric,
            And 3rd party application may not keep this unique identifier. However developers can
            still retrieve these unique identifiers by get all gateway account profiles by customer.

            Keywords:
                gatewayAccountId - GUID of gateway account profile
        """

        r = requests.get(
            url='https://sandbox.payfabric.com/payment/api/setupid/' +
            gatewayAccountId,
            headers={
                'Content-Type': 'application/json; charset=utf-8',
                'authorization': Token().Create()
            })

        #
        # Sample response
        # ------------------------------------------------------
        # Response text is a gateway account object with json format
        # Go to https://github.com/PayFabric/APIs/blob/master/PayFabric/Sections/Objects.md#gateway-account-profile
        # for more details about gateway account object.
        # ------------------------------------------------------
        #
        return r.json()
Example No. 30
    def Refund(self):
        """ Refund a customer if the original transaction is already settled.
        """

        data = {
            'Customer': 'ARRONFIT0003',
            'Currency': 'USD',
            'Amount': '10.05',
            'Type': 'Credit',
            'SetupId': 'Paypal',
            'Card': {
                'Account': '5555555555554444',
                'Cvc': '1453',
                'Tender': 'CreditCard',
                'CardName': 'MasterCard',
                'ExpDate': '0115',
                'CardHolder': {
                    'FirstName': 'Jason',
                    'LastName': 'Zhao',
                },
                'Billto': {
                    'Zip': '22313',
                    'Country': 'US',
                    'State': 'CA',
                    'City': 'ANAHEIM',
                    'Line1': '2099 S State College Blvd',
                    'Email': '*****@*****.**'
                }
            }
        }

        r = requests.post(
            url='https://sandbox.payfabric.com/rest/v1/api/transaction/process',
            data=json.dumps(data),
            headers={
                'Content-Type': 'application/json; charset=utf-8',
                'authorization': Token().Create()
            })

        print r.status_code, r.text

        # Sample response is similar to below
        #
        # {
        # "AVSAddressResponse": null,
        #    "AVSZipResponse": null,
        #    "AuthCode": null,
        #    "CVV2Response": null,
        #    "IAVSAddressResponse": null,
        #    "Message": "Approved",
        #    "OriginationID": "A70E6C184BA5",
        #    "RespTrxTag": null,
        #    "ResultCode": "0",
        #    "Status": "Approved",
        #    "TrxDate": "5\/31\/2014 3:17:27 PM",
        #    "TrxKey": "140531067716"
        #}
        #
        return r.json()
Example No. 31
 def readToken(self):
     cursor = self.connection.cursor()
     cursor.execute("SELECT * FROM token")
     result = cursor.fetchone()
     cursor.close()
     if result == None:
         return None
     return Token.createFromDDIC(result)
Example No. 32
def criarToken(placa):
	if(validarPlaca(placa)):
		numero = randint(1111,9999)
		tempo = datetime.datetime.now()
		adicionarToken(Token(numero, tempo, placa))
		return numero
	else:
		return False
Example No. 33
    def scan_tokens(self):
        """Tokenizes the source code passed to a list of tokens"""
        while not self.at_end():
            self.start = self.current
            self.scan_token()

        self.tokens.append(Token(TokenType.EOF, "", None, self.line))
        return self.tokens
Example No. 34
 def get_int(self, op, type=0):
     op = op.lower()
     if op[:2] in ("&h", "0x"): op = int("0x" + op[2:], 16)
     try:
         int(op)
     except:
         raise SyntaxError, "not an int: '%s'" % op
     return Token(Token.T_CINT, str(op))
Example No. 35
    def getInteger(self):
        result = ""
        offset = 0
        while self.pos + offset < len(self.text) and self.text[self.pos + offset].isdigit():
            result += self.text[self.pos + offset]
            offset += 1

        return Token("int", int(result))
Example No. 36
    def getVar(self):
        result = ""
        offset = 0
        while self.pos + offset < len(self.text) and self.text[self.pos + offset].isalnum():
            result += self.text[self.pos + offset]
            offset += 1

        return Token("id", result)
Example No. 37
 def createToken(self, power, toughness):
     roll = random.randint(1, 10) + self.chartModifier
     # clip the modified roll to fit in the chart
     if roll < 1: roll = 1
     if roll > 16: roll = 16
     text = self.tokenChart[roll]
     token = Token(0, power, toughness, extraText=text, x=25, y=650)
     self.mainScreen.tokens.append(token)
Example No. 38
    def __init__(self, module):
        self.default = None
        self._module = module
        self._tokens = {}
        self._tokensByGroup = [] # tuple list (group, token)
        #self._closeTokens = []

        # ordered list from tokens
        # lowest width first
        self._ordered = []

        self._makeTokenStructs(getattr(module, "TOKENS"))

        if self.default is None:
            deft = Token("(?P<begin>)")
            deft.name = self.DEFAULT_TOKEN_NAME
            self.default = deft
            self._tokens[deft.name] = deft
            self._addToIndex(deft)
Example No. 39
class TokenTest(unittest.TestCase):
    """ test everything about Token class
    """
    def setUp(self):
        self.token = Token(name='name_castle_city', show=True, fire=True)

    def testCopy(self):
        """ does token.copy() copy everything?
        """
        tok = self.token.copy()
        for key in tok.__dict__.iterkeys():
            self.assertNotEqual(tok.__dict__[key] or '', self.token.__dict__[key] or {})
Example No. 40
 def __init__(self, name='no name', logger=logging, show=True, fire=True):
     Token.__init__(self, name=name, logger=logger, show=show, fire=fire)
     self.placeClocks = {}
     """ We save inside a place as key and associated to this place the time that the token will live on this place.
         We can add a place's clock using the method addPlaceClock()
     """
     self.transitionClocks = {}
     """ We save inside a transition as key and associated to this transition the time that the token will live
         on this transition. We can add a transition's clock using the method addTransitionClock()
     """
     self.pclock = 0.0
     """ It represents the time that the tokens lived on the current place on the TimedPetriNet during a simulation.
         It is reinitialized to 0.0 when the token change its current place
     """
     self.tclock = {}
     """ It represents the time that the tokens lived on the current transitions on the TimedPetriNet
         during a simulation. Only the transition that can fire this token are save inside
     """
     self.currentClock = 0.0
     """It represents how much time lived the token in the TimedPetriNet during a simulation
     """
     self.minimumStartingTime = {}
     """The token can't be fired by the given transition before the associated time
Example No. 41
	def nextToken(self):
		if self.input is None or len(self.input) == 0:
			return
		tok = Token("", "")
		char = self.getChar()
		self.removeChar()
		self.addChar(char, tok)
		
		if char != None:
			if char.isdigit():
				for c in self.input:
					char = self.getChar()
					if char.isdigit():
						self.removeChar()
						self.addChar(char, tok)
					else:
						break
			elif char.isalpha():
				for c in self.input:
					if tok.lexeme == "print":
						tok.tCode = "PRINT"
					elif tok.lexeme == "end":
						tok.tCode = "END"
					else:
						char = self.getChar()
						if char.isalpha():
							self.removeChar()
							self.addChar(char, tok)
						else:
							break
			else:
				if self.isOneCharToken(char) == False:
					char = self.getChar()
					self.addChar(char, tok)
		self.lookUp(tok)
		
		return tok
Example No. 42
    def testSimulationParallelChainPetrinet(self):
        """ Test the fireheritance: a token could wait that another token hs been fired by a transition
            on a given place to have the right to be fired
        """
        # Without fireHeritance
        pn = build_parallel_chain_petrinet(size=1, branchs=2)
        tok0 = Token(name='tok0')
        tok1 = Token(name='tok1', fire=False)
        pn.addToken(pn.getPlace('p_0_0'), tok0)
        pn.addToken(pn.getPlace('p_1_0'), tok1)

        pn.simulation(show=False)

        self.assertIn('tok1', map(lambda tok: tok.name, pn.getPlace('p_1_0').token))

        # With fireHeritance
        pn = build_parallel_chain_petrinet(size=1, branchs=2)
        tok0 = Token(name='tok0')
        tok1 = Token(name='tok1', fire=False)
        tok0.addFireHeritance('tok1', pn.getPlace('p_1_0'), pn.getTransition('t_0_0'))
        pn.addToken(pn.getPlace('p_0_0'), tok0)
        pn.addToken(pn.getPlace('p_1_0'), tok1)

        ets = pn.enabledTransitionsSet()
        self.assertIn('tok0', map(lambda tok: tok.name, pn.getPlace('p_0_0').token))
        self.assertIn('tok1', map(lambda tok: tok.name, pn.getPlace('p_1_0').token))
        self.assertFalse(tok1.fire)

        pn.oneFireSimulation(ets)
        self.assertFalse(pn.getPlace('p_0_0').token)
        self.assertIn('tok1', map(lambda tok: tok.name, pn.getPlace('p_1_0').token))
        self.assertTrue(tok1.fire)

        pn.oneFireSimulation(ets)
        self.assertFalse(pn.getPlace('p_0_0').token)
        self.assertFalse(pn.getPlace('p_1_0').token)
Example No. 43
    def _makeTokenStructs(self, tokens, parent=None):

        for name, value in tokens.iteritems():

            if isinstance(value, Token):
                value.name = name
                value.group = parent
                value.lexer = self
                self._tokens[name] = value
                self._addToIndex(value)

            elif isinstance(value, str):
                tok = Token("(?P<begin>" + re.escape(value) + ")")
                tok.name = name
                tok.group = parent
                tok.lexer = self
                self._tokens[name] = tok
                self._addToIndex(tok)

            elif isinstance(value, TokenTable):
                self._makeTokenStructs(value.table, name)

            elif isinstance(value, dict):
                self._makeTokenStructs(value, name)
Example No. 44
	def insert(self, ParentToken, ExpectedText=None, ExpectedType=None, ChildTokenType=None):
		if(ChildTokenType == None):
			ChildToken=self.token()
		else:
			ChildToken=Token(ChildTokenType)
		
		ChildToken.Subroutine=self.CurrentSubroutine
		ChildToken.Class=self.CurrentClass

		if(ExpectedText != None and ChildToken.text() != None):
			if(ExpectedText != ChildToken.text()):
				try:
					raise Exception("Error parsing!  Expected: '"+ExpectedText+"'.  Received: '"+ChildToken.text()+"'.\n")
				except Exception, err:
					sys.stderr.write(str(err))
					return
Example No. 45
	def __init__(self, _input):
		self.token = Token(symbol='start', lexeme='start')
		self.lexer = MicroScalaLexer(_input=_input)

		self.getToken()
		self.tree = self.program()
Example No. 46
 def getId(self):
     tok1 = self.lex.getNextToken()
     self.match(tok1, TokenType.ID)
     tok2 = Token.getLexeme(tok1)
     return Id(tok2)
Example No. 47
    def analize (cls, text):
        word = []
        tokens = []
        prev_letter = ""

        # Split the text into tokens
        i = 0
        while i < len (text):
            letter = text[i]
            if letter == " ":
                if len (word) > 0:
                    token = Token ()
                    token.text = ''.join (word)
                    tokens.append (token)
                    word = []
            elif letter == "(" or \
                 letter == ")" or \
                 letter == ",":
                 #letter == "_":
                if len (word) > 0:
                    token = Token ()
                    token.text = ''.join (word)
                    tokens.append (token)
                    word = []
                token = Token ()
                token.text = letter
                tokens.append (token)
            elif letter == "." or \
                 letter == "_":
                j = i + 1
                if j == len (text):
                    if len (word) > 0:
                        token = Token ()
                        token.text = ''.join (word)
                        tokens.append (token)
                        word = []
                    token = Token ()
                    token.text = letter
                    tokens.append (token)
                while j < len (text):
                    letter = text[j]

                    if letter == " ":
                        break
                    elif letter == ")" or \
                         letter == "(" or \
                         letter == ",":
                        j = j - 1
                        break
                    if i == (j - 1):
                        if letter == " ":
                            if len (word) > 0:
                                token = Token ()
                                token.text = ''.join (word)
                                tokens.append (token)
                                word = []
                            token = Token ()
                            token.text = letter
                            tokens.append (token)
                            break
                        else:
                            word.append (text[i])
                            prev_letter = letter
                            word.append (letter)
                            j += 1
                    else:
                        prev_letter = letter
                        word.append (letter)
                        j += 1
                i = j
            elif letter == "=":
                if prev_letter == " " or \
                   prev_letter == "(":
                    if len (word) > 0:
                        token = Token ()
                        token.text = ''.join (word)
                        tokens.append (token)
                        word = []
                    token = Token ()
                    token.text = letter
                    tokens.append (token)
                else:
                    word.append (letter)
            elif letter == "?":
                if len (word) > 0:
                    token = Token ()
                    token.text = ''.join (word)
                    tokens.append (token)
                    word = []
                if prev_letter not in [" ", "("]:
                    token = Token ()
                    token.text = letter
                    tokens.append (token)
                else:
                    word.append (letter)
            elif letter == "\"":
                i += 1
                while i < len (text):
                    prev_letter = letter
                    letter = text[i]
                    if letter == "\"" and prev_letter != "\\":
                        break
                    else:
                        word.append (letter)
                    i += 1
                token = Token ()
                token.text = ''.join (word)
                token.type = TokenType.string
                tokens.append (token)
                word = []
            else:
                word.append (letter)
            prev_letter = letter
            i += 1

        # Identify the token types
        for token in tokens:
            #print token.text
            if token.type == TokenType.string:
                continue
            if token.text.find ('?') == 0 and len (token.text) > 1:
                s = token.text.replace ('?', '')
                query = "SELECT id FROM qsl_linkage WHERE name = \'" + s + "\';"
                cls.__cursor.execute (query)
                row = cls.__cursor.fetchone ()
                if row != None:
                    token.type = TokenType.linkage
                    token.linkage = TokenLinkage ()
                    token.linkage.id = row[0]
                    token.linkage.name = s
                else:
                    cls.__error_text = ErrorHelper.get_text (102, token.text)
                    return False
            elif token.text.find ('%') == 0 and len (token.text) > 1:
                token.type = TokenType.code_object
            elif token.text.find ('*') == 0:
                # Modifier
                token.type = TokenType.modifier
            elif token.text == "(":
                token.type = TokenType.opening_bracket
            elif token.text == ")":
                token.type = TokenType.closing_bracket
            elif token.text == ",":
                token.type = TokenType.comma
            elif token.text == "_":
                token.type = TokenType.underscore
            elif token.text == ".":
                token.type = TokenType.point
            elif token.text == "?":
                token.type = TokenType.question_mark
            elif token.text == "=":
                token.type = TokenType.equal_sign
            else:
                query = "SELECT id, type FROM qsl_concept WHERE name = \'" + token.text + "\';"
                cls.__cursor.execute (query)
                row = cls.__cursor.fetchone ()
                if row != None:
                    token.type = TokenType.concept
                    token.concept = TokenConcept ()
                    token.concept.id = row[0]
                    token.concept.type = row[1]
                    token.concept.name = token.text
                else:
                    if token.text.isdigit ():
                        token.type = TokenType.number
                    else:
                        cls.__error_text = ErrorHelper.get_text (103, token.text)
                        return False

        node = cls.build_tree (tokens)
        if node != None:
            cls.proposition_tree = PropositionTree ()
            cls.proposition_tree.root_node = node
        else:
            return False

        return True
Example No. 48
 def __init__(self, token_nodes, wait=2):
     super(TokenRingThread, self).__init__()
     self.__token_nodes = token_nodes
     self.__stoprequest = threading.Event()
     self.__token = Token()
     self.__wait = wait
Example No. 49
   def getToken(self):

      character, c1, c2 = self.next()

      self.__debug("%s token acquired" % c1)

      if character == None:
         return EOF

      """Let's get comments out of the way first
      Indentation doesn't care about them"""
      if c1 + c2 == "//":

         self.__debug("Comment token")

         token = Token(character)
         token.type = SHORTCOMMENT
         token.components += "/"

         #Pass the second slash
         self.next()

         character, c1, c2 = self.next()

         #Double slash comments last till the end of the line
         while c1 != "\n":
            self.__debug("Seeking to end of line")
            token.components += c1
            character, c1, c2 = self.next()

         self.__debug("Returning comment token")

         return token

      #Long comment type
      if c1 + c2 == "/*":

         self.__debug("Long comment token")

         token = Token(character)
         token.type = LONGCOMMENT
         token.components += "*"

         while c1 + c2 != "*/":
            self.__debug("Seeking to end of long comment")
            character, c1, c2 = self.next()
            token.components += c1

         token.components += "/"

         #Pass the ending slash
         self.next()

         self.__debug("Returning long comment token")

         return token

      indentAmount = 0

      #Space and Tab count equally for indentation
      #Don't care to be helpful to people who mix the styles
      if self.context == 'global' and c1 in INDENTATION:
         indentAmount += 1

         token = Token(character)
         token.type = INDENT

         self.__debug("Indentation token")

         while c2 in INDENTATION:

            self.__debug("Gathering all indentation")

            character, c1, c2 = self.next()
            #Keep adding spaces for consistency in printout
            token.components += ' '

         #Ignore indentation ending in a newline
         if c2 == "\n":
            return None

         last = self.indentStack.pop()

         self.__debug("Comparing current indentation amount to last")

         #This is a new, higher level of indentation
         if indentAmount > last:
            self.indentStack.append(last)
            self.indentStack.append(indentAmount)
         #Same indentation level; no token added
         elif indentAmount == last:
            self.indentStack.append(last)
            self.__debug("Indentation level matches")
            return None
         #This is actually a dedent; keep dedenting until the appropriate level is found
         else:
            self.__debug("Starting Dedenting")
            while indentAmount < last:
               self.__debug("Dedenting")
               tok = Token(character)
               tok.type = DEDENT
               tokens.append(tok)
               last = self.indentStack.pop()

               if indentAmount == last:
                  self.indentStack.append(last)
                  return None
               elif len(self.indentStack) == 0:
                  print "Previous indentation amount not found!"

         return token

      if self.context == 'global' and c1 == DECLARATION:

         self.__debug("Name or variable declaration")

         if c2 == ":":
            self.__debug("Name declaration")
            #skip the { and colon
            self.next()
            character, c1, c2 = self.next()
            token = Token(character)
            token.type = NAME

            character, c1, c2 = self.next()

            while c1 != END_DECLARATION:
               token.components += c1

               self.__debug("Seeking declaration end")

               if c1 in SPACING:
                  print "name declaration contains invalid character"
                  return None

               character, c1, c2 = self.next()


            #Skip }
            self.next()

            #Clear trailing whitespace
            while c2 in INDENTATION:
               self.__debug("Clearing trailing whitespace")
               character, c1, c2 = self.next()

            if c1 != "\n":
               print "name declarations must end with a newline"

               while c1 != "\n":
                  self.__debug("Recovering bad name declaration")
                  character, c1, c2 = self.next()

            return token

         else:
            self.__debug("Variable declaration")
            #Skip the brace
            character, c1, c2 = self.next()
            token = Token(character)
            token.type = VARIABLE

            while c1 != SPACE:
               self.__debug("Gathering variable name")
               character, c1, c2 = self.next()
               token.components += c1

            #Throw the space out
            self.switchContext('variable')

            self.__debug("Returning variable declaration token")

            return token

      #Collect the entire variable contents
      if self.context == 'variable':

         self.__debug("Variable value")

         if c1 == END_DECLARATION:
            print "empty variable declaration"
            self.switchContext('global')
            return None

         token = Token(character)
         token.type = VALUE

         character, c1, c2 = self.next()
         while c1 != END_DECLARATION:
            self.__debug("Scanning for remainder of variable name")

            token.components += c1

            if c1 == '\\':
               if c2 in '\\:}':
                  token.components += c2
                  self.next()

            character, c1, c2 = self.next()

            #The token up to this point is actually a function call
            if c1 == ':':
               token.type = FUNCTION
               self.switchContext('function')
               #Clear trailing whitespace
               while c2 in INDENTATION:
                  self.__debug("SCANNING2")
                  character, c1, c2 = self.next()
               return token

         #skip }
         self.next()

         #Clear trailing whitespace
         while c2 in INDENTATION:
            self.__debug("SCANNING3")
            character, c1, c2 = self.next()

         self.switchContext('global')
         return token

      if self.context == 'function':
         self.restoreContext()
         #Enclosed function arguments
         if c1 == '(':
            self.switchContext('closed-funcargs')
            #Clear the paren
            self.next()

            #Clear trailing whitespace
            while c2 in INDENTATION:
               self.__debug("SCANNING4")
               character, c1, c2 = self.next()

         else:
            self.switchContext('funcargs')

      if self.context == 'funcargs':

         token = Token(character)
         token.type = ARGUMENT

         #Add initial spacing to the token
         while c1 in SPACING:
            self.__debug("SCANNING5")
            character, c1, c2 = self.next()
            token.components += c1

         #Then, consume everything up to the next whitespace
         while not c1 in SPACING:
            self.__debug("SCANNING6")
            if c1 == '\\':
               if c2 == ' ':
                  token.components += ' '
                  self.next()
            character, c1, c2 = self.next()
            token.components += c1

         #Argument list is over
         if c1 == "\n":
            self.next()
            self.restoreContext()
            return token

         #Do not add funcarg-delimiting whitespace
         self.next()

         return token

      if self.context == 'closed-funcargs':

         token = Token(character)
         token.type = ARGUMENT

         #Add initial spacing to the token
         while c1 in SPACING:
            self.__debug("SCANNING7")
            character, c1, c2 = self.next()
            token.components += c1

         #Then, consume everything up to the next whitespace
         while not c1 in SPACING:
            self.__debug("SCANNING8")
            if c1 == '\\':
               if c2 == ' ' or c2 == ')':
                  token.components += ' '
                  self.next()
            character, c1, c2 = self.next()
            token.components += c1

            if c2 == ')':
               #Funcargs ended; return context after stripping )
               self.next()
               self.restoreContext()
               return token

         return token

      #If we have not indented at all and we aren't using a name declaration, it's a selector
      if len(self.indentStack) == 1:
         self.__debug("Selector")
         token = Token(character)

         token.type = SELECTOR

         character, c1, c2 = self.next()

         while c1 != "\n":
            self.__debug("Seeking selector")
            token.components += c1

            #Declaration starting
            if c2 == DECLARATION:
               return token

            character, c1, c2 = self.next()

         #Remove the terminating newline
         self.next()

         self.__debug("Returning selector")

         return token

      return None
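
The indentation handling above follows the usual indent-stack scheme; a compact standalone sketch of just that part, with plain strings for the token types:

def indent_tokens(amounts):
    """Emit INDENT/DEDENT for a sequence of indentation amounts."""
    stack = [0]
    out = []
    for amount in amounts:
        if amount > stack[-1]:
            stack.append(amount)
            out.append('INDENT')
        else:
            while amount < stack[-1]:
                stack.pop()
                out.append('DEDENT')
            if amount != stack[-1]:
                raise ValueError("previous indentation amount not found")
    return out

print(indent_tokens([0, 4, 8, 4, 0]))  # ['INDENT', 'INDENT', 'DEDENT', 'DEDENT']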
Example No. 50
	def __init__(self, goldPath, predictedPath=None):
		self.goldPath = goldPath
		self.predictedPath = predictedPath
		self.sents = [] # all sents in corpus
		self.sent_stats = {}
		self.numTokens = 0 # count total tokens in corpus
		self.tags = set()
		self.tokens = []
		sent = Sentence()



		if predictedPath:
			with open(goldPath) as gf, open(predictedPath) as pf:
				for gline,pline in izip(gf, pf): # open two files simultaneously
					if gline.strip() and pline.strip(): # check if lines not empty
						gtoken_tag = re.split(r'\t', gline)
						ptoken_tag = re.split(r'\t', pline)
						if gtoken_tag[0] == ptoken_tag[0]:
							token = Token(gtoken_tag[0], gtoken_tag[1].strip(), ptoken_tag[1].strip()) # create new Token object
							sent.addToken(token)
							self.numTokens += 1 
						else:
							raise Exception("Files not in sync")
					else:
						self.sents.append(sent)
						sent = Sentence()
		else:
			# store all sentences from corpus
			sentences = []
			# store a sentence that consists of tokens
			sentence = []
			with open(goldPath) as gf:
				for line in gf: 
					# check if lines not empty
					if line.strip(): 
						# split line into token and tag as list elements
						token_tag = re.split(r'\t', line)
						# add a token object into sentence
						sentence.append(Token(token_tag[0].strip(), token_tag[1].strip()))
						# count total number of tokens
						self.numTokens += 1 
					else:
						# we have reached end of sentence (empty line)
						sentences.append(sentence)
						sentence = []

			prev = "prevnotekzist"
			following = "folnotekzist"
			for j, sentence in enumerate(sentences):
				for i, token in enumerate(sentence):
					# make sure we don't go beyond sentence length
					if i+1 < len(sentence):
						following = sentence[i+1]
					# if we reached end of current sentence - take following word as first word of next sentence
					elif j+1 < len(sentences):
						following = sentences[j+1][0]
					token.setPrev(prev)
					token.setFollowing(following)
					token.getNeighborFeatures()
					# print (vars(token))
					prev = token
					sent.addToken(token)
				self.sents.append(sent)
				sent = Sentence()
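
The gold-file format implied by the parsing above is one token<TAB>tag pair per line, with a blank line terminating each sentence; a hypothetical sample:

sample = (
    "The\tDT\n"
    "cat\tNN\n"
    "sleeps\tVBZ\n"
    "\n"
    "Nice\tJJ\n"
    "\n"
)
with open("gold.tsv", "w") as f:  # hypothetical path
    f.write(sample)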
Example No. 51
 def setUp(self):
     self.token = Token(name='name_castle_city', show=True, fire=True)
Exemplo n.º 52
0
 def __init__(self):
     self.token = Token()
     self.apitoken = self.token.get_apitoken()
     self.apiurl = self.token.get_apiurl()
     self.apiurlt = urlparse(self.apiurl)
Exemplo n.º 53
0
 def __init__(self):
     Token.__init__(self)
Exemplo n.º 54
0
class F_Volume(Error):
    
    def __init__(self):
        self.token = Token()
        self.apitoken = self.token.get_apitoken()
        self.apiurl = self.token.get_apiurl()
        self.apiurlt = urlparse(self.apiurl)
    
    def List_VolumesAttachedToAnInstance(self,instance_id):
        params = urllib.urlencode({})
        headers = { "X-Auth-Token":self.apitoken, "Content-type":"application/json" }
        conn = httplib.HTTPConnection(self.apiurlt[1])
        conn.request('GET','%s/servers/%s/os-volume_attachments'%(self.apiurlt[2],instance_id),params,headers)
        response = conn.getresponse()
        volume_details = response.read()
        volume_details = json.loads(volume_details)
        conn.close()
        return volume_details
    
    def Attach_VolumeToInstance(self,instance_id,volume_id,volume_device):
        params = {
                  'volumeAttachment': {
                                       'volumeId': volume_id,
                                       'device': volume_device
                                       }
                  }
        params=json.dumps(params)
        headers = { "X-Auth-Token":self.apitoken, "Content-type":"application/json" }
        conn = httplib.HTTPConnection(self.apiurlt[1])
        conn.request('POST','%s/servers/%s/os-volume_attachments'%(self.apiurlt[2],instance_id),params,headers)
        response = conn.getresponse()
        attachment = response.read()
        attachment = json.loads(attachment)
        conn.close()
        return attachment
    
    def List_AllVolumetypes(self):
        params = urllib.urlencode({})
        headers = { "X-Auth-Token":self.apitoken, "Content-type":"application/json" }
        conn = httplib.HTTPConnection(self.apiurlt[1])
        # parenthesize the path: '%' binds tighter than '+', so without
        # parens only apiurlt[2][:2] was formatted into the string
        conn.request('GET','%s/os-volume-types' % (self.apiurlt[2][:2]+'1.1'+self.apiurlt[2][3:]),params,headers)
        response = conn.getresponse()
        volume_type = response.read()
        volume_type = json.loads(volume_type)
        conn.close()
        return volume_type
    
    def Create_Volume(self,volume_metadata):
        params = json.dumps(volume_metadata)
        headers = { "X-Auth-Token":self.apitoken, "Content-type":"application/json" }
        conn = httplib.HTTPConnection(self.apiurlt[1])
        # parenthesize the path: '%' binds tighter than '+', so without
        # parens only apiurlt[2][:2] was formatted into the string
        conn.request('POST','%s/volumes' % (self.apiurlt[2][:2]+'1.1'+self.apiurlt[2][3:]),params,headers)
        response = conn.getresponse()
        volume_details = response.read()
        volume_details = json.loads(volume_details)
        conn.close()
        return volume_details

###########################################################################################################################

    """the functions under this line is not right yet""" 
    

    def backup_volume(self,data):
        params = {
                  "snapshot": {
                               "display_name": "snap-001",
                               "display_description": "Daily backup",
                               "volume_id": "21a0e3ef-a78a-46b8-b552-ec9946df6d8a",
                               "force": True
                               }
                  }
        params = json.dumps(params)
        headers = { "X-Auth-Token":self.apitoken, "Content-type":"application/json" }
        tenant_id = '70c6e018c0d9416ba85459884adeccd2'
        test= '/v1.1/70c6e018c0d9416ba85459884adeccd2'
        conn = httplib.HTTPConnection(self.apiurlt[1])
        conn.request('POST','%s/os-snapshots'%test,params,headers)
        response = conn.getresponse()
        result = response.read()
        print result
        result = json.loads(result)
        conn.close()
        return result

    def list_vbackup(self):
        params = urllib.urlencode({})
        headers = { "X-Auth-Token":self.apitoken, "Content-type":"application/json" }
        tenant_id = '70c6e018c0d9416ba85459884adeccd2'
        test= '/v1.1/70c6e018c0d9416ba85459884adeccd2'
        conn = httplib.HTTPConnection(self.apiurlt[1])
        conn.request('GET','%s/os-snapshots'%test,params,headers)
        response = conn.getresponse()
        result = response.read()
        print result
        result = json.loads(result)
        conn.close()
        return result

    def delete_vbackup(self):
        params = urllib.urlencode({})
        headers = { "X-Auth-Token":self.apitoken, "Content-type":"application/json" }
        tenant_id = '70c6e018c0d9416ba85459884adeccd2'
        test= '/v1.1/70c6e018c0d9416ba85459884adeccd2'
        snapshot_id = '1ace60c5-5799-463b-9f64-6202372195c6'
        conn = httplib.HTTPConnection(self.apiurlt[1])
        conn.request('DELETE','%s/os-snapshots/%s'%(test,snapshot_id),params,headers)
        response = conn.getresponse()
        result = response.read()
        print result
        result = json.loads(result)
        conn.close()
        return result
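A minimal usage sketch for the working part of this class -- the instance id is a placeholder, and it assumes Token() can authenticate against a live OpenStack endpoint:

vol = F_Volume()
attached = vol.List_VolumesAttachedToAnInstance('INSTANCE-UUID')  # placeholder id
print attached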
Exemplo n.º 55
0
class MicroTree(object):
	def __init__(self, _input):
		self.token = Token(symbol='start', lexeme='start')
		self.lexer = MicroScalaLexer(_input=_input)

		self.getToken()
		self.tree = self.program()

	# getToken() : input: None, output: None
	# Obtains the next token from the lexer
	def getToken(self):
		self.token = self.lexer.nextToken()

		# Skips epsilon and comment tokens
		while self.token.symbol() in ('e', 'comment'):
			self.token = self.lexer.nextToken()

	# program() : input: None, output: instance of AST.Program()
	# Recognizes the following BNF where symbols preceded with underscores are in-language symbols
	# EOF was added to represent the end-of-file -- returned by lexer when input file is fully parsed
	# compilationUnit ::= object id _{ {def} mainDef _} EOF
	def program(self):
		function = prgm = main = None
		symbol = ''
		argList = []
		funcList = []
		decVarList = []
		name = ''

		# object
		if self.token.symbol() != 'object':
			ErrorMessage('{0} expected'.format('object'), self.lexer.position(), self.lexer.echo())
		
		self.getToken()
		
		# identifier
		if self.token.symbol() != 'identifier':
			ErrorMessage('{0} expected'.format('id'), self.lexer.position(), self.lexer.echo())

		# store identifier in name
		name = self.token.lexeme()
		self.getToken()
		
		# _{
		if self.token.symbol() != 'leftbrace':
			ErrorMessage('{0} expected'.format('{'), self.lexer.position(), self.lexer.echo())
		
		self.getToken()
		
		# {def}
		# Cycles through optional function and global variable declarations until 'def main' is reached
		while symbol != 'main':
			symbol, function = self.functionDef(name = name)
			if symbol != 'main' and function != None:
				# Check type of function
				if function.name != name and type(function) != type(AST.DecVar(name='',typ='',value='')):
					# function is a function AST.Program()
					funcList.append(function)
				else:
					# function is a declared variable AST.DecVar()
					decVarList.append(function)

		# mainDef
		main = self.mainDef(symbol)

		# _}
		if self.token.symbol() != 'rightbrace':
			ErrorMessage('{0} expected'.format('}'), self.lexer.position(), self.lexer.echo())
		
		self.getToken()

		# EOF
		if self.token.symbol() != 'EOF':
			ErrorMessage('{0} expected'.format('EOF'), self.lexer.position(), self.lexer.echo())			
		
		# Create new instance of class AST.Program()
		prgm = AST.Program(name = name, stmt = copy.deepcopy(main), argList = copy.deepcopy(argList), funcList = copy.deepcopy(funcList), decVarList = copy.deepcopy(decVarList))
		
		return prgm

	# mainDef() : input: symbol -- str(), output: instance of AST.Program()
	# Recognizes the following BNF where symbols preceded with underscores are in-language symbols
	# mainDef ::= def main ( args : Array _[ String _] ) _{ {varDef} statement {statement} _}
	def mainDef(self, symbol):
		prgm = None
		stmt1 = stmt2 = None
		argList = []
		decVarList = []
		arg = ''
		typ = ''

		# def
		if self.token.symbol() == 'def':
			self.getToken()

		# main
		# Rejects identifier that is not main and raises error
		if self.token.symbol() != 'main' and symbol != 'main':
			ErrorMessage('{0} expected'.format('main'), self.lexer.position(), self.lexer.echo())
		
		self.getToken()
		
		# (
		if self.token.symbol() == 'leftparen':
			self.getToken()

			# args
			if self.token.symbol() != 'args':
				ErrorMessage('{0} expected'.format('args'), self.lexer.position(), self.lexer.echo())
			
			# store args lexeme in args
			arg = self.token.lexeme()
			self.getToken()
			
			# :
			if self.token.symbol() != 'colon':
				ErrorMessage('{0} expected'.format(':'), self.lexer.position(), self.lexer.echo())
			
			self.getToken()
			
			# Array
			if self.token.symbol() != 'array':
				ErrorMessage('{0} expected'.format('Array'), self.lexer.position(), self.lexer.echo())
			
			# store type of args in typ
			typ = self.token.lexeme()
			self.getToken()
			
			# [
			if self.token.symbol() != 'leftbracket':
				ErrorMessage('{0} expected'.format('['), self.lexer.position(), self.lexer.echo())
			
			typ += ' ' + self.token.lexeme()
			self.getToken()
			
			# String
			if self.token.symbol() != 'string':
				ErrorMessage('{0} expected'.format('String'), self.lexer.position(), self.lexer.echo())
			
			typ += ' ' + self.token.lexeme()
			self.getToken()
			
			# ]
			if self.token.symbol() != 'rightbracket':
				ErrorMessage('{0} expected'.format(']'), self.lexer.position(), self.lexer.echo())
			
			typ += ' ' + self.token.lexeme()
			self.getToken()
			
			# )
			if self.token.symbol() != 'rightparen':
				ErrorMessage('{0} expected'.format(')'), self.lexer.position(), self.lexer.echo())
			
			self.getToken()

		# _{
		if self.token.symbol() != 'leftbrace':
			ErrorMessage('{0} expected'.format('{'), self.lexer.position(), self.lexer.echo())
		
		self.getToken()

		# {varDef}
		while self.token.symbol() == 'var':
			var = self.varDef()
			if var != None:
				decVarList.append(var)

		# statement
		stmt1 = self.statement()

		# {statement}
		while self.token.symbol() != 'rightbrace':
			stmt2 = self.statement()
			stmt1 = AST.Statement(copy.deepcopy(stmt1), copy.deepcopy(stmt2))

		# _}
		if self.token.symbol() != 'rightbrace':
			ErrorMessage('{0} expected'.format('}'), self.lexer.position(), self.lexer.echo())

		self.getToken()

		argList.append(AST.DecVar(name = arg, typ = typ, value = AST.NilValue()))

		prgm = AST.Program(name = 'main', stmt = copy.deepcopy(stmt1), argList = copy.deepcopy(argList), funcList = [], decVarList = copy.deepcopy(decVarList))
		
		return prgm

	# functionDef() : input: name -- str(), output: symbol -- str(), instance of AST.Program()
	# Recognizes the following BNF where symbols preceded with underscores are in-language symbols
	# def ::= def id ( [id : Type {, id : Type } ] ) : Type = _{ {varDef} {statement} return listExpr ; _}
	#        | varDef
	def functionDef(self, name):
		symbol = 'def'
		funcId = ''
		expr = None
		stmt1 = stmt2 = None
		prgm = None
		rtrn = None
		decVarList = []
		argList = []
		arg = None
		name = ''  # NOTE: this shadows the 'name' parameter, which is otherwise unused here
		typ = ''

		# def
		if self.token.symbol() == 'def':
			self.getToken()

			# identifier that is not main
			if self.token.symbol() == 'identifier' and self.token.lexeme() != 'main':
				# store lexeme in funcId
				funcId = self.token.lexeme()
				self.getToken()

				# (
				if self.token.symbol() != 'leftparen':
					ErrorMessage('{0} expected'.format('('), self.lexer.position(), self.lexer.echo())
				
				self.getToken()

				# id -- identifier of 1st declared argument to pass to function
				if self.token.symbol() == 'identifier':
					# store argument id into name
					name = self.token.lexeme()
					self.getToken()

					# :
					if self.token.symbol() != 'colon':
						ErrorMessage('{0} expected'.format(':'), self.lexer.position(), self.lexer.echo())
					
					self.getToken()

					# Type -- store argument type into typ
					typ = self.type()

					# if argument is Int, create DecVar with default int value UNDEFINED
					if typ in ['int', 'Int']:
						arg = AST.DecVar(name = name, typ = typ, value = AST.IntValue(value = UNDEFINED))
					# else argument is Array -- treat as List with default Nil value
					else:
						arg = AST.DecVar(name = name, typ = typ, value = AST.NilValue())

					# add argument to argument list
					argList.append(arg)

					# ,
					while self.token.symbol() == 'comma':
						self.getToken()

						# id -- identifier of 2nd+ declared argument to pass to function
						if self.token.symbol() != 'identifier':
							ErrorMessage('{0} expected'.format('id'), self.lexer.position(), self.lexer.echo())
						
						# store argument id into name
						name = self.token.lexeme()
						self.getToken()

						# :
						if self.token.symbol() != 'colon':
							ErrorMessage('{0} expected'.format(':'), self.lexer.position(), self.lexer.echo())
						
						self.getToken()

						# Type -- store argument type into typ
						typ = self.type()

						# if argument is Int, create DecVar with default int value UNDEFINED
						if typ in ['int', 'Int']:
							arg = AST.DecVar(name = name, typ = typ, value = AST.IntValue(value = UNDEFINED))
						# else argument is Array -- treat as List with default Nil value
						else:
							arg = AST.DecVar(name = name, typ = typ, value = AST.NilValue())

						# add argument to argument list
						argList.append(arg)

				# )
				if self.token.symbol() != 'rightparen':
					ErrorMessage('{0} expected'.format(')'), self.lexer.position(), self.lexer.echo())
				
				self.getToken()

				# :
				if self.token.symbol() != 'colon':
					ErrorMessage('{0} expected'.format(':'), self.lexer.position(), self.lexer.echo())
				
				self.getToken()

				# Type
				self.type()

				# =
				if self.token.symbol() != 'assign':
					ErrorMessage('{0} expected'.format('='), self.lexer.position(), self.lexer.echo())
				
				self.getToken()

				# _{
				if self.token.symbol() != 'leftbrace':
					ErrorMessage('{0} expected'.format('{'), self.lexer.position(), self.lexer.echo())
				
				self.getToken()

				# {varDef}
				while self.token.symbol() == 'var':
					var = self.varDef()
					if var != None:
						decVarList.append(var)

				# {statement}
				while self.token.symbol() != 'return':
					stmt2 = self.statement()

					if stmt1 == None:
						stmt1 = stmt2
					else:
						stmt1 = AST.Statement(stmt = copy.deepcopy(stmt1), stmt2 = copy.deepcopy(stmt2))

				# return
				if self.token.symbol() != 'return':
					ErrorMessage('{0} expected'.format('return'), self.lexer.position(), self.lexer.echo())

				self.getToken()
				
				# listExpr
				expr = self.listExpr()

				# Create instance of AST.Return() object
				rtrn = AST.Return(expr = copy.deepcopy(expr))

				if stmt1 == None:
					stmt1 = rtrn
				else:
					stmt1 = AST.Statement(stmt = copy.deepcopy(stmt1), stmt2 = copy.deepcopy(rtrn))

				# ;
				if self.token.symbol() != 'semicolon':
					ErrorMessage('{0} expected'.format(';'), self.lexer.position(), self.lexer.echo())

				self.getToken()

				# _}
				if self.token.symbol() != 'rightbrace':
					ErrorMessage('{0} expected'.format('}'), self.lexer.position(), self.lexer.echo())

				self.getToken()

				prgm = AST.Program(name = funcId, stmt = copy.deepcopy(stmt1), argList = copy.deepcopy(argList), funcList = [], decVarList = copy.deepcopy(decVarList))

			else:
				symbol = 'main'

		# varDef
		else:
			decVarList = self.varDef()

			prgm = decVarList

		return symbol, prgm

	# varDef() : input: None, output: instance of AST.DecVar()
	# Recognizes the following BNF where symbols preceded with underscores are in-language symbols
	# VarDef ::= var id : Type = Literal ;
	def varDef(self):
		var = None

		# var
		if self.token.symbol() == 'var':
			self.getToken()

			# id
			if self.token.symbol() != 'identifier':
				ErrorMessage('{0} expected'.format('id'), self.lexer.position(), self.lexer.echo())

			# store variable id in v_id
			v_id = self.token.lexeme()

			self.getToken()

			# :
			if self.token.symbol() != 'colon':
				ErrorMessage('{0} expected'.format(':'), self.lexer.position(), self.lexer.echo())

			self.getToken()

			# Type -- store variable type into v_type
			v_type = self.type()

			# =
			if self.token.symbol() != 'assign':
				ErrorMessage('{0} expected'.format('='), self.lexer.position(), self.lexer.echo())

			self.getToken()

			# Literal -- store variable value into v_val
			v_val = self.literal()

			# ;
			if self.token.symbol() != 'semicolon':
				ErrorMessage('{0} expected'.format(';'), self.lexer.position(), self.lexer.echo())

			self.getToken()

			var = AST.DecVar(name = v_id, typ = v_type, value = v_val)

		return var

	# type() : input: None, output: typ -- str() representing type of variable
	# Recognizes the following BNF where symbols preceded with underscores are in-language symbols
	# Type ::= Int | List _[ Int _]
	def type(self):
		typ = None

		# Int
		if self.token.symbol() == 'int':
			typ = self.token.lexeme()
			self.getToken()
		
		# List
		elif self.token.symbol() == 'list':
			typ = self.token.lexeme()
			self.getToken()

			# _[
			if self.token.symbol() != 'leftbracket':
				ErrorMessage('{0} expected'.format('['), self.lexer.position(), self.lexer.echo())
			
			self.getToken()
			
			# Int
			if self.token.symbol() != 'int':
				ErrorMessage('{0} expected'.format('Int'), self.lexer.position(), self.lexer.echo())
			
			typ += ' [' + self.token.lexeme() + ']'
			self.getToken()

			# _]
			if self.token.symbol() != 'rightbracket':
				ErrorMessage('{0} expected'.format(']'), self.lexer.position(), self.lexer.echo())
			
			self.getToken()

		else:
			ErrorMessage('{0} expected'.format('type'), self.lexer.position(), self.lexer.echo())

		return typ

	# statement() : input: None, output: instance of appropriate AST object
	# Recognizes the following BNF where symbols preceded with underscores are in-language symbols		
	# Statement ::= if ( Expr ) Statement [ else Statement]
	# 			| while ( Expr ) Statement 
	# 			| id = ListExpr ;
	# 			| println ( ListExpr ) ;
	# 			| _{ Statement { Statement } _}
	def statement(self):
		expr = None
		stmt = stmt1 = stmt2 = None
		v_id = None

		# if
		if self.token.symbol() == 'if':
			self.getToken()

			# (
			if self.token.symbol() != 'leftparen':
				ErrorMessage('{0} expected'.format('('), self.lexer.position(), self.lexer.echo())
			
			self.getToken()

			# Expr -- store Expr into expr
			expr = self.expr()

			# )
			if self.token.symbol() != 'rightparen':
				ErrorMessage('{0} expected'.format(')'), self.lexer.position(), self.lexer.echo())
			
			self.getToken()

			# Statement
			stmt1 = self.statement()

			# [ else Statement ]
			if self.token.symbol() == 'else':
				self.getToken()
				stmt2 = self.statement()

			stmt = AST.If(cond = copy.deepcopy(expr), term1 = copy.deepcopy(stmt1), term2 = copy.deepcopy(stmt2))

		# while
		elif self.token.symbol() == 'while':
			self.getToken()

			# (
			if self.token.symbol() != 'leftparen':
				ErrorMessage('{0} expected'.format('('), self.lexer.position(), self.lexer.echo())
			
			self.getToken()
			
			# Expr -- store Expr into expr
			expr = self.expr()

			# )
			if self.token.symbol() != 'rightparen':
				ErrorMessage('{0} expected'.format(')'), self.lexer.position(), self.lexer.echo())
			
			self.getToken()

			# Statement
			stmt1 = self.statement()

			stmt = AST.While(cond = copy.deepcopy(expr), statement = copy.deepcopy(stmt1))

		# id
		elif self.token.symbol() == 'identifier':
			v_id = AST.Variable(name = self.token.lexeme())

			self.getToken()

			# =
			if self.token.symbol() != 'assign':
				ErrorMessage('{0} expected'.format('='), self.lexer.position(), self.lexer.echo())

			self.getToken()

			# ListExpr -- store ListExpr into expr
			expr = self.listExpr()

			# ;
			if self.token.symbol() != 'semicolon':
				ErrorMessage('{0} expected'.format(';'), self.lexer.position(), self.lexer.echo())
			
			self.getToken()

			stmt = AST.Assignment(lhs = v_id, rhs = copy.deepcopy(expr))

		# println
		elif self.token.symbol() == 'println':
			self.getToken()

			# (
			if self.token.symbol() != 'leftparen':
				ErrorMessage('{0} expected'.format('('), self.lexer.position(), self.lexer.echo())
			
			self.getToken()

			# ListExpr -- store ListExpr into expr
			expr = self.listExpr()
			
			# )
			if self.token.symbol() != 'rightparen':
				ErrorMessage('{0} expected'.format(')'), self.lexer.position(), self.lexer.echo())
			
			self.getToken()

			# ;
			if self.token.symbol() != 'semicolon':
				ErrorMessage('{0} expected'.format(';'), self.lexer.position(), self.lexer.echo())
			
			self.getToken()

			stmt = AST.Println(expr = copy.deepcopy(expr))

		# _{
		elif self.token.symbol() == 'leftbrace':
			self.getToken()

			# Statement
			stmt1 = self.statement()

			# {Statement}
			while self.token.symbol() != 'rightbrace':
				stmt2 = self.statement()
				stmt1 = AST.Statement(copy.deepcopy(stmt1), copy.deepcopy(stmt2))

			# _}
			if self.token.symbol() != 'rightbrace':
				ErrorMessage('{0} expected'.format('}'), self.lexer.position(), self.lexer.echo())

			self.getToken()

			stmt = stmt1

		return stmt

	# expr() : input: None, output: instance of AST.Expr() object
	# Recognizes the following BNF where symbols preceded with underscores are in-language symbols	
	# expr ::= andExpr {|| andExpr}
	def expr(self):
		# andExpr
		expr = self.andExpr()

		# {|| andExpr}
		while self.token.symbol() == 'or':
			# ||
			op = self.token.lexeme()
			self.getToken()

			# andExpr
			andExpr = self.andExpr()

			expr = AST.Expr(op = op, term1 = copy.deepcopy(expr), term2 = copy.deepcopy(andExpr))

		return expr

	# andExpr() : input: None, output: instance of AST.Expr() object
	# Recognizes the following BNF where symbols preceded with underscores are in-language symbols	
	# andExpr ::= relExpr {&& relExpr}
	def andExpr(self):
		# relExpr -- store relExpr in expr
		expr = self.relExpr()

		# {&& relExpr}
		while self.token.symbol() == 'and':
			# &&
			op = self.token.lexeme()
			self.getToken()

			# relExpr
			relExpr = self.relExpr()
			expr = AST.Expr(op = op, term1 = copy.deepcopy(expr), term2 = copy.deepcopy(relExpr))

		return expr

	# relExpr() : input: None, output: instance of AST.Expr() object
	# Recognizes the following BNF where symbols preceded with underscores are in-language symbols	
	# relExpr ::= [!] listExpr [relOper listExpr]
	def relExpr(self):
		op = None

		# [!] -- store in op if exists
		if self.token.symbol() == 'not':
			op = self.token.lexeme()
			self.getToken()

		# listExpr -- store listExpr in expr
		expr = self.listExpr()

		# [relop listExpr]
		# store relOper in relop
		relop = self.relOper()

		if relop != None:
			term2 = self.listExpr()
			expr = AST.Expr(op = relop, term1 = copy.deepcopy(expr), term2 = copy.deepcopy(term2))

		if op != None:
			expr = AST.Expr(op = op, term1 = copy.deepcopy(expr), term2 = None)

		return expr

	# relOper() : input: None, output: op -- str()
	# Recognizes the following BNF where symbols preceded with underscores are in-language symbols	
	# relOper ::= < | <= | > | >= | == | !=
	def relOper(self):
		op = None

		# < | <= | > | >= | == | !=
		if self.token.symbol() == 'relop':
			op = self.token.lexeme()
			self.getToken()

		return op

	# listExpr() : input: None, output: instance of AST.Expr() object
	# Recognizes the following BNF where symbols preceded with underscores are in-language symbols
	# listExpr ::= addExpr | addExpr :: listExpr
	def listExpr(self):
		# addExpr -- store addExpr to expr
		expr = self.addExpr()

		# :: listExpr
		if self.token.symbol() == 'cons':
			op = self.token.lexeme()
			self.getToken()
			term2 = self.listExpr()

			expr = AST.Expr(op = op, term1 = copy.deepcopy(expr), term2 = copy.deepcopy(term2))

		return expr

	# addExpr() : input: None, output: instance of AST.Expr() object
	# Recognizes the following BNF where symbols preceded with underscores are in-language symbols
	# addExpr ::= mulExpr {addOper mulExpr}
	def addExpr(self):
		# mulExpr -- store mulExpr in expr
		expr = self.mulExpr()
		op = True

		# {addOper mulExpr}
		while op != None:
			# addOper
			op = self.addOper()

			# mulExpr -- store in term2, store expr into term1
			if op != None:
				term1 = expr
				term2 = self.mulExpr()
				expr = AST.Expr(op = op, term1 = copy.deepcopy(term1), term2 = copy.deepcopy(term2))

		return expr

	# addOper() : input: None, output: op -- str()
	# Recognizes the following BNF where symbols preceded with underscores are in-language symbols
	#  addOper ::= + | -
	def addOper(self):
		op = None

		# + | -
		if self.token.symbol() == 'addop':
			op = self.token.lexeme()
			self.getToken()

		return op

	# mulExpr() : input: None, output: instance of AST.Expr() object
	# Recognizes the following BNF where symbols preceded with underscores are in-language symbols
	# mulExpr ::= prefixExpr {mulOper prefixExpr}
	def mulExpr(self):
		# prefixExpr -- store prefixExpr in expr
		expr = self.prefixExpr()
		op = True

		# {mulOper prefixExpr}
		while op != None:
			# mulOper
			op = self.mulOper()

			# prefixExpr -- store in term2
			if op != None:
				term2 = self.prefixExpr()
				expr = AST.Expr(op = op, term1 = copy.deepcopy(expr), term2 = copy.deepcopy(term2))

		return expr

	# mulOper() : input: None, output: op -- str()
	# Recognizes the following BNF where symbols preceded with underscores are in-language symbols
	# mulOper ::= * | /
	def mulOper(self):
		op = None

		# * | /
		if self.token.symbol() == 'multop':
			op = self.token.lexeme()		
			self.getToken()
		
		return op

	# prefixExpr() : input: None, output: instance of AST.Expr() object
	# Recognizes the following BNF where symbols preceded with underscores are in-language symbols
	# prefixExpr ::= [addOper] simpleExpr {listMethodCall}
	def prefixExpr(self):
		# [addOper]
		addop = self.addOper()

		# simpleExpr -- store simpleExpr in expr
		expr = self.simpleExpr()
		listop = True

		# {listMethodCall}
		while listop != None:
			listop = self.listMethodCall()
			if listop != None:
				expr = AST.Expr(op = listop, term1 = copy.deepcopy(expr), term2 = None)

		if addop != None:
			expr = AST.Expr(op = addop, term1 = copy.deepcopy(expr), term2 = None)

		return expr

	# listMethodCall() : input: None, output: op -- str()
	# Recognizes the following BNF where symbols preceded with underscores are in-language symbols
	# listMethodCall ::= . head | . tail | . isEmpty
	def listMethodCall(self):
		op = None

		# .
		if self.token.symbol() == 'period':
			self.getToken()

			# head | tail | isEmpty -- store listOp into op
			if self.token.symbol() == 'listop':
				op = self.token.lexeme()
				self.getToken()
			else:
				ErrorMessage('{0} expected'.format('(head | tail | isEmpty)'), self.lexer.position(), self.lexer.echo())

		return op

	# simpleExpr() : input: None, output: instance of appropriate AST object
	# Recognizes the following BNF where symbols preceded with underscores are in-language symbols	
	# simpleExpr ::= literal | ( expr ) | id [ ( [ listExpr {, listExpr} ] ) ]
	def simpleExpr(self):
		v_id = None
		parameterList = []

		# id
		if self.token.symbol() == 'identifier':
			# store id lexeme into v_id
			v_id = self.token.lexeme()
			self.getToken()

			# [ ( ...
			if self.token.symbol() == 'leftparen':
				self.getToken()

				# listExpr -- store listExpr into parameterList
				parameterList.append(copy.deepcopy(self.listExpr()))

				# { , ...
				while self.token.symbol() == 'comma':
					self.getToken()
					# listExpr -- store listExpr into parameterList
					parameterList.append(copy.deepcopy(self.listExpr()))

				# ) ]
				if self.token.symbol() != 'rightparen':
					ErrorMessage('{0} expected'.format(')'), self.lexer.position(), self.lexer.echo())					
				
				self.getToken()

				expr = AST.FunctionCall(name = v_id, parameterList = copy.deepcopy(parameterList))

			# no [ ( [ listExpr {, listExpr} ] ) ]
			else:
				expr = AST.Variable(name = v_id)

		# (
		elif self.token.symbol() == 'leftparen':
			self.getToken()

			# expr -- store expr in expr
			expr = self.expr()

			# )
			if self.token.symbol() != 'rightparen':
				ErrorMessage('{0} expected'.format(')'), self.lexer.position(), self.lexer.echo())					

			self.getToken()

		# literal -- store literal in expr
		else:
			expr = self.literal()

		return expr

	# literal() : input: None, output: instance of appropriate AST object
	# Recognizes the following BNF where symbols preceded with underscores are in-language symbols	
	# literal ::= integer | Nil
	def literal(self):
		val = None

		# integer
		if self.token.symbol() == 'integer':
			val = AST.IntValue(value = self.token.lexeme())
			self.getToken()				

		# Nil -- a list
		elif self.token.symbol() == 'nil':
			val = AST.NilValue()
			self.getToken()

		return val		
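A minimal driver sketch for this parser -- the file name is hypothetical, and it assumes MicroScalaLexer accepts the raw source text, as the constructor above suggests:

source = open('program.scala').read()  # hypothetical input file
tree = MicroTree(_input=source)
ast_root = tree.tree                   # instance of AST.Program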
Exemplo n.º 56
0
 def decompile(cls, source, flags, properties):
     length = len(source)
     if (length == 0):
         return ""
     indent = properties.getInt(cls.INITIAL_INDENT_PROP, 0)
     if indent < 0:
         raise IllegalArgumentException()
     indentGap = properties.getInt(cls.INDENT_GAP_PROP, 4)
     if indentGap < 0:
         raise IllegalArgumentException()
     caseGap = properties.getInt(cls.CASE_GAP_PROP, 2)
     if caseGap < 0:
         raise IllegalArgumentException()
     result = StringBuffer()
     justFunctionBody = (0 != (flags & cls.ONLY_BODY_FLAG))
     toSource = (0 != (flags & cls.TO_SOURCE_FLAG))
     if cls.printSource:
         System.err.println("length:" + length)
         ## for-while
         i = 0
         while i < length:
             tokenname = None
             if Token.printNames:
                 tokenname = Token.name(source.charAt(i))
             if tokenname is None:
                 tokenname = "---"
             pad = "\t" if len(tokenname) > 7 else "\t\t"
             System.err.println(tokenname + pad + source.charAt(i) + "\t'" + ScriptRuntime.escapeString(source.substring(i, i + 1)) + "'")
             i += 1
         System.err.println()
     braceNesting = 0
     afterFirstEOL = False
     i = 0
     topFunctionType = 0
     if (source.charAt(i) == Token.SCRIPT):
         i += 1
         topFunctionType = -1
     else:
         topFunctionType = source.charAt(i + 1)
     if not toSource:
         result.cls.append('\n')
         ## for-while
         j = 0
         while j < indent:
             result.cls.append(' ')
             j += 1
     else:
         if (topFunctionType == FunctionNode.FUNCTION_EXPRESSION):
             result.cls.append('(')
     while i < length:
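         # NOTE: this chain reads like a mechanical Java-to-Python port of a
         # 'switch' statement: source.charAt(i) and StringBuffer are Java API
         # survivors, result.cls.append would be plain result.append in
         # Python, and each 'break' marks the end of a Java 'case' -- in
         # Python it exits the while loop, so a faithful port would instead
         # fall through to the 'i += 1' at the bottom of the loop.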
         if source.charAt(i) in (Token.GET, Token.SET):
             result.cls.append("get " if (source.charAt(i) == Token.GET) else "set ")
             i += 1
             i = cls.printSourceString(source, i + 1, False, result)
             i += 1
             break
         elif source.charAt(i) in (Token.NAME, Token.REGEXP):
             i = cls.printSourceString(source, i + 1, False, result)
             continue
         elif source.charAt(i) == Token.STRING:
             i = cls.printSourceString(source, i + 1, True, result)
             continue
         elif source.charAt(i) == Token.NUMBER:
             i = cls.printSourceNumber(source, i + 1, result)
             continue
         elif source.charAt(i) == Token.TRUE:
             result.cls.append("true")
             break
         elif source.charAt(i) == Token.FALSE:
             result.cls.append("false")
             break
         elif source.charAt(i) == Token.NULL:
             result.cls.append("null")
             break
         elif source.charAt(i) == Token.THIS:
             result.cls.append("this")
             break
         elif source.charAt(i) == Token.FUNCTION:
             i += 1
             result.cls.append("function ")
             break
         elif source.charAt(i) == cls.FUNCTION_END:
             break
         elif source.charAt(i) == Token.COMMA:
             result.cls.append(", ")
             break
         elif source.charAt(i) == Token.LC:
             braceNesting += 1
             if (Token.EOL == cls.getNext(source, length, i)):
                 indent += indentGap
             result.cls.append('{')
             break
         elif source.charAt(i) == Token.RC:
             braceNesting -= 1
             if justFunctionBody and (braceNesting == 0):
                 break
             result.cls.append('}')
             if cls.getNext(source, length, i) == cls.FUNCTION_END:
                 indent -= indentGap
                 break
             elif cls.getNext(source, length, i) == Token.ELSE:
                 indent -= indentGap
                 result.cls.append(' ')
                 break
             break
         elif source.charAt(i) == Token.LP:
             result.cls.append('(')
             break
         elif source.charAt(i) == Token.RP:
             result.cls.append(')')
             if (Token.LC == cls.getNext(source, length, i)):
                 result.cls.append(' ')
             break
         elif source.charAt(i) == Token.LB:
             result.cls.append('[')
             break
         elif source.charAt(i) == Token.RB:
             result.cls.append(']')
             break
         elif source.charAt(i) == Token.EOL:
             if toSource:
                 break
             newLine = True
             if not afterFirstEOL:
                 afterFirstEOL = True
                 if justFunctionBody:
                     result.setLength(0)
                     indent -= indentGap
                     newLine = False
             if newLine:
                 result.cls.append('\n')
             if i + 1 < length:
                 less = 0
                 nextToken = source.charAt(i + 1)
                 if (nextToken == Token.CASE) or (nextToken == Token.DEFAULT):
                     less = indentGap - caseGap
                 else:
                     if (nextToken == Token.RC):
                         less = indentGap
                     else:
                         if (nextToken == Token.NAME):
                             afterName = cls.getSourceStringEnd(source, i + 2)
                             if (source.charAt(afterName) == Token.COLON):
                                 less = indentGap
                 ## for-while
                 while less < indent:
                     result.cls.append(' ')
                     less += 1
             break
         elif source.charAt(i) == Token.DOT:
             result.cls.append('.')
             break
         elif source.charAt(i) == Token.NEW:
             result.cls.append("new ")
             break
         elif source.charAt(i) == Token.DELPROP:
             result.cls.append("delete ")
             break
         elif source.charAt(i) == Token.IF:
             result.cls.append("if ")
             break
         elif source.charAt(i) == Token.ELSE:
             result.cls.append("else ")
             break
         elif source.charAt(i) == Token.FOR:
             result.cls.append("for ")
             break
         elif source.charAt(i) == Token.IN:
             result.cls.append(" in ")
             break
         elif source.charAt(i) == Token.WITH:
             result.cls.append("with ")
             break
         elif source.charAt(i) == Token.WHILE:
             result.cls.append("while ")
             break
         elif source.charAt(i) == Token.DO:
             result.cls.append("do ")
             break
         elif source.charAt(i) == Token.TRY:
             result.cls.append("try ")
             break
         elif source.charAt(i) == Token.CATCH:
             result.cls.append("catch ")
             break
         elif source.charAt(i) == Token.FINALLY:
             result.cls.append("finally ")
             break
         elif source.charAt(i) == Token.THROW:
             result.cls.append("throw ")
             break
         elif source.charAt(i) == Token.SWITCH:
             result.cls.append("switch ")
             break
         elif source.charAt(i) == Token.BREAK:
             result.cls.append("break")
             if (Token.NAME == cls.getNext(source, length, i)):
                 result.cls.append(' ')
             break
         elif source.charAt(i) == Token.CONTINUE:
             result.cls.append("continue")
             if (Token.NAME == cls.getNext(source, length, i)):
                 result.cls.append(' ')
             break
         elif source.charAt(i) == Token.CASE:
             result.cls.append("case ")
             break
         elif source.charAt(i) == Token.DEFAULT:
             result.cls.append("default")
             break
         elif source.charAt(i) == Token.RETURN:
             result.cls.append("return")
             if (Token.SEMI != cls.getNext(source, length, i)):
                 result.cls.append(' ')
             break
         elif source.charAt(i) == Token.VAR:
             result.cls.append("var ")
             break
         elif source.charAt(i) == Token.SEMI:
             result.cls.append(';')
             if (Token.EOL != cls.getNext(source, length, i)):
                 result.cls.append(' ')
             break
         elif source.charAt(i) == Token.ASSIGN:
             result.cls.append(" = ")
             break
         elif source.charAt(i) == Token.ASSIGN_ADD:
             result.cls.append(" += ")
             break
         elif source.charAt(i) == Token.ASSIGN_SUB:
             result.cls.append(" -= ")
             break
         elif source.charAt(i) == Token.ASSIGN_MUL:
             result.cls.append(" *= ")
             break
         elif source.charAt(i) == Token.ASSIGN_DIV:
             result.cls.append(" /= ")
             break
         elif source.charAt(i) == Token.ASSIGN_MOD:
             result.cls.append(" %= ")
             break
         elif source.charAt(i) == Token.ASSIGN_BITOR:
             result.cls.append(" |= ")
             break
         elif source.charAt(i) == Token.ASSIGN_BITXOR:
             result.cls.append(" ^= ")
             break
         elif source.charAt(i) == Token.ASSIGN_BITAND:
             result.cls.append(" &= ")
             break
         elif source.charAt(i) == Token.ASSIGN_LSH:
             result.cls.append(" <<= ")
             break
         elif source.charAt(i) == Token.ASSIGN_RSH:
             result.cls.append(" >>= ")
             break
         elif source.charAt(i) == Token.ASSIGN_URSH:
             result.cls.append(" >>>= ")
             break
         elif source.charAt(i) == Token.HOOK:
             result.cls.append(" ? ")
             break
         elif source.charAt(i) == Token.OBJECTLIT:
             result.cls.append(':')
             break
         elif source.charAt(i) == Token.COLON:
             if (Token.EOL == cls.getNext(source, length, i)):
                 result.cls.append(':')
             else:
                 result.cls.append(" : ")
             break
         elif source.charAt(i) == Token.OR:
             result.cls.append(" || ")
             break
         elif source.charAt(i) == Token.AND:
             result.cls.append(" && ")
             break
         elif source.charAt(i) == Token.BITOR:
             result.cls.append(" | ")
             break
         elif source.charAt(i) == Token.BITXOR:
             result.cls.append(" ^ ")
             break
         elif source.charAt(i) == Token.BITAND:
             result.cls.append(" & ")
             break
         elif source.charAt(i) == Token.SHEQ:
             result.cls.append(" === ")
             break
         elif source.charAt(i) == Token.SHNE:
             result.cls.append(" !== ")
             break
         elif source.charAt(i) == Token.EQ:
             result.cls.append(" == ")
             break
         elif source.charAt(i) == Token.NE:
             result.cls.append(" != ")
             break
         elif source.charAt(i) == Token.LE:
             result.cls.append(" <= ")
             break
         elif source.charAt(i) == Token.LT:
             result.cls.append(" < ")
             break
         elif source.charAt(i) == Token.GE:
             result.cls.append(" >= ")
             break
         elif source.charAt(i) == Token.GT:
             result.cls.append(" > ")
             break
         elif source.charAt(i) == Token.INSTANCEOF:
             result.cls.append(" instanceof ")
             break
         elif source.charAt(i) == Token.LSH:
             result.cls.append(" << ")
             break
         elif source.charAt(i) == Token.RSH:
             result.cls.append(" >> ")
             break
         elif source.charAt(i) == Token.URSH:
             result.cls.append(" >>> ")
             break
         elif source.charAt(i) == Token.TYPEOF:
             result.cls.append("typeof ")
             break
         elif source.charAt(i) == Token.VOID:
             result.cls.append("void ")
             break
         elif source.charAt(i) == Token.CONST:
             result.cls.append("const ")
             break
         elif source.charAt(i) == Token.NOT:
             result.cls.append('!')
             break
         elif source.charAt(i) == Token.BITNOT:
             result.cls.append('~')
             break
         elif source.charAt(i) == Token.POS:
             result.cls.append('+')
             break
         elif source.charAt(i) == Token.NEG:
             result.cls.append('-')
             break
         elif source.charAt(i) == Token.INC:
             result.cls.append("++")
             break
         elif source.charAt(i) == Token.DEC:
             result.cls.append("--")
             break
         elif source.charAt(i) == Token.ADD:
             result.cls.append(" + ")
             break
         elif source.charAt(i) == Token.SUB:
             result.cls.append(" - ")
             break
         elif source.charAt(i) == Token.MUL:
             result.cls.append(" * ")
             break
         elif source.charAt(i) == Token.DIV:
             result.cls.append(" / ")
             break
         elif source.charAt(i) == Token.MOD:
             result.cls.append(" % ")
             break
         elif source.charAt(i) == Token.COLONCOLON:
             result.cls.append("::")
             break
         elif source.charAt(i) == Token.DOTDOT:
             result.cls.append("..")
             break
         elif source.charAt(i) == Token.DOTQUERY:
             result.cls.append(".(")
             break
         elif source.charAt(i) == Token.XMLATTR:
             result.cls.append('@')
             break
         else:
             raise RuntimeException("Token: " + Token.name(source.charAt(i)))
             
         i += 1
     if not toSource:
         if not justFunctionBody:
             result.cls.append('\n')
     else:
         if (topFunctionType == FunctionNode.FUNCTION_EXPRESSION):
             result.cls.append(')')
     return str(result)
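The long elif chain above is a direct transcription of a Java switch. In idiomatic Python, the fixed-text cases collapse into a lookup table; a minimal sketch of the idea, with only a few of the token names filled in:

LITERAL_TEXT = {
    Token.TRUE: "true", Token.FALSE: "false", Token.NULL: "null",
    Token.THIS: "this", Token.ADD: " + ", Token.SUB: " - ",
}

def emit_literal(result, tok):
    # returns the emitted text, or None for structural tokens that
    # need bespoke handling (braces, EOL, names, numbers, ...)
    text = LITERAL_TEXT.get(tok)
    if text is not None:
        result.append(text)
    return text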