Example #1
    def build_track(self):
        file = Parse('bach_846.mid')
        #file = Parse('WTC_Part1/Fugue7.mid')
        tpm = file.parse()

        simulate = Simulation(tpm, self)
        simulate.next_state(simulate.get_init_note(), self.length)
Example #2
 def testUnescape(self):
     self.assertEquals("a<b", Parse().unescape("a&lt;b"))
     self.assertEquals("a>b & b>c &&",
                       Parse().unescape("a&gt;b&nbsp;&amp;&nbsp;b>c &&"))
     self.assertEquals("&amp;&amp;", Parse().unescape("&amp;amp;&amp;amp;"))
     self.assertEquals("a>b & b>c &&",
                       Parse().unescape("a&gt;b&nbsp;&amp;&nbsp;b>c &&"))
Example #3
def start_indexing(dirs_list, dirs_dicts, main_path, posting_path, to_stem,
                   start_index, end_index, directory):
    dirs_dicts[directory] = None
    reader = ReadFile()
    parser = Parse(main_path)
    indexer = Indexer(posting_path + directory)

    if to_stem:
        parser.to_stem = True
        indexer.to_stem = True
    if not os.path.exists(posting_path + directory):
        os.makedirs(posting_path + directory)

    documents = {}
    i = start_index
    while i < end_index:
        docs = reader.separate_docs_in_file(main_path + '\\corpus',
                                            dirs_list[i])
        j = 0
        for doc_id in docs:
            doc_dict = parser.main_parser(docs[doc_id].text, docs[doc_id])
            docs[doc_id].text = None
            if i == end_index - 1 and j == len(docs) - 1:
                indexer.finished_parse = True
            indexer.index_terms(doc_dict, doc_id)
            documents[doc_id] = docs[doc_id]
            j += 1
        i += 1
    dirs_dicts[directory] = [
        indexer.post_files_lines, indexer.terms_dict, documents,
        reader.languages
    ]
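start_indexing reads the corpus files dirs_list[start_index:end_index], writes its postings under posting_path + directory, and reports its results through dirs_dicts[directory]. Purely as a usage sketch (how the project actually launches these workers is not shown here; the thread-based fan-out, the Windows-style paths and the '0'/'1' partition names below are assumptions):

import os
import threading

main_path = 'C:\\ir_project'                 # hypothetical corpus location
posting_path = 'C:\\ir_project\\postings\\'  # hypothetical; the worker appends the partition name
to_stem = False

dirs_list = sorted(os.listdir(main_path + '\\corpus'))  # assumed: one entry per corpus file
dirs_dicts = {}
half = len(dirs_list) // 2

workers = [
    threading.Thread(target=start_indexing,
                     args=(dirs_list, dirs_dicts, main_path, posting_path,
                           to_stem, 0, half, '0')),
    threading.Thread(target=start_indexing,
                     args=(dirs_list, dirs_dicts, main_path, posting_path,
                           to_stem, half, len(dirs_list), '1')),
]
for worker in workers:
    worker.start()
for worker in workers:
    worker.join()
# dirs_dicts now maps each partition name to [post_files_lines, terms_dict, documents, languages]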
Example #4
def main():
    # Create the object that parses the website
    parse = Parse("https://www.matchendirect.fr/")
    # Parse the whole site
    parse.parseAllSite()
    # Build the output file
    df_data = parse.createInputOutputFile()
    # Save as CSV
    df_data.to_csv('data.csv', index=False, encoding="utf-8-sig")
    print('tete')
Example #5
 def caseOne(self, path):
     p = Parse()
     p.setPath(path)
     tag = p.getTag()
     f = open(casesnew_dir + tag + ".robot", 'a+')
     apiname = p.getAPIName()
     casename = p.getCaseName()
     f.write(casename + "\n")
     # Get the interface's return values
     parameters = p.getParams()
     paramskey = []
     summary = p.getSummary()
     f.write("\t[Documentation]    " + summary.encode("utf-8") + "\n")
     f.write("\t[Tags]    Run\n")
     f.write("\t${resp}    " + apiname + "    ${token}")
     if len(parameters) > 0:
         for i in range(len(parameters)):
             name = parameters[i]["name"].keys()[0]
             paramskey.append(name)
             f.write("    ${" + casename + "_param[\'" + name + "\']}")
     f.write("\n")
     resp = globals()[casename + "_resp"]
     if isinstance(resp, dict) or isinstance(resp, list):
         self.assertDict(f, casename, resp, "")
         f.write("\n")
     else:
         f.write("\tshould be equal as strings    ${resp}    ${" +
                 casename + "_resp}\n\n")
Example #6
    def __init__(self, handler):
        Parse.__init__(self, handler)
        self.addRule(ListRule())
        self.addRule(ListItemRule())
        self.addRule(TitleRule())
        self.addRule(HeadingRule())
        self.addRule(ParagraphRule())

        self.addFilter(r'\*(.+?)\*', 'emphasis')
        self.addFilter(r'(http://[\.a-zA-Z/]+)', 'url')
        self.addFilter(r'([\.a-zA-Z]+@[\.a-zA-Z]+[a-zA-Z]+)', 'mail')
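Purely as an illustration of what the three filter patterns registered above match (the Rule/handler machinery that consumes them is specific to this project, so only the raw regular expressions are exercised here):

import re

sample = "see *this* example at http://example.com/page or mail me@example.com"
print(re.findall(r'\*(.+?)\*', sample))                           # ['this']
print(re.findall(r'(http://[\.a-zA-Z/]+)', sample))               # ['http://example.com/page']
print(re.findall(r'([\.a-zA-Z]+@[\.a-zA-Z]+[a-zA-Z]+)', sample))  # ['me@example.com']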
Example #7
 def __init__(self, corpus_path, posting_path, terms_dict, cities_dict, docs_dict, avg_doc_length, with_stemming,
              with_semantics):
     self.terms_dict = terms_dict
     self.cities_dict = cities_dict
     self.docs_dict = docs_dict
     self.parser = Parse(corpus_path)  ## corpus path for stop words
     self.parser.to_stem = with_stemming
     self.posting_path = posting_path
     self.ranker = Ranker(avg_doc_length)
     self.model = None
     self.with_semantics = with_semantics
     self.with_stemming = with_stemming
Example #8
def callRobot(txt, request):

    parse = Parse()
    result = []
    decision = parse.outputString(str(txt))
    if decision[0][0] == 1:
        fb = FB()
        temp = fb.searchUser(str(decision[1]))
        if temp != None and len(temp) > 0:
            result.append("Result from Facebook, Total results are " +
                          str(len(temp)) + "\n")
            for i, y in temp.iteritems():
                result.append("UserID:" + i + "\tUser Full Name: " + y + "\n")
    if decision[0][1] == 1:
        twitter = twitterSearch()
        temp = twitter.searchName(str(txt))
        result.append("Result from twitter Total results are :" +
                      str(len(temp)) + "\n")
        for i in temp:
            result.append("UserID:" + i[0] + "\tUser Full Name:" + i[1] +
                          "\tUser location" + i[2] + "\n")
    if decision[0][2] == 1:
        fb = FB()
        temp = fb.searchDetailInfo(str(txt))
        if "name" in temp:
            result.append("Name:    " + temp["name"])
        if "category" in temp:
            result.append("\tCategory:  " + temp["category"])
        if "birthday" in temp:
            result.append("\tBirthday:  " + temp["birthday"])
        if "about" in temp:
            result.append("\tAbout: " + temp["about"])
        if "pic" in temp:
            result.append("\tPicture:   " + temp["pic"] + "\n")
    #result = fb.searchUser(str(txt))
    message = ""

    for i in result:
        message = message + i

    #for i in range(1,1000):
    #   print message.decode('unicode-escape')
    # for i in range(1,300):
    #     print message[1791:1793]
    #msg = message.decode('unicode-escape')

    #print result

    msg = message
    user = GetRobot(request, msg)

    return msg
Example #9
def callRobot(txt,request):

    parse = Parse()
    result = []
    decision = parse.outputString(str(txt))
    if decision[0][0]==1:
        fb = FB()
        temp = fb.searchUser(str(decision[1]))
        if temp!=None and len(temp)>0:
            result.append("Result from Facebook, Total results are "+str(len(temp))+"\n")
            for i,y in temp.iteritems():
                result.append("UserID:"+i+"\tUser Full Name: "+y+"\n")
    if decision[0][1]==1:
        twitter = twitterSearch()
        temp = twitter.searchName(str(txt))
        result.append("Result from twitter Total results are :"+str(len(temp))+"\n")
        for i in temp:
            result.append("UserID:"+i[0]+"\tUser Full Name:"+i[1]+"\tUser location"+i[2]+"\n")
    if decision[0][2]==1:
        fb = FB()
        temp = fb.searchDetailInfo(str(txt))
        if "name" in temp:
            result.append("Name:    "+temp["name"])
        if "category" in temp:
            result.append("\tCategory:  "+temp["category"])
        if "birthday" in temp:
            result.append("\tBirthday:  "+temp["birthday"])
        if "about" in temp:
            result.append("\tAbout: "+temp["about"])
        if "pic" in temp:
            result.append("\tPicture:   "+temp["pic"]+"\n")
    #result = fb.searchUser(str(txt))
    message=""

    for i in result:
        message = message+i

    #for i in range(1,1000):
     #   print message.decode('unicode-escape')
    # for i in range(1,300):
    #     print message[1791:1793]
    #msg = message.decode('unicode-escape')

    
    #print result

    msg = message
    user = GetRobot(request,msg)
  
    return msg
Example #10
    def __init__(self, path):
        self.path = path.rstrip()
        logging.getLogger('asa_utils').debug("Searching %s for data files" %
                                             self.path)
        self.files = glob.glob(os.path.join(self.path, "*"))
        logging.getLogger('asa_utils').debug("File list: %s" % self.files)
        if len(self.files) == 0:
            logging.getLogger('asa_utils').error(
                "Directory does not exist, or is empty: %s" % path)
            raise EmptyDataFolder

        self.parsers = []
        for file in self.files:
            try:
                p = Parse(file)
                self.parsers.append(p)
            except ParseError:
                logging.getLogger('asa_utils').warn(
                    "Failed to parse file: %s" % file)

        if len(self.parsers) == 0:
            logging.getLogger('asa_utils').error(
                "Unable to parse any data files in this folder: %s" %
                self.path)
            raise EmptyDataFolder
Example #11
 def testParsing(self):
     tags = ("table", )
     p = Parse("leader<Table foo=2>body</table>trailer", tags)
     self.assertEquals("leader", p.leader)
     self.assertEquals("<Table foo=2>", p.tag)
     self.assertEquals("body", p.body)
     self.assertEquals("trailer", p.trailer)
Example #12
 def testIterating(self):
     p = Parse(
         "leader<table><tr><td>one</td><td>two</td><td>three</td></tr></table>trailer"
     )
     self.assertEquals("one", p.parts.parts.body)
     self.assertEquals("two", p.parts.parts.more.body)
     self.assertEquals("three", p.parts.parts.more.more.body)
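testIterating relies on how the Parse tree is linked: parts descends into the first nested tag, more moves to the next sibling, and body holds the cell content. A small walk over the same input (an illustrative sketch, assuming, as in the Java version of fit, that more is None past the last sibling):

p = Parse("leader<table><tr><td>one</td><td>two</td><td>three</td></tr></table>trailer")
cell = p.parts.parts      # first <td> of the first <tr>
while cell is not None:
    print(cell.body)      # prints "one", "two", "three"
    cell = cell.more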
Example #13
    def __init__(self, corpus_folder_path, stop_words_file_path, to_stem,
                 path_for_posting_and_dictionary):
        self.stop_words_container = StopWordsContainer(stop_words_file_path)
        self.stemmer = Stemmer() if to_stem else None
        self.parser = Parse()
        self.doc_posting_fd = None
        self.doc_counter = 0  # Will be used to count the number of iterations
        self.doc_repr_list = []
        self.term_posting_fd = None
        self.global_term_dict = {}
        self.number_of_docs_processed = 0
        self.corpus_folder = corpus_folder_path
        self.stop_words_file_path = stop_words_file_path
        self.path_for_posting_and_dictionary = path_for_posting_and_dictionary

        self.stem_doc_posting_folder = os.path.join(
            path_for_posting_and_dictionary,
            IR_CONFIG["storage"]["stem_doc_posting_folder"])
        self.doc_posting_folder = os.path.join(
            path_for_posting_and_dictionary,
            IR_CONFIG["storage"]["doc_posting_folder"])
        self.stem_term_posting_folder = os.path.join(
            path_for_posting_and_dictionary,
            IR_CONFIG["storage"]["stem_term_posting_folder"])
        self.term_posting_folder = os.path.join(
            path_for_posting_and_dictionary,
            IR_CONFIG["storage"]["term_posting_folder"])

        if self.stemmer is not None:
            self.doc_posting_file_path = self.stem_doc_posting_folder
            self.term_posting_file_target = os.path.join(
                self.stem_term_posting_folder,
                IR_CONFIG["storage"]["stem_term_posting_file_name"])
            self.doc_posting_file_target = os.path.join(
                self.stem_doc_posting_folder,
                IR_CONFIG["storage"]["stem_doc_posting_file_name"])
            self.cache_file_path = os.path.join(
                path_for_posting_and_dictionary,
                IR_CONFIG["storage"]["cache_file_name_stem"])
            self.dictionary_file_path = os.path.join(
                path_for_posting_and_dictionary,
                IR_CONFIG["storage"]["dictionary_file_name_stem"])

        else:
            self.doc_posting_file_path = self.doc_posting_folder
            self.term_posting_file_target = os.path.join(
                self.term_posting_folder,
                IR_CONFIG["storage"]["term_posting_file_name"])
            self.doc_posting_file_target = os.path.join(
                self.doc_posting_folder,
                IR_CONFIG["storage"]["doc_posting_file_name"])
            self.cache_file_path = os.path.join(
                path_for_posting_and_dictionary,
                IR_CONFIG["storage"]["cache_file_name"])
            self.dictionary_file_path = os.path.join(
                path_for_posting_and_dictionary,
                IR_CONFIG["storage"]["dictionary_file_name"])

        self.dictionary = MainDictionary(self.term_posting_file_target)
Example #14
 def run(self, file, right, wrong, ignores, exceptions):
     input = self.read("Documents/" + file + ".html")
     fixture = Fixture()
     if input.find("<wiki>") != -1:
         tables = Parse(input, ("wiki", "table", "tr", "td"))
         fixture.doTables(tables.parts)
     else:
         tables = Parse(input, ("table", "tr", "td"))
         fixture.doTables(tables)
     output = open("Reports/" + file + ".html", "wt")
     output.write(str(tables))
     output.close()
     self.assertEquals(right, fixture.counts.right, file + " right")
     self.assertEquals(wrong, fixture.counts.wrong, file + " wrong")
     self.assertEquals(ignores, fixture.counts.ignores, file + " ignores")
     self.assertEquals(exceptions, fixture.counts.exceptions,
                       file + " exceptions")
Example #15
 def getAllCellStates(filename):
     print(f"Parsing file ({filename})...")
     startTime = time.monotonic()
     states = Parse.getAllCellStates(filename)
     endTime = time.monotonic()
     timeElapsed = endTime - startTime
     print(f"Time taken: {timeElapsed}s")
     return [states, timeElapsed]
Example #16
    def find_all_POS_tags(self, text):

        parses = re.findall(re.escape("     ") + "(.*)" + re.escape("\n"), text)
        tags = []

        for parse in parses:
            tags.append(Parse.convert_parse_to_tag(parse))

        self.tags += tags
Example #17
    def __init__(self, dataStorage):
        """
        Initialize.
        :return:
        """
        self.driver = webdriver.Chrome()
        # self.driver.implicitly_wait(1)
        # self.driver = webdriver.PhantomJS()  ######### cannot be used: accomplishments and recommendation info are not scraped

        self.accountName = ACCOUNT.get('name')
        self.urlName = LINKEDINURL.get('name')
        self.usersName = LINKEDINUSERS.get('name')
        self.postName = LINKEDINPOSTSURL.get('name')
        self.__dataStorage = dataStorage
        self.parse = Parse()
        self.webAction = WebAction()
        # self.linkedin_limit = 0
        self.__initLogger()
Example #18
    def __init__(self, dataStorage):
        """
        Initialize.
        :return:
        """
        self.driver = webdriver.Chrome()
        # self.driver = webdriver.PhantomJS()
        # self.driver.maximize_window()
        # Set the page load timeout
        self.driver.set_page_load_timeout(30)

        self.accountName = ACCOUNT.get('name')
        self.urlName = LINKEDINURL.get('name')
        self.usersName = LINKEDINUSERS.get('name')
        self.__dataStorage = dataStorage
        self.parse = Parse()
        # self.linkedin_limit = 0
        self.__initLogger()
Example #19
 def testParseException(self):
     try:
         p = Parse(
             "leader<table><tr><th>one</th><th>two</th><th>three</th></tr><tr><td>four</td></tr></table>trailer"
         )
     except ParseException, e:
         self.assertEquals(17, e.offset)
         self.assertEquals("Can't find tag: td", e.message)
         return
Example #20
def tranning_network(s, readout, h_fc1, sess, gameState):
    print("START OF TRAINING NETWORK")
    a = tf.placeholder("float", [None, ACTIONS])
    y = tf.placeholder("float", [None])
    readout_action = tf.reduce_sum(tf.mul(readout, a), reduction_indices = 1)
    

    cost = tf.reduce_mean(tf.square(y - readout_action))
    tf.scalar_summary("cost", cost)
        
    #rain_step = tf.train.AdamOptimizer(1e-6).minimize(cost)
    train_step = tf.train.AdamOptimizer(0.005).minimize(cost)

   
    
    merged_summary_op = tf.merge_all_summaries()
    summary_writer = tf.train.SummaryWriter('logs',sess.graph)
    
    sess.run(tf.initialize_all_variables())
    
    #first state
    timeStep = 0
    s_t = gameState[timeStep]
    D=[]
    print("ADD GAME STATE TO DOMAIN")
    
    # add all 10k gameState to Domain
    for i in range(1, len(gameState)):
        
        #SET ACTION
        a_t = np.zeros([ACTIONS])
        action_index = 0
        gameStateObjectNext = Parse.parse_game_state(gameState[timeStep+1])
        action_index = gameStateObjectNext.pacman.lastMoveMade
        a_t[action_index] = 1
        
        # SET REWARD
        r_t = gameStateObjectNext.score
        
        # SET NEXTSTATE
        s_t1 = gameState[timeStep+1]
        
        # if GAME OVER SET TERMINAL
        terminal = (gameStateObjectNext.pacman.numberOfLivesRemaining >0) 
        
        #ADD TO DOMAIN
        D.append((s_t, a_t, r_t, s_t1, terminal))
        
        # NEXT STATE IN SEQUENCE
        s_t = s_t1
        timeStep += 1
   
    print("START TO TRAINING BATCH")
    #training 1k times with BATCH size
    for i in range (0,TRANING_TIME):
        if (i%SAVING_STEP==0):
            print ("Training Time %d " %i )
Example #21
 def testText(self):
     tags =("td",)
     p = Parse("<td>a&lt;b</td>", tags)
     self.assertEquals("a&lt;b", p.body)
     self.assertEquals("a<b", p.text())
     p = Parse("<td>\ta&gt;b&nbsp;&amp;&nbsp;b>c &&&nbsp;</td>", tags)
     self.assertEquals("a>b & b>c &&", p.text())
     p = Parse("<td>\ta&gt;b&nbsp;&amp;&nbsp;b>c &&nbsp;</td>", tags)
     self.assertEquals("a>b & b>c &", p.text())
     p = Parse("<TD><P><FONT FACE=\"Arial\" SIZE=2>GroupTestFixture</FONT></TD>", tags)
     self.assertEquals("GroupTestFixture",p.text())
Example #22
    def addParse(self, parseText):
        parseText = parseText.strip()
        for parse in self.parses:
            if parseText == parse.text:
                parse.frequency += 1
                return

        parse = Parse(parseText)
        self.parses.append(parse)
        self.sortParses()
Example #23
 def newFile(self):
     fi = open(resp_dir + "__init__.py", "w")
     fi.write("# coding=utf-8\n")
     fi.close()
     tags = Parse().getTags()
     for tag in tags:
         self.fileOne(tag)
         fi = open(resp_dir + "__init__.py", "a+")
         fi.write("from " + resp_pack + tag + " import *\n")
         fi.close()
Example #24
    def find_all_POS_tags(self, text):

        parses = re.findall(
            re.escape("     ") + "(.*)" + re.escape("\n"), text)
        tags = []

        for parse in parses:
            tags.append(Parse.convert_parse_to_tag(parse))

        self.tags += tags
Example #25
    def decoding():
        code = Parse.code().lower()
        number = Parse.number()
        result = ''
        letters = string.ascii_lowercase

        for letter in code:
            if letter.isalpha():
                position = letters.find(letter)
                if position > number:
                    position = position - number
                    result = result + letters[position]
                if position <= number:
                    position = position - 27
                    result = result + letters[position]
            
            if not letter.isalpha():
                result += letter
        
        return result
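If the intent here is a plain Caesar shift, the wrap-around is usually handled with modular arithmetic rather than the fixed position - 27 offset. A standalone sketch under that assumption (not a drop-in replacement for the Parse.code()/Parse.number() based method above):

import string

def caesar_decode(code, number):
    letters = string.ascii_lowercase
    result = ''
    for letter in code.lower():
        if letter.isalpha():
            # shift back by `number` and wrap within the 26-letter alphabet
            result += letters[(letters.find(letter) - number) % 26]
        else:
            result += letter
    return result

print(caesar_decode('khoor, zruog!', 3))   # hello, world!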
Example #26
 def __init__(self):
     self.parse = Parse()
     # Authorise my request using OAuth
     self.t = Twitter(
         auth=OAuth("2999654973-YS9XG2UdzWDNJhapOjAUDHjrv5wgG3az3MZ5JfF",
                    "NlP0PWQPJv1Z0lPsH6xv3bb7jw3Aos5xD18ni3ODuDZME",
                    "p6yoZBcF9NnteTV67YbVRwcis",
                    "nY0Q4ugFlef0AtlRTaXptshXxJNGymxFgn4KzaeF77UXlMkRuq"))
     self.tweets = []
     # Define target account and fetch tweet
     self.t.statuses.user_timeline(screen_name="premierleague",
                                   count=1,
                                   exclude_replies=True,
                                   include_rts=False)
     tweets = self.t.statuses.user_timeline(screen_name="premierleague",
                                            count=1,
                                            exclude_replies=True,
                                            include_rts=False)
     for i in xrange(len(tweets)):
         self.tweets.append(self.parse.parseOne(tweets[i]))
Example #27
 def testIndexing(self):
     p = Parse(
         "leader<table><tr><td>one</td><td>two</td><td>three</td></tr><tr><td>four</td></tr></table>trailer"
     )
     self.assertEquals("one", p.at(0, 0, 0).body)
     self.assertEquals("two", p.at(0, 0, 1).body)
     self.assertEquals("three", p.at(0, 0, 2).body)
     self.assertEquals("three", p.at(0, 0, 3).body)
     self.assertEquals("three", p.at(0, 0, 4).body)
     self.assertEquals("four", p.at(0, 1, 0).body)
     self.assertEquals("four", p.at(0, 1, 1).body)
     self.assertEquals("four", p.at(0, 2, 0).body)
     self.assertEquals(1, p.size())
     self.assertEquals(2, p.parts.size())
     self.assertEquals(3, p.parts.parts.size())
     self.assertEquals("one", p.leaf().body)
     self.assertEquals("four", p.parts.last().leaf().body)
Example #28
    def __init__(self):
        # creating an object for parser class
        parser_obj = Parse()

        # cycle count
        self.cycle = 0
        self.cy_needed = 0

        # loop case check
        self.loop = False

        # flag for i-cache
        self.spec_i_flag = False

        # collecting the parsed data
        self.inst = parser_obj.inst
        self.config = parser_obj.conf
        self.registers = parser_obj.regs
        self.data = parser_obj.data

        # hit count
        self.i_hit_count = 0
        self.i_access_count = 0
        self.d_miss = 0
        self.d_hit_count = 0
        self.d_access_count = 0

        self.stall = 0

        self.fflag = False
        self.next = 0
        self.v = 0

        # register set
        self.register_set = {}

        # d-cache
        self.d_block_0 = {d_value_0: [] for d_value_0 in range(2)}
        self.d_block_1 = {d_value_1: [] for d_value_1 in range(2)}

        self.least_recently_used = 0
        self.least_recently_used2 = 0

        # initialize instruction sets
        self.mem = ['LW', 'SW', 'L.D', 'S.D']
        self.add_sub = ['ADD.D', 'SUB.D']
        self.int_inst = ['DADD', 'DADDI', 'DSUB', 'DSUBI', 'AND', 'ANDI', 'OR', 'ORI']
        self.jump = ['HLT', 'J', 'BEQ', 'BNE']

        # tracking if busy or not
        self.fetch_busy = self.decode_busy = self.mem_busy = self.add_busy = self.mul_busy = self.div_busy \
            = self.write_back_busy = self.iu_busy = self.jump_busy = [False, None]
Example #29
def calculate_value_game_state(input_layer, readout, h_fc1, sess, gameState):

    gameObject = Parse.parse_game_state(gameState)

    #print("AT Game Step: %d Score %d" %(gameObject.totalTime, gameObject.score))

    s_t = Frame.get_input_network(gameObject)

    readout_t = readout.eval(feed_dict={input_layer: [s_t]})[0]

    action_index = np.argmax(readout_t)

    return action_index
Example #30
 def testIndexing(self):
     p = Parse("leader<table><tr><td>one</td><td>two</td><td>three</td></tr><tr><td>four</td></tr></table>trailer")
     self.assertEquals("one", p.at(0,0,0).body)
     self.assertEquals("two", p.at(0,0,1).body)
     self.assertEquals("three", p.at(0,0,2).body)
     self.assertEquals("three", p.at(0,0,3).body)
     self.assertEquals("three", p.at(0,0,4).body)
     self.assertEquals("four", p.at(0,1,0).body)
     self.assertEquals("four", p.at(0,1,1).body)
     self.assertEquals("four", p.at(0,2,0).body)
     self.assertEquals(1, p.size())
     self.assertEquals(2, p.parts.size())
     self.assertEquals(3, p.parts.parts.size())
     self.assertEquals("one", p.leaf().body)
     self.assertEquals("four", p.parts.last().leaf().body)
Example #31
    def report(self):
        num_count = 0
        i = 0
        freq = {}
        for term in self.indexer.terms_dict.keys():
            if Parse.isFloat(term):
                num_count += 1
            freq[term] = self.indexer.terms_dict[term][1]

        freq_list = sorted(freq.items(), key=itemgetter(1))
        with open('frequency.txt', 'wb') as f:
            for n in freq_list:
                f.write(str(n[0]) + ": " + str(n[1]) + '\n')

        print "Num of terms which are nums: " + str(num_count)
        print "Num of countries: " + str(len(self.indexer.countries))
        print "Num of capitals: " + str(self.indexer.num_of_capitals)
Example #32
    def test_run_OLCIL1_Oa01_radiance(self):
        from test_OLCIL1_parse_data import OLCIL1_test_path, OLCIL1_test_attributes
        from Parse import Parse

        s3parse = Parse(OLCIL1_test_path)

        # Test attributes attribute
        # Assert opened attributes are the same as the true attributes
        for key in OLCIL1_test_attributes.keys():
            if type(OLCIL1_test_attributes[key]) == list:
                self.assertItemsEqual(s3parse.attributes[key],
                                      OLCIL1_test_attributes[key],
                                      "Problem with %s" % key)
            else:
                self.assertEqual(s3parse.attributes[key],
                                 OLCIL1_test_attributes[key],
                                 "Problem with %s" % key)
Example #33
    def respOne(self, path):
        p = Parse()
        p.setPath(path)
        tag = p.getTag()
        f = open(resp_dir + tag + ".py", 'a+')
        casename = p.getCaseName()
        resp = p.getResponse()

        f.write(casename + "_resp = ")
        if isinstance(resp, dict):
            self.writeDict(f, resp)
        elif isinstance(resp, list):
            self.writeList(f, resp)
        else:
            f.write("\'" + resp + "\'")
        f.write("\n\n")
Example #34
    def __init__(self, arr):
        parser_obj = Parse()
        # depending on the number of registers
        if len(arr) == 1:
            self.inst = arr[0]
            self.reg1 = ''
            self.reg2 = ''
            self.reg3 = ''

        if len(arr) == 3:
            self.inst = arr[0]
            self.reg1 = arr[1]
            self.reg2 = arr[2]
            self.reg3 = ''
        if len(arr) == 4:
            self.inst = arr[0]
            self.reg1 = arr[1]
            self.reg2 = arr[2]
            self.reg3 = arr[3]

        self.address = 0
        self.x = ''
        self.mem_check = False
        self.sub_cycle = 0
        self.int_cycle = 1
        self.iu_cycle = 1
        self.mem_cycle = int(parser_obj.conf[0]['Main memory'])
        self.add_sub_cycle = int(parser_obj.conf[0]['FP adder'])
        self.mul_cycle = int(parser_obj.conf[0]['FP Multiplier'])
        self.div_cycle = int(parser_obj.conf[0]['FP divider'])

        # cycle count for each stage
        self.fetch = self.decode = self.execute = self.write_back = 0

        # possible hazards
        self.raw = self.war = self.waw = self.struct_haz = 'N'

        self.status = 'IF'

        # instruction cache flag
        self.i_flag = [False, 0]
        self.cache_miss_flag = False

        self.d_flag = False
Example #35
if gradebook_choice == "academic":
  prompt_event = str(raw_input("Event> "))
  prompt_event_id = (raw_input("Event ID> "))
  prompt_skill = (raw_input("Skill> "))
  prompt_due_date = (raw_input("Due Date> "))
  f.write(create_event(prompt_event, prompt_event_id, prompt_skill, prompt_due_date))
elif gradebook_choice == "teks":
  prompt_assignment = (raw_input("Assignment> "))
  prompt_assignment_id = (raw_input("Assignment ID> "))
  prompt_due_date = (raw_input("Due Date> "))
  f.write(create_assignment(prompt_assignment, prompt_assignment_id, prompt_due_date))
else:
  print "Error"

p = Parse(args.csvimport)
p.container_bind()
p.get_field_names()

print """------------------
Listing of Field Names and Indices
----------------------"""

for i, v in enumerate(p.field_names):
  print "( %s )   ( %s )" % (i,v)

prompt_id = int(raw_input("Which field contains the ID? "))
prompt_grade = int(raw_input("Which field contains the grade? "))
prompt_full_name = int(raw_input("Which field contains the full name? "))
prompt_cohort = int(raw_input("Which field contains the cohort? "))
Example #36
	def parseLine(self, slots):
	
		numSlots = len(slots)
			
		initialParse = Parse(self, numSlots)
		parses = initialParse.extend(slots[0])
		parses[0].comparisonNums.add(1)
		
		for slotN in range(1, numSlots):
		
			newParses = []
			for parse in parses:
				newParses.append(parse.extend(slots[slotN]))
				
			for parseSetIndex in range(len(newParses)):
			
				parseSet = newParses[parseSetIndex]
				
				for parseIndex in range(len(parseSet)):
				
					parse = parseSet[parseIndex]
					parse.comparisonParses = []
					
					if len(parseSet) > 1 and parseIndex == 0:
						parse.comparisonNums.add(parseSetIndex)
					
					for comparisonIndex in parse.comparisonNums:
					
						# should be a label break, but not supported in Python
						# find better solution; redundant checking
						if parse.isBounded:
							break

						try:
							for comparisonParse in newParses[comparisonIndex]:
							
								if parse is comparisonParse:
									continue
							
								if not comparisonParse.isBounded:
								
									if parse.canCompare(comparisonParse):
									
										boundingRelation = parse.boundingRelation(comparisonParse)
										
										if boundingRelation == Bounding.bounds:
											comparisonParse.isBounded = True
											
										elif boundingRelation == Bounding.bounded:
											parse.isBounded = True
											break
											
										elif boundingRelation == Bounding.equal:
											parse.comparisonParses.append(comparisonParse)
										
									else:
										parse.comparisonParses.append(comparisonParse)
						except IndexError:
							pass
									
			parses = []
			parseNum = 0
								
			for parseSet in newParses:
				for parse in parseSet:		
					if not parse.isBounded:
						#if (parse.score() < 1000):
						parse.parseNum = parseNum
						parseNum += 1
						parses.append(parse)
			
			for parse in parses:
			
				parse.comparisonNums = set()
				
				for compParse in parse.comparisonParses:
					if not compParse.isBounded:
						parse.comparisonNums.add(compParse.parseNum)
							
		return parses
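About the "should be a label break" comment in the method above: a common workaround is to pull the inner comparison loop into a helper and let return act as the labelled break. A sketch using the same names (it assumes the Bounding class and the parse attributes already used in this example):

def compareParse(parse, newParses):
    # "return" exits both loops at once, i.e. the labelled break the comment asks for.
    for comparisonIndex in parse.comparisonNums:
        if parse.isBounded:
            return
        try:
            for comparisonParse in newParses[comparisonIndex]:
                if parse is comparisonParse or comparisonParse.isBounded:
                    continue
                if not parse.canCompare(comparisonParse):
                    parse.comparisonParses.append(comparisonParse)
                    continue
                boundingRelation = parse.boundingRelation(comparisonParse)
                if boundingRelation == Bounding.bounds:
                    comparisonParse.isBounded = True
                elif boundingRelation == Bounding.bounded:
                    parse.isBounded = True
                    return
                elif boundingRelation == Bounding.equal:
                    parse.comparisonParses.append(comparisonParse)
        except IndexError:
            pass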
Example #37
#Author: Josh Wretlind
#Class: CSCI 410 - Elements of Computing Systems
#Project: ECS 10 - Compiler part #1
#Date: 04/07/13

import sys,string,os
from JackTokenizer import JackTokenizer
from CompilationEngine import CompilationEngine
from Parse import Parse

infile = sys.argv[1] # Sys.argv is the system argument list object

outfile = infile.replace(".jack",".xml")
parse = Parse(infile)
infileText = ""
jtok = JackTokenizer()

tokenList = []

while parse.hasMoreCommands():
    parse.advance()
    blah = parse.output()
    infileText += blah
    jtok.advance(blah)

tokenList.extend(jtok.listOfTokens)

ce = CompilationEngine()
ce.setListOfTokens(tokenList)
ce.run()