def extract_results(search_word, condition=None):
    url = util.create_url(MAIN_URL, search_word, DELIMITER)
    try:
        soup = util.check_exceptions(url)
        product_table = soup.find('table', class_='table_content')
        result_links = product_table.find_all('a')
    except:
        return []
    equips = []
    for link in result_links:
        product_url = HOME_URL + link.get('href')
        product_page_content = BeautifulSoup(
            urllib.request.urlopen(product_url), "html.parser")
        title = ''.join(
            product_page_content.find(
                'div', class_='product_left').find('h1').find_all(text=True)).strip()
        equipment = Result(title)
        equipment.url = product_url
        equipment.image_src = HOME_URL + product_page_content.find(
            'img', {"id": "big_product_img"}).get('src')
        equipment.price = util.get_price(
            product_page_content.find('div', class_='pr_price2').find(text=True))
        if util.is_valid_price(equipment.price):
            equips.append(equipment)
        if len(equips) >= 10:
            return equips
    return equips
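# The scraper snippets in this corpus all lean on a small `util` module whose
# implementation is not shown. The sketch below is an assumption, purely to
# make calls like util.get_price and util.is_valid_price concrete; the real
# helpers may differ.
import re as _re

def create_url(main_url, search_word, delimiter):
    # Join the search terms with the site's query delimiter, e.g. '+' or '%20'.
    return main_url + delimiter.join(search_word.split())

def get_price(price_text):
    # Pull the first number out of a price string such as "$1,299.00".
    match = _re.search(r'[\d,]+(?:\.\d+)?', str(price_text))
    return match.group(0).replace(',', '') if match else ''

def is_valid_price(price):
    # A price is usable if a positive number was extracted.
    try:
        return float(price) > 0
    except (TypeError, ValueError):
        return False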
def extract_results(search_word, condition=None):
    url = util.create_url(MAIN_URL, search_word, DELIMITER)
    page = urllib.request.urlopen(url)
    soup = BeautifulSoup(page, "html.parser")
    try:
        product_contents = soup.find_all('div', class_='products-mnbox-content')
    except:
        return []
    results = []
    for product_content in product_contents:
        equip_url = HOME_URL + product_content.find('a').get('href')
        models_site = BeautifulSoup(urllib.request.urlopen(equip_url),
                                    "html.parser")
        model_descriptions = models_site.find_all('td', class_='description')
        # note: loop variable renamed from `re`, which shadowed the re module
        for description in model_descriptions:
            result = Result(
                description.find('div', {'id': 'gaProductName'}).find(text=True).strip())
            result.image_src = 'https:' + description.find(
                'img', class_='lazy').get('data-original')
            result.url = HOME_URL + description.find('a').get('href')
            price_site = BeautifulSoup(urllib.request.urlopen(result.url),
                                       "html.parser")
            result.price = util.get_price(
                price_site.find('div', class_='price-box').find(
                    'span', class_='price-range').find(text=True))
            if util.is_valid_price(result.price):
                results.append(result)
            if len(results) >= 10:
                return results
    return results
def extract_results(item, condition=None):
    results = []
    if condition != 'new':
        page = urllib.request.urlopen(create_url(SEARCH_URL, item, DELIMITER))
        soup = BeautifulSoup(page, "html.parser")
        # See if the page has data
        try:
            table = soup.find_all('li', class_='item')
        except:
            return results
        for row in table:
            new_result = Result(row.find('a').get('title'))
            new_result.url = MAIN_URL + row.find('a').get('href')
            new_result.price = get_price(row.find('span', class_='price').text)
            new_result.image_src = row.find('img').get('src')
            specific_page = urllib.request.urlopen(new_result.url)
            new_soup = BeautifulSoup(specific_page, "html.parser")
            item_condition = new_soup.find(
                'div', class_='box-collateral-content').find('div', class_='std').text
            # Only add working equipment: skip any item whose condition text
            # mentions one of the bad-condition keywords
            bad_condition_types = [
                'bad', 'poor', 'not working', 'broken', 'not functional'
            ]
            if all(word not in item_condition for word in bad_condition_types) \
                    and is_valid_price(new_result.price):
                results.append(new_result)
    return results
def extract_results(search_word, condition=None):
    if condition == "new":
        return []
    url = util.create_url(MAIN_URL, search_word, DELIMITER)
    try:
        soup = util.check_exceptions(url)
        product_grid = soup.find('div', class_='v-product-grid')
        total_equips = product_grid.find_all('div', class_='v-product')
    except:
        return []
    equips = []
    for equip in total_equips:
        title = equip.find(
            'a', class_='v-product__title productnamecolor colors_productname'
        ).find(text=True).strip()
        equipment = Result(title)
        equipment.url = equip.find('a', class_='v-product__img').get('href')
        equipment.image_src = 'http:' + equip.find('img').get('src')
        price_text = equip.find(
            'div', class_='product_productprice').find_all(text=True)
        equipment.price = util.get_price(''.join(price_text))
        if util.is_valid_price(equipment.price):
            equips.append(equipment)
        if len(equips) >= 10:
            return equips
    return equips
def extract_results(search_word, condition=None):
    if condition == 'new':
        return []
    url = util.create_url(MAIN_URL, search_word, DELIMITER)
    page = urllib.request.urlopen(url)
    soup = BeautifulSoup(page, "html.parser")
    product_grid = soup.find('ul', class_='Products_ul')
    try:
        total_equips = product_grid.find_all('li', class_='Products')
    except:
        return []
    equips = []
    for equip in total_equips:
        title = equip.find('div', class_='title').find('span').find(text=True).strip()
        equipment = Result(title)
        equipment.url = equip.find('a').get('href')
        equipment.image_src = equip.find('div', class_='Image').find('img').get('src')
        price_text = equip.find('div', class_='price').find_all(text=True)
        equipment.price = util.get_price(''.join(price_text))
        if util.is_valid_price(equipment.price):
            equips.append(equipment)
        if len(equips) >= 10:
            return equips
    return equips
def __init__(self, env, version, resultInfo={}):
    Result.__init__(self, env, resultInfo)
    if self.chatter:
        self.reporter = ProgramReporter(version, self.params['Out'])
    else:
        self.reporter = NonChattyProgramReporter(version, self.params['Out'])
def extract_results(search_word, condition=None):
    url = util.create_url(MAIN_URL, search_word, DELIMITER)
    url = url if condition != "new" else url + '&Condition=5067'
    try:
        soup = util.check_exceptions(url)
        product_grid = soup.find('div', class_='pagebody')
        total_equips = product_grid.find_all('div', class_='el')
    except:
        return []
    equips = []
    for equip in total_equips:
        # item_details bundles the generic device name, model, and manufacturer
        items_details = equip.find('div', class_='item_details').find_all(text=True)
        title = ' '.join(items_details).strip()
        equipment = Result(title)
        equipment.url = equip.find('div', class_='image').find(
            'a', class_='item_number').get('href')
        equipment.image_src = equip.find('div', class_='image').find('img').get('src')
        price_text = (equip.find('div', class_='price').find(text=True)
                      if equip.find('span', class_='price_element') is None
                      else equip.find('span', class_='price_element').find(text=True))
        equipment.price = util.get_price(''.join(price_text))
        if util.is_valid_price(equipment.price):
            equips.append(equipment)
        if len(equips) >= 10:
            return equips
    return equips
def loginController(db: Session, loginRequest: Schema.UserLoginRequest):
    if not validateLoginRequest(loginRequest.username, loginRequest.password):
        return Result(MessageType.getMessageDetail(MessageType.INVALID_PARAM), None)
    isExistUser = login(db, loginRequest)
    if isExistUser is None:
        return Result(MessageType.getMessageDetail(MessageType.AUTHENTICATION_FAILED), None)
    return Result(MessageType.getMessageDetail(MessageType.SUCCESS), loginRequest.username)
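# The controllers here wrap every response in Result(message, payload). A
# minimal sketch of that envelope, assuming (not confirmed by the source) that
# Result simply pairs a MessageType detail with optional data:
class Result:
    def __init__(self, message, data):
        self.message = message  # e.g. MessageType.getMessageDetail(...)
        self.data = data        # payload, or None on failure

    def to_dict(self):
        # hypothetical serializer for returning the envelope from an API route
        return {"message": self.message, "data": self.data}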
def __init__(self, provided_salary, provided_pension):
    self.salary_before_taxes = provided_salary
    self.PENSION = provided_pension
    self.result = Result()
    self.result.salary_before_tax = provided_salary
    self.result.pension_percent = provided_pension
    self.result.tax_free = self.tax_config.get_tax_free()
def extract_results(item, condition=None):
    # The URL is extended based on the requested condition
    if condition == "new":
        specific_url = util.create_url(MAIN_URL, item, DELIMITER) + \
            "&condition=New,New%20or%20Used&adtype=998"
    else:
        specific_url = util.create_url(MAIN_URL, item, DELIMITER) + \
            "&condition=Used,Refurbished,For%20Parts/Not%20Working,New%20or%20Used&adtype=998"
    results = []
    # Check if the page has data
    try:
        soup = util.check_exceptions(specific_url)
        table = soup.find('tbody', class_='ResultsNewTable')
        rows = table.find_all('tr')
    except:
        return []
    # Collect at most the first 10 valid results
    for row in rows:
        new_result = Result(row.find('a').get('title'))
        new_result.url = row.find('a').get('href')
        new_result.price = util.get_price(row.find_all('td')[4].contents[0])
        number = util.get_price(new_result.title)
        new_result.image_src = ("https://photos.labx.com/labx/" + number +
                                "/" + number + "-0.jpg")
        if util.is_valid_price(new_result.price):
            results.append(new_result)
        if len(results) == 10:
            return results
    return results
def processTableStrings(self, tableStrings, assignmentNum):
    # Go through the list and turn each string into a Result
    results = []
    previousResults = []
    for tableString in tableStrings:
        tableStringValues = self.getTableStringValues(tableString)
        fileName1 = tableStringValues[1].strip().lower()
        fileName2 = tableStringValues[4].strip().lower()
        if self.testFileNaming(fileName1) and self.testFileNaming(fileName2):
            result = Result(assignmentNum, fileName1, fileName2,
                            tableStringValues[0].strip(),
                            int(tableStringValues[2]),
                            int(tableStringValues[5]),
                            int(tableStringValues[6]))
            if result.nameOneIsPrevious() and result.nameTwoIsPrevious():
                previousResults.append(result)
            else:
                results.append(result)
    # Returns
    if len(results) > 0:
        return results, True
    elif len(previousResults) > 0:
        return previousResults, True
    return None, False
def extract_results(item, condition=None):
    results = []
    if condition != "new":
        search_term = get_good_search_term(item)
        page = urllib.request.urlopen(
            create_url(search_url, search_term, DELIMITER))
        soup = BeautifulSoup(page, "html.parser")
        table = soup.find_all('td', class_='productname')
        for row in table:
            new_result = Result(row.find('a').text)
            specific_url = main_url + row.find('a').get('href')
            new_result.url = re.sub('%2E', '.', specific_url)
            new_result.image_src = main_url + \
                soup.find('td', class_='image').find('img').get('src')
            specific_page = urllib.request.urlopen(new_result.url)
            new_soup = BeautifulSoup(specific_page, "html.parser")
            try:
                new_result.price = get_price(
                    new_soup.find('span', class_='sellprice').text)
            except:
                continue
            # Only add items listed as functional
            description_url = main_url + re.sub(
                ' ', '%20',
                new_soup.find('p', id='name').find('a').get('href'))
            description_page = urllib.request.urlopen(description_url)
            description_soup = BeautifulSoup(description_page, "html.parser")
            functional_tag = description_soup.find(text='Functional:')
            working = functional_tag.find_next('td').text
            if "yes" in working.lower() and is_valid_price(new_result.price):
                results.append(new_result)
    return results
def execute(
        exp,  # type: Experiment
        env,  # type: Environment
        alg,  # type: Algorithm
) -> Result:
    # number of steps of the experiment
    n_steps = exp.n_steps
    # structure used to store the results
    res = Result(exp, env, alg)
    for t in range(n_steps):
        if t % 100 == 0:
            print("Time step %d" % t)
            print(alg.__str__())
        # retrieve the arm that should be pulled
        arm_to_pull = alg.get_action(t)
        # pull the arm
        reward = env.pull_arm(arm_to_pull)
        # update the internal state of the algorithm
        alg.update(t, arm_to_pull, reward)
        # store this step
        res.store(t, arm_to_pull, reward)
    return res
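# A minimal sketch of the objects execute() expects. The stub names below
# (StubExperiment, BernoulliEnv, GreedyAlg) are assumptions for illustration,
# not part of the original code; the real Experiment/Environment/Algorithm
# classes are defined elsewhere.
import random

class StubExperiment:
    n_steps = 300

class BernoulliEnv:
    # two arms with different success probabilities
    probs = [0.3, 0.7]
    def pull_arm(self, arm):
        return 1 if random.random() < self.probs[arm] else 0

class GreedyAlg:
    def __init__(self):
        self.counts = [0, 0]
        self.sums = [0.0, 0.0]
    def get_action(self, t):
        # play each arm once, then exploit the best empirical mean
        if 0 in self.counts:
            return self.counts.index(0)
        return max((s / c, a) for a, (s, c) in
                   enumerate(zip(self.sums, self.counts)))[1]
    def update(self, t, arm, reward):
        self.counts[arm] += 1
        self.sums[arm] += reward
    def __str__(self):
        return "greedy %s" % (self.counts,)

# with a compatible Result class in scope:
# res = execute(StubExperiment(), BernoulliEnv(), GreedyAlg())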
def __init__(self, url):
    url = "http://kmdvalg.dk/kv/2013/" + url
    page = requests.get(url)
    if page.status_code != 200:
        return
    soup = BeautifulSoup(page.content, "html.parser")
    Result.__init__(self, soup)
    topmost_table = soup.find("table", "tableBottomColorKV")
    primary_table = topmost_table.find_next_sibling("table")
    municipality_info = primary_table.find("tr")
    description = municipality_info.find("td")
    counts = description.find_next_sibling("td")
    record = counts.find_next_sibling("td")
    sublist = record("tr", "statusText")[1:]
    words = [word for strings in sublist for word in strings.stripped_strings]
    self.__address = " ".join(words[1:])
    self.__title = " ".join(
        description.find("tr", "title").find_next_sibling("tr").stripped_strings)
def __init__(self, env, version, resultInfo={}):
    Result.__init__(self, env, resultInfo)
    self.OBJECT_HEADER = """
 ___ ______ ______ ______ ______ ______ ______ ______ ______ ___
__)(__ __)(__ __)(__ __)(__ __)(__ __)(__ __)(__ __)(__ __)(__
(______)(______)(______)(______)(______)(______)(______)(______)(______)
"""
    self.OBJECT_FOOTER = self.OBJECT_HEADER
    self.PASS_BANNER = """
 .--. .--. .--. .--. .--. .--. .--. .
:::::.\\::::::::.\\::::::::.\\::::::::.\\::::::::.\\::::::::.\\::::::::.\\::::::
' `--' `--' `--' `--' `--' `--' `--'
"""
    self.FAIL_BANNER = """
 __ __ __ __ __ __ __ __ __ __ __ __ __ __ __
_\/_ _\/_ _\/_ _\/_ _\/_ _\/_ _\/_ _\/_ _\/_ _\/_ _\/_ _\/_ _\/_ _\/_ _\/_
\/\/ \/\/ \/\/ \/\/ \/\/ \/\/ \/\/ \/\/ \/\/ \/\/ \/\/ \/\/ \/\/ \/\/ \/\/
"""
    self.STATUS_FORMAT = """{1} {2}: {3}
"""
    self.ANNOUNCE_FORMAT = """
{3} csmake - version %s
""" % version
    self.resultType = "csmake"
    self.PHASE_BANNER = """
 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
def main_model_gat(data, num_layers, hidden_list, activation, if_all=False):
    torch.backends.cudnn.deterministic = True
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = ModelGAT(
        num_layers=num_layers,
        hidden_list=hidden_list,
        activation=activation,
        data=data,
    )
    data.split_train_valid()
    model = model.to(device)
    data = data.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=5e-4)
    epoch = 1
    loss_train = float("inf")
    loss_valid = float("inf")
    best_loss_train = float("inf")
    best_loss_valid = float("inf")
    best_epoch = 0
    # Train until validation loss stops improving for 10 consecutive epochs
    while best_epoch + 10 >= epoch:
        model.train()
        optimizer.zero_grad()
        predict = model(data)
        loss_train = nll_loss(predict[data.mask_train], data.y[data.mask_train])
        loss_valid = nll_loss(predict[data.mask_valid], data.y[data.mask_valid])
        loss_train.backward()
        optimizer.step()
        if loss_valid < best_loss_valid:
            best_loss_train = loss_train
            best_loss_valid = loss_valid
            best_epoch = epoch
        epoch += 1
    model.eval()
    with torch.no_grad():
        result = model(data)
    if if_all:
        return Result(
            result=result.cpu(),
            loss_train=loss_train.cpu(),
            loss_valid=loss_valid.cpu(),
            acc_train=accuracy_score(
                data.y[data.mask_train].cpu().numpy().flatten(),
                result[data.mask_train].max(1)[1].cpu().numpy().flatten()),
            acc_valid=accuracy_score(
                data.y[data.mask_valid].cpu().numpy().flatten(),
                result[data.mask_valid].max(1)[1].cpu().numpy().flatten()),
            epoch=epoch - 1,
        )
    else:
        return Result(
            result=result[data.mask_test].max(1)[1].cpu().numpy().flatten(),
            loss_train=loss_train.cpu(),
            loss_valid=loss_valid.cpu(),
            acc_train=accuracy_score(
                data.y[data.mask_train].cpu().numpy().flatten(),
                result[data.mask_train].max(1)[1].cpu().numpy().flatten()),
            acc_valid=accuracy_score(
                data.y[data.mask_valid].cpu().numpy().flatten(),
                result[data.mask_valid].max(1)[1].cpu().numpy().flatten()),
            epoch=epoch - 1,
        )
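# The `while best_epoch + 10 >= epoch` loop above is patience-based early
# stopping: keep training while the best validation epoch is at most 10 epochs
# behind. The same pattern in isolation, on a toy loss sequence (the function
# and names here are illustrative only, not from the source):
def train_with_patience(losses, patience=10):
    best_loss, best_epoch, epoch = float("inf"), 0, 1
    for loss in losses:
        if loss < best_loss:
            best_loss, best_epoch = loss, epoch
        if epoch - best_epoch >= patience:
            break  # no improvement for `patience` epochs
        epoch += 1
    return best_epoch, best_loss

# train_with_patience([3.0, 2.5, 2.4] + [2.6] * 20) returns (3, 2.4)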
def dispatcher(process):
    f, case, dop, verbose = process
    # Assign some defaults in case of an error.
    err, res, dt = None, -1, -1
    try:
        s = time.time()
        res = f.process(case, dop)
        dt = time.time() - s
        # process() can return a tuple in the form (result, time)
        if type(res) is tuple:
            dt = res[1]
            res = res[0]
        result = Result(res, dop=dop, src_case=case, fname=f.display_name,
                        dt=dt, err=err)
    except:
        err = sys.exc_info()[0]
        result = Result(np.nan, dop=dop, src_case=case, fname=f.display_name,
                        dt=dt, err=err)
    if verbose:
        print(result)
    else:
        print('.', end='', flush=True)
    return result
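# dispatcher() is shaped for use with a process pool: each work item is a
# (function-wrapper, case, dop, verbose) tuple. A sketch with a stub wrapper;
# SquareFn is an assumption (the real wrapper type is not shown), and
# dispatcher's module is assumed to import time, sys, numpy, and Result.
from multiprocessing import Pool

class SquareFn:
    display_name = "square"
    def process(self, case, dop):
        return case * case

if __name__ == "__main__":
    work = [(SquareFn(), case, 1, False) for case in range(8)]
    with Pool(4) as pool:
        results = pool.map(dispatcher, work)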
def getBM25F_text(self, tempBR, i):
    bmRes = []
    for index, br in enumerate(BugReport.trainBR):
        bmScore_unigram = 0
        bmScore_bigram = 0
        # To test whether a list is empty you can use len(myList), or the list
        # itself, since an empty list is falsy.
        for term in list(set(tempBR.totalWords_unigram).intersection(br.totalWords_unigram)):
            TFD = self.getTFD_unigram(term, index)
            # k_ctl = 2  # the k1_unigram control parameter; REP.weights['k1_uni'] is used instead
            bmScore_unigram += self.getIDF_unigram(term) * (
                TFD / (TFD + REP.weights['k1_uni'])) * self.getWQ_unigram(term, i)
        for term in list(set(tempBR.totalWords_bigram).intersection(br.totalWords_bigram)):
            TFD2 = self.getTFD_bigram(term, index)
            # k_ctl = 2  # the k1_bigram control parameter
            bmScore_bigram += self.getIDF_bigram(term) * (
                TFD2 / (TFD2 + REP.weights['k1_bi'])) * self.getWQ_bigram(term, i)
        res = REP.weights['w1'] * bmScore_unigram
        res += REP.weights['w2'] * bmScore_bigram
        rTemp = Result()
        rTemp.bug_id = BugReport.trainBR[index].bugID
        rTemp.docID = index
        rTemp.REP = res
        bmRes.append(rTemp)
    return bmRes
def update_policy_info(self, p_policy_no, p_effect_date):
    result = Result()
    flag = self.cursor.callfunc('pubcde.l_tmp_sys_test.update_policy_info',
                                cx_Oracle.STRING,
                                [p_policy_no, p_effect_date])
    if flag == 'Y':
        result.flag = True
    return result
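# The cx_Oracle wrappers in this corpus (update_policy_info, bia_to_core_batch,
# execute_commit) assume a live connection and cursor on self. A sketch of that
# setup; the class name and the placeholder credentials/DSN are assumptions,
# not from the source:
import cx_Oracle

class OracleClient(object):
    def __init__(self, user, password, dsn):
        # dsn is e.g. "host:1521/service_name"
        self.conn = cx_Oracle.connect(user, password, dsn)
        self.cursor = self.conn.cursor()

    def close(self):
        self.cursor.close()
        self.conn.close()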
def extract_results(search_term, condition=None):
    if condition == 'new':
        return []
    url = util.create_url(MAIN_URL, search_term, DELIMITER)
    page = urllib.request.urlopen(url)
    soup = BeautifulSoup(page, "html.parser")
    table = soup.find('div', class_='content-area')
    rows = table.findAll("article")
    results = []
    for row in rows:
        new_result = Result(
            row.find('h1', class_="entry-title").find("a").find(text=True))
        result_url = row.find('a').get('href')
        # scrape the rest from the result's own page
        result_soup = BeautifulSoup(urllib.request.urlopen(result_url),
                                    "html.parser")
        new_result.url = result_url
        new_result.price = util.get_price(
            result_soup.find('span', class_="amount").find(text=True))
        new_result.image_src = result_soup.find(
            'div', class_='images').find('img').get('src')
        if util.is_valid_price(new_result.price):
            results.append(new_result)
        if len(results) == 10:
            return results
    return results
def tempMatching(obj_query, query, dataset):
    """Perform template matching: slide the template image over the scene
    image and get scores for matches at each position."""
    # Load puzzle image in gray
    img = cv2.imread(query, 0)
    # Make a copy of puzzle image in color
    img_c = cv2.imread(query, 1)
    methods = ['cv2.TM_SQDIFF']
    method_names = ['Squared Difference']
    for index in range(len(methods)):
        method = eval(methods[index])
        # Create a Result object for the given matching method; the method
        # name sets the variation variable
        obj_result = Result("Template", method_names[index])
        results = {}
        # Iterate through the dataset, template-matching each image against
        # the given puzzle/query image
        for i in range(len(dataset)):
            # Load query image
            template = cv2.imread(dataset[i], 0)
            # Get the dimensions of the query image
            h, w = template.shape[:2]
            # Apply template matching
            res = cv2.matchTemplate(img, template, method)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            # For TM_SQDIFF and TM_SQDIFF_NORMED the best match is the minimum
            if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
                top_left = min_loc
                match_val = min_val
            else:
                top_left = max_loc
                match_val = max_val
            results[dataset[i]] = match_val
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            # sort matches ascending (smaller difference is better)
            reverse = False
        else:
            reverse = True
        results = sorted([(v, k) for (k, v) in results.items()], reverse=reverse)
        # Store the best four results in the Result object
        for j in range(4):
            obj_result.add_match(str(results[j][1]))
        obj_query.add_result(obj_result)
    return
def __writeResults(self):
    """
    Write results to the results file.
    """
    results_file = open(self.__results_filename, "w")
    for product_set in self.__product_dict.values():
        for product in product_set:
            result = Result(product, self.__product_listing_dict[product])
            results_file.write(result.toJson())
            results_file.write("\n")
    results_file.close()
def createController(db: Session, createRequest: Schema.UserCreateRequest):
    if not validateUpdateRequest(createRequest):
        return Result(MessageType.getMessageDetail(MessageType.INVALID_PARAM), None)
    if not validateEmail(createRequest.email):
        return Result(MessageType.getMessageDetail(MessageType.EMAIL_NOT_VALID), None)
    isExistUser = checkExistingUsername(db, createRequest)
    if isExistUser is not None:
        return Result(MessageType.getMessageDetail(MessageType.USER_EXISTED), None)
    userCreate = create_user(db, createRequest)
    return Result(MessageType.getMessageDetail(MessageType.SUCCESS), userCreate)
def bia_to_core_batch(self, pono):
    result = Result()
    p_o_flag = self.cursor.var(cx_Oracle.STRING)
    p_o_msg = self.cursor.var(cx_Oracle.STRING)
    res = self.cursor.callproc("l_bia_eod_batch.bia_sync_to_core_by_pono",
                               [pono, p_o_flag, p_o_msg])
    self.conn.commit()
    if res[1] == "Y":
        result.flag = True
        result.msg = res[2]
    return result
def execute_commit(self, sql):
    result = Result()
    try:
        self.cursor.execute(sql)
        self.conn.commit()
        result.flag = True
    except Exception, e:
        result.flag = False
        result.msg = str(e)
        raise Exception(e)
    return result
def updateController(db: Session, updateRequest: Schema.UserUpdateRequest):
    if not validateUpdateRequest(updateRequest):
        return Result(MessageType.getMessageDetail(MessageType.INVALID_PARAM), None)
    if not validateEmail(updateRequest.email):
        return Result(MessageType.getMessageDetail(MessageType.EMAIL_NOT_VALID), None)
    isExistUser = login(db, updateRequest)
    if isExistUser is None:
        return Result(MessageType.getMessageDetail(MessageType.AUTHENTICATION_FAILED), None)
    userUpdate = update_user(db, updateRequest)
    return Result(MessageType.getMessageDetail(MessageType.SUCCESS), userUpdate)
def prior(input):
    parsed = self(input)
    if parsed.is_successful:
        next_parsed = other(parsed.reminder)
        if next_parsed.is_successful:
            return Result.success(parsed.value, next_parsed.reminder)
        else:
            return Result.failure(next_parsed.error, input)
    else:
        return parsed
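# prior() reads like the body of a keep-left sequencing combinator: run `self`,
# and on success feed the remaining input (spelled `reminder` here) to `other`,
# keeping the first parser's value. A self-contained sketch of the surrounding
# types; this local Result and the char() parser are inferred assumptions, not
# the original definitions.
class Result:
    def __init__(self, ok, value=None, reminder=None, error=None):
        self.is_successful = ok
        self.value = value
        self.reminder = reminder
        self.error = error

    @classmethod
    def success(cls, value, reminder):
        return cls(True, value=value, reminder=reminder)

    @classmethod
    def failure(cls, error, reminder):
        return cls(False, error=error, reminder=reminder)

def char(c):
    # parser that consumes a single expected character
    def parse(text):
        if text.startswith(c):
            return Result.success(c, text[1:])
        return Result.failure("expected %r" % c, text)
    return parse

# e.g. char('a')('abc') succeeds with value 'a' and reminder 'bc'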
def __init__(self, samples=[], labels=[], smote=True, v=[], percentage=20):
    super(Learner, self).__init__()
    self.samples = samples
    self.labels = labels
    self.smote_val = smote
    self.result = Result()
    self.predict = None
    self.data = Data()
    self.l = v
    self.per = percentage
def extract_results(search_word, condition=None):
    url = util.create_url(MAIN_URL, search_word, DELIMITER)
    url = url + '&cond=used' if condition != 'new' else url + '&cond=new'
    path_to_chromedriver = 'chromedriver.exe'
    option = webdriver.ChromeOptions()
    option.add_argument('headless')
    browser = webdriver.Chrome(executable_path=path_to_chromedriver,
                               options=option)
    browser.get(url)
    time.sleep(5)
    soup = BeautifulSoup(browser.page_source, "html.parser")
    equips = []
    try:
        sale_equips = soup.find_all('div', {'id': re.compile('listing_*')})
    except:
        return equips
    for equip in sale_equips:
        title = equip.find('h4').find('a').text.strip()
        equipment = Result(title)
        equipment.set_url(
            HOME_URL + equip.find('div', class_='row').find('a').get('href'))
        equipment.set_image_src(equip.find('img').get('src'))
        equipment.set_price(util.get_price(equip.find('span', class_='price')))
        if util.is_valid_price(equipment.get_price()):
            equips.append(equipment)
        if len(equips) == 10:
            return equips
    return equips
def extract_results(search_word, condition=None):
    if condition == 'new':
        return []
    url = util.create_url(MAIN_URL, search_word, DELIMITER)
    path_to_chromedriver = 'chromedriver.exe'
    option = webdriver.ChromeOptions()
    option.add_argument('headless')
    browser = webdriver.Chrome(executable_path=path_to_chromedriver,
                               options=option)
    browser.get(url)
    time.sleep(5)
    soup = BeautifulSoup(browser.page_source, "html.parser")
    product_grid = soup.find('ul', class_='product_list p_list')
    try:
        total_equips = product_grid.find_all(
            'li', {"class": re.compile('p_list_item*')})
    except:
        return []
    equips = []
    for equip in total_equips:
        title = equip.find('div', class_='title').find('a').text
        equipment = Result(title)
        equipment.set_url(HOME_URL + equip.find('a').get('href'))
        equipment.set_image_src(
            HOME_URL + equip.find('div', class_='thumb').find('img').get('src'))
        price_text = equip.find('li', class_='price').text
        equipment.set_price(util.get_price(price_text))
        if util.is_valid_price(equipment.get_price()):
            equips.append(equipment)
        if len(equips) == 10:
            return equips
    return equips
def test_declareVictor_tie(self):
    result = Result()
    value1 = Player()
    value2 = Player()
    value1.name = "player1"
    value1.move = 0
    value1.hand = "Rock"
    value2.name = "player2"
    value2.move = 0
    value2.hand = "Rock"
    result.declareVictor(value1, value2)
    self.assertEqual(result.score, 2)
def start(self):
    self.getPlayers()
    # print(self.player1.name)
    # print(self.player2.name)
    scoreboard = Scoreboard()
    while self.replayGame != False:
        round = Round()
        round.playerTurn(self.player1, self.player2)
        # print(self.player1.move, self.player2.move)
        result = Result()
        result.declareVictor(self.player1, self.player2)
        roundScore = result.score
        scoreboard.updateScore(roundScore)
        replay = Replay()
        replay.replayGame()
        self.replayGame = replay.playAgain
@classmethod
def fromString(cls, source):
    sp = source.split(Movement.MOVESEP)
    if len(sp) != 5:
        raise ValueError("Invalid syntax of Movement %s" % (source))
    step = int(sp[0].strip())
    fpos = int(sp[1].strip())
    tpos = int(sp[2].strip())
    flag = int(sp[3].strip())
    result = Result.valueOf(sp[4].strip())
    return Movement(step, fpos, tpos, result, flag)
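# fromString() expects five MOVESEP-separated fields: step, from-position,
# to-position, flag, and a Result name decoded via Result.valueOf. Assuming
# MOVESEP is '|' and 'WIN' is a valid Result name (both assumptions), a
# serialized movement would look like:
#
#     Movement.fromString("3|12|15|0|WIN")
#
# which parses to Movement(step=3, fpos=12, tpos=15, result=Result.WIN, flag=0).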
def aloha(func):
    rand = lambda x: randint(0, x)
    results = []
    for numTags in [(x + 1) * 100 for x in range(10)]:
        resultTotal = Result(numTags)
        for i in range(1000):
            numSlots = 64
            result = Result(numTags)
            numTagsCount = numTags
            while numTagsCount != 0:
                slots = [0] * numSlots
                numCollision = 0
                for tag in range(numTagsCount):
                    slot = rand(numSlots - 1)
                    slots[slot] = slots[slot] + 1
                for slot in slots:
                    if slot == 0:
                        result.numEmpty = result.numEmpty + 1
                    elif slot == 1:
                        numTagsCount = numTagsCount - 1
                    else:
                        numCollision = numCollision + 1
                result.numCollision = result.numCollision + numCollision
                numSlots = func(numCollision)
            resultTotal.add(result)
        results.append(resultTotal / 1000)
    return results
def aloha(func):
    rand = lambda x: randint(0, x)
    results = []
    for numTags in [(x + 1) * 100 for x in range(10)]:
        resultTotal = Result(numTags)
        for i in range(1000):
            numSlots = 64
            result = Result(numTags)
            numTagsCount = numTags
            while numTagsCount != 0:
                slots = [0] * numSlots
                numCollision = 0
                numEmpty = 0
                numSuccess = 0
                for tag in range(numTagsCount):
                    slot = rand(numSlots - 1)
                    slots[slot] = slots[slot] + 1
                for slot in slots:
                    if slot == 0:
                        numEmpty = numEmpty + 1
                    elif slot == 1:
                        numSuccess = numSuccess + 1
                    else:
                        numCollision = numCollision + 1
                result.numCollision = result.numCollision + numCollision
                result.numEmpty = result.numEmpty + numEmpty
                numTagsCount = numTagsCount - numSuccess
                total = numCollision + numSuccess + numEmpty
                numSlots = func({'collision': numCollision,
                                 'empty': numEmpty,
                                 'success': numSuccess})
            resultTotal.add(result)
        results.append(resultTotal / 1000)
    return results
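# Both aloha() variants are parameterized by a frame-size policy: the variant
# directly above passes a stats dict, the earlier one passes the raw collision
# count. In dynamic framed-slotted ALOHA a common policy is to grow the frame
# with the collision count; the lambdas below are illustrative assumptions,
# not from the source, and both calls need the Result class (with add() and
# division) plus randint in scope:
from random import randint

# results = aloha(lambda stats: max(1, 2 * stats['collision']))   # dict-based
# results = aloha(lambda collisions: max(1, 2 * collisions))      # count-based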
def result(id):
    db = DBConnection()
    for index, file in enumerate(db.files.find({'projectid': id, 'type': 'result'})):
        docid = file['_id']
        for result in db.con.vcad.fs.files.find({'docid': docid}):
            f = db.files.fs.get(result['_id'])
            filename = 'results/' + str(file['name']) + '.zip'
            zipResults = open(filename, 'wb')
            zipResults.write(f.read())
            zipResults.close()
            with zipfile.ZipFile(filename, "r") as z:
                z.extractall("results/")
            os.remove(filename)
    result = Result()
    # Validate results
    if result.validateResults():
        # Compile results
        result.compileResults(filename)
        # Send the final results to the user
        download()
    else:
        print 'incomplete results sent from the middleware.'
        flash('incomplete results sent from the middleware.')
        return redirect(url_for('dashboard'))
def processIdenticalStmt(src_ast_list, cluster, id):
    caller = {}
    merged_list = []
    merged_code = []
    for i in cluster:
        filename = i.getSourceFile().getFileName()
        # src_ast_list[str(filename)].output("./sandbox/test.out")
        lines = tagging(i)[1:]
        code_snippet = generateCodeSnippet(i)
        merged = type1_dealer.generateNewCode(id, code_snippet, lines,
                                              src_ast_list[str(filename)],
                                              tagging(i))
        merged_list.append(merged)
        merged_code.append(merged.get_code())
        caller[tagging(i)] = merged.caller
        merged.tag = tagging(i)
    for i in range(1, len(merged_list)):
        cur_merge = merged_list[i]
        cur_code = merged_code[i]
        if cur_code != merged_code[0]:
            print "DIFFERENT!"
            if cur_merge.code_lines[1:-1] != merged_list[0].code_lines[1:-1]:
                print "STILL_DIFF"
                stat["ErrorStmt"] += 1
                return False
            else:
                merged_list = type1_dealer.mergeDiffResults(merged_list)
                merged_code = []
                for i in merged_list:
                    merged_code.append(i.get_code())
                    line_len = len(i.code_lines)
                    caller[i.tag] = i.caller
                for i in range(1, len(merged_list)):
                    cur_merge = merged_list[i]
                    cur_code = merged_code[i]
                    if cur_code != merged_code[0]:
                        print "DIFFERENT AGAIN!"
                        stat["ErrorStmt"] += 1
                        return False
    code = merged_code[0]
    m = Result()
    m.add_code(code)
    m.lines = line_len
    for s in cluster:
        tag = tagging(s)
        m.add_tag(tag)
        m.add_caller(tag, caller[tag])
    m.check_diff_pak()
    m.external = (len(merged_list[0].param), len(merged_list[0].return_vars))
    mergeResults.append(m)
    return True
def processNonIdenticalStmt(src_ast_list, cluster, id):
    # raw_input()
    caller = {}
    merged_list = []
    merged_code = []
    for i in cluster:
        filename = i.getSourceFile().getFileName()
        # src_ast_list[str(filename)].output("./sandbox/test.out")
        lines = tagging(i)[1:]
        code_snippet = generateCodeSnippet(i)
        merged = type3_dealer.generateNewCode(id, code_snippet, lines,
                                              src_ast_list[str(filename)],
                                              tagging(i))
        merged_list.append(merged)
    if type3_dealer.checkMergable(merged_list):
        type3_dealer.generateCommonCode(merged_list)
        for i in range(len(merged_list)):
            line_len = len(merged_list[i].code_lines)
            merged_code.append(merged_list[i].get_code())
            caller[tagging(cluster[i])] = merged_list[i].caller
            merged_list[i].tag = tagging(cluster[i])
    else:
        print "NOT MERGABLE"
        stat["DiffStmt"] += 1
        return False
    for i in range(1, len(merged_list)):
        cur_code = merged_code[i]
        if cur_code != merged_code[0]:
            print "UNMERGABLELY DIFFERENT!"
            stat["DiffStmt"] += 1
            # for m in merged_list:
            #     m.output()
            return False
    code = merged_code[0]
    m = Result()
    m.lines = line_len
    m.add_code(code)
    for s in cluster:
        tag = tagging(s)
        m.add_tag(tag)
        m.add_caller(tag, caller[tag])
    m.check_diff_pak()
    m.external = (len(merged_list[0].param), len(merged_list[0].return_vars))
    mergeResults.append(m)
    return True
from Result import Result

request = Result()
request.insert_request("db.orders.remove({})")
request.generation_request_insert()
request.result_data()
if options.batch:
    filenames = [[options.file + f, int(f)] for f in listdir(options.file)]
else:
    filenames = [[options.file, options.keyReference]]
shared = getShared()
# start executing the user commands
if options.reset:
    resetDB()
    initAllTests(engine, shared)
    exit()
if options.result:
    result = Result(engine, options.result, client)
    print result.getAll()
    exit()
if options.file:
    cores = min(config.getint('keycheck', 'NumberCores'), len(filenames))
    pool = ExitPool(cores, initWorker,
                    [testSet, client, options.makePerm, options.verbose, shared],
                    exitWorker)
    result = pool.map(test, filenames, chunksize=1)
    pool.close()
    pool.join()
if options.globalTest:
    tester = Tester(testSet, engine, shared)
    result = [doTest(tester, None, None, None, None, client,
                     options.makePerm, options.verbose, engine)]
    tester.release()
def processIdenticalDef(src_ast_list, cluster):
    caller = {}
    merged_code = []
    unreach_num = []
    for i in cluster:
        filename = i.getSourceFile().getFileName()
        # src_ast_list[str(filename)].output("./sandbox/test.out")
        lines = tagging(i)[1:]
        code_snippet = generateCodeSnippet(i)
        merged = type0_dealer.generateNewCode(code_snippet, lines,
                                              src_ast_list[str(filename)],
                                              tagging(i))
        line_len = len(merged.code_lines)
        if merged.unreachable:
            unreach_num.append(len(merged.unreachable))
        else:
            unreach_num.append(0)
        merged_code.append(merged.get_code())
        caller[tagging(i)] = merged.caller
    for i in merged_code[1:]:
        if i != merged_code[0]:
            stat["ErrorDef"] += 1
            return False
    code = merged_code[0]
    # print code
    m = Result()
    m.lines = line_len + len(caller)
    m.add_code(code)
    for s in cluster:
        tag = tagging(s)
        m.add_tag(tag)
        m.add_caller(tag, caller[tag])
    m.check_diff_pak()
    m.external = (max(unreach_num), 0)
    mergeResults.append(m)
    return True
from Setting import Setting
import numpy as np

# --------------------------- run the exp ----------------------------
if 1:
    k = 5
    fold = 10
    dataset = Dataset('', '')
    dataset.file_folder_path = '../data/input/'
    method = Method('', '')
    method.k = k
    evaluation = Evaluation('')
    result = Result('', '')
    result.k = k
    setting = Setting('', '', dataset, method, result, evaluation)
    setting.fold = fold
    setting.load_classify_save()

if 1:
    fold = 10
    k = 5
    result = Result('', '')
    result.k = k
    evaluation = Evaluation('')
    evaluation_result_of_each_fold = []
def __init__(self):
    # Set up the "tulsiclient" logger
    self.logger = logging.getLogger("tulsiclient")
    # Read configuration parameters from tulsiclient.conf
    try:
        self.conf = ConfigParser.ConfigParser()
        self.conf.read('tulsiclient.conf')
        udp_ip = self.conf.get('tulsi', 'host')
        udp_port = int(self.conf.get('tulsi', 'port'))
        # Log the host and port of tulsi
        self.logger.info('The IP of the host: %s', udp_ip)
        self.logger.info('The port number of the host: %s', udp_port)
    except:
        # The tulsi configuration could not be read
        self.logger.error('The tulsi configuration file is not found')
    # Create objects for ProxyNodeInfo, StorageNodeInfo,
    # and Result for node status
    proxy_node = ProxyNodeInfo()
    storage_node = StorageNodeInfo()
    result = Result()
    # Initialize empty lists
    self.storage_node_read = []
    self.proxy_node_read = []
    self.storage_node_drive = []
    self.storage_node_status = []
    self.storage_node_list = []
    self.proxy_node_list = []
    self.storage_node_list_storage_suffix = []
    self.storage_node_list_drive_suffix = []
    self.storage_node_list_suffix_id = []
    self.storage_node_drives_file = []
    self.storage_node_storage_file = []
    self.storage_node_status_storage_file = []
    self.proxy_node_list_proxy_suffix = []
    self.proxy_node_status_file = []
    # Open a UDP socket and bind it to the configured address
    sock = socket.socket(socket.AF_INET,  # Internet
                         socket.SOCK_DGRAM)  # UDP
    sock.bind((udp_ip, udp_port))
    # Continuously receive messages from the Tulsi server to get the
    # updated status of the storage/proxy nodes
    while True:
        self.data, self.addr = sock.recvfrom(65507)
        self.storage_node_list = storage_node.read_storage_node_list(
            self.data, self.storage_node_list)
        self.proxy_node_list, self.proxy_node_status_file = \
            proxy_node.read_proxy_node_list(self.data)
        result.write_config_ui(self.proxy_node_list,
                               self.storage_node_list,
                               self.storage_node_drives_file)
        result.write_status_ui(self.storage_node_storage_file,
                               self.proxy_node_status_file,
                               self.storage_node_status_storage_file)
        self.storage_node_drives_file, self.storage_node_storage_file, \
            self.storage_node_status_storage_file = \
            storage_node.read_storage_node_config(self.data,
                                                  self.storage_node_list)