def isValid(name, gender, age, perType, favoriteOS, seeking, minAge, maxAge):
    """Validate the profile-form fields.

    Returns an error-message string describing the first problem found,
    or "" when every field passes.
    """
    # The name must contain at least one non-blank character anywhere.
    # re.search, not re.match: re.match(r'\S') only inspects the first
    # character, so a name with a leading space was wrongly rejected.
    if not re.search(r'\S', name):
        return "The name can not be blank. "
    # The age must be an integer between 0 and 99, i.e. at most two digits.
    # (The old pattern r'\d+$' accepted "150" and beyond.)
    if not re.match(r'\d{1,2}$', age):
        return "The age must be integer between 0 and 99. "
    # The personality type must be a complete four-letter MBTI code;
    # the trailing $ rejects extra characters after the code.
    if not re.match(r'[IE][NS][FT][JP]$', perType):
        return "The personality type is not in correct formula. The first letter should be " \
               "I or E, the second N or S, the third F or T, the fourth J or P. For example, it shoule " \
               "be INFJ or ESTP. "
    # At least one "seeking" option must be chosen.
    if not seeking:
        return "The seeking should not be empty, you must choose at least one. "
    # Both bounds of the desired age range must be integers between 0 and 99.
    age_pattern = re.compile(r'\d{1,2}$')
    if not age_pattern.match(minAge) or not age_pattern.match(maxAge):
        return "The between ages must be integer between 0 and 99. "
    # The lower bound must not exceed the upper bound.
    if locale.atoi(maxAge) < locale.atoi(minAge):
        return "In between ages, the min age should be less than the max age. "
    return ""  # No problems found.
def _build_cpu_info(self):
    """Populate self._cpu with details scraped from ``lscpu`` output.

    Best effort: any failure (missing lscpu, unexpected output format)
    silently leaves self._cpu with whatever was parsed so far.
    """
    try:
        pipe = os.popen('lscpu')
        text = pipe.read()
        pipe.close()
        # (target key, lscpu label regex, conversion applied to the capture)
        fields = [
            ('model', 'Model name:\s+(.*)', lambda s: s),
            ('sockets', 'Socket\(s\):\s+(\d+)', locale.atoi),
            ('clock', 'CPU MHz:\s+(.*)', lambda s: int(locale.atof(s) * 1e6)),
            ('clockMax', 'CPU max MHz:\s+(.*)', lambda s: int(locale.atof(s) * 1e6)),
            ('clockMin', 'CPU min MHz:\s+(.*)', lambda s: int(locale.atof(s) * 1e6)),
            ('cores_per_sockets', 'Core\(s\) per socket:\s+(.*)', locale.atoi),
            ('threads_per_core', 'Thread\(s\) per core:\s+(.*)', locale.atoi),
        ]
        for key, pattern, convert in fields:
            found = re.search(pattern, text)
            if found:
                self._cpu[key] = convert(found.group(1))
    except:
        pass
def ConvertVenueToCourseClass(strVenue):
    """Parse a venue string of the form "name-location|Par N <length>".

    Returns a Course with name, location and (when present) par/length set.
    """
    match = re.match(r'([^-\|]+)-*([^\|]*)[\|]*(.*)', strVenue)
    newCourse = Course()
    # enumerate keeps the group index correct even when a branch skips a
    # group: the original manual counter was not incremented on "continue",
    # so later groups could be handled under the wrong index.
    for idx, group in enumerate(match.groups()):
        if group is None:
            continue
        if idx == 0:
            newCourse.name = group.strip()
        elif idx == 1:
            newCourse.location = group.strip()
        elif idx == 2:
            # Parse par value and length from e.g. "Par 72 7,200 yards".
            lengthGroups = re.match(r'Par (\d+) (\d*,*\d+).*$', group.strip())
            if lengthGroups is None:
                continue
            newCourse.par = locale.atoi(lengthGroups.group(1).strip())
            newCourse.length = locale.atoi(lengthGroups.group(2).strip())
    return newCourse
def parse_instance(tr, inst2family):
    """Build an Instance from one row (<tr>) of the current-generation table.

    inst2family maps an instance type string to its family name.
    """
    inst = Instance()
    cells = tr.xpath('td')
    assert len(cells) == 12, "Expected 12 columns in the table, but got %d" % len(cells)
    inst.instance_type = totext(cells[0])
    inst.family = inst2family.get(inst.instance_type, "Unknown")
    # Some t2 instances support 32-bit arch
    # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-resize.html#resize-limitations
    if inst.instance_type in ('t2.micro', 't2.small'):
        inst.arch.append('i386')
    inst.vCPU = locale.atoi(totext(cells[1]))
    inst.memory = locale.atof(totext(cells[2]))
    storage = totext(cells[3])
    drives = re.search(r'(\d+)\s*x\s*([0-9,]+)?', storage)
    inst.ssd = False
    if drives:
        # Instance storage spec such as "2 x 1,024 SSD".
        inst.ebs_only = False
        inst.num_drives = locale.atoi(drives.group(1))
        inst.drive_size = locale.atof(drives.group(2))
        inst.ssd = 'SSD' in totext(cells[3])
    else:
        assert storage == 'EBS Only', "Unrecognized storage spec: %s" % (storage,)
        inst.ebs_only = True
    inst.ebs_optimized = totext(cells[10]).lower() == 'yes'
    inst.network_performance = totext(cells[4])
    inst.enhanced_networking = totext(cells[11]).lower() == 'yes'
    inst.generation = 'current'
    return inst
def read_file(self):
    """Parse the "key = value" test-specification file into
    self.base_type / self.length / self.lower / self.upper."""
    locale.setlocale(locale.LC_ALL, 'en_US.UTF8')
    with open(config.Arguments.test_specification_file, 'r') as spec:
        for line in spec:
            index = line.find('=')
            if index == -1:
                debug.exit_message("Found an invalid line '%s' in the test specification file" % line)
            else:
                key = line[:index].strip().lower()
                rhs = line[index+1:].strip()
                if key == "type":
                    self.base_type = rhs
                elif key == "length":
                    try:
                        self.length = int(rhs)
                    except:
                        debug.exit_message("The length of the test vector must be a non-negative integer. It is '%s'." % rhs)
                elif key == "lower":
                    try:
                        self.lower = locale.atoi(rhs)
                    except:
                        debug.exit_message("The lower bound on the range of elements in the test vector must be an integer. It is '%s'." % rhs)
                elif key == "upper":
                    try:
                        self.upper = locale.atoi(rhs)
                    except:
                        debug.exit_message("The upper bound on the range of elements in the test vector must be an integer. It is '%s'." % rhs)
                else:
                    debug.exit_message("Do not understand the line '%s' in the test specification file" % line)
def dihedral(coor, dihedralparam):
    """Sum the CHARMM-style dihedral energy V = k * (1 + cos(n*xi - t)).

    Each dihedralparam entry is [(four atom-index strings), (k, n, t), ...]
    with an optional second (k, n, t) tuple for multi-term dihedrals.

    BUG FIX: the original left debug statements (including "print 3/0")
    in the multi-term branch, so any dihedral with a second term crashed
    with ZeroDivisionError and contributed no energy.  Both terms are now
    accumulated.
    """
    dihedralenergy = 0.0
    deg2rad = numpy.pi / 180.0
    for param in dihedralparam:
        quad = param[0]
        atm1 = locale.atoi(quad[0])
        atm2 = locale.atoi(quad[1])
        atm3 = locale.atoi(quad[2])
        atm4 = locale.atoi(quad[3])
        coor1 = coor[atm1]
        coor2 = coor[atm2]
        coor3 = coor[atm3]
        coor4 = coor[atm4]
        xi = deg2rad * sascalc.vdihed(coor1, coor2, coor3, coor4)
        # First (always present) Fourier term.
        k1xi = locale.atof(param[1][0])
        n1xi = locale.atof(param[1][1])
        t1xi = deg2rad * locale.atof(param[1][2])
        dihedralenergy += k1xi * (1.0 + numpy.cos(n1xi * xi - t1xi))
        if len(param) > 2:
            # Optional second Fourier term.
            k2xi = locale.atof(param[2][0])
            n2xi = locale.atof(param[2][1])
            t2xi = deg2rad * locale.atof(param[2][2])
            dihedralenergy += k2xi * (1.0 + numpy.cos(n2xi * xi - t2xi))
    return dihedralenergy
def getangles(atoms, angles, pangles):
    """Assign CHARMM angle parameters to each angle in the topology.

    ANGLES:
      V(angle)        = Ktheta(Theta - Theta0)**2
      V(Urey-Bradley) = Kub(S - S0)**2
      Ktheta: kcal/mole/rad**2   Theta0: degrees
      Kub:    kcal/mole/A**2     S0: A

    atoms:   per-atom records where atoms[i][1] is the atom type name
    angles:  triples of 1-based atom-number strings
    pangles: parameter rows [type1, type2, type3, Ktheta, Theta0, (Kub, S0)]

    Returns a list of [[0-based index strings], [parameter values]].

    Fixes: the forward/reverse match branches were byte-for-byte duplicates
    (now merged), and the py2-only print statement is now a function call.
    """
    angleparam = []
    for angle in angles:
        # Convert the 1-based atom numbers to 0-based indices.
        idx1 = locale.atoi(angle[0]) - 1
        idx2 = locale.atoi(angle[1]) - 1
        idx3 = locale.atoi(angle[2]) - 1
        name1 = atoms[idx1][1]
        name2 = atoms[idx2][1]
        name3 = atoms[idx3][1]
        for row in pangles:
            # The two outer atom types may match in either order; the
            # central type must always match.
            forward = row[0] == name1 and row[2] == name3
            reverse = row[0] == name3 and row[2] == name1
            if (forward or reverse) and row[1] == name2:
                angleparam.append([
                    [str(idx1), str(idx2), str(idx3)],
                    list(row[3:]),
                ])
                break
    print('assigned ', len(angleparam), ' angle parameters')
    return angleparam
def parse_prev_generation_instance(tr):
    """Build an Instance from one row of the previous-generation table."""
    inst = Instance()
    cells = tr.xpath('td')
    assert len(cells) == 8, "Expected 8 columns in the table, but got %d" % len(cells)
    inst.family = totext(cells[0])
    inst.instance_type = totext(cells[1])
    archs = totext(cells[2])
    inst.arch = []
    # The arch cell may advertise 32-bit, 64-bit, or both.
    if '32-bit' in archs:
        inst.arch.append('i386')
    if '64-bit' in archs:
        inst.arch.append('x86_64')
    assert inst.arch, "No archs detected: %s" % (archs,)
    inst.vCPU = locale.atoi(totext(cells[3]))
    inst.memory = locale.atof(totext(cells[4]))
    storage = totext(cells[5])
    drives = re.search(r'(\d+)\s*x\s*([0-9,]+)?', storage)
    inst.ssd = False
    if drives:
        # Instance storage spec such as "2 x 1,024 SSD".
        inst.ebs_only = False
        inst.num_drives = locale.atoi(drives.group(1))
        inst.drive_size = locale.atof(drives.group(2))
        inst.ssd = 'SSD' in totext(cells[5])
    else:
        assert storage == 'EBS Only', "Unrecognized storage spec: %s" % (storage,)
        inst.ebs_only = True
    inst.ebs_optimized = totext(cells[6]).lower() == 'yes'
    inst.network_performance = totext(cells[7])
    inst.enhanced_networking = False
    inst.generation = 'previous'
    return inst
def parse(self, response):
    """Yield one RfdItem per forum-thread row, skipping dead threads.

    Rows that cannot be parsed are logged and skipped (best effort).
    """
    for sel in response.xpath('//ol[@class="threads"]').xpath('li'):
        try:
            item = RfdItem()
            title = sel.xpath('.//a[@class="title"]')
            item['title'] = title.xpath('text()').extract()[0]
            item['link'] = title.xpath('@href').extract()[0]
            nums = sel.xpath('div/div[contains(@class, "threadstats")]')
            replies = nums[0].xpath('.//a/text()')[0].extract()
            try:
                item['replies'] = locale.atoi(replies)
            except ValueError:
                # A placeholder such as "-" means no replies yet.
                item['replies'] = 0
            views = nums[1].xpath('.//text()')[0].extract().strip()
            try:
                item['views'] = locale.atoi(views)
            except ValueError:
                item['views'] = 0
            if item['views'] == 0 and item['replies'] == 0:
                continue
            str_time = sel.xpath('.//div[@class="author"]').xpath('.//a/@title').extract()[0].split("on ")[1]
            # Strip English ordinal suffixes ("1st," -> "1,") so strptime parses.
            str_time = re.sub(r"(st|nd|rd|th),", ",", str_time)
            item['started'] = datetime.strptime(str_time, '%b %d, %Y %I:%M %p')
            yield item
        except Exception:
            # Narrowed from a bare except (which also swallowed
            # KeyboardInterrupt); log the unparsable row and keep going.
            print("Failed Parsing:", sel.extract())
def read_fastq(self, pool):
    """Count reads and total base pairs across all libraries in the pool.

    Prefers pre-computed "<file>.summary" sidecars (totalBp<TAB>reads per
    line); otherwise parses the FASTQ directly.  Sets self.reads,
    self.totalBp and self.meanReadLen.

    BUG FIX: the paired-library FASTQ branch previously *assigned*
    self.totalBp / self.reads (clobbering counts accumulated from earlier
    libraries) instead of adding to them.
    """
    self.reads = 0
    self.totalBp = 0
    try:
        locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
    except locale.Error:
        # Locale not installed on this host; keep the current default so
        # plain (unseparated) integers still parse.
        pass
    for lib in pool.libs:
        if os.path.exists(lib.forward + ".summary"):
            with open(lib.forward + ".summary") as summaryReader:
                for line in summaryReader:
                    fields = line.split("\t")
                    self.reads += locale.atoi(fields[1])
                    self.totalBp += locale.atoi(fields[0])
            if lib.reversed is not None and os.path.exists(lib.reversed + ".summary"):
                with open(lib.reversed + ".summary") as summaryReader:
                    for line in summaryReader:
                        fields = line.split("\t")
                        self.reads += locale.atoi(fields[1])
                        self.totalBp += locale.atoi(fields[0])
        else:
            reads = 0
            totalBp = 0
            for rec in SeqIO.parse(lib.forward, "fastq"):
                reads += 1
                totalBp += len(rec)
            if lib.reversed is not None:
                # Paired library: assume the reverse file mirrors the
                # forward one, and accumulate (do not overwrite).
                self.totalBp += totalBp * 2
                self.reads += reads * 2
            else:
                self.totalBp += totalBp
                self.reads += reads
    self.meanReadLen = float(self.totalBp) / self.reads
def parse_tree(self, tree, package_name, version_code, out_dir):
    """Scrape app-listing fields from a store page tree and write them to
    <out_dir>/<package>-<version>.listing.json."""
    prop = self.get_property
    title = prop(tree, xpathExpressions.TITLE)
    description = prop(tree, xpathExpressions.DESCRIPTION)
    category = prop(tree, xpathExpressions.CATEGORY)
    price = prop(tree, xpathExpressions.PRICE)
    if len(price) > 1:
        # Price text looks like "<currency> <amount>"; keep the amount.
        price = price.split(' ')[1]
    date_published = prop(tree, xpathExpressions.DATE_PUBLISHED)
    os_version = prop(tree, xpathExpressions.OPERATING_SYSTEM)
    rating_count_text = prop(tree, xpathExpressions.RATING_COUNT)
    rating_count = locale.atoi(rating_count_text) if rating_count_text != '' else 0
    rating_text = prop(tree, xpathExpressions.RATING)
    rating = float(rating_text.split()[1]) if rating_text != '' else 0
    content_rating = prop(tree, xpathExpressions.CONTENT_RATING)
    creator = prop(tree, xpathExpressions.CREATOR)
    creator_url = prop(tree, xpathExpressions.CREATOR_URL)
    creator_address = prop(tree, xpathExpressions.CREATOR_ADDRESS)
    install_size = prop(tree, xpathExpressions.INSTALL_SIZE)
    download_count_text = prop(tree, xpathExpressions.DOWNLOAD_COUNT_TEXT)
    download_count = 0
    if download_count_text != '':
        # "10,000 - 50,000" -> lower bound as an integer.
        download_count = locale.atoi(download_count_text.split("-")[0].strip())
    privacy_url = prop(tree, xpathExpressions.PRIVACY_URL)
    whats_new = prop(tree, xpathExpressions.WHATS_NEW)
    app = json.dumps({"n": package_name,
                      "verc": version_code,
                      "t": title,
                      "desc": description,
                      "cat": category,
                      "pri": price,
                      "dtp": date_published,
                      "os": os_version,
                      "rct": rating_count,
                      "rate": rating,
                      "crat": content_rating,
                      "crt": creator,
                      "cadd": creator_address,
                      "curl": creator_url,
                      "sz": install_size,
                      "dct": download_count,
                      "dtxt": download_count_text,
                      "purl": privacy_url,
                      "new": whats_new
                      }, sort_keys=True, indent=4, separators=(',', ': '))
    out_file = os.path.join(out_dir, package_name + '-' + version_code + '.listing.json')
    with open(out_file, 'w') as f:
        f.write(app)
    self.log.info("Listing details info has been written at %s", out_file)
def angle(coor, angleparam):
    """Return (angle energy, Urey-Bradley energy) summed over all angles.

    Each angleparam entry is [(index strings), (Ktheta, Theta0[, Kub, S0])];
    Theta0 is given in degrees and converted to radians here.
    """
    angleenergy = 0.0
    ubenergy = 0.0
    deg2rad = numpy.pi / 180.0
    for param in angleparam:
        trio = param[0]
        a1 = locale.atoi(trio[0])
        a2 = locale.atoi(trio[1])
        a3 = locale.atoi(trio[2])
        c1 = coor[a1]
        c2 = coor[a2]
        c3 = coor[a3]
        theta = sasmath.calc_angle(c1, c2, c3)
        ktheta = locale.atof(param[1][0])
        theta0 = deg2rad * locale.atof(param[1][1])
        vub = 0.0
        if len(param[1]) > 2:
            # Optional Urey-Bradley 1-3 term.
            kub = locale.atof(param[1][2])
            so = locale.atof(param[1][3])
            delta = c3 - c1
            separation = math.sqrt(sum(delta * delta))
            vub = kub * (separation - so) ** 2.0
        angleenergy += ktheta * (theta - theta0) ** 2.0
        ubenergy += vub
    return angleenergy, ubenergy
def _parse_contig_coverage(line):
    """
    Parse contig coverage from a string, typically a line from the QualiMap
    result report.  If no contig coverage can be parsed, None is returned.

    :param line: the input string
    :return: dict {"[CONTIG] coverage": {contig info}} or None
    """
    try:
        contig, length, mapped, mean, stdev = re.search(
            r'^\s+(\S+)\s+([0-9]+)\s+([0-9]+)\s+(\S+)\s+(\S+)\s*$', line).groups()
        return {
            "{} coverage".format(contig): {
                "contig": contig,
                "length": locale.atoi(length),
                "mapped bases": locale.atoi(mapped),
                "mean coverage": locale.atof(mean),
                "standard deviation": locale.atof(stdev),
            }
        }
    except ValueError:
        # problems with the conversion to numeric values
        pass
    except AttributeError:
        # not a contig coverage row (re.search returned None)
        pass
def post(self):
    """Handle the profile form: validate, match against existing singles,
    render results and persist the new profile."""
    name = self.get_argument("Name")
    gender = self.get_argument("Gender")
    age = self.get_argument("Age")
    perType = self.get_argument("Personality_Type")
    favoriteOS = self.get_argument("Favorite_OS")
    seeking = self.get_argument("Seeking_Male", "") + self.get_argument("Seeking_Female", "")
    minAge = self.get_argument("Min_Age")
    maxAge = self.get_argument("Max_Age")
    # None when no picture was uploaded.
    pic_dict_list = self.request.files.get("Pic")
    # Form validation: an empty string means every field passed.
    isError = isValid(name, gender, age, perType, favoriteOS, seeking, minAge, maxAge)
    if isError:
        self.render("error.html", errorReason=isError)
    else:
        if pic_dict_list:
            # The user uploaded images: save them under "static/images".
            storeImage(pic_dict_list)
        matchUsers = matchAllUsers(name, gender, locale.atoi(age), perType, favoriteOS,
                                   seeking, locale.atoi(minAge), locale.atoi(maxAge))
        self.render("results.html", Name=name, MatchUsers=matchUsers)
        userInfo = [name, gender, age, perType, favoriteOS, seeking, minAge, maxAge]
        writeSinglesFile(userInfo)
def doTestDate():
    """Exercise date/time helpers: weekday math, +14-day arithmetic,
    datetime.now(), and parsing an "HH:MM" string with locale.atoi."""
    today = date.today()
    print("date:%d-%d-%d, weekDay:%d" % (today.year, today.month, today.day,
                                         today.weekday() + 1))
    later = today + timedelta(days=14)
    print("date:%d-%d-%d, weekDay:%d" % (later.year, later.month, later.day,
                                         later.weekday() + 1))
    now = datetime.now()
    print("time:%d:%d:%d, ms:%d" % (now.hour, now.minute, now.second,
                                    now.microsecond))
    clock = "12:30"
    hours, _, minutes = clock.rpartition(":")
    print("testTime:%d:%d" % (locale.atoi(hours), locale.atoi(minutes)))
    bgnMinMinus = 100
    # Integer division/modulo split minutes into an H:M pair.
    print("testMinus:%d:%d" % (bgnMinMinus // 60, bgnMinMinus % 60))
def getitem(i):
    """Convert one torrent listing element into a result dict.

    Returns 0 when no URL could be extracted.
    """
    url = i.dt.a['href']
    name = ''.join(i.dt.a.findAll(text=True))
    days = i.dd.span.span.string
    size = i.dd.find("span", "s").text
    # locale.atoi handles the thousands separators in the counts.
    seeds = locale.atoi(i.dd.find("span", "u").text)
    peers = locale.atoi(i.dd.find("span", "d").text)
    date_collected = i.dd.span.span["title"]
    # Exactly one "accept"-styled element marks a verified torrent.
    verified = 1 if len(i.findAll(attrs={"style": re.compile(".*accept.*")})) == 1 else 0
    result = {'name': name.strip(),
              'url': "http://torrents.eu" + url.strip(),
              'days': days.strip(),
              'size': size.strip(),
              'date_collected': date_collected.strip(),
              'seeds': seeds,
              'peers': peers,
              'verified': verified}
    return result if url is not None else 0
def process_file(f):
    """This is example of the data structure you should return.
    Each item in the list should be a dictionary containing all the relevant data
    from each row in each file. Note - year, month, and the flight data should be 
    integers. You should skip the rows that contain the TOTAL data for a year
    data = [{"courier": "FL",
             "airport": "ATL",
             "year": 2012,
             "month": 12,
             "flights": {"domestic": 100,
                         "international": 100}
            },
            {"courier": "..."}
    ]
    """
    data = []
    # The filename encodes "<courier>-<airport>" in its first 6 characters.
    courier, airport = f[:6].split("-")
    with open("{}/{}".format(datadir, f), "r") as html:
        soup = BeautifulSoup(html, 'lxml')
        tableData = soup.find_all("table", class_="dataTDRight")
        rows = tableData[0].findAll('tr')
        for row in rows[1:]:
            tds = row.find_all("td")
            if tds[1].get_text() != "TOTAL":
                # BUG FIX: build a fresh dict per row.  The old code
                # re-appended one shared dict, so every list entry aliased
                # the final row's values.
                info = {
                    "courier": courier,
                    "airport": airport,
                    "year": int(str(tds[0].get_text())),
                    "month": int(str(tds[1].get_text())),
                    "flights": {"domestic": locale.atoi(tds[2].get_text()),
                                "international": locale.atoi(tds[3].get_text())},
                }
                data.append(info)
    return data
def scrape_per_country_info(code, link):
    """Scrape the ASN table on one country page into {asn: details}.

    code: the country code recorded in each entry; link: the page URL.
    """
    soup = url_to_soup(link)
    locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
    parent_dict = dict()
    table_data = soup.find(id=URL_2_TABLE_ID)
    if table_data is not None:
        for row in table_data:
            for column in row:
                anchor = column.find('a')
                # BUG FIX: "anchor is not -1" compared identity against an
                # int literal (undefined behaviour, SyntaxWarning on modern
                # Python).  Use equality for the str.find-style -1 and an
                # identity check only for None.
                if anchor != -1 and anchor is not None:
                    ASN_CODE = anchor.string.replace("AS", "")
                    info = column.findAll('td')
                    name = info[1].string
                    Routesv4 = info[3].string
                    Routesv6 = info[5].string
                    child_dict = OrderedDict()
                    child_dict['Country'] = str(code)
                    if name is not None:
                        child_dict['Name'] = name.encode("utf-8")  # Fix for locale issue
                    child_dict['Routes v4'] = locale.atoi(Routesv4)
                    child_dict['Routes v6'] = locale.atoi(Routesv6)
                    parent_dict[locale.atoi(ASN_CODE)] = child_dict
    return parent_dict
def parse_instance(instance_type, product_attributes):
    """Build a scrape.Instance from pricing-API product attributes.

    Returns None for dedicated hosts (instance types without a '.').
    """
    i = scrape.Instance()
    i.instance_type = instance_type
    pieces = instance_type.split('.')
    if len(pieces) == 1:
        # Dedicated host that is not u-*.metal, skipping
        # May be a good idea to all dedicated hosts in the future
        return
    i.family = product_attributes.get('instanceFamily')
    if '32-bit' in product_attributes.get('processorArchitecture'):
        i.arch.append('i386')
    i.vCPU = locale.atoi(product_attributes.get('vcpu'))
    # Memory is given in form of "1,952 GiB", let's parse it
    i.memory = locale.atof(product_attributes.get('memory').split(' ')[0])
    i.network_performance = product_attributes.get('networkPerformance')
    if product_attributes.get('currentGeneration') == 'Yes':
        i.generation = 'current'
    else:
        i.generation = 'previous'
    gpu = product_attributes.get('gpu')
    if gpu is not None:
        i.GPU = locale.atoi(gpu)
    # ECU is either a number or the literal string 'Variable'.
    try:
        ecu = product_attributes.get('ecu')
        if ecu == 'Variable':
            i.ECU = 'variable'
        else:
            i.ECU = locale.atof(ecu)
    except (TypeError, ValueError):
        # Narrowed from a bare except: a missing (None) or malformed ECU is
        # simply left unset; any other error should propagate.
        pass
    i.physical_processor = product_attributes.get('physicalProcessor')
    # CPU features
    processor_features = product_attributes.get('processorFeatures')
    if processor_features is not None:
        if "Intel AVX-512" in processor_features:
            i.intel_avx512 = True
        if "Intel AVX2" in processor_features:
            i.intel_avx2 = True
        if "Intel AVX" in processor_features:
            i.intel_avx = True
        if "Intel Turbo" in processor_features:
            i.intel_turbo = True
    i.clock_speed_ghz = product_attributes.get('clockSpeed')
    enhanced_networking = product_attributes.get('enhancedNetworkingSupported')
    if enhanced_networking is not None and enhanced_networking == 'Yes':
        i.enhanced_networking = True
    return i
def _get_prices(self, soup):
    """Extract the tiered price breaks from the product page.

    Side effect: records the maximum orderable quantity in self.vqtyavail.
    Returns a list of VendorPrice objects (empty when no price table).
    """
    ptable = soup.find("div", id="ctl00_ctl00_NestedMaster_PageContent"
                                 "_ctl00_BuyProductDialog1_tierPricing")
    if ptable is None:
        self.vqtyavail = 0
        return []
    prices = []
    availq = None
    for row in ptable.find_all("tr"):
        if not len(row.findAll("td")):
            continue  # header / spacer row with no data cells
        price_text = row.find("span", id=re.compile(r"_Price$")).text.strip()
        price = locale.atof(price_text.replace("$", ""))
        qty_text = row.find("span", id=re.compile(r"_Range_From$")).text.strip()
        m = self.rex_price.match(qty_text)
        if not m:
            raise ValueError("Didn't get a qty from " + qty_text + " for " + self.vpno)
        # The MOQ is the range start when present, otherwise the range end.
        try:
            moq = locale.atoi(m.group("start"))
        except AttributeError:
            moq = locale.atoi(m.group("end"))
        maxq = locale.atoi(m.group("end"))
        if not availq or maxq > availq:
            availq = maxq
        prices.append(VendorPrice(moq, price, self._vendor.currency, oqmultiple=1))
    self.vqtyavail = availq
    return prices
def get_stats_for_match(match_url):
    """
    Given a match stat url, from premierleague.com,
    returns a stats dict with all match statistics
    """
    soup = BeautifulSoup(requests.get(match_url).text, 'lxml')
    season = match_url.split('/')[-3]
    # Managers come from the report page (last link in each team title).
    managers = [
        soup.select('div .teamtitle .homecol')[0].find_all('a')[-1],
        soup.select('div .teamtitle .awaycol')[0].find_all('a')[-1]
    ]
    stats_url = match_url.replace('match-report.html', 'match-stats.html')
    soup = BeautifulSoup(requests.get(stats_url).text, 'lxml')
    # Fixture info: "date | venue | Referee: X | Attendance N".
    fixture = soup.find('p', 'fixtureinfo').get_text().split('|')
    fixture = [re.sub(r'Referee: |Attendance ', '', part).strip() for part in fixture]
    fixture[-1] = locale.atoi(fixture[-1])  # attendance as an integer
    stats = dict(zip(['date', 'venue', 'referee', 'attendance'], fixture))
    stats['season'] = season
    stats['home_manager'] = managers[0].get_text().strip()
    stats['away_manager'] = managers[1].get_text().strip()
    # Teams and final score.
    home, away, score = [soup.find('td', c).get_text().strip()
                         for c in ['home', 'away', 'countscore']]
    stats['home_team'], stats['away_team'] = home, away
    stats['result'] = score.replace(' ', '')
    for loc in ['home', 'away']:
        goals = soup.find('span', '%sScore' % loc).get_text().strip()
        goals_details = [li.get_text().strip()
                         for li in soup.find('div', '%s goals' % loc).find_all('li')]
        stats['%s_goals' % loc] = locale.atoi(goals)
        stats['%s_goals_details' % loc] = ','.join(goals_details)
    # In-depth per-metric stats tables: one column per team.
    for table in soup.find_all('div', 'statsTable'):
        t = table.find('table')
        headings = t.select('thead th')[1:]
        rows = [r.select('td') for r in t.select('tbody tr')]
        for col in zip(headings, *rows):
            metric = col[0].get_text().strip().lower().replace(' ', '_')
            stats['%s_home_team' % metric] = locale.atoi(col[1].get_text().strip())
            stats['%s_away_team' % metric] = locale.atoi(col[2].get_text().strip())
    return stats
def sig_insert_text(widget, new_text, new_text_length, position):
    """Reject the pending insertion unless the resulting text is a valid
    locale integer (GTK "insert-text" handler)."""
    current = widget.get_text()
    cursor = widget.get_position()
    candidate = current[:cursor] + new_text + current[cursor:]
    try:
        locale.atoi(candidate)
    except ValueError:
        # Not an integer: cancel the insert.
        widget.stop_emission('insert-text')
def dayForWeek():
    """Return the weekday (1=Monday .. 7=Sunday) for every parsed date.

    Relies on the module-level `results` table and transformDateFormat().
    NOTE(review): iterates m+1 entries for an m-row `results` table —
    confirm month/day really contain one extra entry.
    """
    month, day = transformDateFormat()
    m = shape(results)[0]
    # BUG FIX: a real list, not a range object — range() results are not
    # item-assignable on Python 3 (on Python 2 range() returned a list).
    week = [0] * (m + 1)
    for i in range(m + 1):
        week[i] = datetime.datetime(2014, atoi(month[i]), atoi(day[i])).strftime("%w")
    # strftime("%w") yields '0' for Sunday; map it to 7.
    for i in range(m + 1):
        if week[i] == '0':
            week[i] = 7
    return week
def sig_insert_text(self, entry, new_text, new_text_length, position):
    """Block the insertion unless the result is a locale integer or a lone
    "-" (the start of a negative number)."""
    current = entry.get_text()
    cursor = entry.get_position()
    candidate = current[:cursor] + new_text + current[cursor:]
    if candidate == '-':
        # A bare minus sign is allowed while typing a negative number.
        return
    try:
        locale.atoi(candidate)
    except ValueError:
        entry.stop_emission('insert-text')
def parse_stat_table(soup):
    """Extract unallocated skill points and remap count from the sheet.

    Returns {'unallocated_sp': int, 'remaps': int}; empty cells count as 0.
    """
    stat_table_soup = soup.find('td', attrs={'class': "title"}).findParent('table')
    unallocated_sp_str = stat_table_soup.find('td', text='Unallocated').findNext('td').text
    unallocated_sp_str = unallocated_sp_str.replace(',', '')
    if unallocated_sp_str:
        unallocated_sp = locale.atoi(unallocated_sp_str)
    else:
        unallocated_sp = 0
    remaps_str = stat_table_soup.find('td', text='Remaps').findNext('td').text
    # Strip separators for symmetry with the unallocated-SP parsing.
    remaps_str = remaps_str.replace(',', '')
    if remaps_str:
        # BUG FIX: this previously parsed unallocated_sp_str again, so
        # 'remaps' silently reported the unallocated skill points.
        remaps = locale.atoi(remaps_str)
    else:
        remaps = 0
    return {'unallocated_sp': unallocated_sp, 'remaps': remaps}
def evalMacro(repos, data, expr, blacklist):
    """
    evaluates a expression returns expanded version
    macros: $x  repository in position x
            *x  package list of repo in position x
    returns an ordered list of strings

    Fixes vs. the original:
      * a "$n" component was appended TWICE (the resolved repo, then the
        literal component via the non-exclusive fall-through else);
      * string.atoi no longer exists on Python 3 (int() is equivalent);
      * the empty-package error used "$s" and the undefined name `view`.
    """
    import re
    ret = []
    for component in expr.split(","):
        repo_refs = re.findall(r"\$(\d+)", component)
        pkg_refs = re.findall(r"\*(\d+)", component)
        if repo_refs:
            column = int(repo_refs[0])
            if len(repos) < column:
                print("Can't use repo #%d, not enough repos" % column)
                exit(1)
            ret.append(repos[column - 1])
        elif pkg_refs:
            column = int(pkg_refs[0])
            if len(repos) < column:
                print("Can't use repo #%d package list, not enough repos" % column)
                exit(1)
            repo = repos[column - 1]
            packages = data[repo].packages()
            if len(packages) == 0:
                print("No packages defined for %s" % repo)
                exit(1)
            ret.extend(packages)
        else:
            # Not a macro: assume it is a literal repo name.
            ret.append(component)
    if blacklist:
        for pkg in blacklist:
            while ret.count(pkg):
                ret.remove(pkg)
    return ret
def getFeatNumFromWeights(weightsFile):
    """Return the number of features recorded in a spatial-weights file.

    Reads the header of GAL/GWT text files or the observation count of
    SWM binary files.
    """
    weightType = returnWeightFileType(weightsFile)
    if weightType in ['GAL', 'GWT']:
        # Context manager closes the header file deterministically;
        # the original opened it and never closed it.
        with open(weightsFile, 'r') as weightFile:
            info = weightFile.readline().strip().split()
        if weightType == 'GAL':
            # GAL headers are either "<n>" or "<flag> <n> ...".
            if len(info) == 1:
                return LOCALE.atoi(info[0])
            elif len(info) > 1:
                return LOCALE.atoi(info[1])
        else:
            # GWT: the second header token is the feature count.
            return LOCALE.atoi(info[1])
    elif weightType == 'SWM':
        swm = WU.SWMReader(weightsFile)
        return swm.numObs
def get_stats(self, soup: bs4.BeautifulSoup):
    """Parse the per-country stats table into {country: {header: number}}."""
    table: bs4.Tag = soup.find("table", id="main_table_countries_today")
    headers: List[Text] = [" ".join(th.strings) for th in table.thead.find_all("th")]
    stats = {}
    for row in table.find_all("tr"):
        if row.parent.name == "thead":
            continue  # skip header rows
        cells = row.find_all("td")
        country = "".join(cells[1].stripped_strings)
        numbers: List[Union[int, float]] = []
        self.logger.debug("all_tr: %s", cells)
        for cell in cells[2:-1]:
            value = "".join(cell.stripped_strings) or "0"
            # sometime, counter is negative, but has a + sign ...
            if value.startswith("+-"):
                value = value[1:]
            try:
                numbers.append(locale.atoi(value))
            except ValueError:
                try:
                    numbers.append(locale.atof(value))
                except ValueError:
                    self.logger.warning("Unable to parse value %s", value)
                    continue
        self.logger.debug("%s - %s", country, numbers)
        stats[country] = dict(zip(headers[2:], numbers))
    return stats
def cast(self, value):
    """Convert value to int; '' and None become None.

    Falls back to locale-aware parsing for grouped numbers such as "1,234".
    """
    if value in ['', None]:
        return None
    try:
        return int(value)
    except (TypeError, ValueError):
        # Narrowed from a bare except: only conversion failures fall
        # through to the locale-aware parser.
        return locale.atoi(value)
def ans_tanzhang(page):
    """Crawl one page of the user's answers, recurse until the answer count
    says the last page was reached, then persist the collected data."""
    print(f"======知乎关注的问题爬取第{page}页======")
    # 1. Build the URL for this page of answers.
    link = 'https://www.zhihu.com/people/sujiankuan/answers?page={}'.format(
        str(page))
    # 2. Fetch the page; get_data returns response.text.
    html_data = get_data.get_data(link)
    # 3. Extract the answers we want from the raw HTML (each page holds up
    #    to 20 answers).
    get_answer(html_data)
    # The page's meta tag carries the total number of answers.
    answer_totals = re.findall(
        '<meta itemProp="zhihu:answerCount" content="(.*?)"', html_data)[0]
    answer_totals = locale.atoi(answer_totals)
    if math.ceil(answer_totals / 20) != page:
        print("第" + str(page) + "页数据爬取完毕!")
        page += 1
        # Random delay to stay polite with the server.
        time.sleep(random.randint(4, 8))
        ans_tanzhang(page)
    else:
        # 4. Persist: final_data is a module-level accumulator already
        #    filled by get_answer.
        create_table()
        for info in final_data:
            insert(info)
        print("所有数据爬取完毕!")
    return
def parse_gpus(tr, by_type):
    """Attach the GPU count from a table row to its matching instance.

    Rows for unknown instance types are ignored.
    """
    cells = tr.xpath('td')
    instance = by_type.get(totext(cells[0]), None)
    if instance is not None:
        instance.GPU = locale.atoi(totext(cells[1]))
def _valsou_check(self, grid_file, version, idx, total):
    """ VALSOU check for the loaded s57 features and grid"""
    # GUI takes care of progress bar
    logger.debug('VALSOU check v%d ...' % version)
    self.parent_win.progress.start(title="VALSOU check v.%d" % version,
                                   text="Data loading [%d/%d]" % (idx, total),
                                   init_value=10)
    try:
        self.prj.open_grid(path=grid_file)
    except Exception as e:
        # noinspection PyCallByClass
        QtWidgets.QMessageBox.critical(self, "Error", "While reading grid file, %s" % e,
                                       QtWidgets.QMessageBox.Ok)
        self.parent_win.progress.end()
        return
    self.parent_win.progress.update(value=20,
                                    text="VALSOU check v%d [%d/%d]" % (version, idx, total))
    try:
        if version == 7:
            specs_version = self.toggle_specs_v7.value()
            if specs_version in [2016, 2017]:
                # Scale comes from a locale-formatted text field.
                locale.setlocale(locale.LC_ALL, "")
                scale = locale.atoi(self.set_scale_fsv7.text())
            else:
                scale = 10000
            with_laser = self.set_include_laser_fsv7.isChecked()
            is_target_detection = self.toggle_mode_v7.value() == 1
            if specs_version == 2016:
                self.prj.valsou_check_v7(specs_version="2016", survey_scale=scale,
                                         with_laser=with_laser,
                                         is_target_detection=is_target_detection)
            elif specs_version == 2017:
                self.prj.valsou_check_v7(specs_version="2017", survey_scale=scale,
                                         with_laser=with_laser,
                                         is_target_detection=is_target_detection)
            elif specs_version == 2018:
                self.prj.valsou_check_v7(specs_version="2018", survey_scale=scale,
                                         with_laser=with_laser,
                                         is_target_detection=is_target_detection)
            else:
                raise RuntimeError("unknown specs version: %s" % specs_version)
        else:
            # BUG FIX: the RuntimeError was constructed but never raised,
            # so an unknown check version silently looked like success.
            raise RuntimeError("unknown VALSOU check version: %s" % version)
    except Exception as e:
        # noinspection PyCallByClass
        QtWidgets.QMessageBox.critical(self, "Error", "While VALSOU checking, %s" % e,
                                       QtWidgets.QMessageBox.Ok)
        self.parent_win.progress.end()
        return
    self.parent_win.progress.end()
def _format_df(df: pd.DataFrame) -> pd.DataFrame:
    """Format the DataFrame to match the schema."""
    result = _transpose_df(df)
    result = aggregate_ingest_utils.rename_columns_and_select(
        result,
        {
            "report_date": "report_date",
            "Census": "census",
            "In House": "in_house",
            "Boarded In": "boarded_in",
            "Boarded Out": "boarded_out",
            "- Sentenced": "sentenced",
            "- Civil": "civil",
            "- Federal": "federal",
            "- Technical Parole Violators": "technical_parole_violators",
            "- State Readies": "state_readies",
            "- Other Unsentenced **": "other_unsentenced",
        },
    )
    result["report_date"] = result["report_date"].apply(_parse_report_date)

    def to_int(cell):
        # Numeric cells pass straight through; parenthesised footnotes
        # count as zero; everything else is a locale-formatted integer.
        if isinstance(cell, (int, float)):
            return int(cell)
        if "(" in cell:
            return 0
        return locale.atoi(cell)

    for column_name in set(result.columns) - {"report_date"}:
        result[column_name] = result[column_name].apply(to_int)
    result["facility_name"] = df["FACILITY"].iloc[0]
    return result
def intify(val):
    """Best-effort conversion of val to int.

    Tries a straight int(), then comma-stripping, then locale-aware
    parsing.  Raises ValueError when nothing works.
    """
    # Strip any characters that are outside the ascii range - they won't
    # make up the int anyway, and this gets rid of things like strange
    # currency marks.  BUG FIX: the original tested isinstance(val, unicode),
    # a Python-2-only name that raises NameError on Python 3; all Python 3
    # str is unicode, so test str and round-trip through ASCII.
    if isinstance(val, str):
        val = val.encode("ascii", errors="ignore").decode("ascii")
    # try the straight cast
    try:
        return int(val)
    except ValueError:
        pass
    # could have commas in it, so try stripping them
    try:
        return int(val.replace(",", ""))
    except ValueError:
        pass
    # try the locale-specific approach
    try:
        return locale.atoi(val)
    except ValueError:
        pass
    raise ValueError(u"Could not convert string to int: {x}".format(x=val))
def getFreeCashFlow(html):
    """Scrape (date, free-cash-flow) pairs from a cash-flow statement page.

    Page values are in thousands and are scaled by 1000 here.
    Returns a list of (datetime, int) tuples.
    """
    dates = []
    fcf = []
    # The column dates are the siblings following the "Breakdown" header.
    datesSpan = html.find("span", string=re.compile("Breakdown", re.IGNORECASE))
    if datesSpan:
        section = datesSpan.parent.next_sibling
        while section is not None:
            dateStr = section.find("span").string
            if dateStr != 'ttm':  # skip the trailing-twelve-months column
                dates.append(datetime.strptime(dateStr, "%m/%d/%Y"))
            section = section.next_sibling
    # The second "Free..." row carries the free-cash-flow values.
    fcfSpan = html.find_all("span", string=re.compile("^Free"), limit=2)
    if len(fcfSpan) > 1:
        section = fcfSpan[1].parent.parent.next_sibling  # advance to values
        section = section.next_sibling  # skip first value (trailing twelve months)
        while section is not None:
            valueStr = section.find("span")
            if valueStr:
                fcf.append(locale.atoi(valueStr.string) * 1000)
            section = section.next_sibling
    return list(zip(dates, fcf))
def parse_progress_line(line: str) -> Optional[RsyncProgressStatus]:
    """
    Attempt to parse an rsync progress line into values.

    Either returns the values or None if not a progress line.
    """
    # Example rsync progress line:
    #       823,915,288  35%   36.65MB/s    0:00:40
    result = re.search(
        r"""(?P<bytes>.*[0-9,]+)       # bytes transferred
            \ +                        # one or more spaces
            (?P<percent>[0-9.]+)%      # percent complete
            \ +                        # one or more spaces
            (?P<rate>[0-9A-Za-z.]+/s)  # current rate of transfer
            \ +                        # one or more spaces
            (?P<eta>\d+:\d\d:\d\d)     # estimated time remaining
            .*
        """,
        line,
        flags=re.VERBOSE,
    )
    # (Fixed copy-pasted comment on the eta group: it previously read
    # "current rate of transfer".  re.VERBOSE ignores pattern comments,
    # so the match behavior is unchanged.)
    if not result:
        return None
    # NOTE(review): locale.atoi only strips the "," grouping when the
    # active locale uses "," as its thousands separator — confirm the
    # caller sets an appropriate locale.
    bytes_transferred = locale.atoi(result.group("bytes"))
    percent_transferred = float(result.group("percent"))
    transfer_rate = parse_rate(result.group("rate"))
    eta = result.group("eta")
    return RsyncProgressStatus(
        bytes_transferred=bytes_transferred,
        percent_transferred=percent_transferred,
        transfer_rate=transfer_rate,
        eta=eta,
    )
def Notify(self, event):
    """Handled events:
    TickEvent: Make sure countdown timer hasn't reached zero (if applicable)
    RequestAttackEvent: Initiate a new attack
    SolveEvent: Solution entered: Calculate damage and apply it appropriately
    """
    ActorModel.Notify(self, event)
    if isinstance(event, TickEvent):
        # deal with solution timeout
        if self.solEndTime != 0 and self.time > self.solEndTime:
            self.solEndTime = self.time  # so that dmgOffset doesn't go negative
            self.evManager.Notify(SolveEvent('-1'))  # always wrong answer
    elif isinstance(event, RequestAttackEvent):
        # Start a new problem and broadcast it along with its deadline.
        self.problem = MultiplicationProblem()
        prob = unicode(self.problem)  # NOTE(review): Python-2-only `unicode`
        self.solEndTime = self.time + self.solutionWait
        self.evManager.Notify(RequestSolutionEvent(prob, self.solEndTime))
    elif isinstance(event, SolveEvent):
        # Fraction of the answer window still remaining (1.0 = answered
        # instantly, 0.0 = at the deadline); scales damage both ways.
        dmgOffset = 1.0 * (self.solEndTime - self.time) / self.solutionWait
        Debug("Damage Offset: %s" % dmgOffset, 2)
        # Only locale-parse strings that are pure digits; anything else
        # (including the '-1' timeout sentinel) counts as a wrong answer.
        if event.solution.isdigit() and self.problem.solve(
                locale.atoi(event.solution)):
            self.Attack(60 * dmgOffset)
        else:
            self.Hurt(10 + 20 * dmgOffset)
        self.solEndTime = 0
def test_fr_FR():
    """Exercise locale number/string conversions under fr_FR.UTF-8.

    Returns early (skipping all assertions) when the locale is not
    installed on the host.

    NOTE(review): the expected currency strings ("Eu" for the local
    symbol, space grouping) are platform-specific — glibc typically
    renders the symbol as "€" — confirm on the target system.
    """
    import locale
    try:
        locale.setlocale(locale.LC_ALL, "fr_FR.UTF-8")
    except locale.Error:
        # Locale unavailable on this machine; nothing to test.
        return
    # test str -> object conversions
    assert 1234567 == locale.atoi("1234567")
    assert 1234567.89 == locale.atof("1234567,89")
    # test object -> str (French decimal comma, no grouping requested)
    assert "1234,56" == locale.str(1234.56)
    assert "1234,56" == locale.format_string("%.2f", 1234.56, grouping=False, monetary=True)
    # currency(): local symbol vs international code (EUR)
    assert "1 234 567,89 Eu" == locale.currency(1234567.89, True, True, False).strip()
    assert "1 234 567,89 EUR" == locale.currency(1234567.89, True, True, True).strip()
    return
def clean_val(self, val):
    """Normalize a submission cell to a number.

    Handles ints/floats, comma-grouped strings (via the active locale),
    percentages ("50%" -> 0.5), and yes/no strings (1/0).  Returns None
    for None input.

    Raises:
        ValueError: when the value cannot be interpreted at all.
    """
    str_lookup = {'yes': 1, 'no': 0}
    if val is None:
        return None
    # Deal with percentages: strip the sign now, divide at the end.
    # (Fixed the Python-2-only `type(val) == unicode` check; isinstance
    # against `str` is both Py3-correct and the idiomatic type test.)
    convert_percent = False
    if isinstance(val, str) and '%' in val:
        try:
            val = float(re.sub('%', '', val))
            convert_percent = True
        except ValueError:
            pass
    try:
        cleaned_val = locale.atoi(val)  # e.g. 100,000 -> 100000
    except AttributeError:
        # Not a string (no .replace) — already numeric.
        cleaned_val = float(val)
    except ValueError:
        try:
            cleaned_val = float(val)
        except ValueError:
            try:
                cleaned_val = str_lookup[val.lower()]
            except KeyError:
                raise ValueError('Bad Value!')
    if convert_percent:
        cleaned_val = cleaned_val / 100.0
    return cleaned_val
def clean_val(self, val):
    """Normalize a submission cell to a number.

    This needs a lot of work but basically determines if a particular
    submission cell is allowed.  Big point of future controversy: what
    do we do with zero values?  In order to keep the size of the
    database manageable, we only accept non-zero values.

    Raises:
        ValueError: when the value cannot be interpreted at all.
    """
    str_lookup = {'yes': 1, 'no': 0}
    if val is None:
        return None
    # Prefer en_US grouping ("100,000") for the parse below, but don't
    # crash on hosts where that locale is not installed — the original
    # unguarded setlocale raised locale.Error there.
    try:
        locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
    except locale.Error:
        pass  # best effort: fall back to whatever locale is active
    try:
        cleaned_val = locale.atoi(val)  # e.g. 100,000 -> 100000
    except AttributeError:
        # Not a string (no .replace) — already numeric.
        cleaned_val = float(val)
    except ValueError:
        try:
            cleaned_val = str_lookup[val.lower()]
        except KeyError:
            raise ValueError('Bad Value!')
    return cleaned_val
def parse_instance(tr, inst2family):
    """Build an Instance from one <tr> row of the AWS instance table."""
    # Known typos on the AWS site (temporary fixes):
    # https://github.com/powdahound/ec2instances.info/issues/199 (2016-10-11)
    # https://github.com/powdahound/ec2instances.info/issues/227 (2017-02-23)
    typo_fixes = {
        'x1.16large': 'x1.16xlarge',
        'i3.4xlxarge': 'i3.4xlarge',
        'i3.16large': 'i3.16xlarge',
    }
    inst = Instance()
    cells = tr.xpath('td')
    assert len(cells) == 12, "Expected 12 columns in the table, but got %d" % len(cells)
    raw_type = totext(cells[0])
    inst.instance_type = typo_fixes.get(raw_type, raw_type)
    inst.family = inst2family.get(inst.instance_type, "Unknown")
    # Some t2 instances support 32-bit arch
    # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-resize.html#resize-limitations
    if inst.instance_type in ('t2.micro', 't2.small'):
        inst.arch.append('i386')
    inst.vCPU = locale.atoi(totext(cells[1]))
    inst.memory = locale.atof(totext(cells[2]))
    inst.ebs_optimized = totext(cells[10]).lower() == 'yes'
    inst.network_performance = totext(cells[4])
    inst.enhanced_networking = totext(cells[11]).lower() == 'yes'
    inst.generation = 'current'
    return inst
def test_optimization_targets_05():
    r'''Score with 1 voice, 50 measures, 100 divisions, 300 notes.
    Partition *divisions* in 16 parts and then set leaf color of each part.

    2.12 (r9746) 2,444,770 function calls.
    2.12 (r9752) 1,955,779 function calls.
    2.12 (r9753) 1,848,278 function calls.
    '''
    score_template = templatetools.GroupedRhythmicStavesScoreTemplate(staff_count=1)
    score_specification = musicexpressiontools.ScoreSpecificationInterface(score_template)
    score_specification.set_time_signatures(50 * [(3, 8)])
    score_specification.set_divisions([(3, 16)])
    score_specification.set_rhythm(library.sixteenths)
    parts = score_specification.select_divisions('Voice 1').partition_by_ratio(16 * [1])
    # Alternate red / blue leaf coloring across the 16 parts.
    for i, part in enumerate(parts):
        if i % 2 == 0:
            part.select_leaves('Voice 1').set_leaf_color('red')
        else:
            part.select_leaves('Voice 1').set_leaf_color('blue')
    string = 'score_specification.interpret()'
    count = systemtools.IOManager.count_function_calls(
        string, globals(), locals(), fixed_point=False)
    score = score_specification.score_specification.interpreter.score
    # BUG FIX: the ceiling was written as locale.atoi('2,900,000'), which
    # raises ValueError under any locale whose thousands separator is not
    # ',' (e.g. the default C locale).  A plain integer literal is exact
    # and locale-independent.
    assert count < 2900000
def cast(self, value):
    """Coerce *value* to an int, or None for empty input.

    Tries a plain ``int()`` cast first, then falls back to a
    locale-aware parse (handles grouped strings such as "1,234" when
    the active locale uses ',' as its thousands separator).
    """
    # Empty string and None both mean "no value".
    if value in ('', None):
        return None
    try:
        return int(value)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and hid real bugs.
        return locale.atoi(value)
def set_client(self, record, value):
    """Locale-parse string input to an int (falling back to the field
    default on bad input) before delegating to the base field."""
    if isinstance(value, basestring):
        try:
            parsed = locale.atoi(value)
        except ValueError:
            parsed = self._default
        value = parsed
    super(IntegerField, self).set_client(record, value)
def newcases():
    """ Compute the new cases/deaths """
    # Daily values are the difference between consecutive cumulative
    # columns.  Columns 0-2 are state metadata, copied verbatim; the
    # diff starts at column 8.
    import locale
    # NOTE(review): requires the en_US.UTF-8 locale so atoi can parse
    # comma-grouped counts; raises locale.Error where it is missing.
    locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
    # open cumulative cases file
    with open('us_states_cases.csv', newline='') as infile:
        # define a csv reader
        reader = csv.reader(infile, delimiter=',')
        # open the new cases file as output
        with open('us_states_newcases.csv', 'w') as outfile:
            # define a csv writer
            writer = csv.writer(outfile, delimiter=',')
            # read it line by line
            for row in reader:
                # first line, as header: keep date labels as-is
                if row[0] == 'State':
                    nrow = row[0:3] + row[8:]
                else:
                    nrow = row[0:3]
                    for i in range(8, len(row)):
                        dailynewcase = locale.atoi(row[i])-locale.atoi(row[i-1])
                        # fix a Puerto Rico case error 11/9/20: a large
                        # negative daily delta is treated as a data
                        # correction, not real new cases
                        if dailynewcase < -1000:
                            dailynewcase = 0
                        nrow += [dailynewcase]
                writer.writerow(nrow)
    # Same differencing for deaths (no special-case clamping here).
    with open('us_states_deaths.csv', newline='') as infile:
        # define a csv reader
        reader = csv.reader(infile, delimiter=',')
        # open the new cases file as output
        with open('us_states_newdeaths.csv', 'w') as outfile:
            # define a csv writer
            writer = csv.writer(outfile, delimiter=',')
            # read it line by line
            for row in reader:
                # first line, as header
                if row[0] == 'State':
                    nrow = row[0:3] + row[8:]
                else:
                    nrow = row[0:3]
                    for i in range(8, len(row)):
                        nrow += [locale.atoi(row[i])-locale.atoi(row[i-1])]
                writer.writerow(nrow)
def collect_data(self):
    """Poll the remote eth_stat tool over SSH until exitFlag is set,
    then store the averaged RX rate (in Mpps, rounded to 2 places)
    into self.statistic.df[self.core_num][self.pkt_len].

    NOTE(review): 'English_US' is a Windows-style locale name; on POSIX
    systems setlocale would raise locale.Error — confirm target OS.
    """
    tx_pps_seq = []
    rx_pps_seq = []
    locale.setlocale(locale.LC_NUMERIC, 'English_US')
    print("exitFlag = %d, %s processing..." % (exitFlag, self.name))
    # Remote command: tool path + eth_stat package name from config.
    statistic_dst_path = self.statistic.config["path"]["tool_path"]
    cmd = statistic_dst_path + self.statistic.config["pkg_list"]["eth_stat"]
    print(cmd)
    while not exitFlag:
        try:
            # A fresh SSH session per polling iteration.
            s = paramiko.SSHClient()
            s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            s.connect(
                hostname=self.statistic.config['server_info']['host_name'],
                port=self.statistic.config['server_info']['host_port'],
                username=self.statistic.config['server_info']['username'],
                password=self.statistic.config['server_info']['password'])
            stdin, stdout, stderr = s.exec_command(cmd)
            for line in stdout:
                line = line.strip("\n")
                if 'SUM' in line:
                    # remove redundant space
                    line = re.sub(r"\s{2,}", " ", line)
                    print(line)
                    line_array = line.split(' ')
                    # Fields 3 and 6 of the SUM line are the TX and RX
                    # packet rates (comma-grouped, hence locale.atoi).
                    tx_pps_seq.append(locale.atoi(line_array[3]))
                    rx_pps_seq.append(locale.atoi(line_array[6]))
                    print(line_array)
                    print(line_array[3] + " " + line_array[6])
        except Exception as e:
            # Any SSH/parse failure aborts collection for this run.
            print("execute command %s error, error message is %s" % (cmd, e))
            return ""
    print(tx_pps_seq)
    avg_tx_pps = average(tx_pps_seq)
    print(avg_tx_pps)
    print(rx_pps_seq)
    avg_rx_pps = average(rx_pps_seq)
    # Convert packets/s to millions of packets/s for reporting.
    avg_rx_mpps = float(avg_rx_pps / 1000000)
    print("raw: %d, mpps: %f" % (avg_rx_pps, avg_rx_mpps))
    avg_rx_mpps = round(avg_rx_mpps, 2)
    # avg_rx_mpps = round(avg_rx_mpps + 0.001, 2)
    print("round mpps: %.2f" % avg_rx_mpps)
    self.statistic.df[self.core_num][self.pkt_len] = avg_rx_mpps
def add_eni_info(instances):
    """Annotate each instance with ENI limits scraped from the AWS docs."""
    eni_url = "http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html"
    tree = etree.parse(urllib2.urlopen(eni_url), etree.HTMLParser())
    table = tree.xpath('//div[@class="table-contents"]//table')[0]
    by_type = {inst.instance_type: inst for inst in instances}
    # Each data row holds: instance type, max ENIs, IPs per ENI.
    for row in table.xpath('.//tr[./td]'):
        itype = etree.tostring(row[0], method='text').strip().decode()
        max_enis = locale.atoi(etree.tostring(row[1], method='text').decode())
        ip_per_eni = locale.atoi(etree.tostring(row[2], method='text').decode())
        if itype not in by_type:
            print("Unknown instance type: {}".format(itype))
            continue
        by_type[itype].vpc = {
            'max_enis': max_enis,
            'ips_per_eni': ip_per_eni}
def is_number(s):
    """Return True if *s* parses as a locale-aware number, or as a
    fraction written with an escaped slash (e.g. the 5-char string
    ``3\\/32``), else False."""
    try:
        locale.atof(s)
        return True
    except ValueError:
        pass
    # Treat strings like '3\/32' (backslash + slash, as in JSON-escaped
    # text) as numbers when both sides parse as integers.
    try:
        # FIX: the original wrote '\/' — an invalid escape sequence that
        # only works because Python leaves unknown escapes intact (and
        # warns on modern interpreters).  A raw string is explicit.
        special_str = r'\/'
        pos = s.find(special_str)
        if pos > 0:
            locale.atoi(s[:pos])
            locale.atoi(s[pos + len(special_str):])
            return True
    except ValueError:
        pass
    return False
def get_total_player_count(data):
    """Replace row 0's comma-grouped player count with a parsed int."""
    # pattern adapted from https://stackoverflow.com/questions/5917082/
    # Use the user's default locale so atoi understands the grouping.
    locale.setlocale(locale.LC_ALL, '')
    grouped_number = r'\d{1,3}(,\d{3})*'
    raw_count = get_match(grouped_number, data.loc[0, 'player_count'])
    data.loc[0, 'player_count'] = locale.atoi(raw_count)
    return data
def add_instance_storage_details(instances): """Add information about instance storage features.""" # Canonical URL for this info is http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html # url = "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.partial.html" # It seems it's no longer dynamically loaded url = "http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html" tree = etree.parse(urllib2.urlopen(url), etree.HTMLParser()) table = tree.xpath('//div[@class="table-contents"]/table')[0] rows = table.xpath('.//tr[./td]') checkmark_char = u'\u2714' dagger_char = u'\u2020' for r in rows: columns = r.xpath('.//td') (instance_type, storage_volumes, storage_type, needs_initialization, trim_support) = tuple(totext(i) for i in columns) if instance_type is None: continue for i in instances: if i.instance_type == instance_type: i.ebs_only = True # Supports "24 x 13,980 GB" and "2 x 1,200 GB (2.4 TB)" m = re.search(r'(\d+)\s*x\s*([0-9,]+)?\s+(\w{2})?', storage_volumes) if m: size_unit = 'GB' if m.group(3): size_unit = m.group(3) i.ebs_only = False i.num_drives = locale.atoi(m.group(1)) i.drive_size = locale.atoi(m.group(2)) i.size_unit = size_unit i.ssd = 'SSD' in storage_type i.nvme_ssd = 'NVMe' in storage_type i.trim_support = checkmark_char in trim_support i.storage_needs_initialization = checkmark_char in needs_initialization i.includes_swap_partition = dagger_char in storage_volumes
def setAreaOfCountry(self, name, area):
    """Update the stored area for country *name* via its setter."""
    # Comma-grouped strings such as "1,234" go through the locale parser
    # before the final int() cast.
    if "," in str(area):
        area = locale.atoi(area)
    country = self._countryCat[name]
    country.setArea(int(area))
def to_int(string):
    """Parse the leading space-delimited token of *string* as an int."""
    token = string.split(' ')[0]
    try:
        return locale.atoi(token)
    except ValueError:
        # Locale parse failed (e.g. C locale with "1,234"):
        # drop comma separators and cast directly.
        return int(token.replace(',', ''))
def get_nfiled(fname):
    """Return the bill count from the last "Number of Bills: N,NNN" line
    in *fname*, or 0 if no such line exists.

    The matched count must contain a thousands comma (as in the source
    reports); the comma is stripped before parsing.
    """
    # FIXES: the original opened the file without closing it (leak),
    # ran the same regex twice per line, and used a non-raw pattern
    # string (with the redundant escape `\,`).
    pattern = re.compile(r'Number of Bills: (\d+,\d+)')
    count = 0
    with open(fname, 'r') as filer:
        for atom in filer:
            head = pattern.search(atom)
            if head:
                count = locale.atoi(head.group(1).replace(',', ''))
    return count
def conv(x):
    """Parse *x* as a locale int, then a locale float; on total failure
    return it unchanged."""
    try:
        return locale.atoi(x)
    except ValueError:
        pass
    try:
        return locale.atof(x)
    except Exception:
        return x
def setPopulationOfCountry(self, name, population):
    """Update the stored population for country *name* via its setter."""
    # Comma-grouped strings such as "1,234,567" go through the locale
    # parser before the final int() cast.
    if "," in str(population):
        population = locale.atoi(population)
    entry = self._countryCat[name]
    entry.setPopulation(int(population))
def parse_price(price):
    """ Convert string price to numbers

    Strips currency symbols and separators, returning an int.
    Empty/None input — and input with no digits at all — yields 0.
    """
    if not price:
        return 0
    # Keep digits only; commas, currency marks, and the decimal point
    # all contribute nothing (the original stripped commas first and
    # then redundantly allowed them in the character class).
    digits = re.sub('[^0-9]', "", price)
    if not digits:
        # FIX: the original crashed (ValueError from atoi('')) on
        # digit-free strings such as "free".
        return 0
    # A pure-digit string needs no locale handling.
    return int(digits)
def submit_answer(self, widget, answer):
    """Extract the player's numeric answer from *answer* (a scale,
    button, or entry widget), check it against the current problem,
    update the UI accordingly, and schedule the next round."""
    try:
        # for int answer because of entry field
        if answer.get_name() == "GtkHScale":
            num = int(answer.get_value())
        elif answer.get_name() == "GtkButton":
            num = locale.atoi(answer.get_label())
        elif answer.get_name() == "GtkEntry":
            num = locale.atoi(answer.get_text())
    except:
        # Unparseable input: report "not a number" to the UI.
        # NOTE(review): bare except also hides unrelated errors — and
        # `num` stays unbound if none of the widget names matched.
        self.ui.answer_nan()
    else:
        if self.data.check_answer(num):
            self.ui.answer_correct(self.data)
        else:
            self.ui.answer_incorrect(self.data)
        # Clear the feedback after 2.5 s, then set up the next problem.
        gobject.timeout_add(2500, self.ui.clear, self.data)
        self.setup(self.data, self.ui)
def _to_int(int_str: str) -> Optional[int]: if not isinstance(int_str, str): return int_str try: return locale.atoi(int_str) except ValueError: # Values containing a '-' have no reported data, so just return None return None
def get_value(self):
    """Return the widget's text parsed as a locale int, or None when
    the text is empty or unparseable."""
    raw = self.widget.get_text()
    if not raw:
        return None
    try:
        return locale.atoi(raw)
    except ValueError:
        return None
def parse_stat_table(soup):
    """Parse the SMART stat table: returns a dict with the unallocated
    sector count and the remap count (0 when a cell is empty)."""
    stat_table_soup = soup.find('td', attrs={
        'class': "title"
    }).findParent('table')
    unallocated_sp_str = stat_table_soup.find(
        'td', text='Unallocated').findNext('td').text
    # Strip thousands separators so atoi works regardless of locale.
    unallocated_sp_str = unallocated_sp_str.replace(',', '')
    if unallocated_sp_str:
        unallocated_sp = locale.atoi(unallocated_sp_str)
    else:
        unallocated_sp = 0
    remaps_str = stat_table_soup.find('td', text='Remaps').findNext('td').text
    # Strip separators here too, for consistency with the cell above.
    remaps_str = remaps_str.replace(',', '')
    if remaps_str:
        # BUG FIX: the original parsed unallocated_sp_str here, so
        # 'remaps' always mirrored the unallocated count instead of
        # the Remaps cell.
        remaps = locale.atoi(remaps_str)
    else:
        remaps = 0
    return {'unallocated_sp': unallocated_sp, 'remaps': remaps}