def collect_world(self):
    """Scrape worldwide COVID statistics plus each country's trend series
    and rewrite the corresponding MongoDB documents.

    Reads the JSON embedded in the page's
    ``getListByCountryTypeService2true`` <script> tag, builds a ``country``
    record per entry, fetches its trend history when summary statistics are
    present, and replaces that id's documents in the ``country`` and
    ``countryTrend`` collections.
    """
    print("start to collect world info...")
    page = self.request_page()
    countryData_raw = page.find_all('script', attrs={'id': 'getListByCountryTypeService2true'})
    if countryData_raw:
        data = countryData_raw[0].string
        # The script tag wraps a JSON array; extract the bracketed payload.
        RE = re.compile('\[.*\]')
        data_clear = re.findall(RE, data)
        data_json = json.loads(data_clear[0])
        #self.countrycollection.insert_many(data_json)
        for item in data_json:
            # China gets a fixed synthetic id; every other country keeps its own.
            # NOTE(review): `id` shadows the builtin here.
            if item['countryShortCode'] != 'CHN':
                id = item['id']
            else:
                id = 100000000
            if item.get('statisticsData') != None:
                cn = country(id, item['continents'], item['provinceName'], item['countryFullName'], item['countryShortCode'], item['statisticsData'])
            else:
                cn = country(id, item['continents'], item['provinceName'], item['countryFullName'], item['countryShortCode'], None)
            if cn.statisticData != None:
                # Fetch the historical trend JSON for this country.
                # NOTE(review): "requets_trend" looks misspelled — confirm it
                # matches the actual method definition before renaming.
                trendRaw = self.requets_trend(cn.statisticData)
                trend_json = json.loads(trendRaw.text)
                # Seed a "today" record from the summary counts; daily
                # increments are unknown at this point, hence the zeros.
                trend = country_trend(
                    cn.id, item['confirmedCount'], 0, item['curedCount'], 0,
                    item['currentConfirmedCount'], 0,
                    datetime.date.today().strftime("%Y-%m-%d"),
                    item['deadCount'], 0)
                #clean data in mongodb
                self.country.delete_many({'id': cn.id})
                self.countryTrend.delete_many({'id': cn.id})
                self.countryTrend.insert_one(trend.__dict__)
                # Append every historical data point for this country.
                for titem in trend_json['data']:
                    trend = country_trend(
                        cn.id, titem['confirmedCount'], titem['confirmedIncr'],
                        titem['curedCount'], titem['curedIncr'],
                        titem['currentConfirmedCount'], titem['currentConfirmedIncr'],
                        titem['dateId'], titem['deadCount'], titem['deadIncr'])
                    self.countryTrend.insert_one(trend.__dict__)
            self.country.insert_one(cn.__dict__)
def parseLineGDP(currentLine):
    """Parse one '|'-delimited GDP record into the global ``countries`` map,
    creating and naming the country entry on first sight."""
    fields = currentLine.split('|')
    code = fields[GDPFile.CC]
    if code not in countries:
        entry = country.country()
        entry.setName(str(code))
        countries[code] = entry
    # The value column carries the trailing newline; drop the last char.
    countries[code].addGDP(fields[GDPFile.year], fields[GDPFile.value][:-1])
def ETLNAUTIA(communityType):
    """Run the full NAUTIA ETL pipeline for one community type.

    Each call below extracts and persists its own slice of the data; the
    calls are ordered side effects, so do not reorder them.
    """
    data = DataNAUTIA(communityType)
    com.community(communityType)
    camp.camp(data.getBibliography(), data.getEntities())
    ctr.country(data.getBibliography())
    cd.campData(data.getBibliography(), data.getEntities(), data.getLocalLeaders())
    gd.generalData(data.getBibliography())
    se.socioEconomic(data)
    g.government(data.getBibliography())
    fa.fisicoAmbiental(data.getBibliography(), data.getEntities())
    u.urbanism(data.getEntities(), data.getGeneralForm(), data.getPublicSpace())
    inf.infrastructure(data)
    s.services(data)
    sh.shelter(data.getEntities(), data.getShelter(), data.getHouseHold())
    fs.foodSecurity(data)
def BuildCountryDict(self):
    """Load every row of the ``ctry`` table and return a dict keyed by the
    first column (iso code), mapping to ``country.country`` records built
    from the full row."""
    query = (
        "select iso, iso3, fips, country, capital, "
        "population, continent, languages, neighbors "
        "from ctry;"
    )
    return {row[0]: country.country(*row) for row in self.curs.execute(query)}
def __init__(self):
    """Build the "IPTV Convert to SQL" Tkinter window (Python 2 only)."""
    # store the video stream object and output path, then initialize
    # the most recently read frame, thread for reading frames, and
    # the thread stop event
    self.thread = None
    self.state = False
    self.stopEvent = None
    self.c = country()
    # Default playlist path; the user can override it via "Select File".
    self.path = "/home/onuragtas/iptv/tv_channels_kalidas.m3u"
    # initialize the root window and image panel
    self.root = tki.Tk()
    #self.toggle_fullscreen();
    #self.root.call('wm', 'attributes', '.', '-topmost', '1')
    self.panel = None
    # Python 2 only: reload(sys) re-exposes setdefaultencoding so mixed
    # str/unicode operations default to UTF-8.
    import sys
    reload(sys)
    sys.setdefaultencoding('utf-8')
    # create a button, that when pressed, will take the current
    # frame and save it to file
    self.btn = tki.Button(self.root, text="SQL", height=1, command=self.convert)
    self.btn.pack(side="bottom", fill="both", expand="yes", padx=5, pady=10)
    # Output (SQL) and input (playlist) text areas.
    self.textsql = tki.Text(self.root, height=10)
    self.textsql.pack(side="bottom", fill="both", padx=10, pady=10)
    self.textip = tki.Text(self.root, height=10)
    self.textip.pack(side="bottom", fill="both", padx=10, pady=10)
    # NOTE(review): this rebinds self.btn, dropping the reference to the
    # "SQL" button created above — consider a distinct attribute name.
    self.btn = tki.Button(self.root, text="Select File", height=1, command=self.openfile)
    self.btn.pack(side="bottom", fill="both", expand="yes", padx=5, pady=10)
    #self.stopEvent = threading.Event()
    #self.thread = threading.Thread(target=self.videoLoop, args=())
    #self.thread.start()
    # set a callback to handle when the window is closed
    self.root.wm_title("IPTV Convert to SQL")
    self.root.wm_protocol("WM_DELETE_WINDOW", self.onClose)
def parseLineDelegation(currentLine):
    """Parse one '|'-delimited delegation record into the global
    ``countries`` map, recording per-year IP/ASN counts and their
    cumulative totals.
    """
    line = currentLine.split('|')
    code = line[delegationsFile.CC]
    if code not in countries:
        countries[code] = country.country()
        # BUG FIX: the name was previously read via GDPFile.CC — a copy-paste
        # from parseLineGDP. Delegation rows must be indexed with the
        # delegations file's own column constants.
        countries[code].setName(str(code))
    countries[code].addIPs(line[delegationsFile.year], line[delegationsFile.IPs])
    countries[code].addASNs(line[delegationsFile.year], line[delegationsFile.ASNs])
    countries[code].addCumulativeIPs(
        line[delegationsFile.year], line[delegationsFile.cumulativeIPs])
    # Last column carries the trailing newline; strip the final character.
    countries[code].addCumulativeASNs(
        line[delegationsFile.year], line[delegationsFile.cumulativeASNs][:-1])
def parseLine(currentLine):
    """Parse a combined '|'-delimited record (GDP plus IP/ASN columns) into
    the global ``countries`` map, registering the country if needed."""
    fields = currentLine.split('|')
    code = fields[column.CC]
    year = fields[column.year]
    # Checks if country exists, and adds it if it doesn't
    if code not in countries:
        fresh = country.country()
        fresh.setName(code)
        countries[code] = fresh
    record = countries[code]
    record.addGDP(year, fields[column.GDP])
    record.addIPs(year, fields[column.IPs])
    record.addASNs(year, fields[column.ASNs])
    record.addCumulativeIPs(year, fields[column.IPsC])
    record.addCumulativeASNs(year, fields[column.ASNsC])
def run_game():
    """Initialise pygame, draw the map, and run phase 1 of the game loop.

    The loop exits (break) once the current player has no armies left to
    place; until then it processes quit/click events and redraws.
    """
    pygame.init()
    # NOTE(review): `map` here is the project's map class, shadowing the builtin.
    maps = map()
    gray_map = maps.get_gray_map()
    colored_map = maps.get_colored_map()
    screen_size = maps.get_mapsize()
    SCREEN_WIDTH = screen_size[0]
    SCREEN_HEIGHT = screen_size[1]
    BG_COLOR = 150, 150, 80
    screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)
    countries = country(gray_map, screen)
    armie = army(countries)
    continent = continents(countries)
    player = players()
    clock = pygame.time.Clock()
    armie.set_army(12, 88)
    screen.fill(BG_COLOR)
    screen.blit(colored_map, (0, 0))
    # Phase 1
    while True:
        clock.tick(50)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                # Idiom fix: test truthiness directly instead of `== True`.
                if countries.is_country(countries.get_country_id(event.pos)):
                    pass  # placement handling not implemented yet
        # End Phase 1
        if player.get_player_army() <= 0:
            break
        countries.update_countries(armie)
        pygame.display.flip()
def __init__(self):
    """Scrape trends24.in's location menu and build ``countries_list``:
    one ``country`` seeded from the first link of each menu, with the
    remaining links added as its cities."""
    self.countries_list = []
    self.website = "https://trends24.in"
    page = requests.get(self.website)
    self.soup = BeautifulSoup(page.content, 'html.parser')
    for menu in self.soup.find_all('ul', {'class': 'location-menu__city-list'}):
        # Collect the <a> of every child entry; the first one names the country.
        anchors = [child.a for child in menu.children]
        head = anchors[0]
        nation = country(place(head.get_text(), head["href"]))
        for anchor in anchors[1:]:
            nation.addCity(place(anchor.get_text(), anchor["href"]))
        self.countries_list.append(nation)
https://github.com/owid/covid-19-data/blob/master/public/data/owid-covid-codebook.csv
"""
import pandas as pd
import numpy as np
import urllib.request
from datetime import date
import time
import pickle
import country_converter as coco
from country import country
import pdb

#--------------------
# Define country list
# Same country set under two coding schemes (ISO2 and ISO3).
c_all2 = country('all2','ISO2')
c_all2b = country('all2','ISO3')

# --------------------
# Download data
# Fetch the OWID case data (xlsx) and the Google mobility report (csv)
# into ../data; tic is used downstream to time the download.
tic = time.perf_counter()
urllib.request.urlretrieve("https://github.com/owid/covid-19-data/raw/master/public/data/owid-covid-data.xlsx", "../data/ourworld.xlsx")
urllib.request.urlretrieve("https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv","../data/google.csv")
df0 = pd.read_excel("../data/ourworld.xlsx",sheet_name="Sheet1", engine='openpyxl',)
# Region/metro columns are forced to str to avoid mixed-dtype warnings.
df1 = pd.read_csv("../data/google.csv", dtype={'sub_region_1': 'str', 'sub_region_2': 'str', 'metro_area': 'str', 'iso_3166_2_code': 'str'})

# --------------------
# Data cleaning
return li print(thong_sort(ages)) new_list = thong_sort(ages) #find min and max print(max(new_list)) print(min(new_list)) ages_avg = mean(ages) print(ages_avg) #import country_list form country.py in this folder country_list = country() # print(country_list) print(len(country_list)) count = 0 """Divide the countries list into two equal lists if it is even if not one more country for the first half.""" if len(country_list) % 2 == 1: print("odd") count = 0 country_first_half = [] for i in country_list: count = count + 1 #ถ้า len country_list == ครึ่งนึงของ list ทั้งหมด
def BuildCountryDict(self):
    """Fetch every row of the ``ctry`` table and return {iso: country},
    where each ``country.country`` is constructed from the whole row."""
    query = ("select iso, iso3, fips, country, capital, "
             "population, continent, languages, neighbors "
             "from ctry;")
    mapping = {}
    for row in list(self.curs.execute(query)):
        mapping[row[0]] = country.country(*row)
    return mapping
- F(age) is from curve-fitting, using demographic data
- p_d,age is from study Megan et al
"""
import pandas as pd
import numpy as np
from scipy.interpolate import interp1d
from sklearn.neighbors import KernelDensity
from matplotlib import pyplot as plt
import country_converter as coco
from country import country
import pdb
import pickle

#--------------------
# Define country list
c_set = country('special1', 'UNcode')

# Two anchor ages and their infection-fatality rates (percent -> fraction).
age_0 = np.array([7, 85])
fatality = np.array([0.001, 8.29 ]) / 100  # From https://doi.org/10.1038/s41586-020-2918-0
log_fatality = np.log(fatality)

# Calculate p_d,age global; IFR is log-linear in age buckets
# Interpolate log-IFR linearly in age, then exponentiate back.
fat_func = interp1d(age_0, log_fatality, fill_value="extrapolate")
age_new = np.linspace(0, 100, 500)
fat_new = np.exp(fat_func(age_new))
#plt.plot(age_new, fat_new)

# Demographic data
df = pd.read_excel(
def newCountry(code):
    """Register a fresh ``country.country`` under *code* in the global
    ``countries`` map, named after its own code."""
    fresh = country.country()
    fresh.setName(code)
    countries[code] = fresh
def newCountry(code):
    """Create and name a country record for *code* in the global map."""
    countries[code] = country.country()
    countries[code].setName(code)

def checkCountry(code):
    """Return True if *code* already has an entry in ``countries``."""
    # Idiom fix: return the membership test directly instead of an
    # if/else returning literal True/False.
    return code in countries

def getCountry(code):
    """Look up the record for *code* (raises KeyError if absent)."""
    return countries[code]

# --- script entry: parse the input file and write the aggregate report ---
# NOTE(review): neither file handle is closed; consider `with` blocks.
file = open(sys.argv[1], "r")
outputFile = open(sys.argv[2], "w")
countries = {}
World = country.country()
World.setName("World")
currentLine = file.readline()
# Stop at the first blank line; readline() returns "" at EOF.
while len(currentLine) > 1:
    parseLine(currentLine)
    currentLine = file.readline()
outputFile.write(outputAll())
# -*- coding: UTF-8 -*- import language, country n = language.English(10000000) m = language.Hindi(132) amer_sp = language.Spanish(12333) China_C = language.Chinese(12999993) China_e = language.English(1223244) China_M = language.Malay(12342) american = country.country("USA", 10000000, 0.5, country.position(5, 23), n, m, amer_sp) China = country.country("China", 20000000, 0.2, country.position(-2, 24), China_C, China_e, China_M) print("China:") country.show(China) print("american:") country.show(american) China.flow(american) american.flow(China) print("after:") print("China:") country.show(China) print("american:") country.show(american)
def country(self):
    # Open the Country activity as a child Toplevel window and hand it to
    # the `country` app class. NOTE(review): this method shadows the
    # imported `country` class name; the call below still resolves to the
    # class because methods are looked up as attributes, not bare names.
    # %.19s truncates the timestamp to second precision (YYYY-MM-DD HH:MM:SS).
    print "Country Activity opened @ %.19s"%(datetime.now())
    self.countr = tk.Toplevel(self.parent)
    self.app=country(self.countr)
from pade.misc.utility import display_message, start_loop, call_later
from pade.core.agent import Agent
from pade.acl.messages import ACLMessage
from pade.acl.aid import AID
from agency import agency
from country import country
from hotel import hotel

if __name__ == '__main__':
    # Spin up one agent of each kind on consecutive localhost ports.
    agents = list()
    # port = int(argv[1])
    base = 4004
    agents.append(agency(AID(name='agency@localhost:{}'.format(base))))
    agents.append(country(AID(name='country@localhost:{}'.format(base + 1))))
    agents.append(hotel(AID(name='hotel@localhost:{}'.format(base + 2))))
    start_loop(agents)
def do_POST(self): """Reakcja serwera na otyrzymaną metodę POST. Pobierana jest treść zapytania, a następnie porównywana jest ona z odpowiednimi wyrażeniami regularnymi, w celu zidentyfikowania typu zapytania. W zależności od typu zapytania wywoływane są odpowiednie funkcje lub treść pobierana jest z bazy danych. Potem wynik wysyłany jest za pomocą metody respond.""" filename = argv length = int(self.headers['Content-Length']) data = self.rfile.read(length) if self.path != "/": data = self.path[2:] data = data.replace('%20', ' ') data = data.replace('%22', '\"') data = data.replace('%7B', '{') data = data.replace('%7D', '}') data = json.loads(data) requestContent = data["content"] requestContent = requestContent.decode('utf8', 'ignore') print requestContent address = data["address"] port = int(data["port"]) patternCountry = re.compile("country\([A-Z][a-z]*\s?[A-Z]?[a-z]*\)$") patternCountryTag = re.compile("country\([A-Z][a-z]*\s?[A-Z]?[a-z]*\)\;tag\(.*\)$") patternGetflag = re.compile("country\([A-Z][a-z]*\s?[A-Z]?[a-z]*\)\;getflag$") patternCheckflag = re.compile("checkflag\(https?\:.*\)$") if re.match(patternCountry, requestContent): requestContent = requestContent.replace(' ', '_') country1 = requestContent[requestContent.index("(")+1:requestContent.index(")")] respond(self, count.country(country1), address, port) elif re.match(patternCountryTag, requestContent): requestContent = requestContent.replace(' ', '_') country1 = requestContent[requestContent.index("(")+1:requestContent.index(")")] tag = requestContent[requestContent.index(";")+1:] tag = tag[tag.index("(")+1:tag.index(")")] respond(self, count.countryTag(country1, tag), address, port) elif re.match(patternCheckflag, requestContent): requestType = "checkingFlag" url = requestContent[requestContent.index("(")+1:requestContent.index(")")] if dataaccess.isFlagAlreadyChecked(url, requestType): tmp = dataaccess.getCountryFromUrl(url, requestType) response_content = tmp else: country = 
cacheAndResolveCountryName(str(url)) country = country[:-4] response_content = country respond(self, response_content, address, port) elif re.match(patternGetflag, requestContent): requestContent = requestContent.replace(' ', '_') countryName = requestContent[requestContent.index("(")+1:requestContent.index(")")] countryName.lower().capitalize() url = "http://www.sciencekids.co.nz/images/pictures/flags680/" + countryName + ".jpg" respond(self, url, address, port)
def execute(self):
    """Download every country's league standings from the main page and
    write all teams to ``uefa_clubs.csv``. (Comments translated from
    Spanish.)"""
    page = requests.get(self._urlMain)
    soup = BeautifulSoup(page.content, "lxml")
    # The div with id ListDomesticLeague is the main container holding all countries.
    groupCountries = soup.find('div', attrs={'id': 'ListDomesticLeague'})
    # For each country found, record the link where its standings can be
    # downloaded. The standings URL of each country is inside an <a href>;
    # every <a> encountered is a new country.
    for aCountries in groupCountries.find_all("a"):
        # Save the country's URL
        urlCountry = aCountries.get('href')
        # Save the country's name
        countryName = aCountries.get('title')
        # Build the absolute URL used to download all the teams
        urlCountry = self._urlBaseCountries + urlCountry
        co = country(countryName, urlCountry)
        # Skip countries that were already collected; otherwise append.
        if (len(self._countryList) > 0):
            bhas = any(x.countryName == countryName for x in self._countryList)
            if (bhas == False):
                self._countryList.append(co)
        else:
            self._countryList.append(co)
    print("Se descargara ", len(self._countryList), " paises")
    # Second pass: download each team of each country.
    # The outer loop iterates over every country found.
    for indice, Cou in enumerate(self._countryList):
        pageCountry = requests.get(Cou.urlCountry)
        soupCountry = BeautifulSoup(pageCountry.content, "lxml")
        # All the teams live inside the html table body.
        teams = soupCountry.find('tbody')
        countryTeams = []
        # Each row holds one team to capture.
        for fila in teams.find_all("tr"):
            row_text = [x.text for x in fila.find_all('td')]
            # Each column carries one attribute of the football team.
            Cname = Cou.countryName
            tName = row_text[2]
            gPlayed = row_text[4]
            wHome = row_text[5]
            dHome = row_text[6]
            lHome = row_text[7]
            wAway = row_text[8]
            dAway = row_text[9]
            lAway = row_text[10]
            wTotal = row_text[11]
            dTotal = row_text[12]
            lTotal = row_text[13]
            points = row_text[14]
            position = row_text[0]
            # Build the team object whose values will be written to the csv.
            te = countryTeam(Cname, tName, gPlayed, wHome, dHome, lHome, wAway, dAway, lAway, wTotal, dTotal, lTotal, points, position)
            countryTeams.append(te)
        # Attach the downloaded team list to the country.
        Cou.actualizarTeams(countryTeams)
        self._countryList[indice] = Cou
    # Finally persist the csv to disk.
    with open('uefa_clubs.csv', 'w', encoding='utf-8') as result_file:
        wr = csv.writer(result_file, delimiter=',', lineterminator='\n')
        wr.writerow([
            "countryName", "teamName", "gamesPlayed", "wHome", "dHome", "lHome", "wAway", "dAway", "lAway", "wTotal", "dTotal", "lTotal", "points", "position"
        ])
        # For each downloaded country, emit its rows in csv format.
        for Cou in self._countryList:
            listaTotal = Cou.countryToCsv()
            wr.writerows(listaTotal)
    print("Fin del proceso")
def create_country(self, name = None):
    """Create a new ``country``, register it in ``self.countries`` keyed
    by its name, and return it.

    Args:
        name: explicit country name; when None, a generated id is used.

    Returns:
        The newly created country object.
    """
    c = country()
    # Idiom fix: compare against the None sentinel with `is None`
    # (PEP 8) instead of `== None`; behavior is unchanged.
    if name is None:
        name = self.generate_id(self.countries)
    c.name = name
    self.countries[c.name] = c
    return c