def validCountry(self):
    """Country validation. Returns True if country code represents an
    actual country, False otherwise."""
    try:
        countries.get(self.countryCode)
        self.flags['validCountry'] = True
    except KeyError:
        self.flags['validCountry'] = False
    return self.flags['validCountry']
def test_csv_output(self):
    response = self.app.get(reverse('networkgroups-csv'))
    csv = unicodecsv.reader(StringIO(response.body))
    header_row = next(csv)
    # Headers need to be in a specific form
    headers = ['ISO3', 'Country', 'Geo coordinates', 'Map location',
               'Local Groups status', 'Community Leaders', 'Website',
               'Mailing List', 'Twitter handle', 'Youtube channel',
               'Facebook page']
    for group in WorkingGroup.objects.all():
        headers.append('Topic: {0}'.format(group.name))
    self.assertEqual(header_row, headers)

    germany = next(csv)
    germany_data = [countries.get(self.germany.country.code).alpha3,
                    self.germany.get_country_display(),
                    '', '',
                    self.germany.get_group_type_display(),
                    ', '.join([m.name for m in self.germany.members.all()]),
                    self.germany.homepage_url,
                    self.germany.mailinglist_url,
                    self.germany.twitter,
                    '', '',
                    'Y', 'Y']
    self.assertEqual(germany, germany_data)

    britain = next(csv)
    britain_data = [countries.get(self.britain.country.code).alpha3,
                    self.britain.get_country_display(),
                    '', '',
                    self.britain.get_group_type_display(),
                    ', '.join([m.name for m in self.britain.members.all()]),
                    self.britain.homepage_url,
                    self.britain.mailinglist_url,
                    self.britain.twitter,
                    '', '',
                    '', 'Y']
    self.assertEqual(britain, britain_data)

    buckingham = next(csv)
    buckingham_data = [
        countries.get(self.buckingham.country.code).alpha3,
        self.buckingham.get_country_display(),
        '{lat},{lon}'.format(
            lat=self.buckingham.position.latitude,
            lon=self.buckingham.position.longitude
        ),
        '{region}, {country}'.format(
            region=self.buckingham.region,
            country=self.buckingham.get_country_display()
        ),
        self.buckingham.get_group_type_display(),
        ', '.join([m.name for m in self.buckingham.members.all()]),
        self.buckingham.homepage_url,
        self.buckingham.mailinglist_url,
        self.buckingham.twitter,
        self.buckingham.youtube,
        self.buckingham.facebook_url,
        'Y', '']
def db_value(self, value):
    '''Check that the field value is a country given by its ISO 3166-1 alpha-3 code.'''
    if value is None:
        return value
    try:
        countries.get(value)
    except KeyError:
        raise KeyError('Country by ISO 3166-1 alpha-3 not found')
    else:
        return value
def registerNewItem(self):
    if not self.registeringItem:
        self.resetFlags()
        self.resetVariables()
        self.registeringItem = True
        self.screenObj.addLine('Looking online for what this is...', 'top')
        response = barcodelookup.BarcodeLookup().lookupBarcode(self.newBarcode)
        if response != (None, None, None):
            self.newItemDesc, self.newItemVolume, self.newItemCountry = response
            try:
                name = countries.get(self.newItemCountry).name
            except KeyError:
                name = self.newItemCountry
            self.screenObj.addLine('I think this is ' + str(self.newItemDesc) + ' '
                                   + str(self.newItemVolume) + ' from ' + str(name) + '.', 'top')
            self.askNextNewItemQuestion()
            return
        else:
            self.screenObj.addLine('Not found. Now asking you for a description, '
                                   'then a size, a country of origin and a price.', 'top')
            self.askNextNewItemQuestion()
            return
    if not self.newItemDesc:
        self.newItemDesc = str(self.lastLine.strip())
        self.askNextNewItemQuestion()
        return
    if not self.newItemVolume:
        self.newItemVolume = str(self.lastLine.strip())
        self.askNextNewItemQuestion()
        return
    if self.newItemCountry is None:
        if self.lastLine.strip() != '':  # 'is not' compared string identity, not equality
            try:
                countries.get(str(self.lastLine.strip()))
            except KeyError:
                self.screenObj.addLine("Unrecognised country. Please try again (or leave blank) "
                                       "(HINT: ccTLD's work best).", 'top')
                return
        self.newItemCountry = str(self.lastLine.strip())
        self.askNextNewItemQuestion()
        return
    if self.newItemPrice is None:
        if self.lastLine.strip() == '':
            self.newItemPrice = 0
        else:
            try:
                self.newItemPrice = float(self.lastLine.strip())
            except ValueError:
                self.newItemPrice = 0
                self.screenObj.addLine('Invalid value. Skipping...', 'top')
    self.screenObj.addLine('Entering data now.', 'top')
    if self.newItemPrice != 0:  # 'is not 0' relied on small-integer caching
        self.bar.addItem(self.newBarcode, self.newItemPrice)
    if self.newItemDesc and self.newItemVolume and self.newItemCountry:
        self.bar.addBarcodeDesc(self.newBarcode, self.newItemDesc,
                                self.newItemVolume, self.newItemCountry)
    self.resetFlags()
    self.resetVariables()
    return
def check_lookup(alpha2, matching_keys, missing_keys):
    for k in matching_keys:
        assert countries[k].alpha2 == alpha2
        assert countries.get(k).alpha2 == alpha2
        assert k in countries
    for k in missing_keys:
        with pytest.raises(KeyError):
            countries.get(k)
        with pytest.raises(KeyError):
            countries[k]
        assert countries.get(k, None) is None
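# A hedged sketch of how check_lookup might be invoked. The key sets below are
# assumptions based on iso3166's documented lookup behaviour (alpha2, alpha3,
# numeric, and name keys all resolve; unknown codes raise KeyError unless a
# default is passed), not test cases taken from this suite.
def test_lookup_us_sketch():
    check_lookup('US',
                 ['US', 'USA', '840', 'United States of America'],
                 ['XX', 'ZZZ'])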
def addBarcodeDesc(self, barcode, description, volume='', issuing_country=''):
    """
    Add a new barcode description entry to the bar.

    :param barcode: The barcode of the new Item
    :type barcode: str
    :param description: The description of the barcode item
    :type description: str
    :param volume: The weight or size of the item in question.
    :type volume: str
    :param issuing_country: The issuing country.
    :type issuing_country: str
    :returns: model.BarcodeDesc
    """
    try:
        if issuing_country != '':  # 'is not' tested identity, not equality
            issuing_country = countries.get(issuing_country.lower()).alpha3.lower()
            self.log.debug("Identified country as %s" % issuing_country)
    except KeyError:
        raise ValueError("Unidentified country of origin")
    barcodedesc = BarcodeDesc(barcode, description, volume, issuing_country)
    self.session.add(barcodedesc)
    try:
        self.session.commit()
        self.session.flush()
    except Exception:
        self.log.error("Exception occurred during commit:\n%s" % traceback.format_exc())
        self.session.rollback()
        raise exceptions.SessionCommitError
    return barcodedesc
def normalize(code):
    """
    Normalize language codes to ISO 639-2. If all conversions fail, return
    the `code` as it was given.

    Args:
        code (str): Language / country code.

    Returns:
        str: ISO 639-2 language code, or a lowercased ISO 3166 alpha-3
        country code as a fallback.
    """
    if len(code) == 3:
        return code
    normalized = translate(code)
    if normalized:
        return normalized
    country = countries.get(code, None)
    if country:
        return country.alpha3.lower()
    return code
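# An illustrative walk through the fallback chain above, assuming `translate`
# maps ISO 639-1 language codes to ISO 639-2 (e.g. 'en' -> 'eng') and returns
# a falsy value when it cannot; the sample values are examples, not outputs
# observed from this codebase.
assert normalize('eng') == 'eng'   # already three letters, passed through
assert normalize('gb') == 'gbr'    # no language match, falls back to iso3166 alpha-3
assert normalize('qq') == 'qq'     # neither a language nor a country code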
def print_formatted(label, api_index, top):
    nodes = read_datafile()
    ts = time.localtime(nodes['timestamp'])  # get snapshot time from JSON
    # print the table header
    print("Snapshot: {}".format(time.strftime("%Y-%m-%d %H:%M", ts)))
    print("\nNo. {:<69}Total".format(label))
    print("-" * 80)
    total = 0
    index = 0
    for name, count in node_counter(read_datafile(), api_index, top):
        total += count
        index += 1
        if api_index == 7:  # it's by countries
            country = countries.get(name, None)
            if country:
                name = country.name
        print("{:>4} {:<68} {:>5}".format(index, name[:68], count))
    # print the table footer
    print("-" * 80)
    print("Nodes in top {:<6} {:>60}".format(top, total))
    print("Total nodes {:>68}\n".format(nodes['total_nodes']))
def flag(self):
    try:
        c = countries.get(self.country)
        return "img/flags/%s.png" % c.alpha2
    except KeyError:  # a bare except also swallowed unrelated errors
        pass
    return ''
def worker(queue, work_dir):
    # register signal process function
    signal.signal(signal.SIGTERM, signal_handler)
    session = create_session()
    feature_dict = {}
    base_dir = os.path.join(work_dir, 'gadm')
    dir_name_list = [o for o in os.listdir(base_dir)
                     if os.path.isdir(os.path.join(base_dir, o))]
    for dir_name in dir_name_list:
        feature_name = dir_name.split("_")[0]
        shape_file = os.path.join(base_dir, dir_name + "/" + dir_name + "0.shp")
        driver = ogr.GetDriverByName("ESRI Shapefile")
        data_source = driver.Open(shape_file, 0)
        layer = data_source.GetLayer()
        feature = layer[0]
        feature_dict[feature_name] = feature
    while True:
        try:
            oid, longitude, latitude, country_code = queue.get(True, 1000)
        except Queue.Empty:
            break
        point = ogr.Geometry(ogr.wkbPoint)
        point.AddPoint(longitude, latitude)
        if country_code is None:
            session.query(Occurrence).filter(Occurrence.id == oid).update(
                {"cross_check": -3}, synchronize_session=False)
            session.commit()
            continue
        elif country_code in countries:
            country_data_alpha_3 = countries.get(country_code)[2]  # index 2 is the alpha-3 code
            if country_data_alpha_3 in feature_dict.keys():
                poly = feature_dict[country_data_alpha_3]
                poly = poly.geometry()
            else:
                session.query(Occurrence).filter(Occurrence.id == oid).update(
                    {"cross_check": -1}, synchronize_session=False)
                session.commit()
                return None
        else:
            session.query(Occurrence).filter(Occurrence.id == oid).update(
                {"cross_check": -2}, synchronize_session=False)
            session.commit()
            return None
        intersection = poly.Intersects(point)
        if intersection:
            session.query(Occurrence).filter(Occurrence.id == oid).update(
                {"cross_check": 1}, synchronize_session=False)
            session.commit()
        else:
            session.query(Occurrence).filter(Occurrence.id == oid).update(
                {"cross_check": 0}, synchronize_session=False)
            session.commit()
    return None
def country(self):
    """
    Returns a Country object representing the country in this model's
    country_code.
    """
    try:
        return countries.get(self.country_code)
    except (KeyError, ValueError, AttributeError):
        # Country code is not valid ISO-3166
        return country.UNKNOWN_COUNTRY
def get(cls, country):
    try:
        if PYCOUNTRY:
            c = countries.lookup(country)
            return Country(c.alpha_2, c.alpha_3, c.numeric, c.name,
                           getattr(c, "official_name", c.name))
        else:
            c = countries.get(country)
            return Country(c.alpha2, c.alpha3, c.numeric, c.name,
                           c.apolitical_name)
    except (LookupError, KeyError):
        raise LookupError("Invalid country code: {0}".format(country))
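# A hedged usage sketch: whichever backend (pycountry or iso3166) is
# installed, callers get the same Country tuple shape. It assumes `get` is a
# classmethod on a namedtuple-like `Country` type defined elsewhere in this
# module; the field order matches the constructor calls above.
c = Country.get("DE")
print(c.alpha2, c.alpha3, c.numeric)  # e.g. DE DEU 276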
def process(self, tup):
    # Converting alpha-2 country codes to alpha-3
    country_code = countries.get(tup.values[0]).alpha3
    txt = tup.values[1]
    redis_hash = "country:" + country_code
    # Increment "tweet_count" field of hash
    self.r.hincrby(redis_hash, "t_count", 1)
    self.r.hset(redis_hash, "c_code", country_code)
def distanceToCountry(self):
    """Calculate the smallest distance between the coordinates and the specified country."""
    self.geoflags['negatedLatitude'] = False
    self.geoflags['negatedLongitude'] = False
    self.geoflags['transposedCoordinates'] = False
    country3 = countries.get(self.countryCode).alpha3
    dist = pointCountryDistanceQuery(country3, self.decimalLatitude, self.decimalLongitude)
    if dist is not None:
        self.geoflags['distanceToCountryInKm'] = round(dist / 1000, 3)
    return
def process_country(c, v, s, m):
    global url_template
    country = countries.get(c)
    url = url_template.format(code=c, country_name=country.name, variable=v,
                              scenario=s, period=m[1], measurement=m[0])
    r = requests.get(url)
    print "[%d] %s %s %s %s" % (r.status_code, c, v, s, m)
    f = StringIO.StringIO(r.text)
    reader = csv.DictReader(f, delimiter=',')
    try:
        return [(int(row["yr"]), float(row["value"])) for row in reader]
    except (KeyError, ValueError):  # a bare except also hid programming errors
        return [(0, 0)]
def setUp(self):
    super(CourseEnrollmentByLocationViewTests, self).setUp()
    self.country = countries.get('US')
    G(self.model, course_id=self.course_id, country_code='US', count=455, date=self.date)
    G(self.model, course_id=self.course_id, country_code='CA', count=356, date=self.date)
    G(self.model, course_id=self.course_id, country_code='IN', count=12,
      date=self.date - datetime.timedelta(days=29))
    G(self.model, course_id=self.course_id, country_code='', count=356, date=self.date)
    G(self.model, course_id=self.course_id, country_code='A1', count=1, date=self.date)
    G(self.model, course_id=self.course_id, country_code='A2', count=2, date=self.date)
    G(self.model, course_id=self.course_id, country_code='AP', count=1, date=self.date)
    G(self.model, course_id=self.course_id, country_code='EU', count=4, date=self.date)
    G(self.model, course_id=self.course_id, country_code='O1', count=7, date=self.date)
def generate_ip_geolocation_report(self, ip_list):
    report = self.ip_geolocation_report_template
    for ip in ip_list:
        try:
            country = countries.get(self.geolocate_ip(ip)).alpha3
        except KeyError:
            print("Couldn't get a location from {}".format(ip))
            continue  # without this, the stale `country` from the previous IP was counted
        report[country] = report.get(country, 0) + 1
    return report
def GET(self, country):
    country = country.lower()
    image = country + ".svg"
    short_name = full_to_short_country(country.replace(" ", "-")).lower()
    return render.result({
        "title": country.replace("-", " ").capitalize(),
        "image": image,
        "url_to_graph_crude": "../static/svg/" + short_name + "_Death_rate_crude_(per_1000_people).svg",
        "url_to_graph_under_five": "../static/svg/" + short_name + "_Number_of_under-five_deaths.svg",
        "url_to_graph_injury": "../static/svg/" + short_name + "_Cause_of_death_by_injury_%28%25_of_total%29.svg",
        "url_to_graph_battle": "../static/svg/" + short_name + "_Battle-related_deaths_(number_of_people).svg",
        "url_to_graph_population": "../static/svg/" + short_name + "_Population_total.svg",
        "country_code_2_letter": countries.get(full_to_short_country(country.replace(" ", "-")))[1]  # index 1 is the alpha-2 code
    })
def networkgroup_csv_output(request):
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="network.csv"'
    writer = unicodecsv.writer(response)
    header_row = ['ISO3', 'Country', 'Geo coordinates', 'Map location',
                  'Local Groups status', 'Community Leaders', 'Website',
                  'Wiki page', 'Mailing List', 'Twitter handle',
                  'Youtube channel', 'Facebook page', 'Google+ page']
    working_groups = []
    for group in WorkingGroup.objects.all():
        topic = u'Topic: {0}'.format(group.name)
        working_groups.append(topic)
    header_row.extend(working_groups)
    writer.writerow(header_row)
    for group in NetworkGroup.objects.all():
        row = [countries.get(group.country.code).alpha3,  # ISO3
               group.get_country_display(),  # Country
               u'{lat},{lon}'.format(
                   lat=group.position.latitude,
                   lon=group.position.longitude
               ) if group.position else '',  # Geo coordinates
               u'{region}, {country}'.format(
                   region=group.region,
                   country=group.get_country_display()
               ) if group.region else '',  # Map location
               group.get_group_type_display(),  # Local group status
               u', '.join([member.name for member in group.members.all()]),  # Leaders
               group.homepage_url,  # Website
               group.wiki_url if group.wiki_url else '',
               group.mailinglist_url,
               group.twitter if group.twitter else '',
               group.youtube_url if group.youtube_url else '',
               group.facebook_url,
               group.gplus_url if group.gplus_url else '',
               ]
        # Find topics of working group
        group_working_groups = [g.name for g in group.working_groups.all()]
        for working_group in working_groups:
            if working_group[len('Topic: '):] in group_working_groups:
                row.append('Y')
            else:
                row.append('')
        writer.writerow(row)
    return response
def alpha2Name(self, alpha2):
    # Map non-standard codes to their ISO 3166-1 equivalents first
    if alpha2 == 'UK':
        alpha2 = 'GB'
    if alpha2 == 'EL':
        alpha2 = 'GR'
    if alpha2 == 'FY':
        alpha2 = 'MK'
    if alpha2 == 'KO':
        alpha2 = 'KR'
    if alpha2 == 'XK':
        return 'Kosovo'
    if alpha2 == 'AN':
        return 'Netherlands_Antilles'
    return countries.get(alpha2).name.replace(' ', '_')
def add_population_to_countries(countries_array):
    # Fetch the population data once rather than on every loop iteration
    countries_with_population = get_population_for_dbpedia_countries()
    for country in countries_array:
        country_code = country['country_code']
        #print('Country code: ' + country_code)
        code = countries_codes.get(country_code)
        # print(code)
        for country_with_population in countries_with_population:
            populated_country_code = country_with_population['country_code']['value']
            try:
                populated_code = countries_codes.get(populated_country_code)
                #print populated_code
                if code == populated_code:
                    #print 'MATCH: ' + code[0] + ' with ' + populated_code[0]
                    country['population'] = country_with_population['population']['value']
                    break
            except KeyError:
                continue
def _buildColHeaderToAlpha2Map(self):
    # Note: despite the name, this map stores alpha-3 codes as values.
    self._colHeaderToAlpha2Map = {}
    # I created this lookup table by hand
    file_path = os.path.join(base_dir, 'acs', 'ACS_12_5YR_B05006_metadata-iso3166.csv')
    logging.debug("Loading header to alpha map from {}".format(file_path))
    csvfile = open(file_path, encoding='utf-8')
    csvreader = csv.reader(csvfile)
    for row in csvreader:
        if "Estimate" in row[1]:  # TODO: how to handle margin of error?
            try:
                country_code = countries.get(row[2])
                self._colHeaderToAlpha2Map[row[0]] = country_code.alpha3
            except KeyError:
                continue
def load_dataset_from_file(file_name):
    fh = open(file_name, 'rb')
    db = get_db()
    reader = csv.DictReader(fh)
    for (count, row) in enumerate(reader):
        if not row.get('indicator_name'):
            continue
        indicator_name = munge_name(row.get('indicator_name'))
        dataset = {'country_name': row.get('country2')}
        if row.get('value'):
            dataset['value'] = float(row.get('value'))
        else:
            dataset['value'] = 0.0
        if row.get('value_norm'):
            dataset['normalized_value'] = float(row.get('value_norm'))
        else:
            dataset['normalized_value'] = dataset['value']
        indicator = db.indicator.find_one({'id': indicator_name})
        assert indicator, "Indicator %s could not be found!" % row.get('indicator_name')
        if not indicator.get('good'):
            dataset['normalized_value'] = 1.0 - dataset['normalized_value']
        try:
            cc3 = row.get('country')
            # Map retired World Bank codes to their current ISO 3166-1 alpha-3 codes
            cc3 = {'ROM': 'ROU', 'ZAR': 'COD', 'TMP': 'TLS'}.get(cc3, cc3)
            cc = countries.get(cc3).alpha2
        except KeyError:
            #print row
            continue
        query = {'indicator': indicator.get('_id'),
                 'country': cc,
                 'time': row.get('time')}
        dataset.update(query)
        dataset['indicator_id'] = indicator.get('id')
        # db.datum.update(query, dataset, upsert=True)
        db.datum.insert(dataset)
        if count % 1000 == 0:
            print 'Progress: %s' % count
    db.datum.ensure_index('country')
    db.datum.ensure_index('indicator')
    db.datum.ensure_index('time')
    db.datum.ensure_index('indicator_id')
    fh.close()
    print 'Loaded: %s data points' % db.datum.count()
def _alpha2_to_country(alpha2):
    """
    Try to look up an alpha2 country code with the iso3166 package; return
    the country name if available, otherwise the original value. Returns
    "unknown" (str) for None.

    :param alpha2: alpha2 country code
    :type alpha2: str
    :return: country name, or original value if not found
    :rtype: str
    """
    if alpha2 is None:
        return 'unknown'
    try:
        return countries.get(alpha2).name
    except KeyError:
        return alpha2
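# Illustrative behaviour of the helper above; the specific inputs are
# examples, not values taken from this codebase.
print(_alpha2_to_country('DE'))   # -> 'Germany'
print(_alpha2_to_country('ZZ'))   # -> 'ZZ' (not a valid ISO 3166 code)
print(_alpha2_to_country(None))   # -> 'unknown'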
def get_album_info(self, album):
    """Returns an AlbumInfo object for a Metal Archives album object.
    """
    artist = album.bands[0]
    tracks = self.get_tracks(album.tracks)
    album_id = _add_prefix(album.id)
    artist_id = _add_prefix(artist.id)
    try:
        country = countries.get(artist.country).alpha2
    except KeyError:
        country = ''
    band_names = " / ".join([band.name for band in album.bands])
    return AlbumInfo(album.title, album_id, band_names, artist_id, tracks,
                     albumtype=album.type, va=False, year=album.year,
                     month=album.date.month, day=album.date.day,
                     label=album.label, mediums=album.disc_count,
                     country=country, data_source=DATA_SOURCE,
                     data_url=metallum.BASE_URL + '/' + album.url)
def get_location(self, location_string):
    if location_string in self.geomappings:
        return self.geomappings[location_string]
    elif location_string in self.geomappings_blacklist:
        return (0, 0, "", 0)
    else:
        location = self.geolocator.geocode(location_string, exactly_one=True, timeout=60)
        if location and u'countryCode' in location.raw:
            cc_alphabet = location.raw[u'countryCode'].encode('utf_8')
            cc_numeric = int(countries.get(cc_alphabet).numeric)
            res = (location.latitude, location.longitude,
                   location.raw[u'countryName'].encode('utf_8'), cc_numeric)
            self.geomappings[location_string] = res
            if len(self.geomappings) % 200 == 0:
                log_info("Geomappings size now %s" % len(self.geomappings))
            return res
        else:
            self.geomappings_blacklist.append(location_string)
            log_warn("Failed to get location for string %s" % location_string.encode('utf_8'))
            return (0, 0, "", 0)
def pointInCountry(self, lat="", lng="", ccode="", radius=""):
    """Check if the given point falls within the given country."""
    params = {
        "lat": lat,
        "lng": lng,
        "radius": radius,
        "username": self.username
    }
    # Note: the ccode argument is ignored; the instance's countryCode is used.
    ccode = countries.get(self.countryCode).alpha2
    url = gn_api + "countryCode"
    data = urlencode(params)
    # res = urlopen(url, data=data).read()
    res = urlfetch.fetch(url=url, payload=data, method=urlfetch.POST)
    if res.content.rstrip() == ccode:
        return True
    else:
        return False
def updateFromPassportScan(self, passport_info):
    from datetime import datetime
    from iso3166 import countries
    passport_info['expirationDate'] = datetime.strptime(passport_info['expirationDate'], '%y%m%d')
    passport_info['dayOfBirth'] = datetime.strptime(passport_info['dayOfBirth'], '%y%m%d').strftime('%d/%m/%Y')
    passport_info['countryCode'] = countries.get(passport_info['countryCode']).alpha2
    self.setPassportExpire(passport_info['expirationDate'])
    self.setPassportOrigin(passport_info['countryCode'])
    self.setPassportID(passport_info['documentNumber'])
    self.setFirstName(passport_info['givenNames'])
    self.setSurName(passport_info['surNames'])
    self.setBirthDate(passport_info['dayOfBirth'])
    if self.getAvatar():
        self.getAvatar().setName(passport_info['givenNames'])
        self.getAvatar().setSurName(passport_info['surNames'])
        self.getAvatar().setBirthDate(passport_info['dayOfBirth'])
    self.updateValues(passport_info)
def clean_company_countries(companies_array):
    for company in companies_array:
        #print company
        if company['country_name'] == 'England' or company['country_name'] == 'Scotland':
            company['country_name'] = 'United Kingdom'
        if company['country_name'] == 'Russia':
            company['country_name'] = 'Russian Federation'
        if company['country_name'] == 'South Korea':
            company['country_name'] = 'Korea, Republic of'
        if company['country_name'] == 'Japan,':
            company['country_name'] = 'Japan'
        if company['country_name'] == 'USA,':
            company['country_name'] = 'USA'
        if company['country_name'] == 'Taiwan':
            company['country_name'] = 'Taiwan, Province of China'
        company['country_code'] = countries_codes.get(company['country_name'])[2]  # index 2 is the alpha-3 code
        #print '\t' + str(countries_codes.get(company['country_name']))
    return companies_array
def materialize_country_facets(cls, counts):
    """
    Materialize country facet counts.

    Returns:
        list: dicts of {label, value, count}
    """
    facets = []
    for abbr, count in counts:
        # Pass a default so unknown codes yield None instead of raising KeyError.
        country = countries.get(abbr, None)
        if country:
            facets.append(dict(
                label=country.name,
                value=abbr.upper(),
                count=count,
            ))
    return facets
def test_it_can_be_printed():
    address = Address(countries.get('gb'), 'City')
    assert repr(address) == "<Address 'City' None 'GB'>"
def add_covid19_tracker_data(engine, table, config_data, df_pop, add_SE2=True):
    ##########################################
    # get data from coronavirus-tracker API #
    url3 = "https://cvtapi.nl/all"
    url2 = "https://coronavirus-tracker-api.herokuapp.com/all"
    url1 = "https://covid-tracker-us.herokuapp.com/all"
    urls = (url1, url2, url3)
    valid_response = False
    response = []
    for url in urls:
        response = requests.get(url)
        if response.status_code == 200:
            valid_response = True
            break
        else:
            continue
    if not valid_response:
        print('Could not get data from the coronavirus-tracker API')
        return -1
    data = response.text
    parsed = json.loads(data)
    nr_of_countries = len(parsed["deaths"]["locations"])  # + 1  # x Countries +1 for sweden

    # To read out the array index
    country_code_two_digits = {}
    country_code_three_digits = {}
    for country in config_data["countries_of_interest"]:
        country_code_two_digits[country] = 0
        country_code_three_digits[countries.get(country)[2]] = 0

    # -------------------------------------------------------------------
    # Get the magic country code number out of the parsed json object.
    #
    for i in range(0, nr_of_countries - 2):
        if parsed["deaths"]["locations"][i]["country_code"] in country_code_two_digits and \
                parsed["deaths"]["locations"][i]["province"] == "":
            country_code_two_digits[parsed["deaths"]["locations"][i]["country_code"]] = i
            # change from 2 digits to three digits.
            country_code_three_digits[countries.get(parsed["deaths"]["locations"][i]["country_code"])[2]] = i

    if add_SE2:
        ##########################################
        # add SE2 from Excel to JSON
        # used for more accurate deaths time of sweden
        #
        country_code_three_digits['SE2'] = len(parsed["deaths"]["locations"])
        SE2_confirmed_json = copy.deepcopy(parsed["confirmed"]["locations"][country_code_three_digits["SWE"]])
        SE2_deaths_json = copy.deepcopy(parsed["deaths"]["locations"][country_code_three_digits["SWE"]])
        parsed["deaths"]["locations"].append(SE2_deaths_json)
        parsed["confirmed"]["locations"].append(SE2_confirmed_json)

        url = 'https://www.arcgis.com/sharing/rest/content/items/b5e7488e117749c19881cce45db13f7e/data'
        myfile = requests.get(url, allow_redirects=True)
        open('Folkhalsomyndigheten_Covid19.xlsx', 'wb').write(myfile.content)
        wb = load_workbook(filename='Folkhalsomyndigheten_Covid19.xlsx')
        ws = wb['Antal avlidna per dag']
        SE2_DateDeaths = []
        for i in range(2, ws.max_row):
            if isinstance(ws.cell(row=i, column=1).value, datetime.date):
                SE2_DateDeaths.append(ws.cell(row=i, column=1).value)
            else:
                break
        SE2_Deaths = []
        for i in range(2, ws.max_row):
            if isinstance(ws.cell(row=i, column=1).value, datetime.date):
                if i >= 3:
                    SE2_Deaths.append(ws.cell(row=i, column=2).value + SE2_Deaths[i - 3])
                else:
                    SE2_Deaths.append(ws.cell(row=i, column=2).value)
            else:
                break
        k = 0
        for key, value in parsed["deaths"]["locations"][country_code_three_digits["SE2"]]["history"].items():
            if k >= len(SE2_DateDeaths):
                parsed["deaths"]["locations"][country_code_three_digits["SE2"]]["history"][key] = SE2_Deaths[k - 1]
            else:
                if datetime.datetime.strptime(key, "%m/%d/%y").date() == SE2_DateDeaths[k].date():
                    parsed["deaths"]["locations"][country_code_three_digits["SE2"]]["history"][key] = SE2_Deaths[k]
                    k += 1
                else:
                    parsed["deaths"]["locations"][country_code_three_digits["SE2"]]["history"][key] = 0

    df = pd.DataFrame()
    for three_digit, value_three_digit in country_code_three_digits.items():
        history_len = len(parsed["deaths"]["locations"][value_three_digit]["history"].items())
        df = df.append(pd.DataFrame(
            zip(list(parsed["deaths"]["locations"][value_three_digit]["history"].keys()),
                [three_digit for _x in range(history_len)],
                [countries.get(parsed["confirmed"]["locations"][value_three_digit]["country_code"])[2]
                 for _x in range(history_len)],
                list(parsed["confirmed"]["locations"][value_three_digit]["history"].values()),
                list(parsed["deaths"]["locations"][value_three_digit]["history"].values())),
            columns=['date', 'countrycode', 'country_code_for_pop', 'confirmed', 'deaths']),
            ignore_index=True)
    df['date'] = pd.to_datetime(df['date'])
    pop_by_code = df_pop.set_index('countrycode')['population']
    df['confirmedperpop'] = 1e6 / df['country_code_for_pop'].map(pop_by_code) * df['confirmed']
    df['newconfirmed'] = df.sort_values('date').groupby('countrycode')['confirmed'].diff().fillna(0)
    df['newconfirmedperpop'] = 1e6 / df['country_code_for_pop'].map(pop_by_code) * df['newconfirmed']
    df['newconfirmedfilt3d'] = df.sort_values('date').groupby('countrycode')['newconfirmed'].rolling(3).mean().reset_index(0, drop=True)
    df['newconfirmedfilt7d'] = df.sort_values('date').groupby('countrycode')['newconfirmed'].rolling(7).mean().reset_index(0, drop=True)
    df['newconfirmedfilt14d'] = df.sort_values('date').groupby('countrycode')['newconfirmed'].rolling(14).mean().reset_index(0, drop=True)
    df['newconfirmedperpopfilt3d'] = df.sort_values('date').groupby('countrycode')['newconfirmedperpop'].rolling(3).mean().reset_index(0, drop=True)
    df['newconfirmedperpopfilt7d'] = df.sort_values('date').groupby('countrycode')['newconfirmedperpop'].rolling(7).mean().reset_index(0, drop=True)
    df['newconfirmedperpopfilt14d'] = df.sort_values('date').groupby('countrycode')['newconfirmedperpop'].rolling(14).mean().reset_index(0, drop=True)
    df['deathsperpop'] = 1e6 / df['country_code_for_pop'].map(pop_by_code) * df['deaths']
    df['newdeaths'] = df.sort_values('date').groupby('countrycode')['deaths'].diff().fillna(0)
    df['newdeathsperpop'] = 1e6 / df['country_code_for_pop'].map(pop_by_code) * df['newdeaths']
    df['newdeathsfilt3d'] = df.sort_values('date').groupby('countrycode')['newdeaths'].rolling(3).mean().reset_index(0, drop=True)
    df['newdeathsfilt7d'] = df.sort_values('date').groupby('countrycode')['newdeaths'].rolling(7).mean().reset_index(0, drop=True)
    df['newdeathsfilt14d'] = df.sort_values('date').groupby('countrycode')['newdeaths'].rolling(14).mean().reset_index(0, drop=True)
    df['newdeathsperpopfilt3d'] = df.sort_values('date').groupby('countrycode')['newdeathsperpop'].rolling(3).mean().reset_index(0, drop=True)
    df['newdeathsperpopfilt7d'] = df.sort_values('date').groupby('countrycode')['newdeathsperpop'].rolling(7).mean().reset_index(0, drop=True)
    df['newdeathsperpopfilt14d'] = df.sort_values('date').groupby('countrycode')['newdeathsperpop'].rolling(14).mean().reset_index(0, drop=True)
    del df['country_code_for_pop']  # was only used for calculation
    with engine.connect() as con:
        df.to_sql(name=table, con=con, if_exists='replace', index=True)
    engine.dispose()
    print(f"insert done into: {table}")
    return df
def process_pdf(location):
    # FIPS data: https://www.nrcs.usda.gov/wps/portal/nrcs/detail/national/home/?cid=nrcs143_013697
    fips_data = pd.read_csv('state_county_fips.csv', names=['state', 'county', 'fips'], engine='python')
    for file in os.listdir("google/"):
        if file.endswith(".pdf"):
            filename = "google/" + file
            country = file.split("_")[1]
            if location == "US":
                if country == "US" and file.split("_")[2] != "Mobility":
                    state = re.findall(r"US_(\w+?)_Mobility_", file)[0].replace("_", " ")
                    rawText = parser.from_file(filename)
                    rawList = rawText['content'].splitlines()
                    loc = []
                    b = 0
                    for i in rawList:
                        b += 1
                        if i == "Retail & recreation":
                            loc.append(b)
                    for i in range(1, len(loc)):
                        temp = []
                        temp.append("USA")
                        temp.append(state)
                        county = rawList[loc[i] - 3]
                        temp.append(county)
                        try:
                            fips = fips_data[fips_data.state.str.contains(state) &
                                             fips_data.county.str.contains(county)]['fips'].values
                            temp.append(str(fips[0]))
                        except (IndexError, ValueError, TypeError):
                            temp.append(0)
                        try:
                            retail = rawList[loc[i] + 1].split(' ')[0]
                            temp.append(retail)
                        except (IndexError, ValueError):
                            temp.append(0)
                        try:
                            grocery = rawList[loc[i] + 5].split(' ')[0]
                            temp.append(grocery)
                        except (IndexError, ValueError):
                            temp.append(0)
                        try:
                            park = rawList[loc[i] + 9].split(' ')[0]
                            temp.append(park)
                        except (IndexError, ValueError):
                            temp.append(0)
                        try:
                            transit = rawList[loc[i] + 13].split(' ')[0]
                            temp.append(transit)
                        except (IndexError, ValueError):
                            temp.append(0)
                        try:
                            workplace = rawList[loc[i] + 17].split(' ')[0]
                            temp.append(workplace)
                        except (IndexError, ValueError):
                            temp.append(0)
                        try:
                            residential = rawList[loc[i] + 21].split(' ')[0]
                            temp.append(residential)
                        except (IndexError, ValueError):
                            temp.append(0)
                        data.append(temp)
            else:
                if file.split("_")[2] == "Mobility":
                    temp = []
                    country = countries.get(country).alpha3
                    rawText = parser.from_file(filename)
                    rawList = rawText['content'].splitlines()
                    temp.append(country)
                    temp.append(country)
                    temp.append("")
                    temp.append(country)
                    for i in metric:
                        try:
                            temp.append(rawList[2 + rawList.index(i)])
                        except ValueError:
                            temp.append(0)
                    data.append(temp)
def check_value(self, value):
    try:
        countries.get(value)
        return True
    except KeyError:
        return False
dict4 = {}
ip1 = []
kount = 0
for root, dirs, files in os.walk("./log"):
    #print dirs
    if dirs:
        #print dirs
        ip1 = dirs
        for i in dirs:
            match = geolite2.lookup(i)
            if match is not None:
                #print match.country
                try:
                    if match.country == "US":
                        print i
                    key = countries.get(match.country).name
                    print i, key
                    if key in dict3.keys():
                        dict3[key] = dict3[key] + 1
                    else:
                        dict3[key] = 1
                except KeyError:
                    print "Country Not Found"
                #print find_between(countries.get(match.country),"(name=u'","")
    for file in files:
        if file.endswith(".log"):
            name = os.path.join(root, file)
            f = open(name, "r")
            for line in f:
                line = line.split(" ")
                if 0 <= 1 < len(line):
def get_country(code):
    try:
        return countries.get(code.lower()).name
    except KeyError:
        return code
    return s.title()

# My API credentials are in "hidden" module
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)

print("""
Enter ISO COUNTRY codes like
AU / AUS for Australia
DE / DEU for Germany
""")
user_country = input("Country code: ")
try:
    c_code = countries.get(user_country).alpha2
    c_name = countries.get(user_country).name
except KeyError:
    wikiurl = "https://en.wikipedia.org/wiki/ISO_3166-1"
    sys.exit(
        f"'{user_country}' is not a valid Country code. For ISO codes refer {wikiurl}"
    )
dloc = getcities_forcountry(c_code)
if len(dloc) < 1:
    print(f"No locations found for Country {c_code}-{c_name}")
else:
    print(f"Trends are available for the following cities in {c_name}")
    pprint(list(dloc.keys()), indent=4, compact=True)
    while True:
actual_countrycodes = open(mapsd_data_dir + "/country_names/country_codes.txt").read().split("\n")
actual_countrynames_in_en = [x.lower() for x in open(mapsd_data_dir + "/country_names/en.txt").read().split("\n")]
actual_countrynames_diff_langs = {}
diff_langs = os.listdir(mapsd_data_dir + "/country_names/")
for lang in diff_langs:
    if lang == "country_codes.txt":
        continue
    if lang == "en.txt":
        continue
    langname = lang[:-4]
    actual_countrynames_diff_langs[langname] = [
        x.lower() for x in open(os.path.join(mapsd_data_dir + "/country_names/", lang)).read().split("\n")]

# get mapping from 3-letter country codes to the 2-letter actual codes that we
# have used as identifiers for countries in mapsd
actual_countrycodes_3letter = {}
for cc in actual_countrycodes:
    if cc not in countries:
        continue
    actual_countrycodes_3letter[countries.get(cc.lower()).alpha3] = cc
actual_countrycodes_3letter["ACI"] = "AC"

# load cities database: ~25000 (cityname, countryname, subcountryname) entries
cities = [x.lower() for x in open(mapsd_data_dir + "/world-cities/data/world-cities.csv").read().strip().split("\n")]
cities = [x.split(",")[:-1] for x in cities[1:]]
mapping_countries_incitiesdatabase_to_actual_codes = {}
for city_entry in cities:
    city_country = city_entry[1]
    if city_country in actual_countrynames_in_en:
        mapping_countries_incitiesdatabase_to_actual_codes[city_country] = \
            actual_countrycodes[actual_countrynames_in_en.index(city_country)]

# Example: (1) Vancouver, B.C. (2) Toronto, OR (3) Vancouver, BC (4) Burnaby, BC (5) Montreal, QC
# People in Canada, the US and many other countries have a habit of abbreviating province/state names.
# Create a database of lists of provinces and states and codes
def ua_get_alpha3(alpha2):
    try:
        str_alpha2 = str(alpha2)
        return countries.get(str_alpha2).alpha3
    except KeyError:  # a bare except would also hide programming errors
        return None
def get_osm_data_wambachers(country: str = "NL", organization_type: str = "municipality"):
    """
    When config.WAMBACHERS_OSM_CLIKEY is configured, this routine will get the
    map data. It's the same as OSM, but with major seas and lakes cut out.
    That makes a nicer terrain on the website.

    # uses https://wambachers-osm.website/boundaries/ to download map data. Might not be
    # the most updated, but it has a more complete and better set of queries. For example:
    # it DOES get Northern Ireland and it clips out the sea, which makes it very nice to
    # look at.
    # yes, i've donated, and so should you :)

    This has been replaced with the osm-boundaries.com service. Looks very alike.

    curl -f -o NL_province.zip 'https://wambachers-osm.website/boundaries/exportBoundaries
        ?cliVersion=1.0
        &cliKey=[key]       done: add cliKey to config
        &exportFormat=json
        &exportLayout=levels
        &exportAreas=land
        &union=false
        &from_al=4
        &to_al=4            done: get the right number
        &selected=NLD'      done: get 3 letter ISO code

    :param country:
    :param organization_type:
    :return:
    """
    # see if we cached a result
    filename = create_evil_filename([country, organization_type, str(timezone.now().date())], "zip")
    filename = settings.TOOLS["openstreetmap"]["output_dir"] + filename
    log.debug(f"Saving data to {filename}")

    # if the file has been downloaded recently, don't do that again.
    four_hours_ago = time.time() - 14400
    if os.path.isfile(filename) and four_hours_ago < os.path.getmtime(filename):
        log.debug("Already downloaded a coordinate file in the past four hours. Using that one.")
        # unzip the file and return its geojson contents.
        return get_data_from_wambachers_zipfile(filename)

    level = get_region(country, organization_type)
    country = countries.get(country)
    if not level:
        raise NotImplementedError(
            "Combination of country and organization_type does not have a matching OSM query implemented."
        )

    # todo: probably db changes over time.
    url = (
        f"https://osm-boundaries.com/Download/Submit"
        f"?apiKey={config.WAMBACHERS_OSM_CLIKEY}"
        f"&db=osm20210104&"
        f"&osmIds={country_to_osm_number(country)}"
        f"&recursive"
        f"&minAdminLevel={level}"
        f"&maxAdminLevel={level}"
        f"&landOnly"
        f"&includeAllTags"
        # todo: use the resampling option here, instead of using the rdp algorithm.
        f"&simplify=0.001"
    )

    # gets a zip file and extracts the content; the contents are the result.
    response = requests.get(url, stream=True, timeout=(1200, 1200))
    response.raise_for_status()

    # show a nice progress bar when downloading
    with open(filename, "wb") as f:
        i = 0
        for block in response.iter_content(chunk_size=1024):
            i += 1
            print_progress_bar(i, 100000, "Wambacher OSM data")
            if block:
                f.write(block)
        f.flush()

    # unzip the file and return its geojson contents.
    return get_data_from_wambachers_zipfile(filename)
def countrycode(country):
    """Takes a country name and returns its alpha2 code."""
    _country = countries.get(country)
    return jsonify({'name': _country.name, 'code': _country.alpha2})
def iso_coding(arr):
    try:
        return countries.get(arr).name
    except KeyError:  # return a NULL marker when the code is unknown
        return '\\N'
# including many that are aggregates which we need to exclude
# It gives the 3-char country code but not the numeric one used by "ISO 3166-1 numeric"
response = requests.get(WB_API)
jsondata = json.loads(response.text)
meta = jsondata[0]
nations = jsondata[1]
nationlist = []
for c in nations:
    # skip the entries that are collections of countries
    if c["region"]["value"] == "Aggregates":
        continue
    try:
        country = countries.get(c["id"])
        nationlist.append({
            "charcode": c["id"],
            "numcode": int(country.numeric),
            "region": c["region"]["value"],
            "name": c["name"],
        })
    except KeyError:
        # print("No record for country code %s" % c['id'])
        pass

print('"%s","%s","%s","%s"' % ("Country Code", "Population (Thousands)", "Region", "Country"))
for c in nationlist:
    try:
        population = recent.loc[c["numcode"], "PopTotal"]
def get_query_request_handle(request, *args):
    """gets the users' inputs and extracts the most appropriate filters

    Args:
        request and 3 lists with possible aggregators: skills, type and organization

    Returns:
        a context dictionary and the same three lists after checking whether
        the checkboxes are ticked
    """
    context = {}
    job_search = request.GET.get('job_search', "")
    context['job_search'] = job_search
    place_search = request.GET.get('place_search', "")
    try:
        place_search = countries.get(place_search)[4]  # index 4 is the apolitical name
    except KeyError:
        place_search = ''
    context['place_search'] = place_search
    if request.GET.get('remote_search', "") == 'on':
        remote_search = 'checked'
    else:
        remote_search = ''
    context['remote_search'] = remote_search
    skill_, type_ = main_search_handler(job_search)
    skills = set()
    types = set()
    orgs = set()
    if skill_ is not None:
        skills.add(skill_)
    if type_ is not None:
        types.add(type_)
    for val in request.GET.keys():
        if 'skill.' in val:
            aux = val.split(".", 1)[1]
            skills.add(aux)
        if 'type.' in val:
            aux = val.split(".", 1)[1]
            types.add(aux)
        if 'org.' in val:
            aux = val.split(".", 1)[1]
            orgs.add(aux)  # was `org.add(aux)`, a NameError
    args = args[0]
    skills_checkbox = args[0]
    for skill in skills_checkbox:
        if skill.get('value') in skills:
            skill['checked'] = 'checked'
    context['skills_checkbox'] = skills_checkbox
    type_checkbox = args[1]
    for type in type_checkbox:
        if type.get('value') in types:
            type['checked'] = 'checked'
    context['type_checkbox'] = type_checkbox
    org_checkbox = args[2]
    for org in org_checkbox:
        if org.get('value') in orgs:
            org['checked'] = 'checked'
    context['org_checkbox'] = org_checkbox
    skills = list(skills)
    types = list(types)
    orgs = list(orgs)
    filters = skills + types + orgs + [place_search]
    if remote_search != '':
        filters += ['Remote']
    context['filters'] = filters
    return context, skills, types, orgs
ax.set_extent([-10.67, 34.5, 31.55, 71.05], crs=crs.PlateCarree())  ## Important
plt.scatter(x=map1.Longitude, y=map1.Latitude, color="orangered",
            s=results.Count, alpha=1, transform=crs.PlateCarree())  ## Important
plt.show()
map1.S.unique()

from iso3166 import countries
print(countries.get('us'))

def rename(country):
    try:
        return countries.get(country).alpha3
    except KeyError:
        return np.nan

old_sample_number = map1.S.shape[0]
countriesData = map1.S.apply(rename)
countriesData = countriesData.dropna()  # was map1.S.dropna(), which left the NaNs in place
new_sample_number = countriesData.shape[0]
def process_result_value(self, value: Optional[str], dialect: Dialect) -> Optional[Country]:
    if value is not None:
        return countries.get(value)
    return None
def init_spam_agrar(self, **parameters):
    """initiates agriculture exposure from SPAM data:
        https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/DHXBJX

    Optional parameters:
        data_path (str): absolute path where files are stored.
            Default: SYSTEM_DIR
        country (str): Three letter country code of country to be cut out.
            No default (global)
        name_adm1 (str): Name of admin1 (e.g. Federal State) to be cut out.
            No default
        name_adm2 (str): Name of admin2 to be cut out. No default
        spam_variable (str): select one agricultural variable:
            'A'     physical area
            'H'     harvested area
            'P'     production
            'Y'     yield
            'V_agg' value of production, aggregated to all crops,
                    food and non-food (default)
            Warning: for A, H, P and Y, currently all crops are summed up
        spam_technology (str): select one agricultural technology type:
            'TA'    all technologies together, ie complete crop (default)
            'TI'    irrigated portion of crop
            'TH'    rainfed high inputs portion of crop
            'TL'    rainfed low inputs portion of crop
            'TS'    rainfed subsistence portion of crop
            'TR'    rainfed portion of crop (= TA - TI, or TH + TL + TS)
            ! different impact_ids are assigned to each technology (1-6)
        save_name_adm1 (Boolean): Determines how many additional data are saved:
            False: only basics (lat, lon, total value), region_id per country
            True: like False, plus name of admin1
        haz_type (str): hazard type abbreviation, e.g. 'DR' for Drought or
            'CP' for CropPotential

    Returns:
    """
    data_p = parameters.get('data_path', SYSTEM_DIR)
    spam_t = parameters.get('spam_technology', 'TA')
    spam_v = parameters.get('spam_variable', 'V_agg')
    adm0 = parameters.get('country')
    adm1 = parameters.get('name_adm1')
    adm2 = parameters.get('name_adm2')
    save_adm1 = parameters.get('save_name_adm1', False)
    haz_type = parameters.get('haz_type', DEF_HAZ_TYPE)

    # Test if parameters make sense:
    if spam_v not in ['A', 'H', 'P', 'Y', 'V_agg'] or \
            spam_t not in ['TA', 'TI', 'TH', 'TL', 'TS', 'TR']:
        LOGGER.error('Invalid input parameter(s).')
        raise ValueError('Invalid input parameter(s).')

    # read data from CSV:
    data = self._read_spam_file(data_path=data_p, spam_technology=spam_t,
                                spam_variable=spam_v, result_mode=1)
    # extract country or admin level (if provided)
    data, region = self._spam_set_country(data, country=adm0,
                                          name_adm1=adm1, name_adm2=adm2)
    # sort by alloc_key to make extraction of lat / lon easier:
    data = data.sort_values(by=['alloc_key'])
    lat, lon = self._spam_get_coordinates(data.loc[:, 'alloc_key'], data_path=data_p)
    if save_adm1:
        self.name_adm1 = data.loc[:, 'name_adm1'].values

    if spam_v == 'V_agg':  # total only (column 7)
        i_1 = 7
        i_2 = 8
    else:
        i_1 = 7  # get sum over all crops (columns 7 to 48)
        i_2 = 49
    self.gdf['value'] = data.iloc[:, i_1:i_2].sum(axis=1).values
    self.gdf['latitude'] = lat.values
    self.gdf['longitude'] = lon.values
    LOGGER.info('Lat. range: {:+.3f} to {:+.3f}.'.format(
        np.min(self.gdf.latitude), np.max(self.gdf.latitude)))
    LOGGER.info('Lon. range: {:+.3f} to {:+.3f}.'.format(
        np.min(self.gdf.longitude), np.max(self.gdf.longitude)))

    # set region_id (numeric ISO3):
    country_id = data.loc[:, 'iso3']
    if country_id.unique().size == 1:
        region_id = np.ones(self.gdf.value.size, int) \
            * int(iso_cntry.get(country_id.iloc[0]).numeric)
    else:
        region_id = np.zeros(self.gdf.value.size, int)
        for i in range(0, self.gdf.value.size):
            region_id[i] = int(iso_cntry.get(country_id.iloc[i]).numeric)
    self.gdf['region_id'] = region_id
    self.ref_year = 2005
    self.tag = Tag()
    self.tag.description = ("SPAM agrar exposure for variable "
                            + spam_v + " and technology " + spam_t)
    # if impact id variation iiv = 1, assign different damage function ID
    # per technology type.
    self._set_if(spam_t, haz_type)
    self.tag.file_name = (FILENAME_SPAM + '_' + spam_v + '_' + spam_t + '.csv')
    # self.tag.shape = cntry_info[2]
    # self.tag.country = cntry_info[1]
    if spam_v in ('A', 'H'):
        self.value_unit = 'Ha'
    elif spam_v == 'Y':
        self.value_unit = 'kg/Ha'
    elif spam_v == 'P':
        self.value_unit = 'mt'
    else:
        self.value_unit = 'USD'
    LOGGER.info('Total {} {} {}: {:.1f} {}.'.format(
        spam_v, spam_t, region, self.gdf.value.sum(), self.value_unit))
    self.check()
def rename(country):
    try:
        return countries.get(country).alpha3
    except KeyError:
        return np.nan
def get_full_name(country: str) -> str:
    return (iso_info.name
            if (iso_info := iso_countries.get(country, False))
            else country)
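# Illustrative calls, assuming `iso_countries` is iso3166's `countries`
# object: passing a default to get() avoids the KeyError other snippets
# here have to catch.
print(get_full_name('SE'))        # -> 'Sweden'
print(get_full_name('Atlantis'))  # -> 'Atlantis' (unknown, passed through)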
def cnt_iso3166(country):
    c = countries.get(country)
    return c[1]  # index 1 of the Country tuple is the alpha-2 code
def trafficLogStats(logsPath, outFilePath):
    # Read in a file containing the full access log.
    if logsPath.endswith('.zip'):
        # Support for reading zipped logs.
        zfile = zipfile.ZipFile(logsPath, 'r')
        fname = [x for x in zfile.namelist() if '/' not in x][0]
        zcontent = zfile.open(fname)
        lines = zcontent.readlines()
    else:
        # Support for plain text logs.
        logfile = open(logsPath, 'r')
        lines = logfile.readlines()
        logfile.close()

    # Create data structures for tracking metrics
    recordCount = collections.Counter()
    monthCount = collections.Counter()
    browserCount = collections.Counter()
    IPCount = collections.Counter()
    userCount = collections.Counter()
    users = set()  # Create set of users to prevent duplications.

    # Process the log file to generate hit and session counts.
    for line in lines:
        # Now split and define things.
        words = line.split()
        try:
            ip = geolite2.lookup(words[0])
        except:
            ip = None
        if ip is not None and ip.country is not None:
            if ip.country == 'XK':  # Kosovo has no ISO 3166 entry
                IPCount["Kosovo"] += 1
            else:
                nation = countries.get(ip.country)
                IPCount[nation.name] += 1
        # Browser Type
        if "Chrome" in line:
            browserCount["Chrome"] += 1
        elif "Firefox" in line:
            browserCount["FireFox"] += 1
        elif "Safari" in line:
            browserCount["Safari"] += 1
        elif "Explorer" in line:
            browserCount["Internet Explorer"] += 1
        else:
            browserCount["Other"] += 1
        # Get date of access.
        try:
            dtStr = words[3][1:].replace(':', ' ', 1)
            dt = parseDt(dtStr)
            accessDt = str(dt.year) + '-' + str(dt.month).zfill(2)
        except:
            accessDt = '2019-01'
        # Is this a unique viewer?
        ipStr = words[0]
        if ipStr not in users:
            # Add another user to the count.
            recordCount[accessDt] += 1
            users.add(ipStr)
        # No matter what, we update the monthly count.
        monthCount[accessDt] += 1
        userCount[ipStr] += 1

    # Set up plotting:
    plt.figure(figsize=(15, 15))
    ggColors = [x['color'] for x in plt.rcParams['axes.prop_cycle']]

    # Session counts by month:
    log = collections.OrderedDict(sorted(recordCount.items(), key=lambda x: x[0]))
    plt.subplot(3, 1, 1)
    ax = plt.gca()
    totalSessions = "{:,}".format(sum(log.values()))
    creationTime = datetime.now().strftime('%Y-%m-%d')
    ax.set_title('Session Count By Month. Total: ' + totalSessions +
                 '\nGenerated: ' + creationTime)
    barRange = range(len(log))
    plt.bar(barRange, log.values(), align='center')
    plt.xticks(barRange, [x.replace('/', '\n') for x in log.keys()])
    plt.axis('tight')

    # Hit counts by month:
    log = collections.OrderedDict(sorted(monthCount.items(), key=lambda x: x[0]))
    plt.subplot(3, 1, 2)
    ax = plt.gca()
    ax.set_title('Hit Count By Month. Total: ' + "{:,}".format(sum(log.values())))
    barRange = range(len(log))
    plt.bar(barRange, log.values(), align='center')
    plt.xticks(barRange, [x.replace('/', '\n') for x in log.keys()])
    plt.axis('tight')

    # Plot the hits per user histogram:
    userElements = userCount.items()
    browserElements = browserCount.items()
    plt.subplot(3, 3, 7)
    userValues = list(pair[1] for pair in userElements)
    title = 'Histogram of Hits Per User'
    plt.title(title)
    plt.hist(userValues, bins=range(0, 50, 5))

    # Country hit counts:
    log = collections.OrderedDict(sorted(IPCount.items(), key=lambda x: x[1], reverse=True))
    countryTotal = str(len(log))
    # Just look at top 10 countries:
    for i, k in enumerate(log):
        if i > 10:
            del log[k]
    plt.subplot(3, 3, 8)
    ax = plt.gca()
    title = 'Hits by Country. Total Countries: ' + countryTotal
    ax.set_title(title)
    people = [x[0:14] for x in log.keys()]
    y_pos = range(len(people))
    performance = [x for x in log.values()]
    ax.barh(y_pos, performance, align='center')
    ax.set_yticks(y_pos)
    ax.set_yticklabels(people, fontsize=8)
    ax.invert_yaxis()  # labels read top-to-bottom

    # Plot of the number of hits by IP address:
    # u_label_list = list(pair[0] for pair in userElements)
    # IPCounts = filter(lambda x: x[1] >= 100, [(l, s) for l, s in zip(u_label_list, userValues)])
    # colors = ('red', 'green', 'orange', 'cyan', 'brown', 'grey', 'blue', 'indigo', 'beige', 'yellow')
    # plt.pie(sorted(userValues, reverse=True), colors=colors)
    # plt.axis("equal")
    # plt.legend(loc=(-0.15, 0.05), labels=sorted(IPCounts, key=lambda x: x[1], reverse=True), shadow=True)
    # plt.savefig('USER HITS' + '.png')
    # plt.show()
    # plt.close()

    # def IPConvert(ip):
    #     removePeriods = ''.join(ip.split('.'))
    #     final = removePeriods.replace(':', '')
    #     return int((''.join(ip.split('.')).replace(':', '')))

    # Browser type breakdown:
    b_label_list = list(x[0] for x in browserElements if x[0] != 'Other')
    browserValues = list(int(x[1]) for x in browserElements if x[0] != 'Other')
    plt.subplot(3, 3, 9)
    plt.pie(sorted(browserValues, reverse=True), colors=ggColors)
    browserLabels = [(l, s) for l, s in zip(b_label_list, browserValues)]
    plt.legend(labels=sorted(browserLabels, key=lambda x: x[1], reverse=True), shadow=True)
    plt.title('Browser Type Breakdown')

    # Adjust and write out the image.
    plt.subplots_adjust(left=0.1, right=0.9)
    plt.savefig(outFilePath)
def home():
    city_form = CityForm(request.form)
    if request.method == "POST":
        corrected_user_query_location = city_form.location.data.replace(" ", "%20")
        nominatim_json_reponse = get(
            f"https://nominatim.openstreetmap.org/search/"
            f"{corrected_user_query_location}"
            f"?format=json").json()
        location_lat = nominatim_json_reponse[0]["lat"]
        location_lon = nominatim_json_reponse[0]["lon"]
        opwm_json_response = get(
            f"https://api.openweathermap.org/data/2.5/weather"
            f"?lat={location_lat}"
            f"&lon={location_lon}"
            f"&appid={OPWM_API_KEY}"
            f"&units=metric").json()
        opwm_uv_index_json_response = get(
            f"https://api.openweathermap.org/data/2.5/uvi"
            f"?&appid={OPWM_API_KEY}"
            f"&lat={location_lat}"
            f"&lon={location_lon}").json()
        opwm_forecast_json_response = get(
            f"https://api.openweathermap.org/data/2.5/forecast"
            f"?lat={location_lat}"
            f"&lon={location_lon}"
            f"&appid={OPWM_API_KEY}"
            f"&units=metric").json()
        timezonedb_json_response = get(
            f"http://api.timezonedb.com/v2.1/get-time-zone"
            f"?format=json"
            f"&by=position"
            f"&lat={location_lat}"
            f"&lng={location_lon}"
            f"&key={TIMEZONEDB_API_KEY}").json()
        aq_json_response = get(
            f"http://api.airvisual.com/v2/nearest_city"
            f"?lat={location_lat}"
            f"&lon={location_lon}"
            f"&key={AIRQUALITY_API_KEY}").json()
        try:
            cache.set("weather_wind_direction_deg",
                      round(opwm_json_response["wind"]["deg"]))
            cache.set("weather_wind_direction_abbr",
                      portolan.point(degree=cache.get("weather_wind_direction_deg")).capitalize())
        except KeyError:
            cache.set("weather_wind_direction_deg", None)
            cache.set("weather_wind_direction_abbr", "No data")
        cache.set("user_query_location",
                  city_form.location.data.split(",")[0].title())
        cache.set("country_code", opwm_json_response["sys"]["country"])
        cache.set("country_full_name",
                  countries.get(opwm_json_response["sys"]["country"]).name)
        cache.set("country_emoji_flag",
                  flag.flagize(f":{opwm_json_response['sys']['country']}:"))
        cache.set("location_station_name", opwm_json_response["name"])
        cache.set("location_more_link",
                  f"https://www.google.com/search"
                  f"?q={cache.get('user_query_location')}")
        cache.set("location_local_time",
                  datetime.strptime(timezonedb_json_response["formatted"],
                                    "%Y-%m-%d %H:%M:%S"))
        cache.set("weather_time_calc_utc",
                  datetime.fromtimestamp(opwm_json_response["dt"]).strftime("%d/%m/%Y, at %H:%M"))
        cache.set("weather_temp_current", round(opwm_json_response["main"]["temp"], 1))
        cache.set("weather_temp_min", round(opwm_json_response["main"]["temp_min"], 1))
        cache.set("weather_temp_max", round(opwm_json_response["main"]["temp_max"], 1))
        cache.set("weather_description",
                  opwm_json_response["weather"][0]["description"].capitalize())
        cache.set("weather_pressure", opwm_json_response["main"]["pressure"])
        cache.set("weather_humidity", opwm_json_response["main"]["humidity"])
        cache.set("weather_wind_speed", opwm_json_response["wind"]["speed"])
        cache.set("weather_uv_index", round(opwm_uv_index_json_response["value"]))
        cache.set("weather_air_quality_index",
                  aq_json_response["data"]["current"]["pollution"]["aqius"])
        cache.set("weather_temp_forecast", [
            temp["main"]["temp"]
            for temp in opwm_forecast_json_response["list"][::5]
        ])
        cache.set("weather_forecast_time_calc_utc", [
            datetime.strptime(time_calc["dt_txt"], "%Y-%m-%d %H:%M:%S")
            for time_calc in opwm_forecast_json_response["list"][::5]
        ])
        city_form.location.data = ""
        return render_template("weather_report.html", cache=cache, city_form=city_form)
    return render_template("home.html", cache=cache, city_form=city_form)
args = parser.parse_args()
country_code = args.country_code
#delta = args.delta
#corr = args.corr

with open('correlations-basic.json') as json_file:
    correlations_basic = json.load(json_file)
with open('correlations.json') as json_file:
    correlations = json.load(json_file)

#1
fig1, a = pyplot.subplots(nrows=4, ncols=2)
code = countries.get(country_code).alpha2
plot_map_json(correlations_basic[code]['twitter_smooth'], 'corr_twitter', a[0][1])
#plot_map_json(correlations[code]['twitter_smooth'], 'corr_twitter_followers')
plot_map_json(correlations[code]['who_conf_smooth'], 'corr_who_conf', a[1][1])
plot_map_json(correlations[code]['who_rec_smooth'], 'corr_who_rec', a[2][1])
plot_map_json(correlations[code]['who_deaths_smooth'], 'corr_who_deaths', a[3][1])

data = pd.read_csv("to_visualize.csv", index_col=0)
data_basic = pd.read_csv("to_visualize-basic.csv", index_col=0)
series_list = ['twitter_raw', 'who_conf_smooth', 'who_rec_smooth', 'who_deaths_smooth']
country_summary = data_basic.loc[code]
country_summary_foll = data.loc[code]
for i, series in enumerate(series_list):
    a[i][0].plot(process_series(country_summary[series]))
""" Just some examples of using iso3166 for now """ from iso3166 import countries afg = countries.get('AFG')
def validate_country(word):
    country = countries.get(word)
    return country[0]  # index 0 of the Country tuple is the name
def test_country(self):
    country = countries.get('US')
    self.assertEqual(country.alpha2, 'US')
    instance = G(models.CourseEnrollmentByCountry, country_code=country.alpha2)
    self.assertEqual(instance.country, country)
def test_it_has_a_country():
    address = Address(countries.get('gb'), 'City')
    assert address.country == countries.get('gb')
def main():
    getNewsAPI()
    addToDict()
    fileDict = {}
    with open('completedJson.txt', 'r') as inf:
        fileDict = eval(inf.read())
    # Note: this loop only rebinds the loop variable; it does not change the
    # dict's keys.
    for key in fileDict.keys():
        try:
            key = countries.get(key).alpha3
        except KeyError:
            key = key
    # with open("template.json", 'r') as f:
    #     read_data = f.read()
    data = json.load(open('template.json'), object_pairs_hook=OrderedDict)
    data = data[u'features']
    for x in range(0, len(data)):
        line = data[x]
        topics = {}
        country = str(line[u'id'])
        name = str(line[u'properties'][u'name'])
        try:
            if country in fileDict:
                for y in range(0, len(fileDict[country])):
                    topic = fileDict[country][y]['topic']
                    href = ("<a target='_blank' href='" + fileDict[country][y]['url']
                            + "'>" + str(fileDict[country][y]['title']) + '</a>')
                    if topic not in topics:
                        topics[topic] = []
                    eachTopic = topics[topic]
                    credibility = {}
                    author = {}
                    article = {}
                    credibility['Credibility'] = str(fileDict[country][y]['credRating'])
                    author['Author'] = str(fileDict[country][y]['author'])
                    article[str(href)] = []
                    article[str(href)].append(credibility)
                    article[str(href)].append(author)
                    eachTopic.append(article)
                    topics[topic] = eachTopic
            elif countries.get(country).alpha2 in fileDict:
                alpha2 = countries.get(country).alpha2
                for y in range(0, len(fileDict[alpha2])):
                    topic = fileDict[alpha2][y]['topic']
                    href = ("<a target='_blank' href='" + fileDict[alpha2][y]['url']
                            + "'>" + str(fileDict[alpha2][0]['title']) + '</a>')
                    if topic not in topics:
                        topics[topic] = []
                    eachTopic = topics[topic]
                    credibility = {}
                    author = {}
                    article = {}
                    credibility['Credibility'] = str(fileDict[alpha2][y]['credRating'])
                    author['Author'] = str(fileDict[alpha2][y]['author'])
                    article[str(href)] = []
                    article[href].append(credibility)
                    article[href].append(author)
                    eachTopic.append(article)
                    topics[topic] = eachTopic
            a = ""
            for topic in topics.keys():
                for article in topics[topic]:
                    for element in article:
                        for y in range(0, len(article[element])):
                            for key in article[element][y]:
                                a += article[element][y][key] + '\n'  # was `article[element][y]`, a dict
                line[u'properties'][topic] = {element: {'info': a}}
        except:
            None
    test = './test.geojson'
    with open('data.txt', 'w') as outfile:
        json.dump(data, outfile)
    insertJson('2/19', str(data))
    insertDict('2/19', aggregateDict)
    with open('final.txt', 'w') as outfile:
        json.dump(aggregatedDict, outfile)
with new_data_file.open(mode='a') as f:
    print('})', file=f)

pth = Path('.')
for dtfl in pth.iterdir():
    if dtfl.suffix == '.csv':
        data_file = dtfl
if not data_file.exists() or not data_file.is_file():
    print('CSV file invalid or cannot be found')
    exit(1)
fs = data_file.stem
try:
    country = countries.get(fs.title())
except KeyError:
    print(f'Cannot determine country from filename {fs}')
    break
csv_size = data_file.stat().st_size
if csv_size == 635414:
    letter = 'a'
    print(f'{country.name} [ {country.alpha2} ] is [ {letter.upper()} ] Type')
    check_countrymap(country, letter)
elif csv_size == 682975:
    letter = 'b'
    print(f'{country.name} [ {country.alpha2} ] is [ {letter.upper()} ] Type')
    check_countrymap(country, letter)
elif csv_size == 660096:
    letter = 'c'
def country_to_osm_number(country):
    # returns the country code osm-boundaries needs:
    # list taken from: https://osm-boundaries.com/
    # (named osm_ids here: the original local name `countries` shadowed the
    # iso3166 import)
    osm_ids = {
        "Afghanistan": -303427, "Albania": -53292, "Algeria": -192756, "Andorra": -9407,
        "Angola": -195267, "Anguilla": -2177161, "Antigua and Barbuda": -536900,
        "Argentina": -286393, "Armenia": -364066, "Australia": -80500, "Austria": -16239,
        "Azerbaijan": -364110, "Bahrain": -378734, "Ban Than (Zhongzhou) Reef": 159002389,
        "Bangladesh": -184640, "Barbados": -547511, "Belarus": -59065, "Belgium": -52411,
        "Belize": -287827, "Benin": -192784, "Bermuda": -1993208, "Bhutan": -184629,
        "Bolivia": -252645, "Bosnia and Herzegovina": -2528142, "Botswana": -1889339,
        "Brazil": -59470, "British Indian Ocean Territory": -1993867,
        "British Sovereign Base Areas": -3263728, "British Virgin Islands": -285454,
        "Brunei": -2103120, "Bulgaria": -186382, "Burkina Faso": -192783, "Burundi": -195269,
        "Cambodia": -49898, "Cameroon": -192830, "Canada": -1428125, "Cape Verde": -535774,
        "Cayman Islands": -2185366, "Central African Republic": -192790, "Chad": -2361304,
        "Chile": -167454, "China": -270056, "Colombia": -120027, "Comoros": -535790,
        "Congo-Brazzaville": -192794, "Cook Islands": -2184233, "Costa Rica": -287667,
        "Croatia": -214885, "Cuba": -307833, "Cyprus": -307787, "Czechia": -51684,
        "Côte d'Ivoire": -192779, "Democratic Republic of the Congo": -192795,
        "Denmark": -50046, "Djibouti": -192801, "Dominica": -307823,
        "Dominican Republic": -307828, "East Timor": -305142, "Ecuador": -108089,
        "Egypt": -1473947, "El Salvador": -1520612, "Equatorial Guinea": -192791,
        "Eritrea": -296961, "Estonia": -79510, "Eswatini": -88210, "Ethiopia": -192800,
        "Falkland Islands": -2185374, "Faroe Islands": -52939,
        "Federated States of Micronesia": -571802, "Fiji": -571747, "Finland": -54224,
        "France": -2202162, "Gabon": -192793, "Georgia": -28699, "Germany": -51477,
        "Ghana": -192781, "Gibraltar": -1278736, "Greece": -192307, "Greenland": -2184073,
        "Grenada": -550727, "Guatemala": -1521463, "Guernsey": -270009, "Guinea": -192778,
        "Guinea-Bissau": -192776, "Guyana": -287083, "Haiti": -307829, "Honduras": -287670,
        "Hungary": -21335, "Iceland": -299133, "India": -304716, "Indonesia": -304751,
        "Iran": -304938, "Iraq": -304934, "Ireland": -62273, "Isle of Man": -62269,
        "Israel": -1473946, "Italy": -365331, "Jamaica": -555017, "Japan": -382313,
        "Jersey": -367988, "Jordan": -184818, "Kazakhstan": -214665, "Kenya": -192798,
        "Kiribati": -571178, "Kosovo": -2088990, "Kuwait": -305099, "Kyrgyzstan": -178009,
        "Laos": -49903, "Latvia": -72594, "Lebanon": -184843, "Lesotho": -2093234,
        "Liberia": -192780, "Libya": -192758, "Liechtenstein": -1155955, "Lithuania": -72596,
        "Luxembourg": -2171347, "Madagascar": -447325, "Malawi": -195290,
        "Malaysia": -2108121, "Maldives": -536773, "Mali": -192785, "Malta": -365307,
        "Marshall Islands": -571771, "Mauritania": -192763, "Mauritius": -535828,
        "Mengalum Island": 367540794, "Mexico": -114686, "Moldova": -58974,
        "Monaco": -1124039, "Mongolia": -161033, "Montenegro": -53296,
        "Montserrat": -537257, "Morocco": -3630439, "Mozambique": -195273,
        "Myanmar": -50371, "Namibia": -195266, "Nauru": -571804, "Nepal": -184633,
        "Netherlands": -2323309, "New Zealand": -556706, "Nicaragua": -287666,
        "Niger": -192786, "Nigeria": -192787, "Niue": -1558556, "North Korea": -192734,
        "North Macedonia": -53293, "Norway": -2978650, "Oman": -305138,
        "Pakistan": -307573, "Palau": -571805, "Panama": -287668,
        "Papua New Guinea": -307866, "Paraguay": -287077, "Peru": -288247,
        "Philippines": -443174, "Pitcairn Islands": -2185375, "Poland": -49715,
        "Portugal": -295480, "Qatar": -305095, "Romania": -90689, "Russia": -60189,
        "Rwanda": -171496, "Saint Helena, Ascension and Tristan da Cunha": -1964272,
        "Saint Kitts and Nevis": -536899, "Saint Lucia": -550728,
        "Saint Vincent and the Grenadines": -550725, "Samoa": -1872673,
        "San Marino": -54624, "Saudi Arabia": -307584, "Senegal": -192775,
        "Serbia": -1741311, "Seychelles": -536765, "Sierra Leone": -192777,
        "Singapore": -536780, "Slovakia": -14296, "Slovenia": -218657,
        "Solomon Islands": -1857436, "Somalia": -192799, "South Africa": -87565,
        "South Georgia and the South Sandwich Islands": -1983628, "South Korea": -307756,
        "South Sudan": -1656678, "Spain": -1311341, "Sri Lanka": -536807,
        "Sudan": -192789, "Suriname": -287082, "Swallow Reef": -5220687,
        "Sweden": -52822, "Switzerland": -51701, "Syria": -184840,
        "São Tomé and Príncipe": -535880, "Taiping Island": 741647339,
        "Taiwan": -449220, "Tajikistan": -214626, "Tanzania": -195270,
        "Thailand": -2067731, "The Bahamas": -547469, "The Gambia": -192774,
        "Togo": -192782, "Tokelau": -2186600, "Tonga": -2186665,
        "Trinidad and Tobago": -555717, "Tunisia": -192757, "Turkey": -174737,
        "Turkmenistan": -223026, "Turks and Caicos Islands": -547479,
        "Tuvalu": -2177266, "Uganda": -192796, "Ukraine": -60199,
        "United Arab Emirates": -307763, "United Kingdom": -62149,
        "United States": -148838, "Uruguay": -287072, "Uzbekistan": -196240,
        "Vanuatu": -2177246, "Vatican City": -36989, "Venezuela": -272644,
        "Vietnam": -49915, "Yemen": -305092, "Zambia": -195271, "Zimbabwe": -195272,
    }
    # Keep the result in a separate variable: the original rebound `country`,
    # so the error message below crashed with AttributeError on a miss.
    osm_id = osm_ids.get(country.name, osm_ids.get(country.apolitical_name, None))
    if not osm_id:
        raise ValueError(f"Country not found by name {country.name}.")
    return osm_id