def viaggi_to_json_txt(lista_viaggi, filename_output):
    """Write the trips that lie entirely inside Italy to *filename_output* as JSON.

    Each exported trip is serialized as
    ``[id, start_time, end_time, [timestamp, lat, lon, speed], ...]``.
    A trip is skipped as soon as one of its points is outside Italy or
    over an area with no country (getCountry returns None).
    """
    lout = []
    cc = countries.CountryChecker("borders/TM_WORLD_BORDERS-0.3.shp")
    for v in lista_viaggi:
        lv = [v.id_, v.tempo_inizio, v.tempo_fine]
        in_italy = True
        for p in v.punti:
            point = countries.Point(float(p.latitudine), float(p.longitudine))
            # Look the country up once instead of twice per point — the
            # shapefile lookup is the expensive step of this loop.
            country = cc.getCountry(point)
            if country is None or country.iso != "IT":
                in_italy = False
                break
            lv.append([p.timestamp, p.latitudine, p.longitudine, p.velocita])
        if in_italy:
            lout.append(lv)
    print(len(lout))
    if len(lout) > 0:
        # 'with' guarantees the handle is closed even if json.dump raises.
        with open(filename_output, "w") as ftxtout:
            json.dump(lout, ftxtout)
        print(filename_output)
def prod_sq():
    """Map each "lat,lng" position in geomap.txt to a row index of second_data.txt.

    For every position the containing country is looked up; the index of
    the first data row (1-based, header skipped) whose second column
    appears in the country's string form is written to sq_data.txt,
    space separated, or "0" when no row matches.
    """
    file_path = './second_data.txt'
    file_path1 = './geomap.txt'
    file_path2 = './sq_data.txt'
    # Build the checker once: loading the shapefile is expensive and
    # loop-invariant (the original re-created it for every position).
    cc = countries.CountryChecker('TM_WORLD_BORDERS-0.3.shp')
    with open(file_path2, 'w') as w, \
            open(file_path, 'r') as f, \
            open(file_path1, 'r') as g:
        lst = f.readlines()
        pos = g.readlines()[0].split()
        for s in pos:
            lat, lng = s.split(",")
            # float(), not int(): coordinates such as "49.78" would make
            # int() raise ValueError.
            name = cc.getCountry(countries.Point(float(lat), float(lng)))
            flag = True
            for i in range(1, len(lst)):
                data = lst[i].split("\n")[0].split(",")
                if data[1] in str(name):
                    w.write(str(i) + " ")
                    flag = False
                    break
            if flag:
                w.write("0 ")
    print("序列输出完成")
def getCountry(coord):
    """Return the country containing *coord* (a (lat, lon) pair), restricted to Europe.

    The world borders shapefile is first filtered to REGION == 150
    (Europe) into EUROPE.shp and the lookup runs against that subset, so
    points outside Europe return None.
    """
    copyshapes.filter_file(
        lambda x: x.GetField('REGION') == 150,
        'TM_WORLD_BORDERS-0.3.shp',
        'EUROPE.shp')
    # BUG FIX: the original built EUROPE.shp but then queried the full
    # world shapefile, making the filtering step dead code.
    cc = countries.CountryChecker('EUROPE.shp')
    country = cc.getCountry(countries.Point(coord[0], coord[1]))
    return country
def __get_entries_iso(country_entries, cc):
    """Return the ISO code of the first entry that resolves to a country.

    country_entries: DataFrame-like with 'Lat' and 'Long' columns.
    cc: a countries.CountryChecker instance.
    Returns None when no entry resolves — or when the frame is empty
    (the original raised IndexError on an empty frame).
    """
    # One loop covers every frame size; the original special-cased the
    # single-row frame with a duplicated copy of the lookup code.
    for lat, lon in zip(country_entries['Lat'].values,
                        country_entries['Long'].values):
        c = cc.getCountry(countries.Point(lat, lon))
        if c:
            return c.iso
    return None
def detect_landmarks(user_id):
    """Analyze the last 30 wall photos of a VK user with Google Vision.

    Collects label frequencies, landmark country ISO codes and a
    'solvency' score (average weight of the recognized tags), and
    returns a JSON string of the form
    {"tagsFromPhoto": {...}, "countries": [...], "solvency": N}.
    """
    import json  # local import keeps this fix self-contained

    # Pick, per photo, the first size larger than 600x600.
    urls = []
    response = vk_api.photos.get(owner_id=user_id, album_id="wall",
                                 rev=1, count=30)
    for photo in response['items']:
        for size in photo['sizes']:
            if size['width'] > 600 and size['height'] > 600:
                urls.append(size['url'])
                break

    contents = ThreadPool(8).map(urlToContent, urls)

    # Hoisted out of the loops: both objects are expensive to build and
    # loop-invariant (the original rebuilt the Vision client for every
    # image and the shapefile checker for every landmark location).
    client = vision.ImageAnnotatorClient()
    cc = countries.CountryChecker('/home/admin2/TM_WORLD_BORDERS-0.3.shp')

    result = {}
    countryList = []
    tags_weight = 0
    tags_count = 1  # starts at 1 so solvency never divides by zero
    for content in contents:
        try:
            image = vision.Image(content=content)
            landmarks = client.landmark_detection(image=image).landmark_annotations
            labels = client.label_detection(image=image).label_annotations

            print('Labels:')
            # Count the objects recognized on the picture.
            for label in labels:
                label = label.description
                result[label] = result.get(label, 0) + 1
                # Tags missing from the weight table carry no weight.
                weight = weights_photo_tagsD.get(label)
                if weight is not None:
                    print(weight)
                    tags_weight = tags_weight + weight
                    tags_count = tags_count + 1
                print(label)

            print('Landmarks:')
            # Collect the country of each landmark location.
            for landmark in landmarks:
                print(landmark.description)
                for location in landmark.locations:
                    lat_lng = location.lat_lng
                    print('Latitude {}'.format(lat_lng.latitude))
                    print('Longitude {}'.format(lat_lng.longitude))
                    country = cc.getCountry(
                        countries.Point(lat_lng.latitude, lat_lng.longitude))
                    # BUG FIX: getCountry may return None (e.g. a landmark
                    # at sea); the original dereferenced .iso unconditionally
                    # and aborted the whole image via the outer except.
                    if country is not None:
                        print(country.iso)
                        countryList.append(country.iso)
        except Exception as e:
            # Best effort per image: one bad photo must not kill the rest.
            print("detect_landmarks error = " + str(e))

    solvency = tags_weight / tags_count
    sorted_result = dict(sorted(result.items(), key=lambda kv: kv[1],
                                reverse=True))
    # BUG FIX: the original serialized via str() + replace("'", '"'),
    # which produces broken JSON for any label containing an apostrophe.
    return json.dumps({"tagsFromPhoto": sorted_result,
                       "countries": countryList,
                       "solvency": solvency})
def getCountry(self, lat, lon):
    """Return the ISO code of the country at (lat, lon), or "XX" when the
    point falls outside every known border (e.g. over the ocean)."""
    country = self.cc.getCountry(countries.Point(lat, lon))
    # 'is not None' instead of the original 'not country == None' (PEP 8:
    # comparisons to None use identity, not equality).
    if country is not None:
        return country.iso
    return "XX"
# Smoke test for the countries lookup: the sample point lies in northern
# France, so this should print that country's ISO code, then confirm the
# script ran to completion.
import countries

checker = countries.CountryChecker('TM_WORLD_BORDERS-0.3.shp')
sample_point = countries.Point(49.7821, 3.5708)
print(checker.getCountry(sample_point).iso)
print('test working')
import urllib
import countries

# Translation table mapping astral-plane code points to U+FFFD (kept from
# the original; usable with str.translate to sanitize tweet text).
non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)

# 'with' closes the input file (the original left it open).
with open('naturaldisaster.json', 'r', encoding='utf-8-sig',
          errors='ignore') as data:
    data2 = json.load(data, strict=False)

cc = countries.CountryChecker('TM_WORLD_BORDERS-0.3.shp')

us_records = []
for c, line in enumerate(data2):
    # coordinates look like "[lat,lon]"; strip the brackets.
    lat = float(line['coordinates'].split(',')[0][1:])
    log = float(line['coordinates'].split(',')[1][:-1])
    print(c)
    # Single lookup (the original called getCountry twice per record).
    # NOTE(review): the original passes (log, lat) to Point — if Point
    # expects (lat, lon) the axes are swapped; preserved as-is, confirm
    # against the data's coordinate order.
    country = cc.getCountry(countries.Point(log, lat))
    if country is not None and country.iso == 'US':
        us_records.append({
            'text': line['text'],
            'created_at': line['created_at'],
            'coordinates': line['coordinates'],
        })

# BUG FIX: the original hand-built the JSON — it left a trailing comma
# before ']' and embedded unescaped tweet text, producing an invalid
# document (and 'a' mode accumulated garbage across runs).  json.dump
# writes one valid array in a single pass.
with open('natural_disaster_US.json', 'w', encoding='utf-8') as f:
    json.dump(us_records, f, ensure_ascii=False)
# https://github.com/che0/countries
import countries

cc = countries.CountryChecker('TM_WORLD_BORDERS-0.3.shp')
# 'with' closes the input file (the original left the handle open).
with open('enc.txt') as src:
    f = src.read().splitlines()
for i in f:
    word = i.split(", ")
    try:
        cCode = cc.getCountry(countries.Point(float(word[0]), float(word[1]))).iso
        print(cCode)
    except (ValueError, IndexError, AttributeError):
        # ValueError/IndexError: malformed line; AttributeError:
        # getCountry returned None (no country at that point).
        # Narrowed from a bare 'except:', which also swallowed
        # KeyboardInterrupt and SystemExit.
        print(word)
def respond():
    """Handle a map request: download any missing elevation tiles covering
    the bounding box, then build and return the map payload as JSON.

    Expects a JSON body with 'north', 'south', 'east', 'west' bounds
    (degrees).  Returns jsonify(False) when map construction fails.
    """
    data = request.get_json(force=True)
    # TODO check if request is gucci

    # The country at the centre of the bounding box decides which
    # elevation dataset (resolution suffix) and URL table to use.
    country = str(
        country_checker.getCountry(
            countries.Point((data['north'] + data['south']) / 2,
                            (data['east'] + data['west']) / 2)))

    # TODO Remove the following block of code in production
    if country == 'United States':
        path_suffix = '_13'
        useful_urls = us_urls
    else:
        path_suffix = '_1'
        useful_urls = mx_ca_urls

    for lat in range(math.ceil(float(data['south'])),
                     math.ceil(float(data['north'])) + 1  # Eg N 87.7 to N 86.
                     ):
        for lng in range(math.floor(float(data['west'])),
                         math.floor(float(data['east'])) + 1):
            # Tile names look like e.g. grdn42w071<suffix>.
            fname = ('grd' + ('n' if lat > 0 else 's') +
                     str(abs(math.ceil(lat))).zfill(2) +
                     ('e' if lng >= 0 else 'w') +
                     str(abs(math.floor(lng))).zfill(3))
            database_path = ('elevationdata/' + fname + path_suffix +
                             '/w001001.adf')
            if not os.path.exists(database_path):
                try:
                    print("downloading" + useful_urls[(lat, lng)] + "\n")
                    wget.download(useful_urls[(lat, lng)])
                    print("\n")
                    file_name = useful_urls[(lat, lng)].split('/')[-1]
                    archive = zipfile.ZipFile(file_name)
                    # Extract only the tile directory we need.
                    for file in archive.namelist():
                        if file.startswith("grd" + fname[3:] + path_suffix + "/"):
                            archive.extract(file, "elevationdata")
                    os.remove(file_name)
                except (urllib.error.HTTPError):
                    print("Could not download data for", (lat, lng))
                except KeyError:
                    # BUG FIX: the original read
                    #   print("Thing not found in urls: " (lat, lng))
                    # — the missing comma *called* the string, raising
                    # TypeError inside the KeyError handler.
                    print("Thing not found in urls:", (lat, lng))
                # except:
                #     print(sys.exc_info()[0], (lat, lng))

    success, stoplights, local_maxima, graph, node_heights, node_latlons, \
        edge_heights = make_map((
            data['west'], data['south'], data['east'], data['north']
        ), country)
    if (success):
        return jsonify({
            "stoplights": stoplights,
            "local_maxima": local_maxima,
            "graph": graph,
            "node_heights": node_heights,
            "node_latlons": node_latlons,
            "edge_heights": edge_heights
        })
    return jsonify(False)
def in_switzerland(row):
    """Return the ISO country code of the row's coordinates, or None when
    the point falls outside every border (e.g. at sea).

    NOTE(review): despite the name, this returns the ISO code of whatever
    country contains the point, not a Switzerland boolean.
    row: mapping with 'latitude' and 'longitude' keys.
    """
    longitude = row['longitude']
    latitude = row['latitude']
    # Loading the shapefile is expensive; cache the checker on the
    # function so repeated per-row calls (e.g. DataFrame.apply) reuse it
    # instead of rebuilding it every call, as the original did.
    cc = getattr(in_switzerland, '_checker', None)
    if cc is None:
        cc = countries.CountryChecker('TM_WORLD_BORDERS/TM_WORLD_BORDERS-0.3.shp')
        in_switzerland._checker = cc
    country = cc.getCountry(countries.Point(latitude, longitude))
    # BUG FIX: the original dereferenced .iso on a possibly-None result.
    return country.iso if country is not None else None