Beispiel #1
0
def get_info_yandex(latitude, longitude):
    """Query the Yandex geocoder and map geo-object kinds to names.

    Returns a dict mapping geocoder ``kind`` (e.g. locality, street) to
    the geo-object name; empty on any network or parsing failure.
    """
    items = {}
    # NOTE: Yandex expects "longitude,latitude" order in the geocode param.
    query = "{0}?format=json&geocode={1},{2}".format(yandex_geocoder,
                                                     longitude, latitude)

    try:
        result = request_get(query)
    except ConnectionError as errno:
        trace.error(" ".join(("Connection error:", str(errno))))
        return items
    except Timeout as errno:
        # FIX: message typo ("timeoute exceed" -> "timeout exceeded")
        trace.error(" ".join(("Connection timeout exceeded:", str(errno))))
        return items
    except HTTPError as errno:
        trace.error(" ".join(("Invalid HTTP response:", str(errno))))
        return items
    except RequestException as errno:
        trace.error('Exception: {0}'.format(errno))
        return items

    core_dic = json_loads(result.text)
    # Hoist the collection lookup out of the loop so the outer KeyError
    # handler covers only the response envelope, not per-member parsing.
    try:
        members = core_dic['response']['GeoObjectCollection']['featureMember']
    except KeyError as errno:
        trace.warning("Invalid key: {0}".format(errno))
        return items

    for feature_member in members:
        try:
            kind = feature_member['GeoObject']['metaDataProperty'][
                'GeocoderMetaData']['kind']
            items[kind] = feature_member['GeoObject']['name']
        except KeyError as errno:
            # Skip malformed members but keep the rest.
            trace.warning("Invalid key: {0}".format(errno))

    return items
Beispiel #2
0
def get_info_osm(latitude, longitude):
    """Query the OSM (Nominatim) geocoder for keywords describing a point.

    Returns a list: the first component of the display name followed by
    keywords derived from the object's class/type; empty on failure.
    """
    keywords = []
    query = "{0}?format=json&q={1},{2}".format(osm_geocoder, latitude, longitude)

    try:
        result = request_get(query)
    except ConnectionError as errno:
        trace.error(" ".join(("Connection error:", str(errno))))
        return keywords
    except Timeout as errno:
        # FIX: message typo ("timeoute exceed" -> "timeout exceeded")
        trace.error(" ".join(("Connection timeout exceeded:", str(errno))))
        return keywords
    except HTTPError as errno:
        trace.error(" ".join(("Invalid HTTP response:", str(errno))))
        return keywords
    except RequestException as errno:
        trace.error('Exception: {0}'.format(errno))
        return keywords

    core_dic = json_loads(result.text)
    # FIX: the geocoder returns a JSON list which is empty when nothing
    # matched; indexing [0] unguarded raised IndexError.
    if not core_dic:
        trace.warning("Empty geocoder response for {0},{1}".format(
            latitude, longitude))
        return keywords

    object_class = core_dic[0]["class"]
    object_type = core_dic[0]["type"]

    keywords.append(core_dic[0]["display_name"].split(",")[0])
    keywords.extend(get_osm_keywords(object_class, object_type))
    return keywords
Beispiel #3
0
def get_info_osm(latitude, longitude):
    """Query the OSM (Nominatim) geocoder for keywords describing a point.

    Returns a list: the first component of the display name followed by
    keywords derived from the object's class/type; empty on failure.
    """
    keywords = []
    query = "{0}?format=json&q={1},{2}".format(osm_geocoder, latitude,
                                               longitude)

    try:
        result = request_get(query)
    except ConnectionError as errno:
        trace.error(" ".join(("Connection error:", str(errno))))
        return keywords
    except Timeout as errno:
        # FIX: message typo ("timeoute exceed" -> "timeout exceeded")
        trace.error(" ".join(("Connection timeout exceeded:", str(errno))))
        return keywords
    except HTTPError as errno:
        trace.error(" ".join(("Invalid HTTP response:", str(errno))))
        return keywords
    except RequestException as errno:
        trace.error('Exception: {0}'.format(errno))
        return keywords

    core_dic = json_loads(result.text)
    # FIX: the geocoder returns a JSON list which is empty when nothing
    # matched; indexing [0] unguarded raised IndexError.
    if not core_dic:
        trace.warning("Empty geocoder response for {0},{1}".format(
            latitude, longitude))
        return keywords

    object_class = core_dic[0]["class"]
    object_type = core_dic[0]["type"]

    keywords.append(core_dic[0]["display_name"].split(",")[0])
    keywords.extend(get_osm_keywords(object_class, object_type))
    return keywords
Beispiel #4
0
def get_info_yandex(latitude, longitude):
    """Query the Yandex geocoder and map geo-object kinds to names.

    Returns a dict mapping geocoder ``kind`` (e.g. locality, street) to
    the geo-object name; empty on any network or parsing failure.
    """
    items = {}
    # NOTE: Yandex expects "longitude,latitude" order in the geocode param.
    query = "{0}?format=json&geocode={1},{2}".format(yandex_geocoder, longitude, latitude)

    try:
        result = request_get(query)
    except ConnectionError as errno:
        trace.error(" ".join(("Connection error:", str(errno))))
        return items
    except Timeout as errno:
        # FIX: message typo ("timeoute exceed" -> "timeout exceeded")
        trace.error(" ".join(("Connection timeout exceeded:", str(errno))))
        return items
    except HTTPError as errno:
        trace.error(" ".join(("Invalid HTTP response:", str(errno))))
        return items
    except RequestException as errno:
        trace.error('Exception: {0}'.format(errno))
        return items

    core_dic = json_loads(result.text)
    # Hoist the collection lookup out of the loop so the outer KeyError
    # handler covers only the response envelope, not per-member parsing.
    try:
        members = core_dic['response']['GeoObjectCollection']['featureMember']
    except KeyError as errno:
        trace.warning("Invalid key: {0}".format(errno))
        return items

    for feature_member in members:
        try:
            kind = feature_member['GeoObject']['metaDataProperty']['GeocoderMetaData']['kind']
            items[kind] = feature_member['GeoObject']['name']
        except KeyError as errno:
            # Skip malformed members but keep the rest.
            trace.warning("Invalid key: {0}".format(errno))

    return items
Beispiel #5
0
def read_json(url):
    """Get parsed JSON from URL.

    Raises:
        GeneFabDataManagerException: on any network or OS failure; the
        original error is chained as the cause for debuggability.
    """
    try:
        with request_get(url) as response:
            return response.json()
    except (URLError, OSError) as e:
        # FIX: chain the cause instead of discarding the original traceback.
        raise GeneFabDataManagerException("Not found", url=url) from e
 def get_oidc_urls(self, session: Dict[str, str]) -> Dict[str, str]:
     """Return a copy of *session* populated with all OIDC endpoint URLs and JWKS.

     Endpoint URLs come from the provider's discovery document when
     OIDC_OP_DISCOVERY_DOCUMENT_URL is configured, otherwise from the
     individual OIDC_OP_* settings.  Raises ImproperlyConfigured when a
     required endpoint is still missing afterwards.
     """
     session = dict(session.items())  # .copy() is not available on SessionStore
     if settings.OIDC_OP_DISCOVERY_DOCUMENT_URL:
         # Fetch the discovery document only if at least one endpoint URL
         # is not already cached in the session.
         if any((
             constants.SESSION_OP_AUTHORIZATION_URL not in session,
             constants.SESSION_OP_TOKEN_URL not in session,
             constants.SESSION_OP_USERINFO_URL not in session,
             constants.SESSION_OP_JWKS_URL not in session,
             constants.SESSION_OP_END_SESSION_URL not in session,
         )):
             doc_resp = request_get(settings.OIDC_OP_DISCOVERY_DOCUMENT_URL)
             doc_resp.raise_for_status()
             doc = doc_resp.json()
             # .get() leaves missing endpoints as None; validated below.
             session[constants.SESSION_OP_AUTHORIZATION_URL] = doc.get('authorization_endpoint')
             session[constants.SESSION_OP_TOKEN_URL] = doc.get('token_endpoint')
             session[constants.SESSION_OP_USERINFO_URL] = doc.get('userinfo_endpoint')
             session[constants.SESSION_OP_JWKS_URL] = doc.get('jwks_uri')
             session[constants.SESSION_OP_END_SESSION_URL] = doc.get('end_session_endpoint')
     elif any((
         settings.OIDC_OP_AUTHORIZATION_URL,
         settings.OIDC_OP_TOKEN_URL,
         settings.OIDC_OP_USERINFO_URL,
         settings.OIDC_OP_JWKS_URL,
         settings.OIDC_OP_END_SESSION_URL,
     )):
         # No discovery document: copy statically-configured endpoints into
         # the session, without overwriting values already present.
         for conf in ('OP_AUTHORIZATION_URL', 'OP_TOKEN_URL', 'OP_USERINFO_URL', 'OP_JWKS_URL', 'OP_END_SESSION_URL'):
             session_conf = getattr(constants, 'SESSION_' + conf)
             if session_conf not in session:
                 session[session_conf] = getattr(settings, 'OIDC_' + conf)
     # Authorization and token endpoints are always mandatory; the userinfo
     # endpoint only when user-info fetching is enabled.
     confs = ['OP_AUTHORIZATION_URL', 'OP_TOKEN_URL']
     if settings.OIDC_OP_FETCH_USER_INFO:
         confs.append('OP_USERINFO_URL')
     for conf in confs:
         session_conf = getattr(constants, 'SESSION_' + conf)
         if not session.get(session_conf):
             raise ImproperlyConfigured(f'OIDC_{conf} is undefined')
     if not session.get(constants.SESSION_OP_JWKS):
         # Cache the JWKS keyed by 'kid', keeping only RSA signing keys.
         if session.get(constants.SESSION_OP_JWKS_URL):
             jwks_resp = request_get(session[constants.SESSION_OP_JWKS_URL])
             jwks_resp.raise_for_status()
             jwks = jwks_resp.json()['keys']
         else:
             jwks = []
         session[constants.SESSION_OP_JWKS] = {key['kid']: key for key in jwks if key['kty'] == 'RSA' and key['use'] == 'sig'}
     return session
Beispiel #7
0
    def run(self) -> None:
        """Fetch a PNG snapshot from the device and emit it as a QImage."""
        snapshot_url = (self.url
                        + "/snapshot?ext=png&compress=1&orient="
                        + self.orient)
        try:
            data = request_get(snapshot_url)
        except Exception:
            # self.screenshot_complete.emit(None)
            print("错误")
            return

        qimg = QImage.fromData(data.content)
        self.screenshot_complete.emit(qimg)
Beispiel #8
0
    def request_word(cls, word):
        """Look up *word* in the Words API.

        Returns:
            tuple: (parsed JSON payload, 200) on success, or
            (error message string, 400) when the request fails.
        """
        url = f'https://wordsapiv1.p.rapidapi.com/words/{word}'
        headers = {
            'x-rapidapi-host': os.getenv('RAPID_API_HOST'),
            'x-rapidapi-key': os.getenv('RAPID_API_KEY')
        }

        print(f'searching "{word}" in rapid api')

        try:
            res = request_get(url, headers=headers)
            return res.json(), 200
        except RequestException as exc:
            # FIX: the original returned the class attribute
            # ``RequestException.strerror`` (a descriptor object), not the
            # caught exception's message.
            return str(exc), 400
Beispiel #9
0
 def _pick():
     """Return the first URL (from the enclosing scope) that responds OK."""
     fetch_opts = dict(allow_redirects=True, stream=True)
     for candidate in urls:
         try:
             with request_get(candidate, **fetch_opts) as response:
                 if response.ok:
                     return candidate
         except (URLError, OSError):
             # Unreachable candidate; try the next one.
             continue
     # Every candidate failed.
     if name:
         raise URLError(f"No URLs are reachable for {name}: {urls}")
     else:
         raise URLError(f"No URLs are reachable: {urls}")
Beispiel #10
0
def get_metar_of(station):
    """Fetch the latest METAR report line for *station* (ICAO code).

    Returns the stripped report line, or None when the downloaded file has
    no line starting with the station code.

    Raises:
        WeatherError.UnknownAirportError: for unknown ICAO codes.
        WeatherError.BaseWeatherError: on any retrieval failure.
    """
    station = station.upper()
    # Membership test on the dict itself; .keys() was redundant.
    if station not in valid_airports_by_icao_code:
        raise WeatherError.UnknownAirportError(station)
    # station is already upper-cased above — no need to repeat .upper().
    url = ("http://weather.noaa.gov/pub/data/observations/metar/stations/"
           "{}.TXT".format(station))
    try:
        urlh = request_get(url).content.decode('utf8')
        for line in urlh.split('\n'):
            if line.startswith(station):
                return line.strip()
    except Exception as exc:
        # FIX: narrowed from a bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit) and chain the original cause.
        raise WeatherError.BaseWeatherError(
            'error while retrieveing METAR for {}'.format(station)) from exc
Beispiel #11
0
 def retrieve_online_metar(self, location):
     """Fetch the TAF for *location* from aviationweather.gov, decode and print it."""
     print(location)
     # html = request_get('http://www.acukwik.com/AirportInfo/{}'.format(location)).content.decode('utf8')
     taf_url = (
         'https://www.aviationweather.gov/adds/tafs?station_ids={}&std_trans=standard&submit_taf=Get+TAFs'
         .format(location.lower()))
     page = request_get(taf_url).content.decode('utf8')
     document = BeautifulSoup(page, 'html.parser')
     # The TAF text sits inside the first <font> element of the page.
     taf_node = document.find('font')
     if taf_node is None:
         print('nope')
         return
     report = TAF(taf_node.string)
     decoder = Decoder(report)
     print(decoder.decode_taf())
     return
Beispiel #12
0
 def get_full_claims(self, request, id_claims: Dict,
                     access_token: str) -> Dict:
     """access_token is not used here, id_claims is enough"""
     # Guard clause: fall back to the ID-token claims unless user-info
     # fetching is enabled, the endpoint is in the session and we hold a
     # (truthy) access token.
     fetch_userinfo = (settings.OIDC_OP_FETCH_USER_INFO
                       and constants.SESSION_OP_USERINFO_URL in request.session
                       and access_token)
     if not fetch_userinfo:
         return id_claims
     claims = id_claims.copy()
     auth_header = f'{settings.OIDC_AUTHORIZATION_HEADER_PREFIX} {access_token}'
     userinfo_response = request_get(
         request.session[constants.SESSION_OP_USERINFO_URL],
         headers={'Authorization': auth_header},
     )
     claims.update(userinfo_response.json())
     return claims
 def run(self):
     """Ask the update server whether a newer delivery version exists.

     Emits ``check_successful`` with the server payload on success and
     ``check_fail(True)`` on any network/JSON/status error.
     """
     version = APP_DAWN.value('VERSION')
     # print(version)
     if not version:
         version = ""
     try:
         r = request_get(url=HTTP_SERVER + 'update_delivery/?version=' +
                         str(version),
                         headers={'User-Agent': 'RuiDa_ADSClient'})
         if r.status_code != 200:
             raise ValueError('检测版本失败。')
         response = json.loads(r.content.decode('utf-8'))
     except Exception:
         # FIX: the exception was bound ``as e`` but never used; failure is
         # signalled to the UI via the check_fail signal.
         self.check_fail.emit(True)
     else:
         self.check_successful.emit(response['data'])
 def download_file(self, file_name):
     """Download *file_name* from the delivery server into BASE_DIR.

     Creates intermediate directories as needed.

     Raises:
         ValueError: with the server's message on a non-200 response.
     """
     file_path = os.path.join(BASE_DIR, file_name)
     r = request_get(url=HTTP_SERVER + 'downloading_delivery/',
                     headers={
                         'User-Agent': 'RuiDa_DeliveryClient',
                         'Content-Type': 'application/json;charset=utf8'
                     },
                     data=json.dumps({'filename': file_name}))
     if r.status_code != 200:
         # FIX: the body was decoded to a str and then indexed with
         # ['message'] (TypeError); parse the JSON body first.
         response = json.loads(r.content.decode('utf-8'))
         raise ValueError(response['message'])
     file_dir = os.path.split(file_path)[0]
     if not os.path.exists(file_dir):
         os.makedirs(file_dir)
     # FIX: context manager guarantees the handle is closed even if the
     # write fails (and no longer shadows the file_name parameter).
     with open(file_path, 'wb') as out_file:
         out_file.write(r.content)
 def run(self):
     """Check the update server for a newer client version and emit the result."""
     version = APP_DAWN.value('VERSION') or ""
     # print(version)
     identify = ADMINISTRATOR
     query_url = (HTTP_SERVER + 'update/?identify=' + identify +
                  '&version=' + str(version) + '&sbit=' + SYSTEM_BIT)
     try:
         r = request_get(url=query_url,
                         headers={'User-Agent': 'RuiDa_ADSClient'})
         if r.status_code != 200:
             raise ValueError('检测版本失败。{}'.format(r.status_code))
         response = json.loads(r.content.decode('utf-8'))
     except Exception as e:
         logger.error("检测版本出错:{}".format(e))
         self.check_fail.emit(True)
     else:
         self.check_successful.emit(response['data'])
Beispiel #16
0
 def __download_as_blob(self):
     """Download data from URL as-is, trying each candidate URL in order.

     Records the successful URL on ``self.url`` and returns the raw bytes;
     raises GeneFabDataManagerException when every URL fails.
     """
     for url in self.urls:
         GeneFabLogger.info(f"{self.name}; trying URL:\n  {url}")
         try:
             with request_get(url) as response:
                 data = response.content
         except (URLError, OSError) as e:
             failure = f"{self.name}; tried URL and failed:\n  {url}"
             GeneFabLogger.warning(failure, exc_info=e)
             continue
         success = f"{self.name}; successfully fetched blob:\n  {url}"
         GeneFabLogger.info(success)
         self.url = url
         return data
     raise GeneFabDataManagerException(
         "None of the URLs are reachable for file",
         name=self.name, urls=self.urls)
Beispiel #17
0
    def m_get(self, url, **kwargs):
        '''
        Get a resource from fixtures. If AELF_DEBUG is defined and the resource can not be
        found, load it from the Internet and save it for future use. An existing resource
        will never be overwriten.
        '''
        path = './test_fixtures/' + url.replace('/', ':')
        res = FakeResponse()
        try:
            with open(path, 'r') as f:
                res.text = f.read()
        except OSError:
            # FIX: narrowed from a bare ``except:`` — only a missing or
            # unreadable fixture should trigger the network fallback.
            if 'AELF_DEBUG' not in os.environ:
                raise
            res.text = request_get(url, **kwargs).text
            # FIX: ``.encode('utf8')`` produces bytes, which cannot be
            # written to a text-mode handle; open in binary mode instead.
            with open(path, 'wb') as f:
                f.write(res.text.encode('utf8'))

        return res
Beispiel #18
0
    def __download_file(url="",
                        destination="",
                        d_num=1,
                        d_nums=10,
                        progress=True):
        """ Download a file to destination.

        Parameters:
            url: source URL.
            destination: local file path to write.
            d_num, d_nums: current index / total count shown in the bar.
            progress: when True, render a progress bar on stdout.

        Raises:
            DataSetFetchException: on a non-200 response or a local
            write/permission error.
        """
        r = request_get(url, stream=True)
        # Guard clause: bail out early on a missing file.
        if r.status_code != 200:
            raise DataSetFetchException(
                "Could not download because the file was not found on the server: "
                + url)

        total_length = r.headers.get('content-length')
        try:
            with open(destination, 'wb') as f:
                if total_length is None:
                    # Unknown size: write everything at once with a static bar.
                    if progress:
                        stdout.write("\r[Downloading %i of %i][%s%s]" %
                                     (d_num, d_nums, '=' * 20 +
                                      'please wait' + '=' * 19, ''))
                        stdout.flush()
                    f.write(r.content)
                else:
                    dl = 0
                    total_length = int(total_length)
                    for data in r.iter_content(chunk_size=4096):
                        dl += len(data)
                        f.write(data)
                        # Scale progress to a 50-character bar.
                        done = int(50 * dl / total_length)
                        if progress:
                            stdout.write("\r[Downloading %i of %i][%s%s]" %
                                         (d_num, d_nums, '=' * done, ' ' *
                                          (50 - done)))
                            stdout.flush()
        except IOError as io_err:
            # FIX: ``io_err`` was bound but never used; chain it so the
            # underlying OS error is preserved.
            raise DataSetFetchException(
                "Could not download because of invalid rights or no disk space left:"
                + url) from io_err
Beispiel #19
0
 def __copyfileobj(self, tempfile):
     """Try all URLs and push data into temporary file.

     Returns the first URL that streams successfully; raises
     GeneFabDataManagerException when every URL fails.
     """
     for url in self.urls:
         # NOTE: the file is (re)opened in "wb" for each attempt, so data
         # from a failed partial download is discarded before the next try.
         with open(tempfile, mode="wb") as handle:
             GeneFabLogger.info(f"{self.name}; trying URL:\n  {url}")
             try:
                 with request_get(url, stream=True) as response:
                     # Let requests transparently decompress the stream.
                     response.raw.decode_content = True
                     msg = f"{self.name}:\n  streaming to {tempfile}"
                     GeneFabLogger.debug(msg)
                     copyfileobj(response.raw, handle)
             except (URLError, OSError) as e:
                 msg = f"{self.name}; tried URL and failed:\n  {url}"
                 GeneFabLogger.warning(msg, exc_info=e)
             else:
                 msg = f"{self.name}; successfully fetched data:\n  {url}"
                 GeneFabLogger.info(msg)
                 return url
     else:
         # Loop exhausted without a successful return: all URLs failed.
         msg = "None of the URLs are reachable for file"
         _kw = dict(name=self.name, urls=self.urls)
         raise GeneFabDataManagerException(msg, **_kw)
    def m_get(self, url, **kwargs):
        '''
        Get a resource from fixtures. If AELF_DEBUG is defined and the resource can not be
        found, load it from the Internet and save it for future use. An existing resource
        will never be overwriten.
        '''
        filename = url.replace('/', ':')
        path = './test_fixtures/'+filename
        res = FakeResponse()
        try:
            with open(path, 'r') as f:
                res.text = f.read()
        except OSError:
            # FIX: narrowed from a bare ``except:`` — only a missing or
            # unreadable fixture should trigger the fallback.
            if filename not in os.environ.get('AELF_DEBUG', ''):
                print('Lecture not found, please set AELF_DEBUG="%s" to load it' % (filename))
                res.text = ''
            else:
                res.text = request_get(url, **kwargs).text
                # FIX: ``.encode('utf8')`` produces bytes, which cannot be
                # written to a text-mode handle; open in binary mode.
                with open(path, 'wb') as f:
                    f.write(res.text.encode('utf8'))

        return res
	def download_article(self, config, article):
		"""Download a local copy of an article.

		Saves the parsed article body to a file under ``downloaddir`` (or
		inline, when the ``inlinecontent`` option is set) and records the
		location/content on ``article.entry_info``.
		"""
		# Find the link from the article
		link = article.entry_info.get("link")
		if link == "" or link is None:
			# No link to follow.
			return

		html = request_get(link).text

		parsed_data = ""
		try:
			parsed_data = Document(html).summary()
		except Exception:
			# FIX: narrowed from a bare ``except:`` so KeyboardInterrupt
			# and SystemExit still propagate.
			config.log("error parsing article", article.entry_info.get("title"))
			return
		if self.options["inlinecontent"] is False:
			filename = sha1_hash(encode(article.entry_info.get("title"),'utf-8')).hexdigest()
			local_copy = path.join(self.options["downloaddir"],filename + ".html")

			# Mode 'w' truncates any old content, so the explicit
			# seek/truncate dance is no longer needed; the context manager
			# guarantees the handle is closed even if the write fails.
			with codecs.open(local_copy,'w','utf-8-sig') as filedata:
				try:
					filedata.write(parsed_data)
				except Exception:
					config.log("html failed to parse for article", article.entry_info.get("title"))

			local_copy = path.relpath(local_copy,self.options["downloaddir"])

			config.log("Downloaded: ", article.entry_info.get("title"), local_copy)
			# Add an attribute to the article to say where the local copy is.
			article.entry_info["download_articles_local_copy"] = local_copy
		else:
			config.log("Downloaded: ", article.entry_info.get("title"))
			# Add an attribute to the article with the html content.
			article.entry_info["download_articles_local_copy"] = parsed_data
Beispiel #22
0
def keccak_hash_file_url(file_url):
    """Return the hex Keccak digest of the file served at *file_url*."""
    payload = request_get(file_url).content
    digest = keccak.new(payload).digest()
    return digest.hex()