Example #1
def get_ip_infos(ip_address: str = None) -> dict:
    """
    Return a dict with information about the IP address of your computer (unless you use a VPN) or of a specified IP.

    Args:
        ip_address (optional): IP address for which you want the information

    Returns:
        A dict filled with the IP address information

    Examples:
        >>> print(get_ip_infos())
        {'ip': '69.69.69.69',
         'hostname': 'examplehostname',
         'city': 'Suginami City',
         'region': 'Tokyo',
         'country': 'JP',
         'loc': '35.6986, 139.6367',
         'org': 'exampleprovider',
         'postal': '166-0015',
         'timezone': 'Asia/Tokyo',
         'readme': 'https://ipinfo.io/missingauth'}

    """
    if ip_address:
        return _load(_urlopen('http://ipinfo.io/' + ip_address + '/json'))
    else:
        return _load(_urlopen('http://ipinfo.io/json'))
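
A hedged usage sketch: assuming the snippet's module-level aliases are _load = json.load and _urlopen = urllib.request.urlopen (which the calls imply), querying a specific, illustrative address would look like:

from json import load as _load
from urllib.request import urlopen as _urlopen

infos = get_ip_infos("8.8.8.8")   # hypothetical IP, not from the original
print(infos.get("country"))       # e.g. 'US'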
Example #2
def get_launcher(out_dir=".", quiet=False):
    """
    Download latest release of Minecraft Java launcher ("launcher.jar").

    Parameters
    ----------
    out_dir: str
        "server.jar" destination directory.
    quiet: bool
        If True, does not print output.
    """
    out_file = _join(out_dir, "launcher.jar")

    from lzma import decompress

    with _urlopen(_LAUNCHER_JSON_URL) as launcher_json:
        launcher_java = _loads(launcher_json.read())["java"]

    checksum = launcher_java["sha1"]
    if _already_exists(out_file, checksum, quiet):
        return

    with _urlopen(launcher_java["lzma"]["url"]) as lzma_file:
        launcher_bytes = decompress(lzma_file.read())

    _verify_and_save(out_file, launcher_bytes, checksum, quiet)
Example #3
 def urlopen(*a, **kw):
     proxies = kw.pop('proxies', None)
     if proxies is None:
         return _urlopen(*a, **kw)
     r = Request(a[0])
     for k, v in proxies.items():
         r.set_proxy(v, k)
     return _urlopen(r, *a[1:], **kw)
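
A hedged usage sketch of this wrapper: proxies maps a scheme to a proxy host, requests-style, and is translated into Request.set_proxy calls (the proxy address below is illustrative):

from urllib.request import Request, urlopen as _urlopen

resp = urlopen('http://example.com/', proxies={'http': '127.0.0.1:8080'})
print(resp.getcode())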
Example #4
 def urlopen(*a, **kw):
     proxies = kw.pop('proxies', None)
     if proxies is None:
         return _urlopen(*a, **kw)
     r = Request(a[0])
     for k, v in proxies.items():
         r.set_proxy(v, k)
     return _urlopen(r, *a[1:], **kw)
Example #5
def urlopen(url, if_modified_since=None):
    parse_result = urllib_parse.urlparse(url)
    if parse_result.scheme not in ("http", "https"):
        return _urlopen(url)
    else:
        netloc = urllib_parse_splituser(parse_result.netloc)[1]
        url = urllib_parse.urlunparse(
            (parse_result.scheme, netloc, parse_result.path,
             parse_result.params, parse_result.query, parse_result.fragment))
        password_manager = urllib_request.HTTPPasswordMgrWithDefaultRealm()
        request = urllib_request.Request(url)
        request.add_header('User-Agent', 'Gentoo Portage')
        if if_modified_since:
            request.add_header('If-Modified-Since',
                               _timestamp_to_http(if_modified_since))
        if parse_result.username is not None:
            password_manager.add_password(None, url, parse_result.username,
                                          parse_result.password)
        auth_handler = CompressedResponseProcessor(password_manager)
        opener = urllib_request.build_opener(auth_handler)
        hdl = opener.open(request)
        if hdl.headers.get('last-modified', ''):
            try:
                add_header = hdl.headers.add_header
            except AttributeError:
                # Python 2
                add_header = hdl.headers.addheader
            add_header('timestamp',
                       _http_to_timestamp(hdl.headers.get('last-modified')))
        return hdl
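
A hedged sketch of the conditional-fetch pattern this wrapper supports; the 304 handling is an assumption about the caller, and last_sync is a hypothetical stored timestamp:

from urllib.error import HTTPError

try:
    hdl = urlopen('https://example.org/Manifest', if_modified_since=last_sync)
    payload = hdl.read()
    # The wrapper mirrors Last-Modified into a parsed 'timestamp' header:
    last_sync = hdl.headers.get('timestamp', last_sync)
except HTTPError as e:
    if e.code != 304:  # 304 Not Modified: the cached copy is still current
        raise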
Example #6
def search_for_repository(module):
    if isinstance(module, _Npm2Deb):
        module = module.name
    repositories = ['collab-maint', 'pkg-javascript']
    formatted = "  {0:40} -- {1}"
    found = False
    result = {}
    my_print("Looking for existing repositories:")
    for repo in repositories:
        _debug(1, "search for %s in %s" % (module, repo))
        url_base = "http://anonscm.debian.org/gitweb"
        data = _urlopen("%s/?a=project_list&pf=%s&s=%s" %
                       (url_base, repo, module)).read()
        dom = _minidom.parseString(data)
        for row in dom.getElementsByTagName('tr')[1:]:
            try:
                columns = row.getElementsByTagName('td')
                name = columns[0].firstChild.getAttribute('href')\
                    .split('.git')[0].split('?p=')[1]
                description = columns[1].firstChild.getAttribute('title')
                found = True
                result[name] = description
                my_print(formatted.format(name, description))
            except:
                continue
    if not found:
        my_print("  None")
    return result
Example #7
def urlopen(url, if_modified_since=None):
	parse_result = urllib_parse.urlparse(url)
	if parse_result.scheme not in ("http", "https"):
		return _urlopen(url)
	else:
		netloc = urllib_parse_splituser(parse_result.netloc)[1]
		url = urllib_parse.urlunparse((parse_result.scheme, netloc, parse_result.path, parse_result.params, parse_result.query, parse_result.fragment))
		password_manager = urllib_request.HTTPPasswordMgrWithDefaultRealm()
		request = urllib_request.Request(url)
		request.add_header('User-Agent', 'Gentoo Portage')
		if if_modified_since:
			request.add_header('If-Modified-Since', _timestamp_to_http(if_modified_since))
		if parse_result.username is not None:
			password_manager.add_password(None, url, parse_result.username, parse_result.password)
		auth_handler = CompressedResponseProcessor(password_manager)
		opener = urllib_request.build_opener(auth_handler)
		hdl = opener.open(request)
		if hdl.headers.get('last-modified', ''):
			try:
				add_header = hdl.headers.add_header
			except AttributeError:
				# Python 2
				add_header = hdl.headers.addheader
			add_header('timestamp', _http_to_timestamp(hdl.headers.get('last-modified')))
		return hdl
Example #8
 def _submit(self, operationUrl, data):
     # type: (str, dict) -> dict
     
     orderedData = _OrderedDict()
     isBatch = "batch" in operationUrl
     
     if not self.submitRequests and "format" in data.keys():
         data.pop("format")
     
     for key in sorted(data.keys()):
         orderedData[key] = data[key]
     data = orderedData
     
     requestUrls = data.pop("requests") if isBatch else []
     requestAsParams = "&".join(["requests[]=" + url for url in requestUrls]) if isBatch else ""
         
     urlParams = _urlencode(data)
     urlParams += "&" + requestAsParams if isBatch else ""
     urlToSignature = operationUrl + urlParams + self.privateKey
     signature = _md5(urlToSignature.encode()).hexdigest()
     finalUrl = operationUrl + urlParams + "&signature=" + signature
     
     if self.submitRequests:
         if _DEBUG: print("Requesting URL:", finalUrl)
         response = _urlopen(finalUrl).read().decode()
         
         if self.responseFormat == "json":
             return _literal_eval(response)["response"]
         else:
             return response
     else:
         if _DEBUG: print("Generated URL:", finalUrl)
         return finalUrl
Example #9
    def geocode(self, address):
        if self.status.remainingQuota == 0:
            raise GeocoderFatalException("Today's quota has been depleted.")

        url = '%s?%s' % (self.url, _urlencode({'sensor': 'false', 'address': address}))

        if self.logger: self.logger.info('GET: %s' % (url,))

        responseFile = _urlopen(url)

        self.status.decrementQuota()

        responseJson = responseFile.read().decode('utf-8')
        response = _json.loads(responseJson)

        if response['status'] == 'ZERO_RESULTS': return (None, None)

        if self.logger: self.logger.info('Status: %s' % (response['status'],))
        assert(response['status'] == 'OK')
        assert(len(response['results']) > 0)

        result = response['results'][0]
        geometry = result['geometry']
        location = geometry['location']
        latitude = location['lat']
        longitude = location['lng']

        if self.logger: self.logger.info('Geocoded "%s" to (%f,%f)' % (address, latitude, longitude))

        return (latitude, longitude)
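
A hedged usage sketch (the address is illustrative, and geocoder stands for an instance of the surrounding class; self.url is assumed to point at Google's geocoding endpoint, as the sensor/address parameters suggest):

latitude, longitude = geocoder.geocode('1600 Amphitheatre Parkway, Mountain View, CA')
if latitude is None:
    print('no match')  # the ZERO_RESULTS path returns (None, None)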
Example #10
    def _request(self, method='GET', data=None, **kwargs):
        url = self._create_url(**kwargs)

        if data is None:
            request = _Request(url=url, method=method)
        else:
            request = _Request(url=url,
                               method=method,
                               headers={"Content-Type": "application/json"},
                               data=_json.dumps(
                                   data, default=_jsonify_numpy).encode())

        try:
            url_conn = _urlopen(request,
                                timeout=ConfigDBClient._TIMEOUT_DEFAULT)
            response = _json.loads(url_conn.read().decode("utf-8"))
        except _json.JSONDecodeError:
            response = {"code": -1, "message": "JSON decode error"}
        except _URLError as err:
            response = {'code': -2, 'message': str(err)}

        # print(response)
        if response['code'] != 200:
            raise ConfigDBException(response)
        return response['result']
Example #11
    def _open(self, id=None, mode='rb', compression='default', **kwargs):
        formatter = Formatter()
        fields = [f[1] for f in formatter.parse(self.url)]
        assert len(
            set(fields).intersection(['id', 'stubbypath', 'pairtreepath'])) > 0

        if compression == 'default':
            compression = self.compression
        if mode == 'wb':
            raise NotImplementedError("Mode is not defined")

        stubbypath, pairtreepath = None, None
        if 'stubbypath' in fields:
            stubbypath = id_to_stubbytree(id,
                                          format=self.format,
                                          compression=compression)
        if 'pairtreepath' in fields:
            pairtreepath = id_to_pairtree(id,
                                          format=self.format,
                                          compression=compression)

        path_or_url = self.url.format(id=id,
                                      stubbypath=stubbypath,
                                      pairtreepath=pairtreepath)

        try:
            byt = _urlopen(path_or_url).read()
            req = BytesIO(byt)
        except HTTPError:
            logging.exception("HTTP Error accessing %s" % path_or_url)
            raise
        return req
Example #12
def search_for_repository(module):
    if isinstance(module, _Npm2Deb):
        module = module.name
    repositories = ['collab-maint', 'pkg-javascript']
    formatted = "  {0:40} -- {1}"
    found = False
    result = {}
    my_print("Looking for existing repositories:")
    for repo in repositories:
        _debug(1, "search for %s in %s" % (module, repo))
        url_base = "http://anonscm.debian.org/gitweb"
        data = _urlopen("%s/?a=project_list&pf=%s&s=%s" %
                        (url_base, repo, module)).read()
        dom = _minidom.parseString(data)
        for row in dom.getElementsByTagName('tr')[1:]:
            try:
                columns = row.getElementsByTagName('td')
                name = columns[0].firstChild.getAttribute('href')\
                    .split('.git')[0].split('?p=')[1]
                description = columns[1].firstChild.getAttribute('title')
                found = True
                result[name] = description
                my_print(formatted.format(name, description))
            except:
                continue
    if not found:
        my_print("  None")
    return result
Example #13
def search_for_repository(module):
    if isinstance(module, _Npm2Deb):
        module = module.debian_name
    else:
        module = 'node-%s' % _debianize_name(module)

    formatted = "  {0:40} -- {1}"
    found = False
    result = {}

    my_print("Looking for existing repositories on salsa.debian.org:")
    data = json.loads(
        _urlopen(
            "https://salsa.debian.org/groups/js-team/-/children.json?filter=%s"
            % module).read())
    if len(data) > 0:
        found = True
        for repo in data:
            name = repo['name']
            description = repo['description']
            result[name] = description
            my_print(formatted.format(name, description))
    if not found:
        my_print("  None")
    return result
Example #14
def read_file(filename, bbox=None, **kwargs):
    """
    Returns a GeoDataFrame from a file or URL.

    Parameters
    ----------
    filename: str
        Either the absolute or relative path to the file or URL to
        be opened.
    bbox : tuple | GeoDataFrame or GeoSeries, default None
        Filter features by given bounding box, GeoSeries, or GeoDataFrame.
        CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame.
    **kwargs:
        Keyword args to be passed to the `open` or `BytesCollection` method
        in the fiona library when opening the file. For more information on
        possible keywords, type:
        ``import fiona; help(fiona.open)``

    Examples
    --------
    >>> df = geopandas.read_file("nybb.shp")

    Returns
    -------
    geodataframe : GeoDataFrame
    """
    if _is_url(filename):
        req = _urlopen(filename)
        path_or_bytes = req.read()
        reader = fiona.BytesCollection
    else:
        path_or_bytes = filename
        reader = fiona.open

    with fiona_env():
        with reader(path_or_bytes, **kwargs) as features:

            # In a future Fiona release the crs attribute of features will
            # no longer be a dict. The following code will be both forward
            # and backward compatible.
            if hasattr(features.crs, "to_dict"):
                crs = features.crs.to_dict()
            else:
                crs = features.crs

            if bbox is not None:
                if isinstance(bbox, GeoDataFrame) or isinstance(
                        bbox, GeoSeries):
                    bbox = tuple(bbox.to_crs(crs).total_bounds)
                assert len(bbox) == 4
                f_filt = features.filter(bbox=bbox)
            else:
                f_filt = features

            columns = list(
                features.meta["schema"]["properties"]) + ["geometry"]
            gdf = GeoDataFrame.from_features(f_filt, crs=crs, columns=columns)

    return gdf
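
A hedged usage sketch of the bbox filter (file name and coordinates are illustrative); a GeoSeries or GeoDataFrame passed as bbox is reprojected to the file's CRS before filtering:

import geopandas

df = geopandas.read_file("nybb.shp", bbox=(-74.05, 40.68, -73.86, 40.77))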
Example #15
    def read_package_info(self):
        data = None
        name_is = None
        if _re.match("^(http:\/\/|https:\/\/)", self.name):
            utils.debug(1, "reading json - opening url %s" % self.name)
            data = _urlopen(self.name).read().decode('utf-8')
            name_is = 'url'

        elif _os.path.isfile(self.name):
            utils.debug(1, "reading json - opening file %s" % self.name)
            with open(self.name, 'r') as fd:
                data = fd.read()
            name_is = 'file'

        else:
            name_is = 'npm'
            utils.debug(1, "reading json - calling npm view %s" % self.name)
            info = _getstatusoutput('npm view "%s" --json 2>/dev/null' %
                                    self.name)
            # if exit status is not 0, raise an exception
            if info[0] != 0:
                info = _getstatusoutput('npm view "%s" --json' % self.name)
                exception = 'npm reports errors about %s module:\n' % self.name
                exception += info[1]
                raise ValueError(exception)
            if not info[1]:
                exception = 'npm returns empty json for %s module' % self.name
                raise ValueError(exception)
            data = info[1]

        try:
            self.json = _parseJSON(data)
        except ValueError as value_error:
            # if parsing fails, try to fail gracefully
            if str(value_error) == 'Expecting value: line 1 column 1 (char 0)':
                if name_is != 'npm':
                    raise ValueError("Data read from %s "
                                     "is not in a JSON format." % name_is)
                versions = []
                for line in data.split('\n'):
                    if _re.match(r"^[a-z](.*)@[0-9]", line):
                        version = line.split('@')[1]
                        versions.append(version)
                if len(versions) > 0:
                    exception = "More than one version found. "\
                        "Please specify one of:\n %s" % '\n '.join(versions)
                    raise ValueError(exception)
                else:
                    raise value_error
            else:
                raise value_error

        self.name = self.json['name']
        self._get_json_author()
        self._get_json_repo_url()
        self._get_json_homepage()
        self._get_json_description()
        self._get_json_version()
        self._get_json_license()
Example #16
    def read_package_info(self):
        data = None
        name_is = None
        if _re.match("^(http:\/\/|https:\/\/)", self.name):
            utils.debug(1, "reading json - opening url %s" % self.name)
            data = _urlopen(self.name).read().decode('utf-8')
            name_is = 'url'

        elif _os.path.isfile(self.name):
            utils.debug(1, "reading json - opening file %s" % self.name)
            with open(self.name, 'r') as fd:
                data = fd.read()
            name_is = 'file'

        else:
            name_is = 'npm'
            utils.debug(1, "reading json - calling npm view %s" % self.name)
            info = _getstatusoutput('npm view "%s" --json 2>/dev/null' %
                                    self.name)
            # if exit status is not 0, raise an exception
            if info[0] != 0:
                info = _getstatusoutput('npm view "%s" --json' % self.name)
                exception = 'npm reports errors about %s module:\n' % self.name
                exception += info[1]
                raise ValueError(exception)
            if not info[1]:
                exception = 'npm returns empty json for %s module' % self.name
                raise ValueError(exception)
            data = info[1]

        try:
            self.json = _parseJSON(data)
        except ValueError as value_error:
            # if parsing fails, try to fail gracefully
            if str(value_error) == 'Expecting value: line 1 column 1 (char 0)':
                if name_is != 'npm':
                    raise ValueError("Data read from %s "
                                     "is not in a JSON format." % name_is)
                versions = []
                for line in data.split('\n'):
                    if _re.match(r"^[a-z](.*)@[0-9]", line):
                        version = line.split('@')[1]
                        versions.append(version)
                if len(versions) > 0:
                    exception = "More than one version found. "\
                        "Please specify one of:\n %s" % '\n '.join(versions)
                    raise ValueError(exception)
                else:
                    raise value_error
            else:
                raise value_error

        self.name = self.json['name']
        self._get_json_author()
        self._get_json_repo_url()
        self._get_json_homepage()
        self._get_json_description()
        self._get_json_version()
        self._get_json_license()
Example #17
    def _call(self, base_url, url, body):
        """Open a network connection and performs HTTP Post
		with provided body.
		"""
        # Combines the "base_url" with the
        # required "url" to be used for the specific request.
        url = urljoin(base_url.geturl(), url)
        return _urlopen(url, data=self._encode_params(body))
Example #18
 def _call(self, base_url, url, body):
     """Open a network connection and perform an HTTP POST
     with the provided body.
     """
     # Combines the "base_url" with the
     # required "url" to be used for the specific request.
     url = urljoin(base_url.geturl(), url)
     return _urlopen(url, data=self._encode_params(body))
Example #19
def urlopen(url):
    from sorl.thumbnail.conf import settings

    req = Request(
        url,
        headers={'User-Agent': "python-urllib%s/0.6" % PythonVersion}
    )
    return _urlopen(req, timeout=settings.THUMBNAIL_URL_TIMEOUT)
Example #20
def urlopen(url):
    from sorl.thumbnail.conf import settings

    req = Request(
        url,
        headers={'User-Agent': "python-urllib%s/0.6" % PythonVersion}
    )
    return _urlopen(req, timeout=settings.THUMBNAIL_URL_TIMEOUT)
Example #21
def _q(op, arg1, arg2=None, arg3=None, timeout=None):
    URL = "http://rest.kegg.jp/%s"
    if arg2 and arg3:
        args = "%s/%s/%s/%s" % (op, arg1, arg2, arg3)
    elif arg2:
        args = "%s/%s/%s" % (op, arg1, arg2)
    else:
        args = "%s/%s" % (op, arg1)

    if timeout is not None:
        resp = _urlopen(URL % (args), timeout=timeout)
    else:
        resp = _urlopen(URL % (args))

    if "image" == arg2:
        return resp

    return _binary_to_string_handle(resp)
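
A hedged usage sketch against the public KEGG REST API; the op/arg1[/arg2[/arg3]] layout follows KEGG's documented URL scheme, and the gene identifier is illustrative:

handle = _q("get", "hsa:10458")  # fetches http://rest.kegg.jp/get/hsa:10458
print(handle.read()[:100])       # a text handle, thanks to _binary_to_string_handle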
Example #22
def read_file(filename, bbox=None, **kwargs):
    """
    Returns a GeoDataFrame from a file or URL.

    Parameters
    ----------
    filename: str
        Either the absolute or relative path to the file or URL to
        be opened.
    bbox : tuple | GeoDataFrame or GeoSeries, default None
        Filter features by given bounding box, GeoSeries, or GeoDataFrame.
        CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame.
    **kwargs:
        Keyword args to be passed to the `open` or `BytesCollection` method
        in the fiona library when opening the file. For more information on
        possible keywords, type:
        ``import fiona; help(fiona.open)``

    Examples
    --------
    >>> df = geopandas.read_file("nybb.shp")

    Returns
    -------
    geodataframe : GeoDataFrame
    """
    if _is_url(filename):
        req = _urlopen(filename)
        path_or_bytes = req.read()
        reader = fiona.BytesCollection
    else:
        path_or_bytes = filename
        reader = fiona.open

    with fiona_env():
        with reader(path_or_bytes, **kwargs) as features:

            # In a future Fiona release the crs attribute of features will
            # no longer be a dict. The following code will be both forward
            # and backward compatible.
            if hasattr(features.crs, 'to_dict'):
                crs = features.crs.to_dict()
            else:
                crs = features.crs

            if bbox is not None:
                if isinstance(bbox, GeoDataFrame) or isinstance(bbox, GeoSeries):
                    bbox = tuple(bbox.to_crs(crs).total_bounds)
                assert len(bbox) == 4
                f_filt = features.filter(bbox=bbox)
            else:
                f_filt = features

            columns = list(features.meta["schema"]["properties"]) + ["geometry"]
            gdf = GeoDataFrame.from_features(f_filt, crs=crs, columns=columns)

    return gdf
Example #23
def urlopen(url):
    from sorl.thumbnail.conf import settings

    headers = getattr(settings, 'THUMBNAIL_URL_HEADERS')
    req = Request(
        url,
        headers=headers
    )
    return _urlopen(req, timeout=settings.THUMBNAIL_URL_TIMEOUT)
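
A hedged configuration sketch: both settings are read from sorl-thumbnail's settings object at call time, so a Django project would define something like (values illustrative):

# settings.py
THUMBNAIL_URL_HEADERS = {'User-Agent': 'my-site-thumbnailer/1.0'}
THUMBNAIL_URL_TIMEOUT = 10  # seconds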
Example #24
 def __init__(self):
     if self.INSTANCE is not None:
         raise ValueError("Mapper is a Singleton. "
                          "Please use get_instance method.")
     _debug(2, 'loading database from %s' % DB_URL)
     data = _findall('{{{(.*)}}}', _urlopen("%s?action=raw"
                     % DB_URL).read().decode('utf-8').replace('\n', ''))[0]
     self.json = _parseJSON(data)
     self._warnings = {}
     self.reset_warnings()
Example #25
def download(url, sha1):
    """Download a file and verify its hash"""
    r = _urlopen(url)
    byte_content = r.read()
    download_sha1 = hashlib.sha1(byte_content).hexdigest()
    if download_sha1 != sha1:
        raise ValueError(
            'downloaded {!r} has the wrong SHA1 hash: {} != {}'.format(
                url, download_sha1, sha1))
    return byte_content
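
A hedged usage sketch (URL and digest are illustrative; the digest is simply the SHA-1 you expect for the downloaded bytes):

content = download(
    'https://example.com/tool.tar.gz',
    sha1='0123456789abcdef0123456789abcdef01234567')  # hypothetical digest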
Example #26
def install_nano(install_directory):
    """Download and install the nano text editor"""
    url = "http://www.nano-editor.org/dist/v2.2/NT/nano-2.2.6.zip"
    r = _urlopen(url)
    nano_zip_content = _BytesIO(r.read())
    nano_zip = zipfile.ZipFile(nano_zip_content)
    nano_files = ['nano.exe', 'cygwin1.dll', 'cygintl-8.dll',
                  'cygiconv-2.dll', 'cyggcc_s-1.dll']
    for file_name in nano_files:
        nano_zip.extract(file_name, install_directory)
Example #27
def download_file(url, filename=None, overwrite=True):
    response = _urlopen(url)
    if not filename:
        _, params = _cgi.parse_header(
            response.headers.get('Content-Disposition', ''))
        filename = params.get('filename', url.split('/')[-1].split('?')[0])
    if overwrite or not _isfile(filename):
        with open(filename, 'wb') as f:
            f.write(response.read())
    return filename
Example #28
 def _call(self, base_url, url, params):
     """Open a network connection and perform an HTTP POST
     with the provided params.
     """
     # Combines the "base_url" with the
     # required "url" to be used for the specific request.
     url = urljoin(base_url.geturl(), url)
     body = self._encode_params(params)
     log.debug("Making a request to <%s> with body <%s>", url, body)
     return _urlopen(url, data=body)
Example #29
def download(url, sha1):
    """Download a file and verify it's hash"""
    r = _urlopen(url)
    byte_content = r.read()
    download_sha1 = hashlib.sha1(byte_content).hexdigest()
    if download_sha1 != sha1:
        raise ValueError(
            'downloaded {!r} has the wrong SHA1 hash: {} != {}'.format(
                url, download_sha1, sha1))
    return byte_content
Example #30
 def _call(self, base_url, url, params):
     """Open a network connection and perform an HTTP POST
     with the provided params.
     """
     # Combines the "base_url" with the
     # required "url" to be used for the specific request.
     url = urljoin(base_url.geturl(), url)
     body = self._encode_params(params)
     log.debug("Making a request to <%s> with body <%s>", url, body)
     return _urlopen(url, data=body)
Example #31
    def get_debian_package(self, node_module):
        result = {}
        result['info'] = None
        result['name'] = None
        result['version'] = None
        result['suite'] = None
        result['repr'] = None
        db_package = None

        if node_module in self.json:
            db_package = self.json[node_module]
        else:
            for pattern in self.json.keys():
                if _fnmatch(node_module, pattern):
                    db_package = self.json[pattern]
                    break

        if db_package:
            if 'replace' in db_package:
                result['name'] = db_package['replace']
            if 'info' in db_package:
                result['info'] = ('info', db_package['info'])
                self.append_warning('info', node_module, db_package['info'])
            elif 'warning' in db_package:
                result['info'] = ('warning', db_package['warning'])
                self.append_warning('warning', node_module,
                                    db_package['warning'])
            elif 'error' in db_package:
                result['info'] = ('error', db_package['error'])
                self.append_warning('error', node_module, db_package['error'])
        else:
            result['name'] = 'node-%s' % _debianize_name(node_module)

        if not result['name']:
            return result

        data = _urlopen(MADISON_URL % result['name']).read().decode('utf-8')
        packages = _parseJSON(data)
        if len(packages) < 1:
            result['name'] = None
            return result
        result['version'] = '0:0'
        for suite, versions in packages[0][result['name']].items():
            for version, source in versions.items():
                if apt_pkg.version_compare(version, result['version']) > 0:
                    result['version'] = version
                    result['suite'] = suite
                    result['name'] = source['source']
                    result['repr'] = '%s (%s)' % (result['name'],
                                                  result['version'])
        if result['version'] == '0:0':
            result['version'] = None

        return result
Example #32
def download(url, sha1):
    """Download a file and verify its hash"""
    LOG.debug('download {}'.format(url))
    r = _urlopen(url)
    byte_content = r.read()
    download_sha1 = hashlib.sha1(byte_content).hexdigest()
    if download_sha1 != sha1:
        raise ValueError(
            'downloaded {!r} has the wrong SHA-1 hash: {} != {}'.format(
                url, download_sha1, sha1))
    LOG.debug('SHA-1 for {} matches the expected {}'.format(url, sha1))
    return byte_content
Example #33
    def _get_response(signed_url):
        """Perform an API query and return the result.

        :param signed_url: A fully formed url with signature.
        :return: A dict containing the response.
        """
        try:
            http_response = _urlopen(signed_url)
            response = _json.loads(http_response.read())['response']
            return response
        except Exception as e:
            return {'success': 'false', 'error': str(e)}
Example #34
def download(url, sha1):
    """Download a file and verify its hash"""
    LOG.debug('download {}'.format(url))
    r = _urlopen(url)
    byte_content = r.read()
    download_sha1 = hashlib.sha1(byte_content).hexdigest()
    if download_sha1 != sha1:
        raise ValueError(
            'downloaded {!r} has the wrong SHA-1 hash: {} != {}'.format(
                url, download_sha1, sha1))
    LOG.debug('SHA-1 for {} matches the expected {}'.format(url, sha1))
    return byte_content
Example #35
    def _check_external_link(self, link):
        sock, code, error = None, None, None

        try:
            sock = _urlopen(link, timeout=5)
            code = sock.getcode()
        except IOError as e:
            error = e
        finally:
            if sock:
                sock.close()

        return code, error
Example #36
def zip_install(url, sha1, install_directory):
    """Download and install a zipped bundle of compiled software"""
    r = _urlopen(url)
    zip_bytes = r.read()
    download_sha1 = hashlib.sha1(zip_bytes).hexdigest()
    if download_sha1 != sha1:
        raise ValueError(
            'downloaded {!r} has the wrong SHA1 hash: {} != {}'.format(
                url, download_sha1, sha1))
    zip_io = _BytesIO(zip_bytes)
    zip_file = zipfile.ZipFile(zip_io)
    if not os.path.isdir(install_directory):
        os.makedirs(install_directory)
    zip_file.extractall(install_directory)
Example #37
def zip_install(url, sha1, install_directory):
    """Download and install a zipped bundle of compiled software"""
    r = _urlopen(url)
    zip_bytes = r.read()
    download_sha1 = hashlib.sha1(zip_bytes).hexdigest()
    if download_sha1 != sha1:
        raise ValueError(
            'downloaded {!r} has the wrong SHA1 hash: {} != {}'.format(
                url, download_sha1, sha1))
    zip_io = _BytesIO(zip_bytes)
    zip_file = zipfile.ZipFile(zip_io)
    if not os.path.isdir(install_directory):
        os.makedirs(install_directory)
    zip_file.extractall(install_directory)
Example #38
    def _read_json(self, path_or_url, compressed=True, advanced_path=False):
        ''' Load JSON for a path. Allows remote files in addition to local ones. '''
        if parse_url(path_or_url).scheme in ['http', 'https']:
            try:
                req = _urlopen(path_or_url)
                filename_or_buffer = BytesIO(req.read())
            except HTTPError:
                logging.exception("HTTP Error accessing %s" % path_or_url)
                raise
            compressed = False
        else:
            filename_or_buffer = path_or_url

        try:
            if compressed:
                f = bz2.BZ2File(filename_or_buffer)
            else:
                if (type(filename_or_buffer) != BytesIO) and not isinstance(
                        filename_or_buffer, StringIO):
                    f = codecs.open(filename_or_buffer, 'r+', encoding="utf-8")
                else:
                    f = filename_or_buffer
            rawjson = f.readline()
            f.close()
        except IOError:
            logging.exception(
                "Can't read %s. Did you pass the incorrect "
                "'compressed=' argument?", path_or_url)
            raise
        except:
            print(compressed, type(filename_or_buffer))
            logging.exception("Can't open %s", path_or_url)
            raise

        # This is a bandaid for schema version 2.0, not over-engineered
        # since upcoming releases of the extracted features
        # dataset won't keep the basic/advanced split

        try:
            # For Python3 compatibility, decode to str object
            if PY3 and (type(rawjson) != str):
                rawjson = rawjson.decode()
            volumejson = json.loads(rawjson)
        except:
            logging.exception(
                "Problem reading JSON for %s. One common reason"
                " for this error is an incorrect compressed= "
                "argument", path_or_url)
            raise
        return volumejson
Example #39
    def _open(self, id = None, mode = 'rb', compression='default', **kwargs):
        if compression == 'default':
            compression = self.compression
        if compression == 'bz2':
            raise Exception("You have requested to read from HTTP with bz2 compression, but at time of writing this was not supported.")
        if mode == 'wb':
            raise NotImplementedError("Mode is not defined")
        path_or_url = self.url.format(id = id)

        try:
            req = BytesIO(_urlopen(path_or_url).read())
        except HTTPError:
            logging.exception("HTTP Error accessing %s" % path_or_url)
            raise
        return req
Example #40
def get_server(out_dir=".", quiet=False):
    """
    Download latest release of Minecraft server ("server.jar").

    Parameters
    ----------
    out_dir: str
        "server.jar" destination directory.
    quiet: bool
        If True, does not print output.
    """
    out_file = _join(out_dir, "server.jar")

    with _urlopen(_MANIFEST_URL) as manifest_json:
        manifest = _loads(manifest_json.read())

    latest = manifest["latest"]["release"]

    for version in manifest["versions"]:
        if version["id"] == latest:
            version_json_url = version["url"]
            break
    else:
        raise RuntimeError(f"Server version {latest} not found in versions list.")

    with _urlopen(version_json_url) as version_json:
        server = _loads(version_json.read())["downloads"]["server"]

    checksum = server["sha1"]
    if _already_exists(out_file, checksum, quiet):
        return

    with _urlopen(server["url"]) as server_file:
        server_bytes = server_file.read()

    _verify_and_save(out_file, server_bytes, checksum, quiet)
Example #41
def download(url, sha1):
    """Download a file (if not present in directory) and verify its hash"""
    LOG.debug('download {}'.format(url))
    try:
        r = open(url.split('/')[-1], 'rb')
    except FileNotFoundError:
        r = _urlopen(url)
    byte_content = r.read()
    download_sha1 = hashlib.sha1(byte_content).hexdigest()
    if download_sha1 != sha1:
        raise ValueError(
            'downloaded {!r} has the wrong SHA-1 hash: {} != {}'.format(
                url, download_sha1, sha1))
    LOG.debug('SHA-1 for {} matches the expected {}'.format(url, sha1))
    return byte_content
Example #42
    def _read_json(self, path_or_url, compressed=True, advanced_path=False):
        ''' Load JSON for a path. Allows remote files in addition to local ones. '''
        if parse_url(path_or_url).scheme in ['http', 'https']:
            try:
                req = _urlopen(path_or_url)
                filename_or_buffer = BytesIO(req.read())
            except HTTPError:
                logging.exception("HTTP Error accessing %s" % path_or_url)
                raise
            compressed = False
        else:
            filename_or_buffer = path_or_url
        
        try:
            if compressed:
                f = bz2.BZ2File(filename_or_buffer)
            else:
                if (type(filename_or_buffer) != BytesIO) and not isinstance(filename_or_buffer, StringIO):
                    f = codecs.open(filename_or_buffer, 'r+', encoding="utf-8")
                else:
                    f = filename_or_buffer
            rawjson = f.readline()
            f.close()
        except IOError:
            logging.exception("Can't read %s. Did you pass the incorrect "
                              "'compressed=' argument?", path_or_url)
            raise
        except:
            print(compressed, type(filename_or_buffer))
            logging.exception("Can't open %s", path_or_url)
            raise

        # This is a bandaid for schema version 2.0, not over-engineered
        # since upcoming releases of the extracted features
        # dataset won't keep the basic/advanced split

        try:
            # For Python3 compatibility, decode to str object
            if PY3 and (type(rawjson) != str):
                rawjson = rawjson.decode()
            volumejson = json.loads(rawjson)
        except:
            logging.exception("Problem reading JSON for %s. One common reason"
                              " for this error is an incorrect compressed= "
                              "argument", path_or_url)
            raise
        return volumejson
Example #43
def read_file(filename, **kwargs):
    """
    Returns a GeoDataFrame from a file or URL.

    Parameters
    ----------
    filename: str
        Either the absolute or relative path to the file or URL to
        be opened.
    **kwargs:
        Keyword args to be passed to the `open` or `BytesCollection` method
        in the fiona library when opening the file. For more information on
        possible keywords, type:
        ``import fiona; help(fiona.open)``

    Examples
    --------
    >>> df = geopandas.read_file("nybb.shp")

    Returns
    -------
    geodataframe : GeoDataFrame
    """
    bbox = kwargs.pop('bbox', None)
    if _is_url(filename):
        req = _urlopen(filename)
        path_or_bytes = req.read()
        reader = fiona.BytesCollection
    else:
        path_or_bytes = filename
        reader = fiona.open
    with reader(path_or_bytes, **kwargs) as f:
        crs = f.crs
        if bbox is not None:
            assert len(bbox) == 4
            f_filt = f.filter(bbox=bbox)
        else:
            f_filt = f
        gdf = GeoDataFrame.from_features(f_filt, crs=crs)
        # re-order with column order from metadata, with geometry last
        columns = list(f.meta["schema"]["properties"]) + ["geometry"]
        gdf = gdf[columns]

    return gdf
Example #44
def search_for_reverse_dependencies(module):
    if isinstance(module, _Npm2Deb):
        module = module.name
    url = "http://registry.npmjs.org/-/_view/dependedUpon?startkey=" \
        + "[%%22%(name)s%%22]&endkey=[%%22%(name)s%%22,%%7B%%7D]&group_level=2"
    url = url % {'name': module}
    _debug(1, "opening url %s" % url)
    data = _urlopen(url).read().decode('utf-8')
    data = _parseJSON(data)
    result = []
    if 'rows' in data and len(data['rows']) > 0:
        my_print("Reverse Depends:")
        for row in data['rows']:
            dependency = row['key'][1]
            result.append(dependency)
            my_print("  %s" % dependency)
    else:
        my_print("Module %s has no reverse dependencies" % module)
    return result
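
A hedged usage sketch (module name illustrative); the registry view groups results so each row's key[1] is the name of a dependent package:

rdeps = search_for_reverse_dependencies('express')
print('%d packages depend on it' % len(rdeps))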
Example #45
def search_for_reverse_dependencies(module):
    if isinstance(module, _Npm2Deb):
        module = module.name
    url = "http://registry.npmjs.org/-/_view/dependedUpon?startkey=" \
        + "[%%22%(name)s%%22]&endkey=[%%22%(name)s%%22,%%7B%%7D]&group_level=2"
    url = url % {'name': module}
    _debug(1, "opening url %s" % url)
    data = _urlopen(url).read().decode('utf-8')
    data = _parseJSON(data)
    result = []
    if 'rows' in data and len(data['rows']) > 0:
        my_print("Reverse Depends:")
        for row in data['rows']:
            dependency = row['key'][1]
            result.append(dependency)
            my_print("  %s" % dependency)
    else:
        my_print("Module %s has no reverse dependencies" % module)
    return result
Example #46
def urlopen(url, if_modified_since=None, headers={}, proxies=None):
    parse_result = urllib_parse.urlparse(url)
    if parse_result.scheme not in ("http", "https"):
        return _urlopen(url)

    netloc = parse_result.netloc.rpartition("@")[-1]
    url = urllib_parse.urlunparse((
        parse_result.scheme,
        netloc,
        parse_result.path,
        parse_result.params,
        parse_result.query,
        parse_result.fragment,
    ))
    password_manager = urllib_request.HTTPPasswordMgrWithDefaultRealm()
    request = urllib_request.Request(url)
    request.add_header("User-Agent", "Gentoo Portage")
    for key in headers:
        request.add_header(key, headers[key])
    if if_modified_since:
        request.add_header("If-Modified-Since",
                           _timestamp_to_http(if_modified_since))
    if parse_result.username is not None:
        password_manager.add_password(None, url, parse_result.username,
                                      parse_result.password)

    handlers = [CompressedResponseProcessor(password_manager)]
    if proxies:
        handlers.append(urllib_request.ProxyHandler(proxies))
    opener = urllib_request.build_opener(*handlers)

    hdl = opener.open(request)
    if hdl.headers.get("last-modified", ""):
        try:
            add_header = hdl.headers.add_header
        except AttributeError:
            # Python 2
            add_header = hdl.headers.addheader
        add_header("timestamp",
                   _http_to_timestamp(hdl.headers.get("last-modified")))
    return hdl
Example #47
def search_for_repository(module):
    if isinstance(module, _Npm2Deb):
        module = module.debian_name
    else:
        module = 'node-%s' % _debianize_name(module)

    formatted = "  {0:40} -- {1}"
    found = False
    result = {}

    my_print("Looking for existing repositories on salsa.debian.org:")
    data = json.loads(_urlopen(
        "https://salsa.debian.org/groups/js-team/-/children.json?filter=%s" % module).read())
    if len(data) > 0:
        found = True
        for repo in data:
            name = repo['name']
            description = repo['description']
            result[name] = description
            my_print(formatted.format(name, description))
    if not found:
        my_print("  None")
    return result
Example #48
    def collect_data(self, nsec_wait=10, ignore_time=86400, verbose=False,\
      data_url=bixi_urls["montreal"], **kwargs):
        """
        kwargs:
          dump_wait: If set, dump every dump_wait seconds.
          dump_fname: If dump_wait is set, write to this file. If not set,
                      defaults to "bixi_data.json".
        """
        dump_wait = None
        dump_fname = "bixi_data.json"

        if 'dump_wait' in kwargs:
            dump_wait = kwargs['dump_wait']
        if 'dump_fname' in kwargs:
            dump_fname = kwargs['dump_fname']

        start_time = _datetime.now()
        last_dump = _datetime.now()
        iter_num = 0
        try:
            print("Press Ctrl+C to stop collecting data.")
            while True:
                if iter_num > 0:
                    if verbose: print("Waiting %d seconds..." % nsec_wait)
                    _time.sleep(nsec_wait)
                iter_num += 1
                if verbose: print("Iteration number %d" % iter_num)
                if verbose: print("Querying XML data...")
                try:
                    # NOTE: Incredibly dumb form of error testing: get data
                    # twice and make sure they're the same
                    with _urlopen(data_url) as f:
                        parsed_xml = _ET.parse(f)
                        xml_tree = parsed_xml.getroot()
                    with _urlopen(data_url) as f:
                        parsed_xml = _ET.parse(f)
                        xml_tree_2 = parsed_xml.getroot()
                    if _ET.tostring(xml_tree) != _ET.tostring(xml_tree_2):
                        if verbose: print("Redundant XML tree doesn't match.")
                        continue
                except:
                    if verbose:
                        print("Something went wrong while creating element",\
                          "tree.")
                    continue
                if verbose: print("Checking for new data...")
                try:
                    self.last_updated = int(xml_tree.get('lastUpdate'))
                except:
                    self.last_updated = int(xml_tree.get('LastUpdate'))
                for xml_element in xml_tree:
                    station_id = int(xml_element[0].text)
                    last_comm_with_server = int(xml_element[3].text)
                    current_time = _datetime_to_tstamp(_datetime.now())
                    sec_since_comm = (current_time - last_comm_with_server)/1000
                    if sec_since_comm < ignore_time:
                        if station_id not in self.stations:
                            self.stations[station_id] =\
                              Station.from_element(xml_element)
                        self.stations[station_id].update_from_element(\
                          xml_element, verbose)
                if dump_wait is not None:
                    time_to_dump = dump_wait -\
                      (_datetime.now() - last_dump).total_seconds()
                    if time_to_dump <= 0:
                        if verbose: print("Dumping data...")
                        self.to_json_file(dump_fname)
                        last_dump = _datetime.now()
                    elif verbose: print("%d seconds to dump." % time_to_dump)
        except KeyboardInterrupt:
            nmin = (_datetime.now() - start_time).total_seconds()/60
            print("Stopped after %d iterations and %.1f minutes." %\
              (iter_num, nmin))
Example #49
def urlopen(*av, **kw):
	# just to allow testing
	return _urlopen(*av, **kw)
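
A hedged note on the indirection: keeping a module-level urlopen gives tests a seam to patch without touching urllib, e.g. with the standard library (module path hypothetical):

from unittest import mock

with mock.patch('mypkg.net.urlopen') as fake:
    fake.return_value.read.return_value = b'{}'
    run_code_under_test()  # hypothetical caller of mypkg.net.urlopen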
Example #50
def download(url, into='~/.cache/DeepFried2', saveas=None, desc=None, quiet=False):
    """
    Downloads the content of `url` into a file in the directory `into`.

    - `url`: The URL to download content from.
    - `into`: The folder to save the downloaded content to.
    - `saveas`: Optionally a different filename than that from the URL.
    - `desc`: Text used for progress-description.
    - `quiet`: Suppresses any console-output of this function if `True`.
    """

    # Make sure the target folder exists.
    into = _os.path.expanduser(into)
    try:
        _os.makedirs(into)
    except FileExistsError:
        pass

    try:
        response = _urlopen(url, timeout=5)
    except (_URLError, _timeout):
        # No internet connection is available, so just trust the file if it's already there.
        saveas = saveas or _os.path.basename(url)
        target = _os.path.join(into, saveas)

        if not _os.path.isfile(target):
            raise FileNotFoundError(target)
        if not quiet:
            print("No internet connection; using untrusted cached file at {}".format(target))
        return target

    # We do have an internet connection and were able to get to the URL.
    saveas = saveas or _httpfilename(response) or _os.path.basename(url)
    target = _os.path.join(into, saveas)

    leng = int(_getheader(response, 'Content-Length', 0))
    # assert leng == response.length, "Huh, looks like we didn't get all data. Maybe retry?"

    # In case the file's already there, we may avoid re-downloading it.
    if _os.path.isfile(target):
        # First, check if we got ETag which is a widely-supported checksum-ish HTTP header.
        try:
            with open(target + '.etag', 'r') as f:
                etag = f.read()
            if _getheader(response, 'ETag') == etag:
                return target
        except FileNotFoundError:
            pass

        # Alternatively, check whether the file has the same size.
        if _os.path.getsize(target) == leng:
            # If there's no last-modified header, just trust it blindly.
            servertime = _eutils.parsedate_tz(_getheader(response, 'Last-Modified'))
            if servertime is None:
                return target
            else:
                # But if there is, we may also check that.
                if _os.path.getmtime(target) >= _eutils.mktime_tz(servertime):
                    return target

    # TODO: Use progressbar from example utils.
    if not quiet:
        desc = desc or '{} to {}'.format(url, target)
        _sys.stdout.write('Downloading {}: {}k/{}k ({:.2%})'.format(desc, 0, leng//1024, 0))
        _sys.stdout.flush()

    with open(target, 'wb+') as f:
        while f.tell() < leng:
            f.write(response.read(1024*8))
            if not quiet:
                _sys.stdout.write('\rDownloading {}: {}k/{}k ({:.2%})'.format(desc, f.tell()//1024, leng//1024, float(f.tell())/leng))
                _sys.stdout.flush()
    if not quiet:
        print("")

    # Finally, if present, save the ETag for later checking.
    etag = _getheader(response, 'ETag')
    if etag is not None:
        with open(target + '.etag', 'w+') as f:
            f.write(etag)

    return target
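
A hedged usage sketch (URL and description are illustrative); on a cache hit, the ETag or size/mtime checks return the existing path without re-downloading:

path = download('https://example.com/models/vgg16.npz', desc='VGG16 weights')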
Example #51
# -*- coding: utf-8 -*-
"""
Terminator plugin
-----------------

Leaflet.Terminator is a simple plug-in to the Leaflet library to overlay day and night regions on maps.
"""
try:
    from urllib.request import urlopen as _urlopen
except ImportError:
    from urllib import urlopen as _urlopen

from .plugin import Plugin

# As L.Terminator.js is not served over both HTTP and HTTPS, we need to embed it explicitly in the code.
_request = _urlopen("http://rawgithub.com/joergdietrich/Leaflet.Terminator/master/L.Terminator.js")
assert _request.getcode() == 200, "Error while loading Leaflet.Terminator.js"
_terminator_script = _request.read().decode('utf8')

class Terminator(Plugin):
    """Leaflet.Terminator is a simple plug-in to the Leaflet library to overlay day and night regions on maps."""
    def __init__(self):
        """Creates a Terminator plugin to append into a map with
        Map.add_plugin.

        Parameters
        ----------
        """
        super(Terminator, self).__init__()
        self.plugin_name = 'Terminator'
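
A hedged usage sketch, following the docstring's own pointer to Map.add_plugin (the Map class is assumed to come from the same package):

m = Map()
m.add_plugin(Terminator())  # overlays the current day/night regions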