def _session_request(session, uri, method):
    # Mount the file:// adapter so local URIs use the same code path as
    # remote ones, then send the request with a rotated User-Agent header.
    session.mount('file://', requests_file.FileAdapter())
    user_agent = random.choice(user_agents)
    headers = {'User-Agent': user_agent, 'Accept': 'application/json'}
    return session.request(method, uri, headers=headers, timeout=CONF.read_timeout)

def read_url(urlpath):
    session = requests.Session()
    session.mount('file://', requests_file.FileAdapter())
    response = session.get(urlpath, timeout=5)
    return response.text

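# A minimal, self-contained usage sketch for read_url() above; the temporary
# file and its contents are assumptions for illustration. requests itself has
# no file:// handler, so the mounted requests-file FileAdapter is what makes
# the local round trip work.
import pathlib
import tempfile

def _demo_read_url():
    with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as fd:
        fd.write('{"ok": true}')
    # pathlib builds a well-formed file:// URL from the temporary path
    print(read_url(pathlib.Path(fd.name).as_uri()))  # -> '{"ok": true}'
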
def _session_request(session, uri, method):
    session.mount('file://', requests_file.FileAdapter())
    user_agent = random.choice(user_agents)
    return session.request(
        method, uri,
        headers={'User-Agent': user_agent},
        timeout=cfg.CONF.read_timeout)

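# The _session_request() variants above assume module-level names that are not
# shown: a user_agents list and a CONF / cfg.CONF settings object (the cfg.CONF
# spelling suggests oslo.config-style configuration). A hedged sketch of those
# missing pieces, with placeholder values rather than the originals:
import random
import types

import requests
import requests_file

user_agents = [
    'Mozilla/5.0 (X11; Linux x86_64)',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64)',
]
cfg = types.SimpleNamespace(CONF=types.SimpleNamespace(read_timeout=10.0))
CONF = cfg.CONF  # placeholder so both variants above resolve their timeout

def fetch(uri):
    # a fresh session per call; _session_request() mounts the file:// adapter
    # and rotates the User-Agent before issuing the request
    with requests.Session() as session:
        return _session_request(session, uri, 'GET')
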
def _func_fetch(self, url, allow_file=False):
    session = requests.Session()
    if allow_file:
        # only enable file:// fetches when explicitly requested, since local
        # file access from templates is a security-sensitive capability
        session.mount('file://', requests_file.FileAdapter())
    try:
        response = session.get(url)
    except requests.exceptions.RequestException:
        self.logger.error('template failed to load url: ' + url)
        return None
    return response.text

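# Hedged usage sketch: _func_fetch() is a method, so a minimal host class with
# the logger attribute it expects is assumed here purely for illustration.
import logging

class _TemplateHost(object):
    def __init__(self):
        self.logger = logging.getLogger('template-host')

_TemplateHost.fetch = _func_fetch  # attach the function above as a method
host = _TemplateHost()
# file:// is only honored because allow_file=True mounts the adapter
text = host.fetch('file:///etc/hostname', allow_file=True)
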
def __init__(self, lookup_paths=None, dict_class=dict, verbose=False):
    self.cache = {}
    self.dict_class = dict_class
    self.lookup_paths = list(lookup_paths or [])
    self.session = requests.Session()
    self.session.mount('file://', requests_file.FileAdapter())
    self.session.mount('resource://', requests_resource.ResourceAdapter())
    #: whether or not to resolve relative paths using cwd
    self.rel_cwd_paths = True
    self.verbose = verbose  # TODO: use logging instead

def __init__(self, data, keys=None):
    """
    :param dict data: The formatted repository data.
    :param keys: The keys to use for verifying remote data.
    :type keys: :py:class:`~king_phisher.security_keys.SecurityKeys`
    """
    self.security_keys = keys or security_keys.SecurityKeys()
    """The :py:class:`~king_phisher.security_keys.SecurityKeys` used for verifying remote data."""
    created = data.get('created')
    if isinstance(created, str):
        self.created = dateutil.parser.parse(created)
    else:
        self.created = None
    self._req_sess = requests.Session()
    self._req_sess.mount('file://', requests_file.FileAdapter())
    self.description = data.get('description')
    self.homepage = data.get('homepage')
    """The URL of the homepage for this repository if it was specified."""
    for key in ('title', 'url-base'):
        if isinstance(data.get(key), str) and data[key]:
            continue
        raise KeyError('repository data is missing non-empty string key: ' + key)
    self.title = data['title']
    """The title string of this repository."""
    self.url_base = data['url-base']
    """The base URL string of files included in this repository."""
    self.collections = utilities.FreezableDict()
    """The dictionary of the different collection types included in this repository."""
    if 'collections-include' in data:
        # include-files is reversed so the dictionary can get .update()'ed
        # and the first seen will be the value kept
        for include in reversed(data['collections-include']):
            include_data = self._fetch_json(include)
            if 'collections' not in include_data:
                self.logger.warning("included file {0} missing 'collections' entry".format(include['path']))
                continue
            include_data = include_data['collections']
            for collection_type in include.get('types', COLLECTION_TYPES):
                if collection_type not in include_data:
                    continue
                collection = include_data.get(collection_type)
                if collection is None:
                    continue
                self._add_collection_data(collection_type, collection)
    if 'collections' in data:
        for collection_type in COLLECTION_TYPES:
            collection = data['collections'].get(collection_type)
            if collection is None:
                continue
            self._add_collection_data(collection_type, collection)
    item_count = sum(len(collection) for collection in self.collections.values())
    self.logger.debug("initialized catalog repository with {0} collection types and {1} total items".format(len(self.collections), item_count))
    for collection_type, collection in self.collections.items():
        collection.freeze()
        self.collections[collection_type] = Collection(self, collection_type, collection)
    self.collections.freeze()

def __init__(self, data, keys=None):
    """
    :param dict data: The formatted repository data.
    :param keys: The keys to use for verifying remote data.
    :type keys: :py:class:`~king_phisher.security_keys.SecurityKeys`
    """
    self.security_keys = keys or security_keys.SecurityKeys()
    """The :py:class:`~king_phisher.security_keys.SecurityKeys` used for verifying remote data."""
    self._req_sess = requests.Session()
    self._req_sess.mount('file://', requests_file.FileAdapter())
    self.description = data.get('description')
    self.homepage = data.get('homepage')
    """The URL of the homepage for this repository if it was specified."""
    self.id = data['id']
    """The unique identifier of this repository."""
    self.title = data['title']
    """The title string of this repository."""
    self.url_base = data['url-base']
    """The base URL string of files included in this repository."""
    self.collections = utilities.FreezableDict()
    """The dictionary of the different collection types included in this repository."""
    if 'collections-include' in data:
        # include-files is reversed so the dictionary can get .update()'ed
        # and the first seen will be the value kept
        for include in reversed(data['collections-include']):
            include_data = self._fetch_json(include)
            utilities.validate_json_schema(include_data, 'king-phisher.catalog.collections')
            include_data = include_data['collections']
            for collection_type in include.get('types', COLLECTION_TYPES):
                collection = include_data.get(collection_type)
                if collection is None:
                    continue
                self._add_collection_data(collection_type, collection)
    if 'collections' in data:
        for collection_type in COLLECTION_TYPES:
            collection = data['collections'].get(collection_type)
            if collection is None:
                continue
            self._add_collection_data(collection_type, collection)
    item_count = sum(len(collection) for collection in self.collections.values())
    self.logger.debug("initialized catalog repository with {0} collection types and {1} total items".format(len(self.collections), item_count))
    for collection_type, collection in self.collections.items():
        collection.freeze()
        self.collections[collection_type] = Collection(self, collection_type, collection)
    self.collections.freeze()

def resolve(self, doc_uri, obj):
    """Entry point for resolving JSON pointers.

    doc_uri is the URI of the JSON document in obj. obj can be either a dict
    or a list object (possibly of a subclass).

    Raises RefResolutionError on problems with the resolution.
    """
    self.cache = {doc_uri: obj}
    session = requests.Session()
    session.mount('file://', requests_file.FileAdapter())
    session.mount('resource://', requests_resource.ResourceAdapter())
    with session:
        return self._resolve(type(obj)(), obj, session)

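# Standalone sketch of the session pattern resolve() uses: mount the extra
# adapters once, then use the session as a context manager so connections are
# released when the resolution pass finishes. The path below is an assumption;
# requests has no native file:// support, hence the FileAdapter mount.
import requests
import requests_file

session = requests.Session()
session.mount('file://', requests_file.FileAdapter())
with session:
    resp = session.get('file:///etc/hosts')
    print(resp.status_code, len(resp.text))
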
def check_to_open(filename):
    """Check whether `filename` is a fetchable URI; return True if it is,
    False otherwise.
    """
    url_name = urllify(filename)
    with r.Session() as r_session:
        r_session.mount('file://', requests_file.FileAdapter())
        f = r_session.get(url_name, stream=True)
        is_ok = True
        try:
            f.raise_for_status()
        except Exception:
            is_ok = False
        finally:
            f.close()
    return is_ok

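# Hedged usage sketch for check_to_open(); the paths are assumptions, and the
# behavior relies on urllify() turning a bare path into a file:// URL that the
# mounted FileAdapter can serve:
print(check_to_open('/etc/hosts'))         # True on most Unix systems
print(check_to_open('/no/such/file.csv'))  # False: raise_for_status() fails
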
def open(self, mode='r'):
    """
    This downloads the URL to a temporary file, naming the file based on the URL.
    """
    if self.filename.startswith('ftp://'):
        raise NotImplementedError('finish ftp')
    elif self.filename.startswith('file://'):
        # if it's already a file, we can handle it directly
        file_to_open = self.filename[7:]
        with self._open_local(file_to_open, mode) as fd:
            yield fd
    else:
        file_to_open = None
        with requests.Session() as r_session:
            # get the remote url
            r_session.mount('file://', requests_file.FileAdapter())
            f = r_session.get(url=self.filename, stream=True, **self.kwargs)
            f.raise_for_status()
            local_filename = self.filename.split('://')[-1].split('/')[-1]
            # this has to be delete=False so that it can be re-opened with the
            # same filename to be read out again; name the temporary file
            # similar to the url so we can find it and tell if it needs to be
            # uncompressed
            with tempfile.NamedTemporaryFile(mode='wb', suffix=local_filename, delete=False) as fd:
                file_to_open = fd.name
                self.logger.debug("downloading %s to %s", self.filename, file_to_open)
                # write data into the file in streaming fashion
                for block in f.iter_content(1024):
                    fd.write(block)
        with self._open_local(file_to_open, mode) as fd:
            yield fd

def __init__(self, filename, *args, **kwargs):
    """Easy way to open multiple types of URL protocol (e.g. http:// and
    file://) as well as handling compressed content (e.g. .gz or .zip) if
    appropriate.

    In case you need to use proxies for the URL, pass them as the normal
    named arguments to requests.

    >>> # proxies = {}
    >>> # if Config.HAS_PROXY:
    ...     # self.proxies = {"http": Config.PROXY,
    ...     #                 "https": Config.PROXY}
    >>> # with URLZSource('http://var.foo/noname.csv', proxies=proxies).open() as f:
    """
    self._log = logging.getLogger(__name__)
    self.filename = urllify(filename)
    self.args = args
    self.kwargs = kwargs
    self.proxies = None
    self.r_session = r.Session()
    self.r_session.mount('file://', requests_file.FileAdapter())

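# Hedged usage sketch for URLZSource: open() above is a generator that yields
# a file object, so in the original class it is presumably decorated with
# contextlib.contextmanager. The URL below is an assumption; thanks to
# urllify() and the file:// adapter, a bare local path would work the same way.
with URLZSource('file:///tmp/data.csv').open() as f:
    for line in f:
        print(line.rstrip())
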
def from_url(cls, url, keys=None, encoding='utf-8'):
    """
    Initialize a new :py:class:`.Catalog` object from a resource at the
    specified URL.

    :param str url: The URL to the catalog data to load.
    :param keys: The keys to use for verifying remote data.
    :type keys: :py:class:`~king_phisher.security_keys.SecurityKeys`
    :param str encoding: The encoding of the catalog data.
    :return: The new catalog instance.
    :rtype: :py:class:`.Catalog`
    """
    keys = keys or security_keys.SecurityKeys()
    req_sess = requests.Session()
    req_sess.mount('file://', requests_file.FileAdapter())
    cls.logger.debug('fetching catalog from: ' + url)
    resp = req_sess.get(url)
    data = resp.content.decode(encoding)
    data = serializers.JSON.loads(data)
    keys.verify_dict(data, signature_encoding='base64')
    return cls(data, keys=keys)

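# Hedged usage sketch (the URL is an assumption): because from_url() mounts
# requests-file's FileAdapter, a locally stored, signed catalog file can be
# loaded through the same path as a remote one, which is convenient for
# development and testing.
catalog = Catalog.from_url('file:///tmp/catalog.json')
# keys.verify_dict() raises on a bad base64 signature, so reaching this point
# means the catalog data passed verification.
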
def __init__(self, rootfs_path, rootfs_url, rootfs_sha1=None, rootfs_md5=None):
    """
    Initialize the rootfs instance.

    Creates a root file system in the specified directory, with the
    appropriate properties for the work at hand.

    :param:`rootfs_path` location of rootfs
    :param:`rootfs_url` url to initial rootfs location
    :param:`rootfs_sha1` SHA1 digest of rootfs tarball
    :param:`rootfs_md5` MD5 digest of rootfs tarball
    """
    # Create temporary file for fakeroot state
    self._fakeroot_state = tempfile.NamedTemporaryFile()

    # Create temporary directory for rootfs:
    # if rootfs_path is None, use a temporary directory;
    # otherwise use the supplied path.
    if rootfs_path is None:
        self._rootfs_tmpdir = TemporaryDirectory()
        self._rootpath = self._rootfs_tmpdir.name
    else:
        self._rootpath = rootfs_path
        shutil.rmtree(self._rootpath, ignore_errors=True)
        self._my_mkdir(self._rootpath)

    # Use temporary file to hold incoming rootfs tar
    with tempfile.NamedTemporaryFile() as fd_:
        # request the specified archive
        print("fetching %s ..." % rootfs_url)
        s = requests.Session()
        s.mount('file://', requests_file.FileAdapter())
        resp = s.get(rootfs_url, stream=True)
        if not resp.status_code == requests.codes.ok:
            print(".remote fetch failed for %s : %d." % (rootfs_url, resp.status_code), file=sys.stderr)
            print(resp.headers['status'], file=sys.stderr)
            resp.raise_for_status()

        chunk_size = 4096
        md5 = hashlib.md5()
        sha1 = hashlib.sha1()
        for chunk in resp.iter_content(chunk_size):
            md5.update(chunk)
            sha1.update(chunk)
            fd_.write(chunk)
        fd_.flush()

        # Validate MD5 digest
        if rootfs_md5:
            if rootfs_md5 != md5.hexdigest():
                raise OpxrootfsError("MD5 validation failed: got %s, expected %s" % (md5.hexdigest(), rootfs_md5))

        # Validate SHA1 digest
        if rootfs_sha1:
            if rootfs_sha1 != sha1.hexdigest():
                raise OpxrootfsError("SHA1 validation failed: got %s, expected %s" % (sha1.hexdigest(), rootfs_sha1))

        # load the initial file system
        self.tar_in(fd_.name)

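# Hedged usage sketch: the class name Opxrootfs is inferred from OpxrootfsError
# and is an assumption, as are the path, URL, and digest below. The file://
# mount lets a locally built tarball flow through the same
# fetch / hash / validate path as a remote image.
rootfs = Opxrootfs(
    rootfs_path='/tmp/rootfs',
    rootfs_url='file:///srv/images/rootfs.tar.gz',
    rootfs_sha1='0000000000000000000000000000000000000000',  # placeholder digest
)
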
def __init__(self):
    self.session = requests.Session()
    self.session.mount("file://", requests_file.FileAdapter())

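# Self-contained sketch of the pattern every snippet above shares: requests
# has no built-in file:// handler, so requests-file's FileAdapter is mounted
# on a Session to route file:// URLs through the normal requests API. The
# temporary file is an assumption for illustration.
import pathlib
import tempfile

import requests
import requests_file

session = requests.Session()
session.mount('file://', requests_file.FileAdapter())

with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as fd:
    fd.write('hello')
resp = session.get(pathlib.Path(fd.name).as_uri())
assert resp.status_code == 200 and resp.text == 'hello'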