def dispatch(self, request, *args, slug=None, **kwargs):
    self.slug = slug
    try:
        self.cookie_data = json.loads(
            url_unquote(request.COOKIES.get(self.cookie_name, '')))
    except (TypeError, ValueError):
        pass
    return super().dispatch(request, *args, **kwargs)

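# A hedged round-trip sketch of the cookie format dispatch() expects:
# the value stored under self.cookie_name is assumed to be a url-quoted
# JSON object, so quoting then unquoting must be lossless (url_quote and
# json are assumed available, as in other snippets in this file).
raw = url_quote(json.dumps({'sort': 'name', 'page': 2}))
assert json.loads(url_unquote(raw)) == {'sort': 'name', 'page': 2}
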
def view_title(filepath):
    """View an album's images"""
    filepath = Path(url_unquote(filepath))
    page_num = int(request.query.page or '1')
    resize = bool(int(request.query.resize or '0'))
    # constant for the url, to make "path/to/comic?query=data"
    _path_query = '{path!s}?{query!s}'
    album_path = root_path / filepath
    if is_useful_file(album_path):
        with ZipFile(str(album_path), 'r', ZIP_BZIP2) as zf:
            number_of_pages = len(list(filter(is_image_file, zf.namelist())))
        source = Path('/', 'static', 'compressed', filepath)
    elif is_image_album(album_path):
        number_of_pages = len(list(filter(is_image_file, album_path.iterdir())))
        source = Path('/', 'static', 'uncompressed', filepath)
    page_num = min(page_num, number_of_pages)
    return {
        'image': _path_query.format(
            path=url_quote(source.as_posix()),
            query=urlencode({'page': page_num, 'resize': int(resize)})),
        'next': _path_query.format(
            path=url_quote(filepath.name),
            query=urlencode({'page': min(number_of_pages, page_num + 1),
                             'resize': int(resize)})),
        'prev': _path_query.format(
            path=url_quote(filepath.name),
            query=urlencode({'page': max(1, page_num - 1),
                             'resize': int(resize)})),
        'up': url_quote(Path('/', 'browse', filepath).parent.as_posix() + '/'),
        'resize_link': {
            'href': _path_query.format(
                path=url_quote(filepath.name),
                query=urlencode({'page': page_num, 'resize': int(not resize)})),
            'title': 'original' if resize else 'smaller',
        },
    }

def list_dirs_and_files(dirname=''):
    """List links to useful directories and files in 'dirname'"""
    dirpath = Path(url_unquote(dirname))
    file_links = list()
    dir_links = list()
    if dirpath.parent != dirpath:
        dir_links.append({
            'href': Path('/', 'browse', dirpath.parent),
            'title': '..',
        })
    for child in sorted((root_path / dirpath).iterdir()):
        if is_useful_dir(child):
            dir_links.append({
                'href': child.relative_to(root_path).relative_to(dirpath),
                'title': child.name,
            })
        if is_useful_file(child) or is_image_album(child):
            file_links.append({
                'href': Path('/', 'view', dirpath, child.name),
                'title': child.name,
            })
    for link_list in (dir_links, file_links):
        for link in link_list:
            link['href'] = url_quote(link['href'].as_posix())
            if link in dir_links:
                link['href'] += '/'
    dir_and_file_links = dict()
    if len(file_links) != 0:
        dir_and_file_links['file_link_list'] = {'link_list': {'links': file_links}}
    if len(dir_links) != 0:
        dir_and_file_links['dir_link_list'] = {'link_list': {'links': dir_links}}
    return dir_and_file_links

def player_update(self, obj, player):
    # player name
    obj.part_text_set('player_name', player.label or player.name)

    # play/pause button
    if player.playback_status == 'Playing':
        obj.signal_emit('state,set,playing', '')
    else:
        obj.signal_emit('state,set,paused', '')

    # metadata
    txt = ''
    if 'xesam:title' in player.metadata:
        txt += '<title>%s</><br>' % player.metadata['xesam:title']
    if 'xesam:artist' in player.metadata:
        txt += '<tag>by</> %s<br>' % player.metadata['xesam:artist'][0]
    if 'xesam:album' in player.metadata:
        txt += '<tag>from</> %s<br>' % player.metadata['xesam:album']
    obj.part_text_set('metadata', txt)

    # cover image
    img = obj.content_unset('cover.swallow')
    if img:
        img.delete()
    if 'mpris:artUrl' in player.metadata:
        fname = url_unquote(player.metadata['mpris:artUrl'])
        fname = fname.replace('file://', '')
        try:
            img = evas.FilledImage(obj.evas, file=fname)
            obj.content_set('cover.swallow', img)
        except Exception:
            pass

def download(self, url=None, dest=None, est_size=None, show_progress=False):
    if url and check_if_file_exists(Path(url_unquote(url))):
        self.download_status['success'] = True
        self.download_status['message'] = 'File was accessible via local filesystem'
        xbmc.log(msg='IAGL: Game was found to exist on the local filesystem: %(url)s' % {'url': url},
                 level=xbmc.LOGDEBUG)
        self.download_status['updated_dest'] = Path(url_unquote(url))
    elif url and check_if_file_exists(url_unquote(url)):
        self.download_status['success'] = True
        self.download_status['message'] = 'File was accessible via Kodi Source'
        xbmc.log(msg='IAGL: Game was found to exist at a Kodi Source: %(url)s' % {'url': url},
                 level=xbmc.LOGDEBUG)
        self.download_status['updated_dest'] = url_unquote(url)
    else:
        self.download_status['success'] = False
        self.download_status['message'] = 'File was not accessible'
        xbmc.log(msg='IAGL: Game was not accessible: %(url)s' % {'url': url},
                 level=xbmc.LOGERROR)
    return self.download_status

def unquote(text):
    """Replace all percent-encoded entities in text."""
    while '%' in text:
        newtext = url_unquote(text)
        if newtext == text:
            break
        text = newtext
    return text

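# Usage sketch for the loop above: a double-encoded value is decoded
# repeatedly until the text stops changing (the sample strings are made up).
assert unquote('a%2520b') == 'a b'            # '%2520' -> '%20' -> ' '
assert unquote('no escapes here') == 'no escapes here'
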
def path(self):
    """Requested path.

    This works a bit like the regular path info in the WSGI
    environment, but always includes a leading slash, even if the
    URL root is accessed.

    :return: the percent-decoded request path
    """
    return url_unquote(self.environ.get('PATH_INFO', ''))

def show_outbound_msg(request, verification_code):
    '''Show the outbound message with the verification code.'''
    try:
        template = 'mail/verified_encrypted.html'
        result_headers = []
        results = []
        error_message = None
        email = request.user.email

        records = history.get_outbound_messages(email)
        if records:
            # narrow the messages to those matching the verification_code
            records = records.filter(
                verification_code=url_unquote(verification_code))
        if not records:
            try:
                # use the verification_code without unquoting it in case
                # they pasted it into a url field
                records = records.filter(verification_code=verification_code)
            except Exception:
                pass

        if records:
            results, private, private_signed, clear_signed, dkim_signed = \
                summarize_outbound_messages(records)
            main_headline, subheadline = get_verify_msg_headlines(
                'sent', private, private_signed, clear_signed, dkim_signed)
        else:
            main_headline = i18n('<font color="red">Not</font> Verified')
            subheadline = i18n(
                'Message not sent privately from {}'.format(email))
            error1 = NOT_SENT_PRIVATELY.format(
                email=email, verification_code=verification_code)
            error2 = TAMPERED_SENT_WARNING.format(email=email)
            error_message = '{} {}'.format(error1, error2)
            log_message(error_message)

        params = {
            'email': email,
            'main_headline': main_headline,
            'subheadline': subheadline,
            'results': results,
            'error_message': error_message,
        }
        response = render_to_response(
            template, params, context_instance=RequestContext(request))
    except Exception:
        record_exception()
        log_message('EXCEPTION - see syr.exception.log for details')
        response = HttpResponseRedirect('/mail/show_encrypted_history/')

    return response

def GenerateTarball(output_filename, members):
    """Given a tarball name and a sequence of filenames, creates a tarball
    containing the named files.
    """
    tarball = tarfile.open(output_filename, 'w')
    for filename in members:
        # A hacky convention to get around the spaces in filenames is to
        # urlencode them. So at this point we unescape those characters.
        scrubbed_filename = url_unquote(os.path.basename(filename))
        if scrubbed_filename.startswith(TEST_PREFIXES):
            scrubbed_filename = scrubbed_filename.split('_', 1)[1]
        tarball.add(filename, scrubbed_filename)
    tarball.close()

def group_by_dir(urlist):
    """Sort urls into groups based on shared url directory paths.

    Returns:
        dir_groups: dict mapping each directory prefix to a list of
        (url, filename) tuples.
    """
    dir_groups = {}
    for url in urlist:
        net_subdir, filename = url_unquote(url).rsplit('/', 1)
        if net_subdir in dir_groups:
            dir_groups[net_subdir].append((url, filename))
        else:
            dir_groups[net_subdir] = [(url, filename)]
    return dir_groups

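# Hypothetical usage of group_by_dir: URLs that share a (decoded)
# directory prefix land in the same bucket, keyed by that prefix.
groups = group_by_dir([
    'http://host/a/one%20file.txt',
    'http://host/a/two.txt',
    'http://host/b/three.txt',
])
assert groups['http://host/a'] == [
    ('http://host/a/one%20file.txt', 'one file.txt'),
    ('http://host/a/two.txt', 'two.txt'),
]
assert groups['http://host/b'] == [('http://host/b/three.txt', 'three.txt')]
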
def brushinfo_unquote(quoted):
    """Unquote a serialised string value from a brush field.

    >>> brushinfo_unquote(b"foo") == u'foo'
    True
    >>> brushinfo_unquote(b"foo%2fbar%20blah") == u'foo/bar blah'
    True
    >>> expected = u'Have a nice day \u263A'
    >>> brushinfo_unquote(b'Have%20a%20nice%20day%20%E2%98%BA') == expected
    True
    """
    if not isinstance(quoted, bytes):
        raise ValueError("Cannot unquote a non-bytes value")
    u8bytes = url_unquote(quoted)
    return unicode(u8bytes.decode("utf-8"))

def send_compressed(titlename):
    """Serve picture from archive, resizing if requested"""
    zip_path = Path(root_path, url_unquote(titlename)).with_suffix('.cbz')
    if zip_path.exists():
        page_num = int(request.query.page or '1')
        resize = bool(int(request.query.resize or '0'))
        with ZipFile(str(zip_path), 'r', ZIP_BZIP2) as zf:
            pages = sorted(filter(is_image_file, zf.namelist()))
            page_num = min(page_num, len(pages))
            page_name = pages[page_num - 1]
            mime, encoding = guess_type(page_name)
            response.set_header('Content-Type', mime)
            if resize:
                body = resize_image(zf.read(page_name))
                return body
            else:
                return zf.read(page_name)

def send_image(titlename):
    """Serve picture from directory, resizing if requested"""
    dir_path = Path(root_path, url_unquote(titlename))
    if dir_path.exists():
        page_num = int(request.query.page or '1')
        resize = bool(int(request.query.resize or '0'))
        pages = sorted(filter(is_image_file, dir_path.iterdir()))
        page_num = min(page_num, len(pages))
        page_name = pages[page_num - 1].name
        if resize:
            mime, encoding = guess_type(page_name)
            response.set_header('Content-Type', mime)
            with open(str(dir_path.joinpath(page_name)), mode='rb') as imfile:
                body = resize_image(imfile.read())
            return body
        else:
            return static_file(page_name, root=str(dir_path))

def translate_path(self, path):
    """Translate a /-separated PATH to the local filename syntax.

    Components that mean special things to the local file system
    (e.g. drive or directory names) are ignored.  (XXX They should
    probably be diagnosed.)
    """
    path = posixpath.normpath(url_unquote(path))
    words = path.split('/')
    words = filter(None, words)
    path = os.getcwd()
    for word in words:
        drive, word = os.path.splitdrive(word)
        head, word = os.path.split(word)
        if word in (os.curdir, os.pardir):
            continue
        path = os.path.join(path, word)
    return path

def _classify_link(self, link):
    # get extension, file name, download url;
    # parse the link to a file format name
    print("full link: {}".format(link))
    print("Base url {}".format(self.base_url))
    url_seg = url_unquote(link).replace(" ", "_").split('/')
    print(url_seg)
    if self.isFile.match(url_seg[-1]):
        # rsplit keeps multi-dot names like 'file.tar.gz' from raising
        _, ext = url_seg[-1].rsplit('.', 1)
        self.file_types.setdefault(ext, []).append((url_seg[-1], link))
    else:
        pass  # not a file

def test_file_permissions_on_moved_in_file_have_correct_value(
        self, store, perms, key, value):
    tmpfile = tempfile.NamedTemporaryFile(delete=False)
    tmpfile.write(value)
    tmpfile.close()
    os.chmod(tmpfile.name, 0o777)
    try:
        key = store.put_file(key, tmpfile.name)
        parts = urlparse(store.url_for(key))
        path = url_unquote(parts.path)
        mode = os.stat(path).st_mode
        mask = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
        assert mode & mask == perms
    finally:
        if os.path.exists(tmpfile.name):
            os.unlink(tmpfile.name)

def make_environ(self):
    request_url = url_parse(self.path)

    def shutdown_server():
        self.server.shutdown_signal = True

    url_scheme = self.server.ssl_context is None and 'http' or 'https'
    path_info = url_unquote(request_url.path)

    environ = {
        'wsgi.version': (1, 0),
        'wsgi.url_scheme': url_scheme,
        'wsgi.input': self.rfile,
        'wsgi.errors': sys.stderr,
        'wsgi.multithread': self.server.multithread,
        'wsgi.multiprocess': self.server.multiprocess,
        'wsgi.run_once': False,
        'werkzeug.server.shutdown': shutdown_server,
        'SERVER_SOFTWARE': self.server_version,
        'REQUEST_METHOD': self.command,
        'SCRIPT_NAME': '',
        'PATH_INFO': wsgi_encoding_dance(path_info),
        'QUERY_STRING': wsgi_encoding_dance(request_url.query),
        'CONTENT_TYPE': self.headers.get('Content-Type', ''),
        'CONTENT_LENGTH': self.headers.get('Content-Length', ''),
        'REMOTE_ADDR': self.client_address[0],
        'REMOTE_PORT': self.client_address[1],
        'SERVER_NAME': self.server.server_address[0],
        'SERVER_PORT': str(self.server.server_address[1]),
        'SERVER_PROTOCOL': self.request_version,
    }

    for key, value in self.headers.items():
        key = 'HTTP_' + key.upper().replace('-', '_')
        if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
            environ[key] = value

    if request_url.netloc:
        environ['HTTP_HOST'] = request_url.netloc

    return environ

def _process_attrs(self, raw_attrs):
    attrs = {}
    for raw in raw_attrs:
        attr = self.attr_map[raw]
        if attr[0] in ['author', 'lmkr', 'insertorder', 'start', 'align',
                       'bold', 'italic', 'underline', 'strikethrough']:
            attrs[attr[0]] = attr[1]
        elif attr[0] == 'list':
            attrs['list_depth'] = int(re.search(r'[0-9]+$', attr[1]).group(0))
            attrs['list_class'] = attr[1].strip(string.digits)
        elif attr[0].startswith('struct-'):
            s = attr[0].split('_')
            attrs['table_id'] = s[0].replace('struct-table', '')
            attrs['table_' + s[1][:3]] = s[1][3:]
        elif attr[0].startswith('link-'):
            # base64 encoded
            s = base64.b64decode(attr[0][5:]).decode('utf-8').split('-', 1)
            attrs['href_id'] = s[0]
            attrs['href'] = s[1]
        elif attr[0].startswith('image-'):
            s = attr[0].split('-', 2)
            # looks suspiciously like a hash
            attrs['image_id'] = s[1]
            # base64 encoded, urlencoded JSON
            attrs['image'] = json.loads(
                url_unquote(base64.b64decode(s[2]).decode('utf-8')))
        elif attr[0].startswith('annotation-'):
            # Some sort of opaque identifier; doesn't appear anywhere else
            # in the file and neither does my annotation text.
            attrs['annotation_id'] = attr[0].split('-', 1)[1]
        elif attr[0].startswith('font-'):
            if 'font' not in attrs:
                attrs['font'] = {}
            s = attr[0].split('-', 2)
            attrs['font'][s[1]] = s[2]
        else:
            self.logger.debug('unhandled attribute: %s', attr)
    return attrs

def parse_ticket(secret, ticket, ip, digest_algo=DEFAULT_DIGEST):
    """
    Parse the ticket, returning (timestamp, userid, tokens, user_data).

    If the ticket cannot be parsed, ``BadTicket`` will be raised with
    an explanation.
    """
    if isinstance(digest_algo, str):
        # correct specification of digest from hashlib or fail
        digest_algo = getattr(hashlib, digest_algo)
    digest_hexa_size = digest_algo().digest_size * 2
    ticket = ticket.strip('"')
    digest = ticket[:digest_hexa_size]
    try:
        timestamp = int(ticket[digest_hexa_size:digest_hexa_size + 8], 16)
    except ValueError as e:
        raise BadTicket('Timestamp is not a hex integer: %s' % e)
    try:
        userid, data = ticket[digest_hexa_size + 8:].split('!', 1)
    except ValueError:
        raise BadTicket('userid is not followed by !')
    userid = url_unquote(userid)
    if '!' in data:
        tokens, user_data = data.split('!', 1)
    else:
        # @@: Is this the right order?
        tokens = ''
        user_data = data
    expected = calculate_digest(ip, timestamp, secret, userid,
                                tokens, user_data, digest_algo)
    if expected != digest:
        raise BadTicket('Digest signature is not correct',
                        expected=(expected, digest))
    tokens = tokens.split(',')
    return (timestamp, userid, tokens, user_data)

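# Layout sketch of the ticket parse_ticket() slices up (the digest below
# is a dummy and would not verify against any real secret):
#
#   <digest hex><8-char hex timestamp><url-quoted userid>!<tokens>!<user_data>
#
sample = '0' * 32 + '5f3e1a2b' + 'alice%40example.com' + '!admin,staff!extra'
digest, ts_hex, rest = sample[:32], sample[32:40], sample[40:]
userid, data = rest.split('!', 1)
assert url_unquote(userid) == 'alice@example.com'
assert int(ts_hex, 16) == 0x5f3e1a2b
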
def _get_parsed_url(url):
    # type: (S) -> Url
    """This is a stand-in function for `urllib3.util.parse_url`

    The original function doesn't handle special characters very well;
    this simply splits out the authentication section, creates the
    parsed url, then puts the authentication section back in, bypassing
    validation.

    :return: The new, parsed URL object
    :rtype: :class:`~urllib3.util.url.Url`
    """
    try:
        parsed = urllib3_parse(url)
    except ValueError:
        scheme, _, url = url.partition("://")
        auth, _, url = url.rpartition("@")
        url = "{scheme}://{url}".format(scheme=scheme, url=url)
        parsed = urllib3_parse(url)._replace(auth=auth)
    if parsed.auth:
        return parsed._replace(auth=url_unquote(parsed.auth))
    return parsed

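# Hedged example of why _get_parsed_url keeps and decodes the auth
# section: a percent-escaped password survives parsing and comes back
# decoded (credentials here are made up; behavior assumes urllib3 1.x,
# which this shim targets).
parsed = _get_parsed_url('https://user:p%40ss@example.com/repo')
assert parsed.auth == 'user:p@ss'
assert parsed.host == 'example.com'
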
def __init__(self, request, panel_classes, global_panel_classes):
    self.panels = []
    self.global_panels = []
    self.request = request
    self.status = 200

    # Panels can be activated (more features), e.g. the Performance panel
    pdtb_active = url_unquote(request.cookies.get('pdtb_active', ''))
    activated = pdtb_active.split(';')  # XXX

    for panel_class in panel_classes:
        panel_inst = panel_class(request)
        if panel_inst.dom_id in activated and panel_inst.has_content:
            panel_inst.is_active = True
        self.panels.append(panel_inst)
    for panel_class in global_panel_classes:
        panel_inst = panel_class(request)
        if panel_inst.dom_id in activated and panel_inst.has_content:
            panel_inst.is_active = True
        self.global_panels.append(panel_inst)

def __call__(self, value):
    try:
        # if the URL does not misuse the '%' character
        if not self.GENERIC_URL.search(value):
            # if the URL is only composed of valid characters
            if self.GENERIC_URL_VALID.match(value):
                # Then split up the URL into its components and check on
                # the scheme
                scheme = url_split_regex.match(value).group(2)
                # Clean up the scheme before we check it
                if scheme is not None:
                    scheme = url_unquote(scheme).lower()
                # If the scheme really exists
                if scheme in self.allowed_schemes:
                    # Then the URL is valid
                    return value, None
                else:
                    # else, for the possible case of abbreviated URLs with
                    # ports, check to see if adding a valid scheme fixes
                    # the problem (but only do this if it doesn't have
                    # one already!)
                    if value.find('://') < 0 and None in self.allowed_schemes:
                        schemeToUse = self.prepend_scheme or 'http'
                        prependTest = self.__call__(schemeToUse + '://' + value)
                        # if the prepend test succeeded
                        if prependTest[1] is None:
                            # if prepending in the output is enabled
                            if self.prepend_scheme:
                                return prependTest
                            else:
                                # else return the original,
                                # non-prepended value
                                return value, None
    except Exception:
        pass
    # else the URL is not valid
    return value, translate(self.message)

def decleanId(id):
    """Reverse cleanId."""
    if id:
        id = id.replace('--', '\x00').replace('-', '%').replace('\x00', '-')
        return url_unquote(id)
    return ''

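# Quick sketch of the encoding decleanId reverses: '--' stands for a
# literal '-', and a lone '-' reintroduces the '%' of a percent escape.
# (The matching cleanId encoder is not shown in this file.)
assert decleanId('foo-20bar') == 'foo bar'   # '-20' -> '%20' -> ' '
assert decleanId('foo--bar') == 'foo-bar'    # '--' -> literal '-'
assert decleanId('') == ''
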
def path_info(self):
    return url_unquote(self.environ['PATH_INFO'])

def url_unquote_text(v, encoding="utf-8", errors="replace"):
    v = url_unquote(v)
    return v.decode(encoding, errors)

def get_password(self, unquote=False, include_token=True):
    # type: (bool, bool) -> str
    password = self.password if self.password else ""
    if password and unquote and self._password_is_quoted:
        password = url_unquote(password)
    return password

def _url_unquote(self, url_string):
    return url_unquote(url_string)

def test_deep_stats(alice):
    """
    create a directory, do deep-stats on it and prove the /operations/
    URIs work
    """
    resp = requests.post(
        util.node_url(alice.node_dir, "uri"),
        params={
            "format": "sdmf",
            "t": "mkdir",
            "redirect_to_result": "true",
        },
    )
    assert resp.status_code >= 200 and resp.status_code < 300

    # when creating a directory, we'll be re-directed to a URL
    # containing our writecap..
    uri = url_unquote(resp.url)
    assert 'URI:DIR2:' in uri
    dircap = uri[uri.find("URI:DIR2:"):].rstrip('/')
    dircap_uri = util.node_url(alice.node_dir, "uri/{}".format(url_quote(dircap)))

    # POST a file into this directory
    FILE_CONTENTS = u"a file in a directory"

    resp = requests.post(
        dircap_uri,
        data={
            u"t": u"upload",
        },
        files={
            u"file": FILE_CONTENTS,
        },
    )
    resp.raise_for_status()

    # confirm the file is in the directory
    resp = requests.get(
        dircap_uri,
        params={
            u"t": u"json",
        },
    )
    d = json.loads(resp.content)
    k, data = d
    assert k == u"dirnode"
    assert len(data['children']) == 1
    k, child = list(data['children'].values())[0]
    assert k == u"filenode"
    assert child['size'] == len(FILE_CONTENTS)

    # perform deep-stats on it...
    resp = requests.post(
        dircap_uri,
        data={
            u"t": u"start-deep-stats",
            u"ophandle": u"something_random",
        },
    )
    assert resp.status_code >= 200 and resp.status_code < 300

    # confirm we get information from the op .. after it's done
    tries = 10
    while tries > 0:
        tries -= 1
        resp = requests.get(
            util.node_url(alice.node_dir, u"operations/something_random"),
        )
        d = json.loads(resp.content)
        if d['size-literal-files'] == len(FILE_CONTENTS):
            print("stats completed successfully")
            break
        else:
            print("{} != {}; waiting".format(
                d['size-literal-files'], len(FILE_CONTENTS)))
            time.sleep(.5)

def read_gff(genome, gff, gff_file):
    insert_ftype = gff["ftype"]
    seqid_alias = gff["seqid_alias"]
    alias_attr = gff["alias_attr"]
    dbxref_attr = gff["dbxref_attr"]
    data_attr = gff["data_attr"]
    feature = {}
    insert_features = []

    print('Reading GFF file', flush=True)
    lineno = 0
    for line in gff_file:
        lineno += 1
        if line.startswith('#'):
            continue
        line = line.rstrip().split('\t')
        if len(line) < 9:
            raise Exception('GFF column error: line {}'.format(lineno))
        attributes = {
            keyval[0]: [url_unquote(value) for value in keyval[1].split(',')]
            for keyval in (itr.split('=')
                           for itr in line[COLUMN.ATTRIBUTES].split(';'))
        }
        ftype = line[COLUMN.TYPE]
        doc = {
            'genome': genome,
            'ftype': ftype,
            'region': (seqid_alias[line[COLUMN.SEQID]]
                       if line[COLUMN.SEQID] in seqid_alias
                       else line[COLUMN.SEQID]),
            'start': int(line[COLUMN.START]) - 1,
            'end': int(line[COLUMN.END]),
            'locrange': {
                'gte': int(line[COLUMN.START]) - 1,
                'lt': int(line[COLUMN.END]),
            },
        }

        # Location
        loc = {
            'start': int(line[COLUMN.START]) - 1,
            'end': int(line[COLUMN.END]),
        }
        strand = line[COLUMN.STRAND]
        if strand == '+':
            loc['strand'] = 1
        elif strand == '-':
            loc['strand'] = -1
        elif strand == '.':
            loc['strand'] = 0
        else:
            loc['strand'] = None
        phase = line[COLUMN.PHASE]
        loc['phase'] = int(phase) if phase in ("0", "1", "2") else None

        gffid = attributes['ID'][0]
        if 'Name' in attributes:
            # keep names as a list so aliases can be appended below
            doc['name'] = list(attributes['Name'])
            if alias_attr in attributes:
                doc['name'].extend(attributes[alias_attr])
        if dbxref_attr in attributes:
            doc['dbxref'] = attributes[dbxref_attr]

        # Data
        data = {}
        for key in attributes:
            if key in data_attr:
                data[data_attr[key]] = attributes[key]

        # Storing doc
        if gffid in feature:
            # feature[gffid] = {**feature[gffid], **doc}
            feature[gffid] = merge_two_dicts(feature[gffid], doc)
            if loc['start'] < feature[gffid]['start']:
                feature[gffid]['locrange']['gte'] = loc['start']
                feature[gffid]['start'] = loc['start']
            if loc['end'] > feature[gffid]['end']:
                feature[gffid]['locrange']['lt'] = loc['end']
                feature[gffid]['end'] = loc['end']
            feature[gffid]['loc'].append(loc)
            # feature[gffid]['data'] = {**feature[gffid]['data'], **data}
            feature[gffid]['data'] = merge_two_dicts(feature[gffid]['data'], data)
        else:
            doc['locrange'] = {
                'gte': loc['start'],
                'lt': loc['end'],
            }
            doc['start'] = loc['start']
            doc['end'] = loc['end']
            doc['loc'] = [loc]
            doc['data'] = data
            feature[gffid] = doc

        if 'Parent' in attributes:
            parent = attributes['Parent'][0]
            if 'child' not in feature[parent]:
                feature[parent]['child'] = []
            feature[parent]['child'].append(doc)
        if ftype in insert_ftype and 'name' in doc:
            insert_features.append(feature[gffid])

    # Return features to load
    return insert_features

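# Mini-sketch of the attribute decoding used in read_gff: the ninth GFF
# column is 'key=value1,value2;key2=...' with percent-encoded values
# (the sample column below is invented).
attrs_col = 'ID=gene1;Name=my%20gene;Dbxref=GO:0003677,GO:0005634'
attributes = {
    kv[0]: [url_unquote(v) for v in kv[1].split(',')]
    for kv in (item.split('=') for item in attrs_col.split(';'))
}
assert attributes['Name'] == ['my gene']
assert attributes['Dbxref'] == ['GO:0003677', 'GO:0005634']
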
def download(self, url=None, dest=None, est_size=None, show_progress=True):
    if url and dest:
        if self.cookies and isinstance(self.cookies, dict):
            domain = self.cookies.get('domain')
            for k, v in self.cookies.items():
                if k != 'domain':
                    self.session.cookies.set(k, v, domain=domain)
        xbmc.log(msg='IAGL: Attempting to download file', level=xbmc.LOGDEBUG)
        xbmc.log(msg='IAGL: URL: %(value)s' % {'value': url}, level=xbmc.LOGDEBUG)
        xbmc.log(msg='IAGL: Dest: %(value)s' % {'value': dest}, level=xbmc.LOGDEBUG)
        if show_progress:
            dp = xbmcgui.DialogProgress()
            description = next(iter([str(x) for x in [
                dest.name,
                url_unquote(os.path.split(url)[-1].split('%2F')[-1]),
            ] if x]), 'Unknown File')
            dp.create(loc_str(30376), description)
            dp.update(0, description)
        try:
            with self.session.get(url, verify=False, stream=True,
                                  timeout=self.timeout, headers=self.header) as self.r:
                self.r.raise_for_status()
                filesize = next(iter([int(x) for x in [
                    self.r.headers.get('Content-length'), est_size] if x]), 0)
                filesize_str = bytes_to_string_size(filesize)
                with xbmcvfs.File(str(dest), 'wb') as ff:
                    size = 0
                    last_time = time.time()
                    for chunk in self.r.iter_content(chunk_size=self.chunk_size):
                        ff.write(bytearray(chunk))
                        if show_progress and dp.iscanceled():
                            dp.close()
                            raise Exception('User Cancelled Download')
                        if show_progress:
                            # chunks may be a different size when streaming
                            size = size + len(chunk)
                            # add 1 byte to avoid division by zero
                            percent = int(100.0 * size / (filesize + 1))
                            now = time.time()
                            # only show progress updates at 1 second or
                            # greater intervals
                            if now - last_time > 1:
                                last_time = now
                                if filesize:
                                    dp.update(percent, '%(fn)s[CR]%(current_size)s / %(estimated_size)s' % {
                                        'current_size': bytes_to_string_size(size),
                                        'fn': description,
                                        'estimated_size': filesize_str})
                                else:
                                    dp.update(percent, '%(fn)s[CR]%(current_size)s / Unknown Size' % {
                                        'current_size': bytes_to_string_size(size),
                                        'fn': description})
        # the specific requests exceptions must be caught before
        # RequestException, which is their common base class
        except requests.exceptions.HTTPError as hexc:
            self.download_status['success'] = False
            self.download_status['message'] = 'Download HTTP error %(exc)s' % {'exc': hexc}
            xbmc.log(msg='IAGL: Download HTTP exception for %(url)s. HTTP Exception %(exc)s' % {'url': url, 'exc': hexc}, level=xbmc.LOGERROR)
        except requests.exceptions.ConnectionError as cexc:
            self.download_status['success'] = False
            self.download_status['message'] = 'Download Connection error %(exc)s' % {'exc': cexc}
            xbmc.log(msg='IAGL: Download connection exception for %(url)s. Connection Exception %(exc)s' % {'url': url, 'exc': cexc}, level=xbmc.LOGERROR)
        except requests.exceptions.Timeout as texc:
            self.download_status['success'] = False
            self.download_status['message'] = 'Download Timeout error %(exc)s' % {'exc': texc}
            xbmc.log(msg='IAGL: Download timeout exception for %(url)s. Timeout Exception %(exc)s' % {'url': url, 'exc': texc}, level=xbmc.LOGERROR)
        except requests.exceptions.RequestException as rexc:
            self.download_status['success'] = False
            if self.r.status_code == 403:
                self.download_status['message'] = 'Download Request Exception. Access is forbidden (login required).'
            else:
                self.download_status['message'] = 'Download Request Exception. See Kodi Log.'
            xbmc.log(msg='IAGL: Download request exception for %(url)s. Request Exception %(exc)s' % {'url': url, 'exc': rexc}, level=xbmc.LOGERROR)
        except Exception as exc:
            self.download_status['success'] = False
            self.download_status['message'] = 'Download failed or was cancelled'
            xbmc.log(msg='IAGL: Download exception for %(url)s. Exception %(exc)s' % {'url': url, 'exc': exc}, level=xbmc.LOGERROR)
        else:
            self.download_status['success'] = True
            self.download_status['message'] = 'Download complete'
        if show_progress:
            dp.close()
            del dp
        return self.download_status
    else:
        xbmc.log(msg='IAGL: Badly formed download request. URL %(url)s, Dest %(dest)s' % {'url': url, 'dest': dest}, level=xbmc.LOGDEBUG)
        return None

def make_environ(inp, host, port, script_name):
    """
    Take 'inp' as if it were HTTP-speak being received on host:port,
    and parse it into a WSGI-ok environment dictionary.  Return the
    dictionary.

    Set 'SCRIPT_NAME' from the 'script_name' input, and, if present,
    remove it from the beginning of the PATH_INFO variable.
    """
    #
    # parse the input up to the first blank line (or its end).
    #
    environ = {}

    method_line = inp.readline()
    if six.PY3:
        method_line = method_line.decode('ISO-8859-1')

    content_type = None
    content_length = None
    cookies = []

    for line in inp:
        if not line.strip():
            break

        k, v = line.strip().split(b':', 1)
        v = v.lstrip()
        # Make header value a "native" string. PEP 3333 requires that
        # string-like things in headers be of type `str`. Much of the
        # time this isn't a problem but the SimpleCookie library does
        # type checking against `type("")`.
        v = str(v.decode('ISO-8859-1'))

        #
        # take care of special headers, and for the rest, put them
        # into the environ with HTTP_ in front.
        #
        if k.lower() == b'content-type':
            content_type = v
        elif k.lower() == b'content-length':
            content_length = v
        elif k.lower() == b'cookie' or k.lower() == b'cookie2':
            cookies.append(v)
        else:
            h = k.upper()
            h = h.replace(b'-', b'_')
            environ['HTTP_' + h.decode('ISO-8859-1')] = v

        if debuglevel >= 2:
            print('HEADER:', k, v)

    #
    # decode the method line
    #
    if debuglevel >= 2:
        print('METHOD LINE:', method_line)
    method, url, protocol = method_line.split(' ')

    # Store the URI as requested by the user, without modification,
    # so that PATH_INFO munging can be corrected.
    environ['REQUEST_URI'] = url
    environ['RAW_URI'] = url

    # clean the script_name off of the url, if it's there.
    if not url.startswith(script_name):
        script_name = ''  # @CTB what to do -- bad URL. scrap?
    else:
        url = url[len(script_name):]

    url = url.split('?', 1)
    path_info = url_unquote(url[0])
    query_string = ""
    if len(url) == 2:
        query_string = url[1]

    if debuglevel:
        print("method: %s; script_name: %s; path_info: %s; query_string: %s" %
              (method, script_name, path_info, query_string))

    r = inp.read()
    inp = BytesIO(r)

    #
    # fill out our dictionary.
    #
    # In Python3 turn the bytes of the path info into a string of
    # latin-1 code points, because that's what the spec says we must
    # do to be like a server.  Later various libraries will be forced
    # to decode and then reencode to get the UTF-8 that everyone wants.
    if six.PY3:
        path_info = path_info.decode('latin-1')

    environ.update({
        "wsgi.version": (1, 0),
        "wsgi.url_scheme": "http",
        "wsgi.input": inp,  # to read for POSTs
        "wsgi.errors": sys.stderr,
        "wsgi.multithread": 0,
        "wsgi.multiprocess": 0,
        "wsgi.run_once": 0,
        "PATH_INFO": path_info,
        "REMOTE_ADDR": '127.0.0.1',
        "REQUEST_METHOD": method,
        "SCRIPT_NAME": script_name,
        "SERVER_NAME": host,
        "SERVER_PORT": port,
        "SERVER_PROTOCOL": protocol,
    })

    #
    # query_string, content_type & length are optional.
    #
    if query_string:
        environ['QUERY_STRING'] = query_string

    if content_type:
        environ['CONTENT_TYPE'] = content_type
        if debuglevel >= 2:
            print('CONTENT-TYPE:', content_type)
    if content_length:
        environ['CONTENT_LENGTH'] = content_length
        if debuglevel >= 2:
            print('CONTENT-LENGTH:', content_length)

    #
    # handle cookies.
    #
    if cookies:
        environ['HTTP_COOKIE'] = "; ".join(cookies)

    if debuglevel:
        print('WSGI environ dictionary:', environ)

    return environ

def get_username(self, unquote=False):
    # type: (bool) -> str
    username = self.username if self.username else ""
    if username and unquote and self._username_is_quoted:
        username = url_unquote(username)
    return username

def url_unquote_text(v, encoding='utf-8', errors='replace'):  # pragma: no cover
    v = url_unquote(v)
    return v.decode(encoding, errors)

def make_environ(inp, host, port, script_name):
    """
    Take 'inp' as if it were HTTP-speak being received on host:port,
    and parse it into a WSGI-ok environment dictionary.  Return the
    dictionary.

    Set 'SCRIPT_NAME' from the 'script_name' input, and, if present,
    remove it from the beginning of the PATH_INFO variable.
    """
    #
    # parse the input up to the first blank line (or its end).
    #
    environ = {}

    method_line = inp.readline()
    if six.PY3:
        method_line = method_line.decode('ISO-8859-1')

    content_type = None
    content_length = None
    cookies = []

    for line in inp:
        if not line.strip():
            break

        k, v = line.strip().split(b':', 1)
        v = v.lstrip()
        # Make header value a "native" string. PEP 3333 requires that
        # string-like things in headers be of type `str`. Much of the
        # time this isn't a problem but the SimpleCookie library does
        # type checking against `type("")`.
        v = str(v.decode('ISO-8859-1'))

        #
        # take care of special headers, and for the rest, put them
        # into the environ with HTTP_ in front.
        #
        if k.lower() == b'content-type':
            content_type = v
        elif k.lower() == b'content-length':
            content_length = v
        elif k.lower() == b'cookie' or k.lower() == b'cookie2':
            cookies.append(v)
        else:
            h = k.upper()
            h = h.replace(b'-', b'_')
            environ['HTTP_' + h.decode('ISO-8859-1')] = v

        if debuglevel >= 2:
            print('HEADER:', k, v)

    #
    # decode the method line
    #
    if debuglevel >= 2:
        print('METHOD LINE:', method_line)
    method, url, protocol = method_line.split(' ')

    # Store the URI as requested by the user, without modification,
    # so that PATH_INFO munging can be corrected.
    environ['REQUEST_URI'] = url
    environ['RAW_URI'] = url

    # clean the script_name off of the url, if it's there.
    if not url.startswith(script_name):
        script_name = ''  # @CTB what to do -- bad URL. scrap?
    else:
        url = url[len(script_name):]

    url = url.split('?', 1)
    path_info = url_unquote(url[0])
    query_string = ""
    if len(url) == 2:
        query_string = url[1]

    if debuglevel:
        print("method: %s; script_name: %s; path_info: %s; query_string: %s" %
              (method, script_name, path_info, query_string))

    r = inp.read()
    inp = BytesIO(r)

    #
    # fill out our dictionary.
    #
    # In Python3 turn the bytes of the path info into a string of
    # latin-1 code points, because that's what the spec says we must
    # do to be like a server.  Later various libraries will be forced
    # to decode and then reencode to get the UTF-8 that everyone wants.
    if six.PY3:
        path_info = path_info.decode('latin-1')

    environ.update({
        "wsgi.version": (1, 0),
        "wsgi.url_scheme": "http",
        "wsgi.input": inp,  # to read for POSTs
        "wsgi.errors": BytesIO(),
        "wsgi.multithread": 0,
        "wsgi.multiprocess": 0,
        "wsgi.run_once": 0,
        "PATH_INFO": path_info,
        "REMOTE_ADDR": '127.0.0.1',
        "REQUEST_METHOD": method,
        "SCRIPT_NAME": script_name,
        "SERVER_NAME": host,
        "SERVER_PORT": port,
        "SERVER_PROTOCOL": protocol,
    })

    #
    # query_string, content_type & length are optional.
    #
    if query_string:
        environ['QUERY_STRING'] = query_string

    if content_type:
        environ['CONTENT_TYPE'] = content_type
        if debuglevel >= 2:
            print('CONTENT-TYPE:', content_type)
    if content_length:
        environ['CONTENT_LENGTH'] = content_length
        if debuglevel >= 2:
            print('CONTENT-LENGTH:', content_length)

    #
    # handle cookies.
    #
    if cookies:
        environ['HTTP_COOKIE'] = "; ".join(cookies)

    if debuglevel:
        print('WSGI environ dictionary:', environ)

    return environ