def DownloadUpdate(self, file):
    """Download a single file from the SVN server into the temp update dir.

    :param file: repository-relative path of the file to fetch
    :returns: 1 on success, 0 if both the first attempt and one retry failed

    Side effects: appends the unquoted URL to self.DownloadedFiles on
    success, or to self.DownloadFailedFiles on failure.
    """
    self.log('Downloading: %s' % file)
    dirfile = os.path.join(self.UpdateTempDir, file)
    dirname, filename = os.path.split(dirfile)
    if not os.path.isdir(dirname):
        try:
            os.makedirs(dirname)
        except Exception:
            self.log('Error creating directory: ' + dirname)
    url = self.SVNPathAddress + urllib.quote(file)
    try:
        # NOTE: ".xbt" is an unescaped regex, so any char + "xbt" matches;
        # kept as-is to preserve behavior.
        if re.findall(".xbt", url):
            # .xbt files: read the expected size from the SVN log page first.
            self.totalsize = int(re.findall("File length: ([0-9]*)",
                                            urllib2.urlopen(url + "?view=log").read())[0])
            urllib.urlretrieve(url.decode("utf-8"), dirfile.decode("utf-8"))
        else:
            urllib.urlretrieve(url.decode("utf-8"), dirfile.decode("utf-8"))
        self.DownloadedFiles.append(urllib.unquote(url))
        return 1
    except Exception:
        # One retry after a short pause.
        try:
            time.sleep(2)
            if re.findall(".xbt", url):
                self.totalsize = int(re.findall("File length: ([0-9]*)",
                                                urllib2.urlopen(url + "?view=log").read())[0])
                urllib.urlretrieve(url.decode("utf-8"), dirfile.decode("utf-8"))
            else:
                urllib.urlretrieve(url.decode("utf-8"), dirfile.decode("utf-8"))
            # BUG FIX: the original retry path called urlretrieve a third
            # time unconditionally, downloading the file twice on retry.
            self.DownloadedFiles.append(urllib.unquote(url))
            return 1
        except Exception:
            self.log("Download failed: %s" % url)
            self.DownloadFailedFiles.append(urllib.unquote(url))
            return 0
def redirect(self, url):
    """Handle an HTTP redirect to *url* while probing a mirror.

    Fails the probe if the redirect points at a differently-named file,
    or if too many redirects have been followed; otherwise reconnects
    to the new URL.
    """
    # Restart the timeout clock for the new request.
    self.timeoutCall.reset(self.timeout)
    scheme, host, port, orig_path = _parse(self.url)
    scheme, host, port, new_path = _parse(url)
    # Compare only the (unquoted) final path component of old vs new URL.
    if (urllib.unquote(orig_path.split('/')[-1])
            != urllib.unquote(new_path.split('/')[-1])):
        # Server redirected us to a file which doesn't seem to be what we
        # requested.  It's likely to be a stupid server which redirects
        # instead of 404ing (https://launchpad.net/bugs/204460).
        self.failed(Failure(RedirectToDifferentFile(orig_path, new_path)))
        return
    try:
        if self.redirection_count >= MAX_REDIRECTS:
            raise InfiniteLoopDetected()
        self.redirection_count += 1
        logger = logging.getLogger('distributionmirror-prober')
        logger.debug('Got redirected from %s to %s' % (self.url, url))
        # XXX Guilherme Salgado 2007-04-23 bug=109223:
        # We can't assume url to be absolute here.
        self.setURL(url)
    except UnknownURLScheme as e:
        # Since we've got the UnknownURLScheme after a redirect, we need
        # to raise it in a form that can be ignored in the layer above.
        self.failed(UnknownURLSchemeAfterRedirect(url))
    except InfiniteLoopDetected as e:
        self.failed(e)
    else:
        # Only reconnect when setURL succeeded.
        self.connect()
def crack_action(params):
    """Dispatch a JS-bridge action.

    ``params`` is a sequence whose second element (params[1]) names the
    action; remaining elements are the action's arguments.
    """
    if params[1] == 'search':
        load_search(params[2])
    elif params[1] == 'choose_file':
        # Ask the user for a file, then call the JS callback with the path.
        callback = params[2]
        file_path = utils.open_file_chooser_dialog()
        webv.execute_script('%s("%s")' % (callback, file_path))
    elif params[1] == 'save_avatar':
        img_uri = urllib.unquote(params[2])
        avatar_file = urllib.unquote(params[3])
        avatar_path = os.path.join(config.AVATAR_CACHE_DIR, avatar_file)
        # Download only if not already cached under a matching basename.
        if not (os.path.exists(avatar_path) and avatar_file.endswith(img_uri[img_uri.rfind('/')+1:])):
            print 'Download:', img_uri , 'To' , avatar_path
            # Fetch in the background so the UI stays responsive.
            th = threading.Thread( target = save_file_proc, args=(img_uri, avatar_path))
            th.start()
    elif params[1] == 'log':
        # ANSI-colored console log line.
        print '\033[1;31;40m[%s]\033[0m %s' % (urllib.unquote(params[2]) ,urllib.unquote(params[3]))
    elif params[1] == 'paste_clipboard_text':
        webv.paste_clipboard();
    elif params[1] == 'set_clipboard_text':
        clipboard = gtk.clipboard_get()
        # Everything after the action name is the clipboard payload,
        # re-joined with '/' (it was split on '/' upstream).
        text = list(params)
        del text[0:2]
        clipboard.set_text('/'.join(text))
def get_objs_to_delete(self, req):
    """
    Will populate objs_to_delete with data from request input.
    :params req: a Swob request
    :returns: a list of the contents of req.body when separated by newline.
    :raises: HTTPException on failures
    """
    line = ''
    data_remaining = True
    objs_to_delete = []
    # A body is required: either an explicit length or chunked encoding.
    if req.content_length is None and \
            req.headers.get('transfer-encoding', '').lower() != 'chunked':
        raise HTTPLengthRequired(request=req)
    # Stream the body in bounded chunks, splitting on newlines as we go,
    # so an arbitrarily large body never has to be held in memory.
    while data_remaining:
        if '\n' in line:
            obj_to_delete, line = line.split('\n', 1)
            objs_to_delete.append(unquote(obj_to_delete))
        else:
            data = req.environ['wsgi.input'].read(MAX_PATH_LENGTH)
            if data:
                line += data
            else:
                data_remaining = False
                # Final partial line (no trailing newline).
                if line.strip():
                    objs_to_delete.append(unquote(line))
        if len(objs_to_delete) > self.max_deletes_per_request:
            raise HTTPRequestEntityTooLarge(
                'Maximum Bulk Deletes: %d per request' %
                self.max_deletes_per_request)
        # A single "line" longer than two path lengths cannot be a valid name.
        if len(line) > MAX_PATH_LENGTH * 2:
            raise HTTPBadRequest('Invalid File Name')
    return objs_to_delete
def PLAY(params):
    """Play the selected item, optionally queueing the rest of the playlist.

    When the "continue_play" addon setting is enabled, builds an XBMC
    video playlist starting from the selected URL; otherwise resolves
    just the selected item.
    """
    # -- get filter parameters
    par = Get_Parameters(params)
    # -- if requested continious play
    if Addon.getSetting("continue_play") == "true":
        # create play list
        pl = xbmc.PlayList(1)
        pl.clear()
        # -- get play list
        playlist = Get_PlayList(par.playlist)
        is_found = False
        for rec in playlist:
            name = rec["comment"].encode("utf-8")
            s_url = rec["file"]
            # -- add item to play list
            if s_url == par.url:
                is_found = True
            # Queue the selected item and everything after it.
            if is_found:
                i = xbmcgui.ListItem(name, path=urllib.unquote(s_url), thumbnailImage=par.img)
                i.setProperty("IsPlayable", "true")
                pl.add(s_url, i)
        xbmc.Player().play(pl)
    # -- play only selected item
    else:
        i = xbmcgui.ListItem(par.name, path=urllib.unquote(par.url), thumbnailImage=par.img)
        i.setProperty("IsPlayable", "true")
        xbmcplugin.setResolvedUrl(h, True, i)
def get(self):
    """OAuth2 callback handler: exchange the auth code and echo the result.

    NOTE(review): client_id/client_secret are hard-coded here; they look
    like development credentials and should live in configuration.
    """
    user = users.get_current_user()
    user_oauth = oauth.get_current_user()
    self.response.write(user_oauth)
    if user:
        client_id = "676481030478-0fi923mg6rbe1tqbvffr8n5ih56p63gg.apps.googleusercontent.com"
        client_secret = "AXSaN3iaVse0lL_GCRp7ioPQ"
        scope = urllib.unquote(self.request.get("scope")).decode("utf-8")
        redirect_uri = urllib.unquote(self.request.get("redirect_uri")).decode("utf-8")
        flow = Oauth2WebServerFlow(client_id, client_secret, scope,redirect_uri=redirect_uri)
        code = self.request.get("code")
        # redirect_uri is overwritten with the local dev URI before the
        # token exchange — presumably intentional for local testing.
        redirect_uri = "http://localhost:19080/oauth"
        grant_type = "authorization_code"
        form_fields = {
            "code" : code,
            "client_id" : client_id,
            "client_secret" : client_secret,
            "redirect_uri" : redirect_uri,
            "grant_type" : grant_type,
        }
        form_data = urllib.urlencode(form_fields)
        url_validator = "https://www.googleapis.com/oauth2/v1/tokeninfo"
        #url_validator = "https://www.googleapis.com/o/oauth2/token?access_token=" + code
        result = urlfetch.fetch(
            headers = {'Content-Type': 'application/x-www-form-urlencoded'},
            url = url_validator,
            payload = form_data,
            method = urlfetch.POST,
        )
        self.response.write(result.content)
def fromString(self, tagstring):
    """
    go from string to Tag class filled in
    @param tagstring: example "important customer:kristof"
    @type tagstring: string
    """
    tagstring=j.tools.text.hrd2machinetext(tagstring)
    if not tagstring:
        return
    tags = tagstring.split()
    for tag in tags:
        # "key:value" pairs go into self.tags; bare words become labels.
        if tag.find(':') > 0:
            key = tag.split(':',1)[0]
            value = tag.split(':',1)[1]
            key=unquote(key)
            value = unquote(j.tools.text.machinetext2hrd(value))
            # NOTE(review): the value is stored under both the lowercased
            # and the original key — looks intentional for case-insensitive
            # lookup, but worth confirming.
            self.tags[key.lower()] = value
            self.tags[key] = value
        else:
            self.labels.add(unquote(j.tools.text.machinetext2hrd(tag)))
    self.tagstring=tagstring
def filter_headers(headers, prefix):
    """Return headers whose names begin with *prefix*.

    The prefix is stripped from each matching name, and both the
    remaining name and the value are URL-unquoted.
    """
    return dict(
        (unquote(name[len(prefix):]), unquote(value))
        for name, value in headers.iteritems()
        if name.startswith(prefix))
def parse_args(args=""):
    """Parse a raw OAI-PMH query string into a dict of known arguments.

    :param args: raw "key=value&key=value" query string (may be "" or None)
    :returns: dict with the standard OAI-PMH keys; a repeated key is
        flagged with the value "Error"; unknown keys are passed through;
        a malformed item (no '=') resets 'verb' to "".
    """
    out_args = {
        "verb": "",
        "metadataPrefix": "",
        "from": "",
        "until": "",
        "set": "",
        "identifier": "",
        "resumptionToken": ""
    }
    if not args:
        return out_args
    for item in args.split('&'):
        keyvalue = item.split('=')
        if len(keyvalue) == 2:
            key, value = keyvalue
            # Replaced py2-only dict.has_key() with `in`; the two branches
            # that both stored the unquoted value are merged.
            if key in out_args and out_args[key] != "":
                # Duplicate argument: mark it invalid.
                out_args[key] = "Error"
            else:
                out_args[key] = urllib.unquote(value)
        else:
            out_args['verb'] = ""
    return out_args
def proxy_open(self, req, proxy, type):
    """Rewrite *req* to go through *proxy*, adding Basic auth if present.

    :param req: the urllib2 Request being opened
    :param proxy: proxy URL, possibly with embedded user:password
    :param type: scheme this handler was registered for
    :returns: None to let other handlers continue, or the response from
        re-opening the request when the proxy uses a different scheme.
    """
    orig_type = req.get_type()
    proxy_type, user, password, hostport = _parse_proxy(proxy)
    if proxy_type is None:
        proxy_type = orig_type
    if req.get_host() and self._proxy_bypass(req.get_host()):
        return None
    if user and password:
        # BUG FIX: credentials must be "user:password" (RFC 7617) before
        # base64 encoding; the original had a scrubbed placeholder string
        # here, which produced a useless Proxy-Authorization header.
        user_pass = '%s:%s' % (unquote(user), unquote(password))
        creds = base64.b64encode(user_pass).strip()
        req.add_header('Proxy-authorization', 'Basic ' + creds)
    hostport = unquote(hostport)
    req.set_proxy(hostport, proxy_type)
    if orig_type == proxy_type or orig_type == 'https':
        # let other handlers take care of it
        return None
    else:
        # need to start over, because the other handlers don't
        # grok the proxy's URL type
        # e.g. if we have a constructor arg proxies like so:
        # {'http': 'ftp://proxy.example.com'}, we may end up turning
        # a request for http://acme.example.com/a into one for
        # ftp://proxy.example.com/a
        return self.parent.open(req)
def _form_uri_parts(self, netloc, path):
    """Split credentials out of a Swift URI's netloc/path.

    Sets self.user and self.key from any "user:key@" prefix and returns
    the cleaned (netloc, path) pair.

    :raises exceptions.BadStoreUri: when credentials are malformed
    """
    if netloc != '':
        # > Python 2.6.1
        if '@' in netloc:
            creds, netloc = netloc.split('@')
        else:
            creds = None
    else:
        # Python 2.6.1 compat
        # see lp659445 and Python issue7904
        if '@' in path:
            creds, path = path.split('@')
        else:
            creds = None
        netloc = path[0:path.find('/')].strip('/')
        path = path[path.find('/'):].strip('/')
    if creds:
        cred_parts = creds.split(':')
        if len(cred_parts) < 2:
            reason = _("Badly formed credentials in Swift URI.")
            LOG.info(reason)
            raise exceptions.BadStoreUri(message=reason)
        # Everything after the last ':' is the key; the rest is the user
        # (the user itself may contain ':').
        key = cred_parts.pop()
        # BUG FIX: the separator was a scrubbed placeholder string; the
        # user parts must be re-joined with ':'.
        user = ':'.join(cred_parts)
        creds = urllib.unquote(creds)
        try:
            self.user, self.key = creds.rsplit(':', 1)
        except exceptions.BadStoreConfiguration:
            # Fall back to the individually-unquoted parts.
            self.user = urllib.unquote(user)
            self.key = urllib.unquote(key)
    else:
        self.user = None
        self.key = None
    return netloc, path
def basicauth_decode(encoded_str):
    """Decode an encrypted HTTP basic authentication string. Returns a tuple of
    the form (username, password), and raises a DecodeError exception if
    nothing could be decoded.
    """
    split = encoded_str.strip().split(' ')

    # If split is only one element, try to decode the username and password
    # directly.
    if len(split) == 1:
        try:
            username, password = b64decode(split[0]).split(':', 1)
        except (TypeError, ValueError):
            # Narrowed from a bare except: b64decode raises binascii.Error
            # (a ValueError) / TypeError on bad input, and the unpack
            # raises ValueError when there is no ':' separator.
            raise DecodeError

    # If there are only two elements, check the first and ensure it says
    # 'basic' so that we know we're about to decode the right thing. If not,
    # bail out.
    elif len(split) == 2:
        if split[0].strip().lower() == 'basic':
            try:
                username, password = b64decode(split[1]).split(':', 1)
            except (TypeError, ValueError):
                raise DecodeError
        else:
            raise DecodeError

    # If there are more than 2 elements, something crazy must be happening.
    # Bail.
    else:
        raise DecodeError

    return unquote(username), unquote(password)
def createMediaRequest(self,stream):
    """Build and send the media request for *stream*; return the parsed XML reply.

    Reads session cookies ('ftmu', 'fprt', 'ipid') to authenticate the
    request against self.base_url.
    """
    if stream == None:
        self.error_str = "No event-id present to create media request."
        # NOTE(review): bare `raise` with no active exception will itself
        # fail (TypeError/RuntimeError); an explicit exception type with
        # self.error_str would be clearer — confirm callers' expectations.
        raise
    # 'ftmu' cookie may be absent; treat that as no session.
    try:
        sessionKey = urllib.unquote(self.session.cookies['ftmu'])
    except:
        sessionKey = None
    # Query values
    query_values = {
        'contentId': self.content_id,
        'sessionKey': sessionKey,
        'fingerprint': urllib.unquote(self.session.cookies['fprt']),
        'identityPointId': self.session.cookies['ipid'],
        'playbackScenario': self.scenario,
        'subject': self.subject
    }
    # Build query
    url = self.base_url + urllib.urlencode(query_values)
    # And make the request
    req = urllib2.Request(url)
    response = urllib2.urlopen(req)
    reply = xml.dom.minidom.parse(response)
    return reply
def download(url, path_to_directory):
    """Download *url* into a per-file subdirectory of *path_to_directory*.

    WFS URLs get a filename derived from their "typename" parameter and a
    .zip extension; other URLs use the last path component. Zip archives
    are extracted in place; a file with a .zip name that is not actually
    a zip archive is deleted.
    """
    print "starting download with", url, path_to_directory
    if "geoserver/wfs" in url:
        # GeoServer WFS: name the file after the layer (typename param).
        filename = unquote(search("(?<=typename=)[^&]*",url).group(0)).replace(":","_")
        extension = "zip"
        filename_with_extension = filename + "." + extension
    else:
        filename_with_extension = unquote(url.split("?")[0].split("/")[-1])
        print "filename_with_extension = ", filename_with_extension
    filename = filename_with_extension.split(".")[0]
    print "filename is", filename
    print "filename_with_extension = ", filename_with_extension
    # Each download gets its own directory named after the file.
    path_to_directory_of_downloadable = path_to_directory + "/" + filename
    if not isdir(path_to_directory_of_downloadable):
        mkdir(path_to_directory_of_downloadable)
    path_to_downloaded_file = path_to_directory_of_downloadable + "/" + filename_with_extension
    # Skip the fetch when the file is already present.
    if not isfile(path_to_downloaded_file):
        urlretrieve(url, path_to_downloaded_file)
        print "saved file to ", path_to_downloaded_file
    if path_to_downloaded_file.endswith("zip"):
        if is_zipfile(path_to_downloaded_file):
            with zipfile.ZipFile(path_to_downloaded_file, "r") as z:
                z.extractall(path_to_directory_of_downloadable)
                print "unzipped", path_to_downloaded_file
        else:
            remove(path_to_downloaded_file)
            print "removed file because it wasn't a zip file eventhough had zip extension"
def render(self, request):
    """Stream a file to the client as an attachment.

    Expects 'file' (and optionally 'dir' or 'root') request arguments;
    falls back to resolving the bare filename against the HDD scope for
    backwards compatibility.
    """
    if 'dir' in request.args:
        dir = unquote(request.args['dir'][0])
    elif 'root' in request.args:
        dir = unquote(request.args['root'][0])
    else:
        dir = ''
    if 'file' in request.args:
        filename = unquote(request.args["file"][0])
        path = dir + filename
        #dirty backwards compatibility hack
        if not os_path.exists(path):
            path = resolveFilename(SCOPE_HDD, filename)
        print "[WebChilds.FileStreamer] path is %s" %path
        if os_path.exists(path):
            # ASCII-only basename for the Content-Disposition header.
            basename = filename.decode('utf-8', 'ignore').encode('ascii', 'ignore')
            if '/' in basename:
                basename = basename.split('/')[-1]
            request.setHeader("content-disposition", "attachment;filename=\"%s\"" % (basename))
            file = static.File(path, defaultType = "application/octet-stream")
            return file.render_GET(request)
        else:
            return resource.NoResource(message="file '%s' was not found" %(dir + filename)).render(request)
    else:
        return resource.NoResource(message="no file given with file=(unknown)").render(request)
    # Unreachable: both branches above return.
    return server.NOT_DONE_YET
def __init__(self, href_or_path, basedir=os.getcwdu(), is_path=True):
    """Represent a resource given either a filesystem path or an href.

    :param href_or_path: a path (when is_path) or an href/URL string
    :param basedir: directory relative paths are resolved against.
        NOTE(review): this default is evaluated once at import time, so
        later chdir() calls do not affect it — confirm that is intended.
    :param is_path: treat href_or_path as a filesystem path when True
    """
    from urllib import unquote
    self._href = None
    self._basedir = basedir
    self.path = None
    self.fragment = ''
    # Best-effort MIME detection; default to a generic binary type.
    try:
        self.mime_type = guess_type(href_or_path)[0]
    except:
        self.mime_type = None
    if self.mime_type is None:
        self.mime_type = 'application/octet-stream'
    if is_path:
        path = href_or_path
        if not os.path.isabs(path):
            path = os.path.abspath(os.path.join(basedir, path))
        if isinstance(path, str):
            # Decode byte paths using the filesystem encoding.
            path = path.decode(sys.getfilesystemencoding())
        self.path = path
    else:
        url = urlparse(href_or_path)
        if url[0] not in ('', 'file'):
            # Remote scheme: keep the href, no local path.
            self._href = href_or_path
        else:
            pc = url[2]
            if isinstance(pc, unicode):
                pc = pc.encode('utf-8')
            pc = unquote(pc).decode('utf-8')
            self.path = os.path.abspath(os.path.join(basedir, pc.replace('/', os.sep)))
            self.fragment = unquote(url[-1])
def _check_oob_iq(self, iq_event):
    """Validate the incoming jabber:iq:oob stanza for this file transfer.

    Checks addressing, the <url> node's attributes against self.file,
    the <desc> node, and any metadata data-forms; stores self.iq,
    self.url, self.host, self.filename and self.desc for later steps.
    """
    assert iq_event.iq_type == 'set'
    assert iq_event.connection == self.incoming
    self.iq = iq_event.stanza
    assert self.iq['to'] == self.contact_name
    query = self.iq.firstChildElement()
    assert query.uri == 'jabber:iq:oob'
    url_node = xpath.queryForNodes("/iq/query/url", self.iq)[0]
    assert url_node['type'] == 'file'
    assert url_node['size'] == str(self.file.size)
    assert url_node['mimeType'] == self.file.content_type
    self.url = url_node.children[0]
    _, self.host, self.filename, _, _, _ = urlparse.urlparse(self.url)
    # BUG FIX: this comparison's result was silently discarded in the
    # original; it must be asserted like every other check here.
    assert urllib.unquote(self.filename) == self.file.name
    desc_node = xpath.queryForNodes("/iq/query/desc", self.iq)[0]
    self.desc = desc_node.children[0]
    assert self.desc == self.file.description

    # Metadata forms
    forms = extract_data_forms(xpath.queryForNodes('/iq/query/x', self.iq))
    if self.service_name:
        assertEquals({'ServiceName': [self.service_name]},
                     forms[ns.TP_FT_METADATA_SERVICE])
    else:
        assert ns.TP_FT_METADATA_SERVICE not in forms
    if self.metadata:
        assertEquals(self.metadata, forms[ns.TP_FT_METADATA])
    else:
        assert ns.TP_FT_METADATA not in forms
def speaker():
    """Broadcast a chat message on behalf of the Discuz-authenticated user.

    Returns 'true' on success, 'false' when the user is unknown or has
    negative jiecao; responds 403 when the auth cookies are missing.
    """
    # Map cookie-name suffixes ('auth', 'saltkey') to their full names.
    idx = {
        k.split('_')[-1]: k
        for k in request.cookies if k.startswith(options.discuz_cookiepre)
    }
    if not ('auth' in idx and 'saltkey' in idx):
        response.status = 403
        return
    auth = unquote(request.get_cookie(idx['auth']))
    saltkey = unquote(request.get_cookie(idx['saltkey']))
    uid, pwd = Account.decode_cookie(auth, saltkey)
    user = Account.find(uid)
    if not user:
        return 'false'
    if user.jiecao < 0:
        return 'false'
    message = request.forms.get('message').decode('utf-8', 'ignore')
    username = user.username.decode('utf-8', 'ignore')
    interconnect.publish('speaker', [username, message])
    return 'true'
def _lookup(self, next, *rest):
    """Traverse one path component of the repository tree.

    Returns a (controller, remaining_path) pair: a file-browser
    controller when the component names a blob, or a new instance of
    this class for a subtree.

    :raises exc.HTTPNotFound: when the component is not in the tree
    """
    next = h.really_unicode(unquote(next))
    if not rest:
        # Might be a file rather than a dir
        filename = h.really_unicode(
            unquote(
                request.environ['PATH_INFO'].rsplit('/')[-1]))
        if filename:
            try:
                obj = self._tree[filename]
            except KeyError:
                raise exc.HTTPNotFound()
            if isinstance(obj, M.repository.Blob):
                return self.FileBrowserClass(
                    self._commit,
                    self._tree,
                    filename), rest
    elif rest == ('index', ):
        # 'index' is served by the actual filename taken from PATH_INFO.
        rest = (request.environ['PATH_INFO'].rsplit('/')[-1],)
    try:
        tree = self._tree[next]
    except KeyError:
        raise exc.HTTPNotFound
    return self.__class__(
        self._commit,
        tree,
        self._path + '/' + next,
        self), rest
def get_video(self, video=None):
    """Populate *video* (a DailymotionVideo) from the current page.

    Scrapes title, author, description and the media URL out of the
    page's player script; creates the video object when none is given.
    """
    if video is None:
        video = DailymotionVideo(self.group_dict['id'])
    div = self.parser.select(self.document.getroot(), 'div#content', 1)
    video.title = unicode(self.parser.select(div, 'span.title', 1).text).strip()
    video.author = unicode(self.parser.select(div, 'a.name, span.name', 1).text).strip()
    try:
        video.description = html2text(self.parser.tostring(self.parser.select(div, 'div#video_description', 1))).strip() or unicode()
    except BrokenPageError:
        video.description = u''
    for script in self.parser.select(self.document.getroot(), 'div.dmco_html'):
        # TODO support videos from anyclip, cf http://www.dailymotion.com/video/xkyjiv for example
        if 'id' in script.attrib and script.attrib['id'].startswith('container_player_') and \
           script.find('script') is not None:
            text = script.find('script').text
            # Prefer the addVariable("video", ...) form; fall back to the
            # "sdURL" JSON field in the unquoted script body.
            mobj = re.search(r'(?i)addVariable\(\"video\"\s*,\s*\"([^\"]*)\"\)', text)
            if mobj is None:
                mobj = re.search('"sdURL":.*?"(.*?)"', urllib.unquote(text))
                mediaURL = mobj.group(1).replace("\\", "")
            else:
                mediaURL = urllib.unquote(mobj.group(1))
            video.url = mediaURL
    video.set_empty_fields(NotAvailable)
    return video
def create_form(self, params, tg_errors=None):
    """Build the one2many popup form for the given request params.

    Generates a first form from view_params to locate the o2m widget,
    merges contexts (cookies are used as an IE workaround), then builds
    the final ViewForm with the required hidden fields.
    """
    params.id = params.o2m_id
    params.model = params.o2m_model
    params.view_mode = ['form', 'tree']
    params.view_type = 'form'
    #XXX: dirty hack to fix bug #401700
    if not params.get('_terp_view_ids'):
        params['_terp_view_ids'] = []
    # to get proper view, first generate form using the view_params
    vp = params.view_params
    form = tw.form_view.ViewForm(vp, name="view_form", action="/openo2m/save")
    cherrypy.request.terp_validators = {}
    wid = form.screen.widget.get_widgets_by_name(params.o2m)[0]
    # save view_params for later phazes
    vp = vp.make_plain('_terp_view_params/')
    hiddens = map(lambda x: tw.form.Hidden(name=x, default=ustr(vp[x])), vp)
    params.prefix = params.o2m
    params.views = wid.view
    # IE hack, get context from cookies (see o2m.js)
    o2m_context = {}
    parent_context = {}
    try:
        o2m_context = urllib.unquote(cherrypy.request.cookie['_terp_o2m_context'].value)
        parent_context = urllib.unquote(cherrypy.request.cookie['_terp_parent_context'].value)
        # Expire the cookies immediately so they are only consumed once.
        cherrypy.request.cookie['_terp_o2m_context']['expires'] = 0
        cherrypy.response.cookie['_terp_o2m_context']['expires'] = 0
        cherrypy.request.cookie['_terp_parent_context']['expires'] = 0
        cherrypy.response.cookie['_terp_parent_context']['expires'] = 0
    except:
        pass
    params.o2m_context = params.o2m_context or o2m_context
    params.parent_context = params.parent_context or parent_context
    # Merge parent and o2m contexts over the request context.
    ctx = params.context or {}
    ctx.update(params.parent_context or {})
    ctx.update(params.o2m_context or {})
    p, ctx = TinyDict.split(ctx)
    params.context = ctx or {}
    params.hidden_fields = [tw.form.Hidden(name='_terp_parent_model', default=params.parent_model),
                            tw.form.Hidden(name='_terp_parent_id', default=params.parent_id),
                            tw.form.Hidden(name='_terp_parent_context', default=ustr(params.parent_context)),
                            tw.form.Hidden(name='_terp_o2m', default=params.o2m),
                            tw.form.Hidden(name='_terp_o2m_id', default=params.id or None),
                            tw.form.Hidden(name='_terp_o2m_model', default=params.o2m_model),
                            tw.form.Hidden(name='_terp_o2m_context', default=ustr(params.o2m_context or {})),
                            tw.form.Hidden(name=params.prefix + '/__id', default=params.id or None)] + hiddens
    form = tw.form_view.ViewForm(params, name="view_form", action="/openo2m/save")
    form.screen.string = wid.screen.string
    return form
def findVideoFrameLink(page, data):
    """Find the URL of the main video iframe/frame embedded in *data*.

    Filters out known ad/chat frames and accepts only frames larger than
    300x300 (or sized "100%"). Returns an absolute URL joined against
    *page*, or None when no candidate frame is found.
    """
    minheight = 300
    minwidth = 300

    frames = findFrames(data)
    if not frames:
        return None

    # Primary pass: frames with height/width attributes.
    iframes = regexUtils.findall(
        data,
        "(frame(?![^>]*cbox\.ws)(?![^>]*Publi)(?![^>]*chat\d*\.\w+)(?![^>]*ad122m)(?![^>]*adshell)(?![^>]*capacanal)(?![^>]*blacktvlive\.com)[^>]*\sheight\s*=\s*[\"']*([\%\d]+)(?:px)?[\"']*[^>]*>)",
    )

    if iframes:
        for iframe in iframes:
            # "100%" counts as big enough.
            if iframe[1] == "100%":
                height = minheight + 1
            else:
                height = int(iframe[1])
            if height > minheight:
                m = regexUtils.findall(iframe[0], "[\"' ]width\s*=\s*[\"']*(\d+[%]*)(?:px)?[\"']*")
                if m:
                    if m[0] == "100%":
                        width = minwidth + 1
                    else:
                        width = int(m[0])
                    if width > minwidth:
                        m = regexUtils.findall(iframe[0], "['\"\s]src=[\"']*\s*([^>\"' ]+)\s*[>\"']*")
                        if m:
                            return urlparse.urljoin(urllib.unquote(page), m[0]).strip()

    # Alternative 1: dimensions given via inline CSS.
    iframes = regexUtils.findall(
        data,
        '(frame(?![^>]*cbox\.ws)(?![^>]*capacanal)(?![^>]*blacktvlive\.com)[^>]*["; ]height:\s*(\d+)[^>]*>)'
    )
    if iframes:
        for iframe in iframes:
            height = int(iframe[1])
            if height > minheight:
                m = regexUtils.findall(iframe[0], '["; ]width:\s*(\d+)')
                if m:
                    width = int(m[0])
                    if width > minwidth:
                        m = regexUtils.findall(iframe[0], '["; ]src=["\']*\s*([^>"\' ]+)\s*[>"\']*')
                        if m:
                            return urlparse.urljoin(urllib.unquote(page), m[0]).strip()

    # Alternative 2 (Frameset)
    m = regexUtils.findall(data, '<FRAMESET[^>]+100%[^>]+>\s*<FRAME[^>]+src="([^"]+)"')
    if m:
        return urlparse.urljoin(urllib.unquote(page), m[0]).strip()

    # Last resort: a fixed-size linked image (600x450).
    m = regexUtils.findall(
        data,
        '<a href="([^"]+)" target="_blank"><img src="[^"]+" height="450" width="600" longdesc="[^"]+"/></a>'
    )
    if m:
        return urlparse.urljoin(urllib.unquote(page), m[0]).strip()

    return None
def canonicalize_rootpath(rootpath):
    """Canonicalize a Subversion root path or URL.

    On POSIX systems, file:// URLs that point at the local host are
    converted to plain filesystem paths (local access is faster and more
    complete than remote). The result is asserted to be absolute.
    """
    # Try to canonicalize the rootpath using Subversion semantics.
    rootpath = _canonicalize_path(rootpath)

    # ViewVC's support for local repositories is more complete and more
    # performant than its support for remote ones, so if we're on a
    # Unix-y system and we have a file:/// URL, convert it to a local
    # path instead.
    if os.name == 'posix':
        rootpath_lower = rootpath.lower()
        if rootpath_lower in ['file://localhost',
                              'file://localhost/',
                              'file://',
                              'file:///'
                              ]:
            return '/'
        # Strip the scheme+host prefix, leaving the (unquoted) local path.
        if rootpath_lower.startswith('file://localhost/'):
            rootpath = os.path.normpath(urllib.unquote(rootpath[16:]))
        elif rootpath_lower.startswith('file:///'):
            rootpath = os.path.normpath(urllib.unquote(rootpath[7:]))

    # Ensure that we have an absolute path (or URL), and return.
    if not re.search(_re_url, rootpath):
        assert os.path.isabs(rootpath)
    return rootpath
def read_from_file(config_filename):
    """
    Reads an installed.lst file from a given location

    :param config_filename: the configuration file to read

    Each non-blank line is "name timestamp size quoted-source quoted-dest";
    parsed entries are added to the global installed_packages_list.
    """
    global installed_packages_list
    try:
        installed_list_file = open(config_filename)
    except IOError:
        # not a problem if not found in a location
        pass
    else:
        # read from file and
        # create a dictionary
        for line in installed_list_file:
            l = line.rstrip().split(' ')  # removes trailing whitespaces (\n)
            # BUG FIX: `if l:` was always true because ''.split(' ')
            # returns [''], so a blank (or short) line crashed with
            # IndexError on l[1]; require all five fields instead.
            if len(l) >= 5:
                installed_packages_list[l[0]] = \
                    this_package = package_info(
                        config_filename,
                        l[0],                     # name
                        l[1],                     # timestamp
                        l[2],                     # human-readable size
                        urllib.unquote(l[3]),     # source on the web
                        urllib.unquote(l[4]))     # where installed
            else:
                pass  # skip blank/malformed lines
def _show_Pic( self ):
    """Update the preview pane for the currently selected list item.

    Reads Title/Genre from the selected item's properties, shows the
    picture (or a sized placeholder when self.pic is empty), resizes the
    background dim and positions the two label controls below the image.
    """
    try:
        self.title = urllib.unquote(self.MainWindow.getListItem(self.MainWindow.getCurrentListPosition()).getProperty("Title"))
        self.info = urllib.unquote(self.MainWindow.getListItem(self.MainWindow.getCurrentListPosition()).getProperty("Genre"))
        #set Pic
        if self.pic != "":
            self.getControl( 20 ).setImage(self.pic)
            #self.getControl(21).setVisible(0)
            #self.getControl(20).setVisible(1)
        else:
            #self.getControl(21).setVisible(1)
            #self.getControl(20).setVisible(0)
            # No picture: fall back to a default-size frame.
            self.width = 425
            self.height = 350
        if self.width < 700:
            self.getControl( 20 ).setWidth(self.width)
            self.getControl( 20 ).setHeight(self.height)
        else:
            # Very wide images are clamped.
            self.getControl( 20 ).setWidth(500)
            self.getControl( 20 ).setHeight(100)
        #set Background Dim
        self.getControl( 19 ).setWidth(self.width+13)
        self.getControl( 19 ).setHeight(self.height+43)
        # Title/info labels sit just below the image.
        self.getControl( 206 ).setPosition(7,self.height+2)
        self.getControl( 207 ).setPosition(9,self.height+3)
        self.getControl( 206 ).setLabel(self.title)
        self.getControl( 207 ).setLabel(self.info)
    except:
        traceback.print_exc()
def ftp_open(self, req):
    """Open an FTP request: parse host/port/credentials and resolve the host.

    Raises IOError when the request has no host; URLError when the host
    cannot be resolved.
    """
    import ftplib
    import mimetypes
    host = req.get_host()
    if not host:
        raise IOError, ('ftp error', 'no host given')
    host, port = splitport(host)
    if port is None:
        port = ftplib.FTP_PORT
    else:
        port = int(port)

    # username/password handling
    user, host = splituser(host)
    if user:
        user, passwd = splitpasswd(user)
    else:
        passwd = None
    # All three components arrive URL-quoted.
    host = unquote(host)
    user = unquote(user or '')
    passwd = unquote(passwd or '')

    try:
        host = socket.gethostbyname(host)
    except socket.error, msg:
        raise URLError(msg)
def speaker():
    """Broadcast a paid chat message for the Discuz-authenticated member.

    Costs 10 credits; returns 'true' on success, 'false' when the member
    is unknown or has fewer than 10 credits, and responds 403 when the
    auth cookies are missing.
    """
    # Map cookie-name suffixes ('auth', 'saltkey') to their full names.
    idx = {
        k.split('_')[-1]: k
        for k in request.cookies if k.startswith(options.discuz_cookiepre)
    }
    if not ('auth' in idx and 'saltkey' in idx):
        response.status = 403
        return
    auth = unquote(request.get_cookie(idx['auth']))
    saltkey = unquote(request.get_cookie(idx['saltkey']))
    member = member_service.validate_by_cookie(auth, saltkey)
    if not member:
        return 'false'
    if member['credits'] < 10:
        return 'false'
    message = request.forms.get('message').decode('utf-8', 'ignore')
    username = member['username'].decode('utf-8', 'ignore')
    # Deduct the 10-credit fee before broadcasting.
    member_service.add_credit(member['uid'], 'credits', -10)
    Interconnect.publish('speaker', [username, message])
    return 'true'
def parse_mailto(mailto_str):
    """
    Interpret mailto-string

    :param mailto_str: the string to interpret. Must conform to :rfc:2368.
    :type mailto_str: str
    :return: the header fields and the body found in the mailto link as a tuple
             of length two
    :rtype: tuple(dict(str->list(str)), str)
    """
    # Anything that is not a mailto link yields no headers and no body.
    if not mailto_str.startswith('mailto:'):
        return (None, None)

    import urllib
    recipients, query = mailto_str[7:].partition('?')[::2]
    headers = {}
    body = u''

    to = urllib.unquote(recipients)
    if to:
        headers['To'] = [to]

    for param in query.split('&'):
        name, value = param.partition('=')[::2]
        name = name.capitalize()
        # 'Body' is special-cased; every other non-empty value becomes
        # a header field.
        if name == 'Body':
            body = urllib.unquote(value)
        elif value:
            headers[name] = [urllib.unquote(value)]

    return (headers, body)
def search(self, search_query):
    """
    Search in the store for tiddlers that match search_query.
    This is intentionally simple, slow and broken to encourage
    overriding.

    Yields Tiddler objects whose title or any line of any revision's
    file contains the (lowercased) query.
    """
    bag_filenames = self._bag_filenames()

    query = search_query.lower()

    for bagname in bag_filenames:
        bagname = urllib.unquote(bagname).decode('utf-8')
        tiddler_dir = self._tiddlers_dir(bagname)
        tiddler_files = self._files_in_dir(tiddler_dir)
        for tiddler_name in tiddler_files:
            tiddler = Tiddler(
                title=urllib.unquote(tiddler_name).decode('utf-8'),
                bag=bagname)
            try:
                revision_id = self.list_tiddler_revisions(tiddler)[0]
                # Title match is enough; no need to open the file.
                if query in tiddler.title.lower():
                    yield tiddler
                    continue
                tiddler_file = codecs.open(
                    self._tiddler_full_filename(tiddler, revision_id),
                    encoding='utf-8')
                for line in tiddler_file:
                    if query in line.lower():
                        yield tiddler
                        break
            except (OSError, NoTiddlerError), exc:
                # Broken tiddlers are skipped, not fatal.
                logging.warn('malformed tiddler during search: %s:%s',
                             bagname, tiddler_name)
def testPutModWhenError(self):
    """Test _PutMod.

    Verifies that a datastore error raised by entity.put() propagates
    out of _PutMod as dyn_man.db.Error.
    """
    mod_type = 'owner'
    target = 'foouser'
    pkg_name = 'Foo%20Pkg-1.2'
    install_types = ['optional_installs']
    mock_entity = self.mox.CreateMockAnything()

    # This code partially replicates self.c._ParseParameters.
    # It is hard to reuse that function for testing because it uses
    # self.request...
    self.c.mod_type = mod_type
    # Instead of dyn_man.models.MANIFEST_MOD_MODELS.get(mod_type, None)
    # plug our mock in immediately.
    self.c.model = self.mox.CreateMockAnything()
    self.c.target = urllib.unquote(target)
    self.c.pkg_name = urllib.unquote(pkg_name)
    self.c.key_name = '%s##%s' % (self.c.target, self.c.pkg_name)
    self.c.install_types = install_types
    self.c.manifests = []
    # End partially replicated section.

    self.c.model(key_name=self.c.key_name).AndReturn(mock_entity)
    self.mox.StubOutWithMock(
        dyn_man.models.BaseManifestModification, 'ResetModMemcache')
    # The datastore write fails.
    mock_entity.put().AndRaise(dyn_man.db.Error)

    self.mox.ReplayAll()
    self.assertRaises(dyn_man.db.Error, self.c._PutMod)
    self.mox.VerifyAll()
def list_filesystem(dir, path, trans_data, file_type, show_hidden):
    """
    It lists all file and folders within the given directory.

    Returns a dict keyed by filename with Path/file_type/Protected and
    Properties metadata; on any failure returns {'Code': 0, 'Error': msg}.
    On Windows with no storage dir and a root path, lists the drives.
    """
    Filemanager.suspend_windows_warning()
    is_show_hidden_files = show_hidden

    path = unquote(path)
    if hasattr(str, 'decode'):
        # Python 2 only: normalize to unicode.
        path = unquote(path).encode('utf-8').decode('utf-8')
    try:
        Filemanager.check_access_permission(dir, path)
    except Exception as e:
        Filemanager.resume_windows_warning()
        err_msg = u"Error: {0}".format(e)
        files = {'Code': 0, 'Error': err_msg}
        return files

    files = {}
    if (_platform == "win32" and (path == '/' or path == '\\'))\
            and dir is None:
        # Root of the filesystem on Windows: list the drives instead.
        drives = Filemanager._get_drives()
        for drive in drives:
            protected = 0
            path = file_name = u"{0}:".format(drive)
            try:
                drive_size = getDriveSize(path)
                drive_size_in_units = sizeof_fmt(drive_size)
            except Exception:
                # NOTE(review): when getDriveSize fails on the first drive,
                # drive_size_in_units is unbound below — confirm.
                drive_size = 0
            protected = 1 if drive_size == 0 else 0
            files[file_name] = {
                "Filename": file_name,
                "Path": path,
                "file_type": 'drive',
                "Protected": protected,
                "Properties": {
                    "Date Created": "",
                    "Date Modified": "",
                    "Size": drive_size_in_units
                }
            }
        Filemanager.resume_windows_warning()
        return files

    orig_path = Filemanager.get_abs_path(dir, path)

    if not path_exists(orig_path):
        Filemanager.resume_windows_warning()
        return {
            'Code': 0,
            'Error': gettext(u"'{0}' file does not exist.".format(path))
        }

    user_dir = path
    folders_only = trans_data['folders_only'] \
        if 'folders_only' in trans_data else ''
    files_only = trans_data['files_only'] \
        if 'files_only' in trans_data else ''
    supported_types = trans_data['supported_types'] \
        if 'supported_types' in trans_data else []
    orig_path = unquote(orig_path)

    try:
        mylist = [x for x in sorted(os.listdir(orig_path))]
        for f in mylist:
            protected = 0
            system_path = os.path.join(os.path.join(orig_path, f))

            # continue if file/folder is hidden (based on user preference)
            if not is_show_hidden_files and \
                    (is_folder_hidden(system_path) or f.startswith('.')):
                continue

            user_path = os.path.join(os.path.join(user_dir, f))
            created = time.ctime(os.path.getctime(system_path))
            modified = time.ctime(os.path.getmtime(system_path))
            file_extension = str(splitext(system_path))

            # set protected to 1 if no write or read permission
            if (not os.access(system_path, os.R_OK) or
                    not os.access(system_path, os.W_OK)):
                protected = 1

            # list files only or folders only
            if os.path.isdir(system_path):
                if files_only == 'true':
                    continue
                file_extension = u"dir"
                user_path = u"{0}/".format(user_path)
            else:
                # filter files based on file_type
                if file_type is not None and file_type != "*":
                    if folders_only or len(supported_types) > 0 and \
                            file_extension not in supported_types or \
                            file_type != file_extension:
                        continue

            # create a list of files and folders
            files[f] = {
                "Filename": f,
                "Path": user_path,
                "file_type": file_extension,
                "Protected": protected,
                "Properties": {
                    "Date Created": created,
                    "Date Modified": modified,
                    "Size": sizeof_fmt(getSize(system_path))
                }
            }
    except Exception as e:
        Filemanager.resume_windows_warning()
        if (hasattr(e, 'strerror') and
                e.strerror == gettext('Permission denied')):
            err_msg = u"Error: {0}".format(e.strerror)
        else:
            err_msg = u"Error: {0}".format(e)
        files = {'Code': 0, 'Error': err_msg}
    Filemanager.resume_windows_warning()
    return files
def extract_videos(video_id):
    """Return playable stream URLs for a YouTube video id.

    Result is a list of [label, url, ...] entries. Handles live HLS
    (hlsvp), DASH manifests on Kodi 17+ with inputstream.adaptive, and
    the classic url_encoded_fmt_stream_map, including decrypting
    obfuscated signatures ('s') via the player's JS with JSInterpreter.
    """
    # itag -> human-readable quality/container label.
    fmt_value = {
        5: "240p h263 flv", 6: "240p h263 flv", 18: "360p h264 mp4",
        22: "720p h264 mp4", 26: "???", 33: "???", 34: "360p h264 flv",
        35: "480p h264 flv", 36: "3gpp", 37: "1080p h264 mp4",
        38: "4K h264 mp4", 43: "360p vp8 webm", 44: "480p vp8 webm",
        45: "720p vp8 webm", 46: "1080p vp8 webm", 59: "480p h264 mp4",
        78: "480p h264 mp4", 82: "360p h264 3D", 83: "480p h264 3D",
        84: "720p h264 3D", 85: "1080p h264 3D", 100: "360p vp8 3D",
        101: "480p vp8 3D", 102: "720p vp8 3D"
    }
    url = 'http://www.youtube.com/get_video_info?video_id=%s&eurl=https://youtube.googleapis.com/v/%s&ssl_stream=1' % \
          (video_id, video_id)
    data = httptools.downloadpage(url).data
    video_urls = []
    params = dict(urlparse.parse_qsl(data))
    # Live streams expose a single HLS manifest; nothing else applies.
    if params.get('hlsvp'):
        video_urls.append(["(LIVE .m3u8) [youtube]", params['hlsvp']])
        return video_urls

    if config.is_xbmc():
        import xbmc
        xbmc_version = config.get_platform(True)['num_version']
        # DASH only works with inputstream.adaptive and unciphered streams.
        if xbmc_version >= 17 and xbmc.getCondVisibility('System.HasAddon(inputstream.adaptive)') \
                and params.get('dashmpd'):
            if params.get('use_cipher_signature', '') != 'True':
                video_urls.append(['mpd HD [youtube]', params['dashmpd'], 0, '', True])

    js_signature = ""
    youtube_page_data = httptools.downloadpage("http://www.youtube.com/watch?v=%s" % video_id).data
    params = extract_flashvars(youtube_page_data)
    if params.get('url_encoded_fmt_stream_map'):
        data_flashvars = params["url_encoded_fmt_stream_map"].split(",")
        for url_desc in data_flashvars:
            url_desc_map = dict(urlparse.parse_qsl(url_desc))
            if not url_desc_map.get("url") and not url_desc_map.get("stream"):
                continue
            try:
                key = int(url_desc_map["itag"])
                if not fmt_value.get(key):
                    continue
                # Three layouts: direct url, conn+stream (rtmp-style), or
                # stream alone.
                if url_desc_map.get("url"):
                    url = urllib.unquote(url_desc_map["url"])
                elif url_desc_map.get("conn") and url_desc_map.get("stream"):
                    url = urllib.unquote(url_desc_map["conn"])
                    if url.rfind("/") < len(url) - 1:
                        url += "/"
                    url += urllib.unquote(url_desc_map["stream"])
                elif url_desc_map.get("stream") and not url_desc_map.get("conn"):
                    url = urllib.unquote(url_desc_map["stream"])
                if url_desc_map.get("sig"):
                    # Plain signature: append directly.
                    url += "&signature=" + url_desc_map["sig"]
                elif url_desc_map.get("s"):
                    # Obfuscated signature: decrypt with the player JS.
                    sig = url_desc_map["s"]
                    if not js_signature:
                        # Locate, download and interpret the player script
                        # once; cache the decrypt function for later itags.
                        urljs = scrapertools.find_single_match(
                            youtube_page_data, '"assets":.*?"js":\s*"([^"]+)"')
                        urljs = urljs.replace("\\", "")
                        if urljs:
                            if not re.search(r'https?://', urljs):
                                urljs = urlparse.urljoin(
                                    "https://www.youtube.com", urljs)
                            data_js = httptools.downloadpage(urljs).data
                            from jsinterpreter import JSInterpreter
                            funcname = scrapertools.find_single_match(
                                data_js, '\.sig\|\|([A-z0-9$]+)\(')
                            if not funcname:
                                funcname = scrapertools.find_single_match(
                                    data_js, '["\']signature["\']\s*,\s*'
                                             '([A-z0-9$]+)\(')
                            jsi = JSInterpreter(data_js)
                            js_signature = jsi.extract_function(funcname)
                    signature = js_signature([sig])
                    url += "&signature=" + signature
                url = url.replace(",", "%2C")
                video_urls.append(["(" + fmt_value[key] + ") [youtube]", url])
            except:
                # A single bad itag entry must not abort the whole list.
                import traceback
                logger.info(traceback.format_exc())
    return video_urls
def getinfo(self, path=None, getsize=True, name=None, req=None):
    """
    Returns a JSON object containing information
    about the given file.

    On permission or existence errors the returned dict carries a
    non-positive 'Code' and an 'Error' message instead of raising.
    """
    path = unquote(path)
    if hasattr(str, 'decode'):
        # Python 2 only: normalize to unicode.
        path = unquote(path).encode('utf-8').decode('utf-8')

    if self.dir is None:
        self.dir = ""
    orig_path = u"{0}{1}".format(self.dir, path)

    try:
        Filemanager.check_access_permission(self.dir, path)
    except Exception as e:
        thefile = {
            'Filename': split_path(path)[-1],
            'FileType': '',
            'Path': path,
            'Error': gettext(u"Error: {0}".format(e)),
            'Code': 0,
            'Info': '',
            'Properties': {
                'Date Created': '',
                'Date Modified': '',
                'Width': '',
                'Height': '',
                'Size': ''
            }
        }
        return thefile

    user_dir = path
    thefile = {
        'Filename': split_path(orig_path)[-1],
        'FileType': '',
        'Path': user_dir,
        'Error': '',
        'Code': 1,
        'Info': '',
        'Properties': {
            'Date Created': '',
            'Date Modified': '',
            'Width': '',
            'Height': '',
            'Size': ''
        }
    }

    if not path_exists(orig_path):
        thefile['Error'] = gettext(
            u"'{0}' file does not exist.".format(path))
        thefile['Code'] = -1
        return thefile

    # Trailing slash or non-file path means a directory.
    if split_path(user_dir)[-1] == '/'\
            or os.path.isfile(orig_path) is False:
        thefile['FileType'] = 'Directory'
    else:
        thefile['FileType'] = splitext(user_dir)

    created = time.ctime(os.path.getctime(orig_path))
    modified = time.ctime(os.path.getmtime(orig_path))

    thefile['Properties']['Date Created'] = created
    thefile['Properties']['Date Modified'] = modified
    thefile['Properties']['Size'] = sizeof_fmt(getSize(orig_path))

    return thefile