def info_alumno_cra():
    """Return JSON with distance and estimated time for a (CRA, municipality) pair.

    Reads the ``filtroCRA`` and ``filtroMuni`` request arguments, queries the
    latest-year rows of educ_cra_evol/trayecto, and returns a JSON object with
    ``cra_id``, ``id_mun``, ``distancia`` and ``tiempo_estimado`` (in minutes),
    or the plain string "No existe ese trayecto" when no row matches.
    """
    cursor = configuracion.conexion().cursor()

    # Capturamos los diferentes filtros (missing args -> empty string)
    filtroCRA = ''
    if request.args.get('filtroCRA'):
        filtroCRA = url2pathname(request.args.get('filtroCRA')).encode('utf-8')
    filtroMuni = ''
    if request.args.get('filtroMuni'):
        filtroMuni = url2pathname(request.args.get('filtroMuni')).encode('utf-8')

    # Parameterized query: the original concatenated request data straight
    # into the SQL string, which is an SQL-injection hole.
    query = (
        "SELECT to_number(REPLACE(SUBSTRING(distancia from 1 for (char_length(distancia)-3)),',','.'), '99999.999'),"
        " to_number(REPLACE(SUBSTRING(tiempo_estimado from 1 for (char_length(tiempo_estimado)-19)),',','.'), '99999.999')/60,"
        " educ_cra_evol.año"
        " FROM public.educ_cra_evol, public.trayecto"
        " WHERE educ_cra_evol.id_mun = trayecto.id_mun"
        " AND educ_cra_evol.id_cra = trayecto.cra_id"
        " AND trayecto.año = (SELECT MAX(año) FROM public.trayecto)"
        " AND educ_cra_evol.año = (SELECT MAX(año) FROM public.educ_cra_evol)"
        " AND educ_cra_evol.id_cra = %s AND educ_cra_evol.id_mun = %s;"
    )
    cursor.execute(query, (str(filtroCRA).strip(), str(filtroMuni).strip()))
    resultado = cursor.fetchone()
    cursor.close()

    if resultado is not None:
        info_alumno_cra = {
            'cra_id': str(filtroCRA).strip(),
            'id_mun': str(filtroMuni).strip(),
            'distancia': resultado[0],
            'tiempo_estimado': resultado[1],
        }
        return json.dumps(info_alumno_cra)
    else:
        return "No existe ese trayecto"
def rutas_para_scrapear():
    """Render scrapeo.html with the origin municipality and destination CRA
    (ids, entity-escaped names, coordinates) for the requested trayecto, or
    return the plain string "No existe ese trayecto" when the pair is unknown.
    """

    def _escapar(texto):
        # Replace special characters with entity names (without the leading
        # '&', exactly as the original chained .replace() calls produced —
        # the template apparently re-adds the ampersand).
        reemplazos = (
            ('á', 'aacute;'), ('é', 'eacute;'), ('í', 'iacute;'),
            ('ó', 'oacute;'), ('ú', 'uacute;'), ('Á', 'Aacute;'),
            ('É', 'Eacute;'), ('Í', 'Iacute;'), ('Ó', 'Oacute;'),
            ('Ú', 'Uacute;'), ('"', 'quot;'), ('<', 'lt;'), ('>', 'gt;'),
            ('¿', 'iquest;'), ('¡', 'iexcl;'), ('Ñ', 'Ntilde;'),
            ('ñ', 'ntilde;'), ('º', 'ordm;'), ('ª', 'ordf;'),
            ('#', 'almohadilla;'), ('ü', 'uuml;'),
        )
        for viejo, nuevo in reemplazos:
            texto = texto.replace(viejo, nuevo)
        return texto

    cursor = configuracion.conexion().cursor()

    # Capturamos los diferentes filtros (missing args -> empty string)
    filtroCRA = ''
    if request.args.get('filtroCRA'):
        filtroCRA = url2pathname(request.args.get('filtroCRA')).encode('utf-8')
    filtroMuni = ''
    if request.args.get('filtroMuni'):
        filtroMuni = url2pathname(request.args.get('filtroMuni')).encode('utf-8')

    # Parameterized query: the original concatenated request data straight
    # into the SQL string, which is an SQL-injection hole.
    query = (
        "SELECT educ_cra.id_cra, educ_cra.cra, educ_cra.lat, educ_cra.lon,"
        " a_municipios.id_mun, a_municipios.municipio, a_municipios.lat,"
        " a_municipios.lon, educ_cra_evol.año"
        " FROM public.educ_cra_evol, public.educ_cra, public.a_municipios"
        " WHERE educ_cra.id_cra = educ_cra_evol.id_cra"
        " AND a_municipios.id_mun = educ_cra_evol.id_mun"
        " AND año = (SELECT MAX(año) FROM educ_cra_evol)"
        " AND educ_cra_evol.id_mun = %s AND educ_cra_evol.id_cra = %s;"
    )
    cursor.execute(query, (str(filtroMuni).strip(), str(filtroCRA).strip()))
    resultado = cursor.fetchone()
    cursor.close()

    if resultado is None:
        return "No existe ese trayecto"

    cra = {
        'id': resultado[0],
        'name': _escapar(resultado[1]),
        'latlng': [resultado[2], resultado[3]],
    }
    municipio = {
        'id': resultado[4],
        'name': _escapar(resultado[5]),
        'latlng': [resultado[6], resultado[7]],
    }
    datos = {'origen': municipio, 'destino': cra}
    return render_template('scrapeo.html', data=datos)
def onDND(self, list, context, x, y, dragData, dndId, time):
    """ External Drag'n'Drop: convert the dropped URIs to tracks and insert
    them at the drop position (or append when there is no position). """
    import urllib

    if dragData.data == '':
        # Nothing was actually dropped; reject the drop.
        context.finish(False, False, time)
        return

    # A list of filenames, without 'file://' at the beginning
    if dndId == consts.DND_POGO_URI:
        tracks = media.getTracks([urllib.url2pathname(uri) for uri in dragData.data.split()])
    # A list of filenames starting with 'file://'
    elif dndId == consts.DND_URI:
        tracks = media.getTracks([urllib.url2pathname(uri)[7:] for uri in dragData.data.split()])
    else:
        # Only the two URI target types above are registered for this widget.
        assert False

    # dropInfo is tuple (path, drop_pos)
    dropInfo = list.get_dest_row_at_pos(x, y)

    # Insert the tracks, but beware of the AFTER/BEFORE mechanism used by GTK
    if dropInfo is None:
        self.insert(tracks, playNow=False, highlight=True)
    else:
        path, drop_mode = dropInfo
        iter = self.tree.store.get_iter(path)
        self.insert(tracks, iter, drop_mode, playNow=False, highlight=True)

    # We want to allow dropping tracks only when we are sure that no dir is
    # selected. This is needed for dnd from nautilus.
    self.tree.dir_selected = True
    context.finish(True, False, time)
def onDND(self, list, context, x, y, dragData, dndId, time):
    """ External Drag'n'Drop: build a track list from the dropped data and
    insert it at the drop row (or append when there is none). """
    import urllib

    if dragData.data == '':
        context.finish(False, False, time)
        return

    # A list of filenames, without 'file://' at the beginning
    if dndId == consts.DND_DAP_URI:
        tracks = media.getTracks([urllib.url2pathname(uri) for uri in dragData.data.split()])
    # A list of filenames starting with 'file://'
    elif dndId == consts.DND_URI:
        tracks = media.getTracks([urllib.url2pathname(uri)[7:] for uri in dragData.data.split()])
    # A list of serialized tracks
    elif dndId == consts.DND_DAP_TRACKS:
        tracks = [track.unserialize(serialTrack) for serialTrack in dragData.data.split('\n')]
    else:
        # Unknown drop type: the original fell through and crashed with a
        # NameError on 'tracks' below; reject the drop explicitly instead.
        context.finish(False, False, time)
        return

    dropInfo = list.get_dest_row_at_pos(x, y)

    # Insert the tracks, but beware of the AFTER/BEFORE mechanism used by GTK
    if dropInfo is None:
        self.insert(tracks, False)
    elif dropInfo[1] == gtk.TREE_VIEW_DROP_AFTER:
        self.insert(tracks, False, dropInfo[0][0] + 1)
    else:
        self.insert(tracks, False, dropInfo[0][0])

    context.finish(True, False, time)
def destPathFromURI(uri):
    """Return the local path where a uri should be stored"""
    urlParts = urlparse.urlparse(uri)
    # take the leading / off the path part, and return it
    pathPart = urlParts[2][1:]
    # urlParts[1] is the netloc: downloads are grouped per host under the
    # media store root.
    fullPath = os.path.join(config.mediaStoreRoot,
                            urllib.url2pathname(urlParts[1]),
                            pathPart)
    # check if the pathname is already in use, and if it is,
    # append a number that increments until we find a
    # filename that's available.
    pathNameOK = not os.path.exists(fullPath)
    count = 1
    while not pathNameOK:
        # another download has this filename already
        try:
            basename = pathPart[:pathPart.rindex(u'.')]
            exten = pathPart[pathPart.rindex(u'.'):]
        except ValueError:
            # no '.' anywhere in the path part: no extension to preserve
            basename = pathPart
            exten = ""
        fullPath = os.path.join(config.mediaStoreRoot,
                                urllib.url2pathname(urlParts[1]),
                                basename + u'_' + unicode(count) + exten)
        pathNameOK = not os.path.exists(fullPath)
        count += 1
    return fullPath
def test_quoting(self):
    # Test automatic quoting and unquoting works for pathname2url() and
    # url2pathname() respectively
    given = os.path.join("needs", "quot=ing", "here")
    expect = "needs/%s/here" % urllib.quote("quot=ing")
    result = urllib.pathname2url(given)
    self.assertEqual(expect, result,
                     "pathname2url() failed; %s != %s" % (expect, result))
    # Round-trip: unquoting the URL must give back the original pathname.
    expect = given
    result = urllib.url2pathname(result)
    self.assertEqual(expect, result,
                     "url2pathname() failed; %s != %s" % (expect, result))
    # Spaces must be percent-quoted on the way to a URL...
    given = os.path.join("make sure", "using_quote")
    expect = "%s/using_quote" % urllib.quote("make sure")
    result = urllib.pathname2url(given)
    self.assertEqual(expect, result,
                     "pathname2url() failed; %s != %s" % (expect, result))
    # ...and '+' must NOT be treated as an encoded space when unquoting.
    given = "make+sure/using_unquote"
    expect = os.path.join("make+sure", "using_unquote")
    result = urllib.url2pathname(given)
    self.assertEqual(expect, result,
                     "url2pathname() failed; %s != %s" % (expect, result))
def __iter__(self):
    """Walk the pipeline items, persisting each item's content field under
    self.output, and yield every item (modified in place where saved)."""
    base = urllib.url2pathname(self.output.strip('/'))
    # Which item field holds the content to save, keyed by item type.
    content_field = {'Document': 'text', 'Page': 'body',
                     'File': 'file', 'Image': 'image'}
    for item in self.previous:
        pathkey = self.pathkey(*item.keys())[0]
        if not pathkey or not self.output:
            # Not enough info to place this item on disk; pass it through.
            yield item
            continue
        type = item.get('_type')
        path = os.path.join(base, urllib.url2pathname(item[pathkey]))
        #TODO replace field in item with file object and make other
        # blueprints expect a file. This will reduce memory usage.
        meta_data = item.get('_content_info')
        if meta_data:
            meta_data = dict(meta_data)
        if type in content_field:
            field = content_field[type]
            item[field] = self.savefile(item[field], path, meta_data)
        elif type in ['Folder', 'ContentFolder']:
            makedirs(path)
        elif item.get('_html', None) is not None:
            item['_html'] = self.savefile(item['_html'], path, meta_data)
        elif item.get('_content') is not None:
            item['_content'] = self.savefile(item['_content'], path, meta_data)
        yield item
def __iter__(self):
    """Save each pipeline item's content (or a redirect stub) under
    self.output, then yield the item onward."""
    base = urllib.url2pathname(self.output.strip('/'))
    # Which item field holds the content to save, keyed by item type.
    content_field = {'Document': 'text', 'Page': 'body',
                     'File': 'file', 'Image': 'image'}
    for item in self.previous:
        pathkey = self.pathkey(*item.keys())[0]
        if not pathkey or not self.output:
            # Not enough info to place this item on disk; pass it through.
            yield item
            continue
        type = item.get('_type')
        path = os.path.join(base, urllib.url2pathname(item[pathkey]))
        meta_data = item.get('_content_info')
        if meta_data:
            meta_data = dict(meta_data)
        if '_redir' in item:
            # Important: we save redirects to preserve the exact behaviour
            # of the website.
            meta_data = {'Location': item['_site_url'] + item['_redir']}
            savefile(None, path, meta_data, self.logger)
        elif type in content_field:
            field = content_field[type]
            item[field] = savefile(item[field], path, meta_data, self.logger)
        elif type in ['Folder', 'ContentFolder']:
            makedirs(path)
        elif item.get('_html', None) is not None:
            item['_html'] = savefile(item['_html'], path, meta_data, self.logger)
        elif item.get('_content') is not None:
            item['_content'] = savefile(item['_content'], path, meta_data, self.logger)
        yield item
def Download(path):
    """Fetch Polish subtitles for the movie at *path* from napiprojekt.pl and
    store them next to the movie (or in the configured custom subtitle dir)."""
    dp = xbmcgui.DialogProgress()
    dp.create("xnapi")
    dp.update(50, "Searching for subtitles...", " ", " ")
    # The napiprojekt API identifies a movie by the MD5 of its first 10 MB.
    d = md5.new()
    subtitlesCustomPath = xbmc.executehttpapi('GetGuiSetting(3;subtitles.custompath)').replace("<li>", "")
    d.update(open(path, "rb").read(10485760))
    arch = xbmc.translatePath(os.path.join("special://userdata/", "napisy.txt"))
    # NOTE(review): 'str' shadows the builtin here; kept byte-identical.
    str = "http://napiprojekt.pl/unit_napisy/dl.php?l=PL&f=" + d.hexdigest() + "&t=" + f(d.hexdigest()) + "&v=dreambox&kolejka=false&nick=&pass=&napios=" + os.name
    subs = urllib.urlopen(str).read()
    if (subs[0:4] == 'NPc0'):
        # 'NPc0' is the service's "no subtitles available" marker.
        xbmcgui.Dialog().ok("xnapi", "No subtitles found.")
    else:
        file(arch, "wb").write(subs)
        if subtitlesCustomPath == "":
            #save in movie directory
            if (path[0:6] == """rar://"""):
                # playing rar file
                #remove rar://, get directory outside archive, add playing file name
                filename = (os.path.join((os.path.split(os.path.split(urllib.url2pathname(path))[0])[0])[6:], (os.path.split(path)[1])[:-3] + 'txt'))  #.replace('/','\\')
            else:
                filename = path[:-3] + 'txt'
        else:
            filename = os.path.join(subtitlesCustomPath, os.path.split(urllib.url2pathname(path))[1][:-3] + 'txt')
        if os.path.isfile(filename) == True:
            # there already were subs
            filename = filename[:-4] + '-xnapi.txt'
            if os.path.isfile(filename) == True:
                dp.update(75, "Replacing old subtitles...", " ", " ")
        shutil.copyfile(arch, filename)
        xbmcgui.Dialog().ok("xnapi", "Subtitles extracted to: \n" + os.path.dirname(filename))
def put(self, sourceurl, desturl):
    """Copy the file named by *sourceurl* to *desturl* (both URL-encoded
    paths), creating the destination directory first if needed."""
    src = urllib.url2pathname(sourceurl)
    dst = urllib.url2pathname(desturl)
    self.instrumentation.say("put(%s, %s)" % (src, dst))
    # Make sure the target directory exists before copying into it.
    self._shell.mkdirs(os.path.dirname(dst), ignoreexisting=True)
    self._shell.copy(src, dst)
def test_goodfile(self):
    """import_itunes_path() should return the unquoted library path parsed
    from a well-formed iTunes preference snippet."""
    # Our file templates. Try a vanilla version and one with escapes.
    # NB:
    # This is only supported on the Mac at the moment, when
    # windows support arrives we will need to extract the paths and
    # see what it looks like and add a test here.
    path1 = "/Users/xxx/Music/iTunes/iTunes%20Music/"
    path2 = ("/Volumes/%E3%83%9B%E3%83%BC%E3%83%A0/" +
             "xxx/Music/iTunes/iTunes%20Media/")
    file_snippet1 = file_template % dict(path=(self.file_url + path1))
    file_snippet2 = file_template % dict(path=(self.file_url + path2))
    tmpf_dir = os.path.dirname(self.tmpf_path)

    # Test vanilla path
    self._clean_tmpf()
    self.tmpf.write(file_snippet1)
    self.tmpf.flush()
    path = import_itunes_path(tmpf_dir)
    self.assertEquals(path, urllib.url2pathname(path1))

    # Test path with utf-8 escapes
    self._clean_tmpf()
    self.tmpf.write(file_snippet2)
    self.tmpf.flush()
    path = import_itunes_path(tmpf_dir)
    self.assertEquals(path, urllib.url2pathname(path2))
def decoded_data(self):
    """Decode self.data — '&'-separated, ':'-delimited, URL-quoted key/value
    pairs — into a dict, caching the result on the instance.

    Returns the cached dict on subsequent calls.
    """
    import urllib
    if not hasattr(self, '_decoded_data'):
        self._decoded_data = {}
        for keyvalue in self.data.split('&'):
            # Split on the FIRST ':' only; the original unbounded split
            # raised ValueError whenever a value contained a ':' itself.
            key, value = keyvalue.split(':', 1)
            self._decoded_data[urllib.url2pathname(key)] = urllib.url2pathname(value)
    return self._decoded_data
def get_xhtml_content(path):
    """Read and return the UTF-8 decoded content of *path*, resolved
    (URL-unquoted) relative to settings.BASEDIR.

    A single leading '/' or '\\' is stripped so the join stays under BASEDIR.
    """
    if path.startswith("/") or path.startswith("\\"):
        path = path[1:]
    full_path = os.path.join(settings.BASEDIR, url2pathname(path))
    # Context manager guarantees the handle is closed even when read()
    # raises (the original leaked the file object on error).
    with codecs.open(full_path, "r", "utf8") as f:
        return f.read()
def _uri_to_path(uri):
    """Return the path corresponding to the URI <uri>, unless it is a
    non-local resource in which case we return the pathname with the type
    identifier intact.
    """
    prefix = 'file://'
    stripped = uri[len(prefix):] if uri.startswith(prefix) else uri
    return urllib.url2pathname(stripped)
def test_ntpath(self):
    """url2pathname() should map drive-letter URLs to Windows paths,
    accepting both ':' and the legacy '|' drive separator."""
    # All of these spell the root of drive C.
    given = ("/C:/", "///C:/", "/C|//")
    expect = "C:\\"
    for url in given:
        result = urllib.url2pathname(url)
        self.assertEqual(expect, result,
                         "nturl2path.url2pathname() failed; %s != %s" %
                         (expect, result))
    # '|' drive separator with a path component.
    given = "///C|/path"
    expect = "C:\\path"
    result = urllib.url2pathname(given)
    self.assertEqual(expect, result,
                     "nturl2path.url2pathname() failed; %s != %s" %
                     (expect, result))
def filename(self):
    """a local filename equivalent to the URI"""
    if self.scheme != "file":
        raise ValueError("only the file scheme supports filenames")
    if self.netloc:
        raise ValueError("only local files have filenames")
    pathname = url2pathname(self.path)
    # On Windows the pathname must be decoded from the filesystem encoding.
    return fsdecode(pathname) if os.name == "nt" else pathname
def _get_move(self, data):
    """Handle a MOVE command: data[1] is the old path, data[2] the new one
    (both URL-encoded); reply ERROR on bad input, OK on success."""
    old_file_path = url2pathname(data[1])
    new_file_path = url2pathname(data[2])
    # check if the data string is correct: both paths must be non-empty
    if not old_file_path or not new_file_path:
        self.connection.send('ERROR\n')
    else:
        answer = self.parent.move(old_file_path, new_file_path, self.computer_name)
        if answer:
            self.connection.send('OK\n')
def dbPath2SrcPathFn(self, path):
    """Convert a file:// URL stored in the DB to a latin-1 encoded local
    filesystem path; warn (but continue) when the scheme is not 'file'."""
    p = urlparse.urlparse(path)
    if not p.scheme == 'file':
        self.number_warnings += 1
        self.log.warning("WARNING: FIX dbPath2SrcPathFn: " + path)
    # url2pathname alone returns unicode, which does not work downstream;
    # encoding to latin-1 does. (The original computed and immediately
    # discarded the unencoded value first — dead statement removed.)
    path_fn = urllib.url2pathname(p.path).encode('latin-1')
    #path_fn = urllib.unquote(p.path)
    #print "dbPath2SrcPathFn() Orig path: %s" % path
    #print "dbPath2SrcPathFn() New path (%s): %s" % (type(path_fn), path_fn)
    return path_fn
def _del_groups(self, req):
    """Delete the group(s) selected in the request; 'selgroup' may arrive as
    a single string or a list. Errors are reported via req.hdf."""
    groups_to_del = req.args.get('selgroup')
    try:
        if isinstance(groups_to_del, types.StringTypes):
            # Single selection: one (possibly URL-encoded) group name.
            self.authz.del_group(url2pathname(groups_to_del))
        elif isinstance(groups_to_del, types.ListType):
            # Multiple selection: delete each group in turn.
            for group in groups_to_del:
                self.authz.del_group(url2pathname(group))
        else:
            req.hdf['delgroup.error'] = "Invalid type of group selection"
    except Exception, e:
        # Surface the failure in the template instead of crashing the request.
        req.hdf['delgroup.error'] = e
def media_path(path):
    """Return the directory containing a media file, handling stacked
    ("a , b"), rar:// and multipath:// paths.
    """
    # Check for stacked movies: the segment after the last ' , ' carries the
    # real directory.
    try:
        path = os.path.split(path)[0].rsplit(' , ', 1)[1].replace(",,", ",")
    except IndexError:
        # Not a stacked path (rsplit yielded a single element); the original
        # bare `except:` also hid unrelated errors — narrowed deliberately.
        path = os.path.split(path)[0]
    # Fixes problems with rared movies and multipath
    if path.startswith("rar://"):
        path = os.path.split(urllib.url2pathname(path.replace("rar://", "")))[0]
    elif path.startswith("multipath://"):
        temp_path = path.replace("multipath://", "").split('%2f/')
        path = urllib.url2pathname(temp_path[0])
    return path
def testescape():
    """Demo of urllib escaping helpers (Python 2): quote/unquote, urlencode,
    and the pathname2url/url2pathname round trip."""
    # url escape
    data = 'name=dasf'
    data1 = urllib.quote(data)
    print data1
    print urllib.unquote(data1)
    # json file
    data3 = urllib.urlencode({ 'name': 'dark-bull', 'age': 200 })
    print data3
    data4 = urllib.pathname2url(r'd:/a/b/c/23.php')
    print data4
    # result: ///D|/a/b/c/23.php
    print urllib.url2pathname(data4)
    # result: D:/a/b/c/23.php
def test_ntpath(self):
    """url2pathname() should map drive-letter URLs to Windows paths,
    accepting both ':' and the legacy '|' drive separator.
    NOTE(review): near-duplicate of the other test_ntpath in this file."""
    # All of these spell the root of drive C.
    given = ('/C:/', '///C:/', '/C|//')
    expect = 'C:\\'
    for url in given:
        result = urllib.url2pathname(url)
        self.assertEqual(expect, result,
                         'nturl2path.url2pathname() failed; %s != %s' %
                         (expect, result))
    # '|' drive separator with a path component.
    given = '///C|/path'
    expect = 'C:\\path'
    result = urllib.url2pathname(given)
    self.assertEqual(expect, result,
                     'nturl2path.url2pathname() failed; %s != %s' %
                     (expect, result))
def get(self, *args):
    """Dispatch a GET request: /<name>/edit, /new, /<name>, or the index."""
    self.parse_params()
    first = args[0] if len(args) > 0 else None
    second = args[1] if len(args) > 1 else None
    if second:
        # Two path segments: only the 'edit' action is recognised.
        if second == 'edit':
            self.edit(urllib.url2pathname(first))
        else:
            self.error(404)
    elif first:
        # One segment: either create a new entry or show an existing one.
        if first == 'new':
            self.new()
        else:
            self.show(urllib.url2pathname(first))
    else:
        self.index()
def open_local_file(self, req):
    """urllib2-style handler: open a file:// request and return an
    addinfourl carrying Content-type/length/Last-modified headers, or raise
    URLError when the file cannot be stat'ed."""
    try:
        import email.utils as emailutils
    except ImportError:
        # python 2.4
        import email.Utils as emailutils
    import mimetypes
    host = req.get_host()
    file = req.get_selector()
    localfile = url2pathname(file)
    try:
        stats = os.stat(localfile)
        size = stats.st_size
        modified = emailutils.formatdate(stats.st_mtime, usegmt=True)
        mtype = mimetypes.guess_type(file)[0]
        headers = mimetools.Message(StringIO(
            'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' %
            (mtype or 'text/plain', size, modified)))
        # Only serve the file when it is genuinely local: either no host was
        # given, or the host (with no port) resolves to one of our own names.
        if host:
            host, port = splitport(host)
        if not host or \
           (not port and socket.gethostbyname(host) in self.get_names()):
            return addinfourl(open(localfile, 'rb'), headers, 'file:' + file)
    except OSError, msg:
        # urllib2 users shouldn't expect OSErrors coming from urlopen()
        raise URLError(msg)
def steal_emoticon(self, path_uri):
    '''receives the path or the uri for the emoticon to be added'''
    if path_uri.startswith("file://"):
        # Strip the scheme and unquote into a plain filesystem path.
        path_uri = path_uri[7:]
        path_uri = urllib.url2pathname(path_uri)
    directory = os.path.dirname(path_uri).lower()
    caches = e3.cache.CacheManager(self.session.config_dir.base_dir)
    emcache = caches.get_emoticon_cache(self.session.account.account)
    dialog = extension.get_default('dialog')

    if directory.endswith(gui.theme.emote_theme.path.lower()):
        dialog.information(_("Can't add, default emoticon"))
    elif directory == emcache.path.lower():
        dialog.information(_("Can't add, own emoticon"))
    else:
        def on_response(response, shortcut):
            # Callback fired when the shortcut entry dialog is closed.
            if response == stock.ACCEPT:
                shortcut = dialog.entry.get_text()
                if shortcut not in emcache.list():
                    # NOTE(review): this uses self.emcache while the cache
                    # fetched above is the local 'emcache' — confirm they are
                    # the same object, otherwise this looks like a bug.
                    self.emcache.insert((shortcut, path_uri))
                # TODO: check if the file's hash is not already on the cache
                else:
                    dialog.information(_("Shortcut already in use"))

        # Try to recover the alt text the emoticon had in the conversation
        # HTML, to pre-fill the shortcut entry.
        matches = re.search(r'<img src="' + path_uri +
                            '" alt="(?P<alt>\S*)" name="(?P<name>\w*)"',
                            self.output.view.text)
        groupdict = {'alt': ''} if not matches else matches.groupdict()
        dialog = dialog.entry_window(
            _("Type emoticon's shortcut: "), groupdict['alt'],
            on_response, _("Choose custom emoticon's shortcut"))
        dialog.entry.set_max_length(7)
        dialog.show()
def open(self, request): """ Open a file or url If request.url can't be identified as a url, it will return the content in a file-like object @param request: A suds Request @type Request: suds.transport.Request @return: A file-like object @rtype: file """ log.debug('opening: (%s)', request.url) fp = None location = request.url.lstrip() if location.startswith('<?'): log.debug('returning url (%s) as StringIO file') fp = cStringIO.StringIO(location) else: parsed = urlparse(request.url) if parsed.scheme == 'file': log.debug('opening file (%s) with open', parsed.path) try: fp = open(url2pathname(parsed.path)) except Exception, e: raise TransportError(str(e), 500, None) else:
def on_drag_data_received(self, widget, drag_context, x, y, selection, info, timestamp):
    """Insert dropped files into the day text: pictures as image markup,
    anything else as a file link."""
    # We do not want the default behaviour
    self.day_text_view.emit_stop_by_name('drag-data-received')
    iter = self.day_text_view.get_iter_at_location(x, y)

    def is_pic(uri):
        # Extension-based check for supported image formats.
        head, ext = os.path.splitext(uri)
        return ext.lower().strip('.') in 'png jpeg jpg gif eps bmp'.split()

    uris = selection.data.strip('\r\n\x00')
    logging.debug('URIs: "%s"' % uris)
    uris = uris.split()  # we may have more than one file dropped
    uris = map(lambda uri: uri.strip(), uris)
    for uri in uris:
        uri = urllib.url2pathname(uri)
        dirs, filename = os.path.split(uri)
        uri_without_ext, ext = os.path.splitext(uri)
        if is_pic(uri):
            self.insert('[""%s""%s]\n' % (uri_without_ext, ext), iter)
        else:
            # It is always safer to add the "file://" protocol and the ""s
            self.insert('[%s ""%s""]\n' % (filename, uri), iter)
    drag_context.finish(True, False, timestamp)
    # No further processing
    return True
def prepare_files(self):
    """Set up input/output/error files for a transcode run: a local file URL
    is opened directly, anything else is downloaded into a temp file."""
    if urlparse.urlsplit(self.inurl)[0] == 'file':
        self.infname = urllib.url2pathname(urlparse.urlsplit(self.inurl)[2])
        self.infd = open(self.infname)
    else:
        # not a file url. download it.
        source = urllib.urlopen(self.inurl)
        self.infd, self.infname = tempfile.mkstemp(prefix="transcode-in-",
                                                   suffix="." + self.inext)
        self._files_to_clean_up_on_success.append((self.infd, self.infname))
        self._files_to_clean_up_on_error.append((self.infd, self.infname))
        # Copy the remote data into the temp file in 64 KiB chunks, then
        # rewind the descriptor so the transcoder reads from the start.
        while True:
            chunk = source.read(1024 * 64)
            if not chunk:
                break
            os.write(self.infd, chunk)
        os.lseek(self.infd, 0, 0)
    self.outfd, self.outfname = tempfile.mkstemp(prefix="transcode-out-",
                                                 suffix="." + self.tofmt)
    self._files_to_clean_up_on_error.append((self.outfd, self.outfname))
    self.errfh, self.errfname = tempfile.mkstemp(prefix="transcode-",
                                                 suffix=".log")
    # The output is always advertised back to callers as a file URL.
    self.outurl = urlparse.urlunsplit(
        ["file", None, self.outfname, None, None])
    self._files_to_clean_up_on_success.append((self.errfh, self.errfname))
    log.debug("Reading from " + self.infname + " (" + self.inurl + ")")
    log.debug("Outputting to " + self.outfname + " (" + self.outurl + ")")
    log.debug("Errors to " + self.errfname)
def create_input_source(source=None, publicID=None, location=None, file=None, data=None, format=None): """ Return an appropriate InputSource instance for the given parameters. """ # TODO: test that exactly one of source, location, file, and data # is not None. input_source = None if source is not None: if isinstance(source, InputSource): input_source = source else: if isinstance(source, basestring): location = source elif hasattr(source, "read") and not isinstance(source, Namespace): f = source input_source = InputSource() input_source.setByteStream(f) if hasattr(f, "name"): input_source.setSystemId(f.name) else: raise Exception("Unexpected type '%s' for source '%s'" % (type(source), source)) absolute_location = None # Further to fix for issue 130 if location is not None: # Fix for Windows problem https://github.com/RDFLib/rdflib/issues/145 if os.path.exists(location): location = pathname2url(location) base = urljoin("file:", "%s/" % pathname2url(os.getcwd())) absolute_location = URIRef(location, base=base).defrag() if absolute_location.startswith("file:///"): filename = url2pathname(absolute_location.replace("file:///", "/")) file = open(filename, "rb") else: input_source = URLInputSource(absolute_location, format) # publicID = publicID or absolute_location # More to fix for issue 130 if file is not None: input_source = FileInputSource(file) if data is not None: if isinstance(data, unicode): data = data.encode('utf-8') input_source = StringInputSource(data) if input_source is None: raise Exception("could not create InputSource") else: if publicID is not None: # Further to fix for issue 130 input_source.setPublicId(publicID) # Further to fix for issue 130 elif input_source.getPublicId() is None: input_source.setPublicId(absolute_location or "") return input_source
def playFile(self, file, stop=True):
    ## Plays the file 'file' (Could also be a URI).
    # Stop the player if requested. (Required for playbin2 and
    # changing streams midway through another).
    if stop:
        player.stop()

    if (file == None):
        # If no file is to be played, set the URI to None, and the file to ""
        file = ""

    # Set the now playing label to the file to be played.
    self.nowPlyLbl.set_label(os.path.basename(urllib.url2pathname(file)))

    if (os.path.exists(file) or '://' in file):
        # If it's not already a uri, make it one.
        # Also escape any # characters in the filename
        file = useful.filenameToUri(file).replace('#', '%23')
        # Set the URI to the file's one.
        player.setURI(file)
        # Try to set the subtitle track if requested.
        if cfg.getBool('video/autosub'):
            subtitles.trySubs(file)
        # Add the file to recently opened files.
        gtk.recent_manager_get_default().add_item(file)
        # Start the player, if it isn't already running.
        if (not player.isPlaying()):
            player.play()
    elif (file != ""):
        # If none of the above, a bad filename was passed.
        print _("Something's stuffed up, no such file: %s") % (file)
        self.playFile(None)
def file_uri_to_path(uri):
    '''Convert File URI to local filesystem path according to:
    http://en.wikipedia.org/wiki/File_URI_scheme
    '''
    parsed = urlparse(uri)
    return urllib.url2pathname(parsed.path)
def parse(raw_request):
    """Parse a raw HTTP request string into a Request object (method, path,
    URI, headers, GET/POST params, cookies, body)."""
    headers = {}
    request_uri = '/'
    request_path = '/'
    request_method = 'GET'
    request_get = {}
    request_cookie = {}
    request_body = ''

    # Decode request (best effort; keep the raw value when encoding fails)
    try:
        raw_request = raw_request.encode('utf-8', 'ignore')
    except UnicodeError:
        pass

    # Parse headers
    request_lines = raw_request.split("\n")
    line_stop = 0
    for i, line in enumerate(request_lines):
        # Stop processing headers at the first blank line
        if line.strip('\r\n. ') == '':
            line_stop = i
            break
        # Process first headers line (METHOD URI VERSION — carries no ':')
        if ':' not in line:
            data = line.split(' ')
            request_method = data[0].strip().upper()
            request_uri = url2pathname(data[1].strip())
            request_path = urlparse(request_uri).path
            continue
        # Parse headers: split on the FIRST ':' only, so values that contain
        # a colon themselves (e.g. "Host: example.com:8080") are kept whole.
        data = line.split(':', 1)
        headers[data[0].strip().upper()] = data[1].strip()

    # Parse request body
    request_body = '\n'.join(request_lines[line_stop:]).strip('\r\n ')
    # Parse post parameters
    request_post = parse_qs(request_body, keep_blank_values=True)
    # Parse get parameters
    request_get = parse_qs(urlparse(request_uri).query, keep_blank_values=True)

    # Parse cookies: bound the '=' split so values containing '=' survive.
    cookies = headers.get('COOKIE') or ''
    if cookies:
        for item in cookies.split(';'):
            item = item.strip().split('=', 1)
            request_cookie[item[0].strip()] = None
            if len(item) > 1:
                request_cookie[item[0].strip()] = item[1].strip()

    return Request(
        raw=raw_request,
        method=request_method,
        path=request_path,
        uri=request_uri,
        headers=headers,
        get=request_get,
        post=request_post,
        cookie=request_cookie,
        body=request_body
    )
def splitUri(uri):
    """Unquote *uri*, strip CR/LF/NUL characters, and split it on '://'."""
    cleaned = urllib.url2pathname(uri)  # escape special chars
    cleaned = cleaned.strip('\r\n\x00')  # remove \r\n and NULL
    return cleaned.split("://")
def __init__(self, engine, **kw):
    """Initialise the sync store; when *engine* starts with this store's
    scheme prefix, extract the local path after '://' into self._engine."""
    super(SyncStore, self).__init__(engine, **kw)
    # NOTE(review): self.init is presumably this backend's URL scheme prefix
    # (e.g. 'file://') — confirm against the class/base-class definition.
    if engine.startswith(self.init):
        self._engine = url2pathname(engine.split('://')[1])
def url_to_file(url):
    """Map *url* to a file path under the configured storage folder."""
    storage_root = os.path.expanduser(config.get("storage", "folder"))
    # Strip surrounding slashes so the URL path stays relative to the root.
    relative = urllib.url2pathname(url.strip("/"))
    return os.path.join(storage_root, relative)
def url2path(urlparts):
    """Decode the path component of a parsed URL: ascii-encode, unquote,
    then decode the result as UTF-8."""
    raw = urlparts.path.encode('ascii')
    return urllib.url2pathname(raw).decode('utf8')
def create_input_source(source=None, publicID=None, location=None, file=None, data=None, format=None): """ Return an appropriate InputSource instance for the given parameters. """ # TODO: test that exactly one of source, location, file, and data # is not None. input_source = None if source is not None: if isinstance(source, InputSource): input_source = source else: if isinstance(source, basestring): location = source elif hasattr(source, "read") and not isinstance(source, Namespace): f = source input_source = InputSource() input_source.setByteStream(f) if hasattr(f, "name"): input_source.setSystemId(f.name) else: raise Exception("Unexpected type '%s' for source '%s'" % (type(source), source)) absolute_location = None # Further to fix for issue 130 if location is not None: # Fix for Windows problem https://github.com/RDFLib/rdflib/issues/145 if os.path.exists(location): location = pathname2url(location) base = urljoin("file:", "%s/" % pathname2url(os.getcwd())) absolute_location = URIRef(location, base=base).defrag() if absolute_location.startswith("file:///"): filename = url2pathname(absolute_location.replace("file:///", "/")) file = open(filename, "rb") else: input_source = URLInputSource(absolute_location, format) # publicID = publicID or absolute_location # Further to fix # for issue 130 if file is not None: input_source = FileInputSource(file) if data is not None: if isinstance(data, unicode): data = data.encode('utf-8') input_source = StringInputSource(data) if input_source is None: raise Exception("could not create InputSource") else: if publicID is not None: # Further to fix for issue 130 input_source.setPublicId(publicID) # Further to fix for issue 130 elif input_source.getPublicId() is None: input_source.setPublicId(absolute_location or "") return input_source
class ParsedURL(unicode):
    """A unicode string that also exposes its urlparse components as
    properties; for file: URLs, .path is converted to a local pathname."""

    fragment = property(lambda self: self.__parsed__.fragment)
    netloc = property(lambda self: self.__parsed__.netloc)
    # file: URLs get their path translated from URL quoting to a local path.
    path = property(lambda self: self.__parsed__.path
                    if self.scheme != 'file'
                    else url2pathname(self.__parsed__.path))
    query = property(lambda self: self.__parsed__.query)
    scheme = property(lambda self: self.__parsed__.scheme)

    def __init__(self, value):
        # Parse once and cache; all the properties above read from this.
        self.__parsed__ = urlparse(self)
def deleteOneGadget(resource, user, request):
    """Delete a gadget resource and all of its related rows (user links,
    wiring, tags, votes), remove it from the showcase, and delete its
    deployed files when it came from a WGT package.

    Raises Http403 when *user* is neither the owner nor a superuser.
    Returns the result of deleteGadget().
    """
    # Delete the gadget only if this user is the owner
    if not user.is_superuser:
        try:
            userRelated = UserRelatedToGadgetResource.objects.get(
                gadget=resource, user=user, added_by=True)
        except UserRelatedToGadgetResource.DoesNotExist:
            #the user is not the owner
            msg = _(
                "user %(username)s is not the owner of the resource %(resource_id)s"
            ) % {
                'username': user.username,
                'resource_id': resource.id
            }
            raise Http403(msg)

    #Delete data from Application model
    apps = Application.objects.filter(resources=resource)
    for app in apps:
        app.remove_resource(resource)

    # Delete the related user information to that gadget
    userRelated = UserRelatedToGadgetResource.objects.filter(gadget=resource)
    userRelated.delete()

    # Delete the related wiring information for that gadget
    GadgetWiring.objects.filter(idResource=resource.id).delete()

    # Delete the related tags for that gadget
    resourceTags = UserTag.objects.filter(idResource=resource.id)
    for t in resourceTags:
        #if there is no more gadgets tagged with these tags, delete the Tag
        #if UserTag.objects.filter(tag = t.tag).count() == 1:
        #    t.tag.delete()
        t.delete()

    # Delete the related votes for that gadget
    UserVote.objects.filter(idResource=resource.id).delete()

    # Remove the gadget from the showcase
    result = deleteGadget(user, resource.short_name, resource.vendor,
                          resource.version)

    # Delete the gadget if it is saved in the platform
    if resource.fromWGT:
        # pattern /deployment/gadgets/(username)/(vendor)/(name)/(version)/...
        exp = re.compile('/deployment/gadgets/(?P<path>.+/.+/.+/.+/).*$')
        if exp.search(resource.template_uri):
            v = exp.search(resource.template_uri)
            path = url2pathname(v.group('path'))
            path = os.path.join(settings.GADGETS_DEPLOYMENT_DIR,
                                path).encode("utf8")
            if os.path.isdir(path):
                rmtree(path)

    # Delete the object
    resource.delete()

    return result
def url2pathname(url):
    """Version-agnostic wrapper: dispatch to the Python 3 or Python 2
    location of urllib's url2pathname."""
    if PYVER >= 3:
        converter = urllib.request.url2pathname
    else:
        converter = urllib.url2pathname
    return converter(url)
def _url_decode(self, url):
    """Encode *url* to UTF-8 bytes, then unquote it into a pathname."""
    # TODO: to test
    encoded = url.encode('utf8')
    return urllib.url2pathname(encoded)
def __init__(self, dataPath, overwrite=False):
    """Remember the data path/URL, its local filesystem equivalent, and the
    overwrite flag; validation is deferred (self._checked starts False)."""
    self._dataPath = dataPath
    parsed = urlparse.urlparse(dataPath)
    self._absPath = urllib.url2pathname(parsed.path)
    self._overwrite = overwrite
    self._checked = False
def run(self):
    """Reader-thread loop: receive space-separated commands from the peer
    socket and apply them to the shared filesystem model (self.parent.fs).

    Each file-transfer command is acknowledged with 'OK\\n' before and
    after its payload is consumed.
    """
    self.log.info('[' + self.thread_name + '] ' + 'Created')
    # endless loop to recieve commands
    while not self._stop:
        # split the recieved data
        try:
            data = self.open_socket.recv(1024).split(' ')
        except Exception, err:
            self.log.error(str(err))
        if data[0] == self.COMMAND_ACK:
            # If a OK command was recieved
            # Fire the 'ok' Event
            self.ok.set()
        elif data[0] == 'ALREADY_LOCKED':
            self.error.set('ALREADY_LOCKED')
        elif data[
                0] == self.COMMAND_CREATE:  # if a create command was recieved (when other clients changed the folder)
            self.log.debug('Recieved Create Command' + str(data))
            self.open_socket.send('OK\n')
            file_size = int(data[1])
            file_path = url2pathname(data[2])
            #self.log.debug(self.parent.fs.ignoreModify)
            # read data from the socket
            if not file_size == 0:
                content = ''
                # accumulate exactly file_size bytes (or stop on EOF)
                while file_size > len(content):
                    data = self.open_socket.recv(1024)
                    if not data:
                        break
                    content += data
                self.open_socket.send('OK\n')
                self.parent.fs.createFile(file_path, content)
            else:
                self.parent.fs.createFile(file_path, '')
        elif data[0] == self.COMMAND_DELETEFILE:
            self.log.debug('Recieved Delete Command' + str(data))
            file_path = url2pathname(data[1])
            self.open_socket.send('OK\n')
            self.parent.fs.deleteFile(file_path)
        elif data[0] == self.COMMAND_MODIFYFILE:
            self.log.debug('Recieved Modify Command' + str(data))
            self.open_socket.send('OK\n')
            file_size = int(data[1])
            file_path = url2pathname(data[2])
            # read data from the socket
            if not file_size == 0:
                content = ''
                while file_size > len(content):
                    data = self.open_socket.recv(1024)
                    if not data:
                        break
                    content += data
                self.open_socket.send('OK\n')
                self.parent.fs.writeFile(file_path, content)
        elif data[0] == self.COMMAND_LOCKFILE:
            self.log.debug('Recieved Lock Command' + str(data))
            file_path = url2pathname(data[1])
            self.open_socket.send('OK\n')
            self.parent.fs.lockFile(file_path)
        elif data[0] == self.COMMAND_UNLOCKFILE:
            self.log.debug('Recieved Unlock Command' + str(data))
            file_path = url2pathname(data[1])
            self.open_socket.send('OK\n')
            self.parent.fs.unlockFile(file_path)
        elif data[0] == self.COMMAND_CREATE_DIR:
            self.log.debug('Recieved COMMAND_CREATE_DIR' + str(data))
            file_path = url2pathname(data[1])
            self.open_socket.send('OK\n')
            self.parent.fs.createDir(file_path)
        elif data[0] == self.COMMAND_DELETE_DIR:
            self.log.debug('Recieved COMMAND_DELETE_DIR' + str(data))
            file_path = url2pathname(data[1])
            self.open_socket.send('OK\n')
            self.parent.fs.deleteDir(file_path)
        elif data[0] == self.COMMAND_MOVE:
            self.log.debug('Recieved COMMAND_MOVE' + str(data))
            src_path = url2pathname(data[1])
            dest_path = url2pathname(data[2])
            self.open_socket.send('OK\n')
            self.parent.fs.moveFileDir(src_path, dest_path)
        elif data[0] == 'CLOSE\n':
            try:
                self.open_socket.send('OK\n')
                self.open_socket.close()
            except Exception, err:
                self.log.error(str(err))
            # NOTE(review): placement reconstructed from a collapsed
            # source line — refresh the GUI status after closing; confirm
            # against the original file layout.
            self.parent.gui.changeStatus()
def get_filename(track):
    """Return the local filesystem path for *track*'s Location URL.

    Returns None implicitly when Location does not start with the
    file:// prefix.

    BUG FIX: the original tested ``location.startswith(FILE_PREFIX)`` but
    sliced with ``len(Constants.FILE_PREFIX)`` — two different names for
    the prefix. If they ever differ in length the slice is wrong; use the
    same constant for both the test and the slice.
    """
    location = track['Location']
    if location.startswith(FILE_PREFIX):
        # Strip the scheme prefix, then decode percent-escapes.
        return urllib.url2pathname(location[len(FILE_PREFIX):])
def do_GET(self):
    """Serve the race web UI: routes a handful of virtual pages and falls
    back to the in-memory content buffer for everything else.

    Any exception while resolving content is reported as a 404; header
    emission order (caching headers only for HTML) is significant.
    """
    up = urlparse.urlparse(self.path)
    content, gzip_content = None, None
    try:
        if up.path == '/':
            content = getIndexPage()
            content_type = self.html_content
        elif up.path == '/favicon.ico':
            content = favicon
            content_type = 'image/x-icon'
        elif self.reLapCounterHtml.match(up.path):
            content = getLapCounterHtml()
            content_type = self.html_content
        elif up.path == '/qrcode.html':
            urlPage = GetCrossMgrHomePage()
            content = getQRCodePage(urlPage)
            content_type = self.html_content
        elif up.path == '/servertimestamp.html':
            # Query string carries the client's request timestamp; echo it
            # back with the server clock (milliseconds) for clock sync.
            content = json.dumps({
                'servertime': time.time() * 1000.0,
                'requesttimestamp': float(up.query),
            })
            content_type = self.json_content
        else:
            file = None
            if up.path == '/CurrentResults.html':
                try:
                    file = os.path.splitext(
                        Model.race.getFileName())[0] + '.html'
                except:
                    pass
            elif up.path == '/PreviousResults.html':
                file = GetPreviousFileName()
            if file is None:
                # Any other path: decode the basename and look it up in
                # the content buffer.
                file = urllib.url2pathname(os.path.basename(up.path))
            content, gzip_content = contentBuffer.getContent(file)
            content_type = self.html_content
    except Exception as e:
        self.send_error(
            404, 'File Not Found: {} {}\n{}'.format(self.path, e,
                                                    traceback.format_exc()))
        return
    self.send_response(200)
    self.send_header('Content-Type', content_type)
    if content_type == self.html_content:
        # Prefer the pre-compressed variant when the client accepts gzip.
        if gzip_content and 'Accept-Encoding' in self.headers and 'gzip' in self.headers[
                'Accept-Encoding']:
            content = gzip_content
            self.send_header('Content-Encoding', 'gzip')
        # HTML pages change with the live race; forbid caching.
        self.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
        self.send_header('Pragma', 'no-cache')
        self.send_header('Expires', '0')
    self.send_header('Content-Length', len(content))
    self.end_headers()
    self.wfile.write(content)
def create_wsgi_request(event, server_name='apigw'):
    """Create a wsgi environment from an apigw request.

    Args:
        event: API Gateway lambda-proxy integration event dict.
        server_name: value used for the SERVER_NAME environ key.

    Returns:
        A WSGI-compliant environ dictionary.

    BUG FIX: the original called ``environ['PATH_INFO'].replace(...)`` and
    discarded the result (str.replace returns a new string), so the stage
    script name was never actually stripped from PATH_INFO.
    """
    path = urllib.url2pathname(event['path'])
    # Requests through the default *.amazonaws.com domain carry the stage
    # name as the script root; custom domains do not.
    script_name = (event['headers']['Host'].endswith('.amazonaws.com')
                   and event['requestContext']['stage'] or '').encode('utf8')
    query = event['queryStringParameters']
    query_string = query and urllib.urlencode(query) or ""
    body = event['body'] and event['body'].encode('utf8') or ''
    environ = {
        'HTTPS': 'on',
        'PATH_INFO': path.encode('utf8'),
        'QUERY_STRING': query_string.encode('utf8'),
        'REMOTE_ADDR': event['requestContext']['identity']['sourceIp'].encode('utf8'),
        'REQUEST_METHOD': event['httpMethod'].encode('utf8'),
        'SCRIPT_NAME': script_name,
        'SERVER_NAME': server_name.encode('utf8'),
        'SERVER_PORT': '80'.encode('utf8'),
        'SERVER_PROTOCOL': u'HTTP/1.1'.encode('utf8'),
        'wsgi.errors': sys.stderr,
        'wsgi.input': StringIO(body),
        'wsgi.multiprocess': False,
        'wsgi.multithread': False,
        'wsgi.run_once': False,
        'wsgi.url_scheme': u'https'.encode('utf8'),
        'wsgi.version': (1, 0),
    }
    headers = event['headers']
    # Input processing
    if event['httpMethod'] in ("POST", "PUT", "PATCH"):
        if 'Content-Type' in headers:
            environ['CONTENT_TYPE'] = headers['Content-Type']
        environ['CONTENT_LENGTH'] = str(len(body))
    for header in list(event['headers'].keys()):
        wsgi_name = "HTTP_" + header.upper().replace('-', '_')
        environ[wsgi_name] = headers[header].encode('utf8')
    if script_name:
        path_info = environ['PATH_INFO']
        if script_name in path_info:
            # Assign the stripped result back (str.replace is not in-place).
            environ['PATH_INFO'] = path_info.replace(script_name, '')
    # Extract remote user from event
    remote_user = None
    if event['requestContext'].get('authorizer'):
        remote_user = event['requestContext']['authorizer'].get('principalId')
    elif event['requestContext'].get('identity'):
        remote_user = event['requestContext']['identity'].get('userArn')
    if remote_user:
        environ['REMOTE_USER'] = remote_user
    # apigw aware integrations
    environ['apigw.request'] = event['requestContext']
    environ['apigw.stagevars'] = event['stageVariables']
    return environ
def get_search_results(self, req, keywords, filters):
    """Yield search hits for attachment files from an external swish-e index.

    Only active when the 'attachments' filter is selected. Each hit is
    yielded as (relative_url, title, date, author, excerpt).

    Fixes: the original built the query with repeated concatenation and
    called ``query.rstrip(' ')`` without using the result (a no-op); it
    also had an unreachable ``return`` after ``raise`` and recompiled the
    hit-parsing regex on every output line.
    """
    if not 'attachments' in filters:
        return
    # Prepare keywords: a single space-separated query string (no trailing
    # space — this is what the discarded rstrip was meant to achieve).
    query = ' '.join(keywords)
    self.env.log.debug('Search query: %s' % query)
    # Run external command to get raw search result
    index_file = self._get_index_file()
    if not index_file:
        return
    index_dir = self.absolute_index_dir
    cmd = 'cd %s && %s -f %s -w %s' % (index_dir, self.swish, index_file,
                                       query)
    self.env.log.debug('command % s =' % cmd)
    error, output = commands.getstatusoutput(cmd)
    if error:
        # Surface the tool's output as the error; the old dead 'return'
        # after this raise has been removed.
        self.env.log.error(output)
        raise Exception(output)
    # Hit lines look like: rank abs_dir/filename.meta "title" end
    pattern = re.compile(r'^(\d*) (.*)/(.*)\.meta "(.*)" (\d*)$')
    attachment_prefix = re.compile('^' + self.env.path + '/attachments/(.*)$')
    # Parse output of the command
    for line in output.split('\n'):
        line = line.strip(' ')
        if line and line[0] != '#':
            # This is not a comment... let's parse the line
            hit = pattern.match(line)
            if hit:
                sw_rank = hit.group(1)
                sw_abs_dir = hit.group(2)
                sw_filename = hit.group(3)
                sw_title = hit.group(4)
                sw_end = hit.group(5)
                m = attachment_prefix.match(sw_abs_dir)
                if m:
                    sw_dir = m.group(1)
                    file = os.path.join(sw_abs_dir, sw_filename)
                else:
                    sw_dir = sw_abs_dir
                    file = os.path.join(self.env.path, 'attachments',
                                        sw_dir, sw_filename)
                # Build variables that we'll return for this hit
                relative_url = 'attachment/%s/%s' % (sw_dir, sw_filename)
                title = 'Attachment::%s/%s' % (
                    sw_dir, urllib.url2pathname(sw_filename))
                if os.path.exists(file):
                    date = os.path.getmtime(file)
                else:
                    return
                excerpt = self._make_excerpt(file + '.meta', keywords)
                # Return the hits
                yield (relative_url, title, date, 'SearchAttachments',
                       excerpt)
def downloadSection(session, s, path):
    """Download one Moodle course section *s* (a parsed HTML node) into *path*.

    'section-0' is the course header: its resources and folder trees go
    straight under *path*. Any other section gets its own sub-directory
    named after the section title. Advances the module-level ``sections``
    iterator for non-header sections.

    NOTE(review): indentation reconstructed from a collapsed source line;
    the placement of the trailing empty-folder cleanup inside the else
    branch is an assumption — confirm against the original file.
    """
    global sections
    if s['id'] == 'section-0':
        try:
            info = s.find(class_='activity label modtype_label ').get_text()
        except AttributeError:
            pass
        else:
            saveInfo(path, info, u'')
        res = s.find_all(class_='activity resource modtype_resource ')
        for r in res:
            downloadResource(session, r, path)
        folders = s.find_all(class_='box generalbox foldertree')
        root = path
        for f in folders:
            res = f.find_all(class_='fp-filename-icon')
            # First entry is the folder's own label; the rest are files.
            label = res.pop(0).text
            path = root + u'/' + label.replace('/', '-')
            path = urllib.url2pathname(path.encode('utf-8'))
            if not os.path.exists(path):
                os.makedirs(path)
            print ' | +--' + colors.BOLD + label + colors.ENDC
            for r in res:
                downloadResource(session, r, path + u'/')
    else:
        sections.next()
        s = list(s.children)[2]
        name = s.find(class_='sectionname').contents[0].replace(
            '/', '-').strip().strip(':') + '/'
        info = ''
        info = s.find(class_='summary').get_text().strip()
        if len(info) > 0:
            if 'Thema' in name:
                #prof failed to add a proper section name <.<
                temp = info.split('\n')
                name = temp.pop(0).strip().strip(':').replace('/', '-')
                info = "\n".join(temp)
        root = path
        path = root + name + '/'
        if not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError:
                #filename too long
                name = name.split(':')[0]
                path = root + name + '/'
                if not os.path.exists(path):
                    os.makedirs(path)
        print ' | +--' + colors.BOLD + name + colors.ENDC
        if len(info) > 0:
            saveInfo(path, info, u'| ')
        res = s.find_all(class_='activity resource modtype_resource ')
        for r in res:
            downloadResource(session, r, path)
        """ links = s.find_all(class_='activity url modtype_url ')
        for l in links:
            ln = l.find(class_='instancename')
            ln.span.extract()
            saveLink(session, l.a['href'], path, ln.get_text()) """
        #remove empty folders
        if os.listdir(path) == []:
            os.rmdir(path)
def _uri_to_path(self, uri): """Returns the path corresponding of the given URI. Args: uri -- The URI to convert.""" return url2pathname(uri[7:])
def render_PUT(self, request):
    """
    .. http:put:: /downloads

    A PUT request to this endpoint will start a download from a provided URI.
    This URI can either represent a file location, a magnet link or a HTTP(S) url.
    - anon_hops: the number of hops for the anonymous download. 0 hops is equivalent to a plain download
    - safe_seeding: whether the seeding of the download should be anonymous or not (0 = off, 1 = on)
    - destination: the download destination path of the torrent
    - torrent: the URI of the torrent file that should be downloaded. This parameter is required.

        **Example request**:

        .. sourcecode:: none

            curl -X PUT http://localhost:8085/downloads
            --data "anon_hops=2&safe_seeding=1&destination=/my/dest/on/disk/&uri=file:/home/me/test.torrent

        **Example response**:

        .. sourcecode:: javascript

            {"started": True, "infohash": "4344503b7e797ebf31582327a5baae35b11bda01"}
    """
    parameters = http.parse_qs(request.content.read(), 1)
    if 'uri' not in parameters or len(parameters['uri']) == 0:
        request.setResponseCode(http.BAD_REQUEST)
        return json.dumps({"error": "uri parameter missing"})

    download_config, error = DownloadsEndpoint.create_dconfig_from_params(
        parameters)
    if error:
        request.setResponseCode(http.BAD_REQUEST)
        return json.dumps({"error": error})

    def download_added(download):
        # Deferred success callback: report the started download's infohash.
        request.write(
            json.dumps({
                "started": True,
                "infohash": download.get_def().get_infohash().encode('hex')
            }))
        request.finish()

    def on_error(error):
        # Deferred errback: surface the failure message as a 500.
        request.setResponseCode(http.INTERNAL_SERVER_ERROR)
        request.write(json.dumps({"error": error.getErrorMessage()}))
        request.finish()

    uri = parameters['uri'][0]
    if uri.startswith("file:"):
        # Local torrent file: decode percent-escapes in the path part.
        download_uri = u"file:%s" % url2pathname(unicode(uri[5:], 'utf-8'))
    else:
        download_uri = unquote_plus(unicode(uri, 'utf-8'))
    download_deferred = self.session.start_download_from_uri(
        download_uri, download_config)
    download_deferred.addCallback(download_added)
    download_deferred.addErrback(on_error)

    # Response is produced asynchronously by the callbacks above.
    return NOT_DONE_YET
def createurlfile(self, url):
    """Open (for writing) a file whose path is derived from *url*,
    creating any missing parent directories first."""
    filename = urllib.url2pathname(url)
    parent_dir = os.path.split(filename)[0]
    if parent_dir and not os.path.exists(parent_dir):
        os.makedirs(parent_dir)
    return open(filename, 'w')
def file_path(self, url):
    """
    Returns the relative path to the media file on disk for the given URL.
    """
    # Drop the base URL's path prefix, then decode percent-escapes.
    prefix_length = len(self.base_url[2])
    relative_url = url[prefix_length:]
    return urllib.url2pathname(relative_url)
def run(self):
    """Render this jinja directive: load a template (from the :file: option
    or from the directive body), render it with the configured context, and
    parse the result as nested reStructuredText.

    When the :debug: option is set, the template is printed to stdout both
    before and after rendering.
    """
    node = nodes.Element()
    node.document = self.state.document
    jinja_context_name = self.arguments[0]
    env = self.state.document.settings.env
    docname = env.docname
    template_filename = self.options.get("file")
    debug_template = self.options.get("debug")
    cxt = self.app.config.jinja_contexts[jinja_context_name]
    cxt["options"] = {"header_char": self.options.get("header_char")}
    if template_filename:
        if debug_template is not None:
            print('')
            print(
                '********** Begin Jinja Debug Output: Template Before Processing **********'
            )
            print('********** From {} **********'.format(docname))
            # Resolve the template file on disk and dump its raw contents.
            reference_uri = directives.uri(
                os.path.join('source', template_filename))
            template_path = urllib.url2pathname(reference_uri)
            encoded_path = template_path.encode(
                sys.getfilesystemencoding())
            imagerealpath = os.path.abspath(encoded_path)
            with codecs.open(imagerealpath, encoding='utf-8') as f:
                print(f.read())
            print(
                '********** End Jinja Debug Output: Template Before Processing **********'
            )
            print('')
        tpl = Environment(loader=FileSystemLoader(
            self.app.config.jinja_base, followlinks=True)).get_template(
                template_filename)
    else:
        if debug_template is not None:
            print('')
            print(
                '********** Begin Jinja Debug Output: Template Before Processing **********'
            )
            print('********** From {} **********'.format(docname))
            print('\n'.join(self.content))
            print(
                '********** End Jinja Debug Output: Template Before Processing **********'
            )
            print('')
        # No file given: treat the directive body itself as the template.
        tpl = Environment(loader=FileSystemLoader(
            self.app.config.jinja_base, followlinks=True)).from_string(
                '\n'.join(self.content))
    new_content = tpl.render(**cxt)
    if debug_template is not None:
        print('')
        print(
            '********** Begin Jinja Debug Output: Template After Processing **********'
        )
        print(new_content)
        print(
            '********** End Jinja Debug Output: Template After Processing **********'
        )
        print('')
    # Re-parse the rendered text as reST nested under this directive.
    new_content = StringList(new_content.splitlines())
    self.state.nested_parse(new_content,
                            self.content_offset,
                            node,
                            match_titles=1)
    return node.children
def collection_index(request, collection):
    """
    An Index like html response for get request on a collection.

    Builds an XHTML directory listing (name / size / last-modified table)
    for the WebDAV *collection* and returns it as a serialized string.
    """
    xhtml = etree.Element(XHTML + "html", nsmap={None: XHTML_NAMESPACE})
    xhtml.set("lang", "en")
    # Decode percent-escapes in the collection URI for display.
    name = urllib.url2pathname(collection.uri).decode('utf-8')
    # Html header, title, meta, style
    head_e = etree.SubElement(xhtml, XHTML + "head")
    meta_e = etree.SubElement(head_e, XHTML + "meta")
    meta_e.set("http-equiv", "Content-Type")
    meta_e.set("content", "text/html; charset=UTF-8")
    title_e = etree.SubElement(head_e, XHTML + "title")
    # NOTE(review): "Direcory" is a user-visible typo for "Directory";
    # left byte-identical here, fix separately.
    title_e.text = "Direcory Index of %s" % name
    style_e = etree.SubElement(head_e, XHTML + "style")
    style_e.set("type", "text/css")
    style_e.text = "body { Font-family: arial }"
    body_e = etree.SubElement(xhtml, XHTML + "body")
    # Page header
    h1_e = etree.SubElement(body_e, XHTML + "h1")
    h1_e.text = "Direcory Index of %s" % name
    # Table
    table_e = etree.SubElement(body_e, XHTML + "table")
    # Table header
    row = etree.SubElement(table_e, XHTML + "tr")
    col = etree.SubElement(row, XHTML + "th")
    col.text = "Name"
    col = etree.SubElement(row, XHTML + "th")
    col.text = "Size"
    col = etree.SubElement(row, XHTML + "th")
    col.text = "Last Modified"
    # Line seperator
    row = etree.SubElement(table_e, XHTML + "tr")
    col = etree.SubElement(row, XHTML + "td")
    col.set("colspan", "3")
    th = etree.SubElement(col, XHTML + "hr")
    # Parent row reference
    if (collection.get_parent() != ''):
        row = etree.SubElement(table_e, XHTML + "tr")
        col = etree.SubElement(row, XHTML + "td")
        col.set("colspan", "3")
        href = etree.SubElement(col, "a")
        parent_uri = request.protocol + '://' + \
            request.host + '/' + \
            collection.get_parent()
        href.set("href", parent_uri)
        href.text = ".."
    # Table rows
    for obj in collection.childs():
        row = etree.SubElement(table_e, XHTML + "tr")
        col = etree.SubElement(row, XHTML + "td")
        href = etree.SubElement(col, "a")
        href.set("href", obj.uri)
        href.text = os.path.basename(obj.filename).decode('utf-8')
        col = etree.SubElement(row, XHTML + "td")
        # Collections show '-' instead of a byte size.
        if obj.is_collection():
            col.text = '-'
        else:
            col.text = obj.getcontentlength().text
        col = etree.SubElement(row, XHTML + "td")
        col.text = obj.lastmodified()
    # Line seperator
    row = etree.SubElement(table_e, XHTML + "tr")
    col = etree.SubElement(row, XHTML + "td")
    col.set("colspan", "3")
    th = etree.SubElement(col, XHTML + "hr")
    # Page footer
    version_e = etree.SubElement(body_e, XHTML + "h4")
    version_e.text = "Tornado/%s Server/%s on %s" % (
        server_version, dav_version,
        os.uname()[0] + ' ' + os.uname()[3])
    return etree.tostring(xhtml, pretty_print=True)
def __init__(self, datapath, filename, overwrite=False):
    """Store the data location and compute the absolute target file path."""
    parsed_path = urlparse.urlparse(datapath).path
    self._datapath = datapath
    self._filename = filename
    # Join the decoded directory path with the file name.
    self._abspath = os.path.join(urllib.url2pathname(parsed_path), filename)
    self._overwrite = overwrite
    self._checked = False
def on_terminal_drag_data_received(self, widget, context, x, y, selection,
                                   target_type, time):
    """Feed dropped local-file URIs into the terminal as quoted paths."""
    uris = selection.data.strip('\r\n\x00').split()
    for uri in uris:
        if uri[:7] == "file://":
            #local file
            # Decode the URI and escape single quotes for the shell.
            local_path = urllib.url2pathname(uri[7:]).replace("'", "'\"'\"'")
            widget.feed_child(" '%s' " % local_path)
print 'quote_plus():', urllib.quote_plus(url)
# quote_plus() quotes a larger set of characters than quote().
# To reverse the quoting, use unquote() or unquote_plus() as appropriate.
print urllib.unquote('http%3A%2F%2Flocalhost%3A8000%2F%7ESterncat%2F')
print urllib.unquote_plus('http%3A%2F%2Flocalhost%3A8000%2F%7ESterncat%2F')
# The encoded value is converted back into a regular URL string.

# 12.3.3 Paths and URLs
print '\n12.3.3路径与URL'
# Some operating systems use different separators between the parts of a
# path in local files versus URLs. For portable code, use pathname2url()
# and url2pathname() to convert back and forth.
import os
from urllib import pathname2url, url2pathname

print '== Default =='
path = '/a/b/c'
print 'Original:', path
print 'URL :', pathname2url(path)
print 'Path :', url2pathname('/d/e/f')
print

from nturl2path import pathname2url, url2pathname

print '== Windows, without drive letter =='
path = r'\a\b\c'
print 'Original:', path
print 'URL :', pathname2url(path)
print 'Path :', url2pathname('/d/e/f')
print

print '== Windows, with drive letter =='
path = r'C:\a\b\c'
print 'Original:', path
print 'URL :', pathname2url(path)
print 'Path :', url2pathname('/d/e/f')
# Two Windows examples: path prefixes with and without a drive letter.
def render_GET(self, request):
    """
    .. http:get:: /torrentinfo

    A GET request to this endpoint will return information from a torrent found at a provided URI.
    This URI can either represent a file location, a magnet link or a HTTP(S) url.
    - torrent: the URI of the torrent file that should be downloaded. This parameter is required.

        **Example request**:

            .. sourcecode:: none

                curl -X PUT http://localhost:8085/torrentinfo?torrent=file:/home/me/test.torrent

        **Example response**:

            .. sourcecode:: javascript

                {"metainfo": <torrent metainfo dictionary>}
    """
    metainfo_deferred = Deferred()

    def on_got_metainfo(metainfo):
        # Final callback: validate the metainfo, optionally persist it,
        # and write the JSON response.
        if not isinstance(metainfo, dict):
            self._logger.warning("Received metainfo is not a dictionary")
            request.setResponseCode(http.INTERNAL_SERVER_ERROR)
            request.write(json.dumps({"error": 'invalid response'}))
            self.finish_request(request)
            return
        if self.infohash:
            # Save the torrent to our store
            try:
                self.session.save_collected_torrent(
                    self.infohash, bencode(metainfo))
            except TypeError:
                # TODO(Martijn): in libtorrent 1.1.1, bencode throws a TypeError which is a known bug
                pass
        request.write(
            json.dumps({"metainfo": metainfo}, ensure_ascii=False))
        self.finish_request(request)

    def on_metainfo_timeout(_):
        request.setResponseCode(http.REQUEST_TIMEOUT)
        request.write(json.dumps({"error": "timeout"}))
        self.finish_request(request)

    def on_lookup_error(failure):
        # Only network-level failures are handled here; others propagate.
        failure.trap(ConnectError, DNSLookupError)
        request.setResponseCode(http.INTERNAL_SERVER_ERROR)
        request.write(json.dumps({"error": failure.getErrorMessage()}))
        self.finish_request(request)

    if 'uri' not in request.args or len(request.args['uri']) == 0:
        request.setResponseCode(http.BAD_REQUEST)
        return json.dumps({"error": "uri parameter missing"})

    uri = unicode(request.args['uri'][0], 'utf-8')
    if uri.startswith('file:'):
        # Local torrent file: read and decode it synchronously.
        try:
            filename = url2pathname(uri[5:])
            metainfo_deferred.callback(bdecode(fix_torrent(filename)))
        except TypeError:
            request.setResponseCode(http.INTERNAL_SERVER_ERROR)
            return json.dumps(
                {"error": "error while decoding torrent file"})
    elif uri.startswith('http'):

        def _on_loaded(tdef):
            metainfo_deferred.callback(bdecode(tdef))

        http_get(uri.encode('utf-8')).addCallback(_on_loaded).addErrback(
            on_lookup_error)
    elif uri.startswith('magnet'):
        self.infohash = parse_magnetlink(uri)[1]
        if self.infohash is None:
            request.setResponseCode(http.BAD_REQUEST)
            return json.dumps({"error": "missing infohash"})
        if self.session.has_collected_torrent(self.infohash):
            # Already collected: answer from the local store without a lookup.
            try:
                tdef = TorrentDef.load_from_memory(
                    self.session.get_collected_torrent(self.infohash))
            except ValueError as exc:
                request.setResponseCode(http.INTERNAL_SERVER_ERROR)
                return json.dumps(
                    {"error": "invalid torrent file: %s" % str(exc)})
            on_got_metainfo(tdef.get_metainfo())
            return NOT_DONE_YET
        self.session.lm.ltmgr.get_metainfo(
            uri,
            callback=metainfo_deferred.callback,
            timeout=20,
            timeout_callback=on_metainfo_timeout,
            notify=True)
    else:
        request.setResponseCode(http.BAD_REQUEST)
        return json.dumps({"error": "invalid uri"})

    metainfo_deferred.addCallback(on_got_metainfo)
    return NOT_DONE_YET
def visit_toctree(self, node):
    """Render a toctree node as a grid of linked cards, one row per entry
    that has its own toc; always raises nodes.SkipNode when done.

    NOTE(review): reconstructed from a collapsed source line — the exact
    whitespace inside the card template string is an assumption; confirm
    against the original file before relying on byte-exact output.
    """
    # div class=row {{ section_type }}
    #   h2 class=col-sm-12
    #     {{ section title }}
    #   div class=col-sm-6 col-md-3
    #     figure class=card
    #       a href=current_link style=background-image: document-image-attribute class=card-img
    #         figcaption
    #           {{ card title }}
    env = self.builder.env
    conf = self.builder.app.config
    for title, ref in ((e[0], e[1]) for e in node['entries']):
        # external URL, no toc, can't recurse into
        if ref not in env.tocs:
            continue
        toc = env.tocs[ref].traverse(addnodes.toctree)
        classes = env.metadata[ref].get('types', 'tutorials')
        classes += ' toc-single-entry' if not toc else ' toc-section'
        self.body.append(self.starttag(node, 'div', CLASS="row " + classes))
        self.body.append(u'<h2 class="col-sm-12">')
        self.body.append(title if title
                         else util.nodes.clean_astext(env.titles[ref]))
        self.body.append(u'</h2>')
        # A single-entry toc renders itself; a section renders its children.
        entries = [(title, ref)] if not toc else ((e[0], e[1])
                                                  for e in toc[0]['entries'])
        for subtitle, subref in entries:
            baseuri = self.builder.get_target_uri(node['parent'])
            # Choose a cover image: per-document banner, configured
            # external cover, or the configured default.
            if subref in env.metadata:
                cover = env.metadata[subref].get('banner',
                                                 conf.odoo_cover_default)
            elif subref in conf.odoo_cover_external:
                cover = conf.odoo_cover_external[subref]
            else:
                cover = conf.odoo_cover_default_external
            if cover:
                banner = '_static/' + cover
                base, ext = os.path.splitext(banner)
                # Prefer a pre-generated *.small.* variant when present.
                small = "{}.small{}".format(base, ext)
                if os.path.isfile(urllib.url2pathname(small)):
                    banner = small
                style = u"background-image: url('{}')".format(
                    util.relative_uri(baseuri, banner) or '#')
            else:
                style = u''
            self.body.append(u"""
<div class="col-sm-6 col-md-3">
    <figure class="card">
        <a href="{link}" class="card-img">
            <span style="{style}"></span>
            <figcaption>{title}</figcaption>
        </a>
    </figure>
</div>
""".format(
                link=subref if util.url_re.match(subref) else util.relative_uri(
                    baseuri, self.builder.get_target_uri(subref)),
                style=style,
                title=subtitle if subtitle
                else util.nodes.clean_astext(env.titles[subref]),
            ))
        self.body.append(u'</div>')
    raise nodes.SkipNode
def expand_file(pattern, metadata):
    """
    Expands the pattern to a file name according to the infomation of a music

    The following are supported place holder in the pattern:

    - %t: Title of the track. 'title' in metadata
    - %p: Performer (artist) of the music. 'artist' in metadata
    - %a: Album of the music. 'album' in metadata
    - %n: Track number of the music. 'tracknumber' in metadata
    - %f: Filename without extension of the music. 'location' in metadata.
    - %%: The `%' punctuation

    Arguments:
    - `pattern`: The pattern to expand.
    - `metadata`: A dict (or an object with equally-named attributes)
      representing metadata. Useful keys are listed above.

    If the pattern cannot be expand, raise an PatternException. Otherwise
    return the expended pattern.

    >>> metadata = {'artist': 'Foo',
    ...             'title': 'Bar',
    ...             'tracknumber': '1',
    ...             'album': 'Album',
    ...             'location': 'file:///%E6%AD%8C%E6%9B%B2/%E7%9A%84/%E5%9C%B0%E5%9D%80.mp3'}
    >>> expand_file('%p - %t', metadata)
    'Foo - Bar'
    >>> expand_file('foobar', metadata)
    'foobar'
    >>> expand_file('%something else', metadata)
    '%something else'
    >>> expand_file('%%a - %%t', metadata)
    '%a - %t'
    >>> expand_file('%%%', metadata)
    '%%'
    >>> expand_file('%n - %a:%p,%t', metadata)
    '1 - Album:Foo,Bar'
    >>> expand_file('%t', {})
    Traceback (most recent call last):
        ...
    PatternException: 'title not in metadata'
    """
    # BUG FIX: the original read metadata only via attribute access
    # (getattr / metadata.location) and used the key 'tracknum', while its
    # own doctests pass a plain dict with 'tracknumber' — so it failed its
    # own examples. Accept both access styles and both track-number names.
    def _get(meta, key):
        # Mapping first (matches the doctests), attribute as fallback.
        try:
            return meta[key]
        except (TypeError, KeyError):
            return getattr(meta, key, None)

    keys = {
        't': 'title',
        'p': 'artist',
        'a': 'album',
        'n': 'tracknumber',
    }
    start = 0
    parts = []
    while start < len(pattern):
        end = pattern.find('%', start)
        if end > -1:
            parts.append(pattern[start:end])
            has_tag = False
            if end + 1 < len(pattern):
                tag = pattern[end + 1]
                if tag == '%':
                    has_tag = True
                    parts.append('%')
                elif tag == 'f':
                    location = _get(metadata, 'location')
                    if not location:
                        raise PatternException(
                            'Location not found in metadata')
                    uri = urlparse.urlparse(location)
                    if uri.scheme != '' and not uri.scheme in ['file']:
                        raise PatternException('Unsupported file scheme %s'
                                               % uri.scheme)
                    if uri.scheme == '':
                        path = uri.path
                    else:
                        # file:// URI: decode percent-escapes to a path.
                        path = urllib.url2pathname(uri.path)
                    basename = os.path.basename(path)
                    root, ext = os.path.splitext(basename)
                    has_tag = True
                    parts.append(root)
                elif tag in keys:
                    value = _get(metadata, keys[tag])
                    if value is None and tag == 'n':
                        # Fall back to the legacy attribute name.
                        value = _get(metadata, 'tracknum')
                    if not value:
                        raise PatternException('%s not in metadata'
                                               % keys[tag])
                    has_tag = True
                    parts.append(value)
            if has_tag:
                start = end + 2
            else:
                # Unknown tag: keep the literal '%' and continue after it.
                start = end + 1
                parts.append('%')
        else:
            parts.append(pattern[start:])
            break
    return ''.join(parts)