def renameit(path, fromenc, toenc):
    dest = path
    try:
        if os.name != 'nt':
            ansi = path.decode(fnenc).encode(fromenc)
        else:
            ansi = path.encode(fromenc)
    except UnicodeDecodeError:
        ansi = path
    except UnicodeEncodeError:
        if fromenc == toenc:
            ansi = path.encode(toenc, 'replace').replace('?', '_')
        else:
            print >> sys.stderr, 'Not of encoding %s: ' % (fromenc),
            writeunicode(path, sys.stderr)
            raise
    global errors
    try:
        dest = unicode(ansi, toenc, errors)
    except UnicodeDecodeError:
        print >> sys.stderr, 'Cannot convert from %s to %s: ' % (fromenc, toenc),
        writeunicode(path, sys.stderr)
        raise
    if os.name != 'nt':
        dest = dest.encode(fnenc, errors)
    return (path, dest)
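# Hedged usage sketch for renameit() above (Python 2): the module globals
# `fnenc` and `errors` and the helper writeunicode() are assumptions that
# stand in for whatever the surrounding script defines.
#
#     fnenc = sys.getfilesystemencoding() or 'utf-8'
#     errors = 'replace'
#     src, dest = renameit('caf\xe9.txt', 'latin-1', 'utf-8')
#     if dest != src:
#         os.rename(src, dest)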
def addMetaData(path, job, result):
    """ Use this method to add meta data to the image. Due to a bug in
    exiv2, its Python wrapper pyexiv2 is of no use to us. This bug
    (http://dev.exiv2.org/issues/762) prevents us from working on
    multi-page TIFF files. Instead, we use a separate tool called
    exiftool to write meta data. Currently, there seems to be no better
    solution than this. If the tool is not found, no meta data is
    produced and no error is raised.
    """
    # Add resolution information in pixel per nanometer. The stack info
    # available is nm/px and refers to a zoom-level of zero.
    res_x_scaled = job.ref_stack.resolution.x * 2**job.zoom_level
    res_y_scaled = job.ref_stack.resolution.y * 2**job.zoom_level
    res_x_nm_px = 1.0 / res_x_scaled
    res_y_nm_px = 1.0 / res_y_scaled
    res_args = "-EXIF:XResolution={0} -EXIF:YResolution={1} -EXIF:" \
        "ResolutionUnit=None".format(str(res_x_nm_px), str(res_y_nm_px))
    # ImageJ specific meta data to allow easy embedding of units and
    # display options.
    n_images = len(result)
    ij_version = "1.45p"
    unit = "nm"
    newline = "\n"
    # Example (each '.' stands for an actual line break):
    # ImageJ=1.45p.images={0}.channels=1.slices=2.hyperstack=true.mode=color.unit=micron.finterval=1.spacing=1.5.loop=false.min=0.0.max=4095.0.
    ij_data = "ImageJ={1}{0}unit={2}{0}".format(newline, ij_version, unit)
    if n_images > 1:
        n_channels = len(job.stack_mirrors)
        if n_images % n_channels != 0:
            raise ValueError("Meta data creation: the number of images "
                             "modulo the channel count is not zero")
        n_slices = n_images / n_channels
        ij_data += "images={1}{0}channels={2}{0}slices={3}{0}hyperstack=true{0}mode=color{0}".format(
            newline, str(n_images), str(n_channels), str(n_slices))
    ij_args = "-EXIF:ImageDescription=\"{0}\"".format(ij_data)
    # Information about the software used
    sw_args = "-EXIF:Software=\"Created with CATMAID and GraphicsMagick, " \
        "processed with exiftool.\""
    # Build up the final tag changing arguments for each slice
    tag_args = "{0} {1} {2}".format(res_args, ij_args, sw_args)
    per_slice_tag_args = []
    for i in range(0, n_images):
        # The string EXIF gets replaced for every image with IFD<N>
        slice_args = tag_args.replace("EXIF", "IFD" + str(i))
        per_slice_tag_args.append(slice_args)
    final_tag_args = " ".join(per_slice_tag_args)
    # Create the final call and execute
    call = "exiftool -overwrite_original {0} {1}".format(final_tag_args, path)
    os.system(call)
    # Re-save the image with GraphicsMagick, otherwise ImageJ won't read
    # the images directly.
    images = ImageList()
    images.readImages(path.encode('ascii', 'ignore'))
    images.writeImages(path.encode('ascii', 'ignore'))
def extract_cpio_archive(path, destdir):
    cmd = ['cpio', '--no-absolute-filenames', '--quiet', '-idF',
           os.path.abspath(path.encode('utf-8'))]
    logger.debug("extracting %s into %s", path.encode('utf-8'), destdir)
    p = subprocess.Popen(cmd, shell=False, cwd=destdir)
    p.communicate()
    p.wait()
    if p.returncode != 0:
        logger.error('cpio exited with error code %d', p.returncode)
def printstorage(stg, basepath=''):
    names = list(stg)
    names.sort()
    for name in names:
        path = basepath + name
        item = stg[name]
        if is_storage(item):
            printstorage(item, path + '/')
        elif is_stream(item):
            print path.encode('string_escape')
def hist_checking(control_hist_location, cur_hist_location, path, technique):
    with root_open(control_hist_location) as control_file, \
            root_open(cur_hist_location) as cur_file:
        cur_hist = cur_file.get(path.encode('ascii', 'ignore'))
        control_hist = control_file.get(path.encode('ascii', 'ignore'))
        if technique == 'Kolmogorov-Smirnov':
            p_value = cur_hist.KolmogorovTest(control_hist)
        elif technique == 'chi_square':
            p_value = cur_hist.Chi2Test(control_hist)
        else:
            # Guard against p_value being unbound below.
            raise ValueError('Unknown comparison technique: %s' % technique)
        return 1. - p_value
def printstorage(stg, basepath=""): names = list(stg) names.sort() for name in names: path = basepath + name item = stg[name] if is_storage(item): printstorage(item, path + "/") elif is_stream(item): print path.encode("string_escape")
def recv_file_error_handler(self, exception, path):
    handler = {
        'IsADirectoryError':
            b'remote: ' + path.encode() + b': Is a directory.\n',
        'PermissionError':
            b'remote: ' + path.encode() + b': Permission denied.\n',
        'FileNotFoundError':
            b'remote: 550 ' + path.encode() + b': No such file or directory.\n'
    }
    return handler.get(exception)
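# Hedged usage sketch for recv_file_error_handler() above: map a caught
# exception's class name to the canned error line. `server` and `channel`
# are hypothetical placeholders, not part of the original snippet.
#
#     try:
#         data = open(requested_path, 'rb').read()
#     except OSError as exc:
#         msg = server.recv_file_error_handler(type(exc).__name__,
#                                              requested_path)
#         if msg is not None:
#             channel.sendall(msg)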
def getFile(self, path):
    path = self.normalizePath(path.encode('utf-8'))
    ext = self.getExtension(path=path)
    result = {'ext': ext}
    self.request.response.setHeader('Content-Type', 'application/json')
    if ext in self.imageExtensions:
        obj = self.getObject(path)
        info = self.getInfo(obj)
        info['preview'] = path
        result['info'] = self.previewTemplate(info=info)
        return json.dumps(result)
    else:
        data = self.context.openFile(path)
        if hasattr(data, 'read'):
            data = data.read()
        result['contents'] = str(data)
        try:
            return json.dumps(result)
        except UnicodeDecodeError:
            # The file we're trying to get isn't unicode encodable, so we
            # just return the file information, not the content.
            del result['contents']
            obj = self.getObject(path)
            info = self.getInfo(obj)
            result['info'] = self.previewTemplate(info=info)
            return json.dumps(result)
def __init__(self, path, authz, log):
    self.path = path  # might be needed by __del__()/close()
    self.log = log
    if core.SVN_VER_MAJOR < 1:
        raise TracError("Subversion >= 1.0 required: Found %d.%d.%d" %
                        (core.SVN_VER_MAJOR, core.SVN_VER_MINOR,
                         core.SVN_VER_MICRO))
    self.pool = Pool()
    # Remove any trailing slash or else subversion might abort
    if isinstance(path, unicode):
        path = path.encode('utf-8')
    path = os.path.normpath(path).replace('\\', '/')
    self.path = repos.svn_repos_find_root_path(path, self.pool())
    if self.path is None:
        raise TracError("%s does not appear to be a Subversion repository."
                        % path)
    self.repos = repos.svn_repos_open(self.path, self.pool())
    self.fs_ptr = repos.svn_repos_fs(self.repos)
    uuid = fs.get_uuid(self.fs_ptr, self.pool())
    name = 'svn:%s:%s' % (uuid, _from_svn(path))
    Repository.__init__(self, name, authz, log)
    if self.path != path:
        self.scope = path[len(self.path):]
        if not self.scope[-1] == '/':
            self.scope += '/'
    else:
        self.scope = '/'
    assert self.scope[0] == '/'
    self.clear()
def request(host, path, url_params=None):
    url_params = url_params or {}
    url = 'http://{0}{1}?'.format(host, urllib.quote(path.encode('utf8')))
    consumer = oauth2.Consumer(CONSUMER_KEY, CONSUMER_SECRET)
    oauth_request = oauth2.Request(method="GET", url=url,
                                   parameters=url_params)
    oauth_request.update({
        'oauth_nonce': oauth2.generate_nonce(),
        'oauth_timestamp': oauth2.generate_timestamp(),
        'oauth_token': TOKEN,
        'oauth_consumer_key': CONSUMER_KEY
    })
    token = oauth2.Token(TOKEN, TOKEN_SECRET)
    oauth_request.sign_request(oauth2.SignatureMethod_HMAC_SHA1(),
                               consumer, token)
    signed_url = oauth_request.to_url()
    print u'Querying {0} ...'.format(url)
    conn = urllib2.urlopen(signed_url, None)
    response_data = conn.read()
    try:
        response = json.loads(response_data)
    finally:
        conn.close()
    return response
def request(host, path, url_params=None):
    url_params = url_params or {}
    url = 'http://{0}{1}?'.format(host, urllib.quote(path.encode('utf8')))
    consumer = oauth2.Consumer(CONSUMER_KEY, CONSUMER_SECRET)
    oauth_request = oauth2.Request(method="GET", url=url,
                                   parameters=url_params)
    oauth_request.update({
        'oauth_nonce': oauth2.generate_nonce(),
        'oauth_timestamp': oauth2.generate_timestamp(),
        'oauth_token': TOKEN,
        'oauth_consumer_key': CONSUMER_KEY
    })
    token = oauth2.Token(TOKEN, TOKEN_SECRET)
    oauth_request.sign_request(oauth2.SignatureMethod_HMAC_SHA1(),
                               consumer, token)
    signed_url = oauth_request.to_url()
    print signed_url
    print u'Querying {0} ...'.format(url)
    conn = urllib2.urlopen(signed_url, None)
    response_data = conn.read()
    try:
        response = json.loads(response_data)
    finally:
        conn.close()
    return response
def __init__(self, pool, path, txn):
    self.pool = pool
    repos_ptr = repos.open(path, pool)
    self.fs_ptr = repos.fs(repos_ptr)
    self.look = SVNLook(self.pool, path, 'changed', None, txn)
    # Get the list of files and directories which have been added.
    changed = self.look.cmd_changed()
    if debug:
        for item in changed.added + changed.addeddir:
            print >> sys.stderr, 'Adding: ' + item.encode('utf-8')
    if self.numadded(changed) != 0:
        # Find the part of the file tree which they live in.
        changedroot = self.findroot(changed)
        if debug:
            print >> sys.stderr, 'Changedroot is ' + changedroot.encode('utf-8')
        # Get that part of the file tree.
        tree = self.look.cmd_tree(changedroot)
        if debug:
            print >> sys.stderr, 'File tree:'
            for path in tree.paths.keys():
                print >> sys.stderr, '  [%d] %s len %d' % (
                    tree.paths[path], path.encode('utf-8'), len(path))
        # If a member of the paths hash has a count of more than one,
        # there is a case conflict.
        for path in tree.paths.keys():
            if tree.paths[path] > 1:
                # Find out if this is one of the files being added; if
                # not, ignore it.
                addedfile = self.showfile(path, changedroot, changed)
                if addedfile != '':
                    print >> sys.stderr, "Case conflict: " + addedfile.encode('utf-8') \
                        + "\nA file with the same filename but different case already exists!"
                    globals()["exitstat"] = 1
def __init__(self, path, authz, log, options={}):
    self.log = log
    self.options = options
    self.pool = Pool()
    if isinstance(path, unicode):
        path = path.encode('utf-8')
    path = os.path.normpath(path).replace('\\', '/')
    self.path = repos.svn_repos_find_root_path(path, self.pool())
    if self.path is None:
        raise Exception('%(path)s does not appear to be a Subversion '
                        'repository.' % {'path': path})
    self.repos = repos.svn_repos_open(self.path, self.pool())
    self.fs_ptr = repos.svn_repos_fs(self.repos)
    uuid = fs.get_uuid(self.fs_ptr, self.pool())
    name = 'svn:%s:%s' % (uuid, _from_svn(path))
    if self.path != path:
        self.scope = path[len(self.path):]
        if not self.scope[-1] == '/':
            self.scope += '/'
    else:
        self.scope = '/'
    assert self.scope[0] == '/'
    self.clear()
    self.youngest_rev = property(lambda x: x.get_youngest_rev())
def mock_repository(mock_config):
    '''
    Create a dummy mercurial repository
    '''
    # Init repo
    hglib.init(mock_config.repo_dir)

    # Init clean client
    client = hglib.open(mock_config.repo_dir)

    # Add test.txt file
    path = os.path.join(mock_config.repo_dir, 'test.txt')
    with open(path, 'w') as f:
        f.write('Hello World\n')

    # Initial commit
    client.add(path.encode('utf-8'))
    client.commit(b'Hello World', user=b'Tester')

    # Write dummy 3rd party file
    third_party = os.path.join(mock_config.repo_dir, mock_config.third_party)
    with open(third_party, 'w') as f:
        f.write('test/dummy')

    # Remove pull capabilities
    client.pull = Mock(return_value=True)

    return client
def mimefile(inputs, outputs, options={}, callbacks=[]):
    try:
        mfileid = inputs[0]
        path = _get_mfile(mfileid)
        m = magic.open(magic.MAGIC_MIME)
        m.load()
        upath = path.encode("utf-8")
        result = m.file(upath)
        mimetype = result.split(';')[0]
        from dataservice.models import MFile
        mf = MFile.objects.get(id=mfileid)
        mf.mimetype = mimetype
        mf.save()
        for callback in callbacks:
            logging.info("Mimefile callback - %s" % callback)
            subtask(callback).delay()
        return {"success": True, "message": "Mime detection successful",
                "mimetype": mimetype}
    except Exception as e:
        logging.info("Error with mime %s" % e)
        import sys
        import traceback
        traceback.print_exc(file=sys.stdout)
        raise e
def unicode_to_path(path):
    """
    Convert a Unicode string into a file path. We don't do any of the
    string replace nonsense that unicode_to_filename does. We also
    convert separators into the appropriate type for the platform.
    """
    return utf8_to_filename(path.encode('utf8')).replace('/', os.path.sep)
def wk_dbox_delete(self, user, path, callback=None):
    path = self._unify_path(path)
    # make dropbox request
    access_token = user.get_dropbox_token()
    post_args = {
        'root': DropboxMixin.ACCESS_TYPE,
        'path': path.encode(DEFAULT_ENCODING),
    }
    response = yield gen.Task(self.dropbox_request,
                              "api", "/1/fileops/delete",
                              access_token=access_token,
                              post_args=post_args)
    if _check_bad_response(response, callback):
        return
    file_meta = json.loads(response.body)
    is_dir, f_path = file_meta['is_dir'], file_meta['path']
    if is_dir:
        # TODO: try to avoid full collection scan
        # to include root_path.startswith(f_path) elements
        yield motor.Op(DropboxFile.remove_entries, self.db,
                       {"$or": [
                           {"_id": f_path},
                           {"root_path": {'$regex': '^%s.*' % f_path,
                                          '$options': 'i'}}]},
                       collection=user.name)
    else:
        yield motor.Op(DropboxFile.remove_entries, self.db,
                       {"_id": f_path}, collection=user.name)
    callback({'status': ErrCode.ok})
def getByteCode(self, path, cached=True):
    """Load a python file and return the compiled byte-code.

    @param path: The path of the python file to load.
    @type path: string
    @param cached: True if the byte code should be cached to a separate
    file for quicker loading next time.
    @type cached: bool

    @return: A code object that can be executed with the python 'exec'
    statement.
    @rtype: C{types.CodeType}
    """
    byteCode = self._byteCodeCache.get(path, None)
    if byteCode is None:
        # Cache the code in a user-supplied directory if provided.
        if self.scriptCachePath is not None:
            assert cake.path.isAbs(path)  # Need an absolute path to get a unique hash.
            pathDigest = cake.hash.sha1(path.encode("utf8")).digest()
            pathDigestStr = cake.hash.hexlify(pathDigest)
            cacheFilePath = cake.path.join(
                self.scriptCachePath,
                pathDigestStr[0],
                pathDigestStr[1],
                pathDigestStr[2],
                pathDigestStr
            )
            cake.filesys.makeDirs(cake.path.dirName(cacheFilePath))
        else:
            cacheFilePath = None
        byteCode = cake.bytecode.loadCode(path, cfile=cacheFilePath,
                                          cached=cached)
        self._byteCodeCache[path] = byteCode
    return byteCode
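# Hedged usage sketch for getByteCode() above: compile (or fetch from the
# cache) once, then run the code object. `engine` is a hypothetical owner
# object carrying the attributes the method expects.
#
#     code = engine.getByteCode('/abs/path/to/build.cake')
#     exec(code, {'__name__': '__cake__'})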
def get_canonical_filesystem_path(name):
    gfpnbh = ctypes.windll.kernel32.GetFinalPathNameByHandleW
    close_handle = ctypes.windll.kernel32.CloseHandle
    h = open_file_win(name)
    try:
        numwchars = 1024
        while True:
            buf = ctypes.create_unicode_buffer(numwchars)
            result = gfpnbh(h, buf, numwchars, 0)
            if result == 0:
                raise Exception("unknown error while normalizing path")
            if result <= numwchars:
                # The first four chars are the \\?\ prefix.
                path = buf.value[4:].replace("\\", "/")
                if compat.PYTHON2:
                    path = path.encode("utf8")
                return path
            # Not big enough; the result is the amount we need
            numwchars = result + 1
    finally:
        close_handle(h)
def delete(self, path):
    """Delete the item at the given path.
    """
    path = path.encode('utf-8')
    npath = self.normalizePath(path)
    parentPath = '/'.join(npath.split('/')[:-1])
    name = npath.split('/')[-1]
    code = 0
    error = ''
    try:
        parent = self.getObject(parentPath)
    except KeyError:
        error = translate(_(u'filemanager_invalid_parent',
                            default=u"Parent folder not found."),
                          context=self.request)
        code = 1
    else:
        try:
            del parent[name]
        except KeyError:
            error = translate(_(u'filemanager_error_file_not_found',
                                default=u"File not found."),
                              context=self.request)
            code = 1
    self.request.response.setHeader('Content-Type', 'application/json')
    return json.dumps({
        'path': self.normalizeReturnPath(path),
        'error': error,
        'code': code,
    })
def dispatch_request(target_admin_unit_id, viewname, path='', data={},
                     headers={}):
    """Sends a request to another zope instance and returns a response
    stream.

    Authentication: the request carries an attribute '__cortex_ac' which
    is set to the username of the current user.

    :target_admin_unit_id: id of the target AdminUnit
    :viewname: name of the view to call on the target
    :path: context path relative to site root
    :data: dict of additional data to send
    :headers: dict of additional headers to send
    """
    if isinstance(viewname, unicode):
        viewname = viewname.encode('utf-8')
    if isinstance(path, unicode):
        path = path.encode('utf-8')
    if get_current_admin_unit().id() == target_admin_unit_id:
        return _local_request(viewname, path, data)
    else:
        return _remote_request(target_admin_unit_id, viewname, path, data,
                               headers)
def launch_open_file(self, path, cwd=None):
    try:
        win32api.ShellExecute(0, "open",
                              path.encode(sys.getfilesystemencoding()),
                              None, None, 1)
    except:
        raise NotImplementedError()
def determine_tar_file_size(path):
    '''Determine the size of a file within a GNU tar archive

    Args:
        path: The file path. The path may be of type str which is encoded
            as UTF-8 or of type bytes in the encoding of the tar archive.

    Returns:
        The determined size of the file
    '''
    # 512 byte header
    size = 512
    try:
        path = path.encode()
    except AttributeError:
        # already bytes
        pass

    # GNU tar path string size workaround
    path_size = len(path)
    if path_size > 100:
        # @LongLink/@LongName header + path string fit within
        # blocks of 512 bytes
        size += 512 + 512 * int(math.ceil(path_size / 512))

    # Not within the following if statement to raise OSError if not exist
    file_size = os.path.getsize(path)
    if os.path.isfile(path):
        # file data fit within blocks of 512 bytes
        size += 512 * int(math.ceil(file_size / 512))

    return size
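# Worked example for determine_tar_file_size(), assuming a 1000-byte file
# whose path is 120 bytes long:
#   header:                    512
#   @LongLink (path > 100):    512 + 512 * ceil(120 / 512) = 1024
#   data blocks:               512 * ceil(1000 / 512)      = 1024
#   total size in the archive: 512 + 1024 + 1024           = 2560 bytes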
def obj_get(self, request=None, **kwargs):
    # get the system dir and list child folders
    base = staging_settings.STAGING_IMPORT_DIR
    path = kwargs['pk'].decode('hex')
    system_dir = os.path.join(base, path)

    parent = {}
    parent_path = os.path.dirname(path)
    if parent_path == "":
        parent_path = "./"
    parent['pk'] = parent_path.encode('hex')
    parent_obj = StagingFileObject(initial=parent)

    data = {}
    data['path'] = path
    data['pk'] = path.encode('hex')
    data['parent'] = parent['pk']
    data['name'] = os.path.basename(path)
    data['is_dir'] = os.path.isdir(system_dir)

    obj = StagingFileObject(initial=data)

    actions = {}
    if AUVImporter.dependency_check(system_dir):
        actions['auvcreate'] = self.get_auv_create_uri(obj)
    if DeploymentImporter.dependency_check(system_dir):
        actions['deploymentcreate'] = self.get_deployment_create_uri(obj)
    obj.actions = actions

    return obj
def addManualInstance(
    self,
    name: str,
    address: str,
    port: int,
    path: str,
    useHttps: bool = False,
    userName: str = "",
    password: str = "",
) -> None:
    self._manual_instances[name] = {
        "address": address,
        "port": port,
        "path": path,
        "useHttps": useHttps,
        "userName": userName,
        "password": password,
    }
    self._preferences.setValue("octoprint/manual_instances",
                               json.dumps(self._manual_instances))

    properties = {
        b"path": path.encode("utf-8"),
        b"useHttps": b"true" if useHttps else b"false",
        b"userName": userName.encode("utf-8"),
        b"password": password.encode("utf-8"),
        b"manual": b"true",
    }

    if name in self._instances:
        self.removeInstance(name)

    self.addInstance(name, address, port, properties)
    self.instanceListChanged.emit()
def getFolder(self, path, getSizes=False):
    """Returns a dict of file and folder objects representing the
    contents of the given directory (indicated by a "path" parameter).
    The values are dicts as returned by getInfo().

    A boolean parameter "getsizes" indicates whether image dimensions
    should be returned for each item. Folders should always be returned
    before files.

    Optionally a "type" parameter can be specified to restrict returned
    files (depending on the connector). If a "type" parameter is given
    for the HTML document, the same parameter value is reused and passed
    to getFolder(). This can be used for example to only show image
    files in a file system tree.
    """
    path = path.encode('utf-8')
    folders = []
    files = []
    path = self.normalizePath(path)
    folder = self.getObject(path)
    for name in folder.listDirectory():
        if IResourceDirectory.providedBy(folder[name]):
            folders.append(self.getInfo(
                path='{0}/{1}/'.format(path, name), getSize=getSizes))
        else:
            files.append(self.getInfo(
                path='{0}/{1}'.format(path, name), getSize=getSizes))
    return folders + files
def delete(self, path):
    """Delete the item at the given path.
    """
    path = path.encode('utf-8')
    npath = self.normalizePath(path)
    parentPath = '/'.join(npath.split('/')[:-1])
    name = npath.split('/')[-1]
    code = 0
    error = ''
    try:
        parent = self.getObject(parentPath)
    except KeyError:
        error = translate(_(u'filemanager_invalid_parent',
                            default=u'Parent folder not found.'),
                          context=self.request)
        code = 1
    else:
        try:
            del parent[name]
        except KeyError:
            error = translate(_(u'filemanager_error_file_not_found',
                                default=u'File not found.'),
                              context=self.request)
            code = 1
    return {
        'path': self.normalizeReturnPath(path),
        'error': error,
        'code': code,
    }
def _do_load_page(app, path, path_mtime):
    # Check the cache first.
    cache = app.cache.getCache('pages')
    cache_path = hashlib.md5(path.encode('utf8')).hexdigest() + '.json'
    page_time = path_mtime or os.path.getmtime(path)
    if cache.isValid(cache_path, page_time):
        cache_data = json.loads(cache.read(cache_path),
                                object_pairs_hook=collections.OrderedDict)
        config = PageConfiguration(values=cache_data['config'],
                                   validate=False)
        content = json_load_segments(cache_data['content'])
        return config, content, True

    # Nope, load the page from the source file.
    logger.debug("Loading page configuration from: %s" % path)
    with codecs.open(path, 'r', 'utf-8') as fp:
        raw = fp.read()
    header, offset = parse_config_header(raw)

    if 'format' not in header:
        auto_formats = app.config.get('site/auto_formats')
        name, ext = os.path.splitext(path)
        header['format'] = auto_formats.get(ext, None)

    config = PageConfiguration(header)
    content = parse_segments(raw, offset)
    config.set('segments', list(content.keys()))

    # Save to the cache.
    cache_data = {
        'config': config.get(),
        'content': json_save_segments(content)}
    cache.write(cache_path, json.dumps(cache_data))
    return config, content, False
def __iter__(self):
    for item in self.previous:
        pathkey = self.pathkey(*item.keys())[0]
        # no path .. can't do anything
        if not pathkey:
            yield item
            continue

        path = item[pathkey]
        # Skip the Plone site object itself
        if not path:
            yield item
            continue

        obj = self.context.unrestrictedTraverse(
            path.encode().lstrip('/'), None)

        # path doesn't exist
        if obj is None:
            yield item
            continue

        bccvlmd = item.get(self.bccvlmdkey)
        if not bccvlmd:
            yield item
            continue

        # apply bccvl metadata
        # FIXME: replace or update?
        IBCCVLMetadata(obj).update(bccvlmd)
        yield item
def _file_url_to_local_path(self, url):
    path = urlparse.urlparse(url).path
    path = unquote(path)
    if not path.startswith('/packages'):
        raise RuntimeError('Got invalid download URL: {0}'.format(url))
    path = path[1:]
    return os.path.join(self.mirror.webdir, path.encode('utf-8'))
def __init__(self, path, authz, log, options={}):
    self.log = log
    self.options = options
    self.pool = Pool()

    # Remove any trailing slash or else subversion might abort
    if isinstance(path, unicode):
        self.path = path
        path_utf8 = path.encode("utf-8")
    else:
        # note that this should usually not happen (unicode arg expected)
        self.path = to_unicode(path)
        path_utf8 = self.path.encode("utf-8")
    path_utf8 = os.path.normpath(path_utf8).replace("\\", "/")

    root_path_utf8 = repos.svn_repos_find_root_path(path_utf8, self.pool())
    if root_path_utf8 is None:
        raise TracError(_("%(path)s does not appear to be a Subversion "
                          "repository.", path=to_unicode(path_utf8)))

    try:
        self.repos = repos.svn_repos_open(root_path_utf8, self.pool())
    except core.SubversionException, e:
        raise TracError(_("Couldn't open Subversion repository %(path)s: "
                          "%(svn_error)s", path=to_unicode(path_utf8),
                          svn_error=exception_to_unicode(e)))
def secure_filename(path, destiny_os=os.name, fs_encoding=fs_encoding):
    '''
    Get rid of parent path components and special filenames.

    If path is invalid or protected, return empty string.

    :param path: unsafe path
    :param destiny_os: destination operative system
    :param fs_encoding: destination filesystem filename encoding
    :return: filename or empty string
    :rtype: str or unicode (depending on python version, destiny_os and
            fs_encoding)
    '''
    path = generic_filename(path)
    path = clean_restricted_chars(path)

    if check_forbidden_filename(path, destiny_os=destiny_os,
                                fs_encoding=fs_encoding):
        return ''

    if fs_encoding != 'unicode':
        if PY_LEGACY and not isinstance(path, unicode):
            path = unicode(path, encoding='latin-1')
        path = path.encode(fs_encoding,
                           errors=undescore_replace).decode(fs_encoding)

    return path
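# Hedged usage sketch for secure_filename() above; the exact results
# depend on generic_filename()/clean_restricted_chars() and on the
# destination OS, so these expectations are illustrative assumptions:
#
#     secure_filename('../../etc/passwd')      # -> 'passwd'
#     secure_filename('CON', destiny_os='nt')  # -> '' (reserved on Windows)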
def request(host, path, api_key, url_params=None):
    """Given your API_KEY, send a GET request to the API.

    Args:
        host (str): The domain host of the API.
        path (str): The path of the API after the domain.
        API_KEY (str): Your API Key.
        url_params (dict): An optional set of query parameters in the
            request.

    Returns:
        dict: The JSON response from the request.

    Raises:
        HTTPError: An error occurs from the HTTP request.
    """
    url_params = url_params or {}
    url = '{0}{1}'.format(host, quote(path.encode('utf8')))
    headers = {
        'Authorization': 'Bearer %s' % api_key,
    }

    print(u'Querying {0} ...'.format(url))

    response = requests.request('GET', url, headers=headers,
                                params=url_params)

    return response.json()
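# Example call for request() above, against a hypothetical Yelp-style
# endpoint; the host, path, key, and parameters are placeholders.
#
#     data = request('https://api.yelp.com', '/v3/businesses/search',
#                    api_key='YOUR_API_KEY',
#                    url_params={'location': 'San Francisco'})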
def translate_path(self, path):
    """map url path to local file system.

    path and return path are str type

    TODO:
    - fspath with os.sep from url always slash
    - URL_ROOT codecs simplify?
    - in the end of if body use super translate_path directly?
    """
    path = urllib_request.unquote(path).decode('utf-8')
    fsenc = sys.getfilesystemencoding()
    path = path.encode(fsenc)
    if URL_ROOT and self.path.startswith(URL_ROOT):
        if self.path == URL_ROOT or self.path == URL_ROOT + '/':
            fspath = os.path.join(PUBLIC_DIRECTORY, 'index.html').encode(fsenc)  # noqa
        else:
            _url_root = urllib_request.unquote(URL_ROOT) \
                .decode('utf-8').encode(fsenc)
            fspath = os.path.join(PUBLIC_DIRECTORY.encode(fsenc),
                                  path[len(_url_root) + 1:])
        return fspath
    else:
        return http_server.SimpleHTTPRequestHandler \
            .translate_path(self, path)
def _op(path, encodetype):
    try:
        tmp = path.encode(encodetype)
        self._chdir(tmp)
        return tmp
    except Exception:
        return ""
def get_doublon(systeme):
    """ recover name and path of duplicate games in gamelist.xml for a
    specific system"""
    rom = []
    rompath = []
    compteur_a = 0
    compteurdoublon = 0
    #print os.path.isfile("/recalbox/share/roms/"+systeme+"/gamelist.xml")
    if os.path.isfile("/recalbox/share/roms/" + systeme + "/gamelist.xml"):
        tree = ET.parse("/recalbox/share/roms/" + systeme + "/gamelist.xml")
        root = tree.getroot()
        print "START SEARCH FOR DUPLICATE " + systeme + " ROMS"
        for line in root.findall('game'):
            compteur_b = 0
            name = line.find('name').text
            name = name.encode('utf-8')
            name = re.sub("\\[([^\\[]*)\\]", '', name)
            path = line.find('path').text
            path = path.encode('utf-8')
            for lines2 in rom:
                if name == lines2:
                    print lines2
                    print path
                    print rompath[compteur_b]
                    compteurdoublon = compteurdoublon + 1
                compteur_b = compteur_b + 1
            rom.append(name)
            rompath.append(path)
            compteur_a = compteur_a + 1
    else:
        print "no gamelist found"
    print "Duplicate roms for " + systeme + " : " + str(compteurdoublon)
    print ""
def actual_path(path):
    """Get the actual path of `path`, including the correct case."""
    if env.PY2 and isinstance(path, unicode_class):
        path = path.encode(sys.getfilesystemencoding())
    if path in _ACTUAL_PATH_CACHE:
        return _ACTUAL_PATH_CACHE[path]

    head, tail = os.path.split(path)
    if not tail:
        # This means head is the drive spec: normalize it.
        actpath = head.upper()
    elif not head:
        actpath = tail
    else:
        head = actual_path(head)
        if head in _ACTUAL_PATH_LIST_CACHE:
            files = _ACTUAL_PATH_LIST_CACHE[head]
        else:
            try:
                files = os.listdir(head)
            except OSError:
                files = []
            _ACTUAL_PATH_LIST_CACHE[head] = files
        normtail = os.path.normcase(tail)
        for f in files:
            if os.path.normcase(f) == normtail:
                tail = f
                break
        actpath = os.path.join(head, tail)
    _ACTUAL_PATH_CACHE[path] = actpath
    return actpath
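# Illustrative note for actual_path() above: on a case-insensitive
# filesystem (e.g. Windows), the casing stored on disk wins over the
# casing the caller typed. The paths below are hypothetical.
#
#     actual_path(r'c:\users\NED')  # -> r'C:\users\ned' if stored that way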
def _tb_restart(self):
    self.c_paths = (ctypes.c_char_p * len(self.paths))()
    self.c_paths[:] = [path.encode("utf-8") for path in self.paths]

    verbosity = ctypes.c_int(1)
    compression_scheme = ctypes.c_int(4)

    ret = self.libgtb.tb_restart(verbosity, compression_scheme,
                                 self.c_paths)
    if ret:
        logging.debug(ret.decode("utf-8"))

    logging.debug("Main path has been set to %s",
                  self.libgtb.tbpaths_getmain().decode("utf-8"))

    av = self.libgtb.tb_availability()
    if av & 1:
        logging.debug("Some 3 piece tablebases available")
    if av & 2:
        logging.debug("All 3 piece tablebases complete")
    if av & 4:
        logging.debug("Some 4 piece tablebases available")
    if av & 8:
        logging.debug("All 4 piece tablebases complete")
    if av & 16:
        logging.debug("Some 5 piece tablebases available")
    if av & 32:
        logging.debug("All 5 piece tablebases complete")
def loadPeakMap(path=None):
    """ loads mzXML, mzML and mzData files

        If *path* is missing, a dialog for file selection is opened
        instead.
    """
    # local import in order to keep namespaces clean
    import ms
    import os.path
    import sys
    from pyopenms import MSExperiment, FileHandler
    from libms.DataStructures import PeakMap

    if isinstance(path, unicode):
        path = path.encode(sys.getfilesystemencoding())
    elif path is None:
        path = ms.askForSingleFile(extensions="mzML mzXML mzData".split())

    if path is None:
        return None

    # open-ms returns an empty peakmap if the file does not exist, so we
    # check ourselves:
    if not os.path.exists(path):
        raise Exception("file %s does not exist" % path)
    if not os.path.isfile(path):
        raise Exception("path %s is not a file" % path)

    experiment = MSExperiment()
    fh = FileHandler()
    if sys.platform == "win32":
        path = path.replace("/", "\\")  # needed for network shares
    fh.loadExperiment(path, experiment)

    return PeakMap.fromMSExperiment(experiment)
def download(self, path=None, name=None, req=None):
    """
    Functionality to download file
    """
    if not self.validate_request('download'):
        return {'Error': gettext('Not allowed'), 'Code': 0}

    dir = self.dir if self.dir is not None else ''

    if hasattr(str, 'decode'):
        path = path.encode('utf-8')
        orig_path = u"{0}{1}".format(dir, path.decode('utf-8'))
    else:
        orig_path = u"{0}{1}".format(dir, path)

    try:
        Filemanager.check_access_permission(
            dir, u"{}{}".format(path, path))
    except Exception as e:
        resp = Response(gettext(u"Error: {0}".format(e)))
        resp.headers['Content-Disposition'] = \
            'attachment; filename=' + name
        return resp

    name = path.split('/')[-1]
    content = open(orig_path, 'rb')
    resp = Response(content)
    resp.headers['Content-Disposition'] = 'attachment; filename=' + name
    return resp
def __init__(self, path, params, log):
    self.log = log
    self.pool = Pool()

    # Remove any trailing slash or else subversion might abort
    if isinstance(path, unicode):
        path_utf8 = path.encode('utf-8')
    else:
        # note that this should usually not happen (unicode arg expected)
        path_utf8 = to_unicode(path).encode('utf-8')

    path_utf8 = os.path.normpath(path_utf8).replace('\\', '/')
    self.path = path_utf8.decode('utf-8')

    root_path_utf8 = repos.svn_repos_find_root_path(path_utf8, self.pool())
    if root_path_utf8 is None:
        raise TracError(_("%(path)s does not appear to be a Subversion "
                          "repository.", path=to_unicode(path_utf8)))

    try:
        self.repos = repos.svn_repos_open(root_path_utf8, self.pool())
    except core.SubversionException, e:
        raise TracError(_("Couldn't open Subversion repository %(path)s: "
                          "%(svn_error)s", path=to_unicode(path_utf8),
                          svn_error=exception_to_unicode(e)))
def is_file_exist(self, path, name, req=None):
    """
    Checks whether given file exists or not
    """
    dir = self.dir if self.dir is not None else ''
    err_msg = ''
    code = 1

    name = unquote(name)
    path = unquote(path)
    if hasattr(str, 'decode'):
        name = name.encode('utf-8').decode('utf-8')
        path = path.encode('utf-8').decode('utf-8')
    try:
        orig_path = u"{0}{1}".format(dir, path)
        Filemanager.check_access_permission(
            dir, u"{}{}".format(path, name))

        newName = u"{0}{1}".format(orig_path, name)
        if not os.path.exists(newName):
            code = 0
    except Exception as e:
        code = 0
        if hasattr(e, 'strerror'):
            err_msg = u"Error: {0}".format(e.strerror)
        else:
            err_msg = u"Error: {0}".format(e)

    result = {'Path': path, 'Name': name, 'Error': err_msg, 'Code': code}
    return result
def __init__(self, path, entrystart, entryend):
    if hasattr(path, "encode"):
        path = path.encode("ascii")
    entrystart = int(entrystart)
    entryend = int(entryend)

    self.path = path
    self.entrystart = entrystart
    self.entryend = entryend
def path2uri(path):
    r"""
    Converts a path to a URI with the file scheme.

    If a path does not start with a slash (/), it is considered to be an
    invalid path and returned directly.

    >>> path2uri('/path/to/file')
    'file:///path/to/file'
    >>> path2uri('file:///path/to/file')
    'file:///path/to/file'
    >>> path2uri(u'/path/to/file')
    'file:///path/to/file'
    >>> path2uri('invalid/path')
    'invalid/path'
    >>> path2uri('/\xe8\xb7\xaf\xe5\xbe\x84/\xe6\x96\x87\xe4\xbb\xb6')
    'file:///%E8%B7%AF%E5%BE%84/%E6%96%87%E4%BB%B6'
    """
    if path.startswith('~'):
        path = os.path.expanduser(path)
    if not path.startswith('/'):
        return path
    if isinstance(path, unicode):
        path = path.encode('utf8')
    return 'file://' + urllib.pathname2url(path)
def _do_load_page(app, path, path_mtime):
    # Check the cache first.
    cache = app.cache.getCache("pages")
    cache_path = hashlib.md5(path.encode("utf8")).hexdigest() + ".json"
    page_time = path_mtime or os.path.getmtime(path)
    if cache.isValid(cache_path, page_time):
        cache_data = json.loads(cache.read(cache_path),
                                object_pairs_hook=collections.OrderedDict)
        config = PageConfiguration(values=cache_data["config"],
                                   validate=False)
        content = json_load_segments(cache_data["content"])
        return config, content, True

    # Nope, load the page from the source file.
    logger.debug("Loading page configuration from: %s" % path)
    with open(path, "r", encoding="utf-8") as fp:
        raw = fp.read()
    header, offset = parse_config_header(raw)

    if "format" not in header:
        auto_formats = app.config.get("site/auto_formats")
        name, ext = os.path.splitext(path)
        header["format"] = auto_formats.get(ext, None)

    config = PageConfiguration(header)
    content = parse_segments(raw, offset)
    config.set("segments", list(content.keys()))

    # Save to the cache.
    cache_data = {"config": config.getAll(),
                  "content": json_save_segments(content)}
    cache.write(cache_path, json.dumps(cache_data))
    return config, content, False
def delete(self, path=None, req=None):
    """
    Delete file or folder
    """
    if not self.validate_request('delete'):
        return {'Error': gettext('Not allowed'), 'Code': 0}

    dir = self.dir if self.dir is not None else ''
    path = path.encode('utf-8').decode('utf-8') if hasattr(
        str, 'decode') else path

    orig_path = u"{0}{1}".format(dir, path)

    try:
        Filemanager.check_access_permission(dir, path)
    except Exception as e:
        res = {'Error': gettext(u"Error: {0}".format(e)), 'Code': 0}
        return res

    err_msg = ''
    code = 1
    try:
        if os.path.isdir(orig_path):
            os.rmdir(orig_path)
        else:
            os.remove(orig_path)
    except Exception as e:
        code = 0
        err_msg = u"Error: {0}".format(e.strerror)

    result = {'Path': path, 'Error': err_msg, 'Code': code}
    return result
def save_file(this_download_url, path):
    print "- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - "
    time1 = datetime.datetime.now()
    print str(time1)[:-7],
    if os.path.isfile(path):
        file_size = os.path.getsize(path) / 1024 / 1024
        print "File " + path + " (" + str(file_size) + "Mb) already exists."
        return
    else:
        print "Downloading " + path + "..."
        r = requests.get(this_download_url, stream=True)
        with open(path.encode('utf-8'), "wb") as code:
            code.write(r.content)
        time2 = datetime.datetime.now()
        print str(time2)[:-7],
        print path + " Done."
        use_time = time2 - time1
        print "Time used: " + str(use_time)[:-7] + ", ",
        file_size = os.path.getsize(path) / 1024 / 1024
        print "File size: " + str(file_size) + " MB, Speed: " + str(
            file_size / (use_time.total_seconds()))[:4] + "MB/s"
def _file_url_to_local_path(self, url):
    path = url.replace(self.mirror.master.url, '')
    path = urllib.unquote(path)
    if not path.startswith('/packages'):
        raise RuntimeError('Got invalid download URL: {}'.format(url))
    path = path[1:]
    return os.path.join(self.mirror.webdir, path.encode('utf-8'))