def parse(self):
    """Parse the m3u playlist (self.m3u) into a list of [path, title] pairs."""
    try:
        if not self.m3u:
            return
        data = any2utf(self.m3u.read())
        # Normalize line endings, strip a UTF-8 BOM, and split into lines.
        data = data.replace('\r\n', '\n').lstrip('\xef\xbb\xbf').split('\n')
        # Keep extended-info lines ("##...") and plain entries; drop other "#" comments.
        paths = [os.path.normpath(line).strip('\r\n').strip() for line in data if line.startswith("##") or not line.startswith("#")]
        dirname = os.path.dirname(self.path)
        full_paths = []
        paths = iter(paths)
        for path in paths:
            text = None
            if path.startswith("##"):
                # A "##" line carries the display title for the entry that follows it.
                def task(path):
                    # Consume lines from the iterator; returns (entry_path_or_None, title).
                    text = path[2 : ]
                    try:
                        # NOTE: .next() is the Python 2 iterator protocol.
                        next_path = paths.next()
                        path = next_path if not next_path.startswith("##") else None
                    except StopIteration:
                        path = None
                        next_path = None
                    if not path:
                        # Title with no following file line: record it with a None path,
                        # then recurse when the next line was itself another "##" title.
                        full_paths.append( [path, text.strip('\r\n')] )
                        if next_path:
                            path, text = task(next_path)
                    return path, text
                path, text = task(path)
                if not path:
                    break
            if text:
                text = text.strip('\r\n')
            else:
                # No explicit title: derive one from the file name ('/' or '\\' separated).
                new_text = path.rsplit('/', 1)[-1]
                if path == new_text:
                    text = path.rsplit('\\', 1)[-1]
                else:
                    text = new_text
            if (path in "\\/"):
                # NOTE(review): this tests whether `path` is a substring of "\\/" —
                # possibly meant to test only the first character; confirm intent.
                full_paths.append( [path.replace("\\", "/"), text] )
            elif path.startswith('http'):
                if not text:
                    text = path.rsplit('/', 1)[-1]
                full_paths.append( [path.replace('/', '//', 1), text] )
            else:
                # Relative entries are resolved against the playlist's directory.
                full_paths.append([os.path.join(dirname, path).replace("\\", "/"), text] )
        return full_paths
    except IndexError:
        logging.warn("You try to load empty playlist")
def remove_volume(path):
    """Tear down the volume referenced by `path` and remove its mount directory.

    `path` is expected to end in "<volume_name_path>/<volume_uuid>". No-op when
    the volume manager is disabled or the volume does not exist.
    """
    if not enabled():
        return
    if not volume_exists(path):
        return
    volume_name_path = path.rsplit('/', 1)[0]
    volume_uuid = path.rsplit('/', 1)[1]
    log.info("Removing volume %s for instance %s" % (
        volume_uuid, _get_volume_instance_name(volume_name_path)))
    # Snapshots first, then unmount, then delete the volume itself.
    remove_internal_snapshots_for_volume(volume_uuid)
    v.umount_volume(volume_uuid, Config.volmgr_mount_namespace_fd())
    v.delete_volume(volume_uuid)
    shutil.rmtree(volume_name_path)
    log.info("Cleaned volume %s's mount directory at %s" % (
        volume_uuid, volume_name_path))
def read_losc_hdf5(h5f, path='strain/Strain',
                   start=None, end=None, copy=False):
    """Read a `TimeSeries` from a LOSC-format HDF file.

    Parameters
    ----------
    h5f : `str`, `h5py.HLObject`
        path of HDF5 file, or open `H5File`

    path : `str`
        name of HDF5 dataset to read.

    start : optional
        start time forwarded to `TimeSeries.crop`

    end : optional
        end time forwarded to `TimeSeries.crop`

    copy : `bool`, optional
        whether to copy the underlying array data

    Returns
    -------
    data : `~gwpy.timeseries.TimeSeries`
        a new `TimeSeries` containing the data read from disk
    """
    dataset = io_hdf5.find_dataset(h5f, path)
    # read data
    # NOTE(review): `dataset.value` was removed in h5py >= 3 (`dataset[()]` is
    # the modern spelling) — confirm the pinned h5py version before changing.
    nddata = dataset.value
    # read metadata
    xunit = parse_unit(dataset.attrs['Xunits'])
    epoch = dataset.attrs['Xstart']
    dt = Quantity(dataset.attrs['Xspacing'], xunit)
    unit = dataset.attrs['Yunits']
    # build and return
    return TimeSeries(nddata, epoch=epoch, sample_rate=(1/dt).to('Hertz'),
                      unit=unit, name=path.rsplit('/', 1)[1],
                      copy=copy).crop(start=start, end=end)
def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
    """Open an application-directory filesystem described by
    ``[appauthor:]appname[.version][/sub/path]`` and return ``(fs, path)``."""
    import fs.appdirfs
    fs_class = getattr(fs.appdirfs, cls.FSClass)
    # Optional "appauthor:" prefix.
    if ':' in fs_path:
        appauthor, appname = fs_path.split(':', 1)
    else:
        appauthor, appname = None, fs_path
    # Optional "/sub/path" suffix (path stays '' when absent).
    appname, _, path = appname.partition('/')
    # Optional ".version" after the app name.
    if '.' in appname:
        appname, appversion = appname.split('.', 1)
    else:
        appversion = None
    app_fs = fs_class(appname, appauthor=appauthor, version=appversion, create=create_dir)
    # Descend into any remaining sub-directory, creating it when requested.
    if '/' in path:
        subdir, path = path.rsplit('/', 1)
        app_fs = app_fs.makeopendir(subdir, recursive=True) if create_dir else app_fs.opendir(subdir)
    return app_fs, path
def POST(self, map_name, format):
    """Create a new layer on map `map_name` from a POSTed layer description."""
    data = get_data(name="layer", mandatory=["name", "resource"],
                    authorized=["name", "title", "abstract", "resource", "enabled"])
    l_name = data.pop("name")
    l_enabled = data.pop("enabled", True)

    # This means we can have one mapfile for each workspace
    # and if eveything uses urls it should work *almost* as is.
    url = urlparse.urlparse(data["resource"]["href"])
    if url.path.startswith(web.ctx.homepath):
        path = url.path[len(web.ctx.homepath):]
    else:
        raise webapp.BadRequest(message="Resource href is not handled by MRA.")

    # Expected href tail: .../<map>/workspaces/<ws>/<storetype>/<store>/<rtype>/<rname>
    try:
        _, map_name, _, ws_name, st_type, st_name, r_type, r_name = path.rsplit("/", 7)
    except ValueError:
        raise webapp.NotFound(message="ressource '%s' was not found." % path)
    # Drop any ".<ext>" suffix from the resource name.
    r_name = r_name.rsplit(".", 1)[0]

    mf, ws = get_mapfile_workspace(map_name, ws_name)

    with webapp.mightNotFound(r_type, workspace=ws_name):
        try:
            model = ws.get_model(r_name, r_type[:-1], st_name)
        except ValueError:
            # NOTE(review): this constructs a NotFound but never raises it —
            # confirm whether `raise` was intended here.
            webapp.NotFound("Invalid layer model '%s'" % r_type[:-1])

    with webapp.mightConflict("layer", mapfile=map_name):
        mf.create_layer(ws, model, l_name, l_enabled)
    mf.save()

    webapp.Created("%s/maps/%s/layers/%s%s" % (web.ctx.home, map_name, l_name,
                   (".%s" % format) if format else ""))
def mime_type_from_path(path):
    """Guess a MIME type from a file path's extension (defaults to text/plain)."""
    _MIMES = {
        'js': 'application/javascript',
        'css': 'text/css',
        'html': 'text/html',
        'txt': 'text/plain',
        'py': 'text/plain',
    }
    extension = path.rsplit('.', 1)[-1]
    return _MIMES.get(extension, 'text/plain')
def get_playlist(self, url, latest_episode):
    """Fetch the JSON playlist for a programme page URL.

    Rewrites the URL's path extension to .json, downloads it, and extracts the
    stream playlist. Returns a list of entries (only the first when
    `latest_episode` is true), or None on failure.
    """
    (scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
    # Swap the page extension for .json to reach the media metadata endpoint.
    if '.' in path:
        path = path.rsplit('.', 1)[0]
    path = path + '.json'
    jsonurl = urlparse.urlunparse((scheme, netloc, path, '', '', ''))
    mediajson = download_page(jsonurl)
    if mediajson is None:
        return None

    # Yle server sends UTF-8 but doesn't set charset in
    # Content-type header. This will workaround the problem.
    mediajson = mediajson.encode('iso-8859-1').decode('utf-8')

    playlist = self.extract_playlist(mediajson)
    if len(playlist) == 0:
        log(u"Can't find streams at %s." % url)
        return None

    if latest_episode:
        playlist = playlist[:1]

    return playlist
def POST(self, map_name, l_name, format):
    """Attach an existing style, referenced by the POSTed href, to layer `l_name`."""
    data = get_data(name="style", mandatory=["resource"],
                    authorized=["name", "title", "abstract", "resource"])

    url = urlparse.urlparse(data["resource"]["href"])
    if url.path.startswith(web.ctx.homepath):
        path = url.path[len(web.ctx.homepath):]
    else:
        raise webapp.BadRequest(message="Resource href (%s) is not handled by MRA." % url.path)

    # Expected href tail: .../maps/<map>/styles/<style-name>[.<ext>]
    try:
        _, map_name, _, s_name = path.rsplit("/", 3)
    except ValueError:
        raise webapp.NotFound(message="ressource '%s' was not found." % path)
    s_name = s_name.rsplit(".", 1)[0]

    # Get the new style.
    mf = get_mapfile(map_name)
    try:
        style = open(tools.get_style_path(s_name)).read()
    # BUG FIX: `except IOError, OSError:` bound the exception to the name
    # OSError and caught only IOError; a tuple catches both as intended.
    except (IOError, OSError):
        with webapp.mightNotFound("style", mapfile=map_name):
            style = mf.get_style_sld(s_name)
def gen_package_pickled_dic(path, module_name):
    """Build a {relative_module_path: source_bytes} dict embedding `module_name`.

    `path` may be a package directory (walked recursively) or a single file;
    for a file, empty __init__.py entries are synthesized for intermediate
    packages. Raises NameError when `path` does not exist.
    """
    modules_dic={}
    start_path=module_name.replace(".", "/")
    search_path=os.path.dirname(path)
    print "embedding %s ..."%os.path.join(search_path, start_path)
    #TODO: remove comments from python files || compile to .pyc to make payloads lighter
    if os.path.isdir(path):
        for root, dirs, files in os.walk(os.path.join(search_path, start_path)):
            for f in files:
                module_code=""
                with open(os.path.join(root,f),'rb') as fd:
                    module_code=fd.read()
                # Dict key is the path relative to search_path, '/'-separated.
                modprefix = root[len(search_path.rstrip(os.sep))+1:]
                modpath = os.path.join(modprefix,f).replace("\\","/")
                modules_dic[modpath]=module_code
    elif os.path.isfile(path):
        # Single module: keep its original extension (assumes the name has one).
        ext=path.rsplit(".",1)[1]
        module_code=""
        with open(path,'rb') as f:
            module_code=f.read()
        cur=""
        # Synthesize empty __init__.py files for each intermediate package.
        for rep in start_path.split("/")[:-1]:
            # NOTE(review): the membership test uses `cur+rep+...` but the
            # inserted key is `rep+...` (no `cur` prefix) — likely a bug for
            # packages more than one level deep; confirm intended keys.
            if not cur+rep+"/__init__.py" in modules_dic:
                modules_dic[rep+"/__init__.py"]=""
            cur+=rep+"/"
        modules_dic[start_path+"."+ext]=module_code
    if not modules_dic:
        raise NameError("path %s not found"%path)
    return modules_dic
def import_target(target, default=None):
    """
    Resolve "module:function" or "path/to/file.py:function" specs.

    >>> import_target('foo.bar:blah', 'get_jobs')
    <function foo.bar.blah>
    >>> import_target('foo.bar', 'get_jobs')
    <function foo.bar.get_jobs>
    >>> import_target('foo.bar:get_jobs')
    <function foo.bar.get_jobs>
    >>> import_target('foo/bar.py:get_jobs')
    <function get_jobs>
    """
    # Normalize "module" to "module:function" using the default callable name.
    if ':' not in target:
        target += ':%s' % default
    path, func_name = target.split(':', 1)
    if exists(path):
        # Filesystem path: execute the file into a fresh module object
        # (Python 2 `execfile`/`imp` idioms).
        module_name = path.rsplit('/', 1)[-1].split('.', 1)[0]
        module = imp.new_module(module_name)
        module.__file__ = path
        try:
            execfile(path, module.__dict__)
        except IOError, e:
            e.strerror = 'Unable to load file (%s)' % e.strerror
            raise
        sys.modules[module_name] = module
def opencom(addr, connect_retrys=3, retry_intermission=1):
    """Open the global TCP connection to tv3d, launching the local binary when
    `addr` targets localhost. Retries the connect `connect_retrys` times with
    `retry_intermission` seconds between attempts."""
    global sock,proc
    if sock:
        return
    if 'localhost:' in addr:
        import os.path
        import subprocess
        path = os.path.abspath('.').replace('\\','/')
        # Walk up the directory tree until bin/tv3d.exe can be started.
        while '/' in path and not proc:
            try:
                proc = subprocess.Popen([os.path.join(path,'bin/tv3d.exe'), addr],
                                        stdout=subprocess.DEVNULL,
                                        stderr=subprocess.DEVNULL)
            except:
                path = path.rsplit('/',1)[0]
        if not proc:
            print('tv3d could not be started.')
    import atexit
    import signal
    def ctrlc(s,f):
        # Ensure the connection/child process are torn down on Ctrl-C.
        closecom()
        import sys
        sys.exit(0)
    signal.signal(signal.SIGINT, ctrlc)
    atexit.register(closecom)
    import socket
    ip,port = addr.split(':')
    for _ in range(connect_retrys):
        try:
            sock = socket.socket()
            sock.connect((ip,int(port)))
            break
        except socket.error:
            sock = None
            time.sleep(retry_intermission)
    if not sock:
        print('tv3d not available through tcp on %s.' % addr)
def get_module_name(path, debug):
    """Derive the dotted module namespace for a .py file by walking up through
    its parent packages.

    Returns None when the file is not a .py file, is not inside a package, or
    the found root package resolves to a different location than `path`.
    """
    orig_path = path
    path, fn = os.path.split(path)
    if not fn.endswith('.py'):
        if debug:
            print(path, 'is not python file')
        return None

    module_name = os.path.splitext(fn)[0]
    names = [module_name]

    # Climb directories while each level is a package.
    while path:
        if not is_package(path, debug):
            if debug:
                print(path, 'is not a package')
                print('names=', names)
            break
        path, name = path.rsplit(os.path.sep, 1)
        names.append(name)

    # means that file was not within a package
    if len(names) == 1:
        return None

    module_ns = '.'.join(reversed(names))
    root_package = names[-1]
    # NOTE: `imp` is deprecated on Python 3 in favor of importlib.
    bits = imp.find_module(root_package)
    root_path = bits[1]

    # instead of loading, we use simple heuristic. if root package
    # exists and in same location as orig_path, then we're good
    orig_path = os.path.realpath(orig_path)
    root_path = os.path.realpath(root_path)
    if orig_path.startswith(root_path):
        return module_ns

    return None
def get_parent_paths(self):
    """Return the non-empty prefixes of every dotted path in self.paths that
    precedes '.<this node's name>'."""
    parents = []
    for candidate in self.paths:
        if '.' not in candidate:
            continue
        prefix = candidate.rsplit('.' + self.get_name(), 1)[0]
        if len(prefix):
            parents.append(prefix)
    return parents
def _path_split(path): bits = path.rsplit('/', 1) path = '' name = bits.pop() if bits: path = bits[0] return name, path
def result_path(self, basename=None, ext=''):
    """
    Return the full path to a test result, generated from the \
    calling file, class and, optionally, method.

    Optional kwargs :

    * basename - File basename. If omitted, this is \
        generated from the calling method.
    * ext - Appended file extension.

    """
    if ext and not ext.startswith('.'):
        ext = '.' + ext

    # Generate the folder name from the calling file name.
    path = os.path.abspath(inspect.getfile(self.__class__))
    path = os.path.splitext(path)[0]
    # Everything after ".../iris...tests/" becomes the result sub-folder.
    sub_path = path.rsplit('iris', 1)[1].split('tests', 1)[1][1:]

    # Generate the file name from the calling function name?
    if basename is None:
        stack = inspect.stack()
        # Walk up the call stack to the nearest test_* method.
        for frame in stack[1:]:
            if 'test_' in frame[3]:
                basename = frame[3].replace('test_', '')
                break

    filename = basename + ext

    result = os.path.join(self.get_result_path(''),
                          sub_path.replace('test_', ''),
                          self.__class__.__name__.replace('Test_', ''),
                          filename)
    return result
def serve_file(self, request):
    '''
    Load files from static directory

    Generator: yields the file contents in self.buffsize chunks, setting the
    Content-Type header from the extension (forced table first, then
    mimetypes). Responds 404 when the path escapes the static root or is
    missing.
    '''
    path = os.path.join(self.static, request.path.lstrip('/')).replace(os.sep, '/')
    if path.startswith(self.static): # extra check due ".." components
        if utils.get_resource_isdir(path):
            # Directories are served via their index.html.
            path = path.rstrip("/") + "/index.html"
        if utils.get_resource_exists(path):
            ext = path.rsplit(".", 1)[-1]
            if ext in self._forced_mimes:
                mime = self._forced_mimes[ext]
            else:
                mime, encoding = mimetypes.guess_type(path)
                if mime is None:
                    mime = "application/octet-stream"
                if encoding:
                    mime += "; " + encoding
            request.response_headers['Content-Type'] = mime
            fp = utils.get_resource_stream(path)
            chunk = fp.read(self.buffsize)
            if not config.DEBUG:
                request.cache_for(3600) # an hour
            while chunk:
                yield chunk
                chunk = fp.read(self.buffsize)
        else:
            request.error(404)
    else:
        request.error(404)
def export_to_images(main_frame):
    """Export every timeline period to a numbered image plus a merged image.

    Renders one image per export period (skipping files the user declines to
    overwrite), merges the rendered images, and restores the original view.
    """
    path = get_image_path(main_frame)
    if path is not None:
        try:
            periods, current_period = main_frame.get_export_periods()
        except ValueError:
            msg = _("The first image contains a Julian day < 0\n\nNavigate to first event or\nUse the feature 'Accept negative Julian days'")
            display_warning_message(msg)
            return
        view_properties = main_frame.main_panel.timeline_panel.timeline_canvas.controller.view_properties
        # Fixed vertical positions keep events aligned across the image series.
        view_properties.set_use_fixed_event_vertical_pos(True)
        path_without_extension, extension = path.rsplit(".", 1)
        view_properties.set_use_fixed_event_vertical_pos(True)
        view_properties.periods = periods
        count = 1
        paths = []
        for period in periods:
            path = "%s_%d.%s" % (path_without_extension, count, extension)
            if overwrite_existing_path(main_frame, path):
                main_frame.main_panel.timeline_panel.timeline_canvas.Navigate(lambda tp: period)
                main_frame.main_panel.timeline_panel.timeline_canvas.SaveAsPng(path)
                count += 1
                paths.append(path)
        view_properties.set_use_fixed_event_vertical_pos(False)
        # Restore the period the user was viewing before the export.
        main_frame.main_panel.timeline_panel.timeline_canvas.Navigate(lambda tp: current_period)
        merged_image_path = "%s_merged.%s" % (path_without_extension, extension)
        merge_images(paths, merged_image_path)
def edit_file(domain, path):
    """Edit a file if we can.

    Redirects back to the containing folder (with a flash message) when the
    file is binary or too large; otherwise returns the decoded file content
    for the editor template.
    """
    fname = filesystem_path(domain, path)
    # calculate the folder this file is in
    if '/' in path:
        browse = url_for('browse_files', domain=domain, path=path.rsplit('/', 1)[0])
    else:
        browse = ''
    # check file size
    size = os.path.getsize(fname)
    # check magic
    mime = magic.from_file(fname)
    # Files of <= 16 bytes are allowed through even when magic can't tell.
    if 'text' not in mime and size > 16:
        flash("This doesn't appear to be a text file, so we can't edit it here. If you want to replace it, just upload it again.", 'error')
        return redirect(browse)
    # warn about size issues after magic check
    if size > app.config['MAX_EDITABLE']:
        flash("Sorry, that file is a little too large to edit on the web.", 'error')
        return redirect(browse)
    with open(fname) as f:
        return dict(file_content=f.read().decode('utf-8'), domain=domain,
                    path=path, visual=int(request.args.get('visual', 0)))
def find_directory_file(db, path):
    """Look up a path's file id and its containing directory id (0 for the
    root level)."""
    path = path.strip("/")
    file_id = find_file(db, path)
    # Top-level files have no parent directory record.
    directory_id = find_directory(db, path.rsplit("/", 1)[0]) if "/" in path else 0
    return directory_id, file_id
def split_path(path):
    """Split a normalized resource path into (uri, fragment) on the last '#'."""
    path = Resource.normalize(path)
    fragment = path.rsplit('#', maxsplit=1)
    if len(fragment) == 2:
        uri, fragment = fragment
    else:
        # NOTE(review): when there is no '#', `fragment` is left as the
        # one-element *list* from rsplit (not a string) and uri is None, so
        # callers receive (None, [path]). Confirm whether (path, None) or
        # (None, path) was intended here.
        uri = None
    return uri, fragment
def file_format_changed(self, index):
    """Sync the export path's extension with the newly selected file format and
    enable/disable options that do not apply to CSV."""
    path = unicode(self.ui.exportPath.text())
    match = path.rsplit(".", 1)
    # Replace the extension (assumes the current path contains a '.').
    path = "%s.%s" % (match[0], FILE_TYPES[index])
    self.ui.exportPath.setText(path)
    for widget in (self.ui.includeRaw, self.ui.marginLabel, self.ui.addMargin):
        widget.setEnabled(FILE_TYPES[index] != "csv")
def __init__(self, path):
    """Instantiate the application class named by dotted `path` (module.Class)
    and store it on self.app."""
    Page.__init__(self)
    module_path, class_name = path.rsplit('.', 1)
    module = __import__(module_path, fromlist=[ class_name ])
    self.app = getattr(module, class_name)()
def write_data_in_docker_file(container, path, data):
    """Write `data` as the file at `path` inside `container`, creating the
    parent directory first."""
    directory, name = path.rsplit("/", 1)
    entry = tarfile.TarInfo(name)
    entry.size = len(data)
    payload = io.BytesIO(data)
    # Target directory must exist before we stream the tar archive in.
    run_in_container(container, ["mkdir", "-p", directory])
    with open_tar_to_docker(container, directory) as tar:
        tar.addfile(entry, payload)
def _get_provider_class(path): module, klass = path.rsplit(".", 1) try: _mod = __import__(module, fromlist=[klass]) except ImportError as e: raise CloudUnavailableError("Unable to import provider class {0}: {1}".format(path, e)) else: return getattr(_mod, klass)
def mime_type_from_path(path):
    """Map a file extension to its MIME type; unknown extensions fall back to
    text/plain (which also covers .txt and .py)."""
    known = {
        'js': 'application/javascript',
        'css': 'text/css',
        'html': 'text/html',
    }
    ext = path.rsplit('.', 1)[-1]
    # 'txt' and 'py' need no entries: the default already yields text/plain.
    return known.get(ext, 'text/plain')
def mkdir_p(path):
    """Create the parent directory of `path` (everything before the last '/'),
    silently accepting an already-existing directory."""
    parent = path.rsplit("/", 1)[0]
    try:
        os.makedirs(parent)
    except OSError as exc: # Python >2.5
        # Re-raise anything other than "already exists as a directory".
        if not (exc.errno == errno.EEXIST and os.path.isdir(parent)):
            raise
def _import_by_path_from_zip(path): '''Import a module from a path inside a zip file.''' assert '.zip' in path parent_path, child_name = path.rsplit(os.path.sep, 1) zip_importer = zipimport.zipimporter(parent_path) module = zip_importer.load_module(child_name) return module
def _get_specfile():
    ' Return a (specfile, sha224sum) tuple. '
    matches = glob('srpm-unpacked/*.spec')
    if len(matches) != 1:
        # Zero or multiple spec files: report unknown.
        return '?', '?'
    specpath = matches[0].strip()
    # Checksum first (needs the full path), then report the bare file name.
    cs = check_output(['sha224sum', specpath]).split()[0]
    return specpath.rsplit('/', 1)[1], cs
def process_filenames(path):
    """Decompose an underscore-delimited file name into its metadata fields.

    Returns (path, filename, date, bandwidth-id, sub-name).
    """
    filename = path.rsplit(os.sep, 1)[-1]
    fields = filename.split('_', 4)
    bandwidth_id = fields[1]
    sub_name = fields[-1]
    date_field = fields[3].replace('_', '-')
    return path, filename, date_field, bandwidth_id, sub_name
def _get_specfile():
    " Return a (specfile, sha224sum) tuple. "
    candidates = glob("srpm-unpacked/*.spec")
    if len(candidates) != 1:
        # Zero or multiple spec files: report unknown.
        return "?", "?"
    specpath = candidates[0].strip()
    checksum = subprocess.check_output(["sha224sum", specpath]).split()[0]
    # Report only the file name, not the srpm-unpacked/ prefix.
    specname = specpath.rsplit("/", 1)[1]
    return specname, checksum
def get_experiment(path):
    """Import and instantiate the experiment class named by dotted `path`
    ("pkg.module.ClassName"); the class is constructed with `path` itself.

    Raises RuntimeError when the module or class cannot be found.
    """
    mod, cls = path.rsplit('.', 1)
    try:
        return getattr(importlib.import_module(mod), cls)(path)
    except ImportError as e:
        # BUG FIX: `e.message` does not exist on Python 3 exceptions;
        # formatting the exception itself works on both Python 2 and 3.
        raise RuntimeError('Could not import module %s.\n'
                           'Make sure it\'s in your Python path.\nImportError: %s' % (mod, e))
    except AttributeError:
        raise RuntimeError('No class %s in module %s.' % (cls, mod))
def __getitem__(self, index):
    #print('getting item, ',index)
    """
    Args:
        index (int): Index

    Returns:
        tuple: (image, target) where target is class_index of the target class.
    """
    ################################
    # Fast path: return a previously processed (disk-cached) sample.
    if os.path.exists(os.path.join(self.ProcessedDataPath,str(index))):
        img_stacked, target = torch.load(os.path.join(self.ProcessedDataPath,str(index)))
        # Cached tensors are stored as uint8; scale back to [0, 1] floats.
        img_stacked = torch.ByteTensor(img_stacked).float()/255.
        return img_stacked, target
    ###############################
    #print('index:',index)
    path, target = self.imgs[index]
    # Derive the matching E-stain and H-stain image paths from the RGB path
    # (assumes the ../DB_E_.../../DB_H_... directory layout — TODO confirm).
    path_token = path.rsplit("/")
    # print path_token
    image_token = path_token[-1].rsplit("_")
    #print image_token
    path_e="../DB_E_101_anno_cent/"+path_token[1]+"/"+image_token[0]+"_"+path_token[1]+"/"+path_token[-1]
    path_h = "../DB_H_101_anno_cent/"+path_token[1]+"/"+image_token[0]+"_"+path_token[1]+"/"+path_token[-1]
    #img = self.loader(path)
    # img.save('./image.bmp', 'bmp')
    # print np.array(img).shape
    #print('reading img_e')
    img_e = self.loader(path_e)
    # print np.array(img_e).shape
    #print('reading img_h')
    img_h = self.loader(path_h)
    # print np.array(img_h).shape
    #print('reading img_hsv')
    img_hsv = self.hsv(path)
    # print img_hsv
    #img_hsv.save('./hsv_image.bmp', 'bmp')
    # print np.max(np.array(img_hsv),axis=0)
    #print('reading img_gray')
    img_gray = self.gray(path)
    # print np.array(np.expand_dims(img_gray,axis=2)).shape
    #print('concatinating')
    # Stack HSV, H-stain, E-stain and grayscale planes along the channel axis.
    img_stacked = np.concatenate((img_hsv,img_h,img_e,img_gray),axis=2)
    # print np.array(img_stacked).shape
    # img_stacked =np.float32(np.transpose(img_stacked,(2,0,1)))
    if self.transform is not None:
        img_stacked = self.transform(img_stacked)
    if self.target_transform is not None:
        target = self.target_transform(target)
    # Cache the processed sample (as uint8) so later epochs skip the heavy work.
    with open(os.path.join(self.ProcessedDataPath,str(index)), 'wb') as f:
        DataSample = (255*img_stacked).byte().numpy(), target
        torch.save(DataSample, f)
    return img_stacked, target
def convert(self, allow_no_files=False):
    """Convert read before __opts to the commands stream

    Parameters
    ----------
    allow_missing : bool
      Either to allow log files to be missing entirely.  Primarily is
      used for testing
    """
    stream = []
    stream2 = []
    e = self.__opts.get('config-error')
    if e:
        stream.extend([['config-error', "Jail '%s' skipped, because of wrong configuration: %s" % (self.__name, e)]])
        return stream
    # fill jail with filter options, using filter (only not overriden in jail):
    if self.__filter:
        stream.extend(self.__filter.convert())
    # and using options from jail:
    FilterReader._fillStream(stream, self.__opts, self.__name)
    for opt, value in self.__opts.iteritems():
        if opt == "logpath":
            # Systemd backends read the journal, not log files.
            if self.__opts.get('backend', '').startswith("systemd"):
                continue
            found_files = 0
            for path in value.split("\n"):
                # Each line is "<glob> [tail-mode]"; default mode is "head".
                path = path.rsplit(" ", 1)
                path, tail = path if len(path) > 1 else (path[0], "head")
                pathList = JailReader._glob(path)
                if len(pathList) == 0:
                    logSys.notice("No file(s) found for glob %s" % path)
                for p in pathList:
                    found_files += 1
                    # logpath after all log-related data (backend, date-pattern, etc)
                    stream2.append(["set", self.__name, "addlogpath", p, tail])
            if not found_files:
                msg = "Have not found any log file for %s jail" % self.__name
                if not allow_no_files:
                    raise ValueError(msg)
                logSys.warning(msg)
        elif opt == "backend":
            backend = value
        elif opt == "ignoreip":
            stream.append(["set", self.__name, "addignoreip"] + splitwords(value))
        elif opt not in JailReader._ignoreOpts:
            stream.append(["set", self.__name, opt, value])
    # consider options order (after other options):
    if stream2:
        stream += stream2
    for action in self.__actions:
        if isinstance(action, (ConfigReaderUnshared, ConfigReader)):
            stream.extend(action.convert())
        else:
            stream.append(action)
    stream.insert(0, ["add", self.__name, backend])
    return stream
def __init__(self, path='', depth=0):
    """Node of a directory-scan tree; prints a progress marker for named nodes."""
    self.name = path.rsplit('/', 1)[-1]
    if path:
        indent = ' ' * (depth - 1)
        print('%s- %s...' % (indent, self.name), end='', flush=True)
    self.depth = depth
    self.path = path
    self.children = []
    self.error = None
    self.error_detail = None
    self.files = []
def open_image(self, path):
    """Open an image file, transparently materializing gs:// objects locally.

    GCS objects are copied to a local file named after the object's basename
    and reused on later calls; plain paths are opened directly.
    """
    fname = path.rsplit('/', 1)[-1]
    if path.startswith('gs://'):
        # check for downloaded file
        if os.path.exists(fname):
            path = fname
    if path.startswith('gs://'):
        try:
            f = file_io.FileIO(path, 'r')
        except Exception as e:
            # Single retry: GCS reads occasionally fail transiently.
            sys.stderr.write(
                'Retrying after exception reading gcs file: %s\n' % path)
            f = file_io.FileIO(path, 'r')
        fname = path.rsplit('/', 1)[-1]
        # Persist a local copy, then serve from it.
        out = open(fname, 'w')
        out.write(f.read())
        out.close()
        return open(fname)
    else:
        return open(path)
def open(infile, name=None, mode = "r"):
    """Open an image (path or Python 2 file object) and wrap it in an ezImage.

    The format is taken from the path's extension. NOTE: this shadows the
    builtin ``open`` within this module.
    """
    if isinstance(infile, file):
        path = infile.name
    else:
        path = infile
    # try:
    if type(path) == unicode:
        path = path.encode('utf-8')
    # Assumes the path contains a '.'; raises IndexError otherwise.
    format = path.rsplit('.',1)[1].lower()
    return ezImage(path, mode, format, name)
def on_btn_import_folder_clicked(self):
    """Ask the user for a source directory and prime the import/save fields."""
    path = str(QFileDialog.getExistingDirectory(
        self, "Select Directory")).replace("\\", "/")
    # Dialog cancelled: leave the fields untouched.
    if path == "":
        return
    self.line_import_source_folder.setText(path)
    # Default save-folder name: last component of the chosen directory.
    folder = path.rsplit("/", 1)[1]
    self.line_import_save_folder.setText(folder)
    print(path)
def __call__(self, request, relpath, **args):
    """Serve a static file from self.Root.

    Handles directory default files, If-Modified-Since (304), MIME detection
    by extension and optional cache-control headers.
    """
    if ".." in relpath:
        # Reject path traversal outright.
        return Response("Forbidden", status=403)
    if relpath == "index":
        self.redirect("./index.html")
    home = self.Root
    path = os.path.join(home, relpath)
    if not os.path.exists(path):
        return Response("Not found", status=404)
    if os.path.isdir(path) and self.DefaultFile:
        path = os.path.join(path, self.DefaultFile)
    if not os.path.isfile(path):
        #print "not a regular file"
        return Response("Not found", status=404)
    mtime = os.path.getmtime(path)
    mtime = datetime.datetime.utcfromtimestamp(mtime)
    if "If-Modified-Since" in request.headers:
        # <day-name>, <day> <month> <year> <hour>:<minute>:<second> GMT
        dt_str = request.headers["If-Modified-Since"]
        words = dt_str.split()
        if len(words) == 6 and words[-1] == "GMT":
            dt_str = " ".join(words[1:-1]) # keep only <day> <month> <year> <hour>:<minute>:<second>
            dt = datetime.datetime.strptime(dt_str, '%d %b %Y %H:%M:%S')
            if mtime < dt:
                return 304
    size = os.path.getsize(path)
    ext = path.rsplit('.', 1)[-1]
    mime_type = _MIME_TYPES_BASE.get(ext, "text/plain")

    def read_iter(f):
        # Stream the file in 8 KB chunks.
        while True:
            data = f.read(8192)
            if not data:
                break
            yield data

    resp = Response(app_iter=read_iter(open(path, "rb")),
                    content_length=size, content_type=mime_type)
    #resp.headers["Last-Modified"] = mtime.strftime("%a, %d %b %Y %H:%M:%S GMT")
    if self.CacheTTL is not None:
        resp.cache_control.max_age = self.CacheTTL
    return resp
def get_flavor(path) -> Flavor:
    """Resolve a flavor spec to an instance.

    ":Name" looks Name up in this module's globals; "module:Name" imports the
    module (from a source file when one exists on disk, otherwise by dotted
    name) and instantiates its Name attribute.
    """
    # using protocol (types)
    if path.startswith(":"):
        return globals()[path[1:]]()
    module, name = path.rsplit(":", 1)
    if os.path.exists(module):
        # Load directly from the source file under a filesystem-safe module id.
        module_id = module.replace(".", "_").replace("-", "_")
        loaded = SourceFileLoader(module_id, module).load_module()
    else:
        loaded = import_module(module)
    return getattr(loaded, name)()
def get(self, path):
    """Serve the published notebook named by the last path component, or a
    plain notice when that notebook is not currently hosted."""
    name = path.rsplit('/', 1)[-1]
    if name not in self.hosting:
        self.write('Notebook not currently being published')
        return
    rendered = str(self.hosting[name]).replace('\\n', '')
    self.write(self.render_template('publish.html',
                                    notebook=rendered,
                                    notebook_name=name))
def main():
    """Back up a directory tree to Google Drive (hash-compared, encrypted).

    Currently short-circuited: the function returns immediately, leaving the
    original implementation below unreachable.
    """
    print("Good god just use RClone")
    return
    parser = argparse.ArgumentParser()
    parser.add_argument('--dir', '--dir', help='Top level directory')
    parser.add_argument('--pw', '--pw', help='Password to use for zip')
    parser.add_argument('--backup_dir', '--backup_dir', help='Where in remote to store backups')
    args = parser.parse_args()
    global PW
    PW = args.pw or ''
    assert os.path.exists(args.dir)
    with open('secret.json', 'r') as f:
        secrets = json.loads(f.read())
    # Generate a fresh encryption key on first run.
    if 'fernet_key' not in secrets:
        secrets['fernet_key'] = Fernet.generate_key()
    global FERNET_KEY
    FERNET_KEY = secrets['fernet_key']
    global TOP_LEVEL_DIR
    TOP_LEVEL_DIR = args.dir.strip(os.sep).rsplit(os.sep, 1)[0].strip(os.sep)
    global REMOTE_BACKUPS_DIR
    REMOTE_BACKUPS_DIR = args.backup_dir.strip(os.sep)
    global DRIVE_ID
    DRIVE_ID = secrets['drive_id']
    service = get_service()
    service.files().emptyTrash()
    res = {}
    # Collect files whose local hash differs from the stored remote hash.
    for dir, sub_dirs, files in os.walk(args.dir):
        for file in files:
            path = os.path.join(dir, file)
            hashed = local_hash(path)
            if hashed != get_remote_hash(path, file):
                res[path] = hashed
    logger.info('%d files', len(res))
    # Pre-create remote folders for every affected directory.
    for path in set(path.rsplit(os.sep, 1)[0] for path in res):
        print(path)
        createNewFolder(service=service, name=path)
    return
    #TODO
    for path, hashed in res.items():
        upload(path, hashed, pw)
def release(self, path):
    """Release the Dropbox lock on `path` by deleting its lock marker file."""
    client = self.client
    # just have to delete the right lock file
    folder, filename = path.rsplit('/', 1)
    lock_file_path = folder + '/' + ("_lock_%s" % filename)
    try:
        client.file_delete(lock_file_path)
    except dropbox.rest.ErrorResponse:
        raise IOError("Unable to unlock file!")
def choose_path(self):
    """Open a save-file dialog preset to the current format's filter and store
    the chosen path (appending the format's extension when omitted)."""
    filters = ";;".join(list(DIALOG_FILTERS.values()))
    current_extension = FILE_TYPES[self.ui.fileFormat.currentIndex()]
    current_filter = DIALOG_FILTERS[current_extension]
    path, _filter = QFileDialog.getSaveFileName(self.parent(), "Save File",
                                                self.ui.exportPath.text(),
                                                filters, current_filter)
    path = unicode(path)
    if path != "":
        match = path.rsplit(".", 1)
        if len(match) == 1:
            # No extension typed: append the one implied by the chosen format.
            path = "%s.%s" % (path, current_extension)
        self.ui.exportPath.setText(path)
def parse_path(path, default_obj="gwf", default_file="workflow.py"):
    """Parse "file.py:obj" into (basedir, filename-without-ext, obj), filling
    in defaults for an empty/missing file part or object part.

    Raises ValueError when more than one ':' is present.
    """
    pieces = path.rsplit(":")
    if len(pieces) == 2:
        path = pieces[0] or default_file
        obj = pieces[1] or default_obj
    elif len(pieces) == 1:
        path, obj = pieces[0], default_obj
    else:
        raise ValueError('Invalid path: "{}".'.format(path))
    basedir, filename = os.path.split(path)
    filename, _ = os.path.splitext(filename)
    return basedir, filename, obj
def _robust_path_split(path): sep = "\\" if "\\" in path else "/" splits = path.rsplit(sep, 1) if len(splits) == 1: parent = '.' file = splits[0] elif len(splits) == 2: parent, file = splits else: raise ('Unknown filepath split for path {}'.format(path)) filename, ext = os.path.splitext(file) return parent, filename, ext
def guess_source(path):
    """Guess the data source of a genome file from its name.

    ".vcf" files are reported as "vcf"; otherwise the name is assumed to be in
    openSNP format (``name.format.submission_id``) and the format field is
    returned. Raises UnknownSource when neither shape matches.
    """
    # BUG FIX: the original used os.path.split (directory/basename split), so
    # `ext` was never "vcf"; splitext actually extracts the extension.
    ext = os.path.splitext(path)[1]
    if ext == os.path.extsep + "vcf":
        return "vcf"  # VCF is easy ;)
    # Okay, maybe it's in openSNP format: ``format.submission_id``.
    try:
        source, _ = path.rsplit(os.path.extsep, 2)[-2:]
    except ValueError:
        raise UnknownSource(path)
    else:
        return source
def buildImages(self):
    """Load every image matched by self.path (relative to the metaimageset
    file's directory) and wrap each in an inputs.Image with this element's
    offsets."""
    paths = glob.glob(os.path.join(os.path.dirname(self.metaImageset.filePath), self.path))

    images = []
    for path in paths:
        pathSplit = path.rsplit(".", 1)
        # Image name = file name without directory or extension.
        name = os.path.basename(pathSplit[0])

        image = inputs.Image(name, QtGui.QImage(path), self.xOffset, self.yOffset)
        images.append(image)

    return images
def url_file_name(fh):
    """Best-effort file name for a urllib response: prefer the
    Content-Disposition filename, falling back to the URL's last path
    segment (unquoted)."""
    try:
        _, params = cgi.parse_header(fh.headers['content-disposition'])
        return params['filename']
    except KeyError:
        pass # header not found, fall back on parsing url
    url = urlparse.urlparse(fh.geturl())
    path = urllib.unquote_plus(url.path)
    fname = path.rsplit('/', 1)[-1]
    logger.debug(u'Content-disposition missing, fallback file name {0}'.format(fname))
    return fname
def list_runs(self, rse, nlast=NLAST_RUNS):
    """Return the timestamps of the last `nlast` non-empty stats runs for
    `rse`, sorted ascending."""
    files = glob.glob(f"{self.Path}/{rse}_*_stats.json")
    runs = []
    for path in files:
        fn = path.rsplit("/", 1)[-1]
        # Skip empty (failed/partial) stats files.
        if os.stat(path).st_size > 0:
            r, timestamp, typ, ext = self.parse_filename(fn)
            if r == rse:
                # if the RSE was X, then rses like X_Y will appear in this list too,
                # so double check that we get the right RSE
                runs.append(timestamp)
    return sorted(runs)[-nlast:]
def namespace_path_split(path):
    '''Split the namespace path into a pair (head, tail).

    Tail will be the last namespace path component and head will be
    everything leading up to that in the path. This is similar to
    os.path.split.

    :param path: (String) A namespace path.
    :returns: (String, String) A tuple where the first component is the base
        path ('' when the path has no '.') and the second is the last path
        component.
    '''
    # BUG FIX: rsplit('.', 1) on a dotless path yielded a 1-tuple, breaking
    # the documented pair contract; rpartition always yields head and tail.
    head, _, tail = path.rpartition('.')
    return (head, tail)
def parse_path(path, default_obj='gwf'):
    """Parse "file.py[:obj]" into (basedir, filename-without-ext, obj); the
    object part defaults to `default_obj` when absent.

    Raises ValueError when more than one ':' is present.
    """
    parts = path.rsplit(':')
    if len(parts) > 2:
        raise ValueError('Invalid path: "{}".'.format(path))
    obj = parts[1] if len(parts) == 2 else default_obj
    basedir, filename = os.path.split(parts[0])
    filename, _ = os.path.splitext(filename)
    return basedir, filename, obj
def checkPath(self, path):
    """Validate `path` as an existing .tsv file; on success store it on
    self.path.

    Returns True when valid, False otherwise (printing a message — the
    user-facing messages are intentionally in Portuguese).
    """
    if path is None:
        print("Erro: Nenhum arquivo selecionado")
        return False
    elif not os.path.exists(path):
        print("Erro: Arquivo não existente")
        return False
    # ROBUSTNESS FIX: a path with no '.' used to raise IndexError on
    # rsplit(...)[1]; treat it as a wrong extension instead.
    elif '.' not in path or path.rsplit('.', 1)[1] not in ['tsv', 'TSV']:
        print("Erro: Extensão do arquivo deve ser '.tsv'")
        return False
    else:
        self.path = path
        return True
def generic_filename(path):
    '''
    Extract filename of given path os-indepently, taking care of known path
    separators.

    :param path: path
    :return: filename
    :rtype: str or unicode (depending on given path)
    '''
    # Strip everything up to the last occurrence of each known separator.
    for sep in common_path_separators:
        if sep in path:
            path = path.rsplit(sep, 1)[1]
    return path
def _start_obci_supervisor_process(self, rq_message):
    """Spawn a local OBCI process-supervisor subprocess.

    Builds launch parameters from the request message (swapping in the
    supervisor module's .py path, dropping transport bookkeeping fields) and
    returns (process_object, False) on success or (None, error_details).
    """
    path = obci_process_supervisor.__file__
    # Normalize a .pyc/.pyo module path back to the .py source path.
    path = '.'.join([path.rsplit('.', 1)[0], 'py'])
    start_params = rq_message.dict()
    start_params['path'] = path
    # These message-envelope fields are not subprocess launch parameters.
    del start_params['type']
    del start_params['sender']
    del start_params['sender_ip']
    del start_params['receiver']
    sv_obj, details = self.subprocess_mgr.new_local_process(**start_params)
    if sv_obj is None:
        return None, details
    return sv_obj, False
def _load_files(_none, path, fnames):
    """os.path.walk-style callback: parse every .xml file in `path` and feed
    it to the nav-help or tips loader; the language is the directory name."""
    lang = path.rsplit('/', 1)[-1]
    for f in fnames:
        if not f.endswith('.xml'):
            continue
        # View name = file name without the .xml extension.
        view = f[:-4]
        fd = open(os.path.join(path, f))
        data = fd.read()
        fd.close()
        # Escape bare ampersands so minidom accepts the content.
        doc = minidom.parseString(data.replace('&', '%26'))
        if f.startswith('nav-help'):
            _load_navhelp(doc, lang)
        else:
            _load_tips(doc, lang, view)
        doc.unlink()
def audio_compress(path, resource_name):
    '''Compress an audio file in place at the configured global bit rate.'''
    global GlobalAudioBitRate, GlobalDealFileCount
    try:
        print("当前压缩%s资源:%s" % (resource_name, path))
        # Format is taken from the extension (assumes the name contains '.').
        file_ext_name = path.rsplit('.', 1)[1]
        audio = AudioSegment.from_file(path, format=file_ext_name)
    except IOError:
        print("Error: 打开文件失败,文件名:%s" % path)
    except KeyboardInterrupt:
        print("压缩已终止……")
        sys.exit()
    else:
        GlobalDealFileCount += 1
        # Overwrite the original file with the re-encoded audio.
        audio.export(path, format=file_ext_name, bitrate=GlobalAudioBitRate)
def _split_iso_from_path(path): """Split ISO from NFS path. NFS path could also contain pointer to ISO which should be mounted. Problem of this is that NFS path with ISO cannot be mounted as NFS mount. We have to split these before mount. :param path: path on the NFS server which could point to ISO :return: tuple of path, iso_file_name; is_file_name is empty if no ISO is part of the path :rtype: tuple (str, str) """ if path.endswith(".iso"): return path.rsplit("/", maxsplit=1) return path, ""