def __init__(self):
    """Build an in-memory sandbox filesystem seeded from parts of the host.

    Copies a small whitelist of real directories into a ``TempFS`` and
    plants a decoy ``/home/notes`` file, then prints the resulting tree.
    """
    self.filesys = tempfs.TempFS()
    # Smaller filesystem for the demo; deliberately excludes /home, which
    # holds real ssh keys and other sensitive data.  (A full root layout
    # would be bin, boot, cdrom, dev, etc, home, lib, lib64, lost+found,
    # media, mnt, opt, root, run, sbin, snap, srv, sys, tmp, usr, var.)
    dirs = ["etc", "boot"]
    for my_dir in dirs:
        my_fs = OSFS("/" + my_dir)
        try:
            fs.copy.copy_fs(my_fs,
                            self.filesys.makedir("/" + my_dir + "/"),
                            walker=None, on_copy=None)
            print("dir: " + my_dir + " created")
        # Was a bare `except:` — that would also swallow SystemExit and
        # KeyboardInterrupt.  Permission errors on system dirs are expected,
        # so catching Exception (best-effort) is intentional here.
        except Exception:
            print("EXCEPTION: " + my_dir)
    self.filesys.makedir("/home")
    # Context manager guarantees the handle is closed even if write() fails.
    with self.filesys.open("/home/notes", "w+") as pointer:
        pointer.write(
            "my username for email is bob, my password for email is password")
    print("File tree initialized:\r\n")
    self.filesys.tree()
def __init__(self, path, *, filesystem=None):
    """Load a YAML configuration file and prepare a scratch filesystem.

    :param path: path of the config file, resolved inside ``filesystem``.
    :param filesystem: optional pyfilesystem2 object to read from; when
        omitted, the local root filesystem (``/``) is opened.
    """
    yaml = YAML()
    # Explicit `is None`: a filesystem object should not be rejected just
    # because it happens to evaluate falsy (the truthiness test `not
    # filesystem` would silently replace such an object with the root fs).
    if filesystem is None:
        filesystem = fs.open_fs('/')
    with filesystem.open(str(path), 'r') as configfile:
        self.config = yaml.load(configfile)
    # Scratch area used by later operations; cleaned up automatically.
    self.tmp = tempfs.TempFS()
def write_out_backup(backing_store_fs, data, outfile, prefix=''):
    """
    Write the backup data to its final location.  A backing store is
    required and either a filepath to the packaged backup or the tmp
    filesystem is required.

    :param backing_store_fs: a pyfilesystem2 object to be the final
        storage location of the backup. (should be `OSFS`, `S3FS`,
        `FTPFS`, etc.)  Can be a single object or list of filesystem
        objects for copying to multiple backing stores.
    :param data: the byte stream that needs to be written to the file on
        the backing store fs.
    :param outfile: the name of the file to write out to.
    :param optional prefix: a parent directory for the files to be saved
        under.  This is can be a good place to encode some information
        about the backup.  A slash will be appended to the prefix to
        create a directory or pseudo-directory structure.
    """
    if prefix and not prefix.endswith('/'):
        prefix = prefix + '/'
    if not isinstance(backing_store_fs, list):
        backing_store_fs = [backing_store_fs]
    # Stage the payload in a temp file ONCE, then copy that same file to
    # every backing store.  (Previously a fresh TempFS was created and the
    # identical bytes rewritten for each store in the loop.)
    tmp = tempfs.TempFS()
    try:
        with tmp.open("lp-tmp-backup", 'wb') as tmp_file:
            tmp_file.write(data)
        for backing_fs in backing_store_fs:
            try:
                backing_fs.makedirs(prefix)
            except DirectoryExists:
                # Destination directory already present — nothing to do.
                pass
            copy_file(tmp, "lp-tmp-backup", backing_fs, str(prefix + outfile))
    finally:
        # Always release the temp filesystem, even if a copy fails.
        tmp.clean()
def read_backup(backing_store_fs, infile, prefix=""):
    """
    Read a backup file from some pyfilesystem.

    :param backing_store_fs: The pyfilesystem object (or list of objects)
        where the file is located; stores are tried in order.
    :param infile: the name of the file
    :param optional prefix: the prefix before the filename
    :return: raw file data
    :raises exceptions.ConfigurationError: if the file cannot be
        retrieved from any of the available backing stores.
    """
    tmp = tempfs.TempFS()
    if prefix and not prefix.endswith('/'):
        prefix = prefix + '/'
    if not isinstance(backing_store_fs, list):
        backing_store_fs = [backing_store_fs]
    try:
        # Try each store in turn; the first successful copy wins.
        for backing_fs in backing_store_fs:
            try:
                copy_file(backing_fs, prefix + infile, tmp, infile)
                break
            # Missing credentials / files / permissions on one store are
            # recoverable — just move on to the next candidate.
            except (botocore.exceptions.NoCredentialsError,
                    OSError,
                    fs.errors.ResourceNotFound,
                    fs.errors.PermissionDenied):
                continue
        else:
            # Loop exhausted without a `break`: no store had the file.
            raise exceptions.ConfigurationError(
                "Specified file could not be found in any"
                " of the available backing stores.")
        with tmp.open(infile, 'rb') as retrieved_file:
            data = retrieved_file.read()
    finally:
        # Clean the temp filesystem even when retrieval or reading fails
        # (previously it leaked on any exception raised above).
        tmp.clean()
    return data
def setUp(self):
    """Create a scratch TempFS plus a watchable OSFS view of its root."""
    self.fs = tempfs.TempFS()
    base_fs = osfs.OSFS(self.fs.root_path)
    self.watchfs = ensure_watchable(base_fs, poll_interval=0.1)
    # On platforms with native watch support, ensure_watchable should have
    # returned the filesystem unwrapped.
    if watch_inotify is not None:
        self.assertEqual(base_fs, self.watchfs)
    if watch_win32 is not None:
        self.assertEqual(base_fs, self.watchfs)
def enable_test_mode(self):
    """Redirect writes onto a throwaway filesystem; no-op if already on."""
    if self.test_mode:
        return
    # Remember the real write fs so it can be restored later.
    self._test_mode_old_writefs = self.writefs
    self.addfs(name='test_mode_fs', fs=tempfs.TempFS(), write=True)
    self.test_mode = True
def create_fs(self, root: str) -> FSBase:
    """create a PyFileSystem instance from `root`.  `root` is in the
    format of `/` if local path, else `<scheme>://<netloc>`.  You should
    override this method to provide custom instances, for example, if you
    want to create an S3FS with certain parameters.

    :param root: `/` if local path, else `<scheme>://<netloc>`
    """
    temp_scheme = "temp://"
    mem_scheme = "mem://"
    if root.startswith(temp_scheme):
        # Everything after the scheme becomes the TempFS identifier.
        return tempfs.TempFS(root[len(temp_scheme):])
    if root.startswith(mem_scheme):
        return memoryfs.MemoryFS()
    # Fall back to pyfilesystem's generic opener for every other scheme.
    return open_fs(root)
def setUpClass(cls):
    """Build the shared ETL test context (logger, metadata, temp fs, DB)."""
    if 'VERBOSE' in os.environ:
        logger, _ = get_logger()
    else:
        # Quiet mode: a named logger with a null handler discards output.
        logger = logging.getLogger('georef-ar-etl')
        logger.addHandler(logging.NullHandler())
    config = read_config()
    cls._metadata = MetaData()
    engine = create_engine(config['test_db'], init_models=cls._uses_db)
    cls._ctx = Context(
        config=config,
        fs=tempfs.TempFS(),
        engine=engine,
        report=Report(logger),
        mode='testing',
    )
all_tile_data_large = [tile.getdata() for tile in tiles_large] all_tile_data_small = [tile.getdata() for tile in tiles_small] tile_fitter = TileFitter(all_tile_data_small) for x in range(mosaic.x_tile_count): for y in range(mosaic.y_tile_count): large_box = (x * TILE_SIZE, y * TILE_SIZE, (x + 1) * TILE_SIZE, (y + 1) * TILE_SIZE) small_box = (x * TILE_SIZE // TILE_BLOCK_SIZE, y * TILE_SIZE // TILE_BLOCK_SIZE, (x + 1) * TILE_SIZE // TILE_BLOCK_SIZE, (y + 1) * TILE_SIZE // TILE_BLOCK_SIZE) img_data = list(original_img_small.crop(small_box).getdata()) winner_idx = tile_fitter.get_best_fit_tile(img_data) mosaic.add_tile(all_tile_data_large[winner_idx], large_box) return mosaic def mosaic(fs, img_path, tiles_path): tiles_data = TileProcessor(fs, tiles_path).get_tiles() image_data = TargetImage(fs, img_path).get_data() return compose(image_data, tiles_data) if __name__ == '__main__': if len(sys.argv) < 3: print(f"Usage: {sys.argv[0]} <image> <tiles directory>") else: mosaic(tempfs.TempFS(), sys.argv[1], sys.argv[2])
def make_fs_from_string(string, _cache={}):
    """
    Create a FS object from a string.

    Uses a cache to avoid creating multiple FS objects for any given
    string (except for tempfs which allows multiple instances).

    Note: ``_cache={}`` is the deliberate mutable-default memoization
    idiom — the dict persists across calls by design.
    """
    if string == 'tempfs':
        # Use a temporary filesystem which is only available to the current
        # process, and will be cleaned up automatically. Bypass the cache
        # for this type of filesystem, as they are unique and self-contained.
        return tempfs.TempFS()
    if string.startswith('~/'):
        string = os.path.expanduser(string)
    if string in _cache:
        return _cache[string]
    if string.startswith('/'):
        # Use a simple directory on the filesystem.
        # 0o775 replaces the legacy `0775` literal, which is a SyntaxError
        # on Python 3; the 0o form is valid on Python 2.6+ and 3.x alike.
        if not os.path.exists(string):
            osfs.OSFS('/', dir_mode=0o775).makedir(
                path=string,
                recursive=True,
                allow_recreate=True,
            )
        fs = osfs.OSFS(string, dir_mode=0o775)
    elif string.startswith('s3:'):
        # Use an S3 bucket.
        s3_bucket = string[3:]
        if '/' in s3_bucket:
            s3_bucket, path = s3_bucket.split('/', 1)
        else:
            path = ''
        # The S3FS class can poll S3 for a file's etag after writing
        # to it, to ensure that the file upload has been written to
        # all relevant nodes in the S3 cluster.
        # S3 has read-after-write consistency for PUTS of new objects
        # and eventual consistency for overwrite PUTS and DELETES.
        # See http://aws.amazon.com/s3/faqs/
        # Most of our operations are writing to new files, so disable
        # this mostly wasteful check. This might need to be revisited
        # if there is a special case where we're updating files.
        key_sync_timeout = None
        fs = s3fs.S3FS(s3_bucket, key_sync_timeout=key_sync_timeout)
        if path:
            fs = fs.makeopendir(path, recursive=True)
    elif string.startswith('http://'):
        fs = httpfs.HTTPFS(string)
    else:
        raise ValueError('Unsupported storage string %r' % string)
    _cache[string] = fs
    return fs
def setUp(self):
    # Scratch filesystem wrapped so extended attributes are available.
    self.fs = ensure_xattrs(tempfs.TempFS())
def make_temp_fs():
    """Return a fresh temporary filesystem (auto-cleaned on close)."""
    return tempfs.TempFS()
def setUp(self):
    # Scratch filesystem tagged with an identifier so any leftover temp
    # directories are easy to attribute to this test suite.
    self.fs = tempfs.TempFS("iotoolstest")
def enable_test_mode(self):
    """Point the cache at a throwaway filesystem; no-op if already on."""
    if self.test_mode:
        return
    # Keep the real cache fs around so it can be restored afterwards.
    self._cachefs = self.cachefs
    self.cachefs = tempfs.TempFS()
    self.test_mode = True
def setUp(self):
    # Each test gets its own auto-cleaning scratch filesystem.
    self.fs = tempfs.TempFS()
def setUp(self):
    # Build the scratch filesystem first, then wrap it so extended
    # attributes are supported.
    scratch = tempfs.TempFS()
    self.fs = ensure_xattrs(scratch)