def open_fs(self, fs_url, parse_result, writeable, create, cwd):  # pylint: disable=no-self-use
    """Open a :class:`GCSFS` from a parsed fs URL.

    The first path segment of the resource is the bucket name; the rest
    becomes the root path inside the bucket.  Optional URL params:
    ``strict`` ("False" disables strict mode), ``project`` and
    ``api_endpoint`` to configure the GCS client.

    Raises:
        OpenerError: if the URL contains no bucket name.
    """
    path_parts = iteratepath(parse_result.resource)
    # Guard the empty resource first: indexing path_parts[0] on an empty
    # list would raise IndexError instead of the intended OpenerError.
    if not path_parts or not path_parts[0]:
        raise OpenerError("invalid bucket name in '{}'".format(fs_url))
    bucket_name = path_parts[0]
    root_path = join(*path_parts[1:])
    # Any value other than the literal string "False" keeps strict mode on.
    strict = parse_result.params.get("strict") != "False"
    client = Client()
    project = parse_result.params.get("project")
    if project:
        client.project = project
    api_endpoint = parse_result.params.get("api_endpoint")
    if api_endpoint:
        # NOTE(review): assigning client_options after construction may not
        # actually reconfigure the client's endpoint -- google-cloud-storage
        # expects client_options in the Client() constructor. Verify.
        client.client_options = {"api_endpoint": api_endpoint}
    return GCSFS(bucket_name, root_path=root_path, create=create, client=client, strict=strict)
def open_fs(self, fs_url, parse_result, writeable, create, cwd):  # pylint: disable=no-self-use
    """Open a :class:`GCSFS` from a parsed fs URL.

    The first path segment of the resource is the bucket name; the rest
    becomes the root path inside the bucket.

    Raises:
        OpenerError: if the URL contains no bucket name.
    """
    path_parts = iteratepath(parse_result.resource)
    # Guard the empty resource first: indexing path_parts[0] on an empty
    # list would raise IndexError instead of the intended OpenerError.
    if not path_parts or not path_parts[0]:
        raise OpenerError("invalid bucket name in '{}'".format(fs_url))
    bucket_name = path_parts[0]
    root_path = join(*path_parts[1:])
    return GCSFS(bucket_name, root_path=root_path, create=create)
def open_fs(self, fs_url, parse_result, writeable, create, cwd):
    """Open an :class:`S3FS` from a parsed fs URL.

    Everything before the first "/" in the resource is the bucket name;
    everything after it is the directory path inside the bucket.  URL
    credentials, when present, are forwarded as the AWS key pair.

    Raises:
        OpenerError: if the URL contains no bucket name.
    """
    bucket_name, _, dir_path = parse_result.resource.partition('/')
    if not bucket_name:
        raise OpenerError("invalid bucket name in '{}'".format(fs_url))
    return S3FS(
        bucket_name,
        dir_path=dir_path or '/',
        aws_access_key_id=parse_result.username or None,
        aws_secret_access_key=parse_result.password or None,
    )
def open_fs(self, fs_url, parse_result, writeable, create, cwd):
    """Open an :class:`S3FS` from a parsed fs URL.

    Everything before the first "/" in the resource is the bucket name;
    the remainder is the directory path.  URL credentials become the AWS
    key pair; ``endpoint_url`` and ``strict`` params are honoured.

    Raises:
        OpenerError: if the URL contains no bucket name.
    """
    bucket_name, _, dir_path = parse_result.resource.partition('/')
    if not bucket_name:
        raise OpenerError("invalid bucket name in '{}'".format(fs_url))
    params = parse_result.params
    # strict defaults to on; only an explicit strict=1 keeps it, any other
    # explicit value turns it off (missing key behaves like "1").
    strict = params.get('strict', '1') == '1'
    return S3FS(
        bucket_name,
        dir_path=dir_path or '/',
        aws_access_key_id=parse_result.username or None,
        aws_secret_access_key=parse_result.password or None,
        endpoint_url=params.get('endpoint_url', None),
        strict=strict,
    )
def open_fs(self, fs_url, parse_result, writeable, create, cwd):
    """Open an :class:`S3FS` from a parsed fs URL.

    Everything before the first "/" in the resource is the bucket name;
    the remainder is the directory path.  URL credentials become the AWS
    key pair; ``endpoint_url``, ``acl``, ``cache_control`` and ``strict``
    params are forwarded.

    Raises:
        OpenerError: if the URL contains no bucket name.
    """
    bucket_name, _, dir_path = parse_result.resource.partition("/")
    if not bucket_name:
        raise OpenerError("invalid bucket name in '{}'".format(fs_url))
    params = parse_result.params
    # strict defaults to off here; only an explicit strict=1 enables it.
    strict = params.get("strict") == "1"
    return S3FS(
        bucket_name,
        dir_path=dir_path or "/",
        aws_access_key_id=parse_result.username or None,
        aws_secret_access_key=parse_result.password or None,
        endpoint_url=params.get("endpoint_url", None),
        acl=params.get("acl", None),
        cache_control=params.get("cache_control", None),
        strict=strict,
    )
def open_fs(self, fs_url, parse_result, writeable, create, cwd):
    """Open a :class:`GCSFS` from a parsed fs URL.

    The first path segment of the resource is the bucket name; the rest
    becomes the root path inside the bucket.  The ``strict`` URL param
    disables strict mode when set to the literal string "False".

    Raises:
        OpenerError: if the URL contains no bucket name.
    """
    path_parts = iteratepath(parse_result.resource)
    # Guard the empty resource first: indexing path_parts[0] on an empty
    # list would raise IndexError instead of the intended OpenerError.
    if not path_parts or not path_parts[0]:
        raise OpenerError("invalid bucket name in '{}'".format(fs_url))
    bucket_name = path_parts[0]
    root_path = join(*path_parts[1:])
    # Any value other than the literal string "False" keeps strict mode on.
    strict = parse_result.params.get("strict") != "False"
    return GCSFS(bucket_name, root_path=root_path, create=create, strict=strict)
def open_fs(self, fs_url, parse_result, writeable, create, cwd):
    """Open a read-only :class:`BFS`.

    Raises:
        OpenerError: if a writeable filesystem was requested, since BFS
            supports reading only.
    """
    if not writeable:
        return BFS()
    raise OpenerError('BFS must be readonly!')