def mkdir(self, path):
    """
    Create a directory at *path* via the ``hadoop fs -mkdir`` CLI.

    No ``-p`` switch is passed, so missing ancestors are not created and
    the CLI call fails in that case.

    :raises FileAlreadyExists: if the CLI reports the path already exists.
    :raises HDFSCliError: for any other CLI failure.
    """
    try:
        call_check(['hadoop', 'fs', '-mkdir', path])
    except HDFSCliError as ex:  # fixed: `except X, ex` is Python-2-only syntax
        if "File exists" in ex.stderr:
            raise FileAlreadyExists(ex.stderr)
        else:
            raise
def mkdir(self, path, parents=True, raise_if_exists=False):
    """
    Create a pseudo-directory marker on S3 by putting an empty object at *path*.

    The bucket root needs no marker, so it is silently skipped.

    :param parents: accepted for interface compatibility; S3 has no real
        directory hierarchy, so there are no ancestors to create.
    :param raise_if_exists: raise if *path* is already a directory.
    :raises FileAlreadyExists: when *raise_if_exists* is set and *path* exists.
    """
    if self._is_root(path):
        # The bucket root always "exists"; nothing to create.
        return
    if raise_if_exists and self.isdir(path):
        raise FileAlreadyExists()
    bucket, key = parse_s3_path(path)
    # An empty object acts as the directory placeholder.
    return self.s3.Object(bucket, key).put(Body=b'')
def mkdir(self, path):
    """
    Create a directory at *path* via the Hadoop CLI.

    No -p switch, so this will fail creating ancestors.

    :raises FileAlreadyExists: if the CLI reports the path already exists.
    :raises HDFSCliError: for any other CLI failure.
    """
    try:
        call_check([load_hadoop_cmd(), 'fs', '-mkdir', path])
    except HDFSCliError as ex:  # fixed: `except X, ex` is Python-2-only syntax
        if "File exists" in ex.stderr:
            raise FileAlreadyExists(ex.stderr)
        else:
            raise
def mkdir(self, path):
    """
    Create a directory at *path* through the Hadoop CLI.

    No -p switch, so this will fail creating ancestors.

    :raises FileAlreadyExists: if the CLI reports the path already exists.
    """
    command = load_hadoop_cmd() + ['fs', '-mkdir', path]
    try:
        self.call_check(command)
    except hdfs_error.HDFSCliError as ex:
        # Anything other than an "already exists" failure propagates as-is.
        if "File exists" not in ex.stderr:
            raise
        raise FileAlreadyExists(ex.stderr)
def mkdir(self, path, parents=True, raise_if_exists=False):
    """
    No explicit -p switch, this version of Hadoop always creates parent
    directories.

    :param raise_if_exists: when True, an already-existing *path* raises
        ``FileAlreadyExists``; when False (the default) it is silently
        treated as success.
    :raises FileAlreadyExists: only when *raise_if_exists* is set.
    """
    try:
        self.call_check(load_hadoop_cmd() + ['fs', '-mkdir', path])
    except hdfs_error.HDFSCliError as ex:
        if "File exists" in ex.stderr:
            # Fixed: previously FileAlreadyExists was raised unconditionally,
            # ignoring the raise_if_exists flag the signature advertises.
            if raise_if_exists:
                raise FileAlreadyExists(ex.stderr)
        else:
            raise
def mkdir(self, path, parents=True, raise_if_exists=False):
    """
    Create a directory via ``hadoop fs -mkdir``, optionally with ``-p``.

    :param parents: pass ``-p`` so ancestors are created as needed.
    :param raise_if_exists: raise ``FileAlreadyExists`` if *path* exists.
        Incompatible with *parents* because ``-p`` never reports an
        existing directory.
    :raises NotImplementedError: if both *parents* and *raise_if_exists* are set.
    :raises FileAlreadyExists: when *raise_if_exists* is set and *path* exists.
    """
    if (parents and raise_if_exists):
        raise NotImplementedError("HdfsClient.mkdir can't raise with -p")
    try:
        cmd = (load_hadoop_cmd() + ['fs', '-mkdir'] +
               (['-p'] if parents else []) +
               [path])
        call_check(cmd)
    except HDFSCliError as ex:  # fixed: `except X, ex` is Python-2-only syntax
        if "File exists" in ex.stderr:
            if raise_if_exists:
                raise FileAlreadyExists(ex.stderr)
        else:
            raise
def mkdir(self, path, parents=True, raise_if_exists=False):
    """
    Create a directory marker object at *path* on S3.

    :param parents: when False, require the parent pseudo-directory to exist.
    :param raise_if_exists: raise if *path* is already a directory.
    :raises FileAlreadyExists: when *raise_if_exists* is set and *path* exists.
    :raises MissingParentDirectory: when *parents* is False and the parent
        pseudo-directory is absent.
    """
    if raise_if_exists and self.isdir(path):
        raise FileAlreadyExists()
    _, key = self._path_to_bucket_and_key(path)
    if self._is_root(key):
        # isdir raises if the bucket doesn't exist; nothing to do here.
        return
    delimited_key = self._add_path_delimiter(key)
    parent = os.path.dirname(delimited_key)
    if not parents and not self.isdir(parent):
        raise MissingParentDirectory()
    return self.put_string("", self._add_path_delimiter(path))
def mkdir(self, path, parents=True, raise_if_exists=False):
    """
    Create a directory on the local filesystem.

    :param parents: also create missing ancestors (``os.makedirs``).
    :param raise_if_exists: raise ``FileAlreadyExists`` when *path* is
        already a directory.
    :raises FileAlreadyExists: per *raise_if_exists*.
    :raises NotADirectory: if *path* exists but is not a directory.
    :raises MissingParentDirectory: if *parents* is False and the parent
        directory does not exist.
    """
    if self.exists(path):
        if raise_if_exists:
            raise FileAlreadyExists()
        elif not self.isdir(path):
            raise NotADirectory()
        else:
            return
    if parents:
        try:
            os.makedirs(path)
        except OSError as err:
            # Fixed TOCTOU race: another process may create the path between
            # the exists() check above and makedirs(); treat that as success,
            # matching the sibling EEXIST-tolerant mkdir implementation.
            if err.errno != errno.EEXIST:
                raise
    else:
        if not os.path.exists(os.path.dirname(path)):
            raise MissingParentDirectory()
        os.mkdir(path)
def mkdir(self, path, parents=True, raise_if_exists=False):
    """
    Create a directory on the local filesystem, tolerating a concurrent
    creator when *parents* is True.

    :param parents: also create missing ancestors.
    :param raise_if_exists: raise ``FileAlreadyExists`` when *path* is
        already a directory.
    :raises FileAlreadyExists: per *raise_if_exists*.
    :raises NotADirectory: if *path* exists but is not a directory.
    :raises MissingParentDirectory: if *parents* is False and the parent
        directory does not exist.
    """
    if self.exists(path):
        if raise_if_exists:
            raise FileAlreadyExists()
        if not self.isdir(path):
            raise NotADirectory()
        return
    if not parents:
        if not os.path.exists(os.path.dirname(path)):
            raise MissingParentDirectory()
        os.mkdir(path)
        return
    try:
        os.makedirs(path)
    except OSError as err:
        # somebody already created the path
        if err.errno != errno.EEXIST:
            raise
def move(self, old_path, new_path, raise_if_exists=False):
    """
    Move file atomically. If source and destination are located on
    different filesystems, atomicity is approximated but cannot be
    guaranteed.

    :param raise_if_exists: refuse to overwrite an existing destination.
    :raises FileAlreadyExists: per *raise_if_exists*.
    """
    if raise_if_exists and os.path.exists(new_path):
        raise FileAlreadyExists('Destination exists: %s' % new_path)
    dest_dir = os.path.dirname(new_path)
    if dest_dir and not os.path.exists(dest_dir):
        self.mkdir(dest_dir)
    try:
        os.rename(old_path, new_path)
    except OSError as err:
        if err.errno != errno.EXDEV:
            raise err
        # Cross-device move: copy to a temporary sibling first, then rename
        # into place so readers never observe a half-written destination.
        tmp_path = '%s-%09d' % (new_path, random.randint(0, 999999999))
        shutil.copy(old_path, tmp_path)
        os.rename(tmp_path, new_path)
        os.remove(old_path)
def run(self):
    """
    Sleep for ``self.time_to_run_secs`` seconds, then create an empty file
    at ``self.file_name``.

    :raises FileAlreadyExists: if the file already exists after the wait.
    """
    time.sleep(self.time_to_run_secs)
    target = self.file_name
    if os.path.exists(target):
        raise FileAlreadyExists(target)
    # Touch an empty marker file.
    with open(target, 'w'):
        pass
def mkdir(self, path, parents=True, raise_if_exists=False):
    """
    Directory creation is a no-op for Azure blob storage; only check for
    an existing-path conflict.

    :param parents: accepted for interface compatibility; unused.
    :raises FileAlreadyExists: when *raise_if_exists* is set and *path*
        already exists.
    """
    container, blob = self.splitfilepath(path)
    if not (raise_if_exists and self.exists(path)):
        return
    raise FileAlreadyExists(
        "The Azure blob path '{blob}' already exists under container '{container}'"
        .format(blob=blob, container=container))