def test_overwrite(self):
    """Create a node, overwrite its content, and verify the version bump."""
    fn, _ = gen_rand_file()
    n = content.create_file(fn)
    self.assertIn('id', n)
    n = content.overwrite_file(n['id'], fn)
    # A successful overwrite increments the node's content version.
    self.assertEqual(n['contentProperties']['version'], 2)
    trash.move_to_trash(n['id'])
    # Remove the local temp file created by gen_rand_file (previously leaked).
    os.remove(fn)
def rename(self, old, new):
    """FUSE rename: move and/or rename *old* to *new*.

    An existing destination is replaced only if it is a file; otherwise
    EEXIST is raised. Raises ENOENT if *old* does not exist and ENOTDIR
    if a parent directory of either path cannot be resolved.
    """
    if old == new:
        return

    # Renamed local from 'id' to avoid shadowing the builtin id().
    node_id = query.resolve_path(old, False)
    if not node_id:
        raise FuseOSError(errno.ENOENT)

    new_bn, old_bn = os.path.basename(new), os.path.basename(old)
    new_dn, old_dn = os.path.dirname(new), os.path.dirname(old)

    existing_id = query.resolve_path(new, False)
    if existing_id:
        en = query.get_node(existing_id)
        # POSIX rename may silently replace an existing file.
        if en and en.is_file():
            trash.move_to_trash(existing_id)
        else:
            raise FuseOSError(errno.EEXIST)

    if new_bn != old_bn:
        self._rename(node_id, new_bn)

    if new_dn != old_dn:
        odir_id = query.resolve_path(old_dn, False)
        ndir_id = query.resolve_path(new_dn, False)
        if not odir_id or not ndir_id:
            raise FuseOSError(errno.ENOTDIR)
        self._move(node_id, odir_id, ndir_id)
def rename(self, old, new):
    """FUSE rename: move and/or rename *old* to *new*.

    An existing destination is replaced only when it is an empty file;
    otherwise EEXIST is raised. Raises ENOENT if *old* does not exist
    and ENOTDIR if a parent directory of either path cannot be resolved.
    """
    if old == new:
        return
    logger.debug('rename %s %s' % (old, new))

    # Renamed local from 'id' to avoid shadowing the builtin id().
    node_id = query.resolve_path(old)
    if not node_id:
        raise FuseOSError(errno.ENOENT)

    new_bn, old_bn = os.path.basename(new), os.path.basename(old)
    new_dn, old_dn = os.path.dirname(new), os.path.dirname(old)

    existing_id = query.resolve_path(new)
    if existing_id:
        en = query.get_node(existing_id)
        # Only an empty existing file may be replaced in this variant.
        if en and en.is_file() and en.size == 0:
            trash.move_to_trash(existing_id)
        else:
            raise FuseOSError(errno.EEXIST)

    if new_bn != old_bn:
        self._rename(node_id, new_bn)

    if new_dn != old_dn:
        odir_id = query.resolve_path(old_dn)
        ndir_id = query.resolve_path(new_dn)
        if not odir_id or not ndir_id:
            raise FuseOSError(errno.ENOTDIR)
        self._move(node_id, odir_id, ndir_id)
def test_upload_stream(self):
    """Upload from an anonymous mmap and check the reported MD5."""
    name = gen_rand_nm()
    stream = gen_rand_anon_mmap()
    hasher = hashing.IncrementalHasher()
    node = content.upload_stream(stream, name, parent=None,
                                 read_callbacks=[hasher.update])
    self.assertEqual(node['contentProperties']['md5'], hasher.get_result())
    trash.move_to_trash(node['id'])
def test_restore(self):
    """Trash a folder, restore it, then trash it again for cleanup."""
    folder_id = self.create_random_dir()
    node = trash.move_to_trash(folder_id)
    self.assertEqual(node['status'], 'TRASH')
    node = trash.restore(node['id'])
    self.assertEqual(node['status'], 'AVAILABLE')
    node = trash.move_to_trash(node['id'])
    self.assertEqual(node['status'], 'TRASH')
def test_rename_node(self):
    """rename_node changes the node's name attribute."""
    old_name = gen_rand_nm()
    new_name = gen_rand_nm()
    node = content.create_file(old_name)
    self.assertEqual(node['name'], old_name)
    node = metadata.rename_node(node['id'], new_name)
    self.assertEqual(node['name'], new_name)
    trash.move_to_trash(node['id'])
def test_move_node(self):
    """move_node re-parents a file into another folder."""
    folder_id = self.create_random_dir()
    node = content.create_file(gen_rand_nm())
    old_parent = node['parents'][0]
    node = metadata.move_node(node['id'], old_parent, folder_id)
    self.assertEqual(node['parents'][0], folder_id)
    trash.move_to_trash(folder_id)
    trash.move_to_trash(node['id'])
def test_overwrite(self):
    """Upload an empty file, overwrite it, and verify the version bump."""
    fn = gen_rand_nm()
    # Create an empty local file to upload; 'with' guarantees it is closed
    # (the original open(fn, 'wb').close() relied on manual closing).
    with open(fn, 'wb'):
        pass
    n = content.upload_file(fn)
    self.assertIn('id', n)
    n = content.overwrite_file(n['id'], fn)
    # A successful overwrite increments the node's content version.
    self.assertEqual(n['contentProperties']['version'], 2)
    trash.move_to_trash(n['id'])
    os.remove(fn)
def test_download(self):
    """Upload a small file, download it back, and compare checksums."""
    fn, size = gen_rand_file()
    self.assertTrue(size < content.CONSECUTIVE_DL_LIMIT)
    md5_orig = hashing.hash_file(fn)
    node = content.upload_file(fn)
    self.assertIn('id', node)
    os.remove(fn)
    self.assertFalse(os.path.exists(fn))
    content.download_file(node['id'], fn)
    self.assertEqual(md5_orig, hashing.hash_file(fn))
    trash.move_to_trash(node['id'])
    os.remove(fn)
def test_download_chunked(self):
    """Chunked download of a multi-chunk file must round-trip byte-exact."""
    ch_sz = gen_rand_sz()
    old_chunk_size = content.CHUNK_SIZE
    content.CHUNK_SIZE = ch_sz
    try:
        fn, sz = gen_rand_file(size=5 * ch_sz)
        md5 = hashing.hash_file(fn)
        n = content.upload_file(fn)
        self.assertEqual(n['contentProperties']['md5'], md5)
        os.remove(fn)
        self.assertFalse(os.path.exists(fn))
        f = io.BytesIO()
        content.chunked_download(n['id'], f, length=sz)
        trash.move_to_trash(n['id'])
        dl_md5 = hashing.hash_bytes(f)
        self.assertEqual(sz, f.tell())
        self.assertEqual(md5, dl_md5)
    finally:
        # Restore the module-level chunk size so other tests are unaffected.
        content.CHUNK_SIZE = old_chunk_size
def trash_action(args: argparse.Namespace) -> int:
    """Move the node named by ``args.node`` to trash and sync the result.

    Returns 0 on success, 1 on a request error.
    """
    try:
        r = trash.move_to_trash(args.node)
        sync.insert_node(r)
    except RequestError as e:
        print(e)
        return 1
    # Was missing: the success path implicitly returned None despite the
    # declared int return type.
    return 0
def test_incomplete_download(self):
    """Requesting more bytes than exist must raise INCOMPLETE_RESULT."""
    ch_sz = gen_rand_sz()
    old_chunk_size = content.CHUNK_SIZE
    content.CHUNK_SIZE = ch_sz
    try:
        fn, sz = gen_rand_file(size=5 * ch_sz)
        md5 = hashing.hash_file(fn)
        n = content.upload_file(fn)
        self.assertEqual(n['contentProperties']['md5'], md5)
        os.remove(fn)
        self.assertFalse(os.path.exists(fn))
        with self.assertRaises(RequestError) as cm:
            content.download_file(n['id'], fn, length=sz + 1)
        # os.remove(fn + content.PARTIAL_SUFFIX)
        self.assertEqual(cm.exception.status_code,
                         RequestError.CODE.INCOMPLETE_RESULT)
        content.download_file(n['id'], fn, length=sz)
        trash.move_to_trash(n['id'])
        os.remove(fn)
    finally:
        # Restore the module-level chunk size so other tests are unaffected.
        content.CHUNK_SIZE = old_chunk_size
def test_upload(self):
    """An uploaded file reports matching size and MD5."""
    fn, size = gen_rand_file()
    expected_md5 = hashing.hash_file(fn)
    node = content.upload_file(fn)
    self.assertIn('id', node)
    props = node['contentProperties']
    self.assertEqual(props['size'], size)
    self.assertEqual(props['md5'], expected_md5)
    trash.move_to_trash(node['id'])
    os.remove(fn)
def test_download_resume(self):
    """A partial download must be resumed and yield the original content."""
    ch_sz = gen_rand_sz()
    old_chunk_size = content.CHUNK_SIZE
    old_dl_limit = content.CONSECUTIVE_DL_LIMIT
    content.CHUNK_SIZE = ch_sz
    content.CONSECUTIVE_DL_LIMIT = ch_sz
    try:
        fn, sz = gen_rand_file(size=5 * ch_sz)
        md5 = hashing.hash_file(fn)
        n = content.upload_file(fn)
        self.assertEqual(n['contentProperties']['md5'], md5)
        os.remove(fn)
        self.assertFalse(os.path.exists(fn))
        p_fn = fn + content.PARTIAL_SUFFIX
        # Simulate an interrupted download by fetching only a random prefix.
        with open(p_fn, 'wb') as f:
            content.chunked_download(n['id'], f, length=int(sz * random.random()))
        self.assertLess(os.path.getsize(p_fn), sz)
        content.download_file(n['id'], fn)
        trash.move_to_trash(n['id'])
        dl_md5 = hashing.hash_file(fn)
        self.assertEqual(md5, dl_md5)
        os.remove(fn)
    finally:
        # Restore the module-level globals mutated above so other tests
        # are unaffected.
        content.CHUNK_SIZE = old_chunk_size
        content.CONSECUTIVE_DL_LIMIT = old_dl_limit
def rmdir(self, path):
    """FUSE rmdir: move the directory node at *path* into the trash."""
    node_id = query.resolve_path(path)
    if not node_id:
        raise FuseOSError(errno.ENOENT)
    try:
        result = trash.move_to_trash(node_id)
        sync.insert_node(result)
    except RequestError as e:
        # Connection problems map to ECOMM, anything else to EREMOTEIO.
        if e.status_code == e.CODE.CONN_EXCEPTION:
            raise FuseOSError(errno.ECOMM)
        raise FuseOSError(errno.EREMOTEIO)
def _trash(path):
    """Move the node at *path* into the trash; sync the cache on success."""
    logger.debug('trash %s' % path)
    node, parent = query.resolve(path, False)
    if not node:  # or not parent:
        raise FuseOSError(errno.ENOENT)
    logger.debug('%s %s' % (node, parent))
    try:
        # if len(node.parents) > 1:
        #     r = metadata.remove_child(parent.id, node.id)
        # else:
        result = trash.move_to_trash(node.id)
    except RequestError as e:
        FuseOSError.convert(e)
    else:
        sync.insert_node(result)
def _trash(path):
    # Trash the node that *path* resolves to; the local cache is updated
    # only when the remote call succeeds.
    logger.debug("trash %s" % path)
    node, parent = query.resolve(path, False)
    if not node:  # or not parent:
        raise FuseOSError(errno.ENOENT)
    logger.debug("%s %s" % (node, parent))
    try:
        # if len(node.parents) > 1:
        #     r = metadata.remove_child(parent.id, node.id)
        # else:
        trashed = trash.move_to_trash(node.id)
    except RequestError as e:
        FuseOSError.convert(e)
    else:
        sync.insert_node(trashed)
def _trash(path):
    """Trash the node at *path*; propagate the change into the local cache."""
    logger.debug('trash %s' % path)
    node, parent = query.resolve(path)
    if not node or not parent:
        raise FuseOSError(errno.ENOENT)
    logger.debug('%s %s' % (node, parent))
    try:
        # if len(node.parents) > 1:
        #     r = metadata.remove_child(parent.id, node.id)
        # else:
        result = trash.move_to_trash(node.id)
    except RequestError as e:
        # Connection problems map to ECOMM, anything else to EREMOTEIO.
        if e.status_code == e.CODE.CONN_EXCEPTION:
            raise FuseOSError(errno.ECOMM)
        raise FuseOSError(errno.EREMOTEIO)
    else:
        sync.insert_node(result)
def test_create_file(self):
    """A freshly created file carries its name and has the root as parent."""
    nm = gen_rand_nm()
    created = content.create_file(nm)
    # Trash immediately so cleanup happens even if an assertion fails.
    trash.move_to_trash(created['id'])
    root_id = metadata.get_root_id()
    self.assertEqual(created['name'], nm)
    self.assertEqual(created['parents'][0], root_id)
def test_purge(self):
    """Purging a trashed node is expected to be rejected by the API."""
    folder_id = self.create_random_dir()
    trashed = trash.move_to_trash(folder_id)
    self.assertEqual(trashed['status'], 'TRASH')
    with self.assertRaises(RequestError):
        trash.purge(trashed['id'])
def test_mkdir(self):
    """Creating a random directory succeeds; trash it afterwards."""
    folder_id = self.create_random_dir()
    trash.move_to_trash(folder_id)