def upload_folder(folder: str, parent_id: str, overwr: bool, force: bool, exclude: list) -> int:
    if parent_id is None:
        parent_id = query.get_root_id()
    parent = query.get_node(parent_id)

    real_path = os.path.realpath(folder)
    short_nm = os.path.basename(real_path)

    curr_node = parent.get_child(short_nm)
    if not curr_node or curr_node.status == 'TRASH' or parent.status == 'TRASH':
        try:
            r = content.create_folder(short_nm, parent_id)
            sync.insert_node(r)
            curr_node = query.get_node(r['id'])
        except RequestError as e:
            if e.status_code == 409:
                logger.error('Folder "%s" already exists. Please sync.' % short_nm)
            else:
                logger.error('Error creating remote folder "%s".' % short_nm)
            return ERR_CR_FOLDER
    elif curr_node.is_file():
        logger.error('Cannot create remote folder "%s", because a file of the same name already exists.'
                     % short_nm)
        return ERR_CR_FOLDER

    entries = sorted(os.listdir(folder))

    ret_val = 0
    for entry in entries:
        full_path = os.path.join(real_path, entry)
        ret_val |= upload(full_path, curr_node.id, overwr, force, exclude)

    return ret_val
def _move(id, old_folder, new_folder):
    try:
        r = metadata.move_node(id, old_folder, new_folder)
    except RequestError as e:
        FuseOSError.convert(e)
    else:
        sync.insert_node(r)
def rename_action(args: argparse.Namespace) -> int:
    try:
        r = metadata.rename_node(args.node, args.name)
        sync.insert_node(r)
    except RequestError as e:
        print(e)
        return 1
    return 0
def trash_action(args: argparse.Namespace) -> int:
    try:
        r = trash.move_to_trash(args.node)
        sync.insert_node(r)
    except RequestError as e:
        print(e)
        return 1
    return 0
def remove_child_action(args: argparse.Namespace) -> int:
    try:
        r = metadata.remove_child(args.parent, args.child)
        sync.insert_node(r)
    except RequestError as e:
        print(e)
        return 1
    return 0
def restore_action(args: argparse.Namespace) -> int:
    try:
        r = trash.restore(args.node)
    except RequestError as e:
        logger.error('Error restoring "%s": %s' % (args.node, e))
        return 1
    sync.insert_node(r)
    return 0
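# Hedged usage sketch (not from the source): the *_action functions above return an int
# exit code and take an argparse.Namespace, so they could plausibly be wired up with
# sub-parsers and set_defaults(func=...). The sub-command names below are assumptions
# for illustration only; the argument names match the attributes the actions read.
import argparse
import sys


def build_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser()
    sub = parser.add_subparsers(dest='command')

    rename = sub.add_parser('rename')        # hypothetical sub-command name
    rename.add_argument('node')
    rename.add_argument('name')
    rename.set_defaults(func=rename_action)

    tr = sub.add_parser('trash')             # hypothetical sub-command name
    tr.add_argument('node')
    tr.set_defaults(func=trash_action)

    return parser


if __name__ == '__main__':
    args = build_parser().parse_args()
    sys.exit(args.func(args))                # 0 on success, 1 on RequestError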
def _rename(id, name):
    try:
        r = metadata.rename_node(id, name)
    except RequestError as e:
        FuseOSError.convert(e)
    else:
        sync.insert_node(r)
def write_n_sync(stream: WriteStream, node_id: str):
    try:
        r = content.overwrite_stream(stream, node_id)
    except RequestError as e:
        stream.error = True
        logger.error('Error writing file. Code: %i, msg: %s' % (e.status_code, e.msg))
    else:
        sync.insert_node(r)
def testInsertFile(self):
    root = gen_folder()
    sync.insert_node(root)
    file = gen_file([root])
    sync.insert_node(file)
    n = query.get_node(file['id'])
    self.assertEqual(len(n.parents), 1)
    self.assertEqual(query.get_node_count(), 2)
def _move(id, old_folder, new_folder):
    try:
        r = metadata.move_node_from(id, old_folder, new_folder)
    except RequestError as e:
        if e.status_code == e.CODE.CONN_EXCEPTION:
            raise FuseOSError(errno.ECOMM)
        raise FuseOSError(errno.EREMOTEIO)
    else:
        sync.insert_node(r)
def overwrite(node_id, local_file) -> int:
    hasher = hashing.Hasher(local_file)
    try:
        r = content.overwrite_file(node_id, local_file)
        sync.insert_node(r)
        node = query.get_node(r['id'])
        return compare_hashes(node.md5, hasher.get_result(), local_file)
    except RequestError as e:
        hasher.stop()
        logger.error('Error overwriting file. Code: %s, msg: %s' % (e.status_code, e.msg))
        return UL_DL_FAILED
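# Hedged sketch (assumption, not the project's actual helper): compare_hashes is called
# above with a remote MD5, a locally computed MD5 and a display name, and its int result
# is propagated as the return code; note the two call sites pass the hashes in opposite
# order, so only equality can matter. HASH_MISMATCH is a hypothetical constant.
HASH_MISMATCH = 16  # hypothetical error flag


def compare_hashes(hash_1: str, hash_2: str, file_name: str) -> int:
    """Return 0 if the two MD5 digests match, otherwise log and return an error flag."""
    if hash_1 != hash_2:
        logger.error('Hash mismatch for "%s": %s != %s.' % (file_name, hash_1, hash_2))
        return HASH_MISMATCH
    return 0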
def upload_file(path: str, parent_id: str, overwr: bool, force: bool) -> int:
    short_nm = os.path.basename(path)

    cached_file = query.get_node(parent_id).get_child(short_nm)
    file_id = None
    if cached_file:
        file_id = cached_file.id

    if not file_id:
        try:
            hasher = hashing.Hasher(path)
            r = content.upload_file(path, parent_id)
            sync.insert_node(r)
            file_id = r['id']
            cached_file = query.get_node(file_id)
            return compare_hashes(hasher.get_result(), cached_file.md5, short_nm)
        except RequestError as e:
            if e.status_code == 409:  # might happen if cache is outdated
                hasher.stop()
                logger.error('Uploading "%s" failed. Name collision with non-cached file. '
                             'If you want to overwrite, please sync and try again.' % short_nm)
                # colliding node ID is returned in error message -> could be used to continue
                return UL_DL_FAILED
            elif e.status_code == 504 or e.status_code == 408:  # proxy timeout / request timeout
                hasher.stop()
                logger.warning('Timeout while uploading "%s".' % short_nm)
                # TODO: wait; request parent folder's children
                return UL_TIMEOUT
            else:
                hasher.stop()
                logger.error('Uploading "%s" failed. Code: %s, msg: %s'
                             % (short_nm, e.status_code, e.msg))
                return UL_DL_FAILED

    # else: file exists
    mod_time = (cached_file.modified - datetime.datetime(1970, 1, 1)) / datetime.timedelta(seconds=1)

    logger.info('Remote mtime: ' + str(mod_time) + ', local mtime: ' + str(os.path.getmtime(path))
                + ', local ctime: ' + str(os.path.getctime(path)))

    if not overwr and not force:
        print('Skipping upload of existing file "%s".' % short_nm)
        return 0

    # ctime is checked because files can be overwritten by files with older mtime
    if mod_time < os.path.getmtime(path) \
            or (mod_time < os.path.getctime(path) and cached_file.size != os.path.getsize(path)) \
            or force:
        return overwrite(file_id, path)
    elif not force:
        print('Skipping upload of "%s" because of mtime or ctime and size.' % short_nm)
        return 0
    else:
        hasher = hashing.Hasher(path)
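# Minimal, self-contained illustration of the mtime comparison used above (assumption:
# cached_file.modified is a naive UTC datetime). Dividing a timedelta by
# timedelta(seconds=1) yields epoch seconds as a float, which makes the comparison
# against os.path.getmtime() valid.
import datetime

modified = datetime.datetime(2015, 6, 1, 12, 0, 0)  # example naive UTC timestamp
epoch_seconds = (modified - datetime.datetime(1970, 1, 1)) / datetime.timedelta(seconds=1)
print(epoch_seconds)  # 1433160000.0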
def rmdir(self, path):
    n = query.resolve_path(path)
    if not n:
        raise FuseOSError(errno.ENOENT)
    try:
        r = trash.move_to_trash(n)
        sync.insert_node(r)
    except RequestError as e:
        if e.status_code == e.CODE.CONN_EXCEPTION:
            raise FuseOSError(errno.ECOMM)
        else:
            raise FuseOSError(errno.EREMOTEIO)
def _rename(id, name):
    try:
        r = metadata.rename_node(id, name)
    except RequestError as e:
        logger.debug(e)
        if e.status_code == e.CODE.CONN_EXCEPTION:
            raise FuseOSError(errno.ECOMM)
        elif e.status_code == 409:
            raise FuseOSError(errno.EEXIST)
        else:
            raise FuseOSError(errno.EREMOTEIO)
    else:
        sync.insert_node(r)
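# Hedged sketch (assumption): other snippets above call FuseOSError.convert(e) to
# translate a RequestError into a FUSE error, while this _rename spells the mapping out
# inline. A standalone converter consistent with that inline mapping might look like the
# function below; the project's actual classmethod may differ.
import errno
from fuse import FuseOSError  # fusepy


def convert_request_error(e):
    """Raise the FuseOSError matching a RequestError's status code."""
    if e.status_code == e.CODE.CONN_EXCEPTION:
        raise FuseOSError(errno.ECOMM)      # connection problem
    elif e.status_code == 409:
        raise FuseOSError(errno.EEXIST)     # name collision
    else:
        raise FuseOSError(errno.EREMOTEIO)  # generic remote I/O failure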
def rename(self, old, new):
    id = query.resolve_path(old)
    new = os.path.basename(new)
    try:
        r = metadata.rename_node(id, new)
        sync.insert_node(r)
    except RequestError as e:
        if e.status_code == e.CODE.CONN_EXCEPTION:
            raise FuseOSError(errno.ECOMM)
        elif e.status_code == 409:
            raise FuseOSError(errno.EEXIST)
        else:
            raise FuseOSError(errno.EREMOTEIO)
def mkdir(self, path, mode):
    name = os.path.basename(path)
    ppath = os.path.dirname(path)
    pid = query.resolve_path(ppath)
    if not pid:
        raise FuseOSError(errno.ENOTDIR)
    try:
        r = content.create_folder(name, pid)
    except RequestError as e:
        FuseOSError.convert(e)
    else:
        sync.insert_node(r)
def create(self, path, mode):
    name = os.path.basename(path)
    ppath = os.path.dirname(path)
    pid = query.resolve_path(ppath, False)
    if not pid:
        raise FuseOSError(errno.ENOTDIR)
    try:
        r = content.create_file(name, pid)
        sync.insert_node(r)
    except RequestError as e:
        FuseOSError.convert(e)
    self.fh += 1
    return self.fh
def _trash(path):
    logger.debug('trash %s' % path)
    node, parent = query.resolve(path, False)
    if not node:  # or not parent:
        raise FuseOSError(errno.ENOENT)
    logger.debug('%s %s' % (node, parent))

    try:
        # if len(node.parents) > 1:
        #     r = metadata.remove_child(parent.id, node.id)
        # else:
        r = trash.move_to_trash(node.id)
    except RequestError as e:
        FuseOSError.convert(e)
    else:
        sync.insert_node(r)
def mkdir(self, path, mode):
    logger.debug('+mkdir %s' % path)
    name = os.path.basename(path)
    ppath = os.path.dirname(path)
    pid = query.resolve_path(ppath)
    if not pid:
        raise FuseOSError(errno.ENOTDIR)
    try:
        r = content.create_folder(name, pid)
    except RequestError as e:
        if e.status_code == e.CODE.CONN_EXCEPTION:
            raise FuseOSError(errno.ECOMM)
        elif e.status_code == 409:
            raise FuseOSError(errno.EEXIST)
        else:
            raise FuseOSError(errno.EREMOTEIO)
    else:
        sync.insert_node(r)
def create(self, path, mode):
    name = os.path.basename(path)
    ppath = os.path.dirname(path)
    pid = query.resolve_path(ppath)
    if not pid:
        raise FuseOSError(errno.ENOTDIR)
    try:
        r = content.create_file(name, pid)
        sync.insert_node(r)
    except RequestError as e:
        if e.status_code == e.CODE.CONN_EXCEPTION:
            raise FuseOSError(errno.ECOMM)
        elif e.status_code == 409:
            raise FuseOSError(errno.EEXIST)
        else:
            raise FuseOSError(errno.EREMOTEIO)
    return 0
def _trash(path):
    logger.debug('trash %s' % path)
    node, parent = query.resolve(path)
    if not node or not parent:
        raise FuseOSError(errno.ENOENT)
    logger.debug('%s %s' % (node, parent))

    try:
        # if len(node.parents) > 1:
        #     r = metadata.remove_child(parent.id, node.id)
        # else:
        r = trash.move_to_trash(node.id)
    except RequestError as e:
        if e.status_code == e.CODE.CONN_EXCEPTION:
            raise FuseOSError(errno.ECOMM)
        else:
            raise FuseOSError(errno.EREMOTEIO)
    else:
        sync.insert_node(r)
def testInsertFolder(self):
    folder = gen_folder()
    sync.insert_node(folder)
    n = query.get_node(folder['id'])
    self.assertEqual(n.id, folder['id'])
    self.assertEqual(query.get_node_count(), 1)
def testCalculateUsageEmpty2(self):
    sync.insert_node(gen_folder())
    self.assertEqual(query.calculate_usage(), 0)
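# Hedged sketch (assumption, for readability of the tests above): gen_folder() and
# gen_file(parents) appear to build minimal node records with at least an 'id' and a
# parent list, which sync.insert_node() then writes into the local cache. Every field
# name here other than 'id' and 'parents' is a guess, not the real node schema.
import random
import string


def _random_id(length: int = 22) -> str:
    return ''.join(random.choices(string.ascii_letters + string.digits, k=length))


def gen_folder(parents: list = None) -> dict:
    return {'id': _random_id(),
            'kind': 'FOLDER',        # guessed field
            'status': 'AVAILABLE',   # guessed field
            'parents': [p['id'] for p in parents] if parents else []}


def gen_file(parents: list) -> dict:
    node = gen_folder(parents)
    node['kind'] = 'FILE'            # guessed field
    return node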