def upload_folder(folder, parent_id, overwr, force):
    """Upload a local folder into the remote folder *parent_id*, recursing into
    its entries via upload().

    Creates the remote folder if it is absent or trashed (or the parent is
    trashed); refuses to proceed if a *file* of the same name exists remotely.
    """
    if parent_id is None:
        parent_id = query.get_root_id()
    parent = query.get_node(parent_id)

    real_path = os.path.realpath(folder)
    short_nm = os.path.basename(real_path)

    curr_node = parent.get_child(short_nm)
    if not curr_node or curr_node.status == 'TRASH' or parent.status == 'TRASH':
        try:
            r = content.create_folder(short_nm, parent_id)
            sync.insert_node(r)
            curr_node = query.get_node(r['id'])
        except RequestError as e:
            # fixed: the original message '...folder "%s.' was missing the
            # closing quote around the folder name
            print('Error creating remote folder "%s".' % short_nm)
            if e.status_code == 409:
                print('Folder already exists. Please sync.')
            logger.error(e)
            return
    elif curr_node.is_file():
        print('Cannot create remote folder "%s", because a file of the same name already exists.' % short_nm)
        return

    # sorted for a deterministic upload order
    entries = sorted(os.listdir(folder))
    for entry in entries:
        full_path = os.path.join(real_path, entry)
        upload(full_path, curr_node.id, overwr, force)
def upload_folder(folder, parent_id, overwr, force):
    """Upload a local folder into the remote folder *parent_id*, recursing into
    its entries via upload().

    Creates the remote folder if it is absent or trashed; refuses to proceed if
    a *file* of the same name exists remotely.
    """
    if parent_id is None:
        parent_id = query.get_root_id()
    parent = query.get_node(parent_id)

    real_path = os.path.realpath(folder)
    short_nm = os.path.basename(real_path)

    curr_node = parent.get_child(short_nm)
    if not curr_node or curr_node.status == 'TRASH':
        try:
            r = content.create_folder(short_nm, parent_id)
            sync.insert_node(r)
            curr_node = query.get_node(r['id'])
        except RequestError as e:
            # fixed: the original message '...folder "%s.' was missing the
            # closing quote around the folder name
            print('Error creating remote folder "%s".' % short_nm)
            if e.status_code == 409:
                print('Folder already exists. Please sync.')
            logger.error(e)
            return
    elif curr_node.is_file():
        print('Cannot create remote folder "%s", because a file of the same name already exists.' % short_nm)
        return

    # sorted for a deterministic upload order (consistent with the other
    # upload_folder variants in this file)
    entries = sorted(os.listdir(folder))
    for entry in entries:
        full_path = os.path.join(real_path, entry)
        upload(full_path, curr_node.id, overwr, force)
def restore_action(args):
    """Restore a trashed node and record the restored node in the local cache."""
    try:
        restored = trash.restore(args.node)
    except RequestError as e:
        print('Error restoring "%s"' % args.node, e)
        return
    sync.insert_node(restored)
def remove_child_action(args: argparse.Namespace):
    """Detach *args.child* from *args.parent* remotely; returns 1 on failure."""
    try:
        sync.insert_node(metadata.remove_child(args.parent, args.child))
    except RequestError as e:
        print(e)
        return 1
def restore_action(args: argparse.Namespace):
    """Restore a trashed node and cache the result; returns 1 on failure."""
    try:
        r = trash.restore(args.node)
    except RequestError as e:
        # fixed: logger.error was given a pre-formatted message plus an extra
        # positional arg; logging's lazy %-formatting then raises
        # "not all arguments converted during string formatting"
        logger.error('Error restoring "%s": %s' % (args.node, e))
        return 1
    sync.insert_node(r)
def rename_action(args: argparse.Namespace):
    """Rename *args.node* to *args.name* remotely; returns 1 on failure."""
    try:
        sync.insert_node(metadata.rename_node(args.node, args.name))
    except RequestError as e:
        print(e)
        return 1
def trash_action(args: argparse.Namespace):
    """Move *args.node* to the remote trash; returns 1 on failure."""
    try:
        sync.insert_node(trash.move_to_trash(args.node))
    except RequestError as e:
        print(e)
        return 1
def upload_folder(folder: str, parent_id: str, overwr: bool, force: bool,
                  revision: bool, exclude: list) -> int:
    """Recursively upload *folder* below the remote node *parent_id*.

    Creates the remote folder when missing or trashed, then uploads every
    directory entry in sorted order. Returns 0 on full success, otherwise an
    OR-combination of the child uploads' status codes (or ERR_CR_FOLDER when
    the remote folder could not be created).
    """
    if parent_id is None:
        parent_id = query.get_root_id()
    parent = query.get_node(parent_id)

    resolved = os.path.realpath(folder)
    name = os.path.basename(resolved)
    node = parent.get_child(name)

    needs_creation = not node or node.status == 'TRASH' or parent.status == 'TRASH'
    if needs_creation:
        try:
            created = content.create_folder(name, parent_id)
            sync.insert_node(created)
            node = query.get_node(created['id'])
        except RequestError as e:
            if e.status_code == 409:
                logger.error('Folder "%s" already exists. Please sync.' % name)
            else:
                logger.error('Error creating remote folder "%s".' % name)
            return ERR_CR_FOLDER
    elif node.is_file():
        logger.error('Cannot create remote folder "%s", because a file of the same name already exists.' % name)
        return ERR_CR_FOLDER

    status = 0
    for entry in sorted(os.listdir(folder)):
        status |= upload(os.path.join(resolved, entry), node.id,
                         overwr, force, revision, exclude)
    return status
def upload_file(path, parent_id, overwr, force):
    """Upload *path* into the remote folder *parent_id*.

    If the file is not cached remotely it is uploaded fresh; otherwise it is
    overwritten when the local mtime/ctime+size indicate a change (or *force*).
    After a successful transfer the local MD5 is compared against the cached
    remote MD5.
    """
    hasher = utils.Hasher(path)
    short_nm = os.path.basename(path)

    cached_file = query.get_node(parent_id).get_child(short_nm)
    if cached_file:
        file_id = cached_file.id
    else:
        file_id = None

    if not file_id:
        try:
            r = content.upload_file(path, parent_id)
            sync.insert_node(r)
            file_id = r['id']
        except RequestError as e:
            if e.status_code == 409:  # might happen if cache is outdated
                hasher.stop()
                print('Uploading %s failed. Name collision with non-cached file. '
                      'If you want to overwrite, please sync and try again.' % short_nm)
                # colliding node ID is returned in error message -> could be used to continue
                return
            elif e.status_code == 504 or e.status_code == 408:  # proxy timeout / request timeout
                hasher.stop()
                # fixed: the original print had a %s placeholder but no
                # argument, so the literal '%s' was printed
                print('Timeout while uploading "%s".' % short_nm)
                # TODO: wait; request parent folder's children
                return
            else:
                hasher.stop()
                print('Uploading "%s" failed. Code: %s, msg: %s'
                      % (short_nm, e.status_code, e.msg))
                return
    else:
        # remote modification time as a Unix timestamp
        mod_time = (cached_file.modified - datetime.datetime(1970, 1, 1)) \
            / datetime.timedelta(seconds=1)
        logger.info('Remote mtime:' + str(mod_time)
                    + ', local mtime: ' + str(os.path.getmtime(path))
                    + ', local ctime: ' + str(os.path.getctime(path)))
        if not overwr and not force:
            print('Skipping upload of existing file "%s".' % short_nm)
            hasher.stop()
            return
        # ctime is checked because files can be overwritten by files with older mtime
        if mod_time < os.path.getmtime(path) \
                or (mod_time < os.path.getctime(path) and cached_file.size != os.path.getsize(path)) \
                or force:
            overwrite(file_id, path, _hash=False)
        elif not force:
            print('Skipping upload of "%s" because of mtime or ctime and size.' % short_nm)
            hasher.stop()
            return

    # might have changed
    cached_file = query.get_node(file_id)
    if hasher.get_result() != cached_file.md5:
        print('Hash mismatch between local and remote file for "%s".' % short_nm)
    else:
        logger.info('Local and remote hashes match for "%s".' % short_nm)
def upload_file(path: str, parent_id: str, overwr: bool, force: bool, revision: bool) -> int:
    """Upload *path* into the remote folder *parent_id*.

    Fresh files are uploaded and hash-verified via compare_hashes(). Existing
    files are overwritten only when mtime/ctime+size indicate a change (or
    *force* is set); with *revision*, the old remote file is first archived via
    revision_action_internal() and the upload retried without flags.
    Returns 0 on success / skip, otherwise an UL_* / compare_hashes status.
    """
    short_nm = os.path.basename(path)
    cached_file = query.get_node(parent_id).get_child(short_nm)
    file_id = None
    if cached_file:
        file_id = cached_file.id

    if not file_id:
        # file not known remotely -> fresh upload
        try:
            hasher = utils.Hasher(path)
            r = content.upload_file(path, parent_id)
            sync.insert_node(r)
            file_id = r['id']
            cached_file = query.get_node(file_id)
            return compare_hashes(hasher.get_result(), cached_file.md5, short_nm)
        except RequestError as e:
            if e.status_code == 409:  # might happen if cache is outdated
                hasher.stop()
                logger.error('Uploading "%s" failed. Name collision with non-cached file. '
                             'If you want to overwrite, please sync and try again.' % short_nm)
                # colliding node ID is returned in error message -> could be used to continue
                return UL_DL_FAILED
            elif e.status_code == 504 or e.status_code == 408:  # proxy timeout / request timeout
                hasher.stop()
                logger.warning('Timeout while uploading "%s".' % short_nm)
                # TODO: wait; request parent folder's children
                return UL_TIMEOUT
            else:
                hasher.stop()
                logger.error('Uploading "%s" failed. Code: %s, msg: %s'
                             % (short_nm, e.status_code, e.msg))
                return UL_DL_FAILED

    # else: file exists
    # remote modification time as a Unix timestamp
    mod_time = (cached_file.modified - datetime.datetime(1970, 1, 1)) / datetime.timedelta(seconds=1)

    logger.info('Remote mtime: ' + str(mod_time)
                + ', local mtime: ' + str(os.path.getmtime(path))
                + ', local ctime: ' + str(os.path.getctime(path)))

    if not overwr and not force:
        print('Skipping upload of existing file "%s".'
              % short_nm)
        return 0

    # ctime is checked because files can be overwritten by files with older mtime
    if mod_time < os.path.getmtime(path) \
            or (mod_time < os.path.getctime(path) and cached_file.size != os.path.getsize(path)) \
            or force:
        if revision:
            # archive the current remote file, then re-upload without flags
            filec = query.get_node(file_id)
            revision_action_internal(str(filec.full_path()))
            return upload_file(path, parent_id, False, False, False)
        return overwrite(file_id, path)
    elif not force:
        print('Skipping upload of "%s" because of mtime or ctime and size.' % short_nm)
        return 0
    else:
        # NOTE(review): this branch looks unreachable (force=True satisfies the
        # first condition) and the function ends without using `hasher` or
        # returning — possibly truncated source; confirm against upstream.
        hasher = utils.Hasher(path)
def overwrite(node_id, local_file):
    """Replace the content of remote node *node_id* with *local_file* and
    report an MD5 mismatch between the local file and the server's response."""
    hasher = utils.Hasher(local_file)
    try:
        response = content.overwrite_file(node_id, local_file)
        sync.insert_node(response)
        remote_md5 = response['contentProperties']['md5']
        if remote_md5 != hasher.get_result():
            print('Hash mismatch between local and remote file for "%s".' % local_file)
    except RequestError as e:
        hasher.stop()
        print('Error overwriting file. Code: %s, msg: %s' % (e.status_code, e.msg))
def overwrite(node_id, local_file) -> int:
    """Replace the content of remote node *node_id* with *local_file*.

    Returns the compare_hashes() status on success, UL_DL_FAILED on a request
    error.
    """
    hasher = utils.Hasher(local_file)
    try:
        response = content.overwrite_file(node_id, local_file)
        sync.insert_node(response)
        updated = query.get_node(response['id'])
        return compare_hashes(updated.md5, hasher.get_result(), local_file)
    except RequestError as e:
        hasher.stop()
        logger.error('Error overwriting file. Code: %s, msg: %s' % (e.status_code, e.msg))
        return UL_DL_FAILED
def overwrite(node_id, local_file, _hash=True):
    """Replace the content of remote node *node_id* with *local_file*.

    When *_hash* is true, the local file is hashed during upload and compared
    against the MD5 in the server response.
    """
    # fixed: both conditionals tested the builtin `hash` (always truthy)
    # instead of the `_hash` parameter, so a hasher was started even when
    # hashing was disabled
    if _hash:
        hasher = utils.Hasher(local_file)
    try:
        r = content.overwrite_file(node_id, local_file)
        sync.insert_node(r)
        if _hash and r['contentProperties']['md5'] != hasher.get_result():
            print('Hash mismatch between local and remote file for "%s".' % local_file)
    except RequestError as e:
        if _hash:
            hasher.stop()
        print('Error overwriting file. Code: %s, msg: %s' % (e.status_code, e.msg))
def overwrite(node_id, local_file, _hash=True):
    """Replace the content of remote node *node_id* with *local_file*.

    Returns 0 on success, HASH_MISMATCH when *_hash* is set and the MD5s
    differ, UL_DL_FAILED on a request error.
    """
    # fixed: both conditionals tested the builtin `hash` (always truthy)
    # instead of the `_hash` parameter, so a hasher was started even when
    # hashing was disabled
    if _hash:
        hasher = utils.Hasher(local_file)
    try:
        r = content.overwrite_file(node_id, local_file)
        sync.insert_node(r)
        if _hash and r['contentProperties']['md5'] != hasher.get_result():
            logger.info('Hash mismatch between local and remote file for "%s".' % local_file)
            return HASH_MISMATCH
        return 0
    except RequestError as e:
        if _hash:
            hasher.stop()
        logger.error('Error overwriting file. Code: %s, msg: %s' % (e.status_code, e.msg))
        return UL_DL_FAILED
def upload_file(path, parent_id, overwr, force):
    """Upload *path* into the remote folder *parent_id*.

    Fresh files are uploaded directly; cached files are overwritten only when
    the local file is larger (or *force* is set). After a transfer the local
    MD5 is compared against the cached remote MD5. The background hasher is
    stopped explicitly on every early-return path.
    """
    hasher = utils.Hasher(path)
    short_nm = os.path.basename(path)
    cached_file = query.get_node(parent_id).get_child(short_nm)
    if cached_file:
        file_id = cached_file.id
    else:
        file_id = None

    if not file_id:
        # file not known remotely -> fresh upload
        try:
            r = content.upload_file(path, parent_id)
            sync.insert_node(r)
            file_id = r['id']
        except RequestError as e:
            if e.status_code == 409:  # might happen if cache is outdated
                print('Uploading %s failed. Name collision with non-cached file. '
                      'If you want to overwrite, please sync and try again.' % short_nm)
                # colliding node ID is returned in error message -> could be used to continue
                hasher.stop()
                return
            else:
                hasher.stop()
                print('Uploading "%s" failed. Code: %s, msg: %s'
                      % (short_nm, e.status_code, e.msg))
                return
    else:
        if not overwr and not force:
            print('Skipping upload of existing file "%s".' % short_nm)
            hasher.stop()
            return
        # only size is compared here (no mtime check in this variant)
        if cached_file.size < os.path.getsize(path) or force:
            overwrite(file_id, path)
        elif not force:
            print('Skipping upload of "%s", because local file is smaller or of same size.' % short_nm)
            hasher.stop()
            return

    # might have changed
    cached_file = query.get_node(file_id)
    if hasher.get_result() != cached_file.md5:
        print('Hash mismatch between local and remote file for "%s".' % short_nm)
    else:
        logger.info('Local and remote hashes match for "%s".' % short_nm)
def revision_action_internal(path):
    """Archive the remote node at *path* into a sibling '.revs' folder,
    renaming it with a YYYYmmddHHMMSS timestamp suffix.

    Creates the '.revs' folder on demand; if a same-named node already sits in
    '.revs' (e.g. from an interrupted run), it is first renamed aside with an
    '.old.<timestamp>' suffix.
    """
    if query.resolve_path(path) is None:
        logger.error(path + ' does not exist')
        return
    if path.endswith('/'):
        path = path[0:len(path) - 1]
    # fixed: logger.info("Revisioning", path) passed an extra positional arg
    # with no placeholder, making logging's %-formatting raise an error
    logger.info("Revisioning %s", path)
    revname = '.revs'
    revdir = os.path.dirname(path) + '/' + revname + '/'
    if query.resolve_path(revdir) is None:
        create_action_internal(revdir)
    # move to $revname
    if query.resolve_path(revdir + os.path.basename(path)) is not None:
        # if target name already exists, rename it with a unique name and continue
        # this may happen if an operation has been stopped in the middle.
        ts = time.time()
        timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d%H%M%S')
        nodename = os.path.basename(path)
        print('Renaming ' + revdir + nodename + ' to ' + nodename + '.old.' + timestamp)
        r = metadata.rename_node(query.resolve_path(revdir + nodename),
                                 nodename + '.old.' + timestamp)
        sync.insert_node(r)
    r = metadata.move_node(query.resolve_path(path), query.resolve_path(revdir))
    sync.insert_node(r)
    # rename to timestamped name
    ts = time.time()
    timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y%m%d%H%M%S')
    nodename = os.path.basename(path)
    r = metadata.rename_node(query.resolve_path(revdir + nodename),
                             nodename + '.' + timestamp)
    sync.insert_node(r)
def move_action(args):
    """Move remote node *args.child* under *args.parent* and cache the result."""
    sync.insert_node(metadata.move_node(args.child, args.parent))
def trash_action(args):
    """Move remote node *args.node* to the trash and cache the result."""
    sync.insert_node(trash.move_to_trash(args.node))
def rename_action(args):
    """Rename remote node *args.node* to *args.name* and cache the result."""
    sync.insert_node(metadata.rename_node(args.node, args.name))
def remove_child_action(args):
    """Detach remote node *args.child* from *args.parent* and cache the result."""
    sync.insert_node(metadata.remove_child(args.parent, args.child))
def add_child_action(args):
    """Attach remote node *args.child* to *args.parent* and cache the result."""
    sync.insert_node(metadata.add_child(args.parent, args.child))
def upload_file(path, parent_id, overwr, force):
    """Upload *path* into the remote folder *parent_id*.

    If the file is not cached remotely it is uploaded fresh; otherwise it is
    overwritten when the local mtime/ctime+size indicate a change (or *force*).
    After a successful transfer the local MD5 is compared against the cached
    remote MD5.
    """
    hasher = utils.Hasher(path)
    short_nm = os.path.basename(path)

    cached_file = query.get_node(parent_id).get_child(short_nm)
    if cached_file:
        file_id = cached_file.id
    else:
        file_id = None

    if not file_id:
        try:
            r = content.upload_file(path, parent_id)
            sync.insert_node(r)
            file_id = r['id']
        except RequestError as e:
            if e.status_code == 409:  # might happen if cache is outdated
                hasher.stop()
                print('Uploading %s failed. Name collision with non-cached file. '
                      'If you want to overwrite, please sync and try again.' % short_nm)
                # colliding node ID is returned in error message -> could be used to continue
                return
            elif e.status_code == 504 or e.status_code == 408:  # proxy timeout / request timeout
                hasher.stop()
                # fixed: the original print had a %s placeholder but no
                # argument, so the literal '%s' was printed
                print('Timeout while uploading "%s".' % short_nm)
                # TODO: wait; request parent folder's children
                return
            else:
                hasher.stop()
                print('Uploading "%s" failed. Code: %s, msg: %s'
                      % (short_nm, e.status_code, e.msg))
                return
    else:
        # remote modification time as a Unix timestamp
        mod_time = (cached_file.modified - datetime.datetime(1970, 1, 1)) \
            / datetime.timedelta(seconds=1)
        logger.info('Remote mtime:' + str(mod_time)
                    + ', local mtime: ' + str(os.path.getmtime(path))
                    + ', local ctime: ' + str(os.path.getctime(path)))
        if not overwr and not force:
            print('Skipping upload of existing file "%s".' % short_nm)
            hasher.stop()
            return
        # ctime is checked because files can be overwritten by files with older mtime
        if mod_time < os.path.getmtime(path) \
                or (mod_time < os.path.getctime(path) and cached_file.size != os.path.getsize(path)) \
                or force:
            overwrite(file_id, path, _hash=False)
        elif not force:
            print('Skipping upload of "%s" because of mtime or ctime and size.' % short_nm)
            hasher.stop()
            return

    # might have changed
    cached_file = query.get_node(file_id)
    if hasher.get_result() != cached_file.md5:
        print('Hash mismatch between local and remote file for "%s".' % short_nm)
    else:
        logger.info('Local and remote hashes match for "%s".' % short_nm)
def restore_action(args):
    """Restore trashed remote node *args.node* and cache the result."""
    sync.insert_node(trash.restore(args.node))