def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
        # NOTE: path cannot use wildcards here
        typecheck_overrides={},
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    path = accepted['path'][-1]
    chosen_newline = accepted['newline'][-1]
    submitjob = accepted['submitjob'][-1]

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type': 'error_text',
            'text': '''Only accepting CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not configuration.site_enable_jobs and submitjob:
        output_objects.append({
            'object_type': 'error_text',
            'text': '''Job execution is not enabled on this system'''
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name
    base_dir = os.path.abspath(
        os.path.join(configuration.user_home, client_dir)) + os.sep

    # HTML spec dictates newlines in forms to be MS style (\r\n)
    # rather than un*x style (\n): change if requested.
    form_newline = '\r\n'
    allowed_newline = {'unix': '\n', 'mac': '\r', 'windows': '\r\n'}

    output_objects.append({
        'object_type': 'header',
        'text': 'Saving changes to edited file'
    })

    if chosen_newline not in allowed_newline:
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Unsupported newline style supplied: %s (must be one of %s)'
            % (chosen_newline, ', '.join(allowed_newline))
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    saved_newline = allowed_newline[chosen_newline]

    # Check directory traversal attempts before actual handling to avoid
    # leaking information about file system layout while allowing consistent
    # error messages
    abs_path = ''
    unfiltered_match = glob.glob(base_dir + path)
    for server_path in unfiltered_match:
        # IMPORTANT: path must be expanded to abs for proper chrooting
        abs_path = os.path.abspath(server_path)
        if not valid_user_path(configuration, abs_path, base_dir, True):
            logger.warning('%s tried to %s restricted path %s ! (%s)'
                           % (client_id, op_name, abs_path, path))
            output_objects.append({
                'object_type': 'error_text',
                'text': "Invalid path! (%s expands to an illegal path)" % path
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

    if abs_path == '':
        # IMPORTANT: path must be expanded to abs for proper chrooting
        abs_path = os.path.abspath(os.path.join(base_dir,
                                                path.lstrip(os.sep)))
        if not valid_user_path(configuration, abs_path, base_dir, True):
            logger.warning('%s tried to %s restricted path %s ! (%s)'
                           % (client_id, op_name, abs_path, path))
            output_objects.append({
                'object_type': 'error_text',
                'text': "Invalid path! (%s expands to an illegal path)" % path
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

    if not check_write_access(abs_path, parent_dir=True):
        logger.warning('%s called without write access: %s'
                       % (op_name, abs_path))
        output_objects.append({
            'object_type': 'error_text',
            'text': 'cannot edit "%s": inside a read-only location!' % path
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    (owner, time_left) = acquire_edit_lock(abs_path, client_id)
    if owner != client_id:
        output_objects.append({
            'object_type': 'error_text',
            'text': "You don't have the lock for %s!" % path
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    try:
        fh = open(abs_path, 'w+')
        fh.write(user_arguments_dict['editarea'][0].replace(form_newline,
                                                            saved_newline))
        fh.close()

        # everything ok
        output_objects.append({
            'object_type': 'text',
            'text': 'Saved changes to %s.' % path
        })
        logger.info('saved changes to %s' % path)
        release_edit_lock(abs_path, client_id)
    except Exception as exc:
        # Don't give away information about actual fs layout
        output_objects.append({
            'object_type': 'error_text',
            'text': '%s could not be written! (%s)'
            % (path, str(exc).replace(base_dir, ''))
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    if submitjob:
        output_objects.append({
            'object_type': 'text',
            'text': 'Submitting saved file to parser'
        })
        submitstatus = {'object_type': 'submitstatus', 'name': path}
        (new_job_status, msg, job_id) = new_job(abs_path, client_id,
                                                configuration, False, True)
        if not new_job_status:
            submitstatus['status'] = False
            submitstatus['message'] = msg
        else:
            submitstatus['status'] = True
            submitstatus['job_id'] = job_id

        output_objects.append({
            'object_type': 'submitstatuslist',
            'submitstatuslist': [submitstatus]
        })

    output_objects.append({
        'object_type': 'link',
        'destination': 'javascript:history.back()',
        'class': 'backlink iconspace',
        'title': 'Go back to previous page',
        'text': 'Back to previous page'
    })

    return (output_objects, returnvalues.OK)
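

# A minimal standalone sketch of the newline handling done in the save
# handler above: HTML forms always submit MS style '\r\n' line ends and the
# handler rewrites them to the user-chosen style before writing the file.
# This helper is not part of the original code base - it only illustrates
# that translation step in isolation.
def translate_newlines(form_text, chosen_newline='unix'):
    """Return form_text with form CRLF line ends rewritten to the style
    selected by chosen_newline, mirroring the replace() call in main()."""
    form_newline = '\r\n'
    allowed_newline = {'unix': '\n', 'mac': '\r', 'windows': '\r\n'}
    return form_text.replace(form_newline, allowed_newline[chosen_newline])


# Example: translate_newlines('line one\r\nline two\r\n', 'unix')
# yields 'line one\nline two\n'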

def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
        # NOTE: path can use wildcards, dst and current_dir cannot
        typecheck_overrides={'path': valid_path_pattern},
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    flags = ''.join(accepted['flags'])
    pattern_list = accepted['path']
    dst = accepted['dst'][-1]
    current_dir = accepted['current_dir'][-1].lstrip(os.sep)

    # All paths are relative to current_dir
    pattern_list = [os.path.join(current_dir, i) for i in pattern_list]
    if dst:
        dst = os.path.join(current_dir, dst)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name
    base_dir = os.path.abspath(
        os.path.join(configuration.user_home, client_dir)) + os.sep

    status = returnvalues.OK

    if verbose(flags):
        for flag in flags:
            output_objects.append({
                'object_type': 'text',
                'text': '%s using flag: %s' % (op_name, flag)
            })

    # IMPORTANT: path must be expanded to abs for proper chrooting
    abs_dir = os.path.abspath(
        os.path.join(base_dir, current_dir.lstrip(os.sep)))
    if not valid_user_path(configuration, abs_dir, base_dir, True):
        output_objects.append({
            'object_type': 'error_text',
            'text': "You're not allowed to work in %s!" % current_dir
        })
        logger.warning('%s tried to %s restricted path %s ! (%s)'
                       % (client_id, op_name, abs_dir, current_dir))
        return (output_objects, returnvalues.CLIENT_ERROR)

    if verbose(flags):
        output_objects.append({
            'object_type': 'text',
            'text': "working in %s" % current_dir
        })

    if dst:
        if not safe_handler(configuration, 'post', op_name, client_id,
                            get_csrf_limit(configuration), accepted):
            output_objects.append({
                'object_type': 'error_text',
                'text': '''Only accepting CSRF-filtered POST requests to prevent unintended updates'''
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        # NOTE: dst already incorporates current_dir prefix here
        # IMPORTANT: path must be expanded to abs for proper chrooting
        abs_dest = os.path.abspath(os.path.join(base_dir, dst))
        logger.info('%s in %s' % (op_name, abs_dest))

        # Don't use abs_path in output as it may expose underlying
        # fs layout.
        relative_dest = abs_dest.replace(base_dir, '')
        if not valid_user_path(configuration, abs_dest, base_dir, True):
            output_objects.append({
                'object_type': 'error_text',
                'text': "Invalid path! (%s expands to an illegal path)" % dst
            })
            logger.warning('%s tried to %s restricted path %s ! (%s)'
                           % (client_id, op_name, abs_dest, dst))
            return (output_objects, returnvalues.CLIENT_ERROR)
        if not check_write_access(abs_dest, parent_dir=True):
            logger.warning('%s called without write access: %s'
                           % (op_name, abs_dest))
            output_objects.append({
                'object_type': 'error_text',
                'text': 'cannot write disk use to "%s": inside a read-only location!'
                % relative_dest
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

    all_lines = []
    for pattern in pattern_list:

        # Check directory traversal attempts before actual handling to avoid
        # leaking information about file system layout while allowing
        # consistent error messages
        unfiltered_match = glob.glob(base_dir + pattern)
        match = []
        for server_path in unfiltered_match:
            # IMPORTANT: path must be expanded to abs for proper chrooting
            abs_path = os.path.abspath(server_path)
            if not valid_user_path(configuration, abs_path, base_dir, True):
                # out of bounds - save user warning for later to allow
                # partial match:
                # ../*/* is technically allowed to match own files.
                logger.warning('%s tried to %s restricted path %s ! (%s)'
                               % (client_id, op_name, abs_path, pattern))
                continue
            match.append(abs_path)

        # Now actually treat list of allowed matchings and notify if no
        # (allowed) match
        if not match:
            output_objects.append({
                'object_type': 'file_not_found',
                'name': pattern
            })
            status = returnvalues.FILE_NOT_FOUND

        # NOTE: we produce output matching an invocation of:
        # du -aL --apparent-size --block-size=1 PATH [PATH ...]
        filedus = []
        summarize_output = summarize(flags)
        for abs_path in match:
            if invisible_path(abs_path):
                continue
            relative_path = abs_path.replace(base_dir, '')
            # cache accumulated sub dir sizes - du sums into parent dir size
            dir_sizes = {}
            try:
                # Assume a directory to walk
                for (root, dirs, files) in walk(abs_path, topdown=False,
                                                followlinks=True):
                    if invisible_path(root):
                        continue
                    dir_bytes = 0
                    for name in files:
                        real_file = os.path.join(root, name)
                        if invisible_path(real_file):
                            continue
                        relative_file = real_file.replace(base_dir, '')
                        size = os.path.getsize(real_file)
                        dir_bytes += size
                        if not summarize_output:
                            filedus.append({
                                'object_type': 'filedu',
                                'name': relative_file,
                                'bytes': size
                            })
                    for name in dirs:
                        real_dir = os.path.join(root, name)
                        if invisible_path(real_dir):
                            continue
                        dir_bytes += dir_sizes[real_dir]
                    relative_root = root.replace(base_dir, '')
                    dir_bytes += os.path.getsize(root)
                    dir_sizes[root] = dir_bytes
                    if root == abs_path or not summarize_output:
                        filedus.append({
                            'object_type': 'filedu',
                            'name': relative_root,
                            'bytes': dir_bytes
                        })
                if os.path.isfile(abs_path):
                    # Fall back to plain file where walk is empty
                    size = os.path.getsize(abs_path)
                    filedus.append({
                        'object_type': 'filedu',
                        'name': relative_path,
                        'bytes': size
                    })
            except Exception as exc:
                output_objects.append({
                    'object_type': 'error_text',
                    'text': "%s: '%s': %s" % (op_name, relative_path, exc)
                })
                logger.error("%s: failed on '%s': %s"
                             % (op_name, relative_path, exc))
                status = returnvalues.SYSTEM_ERROR
                continue

        if dst:
            all_lines += ['%(bytes)d\t\t%(name)s\n' % entry
                          for entry in filedus]
        else:
            output_objects.append({
                'object_type': 'filedus',
                'filedus': filedus
            })

    if dst and not write_file(''.join(all_lines), abs_dest, logger):
        output_objects.append({
            'object_type': 'error_text',
            'text': "failed to write disk use to %s" % relative_dest
        })
        logger.error("writing disk use to %s for %s failed"
                     % (abs_dest, client_id))
        status = returnvalues.SYSTEM_ERROR

    return (output_objects, status)
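

# A standalone sketch of the size accumulation used above: walk the tree
# bottom-up, sum file sizes into their directory and add each directory's
# total into its parent, mimicking
#   du -aL --apparent-size --block-size=1 PATH
# Unlike the handler it does no chrooting or invisible-path filtering and it
# uses plain os.walk; it is illustrative only.
import os


def apparent_sizes(top):
    """Yield (path, bytes) pairs for every file and directory under top,
    with directory sizes accumulated into their parents like du does."""
    dir_sizes = {}
    for root, dirs, files in os.walk(top, topdown=False, followlinks=True):
        total = 0
        for name in files:
            path = os.path.join(root, name)
            size = os.path.getsize(path)
            total += size
            yield (path, size)
        for name in dirs:
            # bottom-up walk guarantees sub dir totals are already cached
            total += dir_sizes[os.path.join(root, name)]
        total += os.path.getsize(root)
        dir_sizes[root] = total
        yield (root, total)
    if os.path.isfile(top):
        # plain file - walk yields nothing, fall back like the handler does
        yield (top, os.path.getsize(top))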

def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
        # NOTE: path can use wildcards, dst and current_dir cannot
        typecheck_overrides={'path': valid_path_pattern},
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    flags = ''.join(accepted['flags'])
    algo_list = accepted['hash_algo']
    max_chunks = int(accepted['max_chunks'][-1])
    pattern_list = accepted['path']
    dst = accepted['dst'][-1]
    current_dir = accepted['current_dir'][-1].lstrip(os.sep)

    # All paths are relative to current_dir
    pattern_list = [os.path.join(current_dir, i) for i in pattern_list]
    if dst:
        dst = os.path.join(current_dir, dst)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name
    base_dir = os.path.abspath(
        os.path.join(configuration.user_home, client_dir)) + os.sep

    status = returnvalues.OK

    if verbose(flags):
        for flag in flags:
            output_objects.append({
                'object_type': 'text',
                'text': '%s using flag: %s' % (op_name, flag)
            })

    # IMPORTANT: path must be expanded to abs for proper chrooting
    abs_dir = os.path.abspath(
        os.path.join(base_dir, current_dir.lstrip(os.sep)))
    if not valid_user_path(configuration, abs_dir, base_dir, True):
        output_objects.append({
            'object_type': 'error_text',
            'text': "You're not allowed to work in %s!" % current_dir
        })
        logger.warning('%s tried to %s restricted path %s ! (%s)'
                       % (client_id, op_name, abs_dir, current_dir))
        return (output_objects, returnvalues.CLIENT_ERROR)

    if verbose(flags):
        output_objects.append({
            'object_type': 'text',
            'text': "working in %s" % current_dir
        })

    if dst:
        if not safe_handler(configuration, 'post', op_name, client_id,
                            get_csrf_limit(configuration), accepted):
            output_objects.append({
                'object_type': 'error_text',
                'text': '''Only accepting CSRF-filtered POST requests to prevent unintended updates'''
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        # NOTE: dst already incorporates current_dir prefix here
        # IMPORTANT: path must be expanded to abs for proper chrooting
        abs_dest = os.path.abspath(os.path.join(base_dir, dst))
        logger.info('chksum in %s' % abs_dest)

        # Don't use abs_path in output as it may expose underlying
        # fs layout.
        relative_dest = abs_dest.replace(base_dir, '')
        if not valid_user_path(configuration, abs_dest, base_dir, True):
            output_objects.append({
                'object_type': 'error_text',
                'text': "Invalid path! (%s expands to an illegal path)" % dst
            })
            logger.warning('%s tried to %s restricted path %s ! (%s)'
                           % (client_id, op_name, abs_dest, dst))
            return (output_objects, returnvalues.CLIENT_ERROR)
        if not check_write_access(abs_dest, parent_dir=True):
            logger.warning('%s called without write access: %s'
                           % (op_name, abs_dest))
            output_objects.append({
                'object_type': 'error_text',
                'text': 'cannot checksum to "%s": inside a read-only location!'
                % relative_dest
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

    all_lines = []
    for pattern in pattern_list:

        # Check directory traversal attempts before actual handling to avoid
        # leaking information about file system layout while allowing
        # consistent error messages
        unfiltered_match = glob.glob(base_dir + pattern)
        match = []
        for server_path in unfiltered_match:
            # IMPORTANT: path must be expanded to abs for proper chrooting
            abs_path = os.path.abspath(server_path)
            if not valid_user_path(configuration, abs_path, base_dir, True):
                # out of bounds - save user warning for later to allow
                # partial match:
                # ../*/* is technically allowed to match own files.
                logger.warning('%s tried to %s restricted path %s ! (%s)'
                               % (client_id, op_name, abs_path, pattern))
                continue
            match.append(abs_path)

        # Now actually treat list of allowed matchings and notify if no
        # (allowed) match
        if not match:
            output_objects.append({
                'object_type': 'file_not_found',
                'name': pattern
            })
            status = returnvalues.FILE_NOT_FOUND

        for abs_path in match:
            relative_path = abs_path.replace(base_dir, '')
            output_lines = []
            for hash_algo in algo_list:
                try:
                    chksum_helper = _algo_map.get(hash_algo,
                                                  _algo_map["md5"])
                    checksum = chksum_helper(abs_path, max_chunks=max_chunks)
                    line = "%s %s\n" % (checksum, relative_path)
                    logger.info("%s %s of %s: %s"
                                % (op_name, hash_algo, abs_path, checksum))
                    output_lines.append(line)
                except Exception as exc:
                    output_objects.append({
                        'object_type': 'error_text',
                        'text': "%s: '%s': %s" % (op_name, relative_path, exc)
                    })
                    logger.error("%s: failed on '%s': %s"
                                 % (op_name, relative_path, exc))
                    status = returnvalues.SYSTEM_ERROR
                    continue

            entry = {'object_type': 'file_output', 'lines': output_lines}
            output_objects.append(entry)
            all_lines += output_lines

    if dst and not write_file(''.join(all_lines), abs_dest, logger):
        output_objects.append({
            'object_type': 'error_text',
            'text': "failed to write checksums to %s" % relative_dest
        })
        logger.error("writing checksums to %s for %s failed"
                     % (abs_dest, client_id))
        status = returnvalues.SYSTEM_ERROR

    return (output_objects, status)
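

# The checksum handler looks helpers up in _algo_map and calls them as
# helper(abs_path, max_chunks=max_chunks), falling back to the "md5" entry
# for unknown algorithm names. The real helpers are defined elsewhere in the
# code base; the following is only a plausible hashlib-based sketch of that
# interface, with an arbitrary chunk size chosen here for illustration.
import hashlib
from functools import partial


def _chunked_checksum(path, algo='md5', chunk_size=1024 * 1024, max_chunks=-1):
    """Hex digest of up to max_chunks chunks of path (all chunks if -1)."""
    digest = hashlib.new(algo)
    read_chunks = 0
    with open(path, 'rb') as fh:
        while max_chunks < 0 or read_chunks < max_chunks:
            chunk = fh.read(chunk_size)
            if not chunk:
                break
            digest.update(chunk)
            read_chunks += 1
    return digest.hexdigest()


# Hypothetical map matching the lookup and md5 fallback used above
_algo_map = {algo: partial(_chunked_checksum, algo=algo)
             for algo in ('md5', 'sha1', 'sha256', 'sha512')}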

def main(client_id, user_arguments_dict, environ=None):
    """Main function used by front end"""

    if environ is None:
        environ = os.environ
    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False,
                                  op_menu=client_id)
    client_dir = client_id_dir(client_id)
    status = returnvalues.OK
    defaults = signature()[1]
    (validate_status, accepted) = validate_input(
        user_arguments_dict,
        defaults,
        output_objects,
        allow_rejects=False,
        # NOTE: path cannot use wildcards here
        typecheck_overrides={},
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    flags = ''.join(accepted['flags'])
    patterns = accepted['path']
    current_dir = accepted['current_dir'][-1]
    share_id = accepted['share_id'][-1]

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type': 'error_text',
            'text': '''Only accepting CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Either authenticated user client_id set or sharelink ID
    if client_id:
        user_id = client_id
        target_dir = client_id_dir(client_id)
        base_dir = configuration.user_home
        id_query = ''
        page_title = 'Create User Directory'
        userstyle = True
        widgets = True
    elif share_id:
        try:
            (share_mode, _) = extract_mode_id(configuration, share_id)
        except ValueError as err:
            logger.error('%s called with invalid share_id %s: %s'
                         % (op_name, share_id, err))
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Invalid sharelink ID: %s' % share_id
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        # TODO: load and check sharelink pickle (currently requires client_id)
        user_id = 'anonymous user through share ID %s' % share_id
        if share_mode == 'read-only':
            logger.error('%s called without write access: %s'
                         % (op_name, accepted))
            output_objects.append({
                'object_type': 'error_text',
                'text': 'No write access!'
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        target_dir = os.path.join(share_mode, share_id)
        base_dir = configuration.sharelink_home
        id_query = '?share_id=%s' % share_id
        page_title = 'Create Shared Directory'
        userstyle = False
        widgets = False
    else:
        logger.error('%s called without proper auth: %s' % (op_name, accepted))
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Authentication is missing!'
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name
    base_dir = os.path.abspath(os.path.join(base_dir, target_dir)) + os.sep

    title_entry = find_entry(output_objects, 'title')
    title_entry['text'] = page_title
    title_entry['skipwidgets'] = not widgets
    title_entry['skipuserstyle'] = not userstyle
    output_objects.append({'object_type': 'header', 'text': page_title})

    # Input validation assures target_dir can't escape base_dir
    if not os.path.isdir(base_dir):
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Invalid client/sharelink id!'
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if verbose(flags):
        for flag in flags:
            output_objects.append({
                'object_type': 'text',
                'text': '%s using flag: %s' % (op_name, flag)
            })

    for pattern in patterns:

        # Check directory traversal attempts before actual handling to avoid
        # leaking information about file system layout while allowing
        # consistent error messages
        # NB: Globbing disabled on purpose here
        unfiltered_match = [base_dir + os.sep + current_dir + os.sep + pattern]
        match = []
        for server_path in unfiltered_match:
            # IMPORTANT: path must be expanded to abs for proper chrooting
            abs_path = os.path.abspath(server_path)
            if not valid_user_path(configuration, abs_path, base_dir, True):
                # out of bounds - save user warning for later to allow
                # partial match:
                # ../*/* is technically allowed to match own files.
                logger.warning('%s tried to %s restricted path %s ! (%s)'
                               % (client_id, op_name, abs_path, pattern))
                continue
            match.append(abs_path)

        # Now actually treat list of allowed matchings and notify if no
        # (allowed) match
        if not match:
            output_objects.append({
                'object_type': 'error_text',
                'text': "%s: cannot create directory '%s': Permission denied"
                % (op_name, pattern)
            })
            status = returnvalues.CLIENT_ERROR

        for abs_path in match:
            relative_path = abs_path.replace(base_dir, '')
            if verbose(flags):
                output_objects.append({
                    'object_type': 'file',
                    'name': relative_path
                })
            if not parents(flags) and os.path.exists(abs_path):
                output_objects.append({
                    'object_type': 'error_text',
                    'text': '%s: path exists!' % pattern
                })
                status = returnvalues.CLIENT_ERROR
                continue
            if not check_write_access(abs_path, parent_dir=True):
                logger.warning('%s called without write access: %s'
                               % (op_name, abs_path))
                output_objects.append({
                    'object_type': 'error_text',
                    'text': 'cannot create "%s": inside a read-only location!'
                    % pattern
                })
                status = returnvalues.CLIENT_ERROR
                continue
            try:
                gdp_iolog(configuration, client_id, environ['REMOTE_ADDR'],
                          'created', [relative_path])
                if parents(flags):
                    if not os.path.isdir(abs_path):
                        os.makedirs(abs_path)
                else:
                    os.mkdir(abs_path)
                logger.info('%s %s done' % (op_name, abs_path))
            except Exception as exc:
                if not isinstance(exc, GDPIOLogError):
                    gdp_iolog(configuration, client_id,
                              environ['REMOTE_ADDR'], 'created',
                              [relative_path], failed=True, details=exc)
                output_objects.append({
                    'object_type': 'error_text',
                    'text': "%s: '%s' failed!" % (op_name, relative_path)
                })
                logger.error("%s: failed on '%s': %s"
                             % (op_name, relative_path, exc))
                status = returnvalues.SYSTEM_ERROR
                continue
            output_objects.append({
                'object_type': 'text',
                'text': "created directory %s" % (relative_path)
            })
            if id_query:
                open_query = "%s;current_dir=%s" % (id_query, relative_path)
            else:
                open_query = "?current_dir=%s" % relative_path
            output_objects.append({
                'object_type': 'link',
                'destination': 'ls.py%s' % open_query,
                'text': 'Open %s' % relative_path
            })

    output_objects.append({'object_type': 'text', 'text': ''})
    output_objects.append({
        'object_type': 'link',
        'destination': 'ls.py%s' % id_query,
        'text': 'Return to files overview'
    })
    return (output_objects, status)

def main(client_id, user_arguments_dict, environ=None):
    """Main function used by front end"""

    if environ is None:
        environ = os.environ
    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False,
                                  op_menu=client_id)
    client_dir = client_id_dir(client_id)
    status = returnvalues.OK
    defaults = signature()[1]
    (validate_status, accepted) = validate_input(
        user_arguments_dict,
        defaults,
        output_objects,
        allow_rejects=False,
        # NOTE: path can use wildcards
        typecheck_overrides={'path': valid_path_pattern},
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    flags = ''.join(accepted['flags'])
    pattern_list = accepted['path']
    iosessionid = accepted['iosessionid'][-1]
    share_id = accepted['share_id'][-1]

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type': 'error_text',
            'text': '''Only accepting CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Either authenticated user client_id set or sharelink ID
    if client_id:
        user_id = client_id
        target_dir = client_id_dir(client_id)
        base_dir = configuration.user_home
        id_query = ''
        page_title = 'Remove User File'
        if force(flags):
            rm_helper = delete_path
        else:
            rm_helper = remove_path
        userstyle = True
        widgets = True
    elif share_id:
        try:
            (share_mode, _) = extract_mode_id(configuration, share_id)
        except ValueError as err:
            logger.error('%s called with invalid share_id %s: %s'
                         % (op_name, share_id, err))
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Invalid sharelink ID: %s' % share_id
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        # TODO: load and check sharelink pickle (currently requires client_id)
        user_id = 'anonymous user through share ID %s' % share_id
        if share_mode == 'read-only':
            logger.error('%s called without write access: %s'
                         % (op_name, accepted))
            output_objects.append({
                'object_type': 'error_text',
                'text': 'No write access!'
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        target_dir = os.path.join(share_mode, share_id)
        base_dir = configuration.sharelink_home
        id_query = '?share_id=%s' % share_id
        page_title = 'Remove Shared File'
        rm_helper = delete_path
        userstyle = False
        widgets = False
    elif iosessionid.strip() and iosessionid.isalnum():
        user_id = iosessionid
        base_dir = configuration.webserver_home
        target_dir = iosessionid
        id_query = ''
        page_title = 'Remove Session File'
        rm_helper = delete_path
        userstyle = False
        widgets = False
    else:
        logger.error('%s called without proper auth: %s' % (op_name, accepted))
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Authentication is missing!'
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name
    base_dir = os.path.abspath(os.path.join(base_dir, target_dir)) + os.sep

    title_entry = find_entry(output_objects, 'title')
    title_entry['text'] = page_title
    title_entry['skipwidgets'] = not widgets
    title_entry['skipuserstyle'] = not userstyle
    output_objects.append({'object_type': 'header', 'text': page_title})

    logger.debug("%s: with paths: %s" % (op_name, pattern_list))

    # Input validation assures target_dir can't escape base_dir
    if not os.path.isdir(base_dir):
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Invalid client/sharelink/session id!'
        })
        logger.warning('%s used %s with invalid base dir: %s'
                       % (user_id, op_name, base_dir))
        return (output_objects, returnvalues.CLIENT_ERROR)

    if verbose(flags):
        for flag in flags:
            output_objects.append({
                'object_type': 'text',
                'text': '%s using flag: %s' % (op_name, flag)
            })

    for pattern in pattern_list:

        # Check directory traversal attempts before actual handling to avoid
        # leaking information about file system layout while allowing
        # consistent error messages
        unfiltered_match = glob.glob(base_dir + pattern)
        match = []
        for server_path in unfiltered_match:
            # IMPORTANT: path must be expanded to abs for proper chrooting
            abs_path = os.path.abspath(server_path)
            if not valid_user_path(configuration, abs_path, base_dir, True):
                # out of bounds - save user warning for later to allow
                # partial match:
                # ../*/* is technically allowed to match own files.
                logger.warning('%s tried to %s restricted path %s ! (%s)'
                               % (client_id, op_name, abs_path, pattern))
                continue
            match.append(abs_path)

        # Now actually treat list of allowed matchings and notify if no
        # (allowed) match
        if not match:
            logger.warning("%s: no matching paths: %s"
                           % (op_name, pattern_list))
            output_objects.append({
                'object_type': 'file_not_found',
                'name': pattern
            })
            status = returnvalues.FILE_NOT_FOUND

        for abs_path in match:
            real_path = os.path.realpath(abs_path)
            relative_path = abs_path.replace(base_dir, '')
            if verbose(flags):
                output_objects.append({
                    'object_type': 'file',
                    'name': relative_path
                })

            # Make it harder to accidentally delete too much - e.g. do not
            # delete VGrid files without explicit selection of subdir contents
            if abs_path == os.path.abspath(base_dir):
                logger.error("%s: refusing rm home dir: %s"
                             % (op_name, abs_path))
                output_objects.append({
                    'object_type': 'warning',
                    'text': "You're not allowed to delete your entire home directory!"
                })
                status = returnvalues.CLIENT_ERROR
                continue
            # Generally refuse handling symlinks including root vgrid shares
            elif os.path.islink(abs_path):
                logger.error("%s: refusing rm link: %s" % (op_name, abs_path))
                output_objects.append({
                    'object_type': 'warning',
                    'text': """
You're not allowed to delete entire special folders like %s shares and %s
""" % (configuration.site_vgrid_label, trash_linkname)
                })
                status = returnvalues.CLIENT_ERROR
                continue
            # Additionally refuse operations on inherited subvgrid share roots
            elif in_vgrid_share(configuration, abs_path) == relative_path:
                output_objects.append({
                    'object_type': 'warning',
                    'text': """You're not allowed to remove entire %s shared folders!"""
                    % configuration.site_vgrid_label
                })
                status = returnvalues.CLIENT_ERROR
                continue
            elif os.path.isdir(abs_path) and not recursive(flags):
                logger.error("%s: non-recursive call on dir '%s'"
                             % (op_name, abs_path))
                output_objects.append({
                    'object_type': 'error_text',
                    'text': "cannot remove '%s': is a directory"
                    % relative_path
                })
                status = returnvalues.CLIENT_ERROR
                continue

            trash_base = get_trash_location(configuration, abs_path)
            if not trash_base and not force(flags):
                logger.error("%s: no trash for dir '%s'" % (op_name, abs_path))
                output_objects.append({
                    'object_type': 'error_text',
                    'text': "No trash enabled for '%s' - read-only?"
                    % relative_path
                })
                status = returnvalues.CLIENT_ERROR
                continue
            try:
                if rm_helper == remove_path and \
                        os.path.commonprefix([real_path, trash_base]) \
                        == trash_base:
                    logger.warning("%s: already in trash: '%s'"
                                   % (op_name, real_path))
                    output_objects.append({
                        'object_type': 'error_text',
                        'text': """
'%s' is already in trash - no action:
use force flag to permanently delete""" % relative_path
                    })
                    status = returnvalues.CLIENT_ERROR
                    continue
            except Exception as err:
                logger.error("%s: check trash failed: %s" % (op_name, err))
                continue

            if not check_write_access(abs_path):
                logger.warning('%s called without write access: %s'
                               % (op_name, abs_path))
                output_objects.append({
                    'object_type': 'error_text',
                    'text': 'cannot remove "%s": inside a read-only location!'
                    % pattern
                })
                status = returnvalues.CLIENT_ERROR
                continue

            # TODO: limit delete in vgrid share trash to vgrid owners / conf?
            #       ... malicious members can still e.g. truncate all files.
            #       we could consider removing write bit on move to trash.
            # TODO: user setting to switch on/off trash?
            # TODO: add direct delete checkbox in fileman move to trash dialog?
            # TODO: add empty trash option for Trash?
            # TODO: user settings to define read-only and auto-expire in trash?
            # TODO: add trash support for sftp/ftps/webdavs?

            gdp_iolog_action = 'deleted'
            gdp_iolog_paths = [relative_path]
            if rm_helper == remove_path:
                gdp_iolog_action = 'moved'
                trash_base_path = \
                    get_trash_location(configuration, abs_path, True)
                trash_relative_path = \
                    trash_base_path.replace(configuration.user_home, '')
                trash_relative_path = \
                    trash_relative_path.replace(
                        configuration.vgrid_files_home, '')
                gdp_iolog_paths.append(trash_relative_path)
            try:
                gdp_iolog(configuration, client_id, environ['REMOTE_ADDR'],
                          gdp_iolog_action, gdp_iolog_paths)
                gdp_iolog_status = True
            except GDPIOLogError as exc:
                gdp_iolog_status = False
                rm_err = [str(exc)]
            rm_status = False
            if gdp_iolog_status:
                (rm_status, rm_err) = rm_helper(configuration, abs_path)
            if not rm_status or not gdp_iolog_status:
                if gdp_iolog_status:
                    gdp_iolog(configuration, client_id,
                              environ['REMOTE_ADDR'], gdp_iolog_action,
                              gdp_iolog_paths, failed=True, details=rm_err)
                logger.error("%s: failed on '%s': %s"
                             % (op_name, abs_path, ', '.join(rm_err)))
                output_objects.append({
                    'object_type': 'error_text',
                    'text': "remove '%s' failed: %s"
                    % (relative_path, '. '.join(rm_err))
                })
                status = returnvalues.SYSTEM_ERROR
                continue
            logger.info("%s: successfully (re)moved %s" % (op_name, abs_path))
            output_objects.append({
                'object_type': 'text',
                'text': "removed %s" % (relative_path)
            })

    output_objects.append({
        'object_type': 'link',
        'destination': 'ls.py%s' % id_query,
        'text': 'Return to files overview'
    })
    return (output_objects, status)

def main(client_id, user_arguments_dict, environ=None):
    """Main function used by front end"""

    if environ is None:
        environ = os.environ
    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    client_dir = client_id_dir(client_id)
    status = returnvalues.OK
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
        # NOTE: src and dst can use wildcards here
        typecheck_overrides={
            'src': valid_path_pattern,
            'dst': valid_path_pattern
        },
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    flags = ''.join(accepted['flags'])
    src_list = accepted['src']
    dst = accepted['dst'][-1]

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type': 'error_text',
            'text': '''Only accepting CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name
    base_dir = os.path.abspath(
        os.path.join(configuration.user_home, client_dir)) + os.sep

    abs_dest = base_dir + dst
    dst_list = glob.glob(abs_dest)
    if not dst_list:
        # New destination?
        if not glob.glob(os.path.dirname(abs_dest)):
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Illegal dst path provided!'
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        else:
            dst_list = [abs_dest]

    # Use last match in case of multiple matches
    dest = dst_list[-1]
    if len(dst_list) > 1:
        output_objects.append({
            'object_type': 'warning',
            'text': 'dst (%s) matches multiple targets - using last: %s'
            % (dst, dest)
        })

    # IMPORTANT: path must be expanded to abs for proper chrooting
    abs_dest = os.path.abspath(dest)

    # Don't use abs_path in output as it may expose underlying
    # fs layout.
    relative_dest = abs_dest.replace(base_dir, '')
    if not valid_user_path(configuration, abs_dest, base_dir, True):
        logger.warning('%s tried to %s to restricted path %s ! (%s)'
                       % (client_id, op_name, abs_dest, dst))
        output_objects.append({
            'object_type': 'error_text',
            'text': "Invalid path! (%s expands to an illegal path)" % dst
        })
        return (output_objects, returnvalues.CLIENT_ERROR)
    if not check_write_access(abs_dest, parent_dir=True):
        logger.warning('%s called without write access: %s'
                       % (op_name, abs_dest))
        output_objects.append({
            'object_type': 'error_text',
            'text': 'cannot move to "%s": inside a read-only location!'
            % relative_dest
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    for pattern in src_list:
        unfiltered_match = glob.glob(base_dir + pattern)
        match = []
        for server_path in unfiltered_match:
            # IMPORTANT: path must be expanded to abs for proper chrooting
            abs_path = os.path.abspath(server_path)
            if not valid_user_path(configuration, abs_path, base_dir, True):
                logger.warning('%s tried to %s restricted path %s ! (%s)'
                               % (client_id, op_name, abs_path, pattern))
                continue
            match.append(abs_path)

        # Now actually treat list of allowed matchings and notify if no
        # (allowed) match
        if not match:
            output_objects.append({
                'object_type': 'error_text',
                'text': '%s: no such file or directory! %s'
                % (op_name, pattern)
            })
            status = returnvalues.CLIENT_ERROR

        for abs_path in match:
            relative_path = abs_path.replace(base_dir, '')
            if verbose(flags):
                output_objects.append({
                    'object_type': 'file',
                    'name': relative_path
                })

            # Generally refuse handling symlinks including root vgrid shares
            if os.path.islink(abs_path):
                output_objects.append({
                    'object_type': 'warning',
                    'text': """You're not allowed to move entire special folders like %s shared folders!"""
                    % configuration.site_vgrid_label
                })
                status = returnvalues.CLIENT_ERROR
                continue
            # Additionally refuse operations on inherited subvgrid share roots
            elif in_vgrid_share(configuration, abs_path) == relative_path:
                output_objects.append({
                    'object_type': 'warning',
                    'text': """You're not allowed to move entire %s shared folders!"""
                    % configuration.site_vgrid_label
                })
                status = returnvalues.CLIENT_ERROR
                continue
            elif os.path.realpath(abs_path) == os.path.realpath(base_dir):
                logger.error("%s: refusing move home dir: %s"
                             % (op_name, abs_path))
                output_objects.append({
                    'object_type': 'warning',
                    'text': "You're not allowed to move your entire home directory!"
                })
                status = returnvalues.CLIENT_ERROR
                continue

            if not check_write_access(abs_path):
                logger.warning('%s called without write access: %s'
                               % (op_name, abs_path))
                output_objects.append({
                    'object_type': 'error_text',
                    'text': 'cannot move "%s": inside a read-only location!'
                    % pattern
                })
                status = returnvalues.CLIENT_ERROR
                continue

            # If destination is a directory the src should be moved in there
            # Move with existing directory as target replaces the directory!
            abs_target = abs_dest
            if os.path.isdir(abs_target):
                if os.path.samefile(abs_target, abs_path):
                    output_objects.append({
                        'object_type': 'warning',
                        'text': "Cannot move '%s' to a subdirectory of itself!"
                        % relative_path
                    })
                    status = returnvalues.CLIENT_ERROR
                    continue
                abs_target = os.path.join(abs_target,
                                          os.path.basename(abs_path))

            try:
                gdp_iolog(configuration, client_id, environ['REMOTE_ADDR'],
                          'moved', [relative_path, relative_dest])
                shutil.move(abs_path, abs_target)
                logger.info('%s %s %s done' % (op_name, abs_path, abs_target))
            except Exception as exc:
                if not isinstance(exc, GDPIOLogError):
                    gdp_iolog(configuration, client_id,
                              environ['REMOTE_ADDR'], 'moved',
                              [relative_path, relative_dest],
                              failed=True, details=exc)
                output_objects.append({
                    'object_type': 'error_text',
                    'text': "%s: '%s': %s" % (op_name, relative_path, exc)
                })
                logger.error("%s: failed on '%s': %s"
                             % (op_name, relative_path, exc))
                status = returnvalues.SYSTEM_ERROR
                continue

    return (output_objects, status)
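

# A small sketch of the target resolution used in the move handler above:
# when the destination is an existing directory the source keeps its basename
# inside it, and moving a directory onto itself is refused. Illustrative
# only - none of the chroot or write-access checks from main() are repeated.
import os


def resolve_move_target(abs_path, abs_dest):
    """Return the effective target path for moving abs_path to abs_dest,
    or None when abs_dest is the very directory being moved."""
    if os.path.isdir(abs_dest):
        if os.path.samefile(abs_dest, abs_path):
            return None
        return os.path.join(abs_dest, os.path.basename(abs_path))
    return abs_dest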

def main(client_id, user_arguments_dict, environ=None):
    """Main function used by front end"""

    if environ is None:
        environ = os.environ
    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
        # NOTE: src and dst can use wildcards here
        typecheck_overrides={'src': valid_path_pattern,
                             'dst': valid_path_pattern},
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    flags = ''.join(accepted['flags'])
    src_list = accepted['src']
    dst = accepted['dst'][-1]
    iosessionid = accepted['iosessionid'][-1]
    share_id = accepted['share_id'][-1]
    freeze_id = accepted['freeze_id'][-1]

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append(
            {'object_type': 'error_text', 'text':
             '''Only accepting CSRF-filtered POST requests to prevent unintended updates'''})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name
    base_dir = os.path.abspath(os.path.join(configuration.user_home,
                                            client_dir)) + os.sep

    # Special handling if used from a job (no client_id but iosessionid)
    if not client_id and iosessionid:
        base_dir = os.path.realpath(configuration.webserver_home + os.sep
                                    + iosessionid) + os.sep

    # Use selected base as source and destination dir by default
    src_base = dst_base = base_dir

    # Sharelink import if share_id is given - change to sharelink as src base
    if share_id:
        try:
            (share_mode, _) = extract_mode_id(configuration, share_id)
        except ValueError as err:
            logger.error('%s called with invalid share_id %s: %s'
                         % (op_name, share_id, err))
            output_objects.append(
                {'object_type': 'error_text',
                 'text': 'Invalid sharelink ID: %s' % share_id})
            return (output_objects, returnvalues.CLIENT_ERROR)
        # TODO: load and check sharelink pickle (currently requires client_id)
        if share_mode == 'write-only':
            logger.error('%s called import from write-only sharelink: %s'
                         % (op_name, accepted))
            output_objects.append(
                {'object_type': 'error_text',
                 'text': 'Sharelink %s is write-only!' % share_id})
            return (output_objects, returnvalues.CLIENT_ERROR)
        target_dir = os.path.join(share_mode, share_id)
        src_base = os.path.abspath(os.path.join(configuration.sharelink_home,
                                                target_dir)) + os.sep
        if os.path.isfile(os.path.realpath(src_base)):
            logger.error('%s called import on single file sharelink: %s'
                         % (op_name, share_id))
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 """Import is only supported for directory sharelinks!"""})
            return (output_objects, returnvalues.CLIENT_ERROR)
        elif not os.path.isdir(src_base):
            logger.error('%s called import with non-existent sharelink: %s'
                         % (client_id, share_id))
            output_objects.append(
                {'object_type': 'error_text',
                 'text': 'No such sharelink: %s' % share_id})
            return (output_objects, returnvalues.CLIENT_ERROR)

    # Archive import if freeze_id is given - change to archive as src base
    if freeze_id:
        if not is_frozen_archive(client_id, freeze_id, configuration):
            logger.error('%s called with invalid freeze_id: %s'
                         % (op_name, freeze_id))
            output_objects.append(
                {'object_type': 'error_text',
                 'text': 'Invalid archive ID: %s' % freeze_id})
            return (output_objects, returnvalues.CLIENT_ERROR)
        target_dir = os.path.join(client_dir, freeze_id)
        src_base = os.path.abspath(os.path.join(configuration.freeze_home,
                                                target_dir)) + os.sep
        if not os.path.isdir(src_base):
            logger.error('%s called import with non-existent archive: %s'
                         % (client_id, freeze_id))
            output_objects.append(
                {'object_type': 'error_text',
                 'text': 'No such archive: %s' % freeze_id})
            return (output_objects, returnvalues.CLIENT_ERROR)

    status = returnvalues.OK

    abs_dest = dst_base + dst
    dst_list = glob.glob(abs_dest)
    if not dst_list:
        # New destination?
        if not glob.glob(os.path.dirname(abs_dest)):
            logger.error('%s called with illegal dst: %s' % (op_name, dst))
            output_objects.append(
                {'object_type': 'error_text',
                 'text': 'Illegal dst path provided!'})
            return (output_objects, returnvalues.CLIENT_ERROR)
        else:
            dst_list = [abs_dest]

    # Use last match in case of multiple matches
    dest = dst_list[-1]
    if len(dst_list) > 1:
        output_objects.append(
            {'object_type': 'warning',
             'text': 'dst (%s) matches multiple targets - using last: %s'
             % (dst, dest)})

    # IMPORTANT: path must be expanded to abs for proper chrooting
    abs_dest = os.path.abspath(dest)

    # Don't use abs_path in output as it may expose underlying
    # fs layout.
    relative_dest = abs_dest.replace(dst_base, '')
    if not valid_user_path(configuration, abs_dest, dst_base, True):
        logger.warning('%s tried to %s restricted path %s ! (%s)'
                       % (client_id, op_name, abs_dest, dst))
        output_objects.append(
            {'object_type': 'error_text',
             'text': "Invalid destination (%s expands to an illegal path)"
             % dst})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # We must make sure target dir exists if called in import X mode
    if (share_id or freeze_id) and not makedirs_rec(abs_dest, configuration):
        logger.error('could not create import destination dir: %s' % abs_dest)
        output_objects.append(
            {'object_type': 'error_text',
             'text': 'cannot import to "%s" : file in the way?'
             % relative_dest})
        return (output_objects, returnvalues.SYSTEM_ERROR)
    if not check_write_access(abs_dest, parent_dir=True):
        logger.warning('%s called without write access: %s'
                       % (op_name, abs_dest))
        output_objects.append(
            {'object_type': 'error_text',
             'text': 'cannot copy to "%s": inside a read-only location!'
             % relative_dest})
        return (output_objects, returnvalues.CLIENT_ERROR)
    if share_id and not force(flags) and not check_empty_dir(abs_dest):
        logger.warning('%s called %s sharelink import with non-empty dst: %s'
                       % (op_name, share_id, abs_dest))
        output_objects.append(
            {'object_type': 'error_text', 'text': """Importing a sharelink
like '%s' into the non-empty '%s' folder will potentially overwrite existing
files with the sharelink version. If you really want that, please try import
again and select the overwrite box to confirm it. You may want to back up
any important data from %s first, however.
""" % (share_id, relative_dest, relative_dest)})
        return (output_objects, returnvalues.CLIENT_ERROR)
    if freeze_id and not force(flags) and not check_empty_dir(abs_dest):
        logger.warning('%s called %s archive import with non-empty dst: %s'
                       % (op_name, freeze_id, abs_dest))
        output_objects.append(
            {'object_type': 'error_text', 'text': """Importing an archive
like '%s' into the non-empty '%s' folder will potentially overwrite existing
files with the archive version. If you really want that, please try import
again and select the overwrite box to confirm it. You may want to back up
any important data from %s first, however.
""" % (freeze_id, relative_dest, relative_dest)})
        return (output_objects, returnvalues.CLIENT_ERROR)

    for pattern in src_list:
        unfiltered_match = glob.glob(src_base + pattern)
        match = []
        for server_path in unfiltered_match:
            # IMPORTANT: path must be expanded to abs for proper chrooting
            abs_path = os.path.abspath(server_path)
            if not valid_user_path(configuration, abs_path, src_base, True):
                logger.warning('%s tried to %s restricted path %s ! (%s)'
                               % (client_id, op_name, abs_path, pattern))
                continue
            match.append(abs_path)

        # Now actually treat list of allowed matchings and notify if no
        # (allowed) match
        if not match:
            output_objects.append({'object_type': 'file_not_found',
                                   'name': pattern})
            status = returnvalues.FILE_NOT_FOUND

        for abs_path in match:
            relative_path = abs_path.replace(src_base, '')
            if verbose(flags):
                output_objects.append(
                    {'object_type': 'file', 'name': relative_path})
            # Prevent vgrid share copy which would create read-only dot dirs
            # Generally refuse handling symlinks including root vgrid shares
            if os.path.islink(abs_path):
                output_objects.append(
                    {'object_type': 'warning',
                     'text': """You're not allowed to copy entire special
folders like %s shared folders!""" % configuration.site_vgrid_label})
                status = returnvalues.CLIENT_ERROR
                continue
            elif os.path.realpath(abs_path) == os.path.realpath(base_dir):
                logger.error("%s: refusing copy home dir: %s"
                             % (op_name, abs_path))
                output_objects.append(
                    {'object_type': 'warning',
                     'text': "You're not allowed to copy your entire home directory!"})
                status = returnvalues.CLIENT_ERROR
                continue
            # src must be a file unless recursive is specified
            if not recursive(flags) and os.path.isdir(abs_path):
                logger.warning('skipping directory source %s' % abs_path)
                output_objects.append(
                    {'object_type': 'warning',
                     'text': 'skipping directory src %s!' % relative_path})
                continue
            # If destination is a directory the src should be copied there
            abs_target = abs_dest
            if os.path.isdir(abs_target):
                abs_target = os.path.join(abs_target,
                                          os.path.basename(abs_path))
            if os.path.abspath(abs_path) == os.path.abspath(abs_target):
                logger.warning('%s tried to %s %s to itself! (%s)'
                               % (client_id, op_name, abs_path, pattern))
                output_objects.append(
                    {'object_type': 'warning',
                     'text': "Cannot copy '%s' to self!" % relative_path})
                status = returnvalues.CLIENT_ERROR
                continue
            if os.path.isdir(abs_path) and \
                    abs_target.startswith(abs_path + os.sep):
                logger.warning('%s tried to %s %s to itself! (%s)'
                               % (client_id, op_name, abs_path, pattern))
                output_objects.append(
                    {'object_type': 'warning',
                     'text': "Cannot copy '%s' to (sub) self!"
                     % relative_path})
                status = returnvalues.CLIENT_ERROR
                continue
            try:
                gdp_iolog(configuration, client_id, environ['REMOTE_ADDR'],
                          'copied',
                          [relative_path, relative_dest + "/"
                           + os.path.basename(relative_path)])
                if os.path.isdir(abs_path):
                    shutil.copytree(abs_path, abs_target)
                else:
                    shutil.copy(abs_path, abs_target)
                logger.info('%s %s %s done' % (op_name, abs_path, abs_target))
            except Exception as exc:
                if not isinstance(exc, GDPIOLogError):
                    gdp_iolog(configuration, client_id,
                              environ['REMOTE_ADDR'], 'copied',
                              [relative_path, relative_dest + "/"
                               + os.path.basename(relative_path)],
                              failed=True, details=exc)
                output_objects.append(
                    {'object_type': 'error_text',
                     'text': "%s: failed on '%s' to '%s'"
                     % (op_name, relative_path, relative_dest)})
                logger.error("%s: failed on '%s': %s"
                             % (op_name, relative_path, exc))
                status = returnvalues.SYSTEM_ERROR

    return (output_objects, status)