def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    # Standard CGI entry point setup: configuration, logger and the output
    # object list that the front end renders
    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    client_dir = client_id_dir(client_id)
    status = returnvalues.OK
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        # validation helper already filled accepted with the error objects
        return (accepted, returnvalues.CLIENT_ERROR)

    # Mutating operation - only accept CSRF-protected POST requests
    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type': 'error_text',
            'text': '''Only accepting CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not configuration.site_enable_jobs:
        output_objects.append({
            'object_type': 'error_text',
            'text': '''Job execution is not enabled on this system'''
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    # Translate the submitted form fields to mRSL job description text using
    # the site keyword dictionary
    external_dict = get_keywords_dict(configuration)
    mrsl = fields_to_mrsl(configuration, user_arguments_dict, external_dict)

    tmpfile = None

    # save to temporary file
    try:
        (filehandle, real_path) = tempfile.mkstemp(text=True)
        relative_path = os.path.basename(real_path)
        os.write(filehandle, mrsl)
        os.close(filehandle)
    except Exception, err:
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Failed to write temporary mRSL file: %s' % err
        })
        # NOTE(review): tmpfile and relative_path are not used in this part
        # of the function - presumably the actual submit of the saved mRSL
        # file follows below; confirm the temp file is also cleaned up there
        return (output_objects, returnvalues.SYSTEM_ERROR)
def main(client_id, user_arguments_dict, environ=None):
    """Main function used by front end"""
    # environ may be injected by tests / wrappers; default to process environ
    if environ is None:
        environ = os.environ
    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    status = returnvalues.OK
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    flags = ''.join(accepted['flags'])
    patterns = accepted['path']
    # optional destination: when set, matched file contents are written there
    # instead of only being returned inline in the output objects
    dst = accepted['dst'][-1].lstrip(os.sep)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name
    base_dir = os.path.abspath(
        os.path.join(configuration.user_home, client_dir)) + os.sep

    if verbose(flags):
        for flag in flags:
            output_objects.append({
                'object_type': 'text',
                'text': '%s using flag: %s' % (op_name, flag)
            })

    if dst:
        # Writing to dst modifies user data - require CSRF-protected POST
        if not safe_handler(configuration, 'post', op_name, client_id,
                            get_csrf_limit(configuration), accepted):
            output_objects.append({
                'object_type': 'error_text',
                'text': '''Only accepting CSRF-filtered POST requests to prevent unintended updates'''
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        # truncate on first write; switched to append mode further below
        dst_mode = "wb"

        # IMPORTANT: path must be expanded to abs for proper chrooting
        abs_dest = os.path.abspath(os.path.join(base_dir, dst))
        relative_dst = abs_dest.replace(base_dir, '')
        if not valid_user_path(configuration, abs_dest, base_dir, True):
            logger.warning('%s tried to %s into restricted path %s ! (%s)'
                           % (client_id, op_name, abs_dest, dst))
            output_objects.append({
                'object_type': 'error_text',
                'text': "invalid destination: '%s'" % dst
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

    for pattern in patterns:

        # Check directory traversal attempts before actual handling to avoid
        # leaking information about file system layout while allowing
        # consistent error messages
        unfiltered_match = glob.glob(base_dir + pattern)
        match = []
        for server_path in unfiltered_match:
            # IMPORTANT: path must be expanded to abs for proper chrooting
            abs_path = os.path.abspath(server_path)
            if not valid_user_path(configuration, abs_path, base_dir, True):
                # out of bounds - save user warning for later to allow
                # partial match:
                # ../*/* is technically allowed to match own files.
                logger.warning('%s tried to %s restricted path %s ! (%s)'
                               % (client_id, op_name, abs_path, pattern))
                continue
            match.append(abs_path)

        # Now actually treat list of allowed matchings and notify if no
        # (allowed) match
        if not match:
            output_objects.append({
                'object_type': 'file_not_found',
                'name': pattern
            })
            status = returnvalues.FILE_NOT_FOUND

        for abs_path in match:
            output_lines = []
            relative_path = abs_path.replace(base_dir, '')
            try:
                # GDP audit log of the read access before touching the file
                gdp_iolog(configuration, client_id, environ['REMOTE_ADDR'],
                          'accessed', [relative_path])
                fd = open(abs_path, 'r')
                # use file directly as iterator for efficiency
                for line in fd:
                    output_lines.append(line)
                fd.close()
            except Exception, exc:
                if not isinstance(exc, GDPIOLogError):
                    gdp_iolog(configuration, client_id,
                              environ['REMOTE_ADDR'], 'accessed',
                              [relative_path], failed=True, details=exc)
                output_objects.append({
                    'object_type': 'error_text',
                    'text': "%s: '%s': %s" % (op_name, relative_path, exc)
                })
                logger.error("%s: failed on '%s': %s"
                             % (op_name, relative_path, exc))
                status = returnvalues.SYSTEM_ERROR
                continue
            if dst:
                try:
                    gdp_iolog(configuration, client_id,
                              environ['REMOTE_ADDR'], 'modified', [dst])
                    out_fd = open(abs_dest, dst_mode)
                    out_fd.writelines(output_lines)
                    out_fd.close()
                    logger.info('%s %s %s done'
                                % (op_name, abs_path, abs_dest))
                except Exception, exc:
                    if not isinstance(exc, GDPIOLogError):
                        # NOTE(review): this call passes error=True while the
                        # read-failure branch above passes failed=True -
                        # confirm which keyword gdp_iolog actually expects
                        gdp_iolog(configuration, client_id,
                                  environ['REMOTE_ADDR'], 'modified', [dst],
                                  error=True, details=exc)
                    output_objects.append({
                        'object_type': 'error_text',
                        'text': "write failed: '%s'" % exc
                    })
                    logger.error("%s: write failed on '%s': %s"
                                 % (op_name, abs_dest, exc))
                    status = returnvalues.SYSTEM_ERROR
                    continue
                output_objects.append({
                    'object_type': 'text',
                    'text': "wrote %s to %s" % (relative_path, relative_dst)
                })
                # Prevent truncate after first write
                dst_mode = "ab+"
            else:
                entry = {
                    'object_type': 'file_output',
                    'lines': output_lines,
                    'wrap_binary': binary(flags),
                    'wrap_targets': ['lines']
                }
                if verbose(flags):
                    entry['path'] = relative_path
                output_objects.append(entry)

                # TODO: rip this hack out into real download handler?
                # Force download of files when output_format == 'file_format'
                # This will only work for the first file matching a glob when
                # using file_format.
                # And it is supposed to only work for one file.
                if user_arguments_dict.has_key('output_format'):
                    output_format = user_arguments_dict['output_format'][0]
                    if output_format == 'file':
                        output_objects.append({
                            'object_type': 'start',
                            'headers': [('Content-Disposition',
                                         'attachment; filename="%s";'
                                         % os.path.basename(abs_path))]
                        })
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)

    # Announce the operation up front so it shows even on early errors
    output_objects.append({
        'object_type': 'text',
        'text': '--------- Trying to STOP store ----------'
    })

    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    unique_resource_name = accepted['unique_resource_name'][-1]
    store_name_list = accepted['store_name']
    # explicit opt-in flags arrive as the strings 'true'/'false'
    stop_all = ('true' == accepted['all'][-1].lower())
    run_parallel = ('true' == accepted['parallel'][-1].lower())

    # Mutating operation - only accept CSRF-protected POST requests
    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type': 'error_text',
            'text': '''Only accepting CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Only resource owners may stop stores
    if not is_owner(client_id, unique_resource_name,
                    configuration.resource_home, logger):
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Failure: You must be an owner of '
            + unique_resource_name + ' to stop the store!'
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    exit_status = returnvalues.OK

    if stop_all:
        store_name_list = get_all_store_names(unique_resource_name)

    # take action based on supplied list of stores
    if len(store_name_list) == 0:
        output_objects.append({
            'object_type': 'text',
            'text':
            "No stores specified and 'all' argument not set to true: Nothing to do!"
        })

    # Launch one worker per store; in sequential mode wait for each in turn
    workers = []
    for store_name in store_name_list:
        stop_task = Worker(target=stop_resource_store,
                           args=(unique_resource_name, store_name,
                                 configuration.resource_home, logger))
        workers.append((store_name, [stop_task]))
        stop_task.start()
        if not run_parallel:
            stop_task.join()

    # Collect per-store results and report each under its own header
    for (store_name, task_list) in workers:
        (stop_ok, msg) = task_list[0].finish()
        output_objects.append({'object_type': 'header',
                               'text': 'Stop store'})
        if stop_ok:
            output_objects.append({
                'object_type': 'text',
                'text': 'Stop store success: %s' % msg
            })
        else:
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Problems stopping store: %s' % msg
            })
            exit_status = returnvalues.SYSTEM_ERROR

    return (output_objects, exit_status)
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    title_entry = find_entry(output_objects, 'title')
    label = "%s" % configuration.site_vgrid_label
    title_entry['text'] = "Add/Update %s Trigger" % label
    output_objects.append({
        'object_type': 'header',
        'text': 'Add/Update %s Trigger' % label
    })
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    # NOTE: strip leftmost slashes from all fields used in file paths to avoid
    # interference with os.path.join calls. Furthermore we strip and normalize
    # the path variable first to make sure it does not point outside the
    # vgrid. In practice any such directory traversal attempts will generally
    # be moot since the grid_events daemon only starts a listener for each
    # top-level vgrid and in there only reacts to events that match trigger
    # rules from that particular vgrid. Thus only subvgrid access to parent
    # vgrids might be a concern and still of limited consequence.
    # NOTE: merge multi args into one string and split again to get flat array
    rule_id = accepted['rule_id'][-1].strip()
    vgrid_name = accepted['vgrid_name'][-1].strip().lstrip(os.sep)
    path = os.path.normpath(accepted['path'][-1].strip()).lstrip(os.sep)
    changes = [i.strip() for i in ' '.join(accepted['changes']).split()]
    action = accepted['action'][-1].strip()
    arguments = [
        i.strip() for i in shlex.split(' '.join(accepted['arguments']))
    ]
    rate_limit = accepted['rate_limit'][-1].strip()
    settle_time = accepted['settle_time'][-1].strip()
    match_files = accepted['match_files'][-1].strip() == 'True'
    match_dirs = accepted['match_dirs'][-1].strip() == 'True'
    match_recursive = accepted['match_recursive'][-1].strip() == 'True'
    rank_str = accepted['rank'][-1]
    # rank selects reordering of an existing rule; None means not requested
    try:
        rank = int(rank_str)
    except ValueError:
        rank = None

    logger.debug("addvgridtrigger with args: %s" % user_arguments_dict)

    # Mutating operation - only accept CSRF-protected POST requests
    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type': 'error_text',
            'text': '''Only accepting CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name
    base_dir = os.path.abspath(
        os.path.join(configuration.user_home, client_dir)) + os.sep

    # we just use a high res timestamp as automatic rule_id
    if rule_id == keyword_auto:
        rule_id = "%d" % (time.time() * 1E8)

    if action == keyword_auto:
        action = valid_trigger_actions[0]

    if any_state in changes:
        changes = valid_trigger_changes

    logger.info("addvgridtrigger %s" % vgrid_name)

    # Validity of user and vgrid names is checked in this init function so
    # no need to worry about illegal directory traversal through variables
    (ret_val, msg, ret_variables) = \
        init_vgrid_script_add_rem(vgrid_name, client_id, rule_id, 'trigger',
                                  configuration)
    if not ret_val:
        output_objects.append({'object_type': 'error_text', 'text': msg})
        return (output_objects, returnvalues.CLIENT_ERROR)
    elif msg:
        # In case of warnings, msg is non-empty while ret_val remains True
        output_objects.append({'object_type': 'warning', 'text': msg})

    # if we get here user is either vgrid owner or allowed to add rule

    # don't add if already in vgrid or parent vgrid - but update if owner
    update_id = None
    if vgrid_is_trigger(vgrid_name, rule_id, configuration):
        if vgrid_is_trigger_owner(vgrid_name, rule_id, client_id,
                                  configuration):
            update_id = 'rule_id'
        else:
            output_objects.append({
                'object_type': 'error_text',
                'text':
                '%s is already a trigger owned by somebody else in the %s'
                % (rule_id, label)
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

    # don't add if already in subvgrid
    (list_status, subvgrids) = vgrid_list_subvgrids(vgrid_name, configuration)
    if not list_status:
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Error getting list of sub%ss: %s' % (label, subvgrids)
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)
    for subvgrid in subvgrids:
        if vgrid_is_trigger(subvgrid, rule_id, configuration,
                            recursive=False):
            output_objects.append({
                'object_type': 'error_text',
                'text':
                '''%(rule_id)s is already in a sub-%(vgrid_label)s (%(subvgrid)s). 
Please remove the trigger from the sub-%(vgrid_label)s and try again''' % {
                    'rule_id': rule_id,
                    'subvgrid': subvgrid,
                    'vgrid_label': label
                }
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

    if not action in valid_trigger_actions:
        output_objects.append({
            'object_type': 'error_text',
            'text': "invalid action value %s" % action
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if keyword_all in changes:
        changes = valid_trigger_changes
    for change in changes:
        if not change in valid_trigger_changes:
            output_objects.append({
                'object_type': 'error_text',
                'text': "found invalid change value %s" % change
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

    # Check if we should load saved trigger for rank change or update
    rule_dict = None
    if rank is not None or update_id is not None:
        (load_status, all_triggers) = vgrid_triggers(vgrid_name,
                                                     configuration)
        if not load_status:
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Failed to load triggers for %s: %s'
                % (vgrid_name, all_triggers)
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)
        for saved_dict in all_triggers:
            if saved_dict['rule_id'] == rule_id:
                rule_dict = saved_dict
                break
        if rule_dict is None:
            # NOTE(review): this error includes the full all_triggers list -
            # confirm exposing other rules in the message is intended
            output_objects.append({
                'object_type': 'error_text',
                'text': 'No such trigger %s for %s: %s'
                % (rule_id, vgrid_name, all_triggers)
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
    elif not path:
        # New trigger with missing path
        output_objects.append({
            'object_type': 'error_text',
            'text': '''Either path or rank must be set.'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)
    elif action == "submit" and not arguments:
        # New submit trigger with missing mrsl arguments
        output_objects.append({
            'object_type': 'error_text',
            'text':
            '''Submit triggers must give a job description file path as argument.'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Handle create and update (i.e. new, update all or just refresh mRSL)

    if rank is None:

        # IMPORTANT: we save the job template contents to avoid potential
        # abuse. Otherwise someone else in the VGrid could tamper with the
        # template and make the next trigger execute arbitrary code on behalf
        # of the rule owner.
        templates = []

        # Merge current and saved values
        req_dict = {
            'rule_id': rule_id,
            'vgrid_name': vgrid_name,
            'path': path,
            'changes': changes,
            'run_as': client_id,
            'action': action,
            'arguments': arguments,
            'rate_limit': rate_limit,
            'settle_time': settle_time,
            'match_files': match_files,
            'match_dirs': match_dirs,
            'match_recursive': match_recursive,
            'templates': templates
        }
        if rule_dict is None:
            rule_dict = req_dict
        else:
            # only overwrite fields the user actually supplied in this request
            for field in user_arguments_dict:
                if req_dict.has_key(field):
                    rule_dict[field] = req_dict[field]

        # Now refresh template contents
        if rule_dict['action'] == "submit":
            for rel_path in rule_dict['arguments']:
                # IMPORTANT: path must be expanded to abs for proper chrooting
                abs_path = os.path.abspath(os.path.join(base_dir, rel_path))
                try:
                    if not valid_user_path(configuration, abs_path, base_dir,
                                           True):
                        logger.warning(
                            '%s tried to %s restricted path %s ! (%s)'
                            % (client_id, op_name, abs_path, rel_path))
                        raise ValueError('invalid submit path argument: %s'
                                         % rel_path)
                    temp_fd = open(abs_path)
                    templates.append(temp_fd.read())
                    temp_fd.close()
                except Exception, err:
                    logger.error("read submit argument file failed: %s"
                                 % err)
                    output_objects.append({
                        'object_type': 'error_text',
                        'text': 'failed to read submit argument file "%s"'
                        % rel_path
                    })
                    return (output_objects, returnvalues.CLIENT_ERROR)

            # Save updated template contents here
            rule_dict['templates'] = templates
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    logger.debug("User: %s executing %s" % (client_id, op_name))
    if not configuration.site_enable_cloud:
        output_objects.append({
            'object_type': 'error_text',
            'text': 'The cloud service is not enabled on the system'
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    output_objects.append({
        'object_type': 'header',
        'text': 'Cloud Instance Management'
    })

    user_map = get_full_user_map(configuration)
    user_dict = user_map.get(client_id, None)
    # Optional limitation of cloud access via vgrid permission
    if not user_dict or not cloud_access_allowed(configuration, user_dict):
        output_objects.append({
            'object_type': 'error_text',
            'text':
            "You don't have permission to access the cloud facilities on "
            "this site"
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    return_status = returnvalues.OK
    action = accepted['action'][-1]
    # NOTE: instance_X may be empty list - fall back to empty string
    instance_id = ([''] + accepted['instance_id'])[-1]
    instance_label = ([''] + accepted['instance_label'])[-1]
    instance_image = ([''] + accepted['instance_image'])[-1]
    accept_terms = (([''] + accepted['accept_terms'])[-1] in ('yes', 'on'))
    cloud_id = accepted['service'][-1]
    # Flatten the conf entry matching cloud_id into a single lookup dict
    service = {
        k: v
        for options in configuration.cloud_services
        for k, v in options.items() if options['service_name'] == cloud_id
    }

    if not service:
        valid_services = [
            options['service_name']
            for options in configuration.cloud_services
        ]
        output_objects.append({
            'object_type': 'error_text',
            'text': '%s is not among the valid cloud services: %s'
            % (cloud_id, ', '.join(valid_services))
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    valid_service = valid_cloud_service(configuration, service)
    if not valid_service:
        output_objects.append({
            'object_type': 'error_text',
            'text': 'The service %s appears to be misconfigured, '
            'please contact a system administrator about this issue'
            % cloud_id
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    service_title = service['service_title']

    if not action in valid_actions:
        output_objects.append({
            'object_type': 'error_text',
            'text': '%s is not a valid action '
            'allowed actions include %s' % (action, ', '.join(valid_actions))
        })
        return (output_objects, returnvalues.CLIENT_ERROR)
    elif action in cloud_edit_actions:
        # Mutating actions require CSRF-protected POST
        if not safe_handler(configuration, 'post', op_name, client_id,
                            get_csrf_limit(configuration), accepted):
            output_objects.append({
                'object_type': 'error_text',
                'text': '''Only accepting CSRF-filtered POST requests to prevent unintended updates'''
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

    cloud_flavor = service.get("service_flavor", "openstack")
    user_home_dir = os.path.join(configuration.user_home, client_dir)

    client_email = extract_field(client_id, 'email')
    if not client_email:
        logger.error("could not extract client email for %s!" % client_id)
        output_objects.append({
            'object_type': 'error_text',
            'text': "No client ID found - can't continue"
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    ssh_auth_msg = "Login requires your private key for your public key:"
    instance_missing_msg = "Found no '%s' instance at %s. Please contact a " \
        + "site administrator if it should be there."

    # Fall back to the label embedded in the instance ID when none was given
    _label = instance_label
    if instance_id and not _label:
        _, _label, _ = cloud_split_instance_id(configuration, client_id,
                                               instance_id)

    if "create" == action:
        if not accept_terms:
            logger.error("refusing create without accepting terms for %s!"
                         % client_id)
            output_objects.append({
                'object_type': 'error_text',
                'text':
                "You MUST accept the cloud user terms to create instances"
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        # Load all instances and make sure none contains label in ID
        saved_instances = cloud_load_instance(configuration, client_id,
                                              cloud_id, keyword_all)
        for (saved_id, instance) in saved_instances.items():
            if instance_label == instance.get('INSTANCE_LABEL', saved_id):
                logger.error("Refused %s re-create %s cloud instance %s!" %
                             (client_id, cloud_id, instance_label))
                output_objects.append({
                    'object_type': 'error_text',
                    'text':
                    "You already have an instance with the label '%s'!"
                    % instance_label
                })
                return (output_objects, returnvalues.CLIENT_ERROR)

        max_instances = lookup_user_service_value(
            configuration, client_id, service, 'service_max_user_instances')
        max_user_instances = int(max_instances)
        # NOTE: a negative max value means unlimited but 0 or more is enforced
        if max_user_instances >= 0 and \
                len(saved_instances) >= max_user_instances:
            logger.error("Refused %s create additional %s cloud instances!"
                         % (client_id, cloud_id))
            output_objects.append({
                'object_type': 'error_text',
                'text':
                "You already have the maximum allowed %s instances (%d)!"
                % (service_title, max_user_instances)
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        if not instance_label:
            logger.error("Refused %s create unlabelled %s cloud instance!"
                         % (client_id, cloud_id))
            output_objects.append({
                'object_type': 'error_text',
                'text': "No instance label provided!"
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        # Lookup user-specific allowed images (colon-separated image names)
        allowed_images = allowed_cloud_images(configuration, client_id,
                                              cloud_id, cloud_flavor)
        if not allowed_images:
            output_objects.append({
                'object_type': 'error_text',
                'text': "No valid / allowed cloud images found!"
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        if not instance_image:
            instance_image = allowed_images[0]
            logger.info("No image specified - using first for %s in %s: %s"
                        % (client_id, cloud_id, instance_image))

        # Map the requested image name to the backend image ID
        image_id = None
        for (img_name, img_id, img_alias) in allowed_images:
            if instance_image == img_name:
                image_id = img_id
                break
        if not image_id:
            logger.error("No matching image ID found for %s in %s: %s"
                         % (client_id, cloud_id, instance_image))
            output_objects.append({
                'object_type': 'error_text',
                'text': "No such image found: %s" % instance_image
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        # TODO: remove this direct key injection if we can delay it
        cloud_settings = load_cloud(client_id, configuration)
        raw_keys = cloud_settings.get('authkeys', '').split('\n')
        # strip '#'-comments and blank lines from the saved pub keys
        auth_keys = [i.split('#', 1)[0].strip() for i in raw_keys]
        auth_keys = [i for i in auth_keys if i]
        if not auth_keys:
            logger.error("No cloud pub keys setup for %s - refuse create"
                         % client_id)
            output_objects.append({
                'object_type': 'error_text',
                'text': """
You haven't provided any valid ssh pub key(s) for cloud instance login, which
is stricly required for all use. Please do so before you try again.
"""
            })
            output_objects.append({
                'object_type': 'link',
                'destination': 'setup.py?topic=cloud',
                'text': 'Open cloud setup',
                'class': 'cloudsetuplink iconspace',
                'title': 'open cloud setup',
                'target': '_blank'
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        logger.debug("Continue create for %s with auth_keys: %s"
                     % (client_id, auth_keys))

        # Create a new internal keyset and session id
        (priv_key, pub_key) = generate_ssh_rsa_key_pair(encode_utf8=True)
        session_id = generate_random_ascii(session_id_bytes,
                                           charset='0123456789abcdef')

        # We make sure to create instance with a globally unique ID on the
        # cloud while only showing the requested instance_label to the user.
        instance_id = cloud_build_instance_id(configuration, client_email,
                                              instance_label, session_id)
        # TODO: make more fields flexible/conf
        cloud_dict = {
            'INSTANCE_ID': instance_id,
            'INSTANCE_LABEL': instance_label,
            'INSTANCE_IMAGE': instance_image,
            'IMAGE_ID': image_id,
            'AUTH_KEYS': auth_keys,
            'USER_CERT': client_id,
            'INSTANCE_PRIVATE_KEY': priv_key,
            'INSTANCE_PUBLIC_KEY': pub_key,
            # don't need fraction precision, also not all systems provide
            # fraction precision.
            'CREATED_TIMESTAMP': int(time.time()),
            # Init unset ssh address and leave for floating IP assigment below
            'INSTANCE_SSH_IP': '',
            'INSTANCE_SSH_PORT': 22,
        }
        (action_status, action_msg) = create_cloud_instance(
            configuration, client_id, cloud_id, cloud_flavor, instance_id,
            image_id, auth_keys)
        if not action_status:
            logger.error(
                "%s %s cloud instance %s for %s failed: %s" %
                (action, cloud_id, instance_id, client_id, action_msg))
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Your %s instance %s at %s did not succeed: %s'
                % (action, instance_label, service_title, action_msg)
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)

        # On success the action_msg contains the assigned floating IP address
        instance_ssh_fqdn = action_msg
        cloud_dict['INSTANCE_SSH_IP'] = instance_ssh_fqdn
        if not cloud_save_instance(configuration, client_id, cloud_id,
                                   instance_id, cloud_dict):
            logger.error("save new %s cloud instance %s for %s failed"
                         % (cloud_id, instance_id, client_id))
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Error saving your %s cloud instance setup'
                % service_title
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)

        output_objects.append({
            'object_type': 'text',
            'text': "%s instance %s at %s: %s"
            % (action, instance_label, service_title, "success")
        })
        output_objects.append({
            'object_type': 'html_form',
            'text': _ssh_help(configuration, client_id, cloud_id,
                              cloud_dict, instance_id)
        })
    elif "delete" == action:
        saved_instance = cloud_load_instance(configuration, client_id,
                                             cloud_id, instance_id)
        if not saved_instance:
            logger.error("no saved %s cloud instance %s for %s to delete"
                         % (cloud_id, instance_id, client_id))
            output_objects.append({
                'object_type': 'error_text',
                'text': instance_missing_msg % (_label, service_title)
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        (action_status, action_msg) = delete_cloud_instance(
            configuration, client_id, cloud_id, cloud_flavor, instance_id)
        if not action_status:
            logger.error(
                "%s %s cloud instance %s for %s failed: %s" %
                (action, cloud_id, instance_id, client_id, action_msg))
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Your %s instance %s at %s did not succeed: %s'
                % (action, _label, service_title, action_msg)
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)
        # also drop the local bookkeeping entry for the deleted instance
        if not cloud_purge_instance(configuration, client_id, cloud_id,
                                    instance_id):
            logger.error("purge %s cloud instance %s for %s failed"
                         % (cloud_id, instance_id, client_id))
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Error deleting your %s cloud instance setup'
                % service_title
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)
        output_objects.append({
            'object_type': 'text',
            'text': "%s instance %s at %s: %s"
            % (action, _label, service_title, "success")
        })
    elif "status" == action:
        saved_instance = cloud_load_instance(configuration, client_id,
                                             cloud_id, instance_id)
        if not saved_instance:
            logger.error("no saved %s cloud instance %s for %s to query"
                         % (cloud_id, instance_id, client_id))
            output_objects.append({
                'object_type': 'error_text',
                'text': instance_missing_msg % (_label, service_title)
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        (action_status, action_msg) = status_of_cloud_instance(
            configuration, client_id, cloud_id, cloud_flavor, instance_id)
        if not action_status:
            logger.error(
                "%s %s cloud instance %s for %s failed: %s" %
                (action, cloud_id, instance_id, client_id, action_msg))
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Your %s instance %s at %s did not succeed: %s'
                % (action, _label, service_title, action_msg)
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)
        output_objects.append({
            'object_type': 'text',
            'text': "%s instance %s at %s: %s"
            % (action, _label, service_title, action_msg)
        })
        # Show instance access details if running
        if action_msg in ('ACTIVE', 'RUNNING'):
            # Only include web console if explicitly configured
            if configuration.user_cloud_console_access:
                (console_status, console_msg) = web_access_cloud_instance(
                    configuration, client_id, cloud_id, cloud_flavor,
                    instance_id)
                if not console_status:
                    logger.error(
                        "%s cloud instance %s console for %s failed: %s" %
                        (cloud_id, instance_id, client_id, console_msg))
                    output_objects.append({
                        'object_type': 'error_text',
                        'text': 'Failed to get instance %s at %s console: %s'
                        % (_label, service_title, console_msg)
                    })
                    return (output_objects, returnvalues.SYSTEM_ERROR)
                logger.info("%s cloud instance %s console for %s: %s"
                            % (cloud_id, instance_id, client_id,
                               console_msg))
                output_objects.append({
                    'object_type': 'link',
                    'destination': console_msg,
                    'text': 'Open web console',
                    'class': 'consolelink iconspace',
                    'title': 'open web console',
                    'target': '_blank'
                })
            output_objects.append({'object_type': 'text', 'text': ''})
            output_objects.append({
                'object_type': 'html_form',
                'text': _ssh_help(configuration, client_id, cloud_id,
                                  saved_instance, instance_id)
            })
            output_objects.append({'object_type': 'text', 'text': ''})
    elif "start" == action:
        saved_instance = cloud_load_instance(configuration, client_id,
                                             cloud_id, instance_id)
        if not saved_instance:
            logger.error("no saved %s cloud instance %s for %s to start"
                         % (cloud_id, instance_id, client_id))
            output_objects.append({
                'object_type': 'error_text',
                'text': instance_missing_msg % (_label, service_title)
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        (action_status, action_msg) = start_cloud_instance(
            configuration, client_id, cloud_id, cloud_flavor, instance_id)
        if not action_status:
            logger.error(
                "%s %s cloud instance %s for %s failed: %s" %
                (action, cloud_id, instance_id, client_id, action_msg))
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Your %s instance %s at %s did not succeed: %s'
                % (action, _label, service_title, action_msg)
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)
        output_objects.append({
            'object_type': 'text',
            'text': "%s instance %s at %s: %s"
            % (action, _label, service_title, "success")
        })
        output_objects.append({
            'object_type': 'html_form',
            'text': _ssh_help(configuration, client_id, cloud_id,
                              saved_instance, instance_id)
        })
    elif action in ("softrestart", "hardrestart"):
        # backend expects boot strength SOFT or HARD
        boot_strength = action.replace("restart", "").upper()
        saved_instance = cloud_load_instance(configuration, client_id,
                                             cloud_id, instance_id)
        if not saved_instance:
            logger.error("no saved %s cloud instance %s for %s to restart"
                         % (cloud_id, instance_id, client_id))
            output_objects.append({
                'object_type': 'error_text',
                'text': instance_missing_msg % (_label, service_title)
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        (action_status, action_msg) = restart_cloud_instance(
            configuration, client_id, cloud_id, cloud_flavor, instance_id,
            boot_strength)
        if not action_status:
            logger.error(
                "%s %s cloud instance %s for %s failed: %s" %
                (action, cloud_id, instance_id, client_id, action_msg))
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Your %s instance %s at %s did not succeed: %s'
                % (action, _label, service_title, action_msg)
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)
        output_objects.append({
            'object_type': 'text',
            'text': "%s instance %s at %s: %s"
            % (action, _label, service_title, "success")
        })
        output_objects.append({
            'object_type': 'html_form',
            'text': _ssh_help(configuration, client_id, cloud_id,
                              saved_instance, instance_id)
        })
    elif "stop" == action:
        saved_instance = cloud_load_instance(configuration, client_id,
                                             cloud_id, instance_id)
        if not saved_instance:
            logger.error("no saved %s cloud instance %s for %s to %s"
                         % (cloud_id, instance_id, client_id, action))
            output_objects.append({
                'object_type': 'error_text',
                'text': instance_missing_msg % (_label, service_title)
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        (action_status, action_msg) = stop_cloud_instance(
            configuration, client_id, cloud_id, cloud_flavor, instance_id)
        if not action_status:
            logger.error(
                "%s %s cloud instance %s for %s failed: %s" %
                (action, cloud_id, instance_id, client_id, action_msg))
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Your %s instance %s at %s did not succeed: %s'
                % (action, _label, service_title, action_msg)
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)
        output_objects.append({
            'object_type': 'text',
            'text': "%s instance %s at %s: %s"
            % (action, _label, service_title, "success")
        })
    elif "webaccess" == action:
        saved_instance = cloud_load_instance(configuration, client_id,
                                             cloud_id, instance_id)
        if not saved_instance:
            logger.error("no saved %s cloud instance %s for %s to query"
                         % (cloud_id, instance_id, client_id))
            output_objects.append({
                'object_type': 'error_text',
                'text': instance_missing_msg % (_label, service_title)
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        if not configuration.user_cloud_console_access:
            logger.error("web console not enabled in conf!")
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Site does not expose cloud web console!'
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        (action_status, action_msg) = web_access_cloud_instance(
            configuration, client_id, cloud_id, cloud_flavor, instance_id)
        if not action_status:
            logger.error(
                "%s %s cloud instance %s for %s failed: %s" %
                (action, service_title, instance_id, client_id, action_msg))
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Your %s instance %s at %s did not succeed: %s'
                % (action, _label, service_title, action_msg)
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)
        output_objects.append({
            'object_type': 'text',
            'text': "%s instance %s at %s"
            % (action, _label, service_title)
        })
        # On success action_msg holds the console URL
        output_objects.append({
            'object_type': 'link',
            'destination': action_msg,
            'text': 'Open web console',
            'class': 'consolelink iconspace',
            'title': 'open web console',
            'target': '_blank'
        })
        output_objects.append({'object_type': 'text', 'text': ''})
        output_objects.append({
            'object_type': 'html_form',
            'text': _ssh_help(configuration, client_id, cloud_id,
                              saved_instance, instance_id)
        })
    elif "updatekeys" == action:
        saved_instance = cloud_load_instance(configuration, client_id,
                                             cloud_id, instance_id)
        if not saved_instance:
            logger.error("no saved %s cloud instance %s for %s to update"
                         % (cloud_id, instance_id, client_id))
            output_objects.append({
                'object_type': 'error_text',
                'text': instance_missing_msg % (_label, service_title)
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        cloud_settings = load_cloud(client_id, configuration)
        # NOTE(review): unlike the create path above, these keys are NOT
        # stripped of '#'-comments and blank lines - confirm that
        # update_cloud_instance_keys copes with raw authkeys lines
        auth_keys = cloud_settings.get('authkeys', '').split('\n')
        (action_status, action_msg) = update_cloud_instance_keys(
            configuration, client_id, cloud_id, cloud_flavor, instance_id,
            auth_keys)
        if not action_status:
            logger.error(
                "%s %s cloud instance %s for %s failed: %s" %
                (action, cloud_id, instance_id, client_id, action_msg))
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Your %s instance %s at %s did not succeed: %s'
                % (action, _label, service_title, action_msg)
            })
            return (output_objects,
returnvalues.SYSTEM_ERROR) output_objects.append({ 'object_type': 'text', 'text': "%s instance %s at %s: %s" % (action, _label, service_title, "success") }) output_objects.append({ 'object_type': 'html_form', 'text': _ssh_help(configuration, client_id, cloud_id, saved_instance, instance_id) }) output_objects.append({'object_type': 'text', 'text': ssh_auth_msg}) for pub_key in auth_keys: output_objects.append({'object_type': 'text', 'text': pub_key}) else: output_objects.append({ 'object_type': 'error_text', 'text': 'Unknown action: %s' % action }) return_status = returnvalues.CLIENT_ERROR output_objects.append({ 'object_type': 'link', 'destination': 'cloud.py', 'class': 'backlink iconspace', 'title': 'Go back to cloud management', 'text': 'Back to cloud management' }) return (output_objects, return_status)
def main(client_id, user_arguments_dict):
    """Main function used by front end.

    Adds one or more users as members of a vgrid. For each requested cert_id
    the function validates permissions, rejects duplicates/conflicting roles,
    optionally applies a pure rank change, and otherwise registers membership
    and links the vgrid files into the new member's home directory.
    Accumulates per-entry errors in output_objects rather than aborting on the
    first failure.
    """

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    defaults = signature()[1]
    title_entry = find_entry(output_objects, 'title')
    label = "%s" % configuration.site_vgrid_label
    title_entry['text'] = "Add %s Member" % label
    output_objects.append({'object_type': 'header', 'text':
                           'Add %s Member(s)' % label})
    status = returnvalues.OK
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    vgrid_name = accepted['vgrid_name'][-1].strip()
    cert_id_list = accepted['cert_id']
    # request_name arrives hex-encoded from the request form
    request_name = unhexlify(accepted['request_name'][-1])
    # Pad rank list with empty strings so zip below covers every cert_id even
    # when fewer ranks than cert_ids were supplied
    rank_list = accepted['rank'] + ['' for _ in cert_id_list]

    # Member changes modify state, so only CSRF-protected POST is accepted
    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append(
            {'object_type': 'error_text', 'text': '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
             })
        return (output_objects, returnvalues.CLIENT_ERROR)

    user_map = get_full_user_map(configuration)
    user_dict = user_map.get(client_id, None)
    # Optional site-wide limitation of manage vgrid permission
    if not user_dict or \
            not vgrid_manage_allowed(configuration, user_dict):
        logger.warning("user %s is not allowed to manage vgrids!" % client_id)
        output_objects.append(
            {'object_type': 'error_text', 'text':
             'Only privileged users can manage %ss' % label})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # make sure vgrid settings allow this owner to edit members
    (allow_status, allow_msg) = allow_members_adm(configuration, vgrid_name,
                                                  client_id)
    if not allow_status:
        output_objects.append({'object_type': 'error_text', 'text':
                               allow_msg})
        return (output_objects, returnvalues.CLIENT_ERROR)

    cert_id_added = []
    for (cert_id, rank_str) in zip(cert_id_list, rank_list):
        cert_id = cert_id.strip()
        cert_dir = client_id_dir(cert_id)
        # A non-numeric/empty rank means "plain add"; a numeric rank means
        # "set member rank" (possibly for an existing member)
        try:
            rank = int(rank_str)
        except ValueError:
            rank = None

        # Allow openid alias as subject if openid with alias is enabled
        if configuration.user_openid_providers and configuration.user_openid_alias:
            cert_id = expand_openid_alias(cert_id, configuration)

        # Validity of user and vgrid names is checked in this init function so
        # no need to worry about illegal directory traversal through variables
        (ret_val, msg, _) = \
            init_vgrid_script_add_rem(vgrid_name, client_id, cert_id,
                                      'member', configuration)
        if not ret_val:
            output_objects.append({'object_type': 'error_text', 'text':
                                   msg})
            status = returnvalues.CLIENT_ERROR
            continue

        # don't add if already an owner
        if vgrid_is_owner(vgrid_name, cert_id, configuration):
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 '%s is already an owner of %s or a parent %s.' %
                 (cert_id, vgrid_name, label)})
            status = returnvalues.CLIENT_ERROR
            continue

        # don't add if already a member unless rank is given
        if rank is None and vgrid_is_member(vgrid_name, cert_id,
                                            configuration):
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 '''%s is already a member of %s or a parent %s. Please remove
the person first and then try this operation again.''' %
                 (cert_id, vgrid_name, label)
                 })
            status = returnvalues.CLIENT_ERROR
            continue

        # owner or member of subvgrid?
        (list_status, subvgrids) = vgrid_list_subvgrids(vgrid_name,
                                                        configuration)
        if not list_status:
            output_objects.append({'object_type': 'error_text', 'text':
                                   'Error getting list of sub%ss: %s' %
                                   (label, subvgrids)})
            status = returnvalues.SYSTEM_ERROR
            continue

        # TODO: we DO allow ownership of sub vgrids with parent membership so we
        # should support the (cumbersome) relinking of vgrid shares here. Leave it
        # to user to do it manually for now with temporary removal of ownership

        # Reject the entry if the user already holds a role in any sub-vgrid
        skip_entity = False
        for subvgrid in subvgrids:
            if vgrid_is_owner(subvgrid, cert_id, configuration,
                              recursive=False):
                output_objects.append(
                    {'object_type': 'error_text', 'text':
                     """%(cert_id)s is already an owner of a sub-%(vgrid_label)s ('%(subvgrid)s').
While we DO support members being owners of sub-%(vgrid_label)ss, we do not
support adding parent %(vgrid_label)s members at the moment. Please
(temporarily) remove the person as owner of all sub-%(vgrid_label)ss first and
then try this operation again.""" % {'cert_id': cert_id,
                                     'subvgrid': subvgrid,
                                     'vgrid_label': label}})
                status = returnvalues.CLIENT_ERROR
                skip_entity = True
                break
            if vgrid_is_member(subvgrid, cert_id, configuration,
                               recursive=False):
                output_objects.append(
                    {'object_type': 'error_text', 'text':
                     """%s is already a member of a sub-%s ('%s').
Please remove the person first and then try this operation again.""" %
                     (cert_id, label, subvgrid)})
                status = returnvalues.CLIENT_ERROR
                skip_entity = True
                break
        if skip_entity:
            continue

        # Check if only rank change was requested and apply if so
        if rank is not None:
            (add_status, add_msg) = vgrid_add_members(configuration,
                                                      vgrid_name,
                                                      [cert_id], rank=rank)
            if not add_status:
                output_objects.append(
                    {'object_type': 'error_text', 'text': add_msg})
                status = returnvalues.SYSTEM_ERROR
            else:
                output_objects.append({'object_type': 'text', 'text':
                                       'changed %s to member %d' %
                                       (cert_id, rank)})
            # No further action after rank change as everything else exists
            continue

        # Getting here means cert_id is neither owner or member of any parent or
        # sub-vgrids.

        # Please note that base_dir must end in slash to avoid access to other
        # vgrid dirs when own name is a prefix of another name
        base_dir = os.path.abspath(os.path.join(configuration.vgrid_home,
                                                vgrid_name)) + os.sep
        user_dir = os.path.abspath(os.path.join(configuration.user_home,
                                                cert_dir)) + os.sep

        # make sure all dirs can be created (that a file or directory with the same
        # name do not exist prior to adding the member)
        if os.path.exists(user_dir + vgrid_name):
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 '''Could not add member, a file or directory in the home
directory called %s exists! (%s)''' % (vgrid_name,
                                       user_dir + vgrid_name)})
            status = returnvalues.CLIENT_ERROR
            continue

        # Add
        (add_status, add_msg) = vgrid_add_members(configuration, vgrid_name,
                                                  [cert_id])
        if not add_status:
            output_objects.append(
                {'object_type': 'error_text', 'text': add_msg})
            status = returnvalues.SYSTEM_ERROR
            continue

        vgrid_name_parts = vgrid_name.split('/')
        is_subvgrid = len(vgrid_name_parts) > 1
        if is_subvgrid:
            try:
                # vgrid_name = IMADA/STUD/BACH
                # vgrid_name_last_fragment = BACH
                vgrid_name_last_fragment = \
                    vgrid_name_parts[len(vgrid_name_parts) - 1].strip()

                # vgrid_name_without_last_fragment = IMADA/STUD/
                vgrid_name_without_last_fragment = \
                    ('/'.join(vgrid_name_parts[0:len(vgrid_name_parts) - 1])
                     + os.sep).strip()

                # create dirs if they do not exist
                dir1 = user_dir + vgrid_name_without_last_fragment
                if not os.path.isdir(dir1):
                    os.makedirs(dir1)
            except Exception, exc:
                # out of range? should not be possible due to is_subvgrid check
                output_objects.append(
                    {'object_type': 'error_text', 'text':
                     ('Could not create needed dirs on %s server! %s'
                      % (configuration.short_title, exc))})
                logger.error('%s when looking for dir %s.' % (exc, dir1))
                status = returnvalues.SYSTEM_ERROR
                continue

        # create symlink from users home directory to vgrid file directory
        link_src = os.path.abspath(configuration.vgrid_files_home + os.sep
                                   + vgrid_name) + os.sep
        link_dst = user_dir + vgrid_name
        # create symlink to vgrid files
        if not make_symlink(link_src, link_dst, logger):
            output_objects.append({'object_type': 'error_text', 'text':
                                   'Could not create link to %s files!' %
                                   label
                                   })
            logger.error('Could not create link to %s files! (%s -> %s)' %
                         (label, link_src, link_dst))
            status = returnvalues.SYSTEM_ERROR
            continue
        cert_id_added.append(cert_id)
def main(client_id, user_arguments_dict): """Main function used by front end""" (configuration, logger, output_objects, op_name) = \ initialize_main_variables(client_id) client_dir = client_id_dir(client_id) status = returnvalues.OK defaults = signature()[1] (validate_status, accepted) = validate_input_and_cert( user_arguments_dict, defaults, output_objects, client_id, configuration, allow_rejects=False, ) if not validate_status: return (accepted, returnvalues.CLIENT_ERROR) flags = ''.join(accepted['flags']) patterns = accepted['path'] if not safe_handler(configuration, 'post', op_name, client_id, get_csrf_limit(configuration), accepted): output_objects.append({ 'object_type': 'error_text', 'text': '''Only accepting CSRF-filtered POST requests to prevent unintended updates''' }) return (output_objects, returnvalues.CLIENT_ERROR) if not configuration.site_enable_jobs: output_objects.append({ 'object_type': 'error_text', 'text': '''Job execution is not enabled on this system''' }) return (output_objects, returnvalues.SYSTEM_ERROR) # Please note that base_dir must end in slash to avoid access to other # user dirs when own name is a prefix of another user name base_dir = os.path.abspath( os.path.join(configuration.user_home, client_dir)) + os.sep if verbose(flags): for flag in flags: output_objects.append({ 'object_type': 'text', 'text': '%s using flag: %s' % (op_name, flag) }) for pattern in patterns: # Check directory traversal attempts before actual handling to avoid # leaking information about file system layout while allowing # consistent error messages unfiltered_match = glob.glob(base_dir + pattern) match = [] for server_path in unfiltered_match: # IMPORTANT: path must be expanded to abs for proper chrooting abs_path = os.path.abspath(server_path) if not valid_user_path(configuration, abs_path, base_dir, True): # out of bounds - save user warning for later to allow # partial match: # ../*/* is technically allowed to match own files. 
logger.warning('%s tried to %s restricted path %s ! (%s)' % (client_id, op_name, abs_path, pattern)) continue match.append(abs_path) # Now actually treat list of allowed matchings and notify if no # (allowed) match if not match: output_objects.append({ 'object_type': 'file_not_found', 'name': pattern }) status = returnvalues.FILE_NOT_FOUND submitstatuslist = [] for abs_path in match: output_lines = [] relative_path = abs_path.replace(base_dir, '') submitstatus = { 'object_type': 'submitstatus', 'name': relative_path } try: (job_status, newmsg, job_id) = new_job(abs_path, client_id, configuration, False, True) except Exception, exc: logger.error("%s: failed on '%s': %s" % (op_name, relative_path, exc)) job_status = False newmsg = "%s failed on '%s' (is it a valid mRSL file?)"\ % (op_name, relative_path) job_id = None if not job_status: submitstatus['status'] = False submitstatus['message'] = newmsg status = returnvalues.CLIENT_ERROR else: submitstatus['status'] = True submitstatus['job_id'] = job_id submitstatuslist.append(submitstatus) output_objects.append({ 'object_type': 'submitstatuslist', 'submitstatuslist': submitstatuslist })
def main(client_id, user_arguments_dict, environ=None):
    """Main function used by front end.

    Handles chunked file uploads either for an authenticated user (client_id
    set) or through an anonymous sharelink (share_id set). This section
    validates input, enforces CSRF for all state-changing actions, and sets up
    the per-target upload context (target_dir, base_dir, redirect info, page
    decoration flags).
    """

    # Fall back to process environment when no request environ is supplied
    if environ is None:
        environ = os.environ

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    logger.info('Extracting input in %s' % op_name)
    status = returnvalues.OK
    defaults = signature()[1]

    logger.info('Extracted input in %s: %s' %
                (op_name, user_arguments_dict.keys()))

    # All non-file fields must be validated
    validate_args = dict([(key, user_arguments_dict.get(key, val)) for
                          (key, val) in user_arguments_dict.items() if not
                          key in manual_validation])
    # IMPORTANT: we must explicitly inlude CSRF token
    validate_args[csrf_field] = user_arguments_dict.get(csrf_field, [''])
    (validate_status, accepted) = validate_input(
        validate_args,
        defaults,
        output_objects,
        allow_rejects=False,
    )
    if not validate_status:
        logger.error('%s validation failed: %s (%s)' %
                     (op_name, validate_status, accepted))
        return (accepted, returnvalues.CLIENT_ERROR)

    logger.info('validated input in %s: %s' % (op_name,
                                               validate_args.keys()))

    action = accepted['action'][-1]
    current_dir = os.path.normpath(accepted['current_dir'][-1].lstrip(os.sep))
    flags = ''.join(accepted['flags'])
    share_id = accepted['share_id'][-1]
    output_format = accepted['output_format'][-1]

    # Read-only 'status' queries are exempt from the CSRF requirement; all
    # other actions change state and must pass the POST/CSRF check
    if action != "status":
        if not safe_handler(configuration, 'post', op_name, client_id,
                            get_csrf_limit(configuration), accepted):
            output_objects.append({
                'object_type': 'error_text', 'text': '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

    reject_write = False
    uploaded = []
    header_item = {'object_type': 'header', 'text': ''}
    # Always include a files reply even if empty
    output_objects.append(header_item)
    output_objects.append({'object_type': 'uploadfiles', 'files': uploaded})

    # Either authenticated user client_id set or sharelink ID
    if client_id:
        user_id = client_id
        target_dir = client_id_dir(client_id)
        base_dir = configuration.user_home
        redirect_name = configuration.site_user_redirect
        redirect_path = redirect_name
        id_args = ''
        page_title = 'Upload to User Directory: %s' % action
        userstyle = True
        widgets = True
    elif share_id:
        try:
            (share_mode, _) = extract_mode_id(configuration, share_id)
        except ValueError, err:
            logger.error('%s called with invalid share_id %s: %s' %
                         (op_name, share_id, err))
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Invalid sharelink ID: %s' % share_id
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        # TODO: load and check sharelink pickle (currently requires client_id)
        user_id = 'anonymous user through share ID %s' % share_id
        # NOTE: we must return uploaded reply so we delay read-only failure
        if share_mode == 'read-only':
            logger.error('%s called without write access: %s' %
                         (op_name, accepted))
            reject_write = True
        target_dir = os.path.join(share_mode, share_id)
        base_dir = configuration.sharelink_home
        redirect_name = 'share_redirect'
        redirect_path = os.path.join(redirect_name, share_id)
        id_args = 'share_id=%s;' % share_id
        page_title = 'Upload to Shared Directory: %s' % action
        userstyle = False
        widgets = False
def main(client_id, user_arguments_dict):
    """Main function used by front end.

    Creates or updates a frozen archive of the requested flavor. Validates
    input and CSRF, builds the archive metadata (new archive vs. update of an
    existing one), parses the copy/move/upload file lists from the raw form
    data, hands everything to create_frozen_archive, and finally emits status
    plus view/edit/publish/finalize links depending on the resulting state.
    """

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    defaults = signature()[1]
    title_entry = find_entry(output_objects, 'title')
    label = "%s" % configuration.site_vgrid_label
    title_entry['text'] = "Create Archive"
    # NOTE: Delay header entry here to include freeze flavor

    # All non-file fields must be validated
    validate_args = dict([(key, user_arguments_dict.get(key, val)) for
                          (key, val) in defaults.items()])
    # IMPORTANT: we must explicitly inlude CSRF token
    validate_args[csrf_field] = user_arguments_dict.get(csrf_field, [
        'AllowMe'])
    (validate_status, accepted) = validate_input_and_cert(
        validate_args,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    flavor = accepted['flavor'][-1].strip()
    freeze_state = accepted['freeze_state'][-1].strip()

    # Archive creation modifies state, so only CSRF-protected POST is accepted
    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append(
            {'object_type': 'error_text', 'text': '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
             })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # NOTE: idiomatic membership tests instead of 'not X in d.keys()'
    if flavor not in freeze_flavors:
        output_objects.append({'object_type': 'error_text', 'text':
                               'Invalid freeze flavor: %s' % flavor})
        return (output_objects, returnvalues.CLIENT_ERROR)
    if freeze_state not in freeze_flavors[flavor]['states'] + [keyword_auto]:
        output_objects.append({'object_type': 'error_text', 'text':
                               'Invalid freeze state: %s' % freeze_state})
        return (output_objects, returnvalues.CLIENT_ERROR)

    title = freeze_flavors[flavor]['createfreeze_title']
    output_objects.append({'object_type': 'header', 'text': title})
    if not configuration.site_enable_freeze:
        output_objects.append({'object_type': 'text', 'text':
                               '''Freezing archives is disabled on this site.
Please contact the site admins %s if you think it should be enabled.
''' % configuration.admin_email})
        return (output_objects, returnvalues.OK)

    # jquery support for confirmation on freeze
    (add_import, add_init, add_ready) = man_base_js(configuration, [])
    title_entry['script']['advanced'] += add_import
    title_entry['script']['init'] += add_init
    title_entry['script']['ready'] += add_ready
    output_objects.append({'object_type': 'html_form',
                           'text': man_base_html(configuration)})

    freeze_id = accepted['freeze_id'][-1].strip()
    freeze_name = accepted['freeze_name'][-1].strip()
    freeze_description = accepted['freeze_description'][-1]
    freeze_author = accepted['freeze_author'][-1].strip()
    freeze_department = accepted['freeze_department'][-1].strip()
    freeze_organization = accepted['freeze_organization'][-1].strip()
    freeze_publish = accepted['freeze_publish'][-1].strip()
    do_publish = (freeze_publish.lower() in ('on', 'true', 'yes', '1'))

    # Share init of base meta with lookup of default state in freeze_flavors
    if not freeze_state or freeze_state == keyword_auto:
        freeze_state = freeze_flavors[flavor]['states'][0]
    freeze_meta = {'ID': freeze_id, 'STATE': freeze_state}

    # New archives must have name and description set
    if freeze_id == keyword_auto:
        logger.debug("creating a new %s archive for %s" % (flavor, client_id))
        if not freeze_name or freeze_name == keyword_auto:
            freeze_name = '%s-%s' % (flavor, datetime.datetime.now())
        if not freeze_description:
            if flavor == 'backup':
                freeze_description = 'manual backup archive created on %s' % \
                    datetime.datetime.now()
            else:
                output_objects.append(
                    {'object_type': 'error_text', 'text':
                     'You must provide a description for the archive!'})
                return (output_objects, returnvalues.CLIENT_ERROR)
        if flavor == 'phd' and (not freeze_author or not freeze_department):
            output_objects.append({'object_type': 'error_text', 'text': """
You must provide author and department for the thesis!"""})
            return (output_objects, returnvalues.CLIENT_ERROR)
        freeze_meta.update(
            {'FLAVOR': flavor, 'NAME': freeze_name,
             'DESCRIPTION': freeze_description, 'AUTHOR': freeze_author,
             'DEPARTMENT': freeze_department,
             'ORGANIZATION': freeze_organization, 'PUBLISH': do_publish})
    elif is_frozen_archive(client_id, freeze_id, configuration):
        logger.debug("updating existing %s archive for %s" %
                     (flavor, client_id))
        # Update any explicitly provided fields (may be left empty on finalize)
        changes = {}
        if freeze_name and freeze_name != keyword_auto:
            changes['NAME'] = freeze_name
        if freeze_author:
            changes['AUTHOR'] = freeze_author
        if freeze_description:
            changes['DESCRIPTION'] = freeze_description
        if freeze_publish:
            changes['PUBLISH'] = do_publish
        logger.debug("updating existing %s archive for %s with: %s" %
                     (flavor, client_id, changes))
        logger.debug("publish is %s based on %s" % (do_publish,
                                                    freeze_publish))
        freeze_meta.update(changes)
    else:
        logger.error("no such %s archive for %s: %s" % (flavor, client_id,
                                                        freeze_id))
        output_objects.append({'object_type': 'error_text', 'text': """
Invalid archive ID %s - you must either create a new archive or edit an
existing archive of yours!""" % freeze_id})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Now parse and validate files to archive
    # NOTE: iterate a snapshot of the keys since we delete during the loop;
    # 'in' replaces Python-2-only dict.has_key for py3 compatibility
    for name in list(defaults):
        if name in user_arguments_dict:
            del user_arguments_dict[name]

    (copy_files, copy_rejected) = parse_form_copy(user_arguments_dict,
                                                  client_id, configuration)
    (move_files, move_rejected) = parse_form_move(user_arguments_dict,
                                                  client_id, configuration)
    (upload_files, upload_rejected) = parse_form_upload(user_arguments_dict,
                                                        client_id,
                                                        configuration)
    if copy_rejected + move_rejected + upload_rejected:
        output_objects.append({'object_type': 'error_text', 'text':
                               'Errors parsing freeze files: %s' %
                               '\n '.join(copy_rejected + move_rejected +
                                          upload_rejected)})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # NOTE: this may be a new or an existing pending archive, and it will fail
    # if archive is already under update
    (retval, retmsg) = create_frozen_archive(freeze_meta, copy_files,
                                             move_files, upload_files,
                                             client_id, configuration)
    if not retval:
        output_objects.append({'object_type': 'error_text', 'text':
                               'Error creating/updating archive: %s'
                               % retmsg})
        return (output_objects, returnvalues.SYSTEM_ERROR)

    # Make sure we have freeze_id and other updated fields
    freeze_meta.update(retmsg)
    freeze_id = freeze_meta['ID']
    logger.info("%s: successful for '%s': %s" % (op_name, freeze_id,
                                                 client_id))

    # Return simple status mainly for use in scripting
    output_objects.append({'object_type': 'freezestatus', 'freeze_id':
                           freeze_id, 'flavor': flavor, 'freeze_state':
                           freeze_state})
    if freeze_state == keyword_pending:
        publish_hint = 'Preview published archive page in a new window/tab'
        publish_text = 'Preview publishing'
        output_objects.append({'object_type': 'text', 'text': """
Saved *preliminary* %s archive with ID %s . You can continue inspecting and
changing it until you're satisfied, then finalize it for actual persistent
freezing.""" % (flavor, freeze_id)})
    else:
        publish_hint = 'View published archive page in a new window/tab'
        publish_text = 'Open published archive'
        output_objects.append({'object_type': 'text', 'text':
                               'Successfully froze %s archive with ID %s .'
                               % (flavor, freeze_id)})

    if do_publish:
        public_url = published_url(freeze_meta, configuration)
        output_objects.append({'object_type': 'text', 'text': ''})
        output_objects.append({
            'object_type': 'link',
            'destination': public_url,
            'class': 'previewarchivelink iconspace genericbutton',
            'title': publish_hint,
            'text': publish_text,
            'target': '_blank',
        })
        output_objects.append({'object_type': 'text', 'text': ''})

    # Always allow show archive
    output_objects.append({
        'object_type': 'link',
        'destination': 'showfreeze.py?freeze_id=%s;flavor=%s' %
        (freeze_id, flavor),
        'class': 'viewarchivelink iconspace genericbutton',
        'title': 'View details about your %s archive' % flavor,
        'text': 'View details',
    })

    if freeze_state == keyword_pending:
        output_objects.append({'object_type': 'text', 'text': ''})
        output_objects.append({
            'object_type': 'link',
            'destination': 'adminfreeze.py?freeze_id=%s' % freeze_id,
            'class': 'editarchivelink iconspace genericbutton',
            'title': 'Further modify your pending %s archive' % flavor,
            'text': 'Edit archive',
        })
        output_objects.append({'object_type': 'text', 'text': ''})
        output_objects.append({'object_type': 'html_form', 'text': """
<br/><hr/><br/>
<p class='warn_message'>IMPORTANT: you still have to explicitly finalize your
archive before you get the additional data integrity/persistance guarantees
like tape archiving.
</p>"""})
        # Offer a confirm-dialog guarded finalize form with a fresh CSRF token
        form_method = 'post'
        target_op = 'createfreeze'
        csrf_limit = get_csrf_limit(configuration)
        csrf_token = make_csrf_token(configuration, form_method, target_op,
                                     client_id, csrf_limit)
        helper = html_post_helper('createfreeze', '%s.py' % target_op,
                                  {'freeze_id': freeze_id,
                                   'freeze_state': keyword_final,
                                   'flavor': flavor,
                                   csrf_field: csrf_token})
        output_objects.append({'object_type': 'html_form', 'text': helper})
        output_objects.append({
            'object_type': 'link',
            'destination':
            "javascript: confirmDialog(%s, '%s');" %
            ('createfreeze', 'Really finalize %s?' % freeze_id),
            'class': 'finalizearchivelink iconspace genericbutton',
            'title': 'Finalize %s archive to prevent further changes' %
            flavor,
            'text': 'Finalize archive',
        })

    return (output_objects, returnvalues.OK)
def main(client_id, user_arguments_dict):
    """Main function used by front end.

    Restarts one or more storage units on a resource the caller owns. Each
    store is first stopped and then started again via Worker threads; with
    parallel=true all stop tasks (and later all start tasks) run concurrently,
    otherwise each task is joined immediately after launch.
    """

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    output_objects.append({
        'object_type': 'text',
        'text': '--------- Trying to RESTART store ----------'
    })
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    unique_resource_name = accepted['unique_resource_name'][-1]
    store_name_list = accepted['store_name']
    # NOTE: renamed from 'all' to avoid shadowing the builtin of that name
    restart_all = accepted['all'][-1].lower() == 'true'
    parallel = accepted['parallel'][-1].lower() == 'true'

    if not configuration.site_enable_resources:
        output_objects.append({
            'object_type': 'error_text', 'text':
            '''Resources are not enabled on this system'''
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    # Restart modifies state, so only CSRF-protected POST is accepted
    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type': 'error_text', 'text': '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not is_owner(client_id, unique_resource_name,
                    configuration.resource_home, logger):
        output_objects.append({
            'object_type': 'error_text', 'text':
            'Failure: You must be an owner of ' + unique_resource_name +
            ' to restart the store!'
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    exit_status = returnvalues.OK

    if restart_all:
        store_name_list = get_all_store_names(unique_resource_name)

    # take action based on supplied list of stores
    if not store_name_list:
        output_objects.append({
            'object_type': 'text', 'text':
            "No stores specified and 'all' argument not set to true: Nothing to do!"
        })

    # Launch a stop task per store; joined immediately unless parallel
    workers = []
    for store_name in store_name_list:
        task = Worker(target=stop_resource_store,
                      args=(unique_resource_name, store_name,
                            configuration.resource_home, logger))
        workers.append((store_name, [task]))
        task.start()
        if not parallel:
            task.join()

    # Complete each stop thread before launching corresponding start threads
    for (store_name, task_list) in workers:
        # We could optimize with non-blocking join here but keep it simple
        # for now as final result will need to wait for slowest member anyway
        task_list[0].join()
        task = Worker(target=start_resource_store,
                      args=(unique_resource_name, store_name,
                            configuration.resource_home, logger))
        task_list.append(task)
        task.start()
        if not parallel:
            task.join()

    # Collect results: task_list holds [stop_task, start_task] per store.
    # NOTE(review): a stop failure alone does not flip exit_status here, only
    # a start failure does - presumably intentional (restart counts as OK if
    # the store came back up), but worth confirming.
    for (store_name, task_list) in workers:
        (status, msg) = task_list[0].finish()
        output_objects.append({
            'object_type': 'header',
            'text': 'Restart store output:'
        })
        if not status:
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Problems stopping store during restart: %s' % msg
            })
        (status2, msg2) = task_list[1].finish()
        if not status2:
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Problems starting store during restart: %s' % msg2
            })
            exit_status = returnvalues.SYSTEM_ERROR
        if status and status2:
            output_objects.append({
                'object_type': 'text',
                'text':
                'Restart store success: Stop output: %s ; Start output: %s'
                % (msg, msg2)
            })
    return (output_objects, exit_status)
def _check_vgrid_owner(vgrid_owner, vgrid_name, output_objects):
    """Shared ownership guard for all settings-changing actions: append
    an error message and return False unless the caller owns the vgrid.
    """
    if vgrid_owner == False:
        ERROR_MSG = \
            "Ownership of vgrid: '%s' required to change imagepreview settings" \
            % vgrid_name
        output_objects.append({'object_type': 'error_text',
                               'text': ERROR_MSG})
        return False
    return True


def main(client_id, user_arguments_dict):
    """Main function used by front end: dispatch imagepreview management
    actions (list/get/create/update/remove/reset settings as well as
    get/remove/clean/refresh of preview data) for a path inside a vgrid
    share.

    Returns the usual (output_objects, status) tuple for the output
    formatter.
    """
    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        WARNING_MSG = str(accepted)
        output_objects.append({'object_type': 'warning',
                               'text': WARNING_MSG})
        return (accepted, returnvalues.CLIENT_ERROR)

    # Convert accepted values to string and filter out NON-set values
    accepted_joined_values = {key: ''.join(value)
                              for (key, value) in accepted.iteritems()
                              if len(value) > 0}
    action = accepted_joined_values['action']
    flags = accepted_joined_values['flags']
    path = accepted_joined_values['path']
    extension = accepted['extension'][-1].strip()

    logger.debug('%s from %s: %s' % (op_name, client_id, accepted))

    if action not in valid_actions:
        output_objects.append(
            {'object_type': 'error_text', 'text':
             'Invalid action "%s" (supported: %s)'
             % (action, ', '.join(valid_actions))})
        return (output_objects, returnvalues.CLIENT_ERROR)

    if action in post_actions:
        if not safe_handler(
            configuration,
            'post',
            op_name,
            client_id,
            get_csrf_limit(configuration),
            accepted,
        ):
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 '''Only accepting CSRF-filtered POST requests to prevent unintended updates'''})
            return (output_objects, returnvalues.CLIENT_ERROR)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name
    base_dir = os.path.abspath(
        os.path.join(configuration.user_home, client_dir)) + os.sep
    abs_path = os.path.join(base_dir, path)

    title_entry = find_entry(output_objects, 'title')
    title_entry['text'] = 'IMAGEPREVIEW Management'
    output_objects.append({'object_type': 'header',
                           'text': 'IMAGEPREVIEW Management'})

    status = returnvalues.OK
    vgrid_name = in_vgrid_share(configuration, abs_path)
    vgrid_owner = vgrid_is_owner(vgrid_name, client_id, configuration)

    if vgrid_name is None:
        status = returnvalues.ERROR
        ERROR_MSG = "No vgrid found for path: '%s'" % path
        output_objects.append({'object_type': 'error_text',
                               'text': ERROR_MSG})

    if status == returnvalues.OK:
        if action == 'list_settings':
            status = list_settings(configuration, abs_path, path,
                                   output_objects)
            logger.debug('list exit status: %s' % str(status))
        elif action == 'remove_setting':
            if _check_vgrid_owner(vgrid_owner, vgrid_name, output_objects):
                status = remove_setting(configuration, abs_path, path,
                                        extension, output_objects)
            else:
                status = returnvalues.ERROR
            logger.debug('remove_setting exit status: %s' % str(status))
        elif action == 'get_setting':
            status = get_setting(configuration, abs_path, path, extension,
                                 output_objects)
            logger.debug('get_setting exit status: %s' % str(status))
        elif action == 'update_setting':
            if _check_vgrid_owner(vgrid_owner, vgrid_name, output_objects):
                status = update_setting(
                    configuration,
                    base_dir,
                    abs_path,
                    path,
                    extension,
                    accepted_joined_values,
                    output_objects,
                )
            else:
                status = returnvalues.ERROR
            logger.debug('update_setting exit status: %s' % str(status))
        elif action == 'create_setting':
            if _check_vgrid_owner(vgrid_owner, vgrid_name, output_objects):
                status = create_setting(
                    configuration,
                    client_id,
                    base_dir,
                    abs_path,
                    path,
                    extension,
                    accepted_joined_values,
                    output_objects,
                )
                # BUGFIX: an unconditional 'status = returnvalues.OK' here
                # used to clobber the create_setting result, reporting
                # success even when creation failed
            else:
                status = returnvalues.ERROR
            logger.debug('create_setting exit status: %s' % str(status))
        elif action == 'reset_setting':
            if _check_vgrid_owner(vgrid_owner, vgrid_name, output_objects):
                status = reset_settings(configuration, abs_path, path,
                                        output_objects, extension)
            else:
                status = returnvalues.ERROR
            logger.debug('reset exit status: %s' % str(status))
        elif action == 'get':
            status = get(configuration, base_dir, path, output_objects)
            logger.debug('get exit status: %s' % str(status))
        elif action == 'remove':
            if _check_vgrid_owner(vgrid_owner, vgrid_name, output_objects):
                status = remove(configuration, base_dir, abs_path, path,
                                output_objects)
            else:
                status = returnvalues.ERROR
            logger.debug('remove exit status: %s' % str(status))
        elif action == 'clean':
            if _check_vgrid_owner(vgrid_owner, vgrid_name, output_objects):
                status = clean(configuration, base_dir, abs_path, path,
                               output_objects)
            else:
                status = returnvalues.ERROR
            logger.debug('clean exit status: %s' % str(status))
        elif action == 'cleanrecursive':
            if _check_vgrid_owner(vgrid_owner, vgrid_name, output_objects):
                status = clean(
                    configuration,
                    base_dir,
                    abs_path,
                    path,
                    output_objects,
                    recursive=True,
                )
            else:
                status = returnvalues.ERROR
            logger.debug('cleanrecursive exit status: %s' % str(status))
        elif action == 'refresh':
            if _check_vgrid_owner(vgrid_owner, vgrid_name, output_objects):
                status = refresh(
                    configuration,
                    client_id,
                    base_dir,
                    abs_path,
                    path,
                    output_objects,
                )
            else:
                status = returnvalues.ERROR
            logger.debug('refresh exit status: %s' % str(status))
        else:
            ERROR_MSG = "action: '%s' _NOT_ implemented yet" \
                % str(action)
            output_objects.append({'object_type': 'error_text',
                                   'text': ERROR_MSG})

    logger.debug('output_objects: %s' % str(output_objects))
    logger.debug('status: %s' % str(status))
    return (output_objects, status)
def main(client_id, user_arguments_dict):
    """Main function used by front end: create or update a resource
    configuration on behalf of client_id.

    Only the path-critical fields (plus the CSRF token) are validated
    here; the remaining user arguments are written verbatim into the
    generated config file which is then handed to the parser for full
    validation.
    """
    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    defaults = signature()[1]

    if not configuration.site_enable_resources:
        output_objects.append(
            {'object_type': 'error_text', 'text':
             '''Resources are not enabled on this system'''})
        return (output_objects, returnvalues.SYSTEM_ERROR)

    # IMPORTANT: we can not validate input completely here!
    # We validate the parts used in the path manipulation and only use
    # the remaining variables directly in the generated config file that
    # is then handed to the parser for full validation.

    critical_arguments = {}
    critical_fields = defaults.keys()
    # IMPORTANT: we must explicitly include CSRF token
    critical_fields.append(csrf_field)
    # Missing critical fields default to a single empty string value
    for field in critical_fields:
        critical_arguments[field] = user_arguments_dict.get(field, [''])

    (validate_status, accepted) = validate_input_and_cert(
        critical_arguments,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    hosturl = accepted['HOSTURL'][-1]
    hostidentifier = accepted['HOSTIDENTIFIER'][-1]
    # An existing identifier means update of a known resource; an empty
    # one means create with a dynamically assigned identifier
    if hostidentifier:
        action = 'update'
    else:
        action = 'create'
        hostidentifier = keyword_auto
        accepted['HOSTIDENTIFIER'] = [hostidentifier]
    resource_id = "%s.%s" % (hosturl, hostidentifier)

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append(
            {'object_type': 'error_text', 'text':
             '''Only accepting CSRF-filtered POST requests to prevent unintended updates'''})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Override original critical values with the validated ones
    for field in critical_fields:
        user_arguments_dict[field] = accepted[field]

    status = returnvalues.OK

    title_entry = find_entry(output_objects, 'title')
    title_entry['text'] = 'Resource edit actions'
    output_objects.append({'object_type': 'header',
                           'text': 'Resource edit actions'})

    conf = prepare_conf(configuration, user_arguments_dict, resource_id)
    if 'create' == action:
        logger.info('%s is trying to create resource %s (%s)'
                    % (client_id, hosturl, conf))
        output_objects.append(
            {'object_type': 'sectionheader', 'text':
             'Creating resource configuration'})

        # We only get here if hostidentifier is dynamic so no access control
        if not handle_update(configuration, client_id, resource_id, conf,
                             output_objects, True):
            status = returnvalues.SYSTEM_ERROR
    elif 'update' == action:
        logger.info('%s is trying to update resource %s (%s)'
                    % (client_id, resource_id, conf))
        output_objects.append(
            {'object_type': 'sectionheader', 'text':
             'Updating existing resource configuration'})

        # Prevent unauthorized access to existing resources
        (owner_status, owner_list) = resource_owners(configuration,
                                                     resource_id)
        if not owner_status:
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 "Could not look up '%s' owners - no such resource?"
                 % resource_id})
            status = returnvalues.SYSTEM_ERROR
        elif client_id in owner_list:
            if not handle_update(configuration, client_id, resource_id,
                                 conf, output_objects, False):
                status = returnvalues.SYSTEM_ERROR
        else:
            status = returnvalues.CLIENT_ERROR
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 'You can only update your own resources!'})
    else:
        # Should be unreachable given the action assignment above
        status = returnvalues.CLIENT_ERROR
        output_objects.append({'object_type': 'error_text',
                               'text': 'Unknown action request!'})

    return (output_objects, status)
def main(client_id, user_arguments_dict): """Main function used by front end""" (configuration, logger, output_objects, op_name) = \ initialize_main_variables(client_id, op_header=False) client_dir = client_id_dir(client_id) defaults = signature()[1] (validate_status, accepted) = validate_input_and_cert( user_arguments_dict, defaults, output_objects, client_id, configuration, allow_rejects=False, ) if not validate_status: return (accepted, returnvalues.CLIENT_ERROR) action = accepted['action'][-1] transfer_id = accepted['transfer_id'][-1] protocol = accepted['protocol'][-1] fqdn = accepted['fqdn'][-1] port = accepted['port'][-1] src_list = accepted['transfer_src'] dst = accepted['transfer_dst'][-1] username = accepted['username'][-1] password = accepted['transfer_pw'][-1] key_id = accepted['key_id'][-1] # Skip empty exclude entries as they break backend calls exclude_list = [i for i in accepted['exclude'] if i] notify = accepted['notify'][-1] compress = accepted['compress'][-1] flags = accepted['flags'] anon_checked, pw_checked, key_checked = '', '', '' if username: if key_id: key_checked = 'checked' init_login = "******" else: pw_checked = 'checked' init_login = "******" else: anon_checked = 'checked' init_login = "******" use_compress = False if compress.lower() in ("true", "1", "yes", "on"): use_compress = True title_entry = find_entry(output_objects, 'title') title_entry['text'] = 'Background Data Transfers' # jquery support for tablesorter and confirmation on delete/redo: # datatransfer and key tables initially sorted by 0 (id) */ datatransfer_spec = { 'table_id': 'datatransferstable', 'pager_id': 'datatransfers_pager', 'sort_order': '[[0,0]]' } transferkey_spec = { 'table_id': 'transferkeystable', 'pager_id': 'transferkeys_pager', 'sort_order': '[[0,0]]' } (add_import, add_init, add_ready) = man_base_js(configuration, [datatransfer_spec, transferkey_spec]) add_init += ''' var fields = 0; var max_fields = 20; var src_input = "<label for=\'transfer_src\'>Source 
path(s)</label>"; src_input += "<input id=\'src_FIELD\' type=text size=60 name=transfer_src value=\'PATH\' title=\'relative source path: local for exports and remote for imports\' />"; src_input += "<input id=\'src_file_FIELD\' type=radio onclick=\'setSrcDir(FIELD, false);\' checked />Source file"; src_input += "<input id=\'src_dir_FIELD\' type=radio onclick=\'setSrcDir(FIELD, true);\' />Source directory (recursive)"; src_input += "<br />"; var exclude_input = "<label for=\'exclude\'>Exclude path(s)</label>"; exclude_input += "<input type=text size=60 name=exclude value=\'PATH\' title=\'relative path or regular expression to exclude\' />"; exclude_input += "<br />"; function addSource(path, is_dir) { if (path === undefined) { path = ""; } if (is_dir === undefined) { is_dir = false; } if (fields < max_fields) { $("#srcfields").append(src_input.replace(/FIELD/g, fields).replace(/PATH/g, path)); setSrcDir(fields, is_dir); fields += 1; } else { alert("Maximum " + max_fields + " source fields allowed!"); } } function addExclude(path) { if (path === undefined) { path = ""; } $("#excludefields").append(exclude_input.replace(/PATH/g, path)); } function setDir(target, field_no, is_dir) { var id_prefix = "#"+target+"_"; var input_id = id_prefix+field_no; var file_id = id_prefix+"file_"+field_no; var dir_id = id_prefix+"dir_"+field_no; var value = $(input_id).val(); $(file_id).removeAttr("checked"); $(dir_id).removeAttr("checked"); if (is_dir) { $(dir_id).prop("checked", "checked"); if(value.substr(-1) != "/") { value += "/"; } } else { $(file_id).prop("checked", "checked"); if(value.substr(-1) == "/") { value = value.substr(0, value.length - 1); } } $(input_id).val(value); return false; } function setSrcDir(field_no, is_dir) { return setDir("src", field_no, is_dir); } function setDstDir(field_no, is_dir) { return setDir("dst", field_no, is_dir); } function refreshSrcDir(field_no) { var dir_id = "#src_dir_"+field_no; var is_dir = $(dir_id).prop("checked"); return 
setSrcDir(field_no, is_dir); } function refreshDstDir(field_no) { var dir_id = "#dst_dir_"+field_no; var is_dir = $(dir_id).prop("checked"); return setDstDir(field_no, is_dir); } function setDefaultPort() { port_map = {"http": 80, "https": 443, "sftp": 22, "scp": 22, "ftp": 21, "ftps": 21, "webdav": 80, "webdavs": 443, "rsyncssh": 22, "rsyncd": 873}; var protocol = $("#protocol_select").val(); var port = port_map[protocol]; if (port != undefined) { $("#port_input").val(port); } else { alert("no default port provided for "+protocol); } } function beforeSubmit() { for(var i=0; i < fields; i++) { refreshSrcDir(i); } refreshDstDir(0); // Proceed with submit return true; } function doSubmit() { $("#submit-request-transfer").click(); } function enableLogin(method) { $("#anonymous_choice").removeAttr("checked"); $("#userpassword_choice").removeAttr("checked"); $("#userkey_choice").removeAttr("checked"); $("#username").prop("disabled", false); $("#password").prop("disabled", true); $("#key").prop("disabled", true); $("#login_fields").show(); $("#password_entry").hide(); $("#key_entry").hide(); if (method == "password") { $("#userpassword_choice").prop("checked", "checked"); $("#password").prop("disabled", false); $("#password_entry").show(); } else if (method == "key") { $("#userkey_choice").prop("checked", "checked"); $("#key").prop("disabled", false); $("#key_entry").show(); } else { $("#anonymous_choice").prop("checked", "checked"); $("#username").prop("disabled", true); $("#login_fields").hide(); } } ''' # Mangle ready handling to begin with dynamic init and end with tab init pre_ready = ''' enableLogin("%s"); ''' % init_login for src in src_list or ['']: pre_ready += ''' addSource("%s", %s); ''' % (src, ("%s" % src.endswith('/')).lower()) for exclude in exclude_list or ['']: pre_ready += ''' addExclude("%s"); ''' % exclude add_ready = ''' %s %s /* NOTE: requires managers CSS fix for proper tab bar height */ $(".datatransfer-tabs").tabs(); 
$("#logarea").scrollTop($("#logarea")[0].scrollHeight); ''' % (pre_ready, add_ready) title_entry['script']['advanced'] += add_import title_entry['script']['init'] += add_init title_entry['script']['ready'] += add_ready output_objects.append({ 'object_type': 'html_form', 'text': man_base_html(configuration) }) output_objects.append({ 'object_type': 'header', 'text': 'Manage background data transfers' }) if not configuration.site_enable_transfers: output_objects.append({ 'object_type': 'text', 'text': '''Backgroung data transfers are disabled on this site. Please contact the site admins %s if you think they should be enabled. ''' % configuration.admin_email }) return (output_objects, returnvalues.OK) logger.info('datatransfer %s from %s' % (action, client_id)) if not action in valid_actions: output_objects.append({ 'object_type': 'error_text', 'text': 'Invalid action "%s" (supported: %s)' % (action, ', '.join(valid_actions)) }) return (output_objects, returnvalues.CLIENT_ERROR) if action in post_actions: if not safe_handler(configuration, 'post', op_name, client_id, get_csrf_limit(configuration), accepted): output_objects.append({ 'object_type': 'error_text', 'text': '''Only accepting CSRF-filtered POST requests to prevent unintended updates''' }) return (output_objects, returnvalues.CLIENT_ERROR) (load_status, transfer_map) = load_data_transfers(configuration, client_id) if not load_status: transfer_map = {} restrict_list = [] for from_fqdn in configuration.site_transfers_from: restrict_list += [from_fqdn, socket.gethostbyname(from_fqdn)] restrict_str = 'from="%s",no-pty,' % ','.join(restrict_list) restrict_str += 'no-port-forwarding,no-agent-forwarding,no-X11-forwarding' restrict_template = ''' As usual it is a good security measure to prepend a <em>from</em> restriction when you know the key will only be used from a single location.<br/> In this case the keys will only ever be used from %s and will not need much else, so the public key can be inserted in your 
authorized_keys file as: <br/> <p> <textarea class="publickey" rows="5" readonly="readonly">%s %%s</textarea> </p> ''' % (configuration.short_title, restrict_str) form_method = 'post' csrf_limit = get_csrf_limit(configuration) target_op = 'datatransfer' csrf_token = make_csrf_token(configuration, form_method, target_op, client_id, csrf_limit) if action in get_actions: datatransfers = [] for (saved_id, transfer_dict) in transfer_map.items(): transfer_item = build_transferitem_object(configuration, transfer_dict) transfer_item['status'] = transfer_item.get('status', 'NEW') data_url = '' # NOTE: we need to urlencode any exotic chars in paths here if transfer_item['action'] == 'import': enc_path = quote(("%(dst)s" % transfer_item)) data_url = "fileman.py?path=%s" % enc_path elif transfer_item['action'] == 'export': enc_paths = [quote(i) for i in transfer_item['src']] data_url = "fileman.py?path=" + ';path='.join(enc_paths) if data_url: transfer_item['viewdatalink'] = { 'object_type': 'link', 'destination': data_url, 'class': 'viewlink iconspace', 'title': 'View local component of %s' % saved_id, 'text': '' } transfer_item['viewoutputlink'] = { 'object_type': 'link', 'destination': "fileman.py?path=transfer_output/%s/" % saved_id, 'class': 'infolink iconspace', 'title': 'View status files for %s' % saved_id, 'text': '' } # Edit is just a call to self with fillimport set args = [('action', 'fill%(action)s' % transfer_dict), ('key_id', '%(key)s' % transfer_dict), ('transfer_dst', '%(dst)s' % transfer_dict)] for src in transfer_dict['src']: args.append(('transfer_src', src)) for exclude in transfer_dict.get('exclude', []): args.append(('exclude', exclude)) for field in edit_fields: val = transfer_dict.get(field, '') args.append((field, val)) transfer_args = urlencode(args, True) transfer_item['edittransferlink'] = { 'object_type': 'link', 'destination': "%s.py?%s" % (target_op, transfer_args), 'class': 'editlink iconspace', 'title': 'Edit or duplicate transfer %s' % 
saved_id, 'text': '' } js_name = 'delete%s' % hexlify(saved_id) helper = html_post_helper( js_name, '%s.py' % target_op, { 'transfer_id': saved_id, 'action': 'deltransfer', csrf_field: csrf_token }) output_objects.append({'object_type': 'html_form', 'text': helper}) transfer_item['deltransferlink'] = { 'object_type': 'link', 'destination': "javascript: confirmDialog(%s, '%s');" % (js_name, 'Really remove %s?' % saved_id), 'class': 'removelink iconspace', 'title': 'Remove %s' % saved_id, 'text': '' } js_name = 'redo%s' % hexlify(saved_id) helper = html_post_helper( js_name, '%s.py' % target_op, { 'transfer_id': saved_id, 'action': 'redotransfer', csrf_field: csrf_token }) output_objects.append({'object_type': 'html_form', 'text': helper}) transfer_item['redotransferlink'] = { 'object_type': 'link', 'destination': "javascript: confirmDialog(%s, '%s');" % (js_name, 'Really reschedule %s?' % saved_id), 'class': 'refreshlink iconspace', 'title': 'Reschedule %s' % saved_id, 'text': '' } datatransfers.append(transfer_item) #logger.debug("found datatransfers: %s" % datatransfers) log_path = os.path.join(configuration.user_home, client_id_dir(client_id), "transfer_output", configuration.site_transfer_log) show_lines = 40 log_lines = read_tail(log_path, show_lines, logger) available_keys = load_user_keys(configuration, client_id) if available_keys: key_note = '' else: key_note = '''No keys available - you can add a key for use in transfers below.''' if action.endswith('import'): transfer_action = 'import' elif action.endswith('export'): transfer_action = 'export' else: transfer_action = 'unknown' import_checked, export_checked = 'checked', '' toggle_quiet, scroll_to_create = '', '' if action in ['fillimport', 'fillexport']: if quiet(flags): toggle_quiet = ''' <script> $("#wrap-tabs").hide(); $("#quiet-mode-content").show(); </script> ''' scroll_to_create = ''' <script> $("html, body").animate({ scrollTop: $("#createtransfer").offset().top }, 2000); </script> ''' if action == 
'fillimport': import_checked = 'checked' elif action == 'fillexport': export_checked = 'checked' import_checked = '' fill_helpers = { 'import_checked': import_checked, 'export_checked': export_checked, 'anon_checked': anon_checked, 'pw_checked': pw_checked, 'key_checked': key_checked, 'transfer_id': transfer_id, 'protocol': protocol, 'fqdn': fqdn, 'port': port, 'username': username, 'password': password, 'key_id': key_id, 'transfer_src_string': ', '.join(src_list), 'transfer_src': src_list, 'transfer_dst': dst, 'exclude': exclude_list, 'compress': use_compress, 'notify': notify, 'toggle_quiet': toggle_quiet, 'scroll_to_create': scroll_to_create, 'transfer_action': transfer_action, 'form_method': form_method, 'csrf_field': csrf_field, 'csrf_limit': csrf_limit, 'target_op': target_op, 'csrf_token': csrf_token } # Make page with manage transfers tab and manage keys tab output_objects.append({ 'object_type': 'html_form', 'text': ''' <div id="quiet-mode-content" class="hidden"> <p> Accept data %(transfer_action)s of %(transfer_src_string)s from %(protocol)s://%(fqdn)s:%(port)s/ into %(transfer_dst)s ? 
</p> <p> <input type=button onClick="doSubmit();" value="Accept %(transfer_action)s" /> </p> </div> <div id="wrap-tabs" class="datatransfer-tabs"> <ul> <li><a href="#transfer-tab">Manage Data Transfers</a></li> <li><a href="#keys-tab">Manage Transfer Keys</a></li> </ul> ''' % fill_helpers }) # Display external transfers, log and form to add new ones output_objects.append({ 'object_type': 'html_form', 'text': ''' <div id="transfer-tab"> ''' }) output_objects.append({ 'object_type': 'sectionheader', 'text': 'External Data Transfers' }) output_objects.append({ 'object_type': 'table_pager', 'id_prefix': 'datatransfers_', 'entry_name': 'transfers', 'default_entries': default_pager_entries }) output_objects.append({ 'object_type': 'datatransfers', 'datatransfers': datatransfers }) output_objects.append({ 'object_type': 'sectionheader', 'text': 'Latest Transfer Results' }) output_objects.append({ 'object_type': 'html_form', 'text': ''' <textarea id="logarea" class="fillwidth" rows=5 readonly="readonly">%s</textarea> ''' % (''.join(log_lines)) }) output_objects.append({ 'object_type': 'sectionheader', 'text': 'Create External Data Transfer' }) transfer_html = ''' <table class="addexttransfer"> <tr><td> Fill in the import/export data transfer details below to request a new background data transfer task.<br/> Source must be a path without wildcard characters and it must be specifically pointed out if the src is a directory. In that case recursive transfer will automatically be used and otherwise the src is considered a single file, so it will fail if that is not the case.<br/> Destination is a single location directory to transfer the data to. It is considered in relation to your user home for <em>import</em> requests. 
Source is similarly considered in relation to your user home in <em>export</em> requests.<br/> Destination is a always handled as a directory path to transfer source files into.<br/> <form method="%(form_method)s" action="%(target_op)s.py" onSubmit="return beforeSubmit();"> <input type="hidden" name="%(csrf_field)s" value="%(csrf_token)s" /> <fieldset id="transferbox"> <table id="createtransfer" class="addexttransfer"> <tr><td> <label for="action">Action</label> <input type=radio name=action %(import_checked)s value="import" />import data <input type=radio name=action %(export_checked)s value="export" />export data </td></tr> <tr><td> <label for="transfer_id">Optional Transfer ID / Name </label> <input type=text size=60 name=transfer_id value="%(transfer_id)s" pattern="[a-zA-Z0-9._-]*" title="Optional ID string containing only ASCII letters and digits possibly with separators like hyphen, underscore and period" /> </td></tr> <tr><td> <label for="protocol">Protocol</label> <select id="protocol_select" class="protocol-select themed-select html-select" name="protocol" onblur="setDefaultPort();"> ''' # select requested protocol for (key, val) in valid_proto: if protocol == key: selected = 'selected' else: selected = '' transfer_html += '<option %s value="%s">%s</option>' % \ (selected, key, val) transfer_html += ''' </select> </td></tr> <tr><td> <label for="fqdn">Host and port</label> <input type=text size=37 name=fqdn value="%(fqdn)s" required pattern="[a-zA-Z0-9]+(\.[a-zA-Z0-9]+)+" title="A fully qualified domain name or Internet IP address for the remote location"/> <input id="port_input" type=number step=1 min=1 max=65535 name=port value="%(port)s" required /> </td></tr> <tr><td> <label for="">Login method</label> <input id="anonymous_choice" type=radio %(anon_checked)s onclick="enableLogin(\'anonymous\');" /> anonymous access <input id="userpassword_choice" type=radio %(pw_checked)s onclick="enableLogin(\'password\');" /> login with password <input 
id="userkey_choice" type=radio %(key_checked)s onclick="enableLogin(\'key\');" /> login with key </td></tr> <tr id="login_fields" style="display: none;"><td> <label for="username">Username</label> <input id="username" type=text size=60 name=username value="%(username)s" pattern="[a-zA-Z0-9._-]*" title="Optional username used to login on the remote site, if required" /> <br/> <span id="password_entry"> <label for="transfer_pw">Password</label> <input id="password" type=password size=60 name=transfer_pw value="" /> </span> <span id="key_entry"> <label for="key_id">Key</label> <select id="key" class="key-select themed-select html-select" name=key_id /> ''' # select requested key for key_dict in available_keys: if key_dict['key_id'] == key_id: selected = 'selected' else: selected = '' transfer_html += '<option %s value="%s">%s</option>' % \ (selected, key_dict['key_id'], key_dict['key_id']) selected = '' transfer_html += ''' </select> %s ''' % key_note transfer_html += ''' </span> </td></tr> <tr><td> <div id="srcfields"> <!-- NOTE: automatically filled by addSource function --> </div> <input id="addsrcbutton" type="button" onclick="addSource(); return false;" value="Add another source field" /> </td></tr> <tr><td> <label for="transfer_dst">Destination path</label> <input id=\'dst_0\' type=text size=60 name=transfer_dst value="%(transfer_dst)s" required title="relative destination path: local for imports and remote for exports" /> <input id=\'dst_dir_0\' type=radio checked />Destination directory <input id=\'dst_file_0\' type=radio disabled />Destination file<br /> </td></tr> <tr><td> <div id="excludefields"> <!-- NOTE: automatically filled by addExclude function --> </div> <input id="addexcludebutton" type="button" onclick="addExclude(); return false;" value="Add another exclude field" /> </td></tr> <tr><td> <label for="compress">Enable compression (leave unset except for <em>slow</em> sites)</label> <input type=checkbox name=compress> </td></tr> <tr><td> <label 
for="notify">Optional notify on completion (e.g. email address)</label> <input type=text size=60 name=notify value=\'%(notify)s\'> </td></tr> <tr><td> <span> <input id="submit-request-transfer" type=submit value="Request transfer" /> <input type=reset value="Clear" /> </span> </td></tr> </table> </fieldset> </form> </td> </tr> </table> %(toggle_quiet)s %(scroll_to_create)s ''' output_objects.append({ 'object_type': 'html_form', 'text': transfer_html % fill_helpers }) output_objects.append({ 'object_type': 'html_form', 'text': ''' </div> ''' }) # Display key management output_objects.append({ 'object_type': 'html_form', 'text': ''' <div id="keys-tab"> ''' }) output_objects.append({ 'object_type': 'sectionheader', 'text': 'Manage Data Transfer Keys' }) key_html = ''' <form method="%(form_method)s" action="%(target_op)s.py"> <input type="hidden" name="%(csrf_field)s" value="%(csrf_token)s" /> <table class="managetransferkeys"> <tr><td> ''' transferkeys = [] for key_dict in available_keys: key_item = build_keyitem_object(configuration, key_dict) saved_id = key_item['key_id'] js_name = 'delete%s' % hexlify(saved_id) helper = html_post_helper(js_name, '%s.py' % target_op, { 'key_id': saved_id, 'action': 'delkey', csrf_field: csrf_token }) output_objects.append({'object_type': 'html_form', 'text': helper}) key_item['delkeylink'] = { 'object_type': 'link', 'destination': "javascript: confirmDialog(%s, '%s');" % (js_name, 'Really remove %s?' 
% saved_id), 'class': 'removelink iconspace', 'title': 'Remove %s' % saved_id, 'text': '' } transferkeys.append(key_item) output_objects.append({ 'object_type': 'table_pager', 'id_prefix': 'transferkeys_', 'entry_name': 'keys', 'default_entries': default_pager_entries }) output_objects.append({ 'object_type': 'transferkeys', 'transferkeys': transferkeys }) key_html += ''' Please copy the public key to your ~/.ssh/authorized_keys or ~/.ssh/authorized_keys2 file on systems where you want to login with the corresponding key.<br/> %s </td></tr> <tr><td> Select a name below to create a new key for use in future transfers. The key is generated and stored in a private storage area on %s, so that only the transfer service can access and use it for your transfers. </td></tr> <tr><td> <input type=hidden name=action value="generatekey" /> Key name:<br/> <input type=text size=60 name=key_id value="" required pattern="[a-zA-Z0-9._-]+" title="internal name for the key when used in transfers. I.e. letters and digits separated only by underscores, periods and hyphens" /> <br/> <input type=submit value="Generate key" /> </td></tr> </table> </form> ''' % (restrict_template % 'ssh-rsa AAAAB3NzaC...', configuration.short_title) output_objects.append({ 'object_type': 'html_form', 'text': key_html % fill_helpers }) output_objects.append({ 'object_type': 'html_form', 'text': ''' </div> ''' }) output_objects.append({ 'object_type': 'html_form', 'text': ''' </div> ''' }) return (output_objects, returnvalues.OK) elif action in transfer_actions: # NOTE: all path validation is done at run-time in grid_transfers transfer_dict = transfer_map.get(transfer_id, {}) if action == 'deltransfer': if transfer_dict is None: output_objects.append({ 'object_type': 'error_text', 'text': 'existing transfer_id is required for delete' }) return (output_objects, returnvalues.CLIENT_ERROR) (save_status, _) = delete_data_transfer(configuration, client_id, transfer_id, transfer_map) desc = "delete" elif action == 
'redotransfer': if transfer_dict is None: output_objects.append({ 'object_type': 'error_text', 'text': 'existing transfer_id is required for reschedule' }) return (output_objects, returnvalues.CLIENT_ERROR) transfer_dict['status'] = 'NEW' (save_status, _) = update_data_transfer(configuration, client_id, transfer_dict, transfer_map) desc = "reschedule" else: if not fqdn: output_objects.append({ 'object_type': 'error_text', 'text': 'No host address provided!' }) return (output_objects, returnvalues.CLIENT_ERROR) if not [src for src in src_list if src] or not dst: output_objects.append({ 'object_type': 'error_text', 'text': 'transfer_src and transfer_dst parameters ' 'required for all data transfers!' }) return (output_objects, returnvalues.CLIENT_ERROR) if protocol == "rsyncssh" and not key_id: output_objects.append({ 'object_type': 'error_text', 'text': 'RSYNC over SSH is only supported with key!' }) return (output_objects, returnvalues.CLIENT_ERROR) if not password and not key_id and protocol in warn_anon: output_objects.append({ 'object_type': 'warning', 'text': ''' %s transfers usually require explicit authentication with your credentials. Proceeding as requested with anonymous login, but the transfer is likely to fail.''' % valid_proto_map[protocol] }) if key_id and protocol in warn_key: output_objects.append({ 'object_type': 'warning', 'text': ''' %s transfers usually only support authentication with username and password rather than key. 
Proceeding as requested, but the transfer is likely to fail if it really requires login.''' % valid_proto_map[protocol] }) # Make pseudo-unique ID based on msec time since epoch if not given if not transfer_id: transfer_id = "transfer-%d" % (time.time() * 1000) if transfer_dict: desc = "update" else: desc = "create" if password: # We don't want to store password in plain text on disk password_digest = make_digest('datatransfer', client_id, password, configuration.site_digest_salt) else: password_digest = '' transfer_dict.update({ 'transfer_id': transfer_id, 'action': action, 'protocol': protocol, 'fqdn': fqdn, 'port': port, 'username': username, 'password_digest': password_digest, 'key': key_id, 'src': src_list, 'dst': dst, 'exclude': exclude_list, 'compress': use_compress, 'notify': notify, 'status': 'NEW' }) (save_status, _) = create_data_transfer(configuration, client_id, transfer_dict, transfer_map) if not save_status: output_objects.append({ 'object_type': 'error_text', 'text': 'Error in %s data transfer %s: ' % (desc, transfer_id) + 'save updated transfers failed!' }) return (output_objects, returnvalues.CLIENT_ERROR) output_objects.append({ 'object_type': 'text', 'text': '%sd transfer request %s.' % (desc.title(), transfer_id) }) if action != 'deltransfer': output_objects.append({ 'object_type': 'link', 'destination': "fileman.py?path=transfer_output/%s/" % transfer_id, 'title': 'Transfer status and output', 'text': 'Transfer status and output folder' }) output_objects.append({ 'object_type': 'text', 'text': ''' Please note that the status files only appear after the transfer starts, so it may be empty now. 
''' }) logger.debug('datatransfer %s from %s done: %s' % (action, client_id, transfer_dict)) elif action in key_actions: if action == 'generatekey': (gen_status, pub) = generate_user_key(configuration, client_id, key_id) if gen_status: output_objects.append({ 'object_type': 'html_form', 'text': ''' Generated new key with name %s and associated public key:<br/> <textarea class="publickey" rows="5" readonly="readonly">%s</textarea> <p> Please copy it to your ~/.ssh/authorized_keys or ~/.ssh/authorized_keys2 file on the host(s) where you want to use this key for background transfer login. <br/> %s </p> ''' % (key_id, pub, restrict_template % pub) }) else: output_objects.append({ 'object_type': 'error_text', 'text': ''' Key generation for name %s failed with error: %s''' % (key_id, pub) }) return (output_objects, returnvalues.CLIENT_ERROR) elif action == 'delkey': pubkey = '[unknown]' available_keys = load_user_keys(configuration, client_id) for key_dict in available_keys: if key_dict['key_id'] == key_id: pubkey = key_dict.get('public_key', pubkey) (del_status, msg) = delete_user_key(configuration, client_id, key_id) if del_status: output_objects.append({ 'object_type': 'html_form', 'text': ''' <p> Deleted the key "%s" and the associated public key:<br/> </p> <textarea class="publickey" rows="5" readonly="readonly">%s</textarea> <p> You will no longer be able to use it in your data transfers and can safely remove the public key from your ~/.ssh/authorized_keys* files on any hosts where you may have previously added it. 
</p> ''' % (key_id, pubkey) }) else: output_objects.append({ 'object_type': 'error_text', 'text': ''' Key removal for name %s failed with error: %s''' % (key_id, msg) }) return (output_objects, returnvalues.CLIENT_ERROR) else: output_objects.append({ 'object_type': 'error_text', 'text': 'Invalid data transfer action: %s' % action }) return (output_objects, returnvalues.CLIENT_ERROR) output_objects.append({ 'object_type': 'link', 'destination': 'datatransfer.py', 'text': 'Return to data transfers overview' }) return (output_objects, returnvalues.OK)
def main(client_id, user_arguments_dict):
    """Main function used by front end.

    Resubmits one or more of the caller's own jobs. Each job_id input may
    be a literal ID or a glob pattern (the legacy all_jobs keyword matches
    everything). For every matching mRSL file the saved job description is
    re-serialized to a temporary mRSL file and submitted again via new_job.

    Returns the usual (output_objects, status) tuple for the front end.
    """

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    patterns = accepted['job_id']

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type': 'error_text',
            'text': '''Only accepting CSRF-filtered POST requests to prevent
unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not configuration.site_enable_jobs:
        output_objects.append({
            'object_type': 'error_text',
            'text': '''Job execution is not enabled on this system'''
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    if not patterns:
        output_objects.append({
            'object_type': 'error_text',
            'text': 'No job_id specified!'
        })
        return (output_objects, returnvalues.NO_SUCH_JOB_ID)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name

    base_dir = \
        os.path.abspath(os.path.join(configuration.mrsl_files_dir,
                                     client_dir)) + os.sep

    filelist = []
    keywords_dict = mrslkeywords.get_keywords_dict(configuration)

    # BUGFIX: status must be initialized *before* the pattern loop.
    # Previously it was (re)set to OK after the loop, which silently
    # clobbered the CLIENT_ERROR recorded for patterns without any match.
    status = returnvalues.OK
    for pattern in patterns:
        pattern = pattern.strip()

        # Backward compatibility - all_jobs keyword should match all jobs

        if pattern == all_jobs:
            pattern = '*'

        # Check directory traversal attempts before actual handling to avoid
        # leaking information about file system layout while allowing
        # consistent error messages

        unfiltered_match = glob.glob(base_dir + pattern + '.mRSL')
        match = []
        for server_path in unfiltered_match:
            # IMPORTANT: path must be expanded to abs for proper chrooting
            abs_path = os.path.abspath(server_path)
            if not valid_user_path(configuration, abs_path, base_dir, True):
                # out of bounds - save user warning for later to allow
                # partial match:
                # ../*/* is technically allowed to match own files.
                logger.warning('%s tried to %s restricted path %s ! (%s)'
                               % (client_id, op_name, abs_path, pattern))
                continue
            # Insert valid job files in filelist for later treatment
            match.append(abs_path)

        # Now actually treat list of allowed matchings and notify if no
        # (allowed) match

        if not match:
            output_objects.append({
                'object_type': 'error_text',
                'text': '%s: You do not have any matching job IDs!' % pattern
            })
            status = returnvalues.CLIENT_ERROR
        else:
            filelist += match

    # resubmit is hard on the server

    if len(filelist) > 100:
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Too many matching jobs (%s)!' % len(filelist)
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    resubmitobjs = []
    for filepath in filelist:
        mrsl_file = filepath.replace(base_dir, '')
        job_id = mrsl_file.replace('.mRSL', '')

        resubmitobj = {'object_type': 'resubmitobj', 'job_id': job_id}

        mrsl_dict = unpickle(filepath, logger)
        if not mrsl_dict:
            resubmitobj['message'] = "No such job: %s (%s)" % (job_id,
                                                               mrsl_file)
            status = returnvalues.CLIENT_ERROR
            resubmitobjs.append(resubmitobj)
            continue

        resubmit_items = keywords_dict.keys()

        # loop selected keywords and create mRSL string

        resubmit_job_string = ''

        for dict_elem in resubmit_items:
            value = ''
            # Extract job value with fallback to default to support optional
            # fields
            job_value = mrsl_dict.get(dict_elem,
                                      keywords_dict[dict_elem]['Value'])
            if keywords_dict[dict_elem]['Type'].startswith(
                    'multiplekeyvalues'):
                for (elem_key, elem_val) in job_value:
                    if elem_key:
                        value += '%s=%s\n' % (str(elem_key).strip(),
                                              str(elem_val).strip())
            elif keywords_dict[dict_elem]['Type'].startswith('multiple'):
                for elem in job_value:
                    if elem:
                        value += '%s\n' % str(elem).rstrip()
            else:
                if str(job_value):
                    value += '%s\n' % str(job_value).rstrip()

            # Only insert keywords with an associated value

            if value:
                if value.rstrip() != '':
                    resubmit_job_string += '''::%s::
%s

''' % (dict_elem, value.rstrip())

        # save tempfile

        (filehandle, tempfilename) = \
            tempfile.mkstemp(dir=configuration.mig_system_files,
                             text=True)
        os.write(filehandle, resubmit_job_string)
        os.close(filehandle)

        # submit job the usual way

        (new_job_status, msg, new_job_id) = new_job(tempfilename,
                                                    client_id,
                                                    configuration, False,
                                                    True)
        if not new_job_status:
            resubmitobj['status'] = False
            resubmitobj['message'] = msg
            status = returnvalues.SYSTEM_ERROR
            resubmitobjs.append(resubmitobj)
            continue

        resubmitobj['status'] = True
        resubmitobj['new_job_id'] = new_job_id
        resubmitobjs.append(resubmitobj)

    output_objects.append({
        'object_type': 'resubmitobjs',
        'resubmitobjs': resubmitobjs
    })

    return (output_objects, status)
def main(client_id, user_arguments_dict):
    """Main function used by front end.

    Handles a potentially huge file upload: the raw CGI input extraction
    is delayed by the front end and parsed manually here, the destination
    path is validated against the caller's chrooted home, and the actual
    transfer is then daemonized (double fork) so the request can return a
    progress page without CGI timeouts killing the upload.
    """

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    client_dir = client_id_dir(client_id)
    status = returnvalues.OK
    defaults = signature()[1]

    # IMPORTANT: the CGI front end forces the input extraction to be delayed
    # We must manually extract and parse input here to avoid memory explosion
    # for huge files!

    # TODO: explosions still happen sometimes!
    # Most likely because of Apache SSL renegotiations which have
    # no other way of storing input

    # Fallback to the builtin dict constructor so extract_input() yields an
    # empty mapping when no delayed input was staged by the front end.
    extract_input = user_arguments_dict.get('__DELAYED_INPUT__', dict)
    logger.info('Extracting input in %s' % op_name)
    form = extract_input()
    logger.info('After extracting input in %s' % op_name)
    file_item = None
    file_name = ''
    # Rebuild a minimal, filtered argument dict from the raw form fields
    user_arguments_dict = {}
    if form.has_key('fileupload'):
        file_item = form['fileupload']
        file_name = file_item.filename
        user_arguments_dict['fileupload'] = ['true']
        user_arguments_dict['path'] = [file_name]
    # An explicit path field overrides the implicit upload filename
    if form.has_key('path'):
        user_arguments_dict['path'] = [form['path'].value]
    if form.has_key('restrict'):
        user_arguments_dict['restrict'] = [form['restrict'].value]
    else:
        user_arguments_dict['restrict'] = defaults['restrict']
    logger.info('Filtered input is: %s' % user_arguments_dict)

    # Now validate parts as usual

    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    flags = ''.join(accepted['flags'])
    path = accepted['path'][-1]
    restrict = accepted['restrict'][-1]

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type': 'error_text',
            'text': '''Only accepting CSRF-filtered POST requests to prevent
unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not configuration.site_enable_griddk:
        output_objects.append({
            'object_type': 'text',
            'text': '''Grid.dk features are disabled on this site.
Please contact the site admins %s if you think they should be enabled.
''' % configuration.admin_email
        })
        return (output_objects, returnvalues.OK)

    logger.info('Filtered input validated with result: %s' % accepted)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name

    base_dir = os.path.abspath(
        os.path.join(configuration.user_home, client_dir)) + os.sep

    if verbose(flags):
        for flag in flags:
            output_objects.append({
                'object_type': 'text',
                'text': '%s using flag: %s' % (op_name, flag)
            })

    output_objects.append({'object_type': 'header',
                           'text': 'Uploading file'})

    # Check directory traversal attempts before actual handling to avoid
    # leaking information about file system layout while allowing consistent
    # error messages

    real_path = os.path.realpath(os.path.join(base_dir, path))

    # Implicit destination: a directory target means "upload into it"

    if os.path.isdir(real_path):
        real_path = os.path.join(real_path, os.path.basename(file_name))

    if not valid_user_path(configuration, real_path, base_dir, True):
        logger.warning('%s tried to %s restricted path %s ! (%s)'
                       % (client_id, op_name, real_path, path))
        output_objects.append({
            'object_type': 'error_text',
            'text': "Invalid destination (%s expands to an illegal path)"
            % path
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not os.path.isdir(os.path.dirname(real_path)):
        output_objects.append({
            'object_type': 'error_text',
            'text': "cannot write: no such file or directory: %s)" % path
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # We fork off here and redirect the user to a progress page for user
    # friendly output and to avoid cgi timeouts from killing the upload.

    # We use something like the Active State python recipe for daemonizing
    # to properly detach from the CGI process and continue in the background.

    # Please note that we only close stdio file descriptors to avoid closing
    # the fileupload.

    # Seek to end to learn total size, then rewind for the actual copy
    file_item.file.seek(0, 2)
    total_size = file_item.file.tell()
    file_item.file.seek(0, 0)

    try:
        pid = os.fork()
        if pid == 0:
            # first child: new session, fork again so the grandchild is
            # fully detached from the controlling terminal
            os.setsid()
            pid = os.fork()
            if pid == 0:
                # grandchild: the daemonized worker that does the upload
                os.chdir('/')
                os.umask(0)
                # close only stdin/stdout/stderr (fds 0-2) - see note above
                for fno in range(3):
                    try:
                        os.close(fno)
                    except OSError:
                        pass
            else:
                # first child exits immediately to orphan the grandchild
                os._exit(0)
        # NOTE(review): the original (first-level) parent falls through
        # here and continues the request - presumably to render the
        # progress page; confirm against the daemonize recipe upstream.
    except OSError, ose:
        output_objects.append({
            'object_type': 'error_text',
            'text': '%s upload could not background! (%s)'
            % (path, str(ose).replace(base_dir, ''))
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)
def main(client_id, user_arguments_dict):
    """Main function used by front end.

    Entry point for the screen saver sandbox download: validates the
    requested sandbox parameters (image format, vgrids, ...), generates a
    random sandbox key and loads the sandbox user database.
    """

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False,
                                  op_menu=client_id)
    output_objects.append({'object_type': 'header', 'text'
                           : '%s Screen Saver Sandbox Download' %
                           configuration.short_title})
    defaults = signature()[1]
    # Plain validate_input (no cert): sandbox users are not grid users
    (validate_status, accepted) = validate_input(user_arguments_dict,
                                                 defaults, output_objects,
                                                 allow_rejects=False)
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    username = accepted['username'][-1]
    password = accepted['password'][-1]
    hd_size = accepted['hd_size'][-1]
    image_format = accepted['image_format'][-1]
    net_bw = accepted['net_bw'][-1]
    memory = accepted['memory'][-1]
    operating_system = accepted['operating_system'][-1]
    win_solution = accepted['win_solution'][-1]
    vgrid_list = accepted['vgrid']
    cputime = 1000000
    # Fresh random key identifying this sandbox instance
    sandboxkey = hexlify(open('/dev/urandom').read(32))
    ip_address = 'UNKNOWN'
    if os.environ.has_key('REMOTE_ADDR'):
        ip_address = os.environ['REMOTE_ADDR']

    if not configuration.site_enable_sandboxes:
        output_objects.append({
            'object_type': 'text',
            'text': '''Sandbox resources are disabled on this site.
Please contact the site admins %s if you think they should be enabled.
''' % configuration.admin_email
        })
        return (output_objects, returnvalues.OK)

    # CSRF check keyed on sandbox username rather than grid client_id
    if not safe_handler(configuration, 'post', op_name, username,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type': 'error_text',
            'text': '''Only accepting CSRF-filtered POST requests to prevent
unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # check that requested image format is valid

    if not image_format in ['raw', 'qcow', 'cow', 'qcow2', 'vmdk']:
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Unsupported image format: %s' % image_format
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # check that requested vgrids are valid - anybody can offer their sandbox
    # for a vgrid but it is still left to the vgrid owners to explicitly
    # accept all resources

    all_vgrids = get_vgrid_map_vgrids(configuration)
    for vgrid in vgrid_list:
        if not vgrid in all_vgrids:
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Failed to validate %s %s: %s'
                % (configuration.site_vgrid_label, vgrid, all_vgrids)
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)

    # Load the user file

    try:
        userdb = load_sandbox_db(configuration)
    except Exception, exc:
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Failed to read login info: %s' % exc
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)
userdb = {} except Exception, exc: output_objects.append({ 'object_type': 'error_text', 'text': 'Could not read sandbox database! %s' % exc }) return (output_objects, returnvalues.SYSTEM_ERROR) grid_stat = GridStat(configuration, logger) # If it's a new user, check that the username is free if newuser == 'on': if not safe_handler(configuration, 'post', op_name, client_id, get_csrf_limit(configuration), accepted): output_objects.append({ 'object_type': 'error_text', 'text': '''Only accepting CSRF-filtered POST requests to prevent unintended updates''' }) return (output_objects, returnvalues.CLIENT_ERROR) if userdb.has_key(username): output_objects.append({ 'object_type': 'error_text', 'text': 'Username is already taken - please go back and choose another one...'
def main(client_id, user_arguments_dict, environ=None):
    """Main function used by front end.

    Copies/imports files into the caller's chrooted home. Supports three
    source bases: the user's own home, a job session (iosessionid) and a
    readable sharelink (share_id). All bases end in os.sep so prefix
    checks cannot leak into sibling users' homes.
    """

    if environ is None:
        environ = os.environ
    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    flags = ''.join(accepted['flags'])
    src_list = accepted['src']
    dst = accepted['dst'][-1]
    iosessionid = accepted['iosessionid'][-1]
    share_id = accepted['share_id'][-1]
    freeze_id = accepted['freeze_id'][-1]

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append(
            {'object_type': 'error_text',
             'text': '''Only accepting CSRF-filtered POST requests to prevent
unintended updates'''
             })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name

    base_dir = os.path.abspath(os.path.join(configuration.user_home,
                                            client_dir)) + os.sep

    # Special handling if used from a job (no client_id but iosessionid)

    if not client_id and iosessionid:
        base_dir = os.path.realpath(configuration.webserver_home
                                    + os.sep + iosessionid) + os.sep

    # Use selected base as source and destination dir by default

    src_base = dst_base = base_dir

    # Sharelink import if share_id is given - change to sharelink as src base

    if share_id:
        try:
            # share_id encodes the access mode (read-only/read-write/...)
            (share_mode, _) = extract_mode_id(configuration, share_id)
        except ValueError, err:
            logger.error('%s called with invalid share_id %s: %s' %
                         (op_name, share_id, err))
            output_objects.append(
                {'object_type': 'error_text',
                 'text': 'Invalid sharelink ID: %s' % share_id})
            return (output_objects, returnvalues.CLIENT_ERROR)

        # TODO: load and check sharelink pickle (currently requires client_id)

        if share_mode == 'write-only':
            logger.error('%s called import from write-only sharelink: %s' %
                         (op_name, accepted))
            output_objects.append(
                {'object_type': 'error_text',
                 'text': 'Sharelink %s is write-only!' % share_id})
            return (output_objects, returnvalues.CLIENT_ERROR)
        target_dir = os.path.join(share_mode, share_id)
        src_base = os.path.abspath(os.path.join(
            configuration.sharelink_home, target_dir)) + os.sep
        # Single-file sharelinks cannot act as an import source dir
        if os.path.isfile(os.path.realpath(src_base)):
            logger.error('%s called import on single file sharelink: %s' %
                         (op_name, share_id))
            output_objects.append(
                {'object_type': 'error_text',
                 'text': """Import is only supported for directory
sharelinks!"""})
            return (output_objects, returnvalues.CLIENT_ERROR)
        elif not os.path.isdir(src_base):
            logger.error('%s called import with non-existant sharelink: %s'
                         % (client_id, share_id))
            output_objects.append(
                {'object_type': 'error_text',
                 'text': 'No such sharelink: %s' % share_id})
            return (output_objects, returnvalues.CLIENT_ERROR)
def main(client_id, user_arguments_dict):
    """Main function used by front end.

    Updates the web/SCM/tracker/forum component directories of a vgrid.
    Non-owners are shown a form to request access instead. The various
    *_dir paths all end in os.sep to keep prefix checks chroot-safe.
    """

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    title_entry = find_entry(output_objects, 'title')
    label = "%s" % configuration.site_vgrid_label
    title_entry['text'] = "Update %s Components" % label
    output_objects.append({
        'object_type': 'header',
        'text': 'Update %s Components' % label
    })
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    vgrid_name = accepted['vgrid_name'][-1]

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type': 'error_text',
            'text': '''Only accepting CSRF-filtered POST requests to prevent
unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not vgrid_is_owner(vgrid_name, client_id, configuration):
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Only owners of %s can administrate it.' % vgrid_name
        })
        # Offer a CSRF-protected owner-access request form instead
        form_method = 'post'
        csrf_limit = get_csrf_limit(configuration)
        fill_helpers = {
            'vgrid_label': label,
            'vgrid_name': vgrid_name,
            'form_method': form_method,
            'csrf_field': csrf_field,
            'csrf_limit': csrf_limit
        }
        target_op = 'sendrequestaction'
        csrf_token = make_csrf_token(configuration, form_method, target_op,
                                     client_id, csrf_limit)
        fill_helpers.update({'target_op': target_op,
                             'csrf_token': csrf_token})
        output_objects.append({
            'object_type': 'html_form',
            'text': '''
<form method="%(form_method)s" action="%(target_op)s.py">
<input type="hidden" name="%(csrf_field)s" value="%(csrf_token)s" />
<input type="hidden" name="vgrid_name" value="%(vgrid_name)s"/>
<input type="hidden" name="request_type" value="vgridowner"/>
<input type="text" size=50 name="request_text" />
<input type="hidden" name="output_format" value="html" />
<input type="submit" value="Request %(vgrid_label)s access" />
</form>
''' % fill_helpers
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name

    base_dir = os.path.abspath(
        os.path.join(configuration.vgrid_home, vgrid_name)) + os.sep

    public_base_dir = \
        os.path.abspath(os.path.join(configuration.vgrid_public_base,
                                     vgrid_name)) + os.sep

    public_scm_dir = \
        os.path.abspath(os.path.join(configuration.vgrid_public_base,
                                     vgrid_name, '.vgridscm')) + os.sep

    public_tracker_dir = \
        os.path.abspath(os.path.join(configuration.vgrid_public_base,
                                     vgrid_name, '.vgridtracker')) + os.sep

    private_base_dir = \
        os.path.abspath(os.path.join(configuration.vgrid_private_base,
                                     vgrid_name)) + os.sep

    private_scm_dir = \
        os.path.abspath(os.path.join(configuration.vgrid_private_base,
                                     vgrid_name, '.vgridscm')) + os.sep

    private_tracker_dir = \
        os.path.abspath(os.path.join(configuration.vgrid_private_base,
                                     vgrid_name, '.vgridtracker')) + os.sep

    private_forum_dir = \
        os.path.abspath(os.path.join(configuration.vgrid_private_base,
                                     vgrid_name, '.vgridforum')) + os.sep

    vgrid_files_dir = \
        os.path.abspath(os.path.join(configuration.vgrid_files_home,
                                     vgrid_name)) + os.sep

    vgrid_scm_dir = \
        os.path.abspath(os.path.join(configuration.vgrid_files_home,
                                     vgrid_name, '.vgridscm')) + os.sep

    vgrid_tracker_dir = \
        os.path.abspath(os.path.join(configuration.vgrid_files_home,
                                     vgrid_name, '.vgridtracker')) + os.sep

    output_objects.append({'object_type': 'text', 'text':
                           'Updating %s %s components ...' %
                           (label, vgrid_name)})

    # Try to create all base directories used for vgrid files

    for path in (base_dir, public_base_dir, private_base_dir,
                 vgrid_files_dir):
        try:
            os.mkdir(path)
        except Exception, exc:
            # best-effort: presumably the directory already exists -
            # creation errors are deliberately ignored here
            pass
def main(client_id, user_arguments_dict):
    """Main function used by front end.

    Applies a state-change action (cancel/freeze/thaw) to one or more of
    the caller's own jobs, identified by literal IDs or glob patterns.
    For each job the mRSL STATUS field is updated on disk and grid_script
    is notified through the job queue message pipe.
    """

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    patterns = accepted['job_id']
    action = accepted['action'][-1]

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type': 'error_text',
            'text': '''Only accepting CSRF-filtered POST requests to prevent
unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not configuration.site_enable_jobs:
        output_objects.append({
            'object_type': 'error_text',
            'text': '''Job execution is not enabled on this system'''
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    if not action in valid_actions.keys():
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Invalid job action "%s" (only %s supported)'
            % (action, ', '.join(valid_actions.keys()))
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # valid_actions maps the action keyword to the target STATUS value
    new_state = valid_actions[action]

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name

    base_dir = \
        os.path.abspath(os.path.join(configuration.mrsl_files_dir,
                                     client_dir)) + os.sep

    status = returnvalues.OK
    filelist = []
    for pattern in patterns:
        pattern = pattern.strip()

        # Backward compatibility - all_jobs keyword should match all jobs

        if pattern == all_jobs:
            pattern = '*'

        # Check directory traversal attempts before actual handling to avoid
        # leaking information about file system layout while allowing
        # consistent error messages

        unfiltered_match = glob.glob(base_dir + pattern + '.mRSL')
        match = []
        for server_path in unfiltered_match:
            # IMPORTANT: path must be expanded to abs for proper chrooting
            abs_path = os.path.abspath(server_path)
            if not valid_user_path(configuration, abs_path, base_dir, True):
                # out of bounds - save user warning for later to allow
                # partial match:
                # ../*/* is technically allowed to match own files.
                logger.error(
                    '%s tried to use %s %s outside own home! (pattern %s)'
                    % (client_id, op_name, abs_path, pattern))
                continue
            # Insert valid job files in filelist for later treatment
            match.append(abs_path)

        # Now actually treat list of allowed matchings and notify if no
        # (allowed) match

        if not match:
            output_objects.append({
                'object_type': 'error_text',
                'text': '%s: You do not have any matching job IDs!' % pattern
            })
            status = returnvalues.CLIENT_ERROR
        else:
            filelist += match

    # job state change is hard on the server, limit

    if len(filelist) > 500:
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Too many matching jobs (%s)!' % len(filelist)
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    changedstatusjobs = []
    for filepath in filelist:

        # Extract job_id from filepath (replace doesn't modify filepath)

        mrsl_file = filepath.replace(base_dir, '')
        job_id = mrsl_file.replace('.mRSL', '')
        changedstatusjob = {
            'object_type': 'changedstatusjob',
            'job_id': job_id
        }

        job_dict = unpickle(filepath, logger)
        if not job_dict:
            changedstatusjob['message'] = '''The file containing the
information for job id %s could not be opened! You can only %s your own
jobs!''' % (job_id, action)
            changedstatusjobs.append(changedstatusjob)
            status = returnvalues.CLIENT_ERROR
            continue

        changedstatusjob['oldstatus'] = job_dict['STATUS']

        # Is the job status compatible with action?

        possible_cancel_states = [
            'PARSE', 'QUEUED', 'RETRY', 'EXECUTING', 'FROZEN'
        ]
        if action == 'cancel' and \
                not job_dict['STATUS'] in possible_cancel_states:
            changedstatusjob['message'] = \
                'You can only cancel jobs with status: %s.'\
                % ' or '.join(possible_cancel_states)
            status = returnvalues.CLIENT_ERROR
            changedstatusjobs.append(changedstatusjob)
            continue
        possible_freeze_states = ['QUEUED', 'RETRY']
        if action == 'freeze' and \
                not job_dict['STATUS'] in possible_freeze_states:
            changedstatusjob['message'] = \
                'You can only freeze jobs with status: %s.'\
                % ' or '.join(possible_freeze_states)
            status = returnvalues.CLIENT_ERROR
            changedstatusjobs.append(changedstatusjob)
            continue
        possible_thaw_states = ['FROZEN']
        if action == 'thaw' and \
                not job_dict['STATUS'] in possible_thaw_states:
            changedstatusjob['message'] = \
                'You can only thaw jobs with status: %s.'\
                % ' or '.join(possible_thaw_states)
            status = returnvalues.CLIENT_ERROR
            changedstatusjobs.append(changedstatusjob)
            continue

        # job action is handled by changing the STATUS field, notifying the
        # job queue and making sure the server never submits jobs with status
        # FROZEN or CANCELED.

        # file is repickled to ensure newest information is used, job_dict
        # might be old if another script has modified the file.

        if not unpickle_and_change_status(filepath, new_state, logger):
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Job status could not be changed to %s!' % new_state
            })
            status = returnvalues.SYSTEM_ERROR

        # Avoid key error and make sure grid_script gets expected number of
        # arguments

        if not job_dict.has_key('UNIQUE_RESOURCE_NAME'):
            job_dict['UNIQUE_RESOURCE_NAME'] = \
                'UNIQUE_RESOURCE_NAME_NOT_FOUND'
        if not job_dict.has_key('EXE'):
            job_dict['EXE'] = 'EXE_NAME_NOT_FOUND'

        # notify queue

        if not send_message_to_grid_script(
                'JOBACTION ' + job_id + ' ' + job_dict['STATUS'] + ' '
                + new_state + ' ' + job_dict['UNIQUE_RESOURCE_NAME'] + ' '
                + job_dict['EXE'] + '\n', logger, configuration):
            output_objects.append({
                'object_type': 'error_text',
                'text': '''Error sending message to grid_script, job may
still be in the job queue.'''
            })
            status = returnvalues.SYSTEM_ERROR
            continue

        changedstatusjob['newstatus'] = new_state
        changedstatusjobs.append(changedstatusjob)

    output_objects.append({
        'object_type': 'changedstatusjobs',
        'changedstatusjobs': changedstatusjobs
    })
    return (output_objects, status)
def _append_tasks(client_id, configuration, output_objects, task_lines,
                  load_func, save_func, success_text, err_label):
    """Append task_lines to the schedule loaded with load_func and store it
    again with save_func, reporting the outcome on output_objects.

    Shared between the repeating (crontab) and one-time (atjobs) cases,
    which only differ in loader, saver and user-facing wording.

    Returns returnvalues.OK on success and returnvalues.CLIENT_ERROR when
    the combined schedule fails to parse and save.
    """
    contents = load_func(client_id, configuration)
    contents += '''
%s
''' % '\n'.join(task_lines)
    (parse_status, parse_msg) = save_func(contents, client_id, configuration)
    if not parse_status:
        output_objects.append({
            'object_type': 'error_text', 'text':
            'Error parsing and saving %s: %s' % (err_label, parse_msg)})
        return returnvalues.CLIENT_ERROR
    if parse_msg:
        # Saved, but the parser produced warnings worth showing the user
        output_objects.append({
            'object_type': 'html_form', 'text':
            '<p class="warningtext">%s</p>' % parse_msg})
    else:
        output_objects.append({'object_type': 'text', 'text': success_text})
    return returnvalues.OK


def main(client_id, user_arguments_dict):
    """Main function used by front end.

    Adds user-supplied cron-style (repeating) and/or at-style (one-time)
    task lines to the user's saved schedule. Returns the usual
    (output_objects, status) tuple.
    """

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    atjobs = accepted['atjobs']
    cronjobs = accepted['crontab']

    output_status = returnvalues.OK
    title_entry = find_entry(output_objects, 'title')
    title_entry['text'] = 'Add Scheduled Tasks'
    header_entry = {'object_type': 'header', 'text': 'Schedule Tasks'}
    output_objects.append(header_entry)

    if not configuration.site_enable_crontab:
        output_objects.append({'object_type': 'text', 'text':
                               '''Scheduled tasks are disabled on this site.
Please contact the site admins %s if you think they should be enabled.
''' % configuration.admin_email})
        return (output_objects, returnvalues.OK)

    logger.info('%s from %s' % (op_name, client_id))
    #logger.debug('%s from %s: %s' % (op_name, client_id, accepted))

    if not atjobs and not cronjobs:
        output_objects.append({'object_type': 'error_text', 'text':
                               'No cron/at jobs provided!'})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Modifying state: require CSRF-protected POST
    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({'object_type': 'error_text', 'text':
                               '''Only accepting CSRF-filtered POST requests
to prevent unintended updates'''})
        return (output_objects, returnvalues.CLIENT_ERROR)

    if cronjobs:
        cron_status = _append_tasks(client_id, configuration, output_objects,
                                    cronjobs, load_crontab,
                                    parse_and_save_crontab,
                                    'Added repeating task schedule',
                                    'crontab')
        if cron_status != returnvalues.OK:
            output_status = cron_status

    if atjobs:
        at_status = _append_tasks(client_id, configuration, output_objects,
                                  atjobs, load_atjobs,
                                  parse_and_save_atjobs,
                                  'Added one-time task schedule', 'atjobs')
        if at_status != returnvalues.OK:
            output_status = at_status

    output_objects.append({'object_type': 'link',
                           'destination': 'crontab.py',
                           'text': 'Schedule task overview'})
    return (output_objects, output_status)
def main(client_id, user_arguments_dict):
    """Main function used by front end.

    Requests a schedule update from grid_script for the jobs matching the
    supplied job_id patterns. Only jobs still pending (QUEUED/RETRY/FROZEN)
    are eligible. Returns the usual (output_objects, status) tuple.
    """

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    patterns = accepted['job_id']

    # Modifying state: require CSRF-protected POST
    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({'object_type': 'error_text', 'text':
                               '''Only accepting CSRF-filtered POST requests
to prevent unintended updates'''})
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not configuration.site_enable_jobs:
        output_objects.append({'object_type': 'error_text', 'text':
                               '''Job execution is not enabled on this system'''})
        return (output_objects, returnvalues.SYSTEM_ERROR)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name
    base_dir = \
        os.path.abspath(os.path.join(configuration.mrsl_files_dir,
                                     client_dir)) + os.sep

    status = returnvalues.OK
    filelist = []
    for pattern in patterns:
        pattern = pattern.strip()

        # Backward compatibility - all_jobs keyword should match all jobs
        if pattern == all_jobs:
            pattern = '*'

        # Check directory traversal attempts before actual handling to avoid
        # leaking information about file system layout while allowing
        # consistent error messages
        unfiltered_match = glob.glob(base_dir + pattern + '.mRSL')
        match = []
        for server_path in unfiltered_match:
            # IMPORTANT: path must be expanded to abs for proper chrooting
            abs_path = os.path.abspath(server_path)
            if not valid_user_path(configuration, abs_path, base_dir, True):
                # out of bounds - save user warning for later to allow
                # partial match:
                # ../*/* is technically allowed to match own files.
                logger.warning('%s tried to %s restricted path %s ! (%s)' %
                               (client_id, op_name, abs_path, pattern))
                continue

            # Insert valid job files in filelist for later treatment
            match.append(abs_path)

        # Now actually treat list of allowed matchings and notify if no
        # (allowed) match
        if not match:
            output_objects.append({'object_type': 'error_text', 'text':
                                   '%s: You do not have any matching job IDs!'
                                   % pattern})
            status = returnvalues.CLIENT_ERROR
        else:
            filelist += match

    # job schedule is hard on the server, limit
    if len(filelist) > 100:
        output_objects.append({'object_type': 'error_text', 'text':
                               'Too many matching jobs (%s)!' %
                               len(filelist)})
        return (output_objects, returnvalues.CLIENT_ERROR)

    saveschedulejobs = []

    for filepath in filelist:

        # Extract job_id from filepath (replace doesn't modify filepath)
        mrsl_file = filepath.replace(base_dir, '')
        job_id = mrsl_file.replace('.mRSL', '')
        saveschedulejob = {'object_type': 'saveschedulejob',
                           'job_id': job_id}

        # NOTE: renamed from 'dict' which shadowed the builtin of that name
        job_dict = unpickle(filepath, logger)
        if not job_dict:
            saveschedulejob['message'] = \
                ('The file containing the information'
                 ' for job id %s could not be opened!'
                 ' You can only read schedule for '
                 'your own jobs!') % job_id
            saveschedulejobs.append(saveschedulejob)
            status = returnvalues.CLIENT_ERROR
            continue

        saveschedulejob['oldstatus'] = job_dict['STATUS']

        # Is the job status pending?
        possible_schedule_states = ['QUEUED', 'RETRY', 'FROZEN']
        if not job_dict['STATUS'] in possible_schedule_states:
            saveschedulejob['message'] = \
                'You can only read schedule for jobs with status: %s.'\
                % ' or '.join(possible_schedule_states)
            saveschedulejobs.append(saveschedulejob)
            continue

        # notify queue
        if not send_message_to_grid_script('JOBSCHEDULE ' + job_id + '\n',
                                           logger, configuration):
            output_objects.append({'object_type': 'error_text', 'text':
                                   'Error sending message to grid_script, update may fail.'})
            status = returnvalues.SYSTEM_ERROR
            continue

        saveschedulejobs.append(saveschedulejob)

    savescheduleinfo = """Please find any available job schedule status in
verbose job status output."""
    output_objects.append({'object_type': 'saveschedulejobs',
                           'saveschedulejobs': saveschedulejobs,
                           'savescheduleinfo': savescheduleinfo})
    return (output_objects, status)
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    # Bootstrap: config, logger, accumulated output and the operation name
    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    output_objects.append({'object_type': 'header', 'text':
                           '%s external certificate sign up' % \
                           configuration.short_title})
    defaults = signature()[1]
    # require_user=False: sign-up request may come from a not-yet-known user
    (validate_status, accepted) = validate_input_and_cert(user_arguments_dict,
                                                          defaults,
                                                          output_objects,
                                                          client_id,
                                                          configuration,
                                                          allow_rejects=False,
                                                          require_user=False)
    if not validate_status:
        logger.warning('%s invalid input: %s' % (op_name, accepted))
        return (accepted, returnvalues.CLIENT_ERROR)

    admin_email = configuration.admin_email
    smtp_server = configuration.smtp_server
    user_pending = os.path.abspath(configuration.user_pending)

    # Distinguished name from the externally issued certificate
    cert_id = accepted['cert_id'][-1].strip()

    # force name to capitalized form (henrik karlsen -> Henrik Karlsen)
    # please note that we get utf8 coded bytes here and title() treats such
    # chars as word termination. Temporarily force to unicode.

    raw_name = accepted['cert_name'][-1].strip()
    try:
        cert_name = force_utf8(force_unicode(raw_name).title())
    except Exception:
        # Fall back to plain byte-string titling if unicode round-trip fails
        cert_name = raw_name.title()
    country = accepted['country'][-1].strip().upper()
    state = accepted['state'][-1].strip().title()
    org = accepted['org'][-1].strip()

    # lower case email address

    email = accepted['email'][-1].strip().lower()

    # keep comment to a single line

    comment = accepted['comment'][-1].replace('\n', ' ')

    # single quotes break command line format - remove

    comment = comment.replace("'", ' ')

    # Modifying state: require CSRF-protected POST
    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({'object_type': 'error_text', 'text':
                               '''Only accepting CSRF-filtered POST requests
to prevent unintended updates'''})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Site-specific sanity check: a DIKU organization must come with a
    # @diku.dk address and vice versa
    is_diku_email = False
    is_diku_org = False
    if email.find('@diku.dk') != -1:
        is_diku_email = True
    if 'DIKU' == org.upper():

        # Consistent upper casing

        org = org.upper()
        is_diku_org = True

    if is_diku_org != is_diku_email:
        output_objects.append({'object_type': 'error_text', 'text':
                               '''Illegal email and organization combination:
Please read and follow the instructions in red on the request page!
If you are a DIKU student with only a @*.ku.dk address please just use KU as
organization. As long as you state that you want the certificate for DIKU
purposes in the comment field, you will be given access to the necessary
resources anyway.
'''})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Reject malformed DNs early - parser raises on anything invalid
    try:
        distinguished_name_to_user(cert_id)
    except:
        output_objects.append({'object_type': 'error_text', 'text':
                               '''Illegal Distinguished name:
Please note that the distinguished name must be a valid certificate DN with
multiple "key=val" fields separated by "/".
'''})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Assemble the pending user record; no password since auth is cert-based
    user_dict = {
        'distinguished_name': cert_id,
        'full_name': cert_name,
        'organization': org,
        'state': state,
        'country': country,
        'email': email,
        'password': '',
        'comment': '%s: %s' % ('Existing certificate', comment),
        'expire': int(time.time() + cert_valid_days * 24 * 60 * 60),
        'openid_names': [],
        'auth': ['extcert'],
    }

    fill_distinguished_name(user_dict)
    user_id = user_dict['distinguished_name']
    # Register the OpenID alias (e.g. email) as an alternate login name
    if configuration.user_openid_providers and configuration.user_openid_alias:
        user_dict['openid_names'] += \
            [user_dict[configuration.user_openid_alias]]
    logger.info('got extcert request: %s' % user_dict)

    # If server allows automatic addition of users with a CA validated cert
    # we create the user immediately and skip mail

    if configuration.auto_add_cert_user:
        fill_user(user_dict)

        # Now all user fields are set and we can begin adding the user

        db_path = os.path.join(configuration.mig_server_home,
                               user_db_filename)
        try:
            create_user(user_dict, configuration.config_file, db_path,
                        ask_renew=False)
        except Exception, err:
            logger.error('Failed to create user with existing cert %s: %s'
                         % (cert_id, err))
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 '''Could not create the user account for you:
Please report this problem to the grid administrators (%s).''' % \
                 admin_email})
            return (output_objects, returnvalues.SYSTEM_ERROR)

        output_objects.append({'object_type': 'text', 'text':
                               '''Created the user account for you:
Please use the navigation menu to the left to proceed using it.
'''})
        return (output_objects, returnvalues.OK)
    # NOTE(review): the manual-approval (admin mail) path for sites without
    # auto_add_cert_user appears to continue beyond this chunk
def main(client_id, user_arguments_dict):
    """Main function used by front end.

    Removes a single workflow trigger rule from a vgrid after verifying
    CSRF protection, caller permissions and trigger existence. Returns the
    usual (output_objects, status) tuple.
    """

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    defaults = signature()[1]
    label = "%s" % configuration.site_vgrid_label
    title_entry = find_entry(output_objects, 'title')
    title_entry['text'] = "Remove %s Trigger" % label
    output_objects.append({'object_type': 'header',
                           'text': 'Remove %s Trigger' % label})
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    vgrid_name = accepted['vgrid_name'][-1]
    rule_id = accepted['rule_id'][-1]

    # Modifying state: only CSRF-protected POST is acceptable
    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append(
            {'object_type': 'error_text', 'text':
             '''Only accepting CSRF-filtered POST requests
to prevent unintended updates'''})
        return (output_objects, returnvalues.CLIENT_ERROR)

    logger.info("rmvgridtrigger %s %s" % (vgrid_name, rule_id))

    # Validity of user and vgrid names is checked in this init function so
    # no need to worry about illegal directory traversal through variables

    (init_ok, init_msg, _) = init_vgrid_script_add_rem(
        vgrid_name, client_id, rule_id, 'trigger', configuration)
    if not init_ok:
        output_objects.append({'object_type': 'error_text',
                               'text': init_msg})
        return (output_objects, returnvalues.CLIENT_ERROR)
    if init_msg:
        # In case of warnings, init_msg is non-empty while init_ok is True
        output_objects.append({'object_type': 'warning', 'text': init_msg})

    # if we get here user is either vgrid owner or has rule ownership

    # can't remove if not a participant
    if not vgrid_is_trigger(vgrid_name, rule_id, configuration,
                            recursive=False):
        missing_msg = '%s is not a trigger in %s %s.' % (rule_id, vgrid_name,
                                                         label)
        output_objects.append({'object_type': 'error_text',
                               'text': missing_msg})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # remove
    (removed_ok, removed_msg) = vgrid_remove_triggers(configuration,
                                                      vgrid_name, [rule_id])
    if not removed_ok:
        logger.error('%s failed to remove trigger: %s' % (client_id,
                                                          removed_msg))
        output_objects.append({'object_type': 'error_text',
                               'text': removed_msg})
        # Trigger may only be removable from the vgrid that defines it
        inherit_msg = '''%(rule_id)s might be listed as a trigger of this
%(vgrid_label)s because it is a trigger of a parent %(vgrid_label)s. Removal
must be performed from the most significant %(vgrid_label)s possible.''' % \
            {'rule_id': rule_id, 'vgrid_label': label}
        output_objects.append({'object_type': 'error_text',
                               'text': inherit_msg})
        return (output_objects, returnvalues.SYSTEM_ERROR)

    logger.info('%s removed trigger: %s' % (client_id, rule_id))
    output_objects.append({'object_type': 'text', 'text':
                           'Trigger %s successfully removed from %s %s!' %
                           (rule_id, vgrid_name, label)})
    output_objects.append({'object_type': 'link',
                           'destination':
                           'vgridworkflows.py?vgrid_name=%s' % vgrid_name,
                           'text': 'Back to workflows for %s' % vgrid_name})
    return (output_objects, returnvalues.OK)
def main(client_id, user_arguments_dict):
    """Main function used by front end.

    Submits a runtime environment test job for each of the caller's
    selected resources so support for re_name can be verified. Returns the
    usual (output_objects, status) tuple.

    Fixes relative to the previous revision:
    - implicit string concatenation lacked a space and rendered
      "resourceconfiguration" in the user-facing error message
    - a success message was emitted even when new_job failed
    - the function fell off the end without returning the result tuple
    """

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    title_entry = find_entry(output_objects, 'title')
    title_entry['text'] = 'Runtime env support'
    output_objects.append({'object_type': 'header', 'text':
                           'Test runtime environment support'})
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        logger.warning('%s invalid input: %s' % (op_name, accepted))
        return (accepted, returnvalues.CLIENT_ERROR)

    resource_list = accepted['unique_resource_name']
    re_name = accepted['re_name'][-1]
    status = returnvalues.OK
    visible_res = user_visible_res_confs(configuration, client_id)

    # Modifying state: require CSRF-protected POST
    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({'object_type': 'error_text', 'text':
                               '''Only accepting CSRF-filtered POST requests
to prevent unintended updates'''})
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not re_name:
        output_objects.append({'object_type': 'error_text', 'text':
                               'Please specify the name of the runtime environment!'})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # re_name doubles as a directory name below - block traversal attempts
    if not valid_dir_input(configuration.re_home, re_name):
        logger.warning(
            "possible illegal directory traversal attempt re_name '%s'"
            % re_name)
        output_objects.append({'object_type': 'error_text', 'text':
                               'Illegal runtime environment name: "%s"'
                               % re_name})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name
    base_dir = os.path.abspath(os.path.join(configuration.user_home,
                                            client_dir)) + os.sep

    for visible_res_name in resource_list:
        if not visible_res_name in visible_res.keys():
            logger.warning('User %s not allowed to view %s (%s)' %
                           (client_id, visible_res_name, visible_res.keys()))
            output_objects.append({'object_type': 'error_text', 'text':
                                   'invalid resource %s' % visible_res_name})
            status = returnvalues.CLIENT_ERROR
            continue

        if not is_owner(client_id, visible_res_name,
                        configuration.resource_home, logger):
            output_objects.append({'object_type': 'error_text', 'text':
                                   'You must be an owner of the resource to validate runtime '
                                   'environment support. (resource %s)'
                                   % visible_res_name})
            status = returnvalues.CLIENT_ERROR
            continue

        (re_dict, re_msg) = get_re_dict(re_name, configuration)
        if not re_dict:
            output_objects.append({'object_type': 'error_text', 'text':
                                   'Could not get re_dict %s' % re_msg})
            status = returnvalues.SYSTEM_ERROR
            continue

        if not testresource_has_re_specified(visible_res_name, re_name,
                                             configuration):
            # NOTE: previously rendered "resourceconfiguration" - a space
            # was missing at the implicit string concatenation boundary
            output_objects.append({'object_type': 'error_text', 'text':
                                   'You must specify the runtime environment in the resource '
                                   'configuration before verifying if it is supported!'})
            status = returnvalues.CLIENT_ERROR
            continue

        # TESTPROCEDURE is stored as a list of base64 chunks - join and decode
        base64string = ''.join(re_dict['TESTPROCEDURE'])
        mrslfile_content = base64.decodestring(base64string)

        # Write the decoded mRSL to a temp file and stage the verify files
        try:
            (filehandle, mrslfile) = tempfile.mkstemp(text=True)
            os.write(filehandle, mrslfile_content)
            os.close(filehandle)
            create_verify_files(['status', 'stdout', 'stderr'], re_name,
                                re_dict, base_dir, logger)
        except Exception as exc:
            output_objects.append({'object_type': 'error_text', 'text':
                                   'Could not write test job for %s: %s'
                                   % (visible_res_name, exc)})
            status = returnvalues.SYSTEM_ERROR
            continue

        forceddestination_dict = {'UNIQUE_RESOURCE_NAME': visible_res_name,
                                  'RE_NAME': re_name}

        (success, msg) = new_job(mrslfile, client_id, configuration,
                                 forceddestination_dict)
        if not success:
            output_objects.append({'object_type': 'error_text', 'text':
                                   'Submit test job failed %s: %s'
                                   % (visible_res_name, msg)})
            status = returnvalues.SYSTEM_ERROR

        # Best-effort cleanup of the temporary mRSL file
        try:
            os.remove(mrslfile)
        except Exception:
            pass

        if not success:
            # Don't report successful submission after a failed new_job
            continue

        output_objects.append(
            {'object_type': 'text', 'text':
             'Runtime environment test job for %s successfuly submitted! %s'
             % (visible_res_name, msg)})

    return (output_objects, status)
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    # op_menu=client_id: menu rendering depends on whether a real user or an
    # anonymous sharelink visitor is calling
    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False,
                                  op_menu=client_id)
    client_dir = client_id_dir(client_id)
    status = returnvalues.OK
    defaults = signature()[1]
    # Plain validate_input (no cert check): sharelink callers are anonymous
    (validate_status, accepted) = validate_input(
        user_arguments_dict,
        defaults,
        output_objects,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    flags = ''.join(accepted['flags'])
    patterns = accepted['path']
    current_dir = accepted['current_dir']
    share_id = accepted['share_id'][-1]

    # Modifying state: require CSRF-protected POST
    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append(
            {'object_type': 'error_text', 'text':
             '''Only accepting CSRF-filtered POST requests
to prevent unintended updates'''})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Either authenticated user client_id set or sharelink ID

    if client_id:
        # Authenticated user: operate inside the user's own home
        user_id = client_id
        target_dir = client_id_dir(client_id)
        base_dir = configuration.user_home
        id_query = ''
        page_title = 'Remove User Directory'
        userstyle = True
        widgets = True
    elif share_id:
        # Anonymous sharelink access: decode access mode from the share ID
        try:
            (share_mode, _) = extract_mode_id(configuration, share_id)
        except ValueError, err:
            logger.error('%s called with invalid share_id %s: %s' % \
                         (op_name, share_id, err))
            output_objects.append({'object_type': 'error_text', 'text':
                                   'Invalid sharelink ID: %s' % share_id})
            return (output_objects, returnvalues.CLIENT_ERROR)

        # TODO: load and check sharelink pickle (currently requires client_id)

        user_id = 'anonymous user through share ID %s' % share_id
        # Directory removal requires a writable sharelink
        if share_mode == 'read-only':
            logger.error('%s called without write access: %s' % \
                         (op_name, accepted))
            output_objects.append({'object_type': 'error_text', 'text':
                                   'No write access!'})
            return (output_objects, returnvalues.CLIENT_ERROR)
        target_dir = os.path.join(share_mode, share_id)
        base_dir = configuration.sharelink_home
        id_query = '?share_id=%s' % share_id
        page_title = 'Remove Shared Directory'
        userstyle = False
        widgets = False
        # NOTE(review): the actual directory removal logic appears to
        # continue beyond this chunk
def main(client_id, user_arguments_dict):
    """Main function used by front end.

    Detaches a single resource from a vgrid after verifying CSRF
    protection, caller privileges, vgrid settings and membership. Returns
    the usual (output_objects, status) tuple.
    """

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    defaults = signature()[1]
    label = "%s" % configuration.site_vgrid_label
    title_entry = find_entry(output_objects, 'title')
    title_entry['text'] = "Remove %s Resource" % label
    output_objects.append({'object_type': 'header',
                           'text': 'Remove %s Resource' % label})
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    vgrid_name = accepted['vgrid_name'][-1]
    unique_resource_name = accepted['unique_resource_name'][-1].lower()

    # Modifying state: only CSRF-protected POST is acceptable
    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({'object_type': 'error_text', 'text':
                               '''Only accepting CSRF-filtered POST requests
to prevent unintended updates'''})
        return (output_objects, returnvalues.CLIENT_ERROR)

    user_map = get_full_user_map(configuration)
    user_dict = user_map.get(client_id, None)

    # Optional site-wide limitation of manage vgrid permission
    if not user_dict or \
            not vgrid_manage_allowed(configuration, user_dict):
        logger.warning("user %s is not allowed to manage vgrids!"
                       % client_id)
        output_objects.append({'object_type': 'error_text', 'text':
                               'Only privileged users can manage %ss'
                               % label})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # make sure vgrid settings allow this owner to edit resources
    (allow_status, allow_msg) = allow_resources_adm(configuration,
                                                    vgrid_name, client_id)
    if not allow_status:
        output_objects.append({'object_type': 'error_text',
                               'text': allow_msg})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Validity of user and vgrid names is checked in this init function so
    # no need to worry about illegal directory traversal through variables

    (init_ok, init_msg, _) = init_vgrid_script_add_rem(
        vgrid_name, client_id, unique_resource_name, 'resource',
        configuration)
    if not init_ok:
        output_objects.append({'object_type': 'error_text',
                               'text': init_msg})
        return (output_objects, returnvalues.CLIENT_ERROR)
    if init_msg:
        # In case of warnings, init_msg is non-empty while init_ok is True
        output_objects.append({'object_type': 'warning', 'text': init_msg})

    if not vgrid_is_owner(vgrid_name, client_id, configuration):
        owner_msg = '''You must be an owner of the %s to
remove a resource!''' % label
        output_objects.append({'object_type': 'error_text',
                               'text': owner_msg})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # don't remove if not a participant
    if not vgrid_is_resource(vgrid_name, unique_resource_name,
                             configuration):
        missing_msg = '%s is not a resource in %s or a parent %s.' % \
            (unique_resource_name, vgrid_name, label)
        output_objects.append({'object_type': 'error_text',
                               'text': missing_msg})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # remove
    (removed_ok, removed_msg) = vgrid_remove_resources(
        configuration, vgrid_name, [unique_resource_name])
    if not removed_ok:
        output_objects.append({'object_type': 'error_text',
                               'text': removed_msg})
        # Resource may only be removable from the vgrid that holds it
        inherit_msg = '''%(res_name)s might be listed as a resource of this
%(vgrid_label)s because it is a resource of a parent %(vgrid_label)s.
Removal must be performed from the most significant %(vgrid_label)s possible.
''' % {'res_name': unique_resource_name, 'vgrid_label': label}
        output_objects.append({'object_type': 'error_text',
                               'text': inherit_msg})
        return (output_objects, returnvalues.SYSTEM_ERROR)

    output_objects.append({'object_type': 'text', 'text':
                           'Resource %s successfully removed from %s %s!' %
                           (unique_resource_name, vgrid_name, label)})
    output_objects.append({'object_type': 'link',
                           'destination':
                           'adminvgrid.py?vgrid_name=%s' % vgrid_name,
                           'text': 'Back to administration for %s'
                           % vgrid_name})
    return (output_objects, returnvalues.OK)
def main(client_id, user_arguments_dict):
    """Main function used by front end.

    Truncates each user file matching the supplied path patterns to the
    requested size (default semantics of file.truncate). Returns the usual
    (output_objects, status) tuple.

    Fixes relative to the previous revision:
    - file handle is now managed with a context manager so it is closed
      even when truncate() raises
    - the function previously fell off the end without returning the
      result tuple
    """

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    client_dir = client_id_dir(client_id)
    status = returnvalues.OK
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    flags = ''.join(accepted['flags'])
    size = int(accepted['size'][-1])
    pattern_list = accepted['path']

    # Modifying state: require CSRF-protected POST
    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({'object_type': 'error_text', 'text':
                               '''Only accepting CSRF-filtered POST requests
to prevent unintended updates'''})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name
    base_dir = os.path.abspath(os.path.join(configuration.user_home,
                                            client_dir)) + os.sep

    if verbose(flags):
        for flag in flags:
            output_objects.append({'object_type': 'text', 'text':
                                   '%s using flag: %s' % (op_name, flag)})

    if size < 0:
        output_objects.append({'object_type': 'error_text', 'text':
                               'size must be non-negative'})
        return (output_objects, returnvalues.CLIENT_ERROR)

    for pattern in pattern_list:

        # Check directory traversal attempts before actual handling to avoid
        # leaking information about file system layout while allowing
        # consistent error messages
        unfiltered_match = glob.glob(base_dir + pattern)
        match = []
        for server_path in unfiltered_match:
            # IMPORTANT: path must be expanded to abs for proper chrooting
            abs_path = os.path.abspath(server_path)
            if not valid_user_path(configuration, abs_path, base_dir, True):
                # out of bounds - save user warning for later to allow
                # partial match:
                # ../*/* is technically allowed to match own files.
                logger.warning('%s tried to %s restricted path %s ! (%s)' %
                               (client_id, op_name, abs_path, pattern))
                continue
            match.append(abs_path)

        # Now actually treat list of allowed matchings and notify if no
        # (allowed) match
        if not match:
            output_objects.append({'object_type': 'file_not_found',
                                   'name': pattern})
            status = returnvalues.FILE_NOT_FOUND

        for abs_path in match:
            relative_path = abs_path.replace(base_dir, '')
            if verbose(flags):
                output_objects.append({'object_type': 'file',
                                       'name': relative_path})
            if not check_write_access(abs_path):
                logger.warning('%s called without write access: %s' %
                               (op_name, abs_path))
                output_objects.append(
                    {'object_type': 'error_text', 'text':
                     'cannot truncate "%s": inside a read-only location!' %
                     pattern})
                status = returnvalues.CLIENT_ERROR
                continue
            try:
                # 'r+' keeps existing content up to size; with-block ensures
                # the handle is closed even if truncate fails
                with open(abs_path, 'r+') as fd:
                    fd.truncate(size)
                logger.info('%s %s %s done' % (op_name, abs_path, size))
            except Exception as exc:
                output_objects.append({'object_type': 'error_text', 'text':
                                       "%s: '%s': %s" % (op_name,
                                                         relative_path,
                                                         exc)})
                logger.error("%s: failed on '%s': %s" % (op_name,
                                                         relative_path, exc))
                status = returnvalues.SYSTEM_ERROR
                continue

    return (output_objects, status)
def main(client_id, user_arguments_dict):
    """Main function used by front end.

    Removes cert_id as owner of vgrid_name. If cert_id is one of several
    direct owners, the ownership entry is dropped while any inherited parent
    membership is preserved. If cert_id is the last direct owner, the whole
    vgrid is deleted after an explicit confirmation round-trip (force flag),
    provided it has no sub-vgrids, members, resources or write restrictions.
    """
    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    defaults = signature()[1]
    title_entry = find_entry(output_objects, 'title')
    label = "%s" % configuration.site_vgrid_label
    title_entry['text'] = "Remove %s Owner" % label
    output_objects.append({'object_type': 'header', 'text':
                           'Remove %s Owner' % label})
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    vgrid_name = accepted['vgrid_name'][-1]
    flags = ''.join(accepted['flags'])
    cert_id = accepted['cert_id'][-1]
    cert_dir = client_id_dir(cert_id)
    # inherited vgrid membership: set later to the name of the parent vgrid
    # (or this vgrid) that keeps cert_id's member access alive
    inherit_vgrid_member = False

    # Reject anything but CSRF-filtered POST to prevent unintended updates
    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append(
            {'object_type': 'error_text', 'text': '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # always allow owner to remove self; removing *others* additionally
    # requires manage permission and vgrid admin settings below
    if client_id != cert_id:
        user_map = get_full_user_map(configuration)
        user_dict = user_map.get(client_id, None)
        # Optional site-wide limitation of manage vgrid permission
        if not user_dict or \
                not vgrid_manage_allowed(configuration, user_dict):
            logger.warning("user %s is not allowed to manage vgrids!"
                           % client_id)
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 'Only privileged users can manage %ss' % label})
            return (output_objects, returnvalues.CLIENT_ERROR)

        # make sure vgrid settings allow this owner to edit other owners
        (allow_status, allow_msg) = allow_owners_adm(configuration,
                                                     vgrid_name, client_id)
        if not allow_status:
            output_objects.append({'object_type': 'error_text', 'text':
                                   allow_msg})
            return (output_objects, returnvalues.CLIENT_ERROR)

    # Validity of user and vgrid names is checked in this init function so
    # no need to worry about illegal directory traversal through variables
    (ret_val, msg, _) = \
        init_vgrid_script_add_rem(vgrid_name, client_id, cert_id, 'owner',
                                  configuration)
    if not ret_val:
        output_objects.append({'object_type': 'error_text', 'text': msg})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # don't remove if not already an owner
    if not vgrid_is_owner(vgrid_name, cert_id, configuration):
        logger.warning('%s is not allowed to remove owner %s from %s' %
                       (client_id, cert_id, vgrid_name))
        output_objects.append({'object_type': 'error_text', 'text':
                               '%s is not an owner of %s or a parent %s.' %
                               (cert_id, vgrid_name, label)})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # we need the local owners file to detect inherited ownerships:
    # owners_direct is non-recursive, owners includes parent vgrid owners
    (owners_status, owners_direct) = vgrid_owners(vgrid_name, configuration,
                                                  False)
    (all_status, owners) = vgrid_owners(vgrid_name, configuration, True)
    if not owners_status or not all_status:
        logger.error('Error loading owners for %s: %s / %s' %
                     (vgrid_name, owners_direct, owners))
        output_objects.append(
            {'object_type': 'error_text', 'text':
             'An internal error occurred, error conditions have been logged.'})
        output_objects.append({'object_type': 'text', 'text': '''
You can help us fix the problem by notifying the administrators
via mail about what you wanted to do when the error happened.'''})
        return (output_objects, returnvalues.CLIENT_ERROR)

    logger.info('%s removing owner %s from %s' % (client_id, cert_id,
                                                  vgrid_name))

    # find out whether to just remove an owner or delete the whole thing.
    # ask about delete if last or no direct owners.

    if len(owners_direct) > 1:
        logger.debug('Removing %s, one of several owners, from %s.' %
                     (cert_id, vgrid_name))
        if not (cert_id in owners_direct):
            # the owner owns an upper vgrid, ownership is inherited
            # cannot remove, not last (inherited) owner
            logger.warning('Cannot delete: Inherited ownership.' +
                           '\n Owners: %s,\n Direct owners: %s.'
                           % (owners, owners_direct))
            output_objects.append({'object_type': 'error_text', 'text':
                                   '''%s is owner of a parent %s.
Owner removal has to be performed at the topmost vgrid''' %
                                   (cert_id, label)})
            return (output_objects, returnvalues.CLIENT_ERROR)
        else:
            # Remove any tracker admin rights in all three tracker homes
            if configuration.trac_admin_path:
                public_tracker_dir = \
                    os.path.abspath(os.path.join(
                        configuration.vgrid_public_base, vgrid_name,
                        '.vgridtracker'))
                private_tracker_dir = \
                    os.path.abspath(os.path.join(
                        configuration.vgrid_private_base, vgrid_name,
                        '.vgridtracker'))
                vgrid_tracker_dir = \
                    os.path.abspath(os.path.join(
                        configuration.vgrid_files_home, vgrid_name,
                        '.vgridtracker'))
                for tracker_dir in [public_tracker_dir, private_tracker_dir,
                                    vgrid_tracker_dir]:
                    if not rm_tracker_admin(configuration, cert_id,
                                            vgrid_name, tracker_dir,
                                            output_objects):
                        return (output_objects, returnvalues.SYSTEM_ERROR)

            # NOTE: trailing os.sep is significant for chroot-style prefix
            # comparisons elsewhere in this code base
            user_dir = os.path.abspath(os.path.join(configuration.user_home,
                                                    cert_dir)) + os.sep

            # Do not touch vgrid share if still a member of a parent vgrid

            if vgrid_is_member(vgrid_name, cert_id, configuration):
                # list is in top-down order
                parent_vgrids = vgrid_list_parents(vgrid_name, configuration)
                inherit_vgrid_member = vgrid_name
                for parent in parent_vgrids:
                    if vgrid_is_member(parent, cert_id, configuration,
                                       recursive=False):
                        inherit_vgrid_member = parent
                        break
                output_objects.append(
                    {'object_type': 'text', 'text':
                     '''NOTE: %s is still a member of parent %s %s.
Preserving access to corresponding %s.''' % (cert_id, label,
                                             inherit_vgrid_member, label)})
            else:
                # no parent membership left - drop share and web symlinks
                (success, msg) = unlink_share(user_dir, vgrid_name)
                if not success:
                    logger.error('Could not remove share link: %s.' % msg)
                    output_objects.append({'object_type': 'error_text',
                                           'text':
                                           'Could not remove share links: %s.'
                                           % msg})
                    return (output_objects, returnvalues.SYSTEM_ERROR)

                # unlink shared web folders

                (success, msg) = unlink_web_folders(user_dir, vgrid_name)
                if not success:
                    logger.error('Could not remove web links: %s.' % msg)
                    output_objects.append({'object_type': 'error_text',
                                           'text':
                                           'Could not remove web links: %s.'
                                           % msg})
                    return (output_objects, returnvalues.SYSTEM_ERROR)

            # remove user from saved owners list
            (rm_status, rm_msg) = vgrid_remove_owners(configuration,
                                                      vgrid_name, [cert_id])
            if not rm_status:
                output_objects.append({'object_type': 'error_text', 'text':
                                       '%s of owners of %s'
                                       % (rm_msg, vgrid_name)})
                return (output_objects, returnvalues.SYSTEM_ERROR)

            # Any parent vgrid membership is left untouched here as we only
            # force a normal refresh in unmap_inheritance
            unmap_inheritance(configuration, vgrid_name, cert_id)

            output_objects.append({'object_type': 'text', 'text':
                                   '%s successfully removed as owner of %s!'
                                   % (cert_id, vgrid_name)})
            output_objects.append({'object_type': 'link', 'destination':
                                   'adminvgrid.py?vgrid_name=%s' % vgrid_name,
                                   'text': 'Back to administration for %s'
                                   % vgrid_name})
            return (output_objects, returnvalues.OK)
    else:
        # no more direct owners - we try to remove this VGrid
        logger.debug('Leave %s from %s with no more direct owners: delete' %
                     (vgrid_name, cert_id))

        # first round-trip: without the force flag only show a confirmation
        # form reusing this request's CSRF token
        if not force(flags):
            output_objects.append({'object_type': 'text', 'text': '''
No more direct owners of %s - leaving will result in the %s getting
deleted. Please use either of the links below to confirm or cancel.
''' % (vgrid_name, label)})
            # Reuse csrf token from this request
            target_op = 'rmvgridowner'
            js_name = target_op
            csrf_token = accepted[csrf_field][-1]
            helper = html_post_helper(js_name, '%s.py' % target_op,
                                      {'vgrid_name': vgrid_name,
                                       'cert_id': cert_id, 'flags': 'f',
                                       csrf_field: csrf_token})
            output_objects.append({'object_type': 'html_form', 'text':
                                   helper})
            output_objects.append({'object_type': 'link', 'destination':
                                   "javascript: %s();" % js_name,
                                   'class': 'removelink iconspace', 'text':
                                   'Really leave and delete %s' % vgrid_name})
            output_objects.append({'object_type': 'text', 'text': ''})
            output_objects.append({'object_type': 'link', 'destination':
                                   'adminvgrid.py?vgrid_name=%s' % vgrid_name,
                                   'text': 'Back to administration for %s'
                                   % vgrid_name})
            return (output_objects, returnvalues.OK)

        # check if any resources participate or sub-vgrids depend on this one

        (list_status, subs) = vgrid_list_subvgrids(vgrid_name, configuration)

        if not list_status:
            logger.error('Error loading sub-vgrid for %s: %s)' %
                         (vgrid_name, subs))
            output_objects.append({'object_type': 'error_text', 'text': '''
An internal error occurred, error conditions have been logged.'''})
            output_objects.append({'object_type': 'text', 'text': '''
You can help us fix the problem by notifying the administrators
via mail about what you wanted to do when the error happened.'''})
            return (output_objects, returnvalues.CLIENT_ERROR)

        if len(subs) > 0:
            logger.debug('Cannot delete: still has sub-vgrids: %s' % subs)
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 '%s has one or more child %ss and cannot be deleted.' %
                 (vgrid_name, label)})
            output_objects.append(
                {'object_type': 'text', 'text':
                 '''To leave (and delete) %s first remove all its children:
%s.''' % (vgrid_name, ', '.join(subs))})
            return (output_objects, returnvalues.CLIENT_ERROR)

        # we consider the local members and resources here, not inherited ones

        (member_status, members_direct) = vgrid_members(vgrid_name,
                                                        configuration, False)
        (resource_status, resources_direct) = vgrid_resources(vgrid_name,
                                                              configuration,
                                                              False)
        if not member_status or not resource_status:
            logger.warning('failed to load %s members or resources: %s %s' %
                           (vgrid_name, members_direct, resources_direct))
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 'could not load %s members or resources for %s.' %
                 (label, vgrid_name)})
            return (output_objects, returnvalues.SYSTEM_ERROR)
        if len(resources_direct) > 0:
            logger.debug('Cannot delete: still has direct resources %s.' %
                         resources_direct)
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 '%s still has resources and cannot be deleted.' %
                 vgrid_name})
            output_objects.append({'object_type': 'text', 'text': '''
To leave (and delete) %s, first remove the participating resources.''' %
                                   vgrid_name})
            return (output_objects, returnvalues.CLIENT_ERROR)

        if len(members_direct) > 0:
            logger.debug('Cannot delete: still has direct members %s.' %
                         members_direct)
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 '%s still has members and cannot be deleted.' % vgrid_name})
            output_objects.append({'object_type': 'text', 'text': '''
To leave (and delete) %s, first remove all members.''' % vgrid_name})
            return (output_objects, returnvalues.CLIENT_ERROR)

        # Deleting write restricted VGrid is not allowed
        (load_status, saved_settings) = vgrid_settings(vgrid_name,
                                                       configuration,
                                                       recursive=True,
                                                       as_dict=True)
        if not load_status:
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 'failed to load saved %s settings' % vgrid_name})
            return (output_objects, returnvalues.SYSTEM_ERROR)
        # anything but the keyword_members default means write restrictions
        if saved_settings.get('write_shared_files', keyword_members) != \
                keyword_members:
            logger.warning("%s can't delete vgrid %s - write limited!"
                           % (client_id, vgrid_name))
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 """You can't delete write-restricted %ss - first remove any
write restrictions for shared files on the admin page and then try
again.""" % label})
            return (output_objects, returnvalues.CLIENT_ERROR)

        # When reaching here, OK to remove the VGrid.
        # if top-level: unlink, remove all files and directories,
        # in all cases: remove configuration entry for the VGrid
        # unlink and move new-style vgrid sub dir to parent

        logger.info('Deleting %s and all related data as requested by %s' %
                    (vgrid_name, cert_id))

        if (cert_id in owners_direct):

            # owner owns this vgrid, direct ownership

            logger.debug('%s looks like a top-level vgrid.' % vgrid_name)
            logger.debug('Deleting all related files.')

            user_dir = os.path.abspath(os.path.join(configuration.user_home,
                                                    cert_dir)) + os.sep
            (share_lnk, share_msg) = unlink_share(user_dir, vgrid_name)
            (web_lnk, web_msg) = unlink_web_folders(user_dir, vgrid_name)
            (files_act, files_msg) = abandon_vgrid_files(vgrid_name,
                                                         configuration)
        else:

            # owner owns some parent vgrid - ownership is only inherited

            logger.debug('%s looks like a sub-vgrid, ownership inherited.'
                         % vgrid_name)
            logger.debug('Only removing entry, leaving files in place.')
            share_lnk, share_msg = True, ''
            web_lnk, web_msg = True, ''
            (files_act, files_msg) = inherit_vgrid_files(vgrid_name,
                                                         configuration)

        (removed, entry_msg) = remove_vgrid_entry(vgrid_name, configuration)

        output_objects.append({'object_type': 'text', 'text':
                               '%s has been removed with last owner.'
                               % vgrid_name})

        output_objects.append({'object_type': 'link',
                               'destination': 'vgridman.py',
                               'text': 'Back to the overview.'})

        # collect all partial failures into one logged report
        if not share_lnk or not web_lnk or not files_act or not removed:
            err = '\n'.join([share_msg, web_msg, files_msg, entry_msg])
            logger.error('Errors while removing %s:\n%s.' % (vgrid_name,
                                                             err))

            output_objects.append({'object_type': 'error_text', 'text': '''
An internal error occurred, error conditions have been logged.'''})
            output_objects.append({'object_type': 'text', 'text': '''
You can help us fix the problem by notifying the administrators
via mail about what you wanted to do when the error happened.'''})
            return (output_objects, returnvalues.CLIENT_ERROR)
        else:

            # Remove vgrid from vgrid cache (after deleting all)

            unmap_vgrid(configuration, vgrid_name)
            return (output_objects, returnvalues.OK)
def main(client_id, user_arguments_dict): """Main function used by front end""" (configuration, logger, output_objects, op_name) = \ initialize_main_variables(client_id, op_header=False) defaults = signature()[1] title_entry = find_entry(output_objects, 'title') label = "%s" % configuration.site_vgrid_label title_entry['text'] = "Remove %s Member" % label output_objects.append({ 'object_type': 'header', 'text': 'Remove %s Member' % label }) (validate_status, accepted) = validate_input_and_cert( user_arguments_dict, defaults, output_objects, client_id, configuration, allow_rejects=False, ) if not validate_status: return (accepted, returnvalues.CLIENT_ERROR) vgrid_name = accepted['vgrid_name'][-1] cert_id = accepted['cert_id'][-1] cert_dir = client_id_dir(cert_id) if not safe_handler(configuration, 'post', op_name, client_id, get_csrf_limit(configuration), accepted): output_objects.append({ 'object_type': 'error_text', 'text': '''Only accepting CSRF-filtered POST requests to prevent unintended updates''' }) return (output_objects, returnvalues.CLIENT_ERROR) # always allow member to remove self if client_id != cert_id: user_map = get_full_user_map(configuration) user_dict = user_map.get(client_id, None) # Optional site-wide limitation of manage vgrid permission if not user_dict or \ not vgrid_manage_allowed(configuration, user_dict): logger.warning("user %s is not allowed to manage vgrids!" 
% client_id) output_objects.append({ 'object_type': 'error_text', 'text': 'Only privileged users can manage %ss' % label }) return (output_objects, returnvalues.CLIENT_ERROR) # make sure vgrid settings allow this owner to edit other members (allow_status, allow_msg) = allow_members_adm(configuration, vgrid_name, client_id) if not allow_status: output_objects.append({ 'object_type': 'error_text', 'text': allow_msg }) return (output_objects, returnvalues.CLIENT_ERROR) # Validity of user and vgrid names is checked in this init function so # no need to worry about illegal directory traversal through variables (ret_val, msg, _) = \ init_vgrid_script_add_rem(vgrid_name, client_id, cert_id, 'member', configuration) if not ret_val: output_objects.append({'object_type': 'error_text', 'text': msg}) return (output_objects, returnvalues.CLIENT_ERROR) # don't remove if not a member if not vgrid_is_member(vgrid_name, cert_id, configuration): output_objects.append({ 'object_type': 'error_text', 'text': '%s is not a member of %s or a parent %s.' % (cert_id, vgrid_name, label) }) return (output_objects, returnvalues.CLIENT_ERROR) # owner of subvgrid? (list_status, subvgrids) = vgrid_list_subvgrids(vgrid_name, configuration) if not list_status: output_objects.append({ 'object_type': 'error_text', 'text': 'Error getting list of sub%ss: %s' % (label, subvgrids) }) return (output_objects, returnvalues.SYSTEM_ERROR) # TODO: we DO allow ownership of sub vgrids with parent membership so we # should support the (cumbersome) relinking of vgrid shares here. Leave it # to user to do it manually for now with temporary removal of ownership for subvgrid in subvgrids: if vgrid_is_owner(subvgrid, cert_id, configuration, recursive=False): output_objects.append({ 'object_type': 'error_text', 'text': """%(cert_id)s is already an owner of a sub-%(vgrid_label)s ('%(subvgrid)s'). 
While we DO support members being owners of sub-%(vgrid_label)ss, we do not support removing parent %(vgrid_label)s members at the moment. Please (temporarily) remove the person as owner of all sub-%(vgrid_label)ss first and then try this operation again.""" % { 'cert_id': cert_id, 'subvgrid': subvgrid, 'vgrid_label': label } }) return (output_objects, returnvalues.CLIENT_ERROR) # Please note that base_dir must end in slash to avoid access to other # vgrid dirs when own name is a prefix of another name base_dir = os.path.abspath( os.path.join(configuration.vgrid_home, vgrid_name)) + os.sep # remove symlink from users home directory to vgrid directory # Please note that base_dir must end in slash to avoid access to other # user dirs when own name is a prefix of another user name user_dir = os.path.abspath(os.path.join(configuration.user_home, cert_dir)) + os.sep dst = user_dir + vgrid_name try: os.remove(dst) except Exception, exc: # ouch, not good. Email admin? pass