def main(client_id, user_arguments_dict):
    """Main function used by front end.

    Validates the CGI-style input, rejects resource/exe names that look like
    directory traversal attempts, loads the resource configuration and
    verifies that the request source IP matches the registered resource (or
    its configured frontend proxy) before accepting the posted PGID.

    NOTE(review): this chunk ends right after the source-IP check - the code
    that actually stores the PGID is presumably further down; confirm in the
    full file.
    """
    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False, op_title=False,
                                  op_menu=client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input(user_arguments_dict,
                                                 defaults, output_objects,
                                                 allow_rejects=False)
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    remote_ip = str(os.getenv('REMOTE_ADDR'))
    # validate_input delivers values as lists - take the last occurrence
    res_type = accepted['type'][-1]
    unique_resource_name = accepted['unique_resource_name'][-1]
    exe_name = accepted['exe_name'][-1]
    pgid = accepted['pgid'][-1]

    status = returnvalues.OK

    # Web format for cert access and no header for SID access
    if client_id:
        output_objects.append({
            'object_type': 'title',
            'text': 'Load resource script PGID'
        })
        output_objects.append({
            'object_type': 'header',
            'text': 'Load resource script PGID'
        })
    else:
        output_objects.append({'object_type': 'start'})

    # Please note that base_dir must end in slash to avoid access to other
    # resource dirs when own name is a prefix of another resource name

    base_dir = os.path.abspath(
        os.path.join(configuration.resource_home,
                     unique_resource_name)) + os.sep

    # We do not have a trusted base dir here since there's no certificate data.
    # Manually check input variables

    if not valid_dir_input(configuration.resource_home,
                           unique_resource_name):

        # out of bounds - rogue resource!?!?

        msg = 'invalid unique_resource_name! %s' % unique_resource_name
        logger.error('putrespgid FE called with illegal parameter(s) in what appears to be an illegal directory traversal attempt!: unique_resource_name %s, exe %s, client_id %s' \
                     % (unique_resource_name, exe_name, client_id))
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not valid_dir_input(base_dir, 'EXE_%s.PGID' % exe_name):

        # out of bounds - rogue resource!?!?

        msg = 'invalid unique_resource_name / exe_name! %s / %s' \
            % (unique_resource_name, exe_name)
        logger.error('putrespgid EXE called with illegal parameter(s) in what appears to be an illegal directory traversal attempt!: unique_resource_name %s, exe %s, client_id %s' \
                     % (unique_resource_name, exe_name, client_id))
        return (output_objects, returnvalues.CLIENT_ERROR)

    (load_status, resource_conf) = \
        get_resource_configuration(configuration.resource_home,
                                   unique_resource_name, logger)
    if not load_status:
        logger.error("Invalid putrespgid - no resouce_conf for: %s : %s" % \
                     (unique_resource_name, resource_conf))
        output_objects.append({
            'object_type': 'error_text',
            'text': 'invalid request: no such resource!'
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Check that resource address matches request source to make DoS harder
    proxy_fqdn = resource_conf.get('FRONTENDPROXY', None)
    try:
        check_source_ip(remote_ip, unique_resource_name, proxy_fqdn)
    except ValueError, vae:
        logger.error("Invalid put pgid: %s" % vae)
        output_objects.append({
            'object_type': 'error_text',
            'text': 'invalid request: %s' % vae
        })
        return (output_objects, returnvalues.CLIENT_ERROR)
for unique_resource_name in os.listdir(configuration.resource_home): res_dir = os.path.realpath(configuration.resource_home + os.sep + unique_resource_name) # skip all dot dirs - they are from repos etc and _not_ jobs if res_dir.find(os.sep + '.') != -1: continue if not os.path.isdir(res_dir): continue dir_name = os.path.basename(res_dir) if sandbox_resource(dir_name): continue try: (status, res_conf) = \ get_resource_configuration(configuration.resource_home, unique_resource_name, logger) if not status: continue if res_conf.has_key('SSHMULTIPLEX') and res_conf['SSHMULTIPLEX']: print 'adding multiplexing resource %s' % unique_resource_name fqdn = res_conf['HOSTURL'] res_conf['HOMEDIR'] = res_dir persistent_hosts[fqdn] = res_conf except Exception, err: # else: # print "ignoring non-multiplexing resource %s" % unique_resource_name print "Failed to open resource conf '%s': %s"\ % (unique_resource_name, err)
# TODO: check the status of the specified job(id) and verify it has not previously been executed. # The status must be ? (What about RETRY?) if mrsldict['STATUS'] == 'FINISHED': o.out('requestinteractivejob error! Job already executed!') o.reply_and_exit(o.ERROR) if not is_resource(unique_resource_name, configuration.resource_home): o.out('requestinteractivejob error! Your unique_resource_name ' + ' is not recognized as a %s resource!' % configuration.short_title ) o.reply_and_exit(o.ERROR) (status, resource_config) = \ get_resource_configuration(configuration.resource_home, unique_resource_name, logger) if not status: o.out("No resouce_config for: '" + unique_resource_name + "'\n") o.reply_and_exit(o.ERROR) logger.info('getting exe') (status, exe) = get_resource_exe(resource_config, exe_name, logger) if not status: o.out("No EXE config for: '" + unique_resource_name + "' EXE: '" + exe_name + "'") o.reply_and_exit(o.ERROR) # ################################################ # ## SSH to resource and start interactive job ### # ################################################
def refresh_resource_map(configuration):
    """Refresh map of resources and their configuration. Uses a pickled
    dictionary for efficiency. Resource IDs are stored in their raw
    (non-anonymized form).
    Only update map for resources that updated conf after last map save.

    NOTE(review): the exclusive lock taken below is not released within this
    chunk - presumably the lock handle is closed further down; confirm in
    the full file.
    """
    dirty = []
    map_path = os.path.join(configuration.mig_system_files, "resource.map")
    lock_path = os.path.join(configuration.mig_system_files, "resource.lock")
    lock_handle = open(lock_path, 'a')
    # Serialize refreshes so concurrent callers cannot interleave partial
    # map writes; load_resource_map is told not to lock again.
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    resource_map, map_stamp = load_resource_map(configuration, do_lock=False)

    # Find all resources and their configurations

    all_resources = list_resources(configuration.resource_home,
                                   only_valid=True)
    real_map = real_to_anon_res_map(configuration.resource_home)
    for res in all_resources:
        # Sandboxes do not change their configuration
        if resource_map.has_key(res) and sandbox_resource(res):
            continue
        conf_path = os.path.join(configuration.resource_home, res, "config")
        if not os.path.isfile(conf_path):
            continue
        conf_mtime = os.path.getmtime(conf_path)
        owners_path = os.path.join(configuration.resource_home, res,
                                   "owners")
        if not os.path.isfile(owners_path):
            continue
        owners_mtime = os.path.getmtime(owners_path)
        # init first time
        resource_map[res] = resource_map.get(res, {})
        # Reload conf only if it changed since the last map save
        if not resource_map[res].has_key(CONF) or conf_mtime >= map_stamp:
            (status, res_conf) = get_resource_configuration(
                configuration.resource_home, res, configuration.logger)
            if not status:
                continue
            resource_map[res][CONF] = res_conf
            public_id = res
            # Anonymous resources expose their anonymized alias instead of
            # the raw ID
            if res_conf.get('ANONYMOUS', True):
                public_id = real_map[res]
            resource_map[res][RESID] = public_id
            resource_map[res][MODTIME] = map_stamp
            dirty += [res]
        # Reload owners only if the owners file changed since last save
        if not resource_map[res].has_key(OWNERS) or owners_mtime >= map_stamp:
            owners = load(owners_path)
            resource_map[res][OWNERS] = owners
            resource_map[res][MODTIME] = map_stamp
            dirty += [res]

    # Remove any missing resources from map

    missing_res = [res for res in resource_map.keys() \
                   if not res in all_resources]
    for res in missing_res:
        del resource_map[res]
        dirty += [res]
    # Only rewrite the pickled map on disk if something actually changed
    if dirty:
        try:
            dump(resource_map, map_path)
        except Exception, exc:
            configuration.logger.error("Could not save resource map: %s"
                                       % exc)
def main(client_id, user_arguments_dict):
    """Main function used by front end.

    Validates the CGI-style job-request input, verifies that the caller is a
    known resource and that the request source IP matches the registered
    resource (or its configured frontend proxy).

    NOTE(review): this chunk ends right after the source-IP check - the
    actual job-request handling is presumably further down; confirm in the
    full file.
    """
    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False, op_title=False,
                                  op_menu=client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input(user_arguments_dict,
                                                 defaults, output_objects,
                                                 allow_rejects=False)
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    remote_ip = str(os.getenv('REMOTE_ADDR'))
    # validate_input delivers values as lists - take the last occurrence
    unique_resource_name = accepted['unique_resource_name'][-1]
    exe = accepted['exe'][-1]
    cputime = int(accepted['cputime'][-1])
    nodecount = int(accepted['nodecount'][-1])
    localjobname = accepted['localjobname'][-1]
    sandboxkey = accepted['sandboxkey'][-1]
    execution_delay = int(accepted['execution_delay'][-1])
    exe_pgid = int(accepted['exe_pgid'][-1])

    status = returnvalues.OK

    # No header and footer here
    output_objects.append({'object_type': 'start'})
    output_objects.append({'object_type': 'script_status', 'text': ''})

    # Please note that base_dir must end in slash to avoid access to other
    # resource dirs when own name is a prefix of another resource name

    base_dir = os.path.abspath(os.path.join(configuration.resource_home,
                                            unique_resource_name)) + os.sep

    if not is_resource(unique_resource_name, configuration.resource_home):
        output_objects.append(
            {'object_type': 'error_text', 'text':
             "Failure: You must be an owner of '%s' to get the PGID!" % \
             unique_resource_name})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # is_resource incorporates unique_resource_name verification - no need to
    # specifically check for illegal directory traversal on that variable.

    (load_status, resource_conf) = \
        get_resource_configuration(configuration.resource_home,
                                   unique_resource_name, logger)
    if not load_status:
        logger.error("Invalid requestnewjob - no resouce_conf for: %s : %s" % \
                     (unique_resource_name, resource_conf))
        output_objects.append({'object_type': 'error_text',
                               'text': 'invalid request: no such resource!'})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Check that resource address matches request source to make DoS harder
    proxy_fqdn = resource_conf.get('FRONTENDPROXY', None)
    try:
        check_source_ip(remote_ip, unique_resource_name, proxy_fqdn)
    except ValueError, vae:
        logger.error("Invalid requestnewjob: %s (%s)" % (vae, accepted))
        output_objects.append({'object_type': 'error_text',
                               'text': 'invalid request: %s' % vae})
        return (output_objects, returnvalues.CLIENT_ERROR)