Example no. 1
def copy_file_to_exe(
    local_path,
    dest_path,
    resource_config,
    exe_name,
    logger,
):
    """Copy local_path to dest_path relative to execution_dir on
    exe_name. This needs to go through the resource front end using scp
    and the copy method to the exe depends on the shared fs setting.
    """

    local_filename = os.path.basename(local_path)
    msg = ''
    unique_resource_name = resource_config['HOSTURL'] + '.'\
        + resource_config['HOSTIDENTIFIER']
    (status, exe) = get_resource_exe(resource_config, exe_name, logger)
    if not status:
        msg = "No EXE config for: '" + unique_resource_name + "' EXE: '"\
            + exe_name + "'"
        return (False, msg)

    if dest_path.startswith(os.sep):
        logger.warning('copy_file_to_exe: force relative dest path!')
        dest_path = dest_path.lstrip(os.sep)

    # copy file to frontend

    copy_attempts = 3
    for attempt in range(copy_attempts):
        copy_status = copy_file_to_resource(local_path, dest_path,
                                            resource_config, logger)
        if not copy_status:
            logger.warning('scp of %s failed in attempt %d of %d' %
                           (local_path, attempt + 1, copy_attempts))
        else:
            break

    # Remove temporary file no matter what scp returned

    try:
        os.remove(local_path)
    except Exception, err:
        logger.error('Could not remove %s (%s)' % (local_path, err))
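
A hedged usage sketch for the function above; the resource_config keys are
made-up placeholders, and the call is assumed to keep the (status, message)
convention visible in its error path:

import logging
import os
import tempfile

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('copy-demo')

# write a throwaway file to hand over; copy_file_to_exe removes it again
(fd, tmp_path) = tempfile.mkstemp()
os.write(fd, 'sample payload\n')
os.close(fd)

resource_config = {
    'HOSTURL': 'res.example.org',   # hypothetical host
    'HOSTIDENTIFIER': '0',          # hypothetical identifier
    # ... remaining fields as returned by get_resource_configuration ...
}
(status, msg) = copy_file_to_exe(tmp_path, 'job-dir/payload.txt',
                                 resource_config, 'exe0', logger)
if not status:
    logger.error(msg)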
Example no. 2
def copy_file_to_exe(
    local_filename,
    dest_path,
    resource_config,
    exe_name,
    logger,
    ):
    """Copy local_filename to dest_path relative to execution_dir on
    exe_name. This needs to go through the resource front end using scp
    and the copy method to the exe depends on the shared fs setting.
    """

    msg = ''
    unique_resource_name = resource_config['HOSTURL'] + '.'\
         + resource_config['HOSTIDENTIFIER']
    (status, exe) = get_resource_exe(resource_config, exe_name, logger)
    if not status:
        msg = "No EXE config for: '" + unique_resource_name + "' EXE: '"\
             + exe_name + "'"
        return (False, msg)

    if dest_path.startswith(os.sep):
        logger.warning('copy_file_to_exe: force relative dest path!')
        dest_path = dest_path.lstrip(os.sep)

    # copy file to frontend

    copy_attempts = 3
    for attempt in range(copy_attempts):
        copy_status = copy_file_to_resource(local_filename, dest_path,
                resource_config, logger)
        if not copy_status:
            logger.warning('scp of file failed in attempt %d of %d'
                           % (attempt + 1, copy_attempts))
        else:
            break

    # Remove temporary file no matter what scp returned

    try:
        os.remove(local_filename)
    except Exception, err:
        logger.error('Could not remove %s (%s)' % (local_filename, err))
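
The three-attempt loop above is a recurring pattern in this code base; a
generic helper version, shown only as a sketch (retry_call is not part of
the original code):

def retry_call(func, args, attempts, logger):
    """Call func(*args) up to attempts times and return the last result,
    logging a warning for every failed attempt."""
    result = False
    for attempt in range(attempts):
        result = func(*args)
        if result:
            break
        logger.warning('%s failed in attempt %d of %d'
                       % (func.__name__, attempt + 1, attempts))
    return result

# e.g. the frontend copy above would become:
# copy_status = retry_call(copy_file_to_resource,
#                          (local_filename, dest_path, resource_config,
#                           logger), copy_attempts, logger)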
Example no. 3
if not is_resource(unique_resource_name, configuration.resource_home):
    o.out('requestinteractivejob error! Your unique_resource_name '
          + 'is not recognized as a %s resource!' % configuration.short_title)
    o.reply_and_exit(o.ERROR)

(status, resource_config) = \
    get_resource_configuration(configuration.resource_home,
                               unique_resource_name, logger)
if not status:
    o.out("No resouce_config for: '" + unique_resource_name + "'\n")
    o.reply_and_exit(o.ERROR)

logger.info('getting exe')
(status, exe) = get_resource_exe(resource_config, exe_name, logger)
if not status:
    o.out("No EXE config for: '" + unique_resource_name + "' EXE: '"
           + exe_name + "'")
    o.reply_and_exit(o.ERROR)

# ################################################
# ## SSH to resource and start interactive job ###
# ################################################

# set the correct DISPLAY before calling SSH

display_number = get_users_display_number(job_submitter_client_id,
        configuration, logger)

if not display_number:
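
The snippet is truncated above; a minimal sketch of the SSH step it leads
into, assuming DISPLAY simply needs to point at the user's display number
before the remote command runs (the exact command layout is hypothetical):

import os
import subprocess

def ssh_with_display(display_number, resource_host, command):
    """Run command on resource_host over ssh with X forwarding, exporting
    the user's DISPLAY locally first."""
    env = os.environ.copy()
    env['DISPLAY'] = ':%d' % display_number  # hypothetical display format
    return subprocess.call(['ssh', '-X', resource_host, command], env=env)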
Example no. 4
    (status, resource_config) = \
             get_resource_configuration(configuration.resource_home,
                                        unique_host_name, logger)
    logger.debug('got resource conf %s' % resource_config)
    if not resource_config:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            "No resouce_config for: '%s'" % unique_host_name
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    # read pickled exe conf file (needed to create master_node_script.sh)

    (status, exe) = get_resource_exe(resource_config, 'localhost', logger)
    if not exe:
        output_objects.append({'object_type': 'error_text', 'text':
                               "No 'localhost' EXE config for: '%s'" % \
                               unique_host_name})
        return (output_objects, returnvalues.SYSTEM_ERROR)

    resource_dir = os.path.join(configuration.resource_home, unique_host_name)

    # create master_node_script

    try:

        # Securely open a temporary file in resource_dir

        (master_node_script_file, mns_fname) = \
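
    # The assignment above is truncated; a hedged sketch of one standard
    # way to securely create a temporary file in resource_dir (the actual
    # helper used here may differ):
    #
    #     import tempfile
    #     (fd, mns_fname) = tempfile.mkstemp(dir=resource_dir, text=True)
    #     master_node_script_file = os.fdopen(fd, 'w')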
Example no. 5
    # read pickled resource conf file (needed to create
    # master_node_script.sh)

    msg = ''
    (status, resource_config) = \
             get_resource_configuration(configuration.resource_home,
                                        unique_host_name, logger)
    logger.debug('got resource conf %s' % resource_config)
    if not resource_config:
        output_objects.append({'object_type': 'error_text', 'text':
                               "No resouce_config for: '%s'" % unique_host_name})
        return (output_objects, returnvalues.SYSTEM_ERROR)

    # read pickled exe conf file (needed to create master_node_script.sh)

    (status, exe) = get_resource_exe(resource_config, 'localhost', logger)
    if not exe:
        output_objects.append({'object_type': 'error_text', 'text':
                               "No 'localhost' EXE config for: '%s'" % \
                               unique_host_name})
        return (output_objects, returnvalues.SYSTEM_ERROR)

    resource_dir = os.path.join(configuration.resource_home, unique_host_name)

    # create master_node_script

    try:

        # Securely open a temporary file in resource_dir

        (master_node_script_file, mns_fname) = \
Example no. 6
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
        )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    job_ids = accepted['job_id']
    action = accepted['action'][-1]
    src = accepted['src']
    dst = accepted['dst'][-1]

    title_entry = find_entry(output_objects, 'title')
    title_entry['text'] = '%s live I/O' % configuration.short_title
    output_objects.append({'object_type': 'header', 'text'
                           : 'Request live communication with jobs'})

    if action not in valid_actions:
        output_objects.append({'object_type': 'error_text', 'text'
                               : 'Invalid action "%s" (supported: %s)' % \
                               (action, ', '.join(valid_actions))})
        return (output_objects, returnvalues.CLIENT_ERROR)

    if action in post_actions and not correct_handler('POST'):
        output_objects.append(
            {'object_type': 'error_text', 'text'
             : 'Only accepting POST requests to prevent unintended updates'})
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not job_ids or action in interactive_actions:
        job_id = ''
        if job_ids:
            job_id = job_ids[-1]
        output_objects.append({'object_type': 'text', 'text'
                          : '''
Fill in the live I/O details below to request communication with a running
job.
Job ID can be a full ID or a wild card pattern using "*" and "?" to match one
or more of your job IDs.
Use send output without source and destination paths to request upload of the
default stdio files from the job on the resource to the associated job_output
directory in your MiG home.
Destination is always handled as a directory path to put source files into.
Source and destination paths are always taken relative to the job execution
directory on the resource and your MiG home respectively.
'''})
        html = '''
<table class="liveio">
<tr>
<td>
<form method="post" action="liveio.py">
<table class="liveio">
<tr><td class=centertext>
</td></tr>
<tr><td>
Action:<br />
<input type=radio name=action checked value="send" />send output
<input type=radio name=action value="get" />get input
</td></tr>
<tr><td>
Job ID:<br />
<input type=text size=60 name=job_id value="%s" />
</td></tr>
<tr><td>
Source path(s):<br />
<div id="srcfields">
<input type=text size=60 name=src value="" /><br />
</div>
</td></tr>
<tr><td>
Destination path:<br />
<input type=text size=60 name=dst value="" />
</td></tr>
<tr><td>
<input type="submit" value="Send request" />
</td></tr>
</table>
</form>
</td>
<td>
<script type="text/javascript">
fields = 1;
max_fields = 64;
function addInput() {
    if (fields < max_fields) {
        document.getElementById("srcfields").innerHTML += "<input type=text size=60 name=src value='' /><br />";
        fields += 1;
    } else {
        alert("Maximum " + max_fields + " source fields allowed!");
        document.addsrcform.add.disabled = true;
    }
}
</script>
<form name="addsrcform">
<input type="button" onclick="addInput(); return false;" name="add" value="Add another source field" />
</form>
</td>
</tr>
</table>
''' % job_id
        output_objects.append({'object_type': 'html_form', 'text'
                              : html})
        output_objects.append({'object_type': 'text', 'text': '''
Further live job control is available through your personal message queues.
They provide a basic interface for centrally storing messages under your grid
account and can be used to pass messages between jobs or for orchestrating
jobs before and during execution.
'''
                               })
        output_objects.append({'object_type': 'link', 'destination':
                               'mqueue.py',
                               'text': 'Message queue interface'})
        return (output_objects, returnvalues.OK)
    elif action in ['get', 'receive', 'input']:
        action = 'get'
        action_desc = 'will be downloaded to the job on the resource'
    elif action in ['put', 'send', 'output']:
        action = 'send'
        action_desc = 'will be uploaded from the job on the resource'
    else:
        output_objects.append({'object_type': 'error_text', 'text'
                              : 'Invalid live io action: %s' % action})
        return (output_objects, returnvalues.CLIENT_ERROR)

    output_objects.append({'object_type': 'text', 'text'
                          : 'Requesting live I/O for %s'
                           % ', '.join(job_ids)})

    if action == 'get' and (not src or not dst):
        output_objects.append(
            {'object_type': 'error_text',
             'text': 'src and dst parameters required for live input'})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Fall back automatically to the job stdio files when output is
    # requested without explicit paths

    if src:
        src_text = 'The files ' + ' '.join(src)
    else:
        src_text = 'The job stdio files'

    if dst:
        dst_text = 'the ' + dst + ' directory'
    else:
        dst_text = 'the corresponding job_output directory'

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name

    base_dir = \
        os.path.abspath(os.path.join(configuration.mrsl_files_dir,
                        client_dir)) + os.sep
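    # For example, if client_dir were 'fred', a plain prefix check without
    # the trailing os.sep would also accept paths under a hypothetical
    # 'freddy' dir; the appended separator closes that hole.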

    filelist = []
    for job_id in job_ids:
        job_id = job_id.strip()

        # is job currently being executed?

        # Backward compatibility - all_jobs keyword should match all jobs

        if job_id == all_jobs:
            job_id = '*'

        # Check directory traversal attempts before actual handling to avoid
        # leaking information about file system layout while allowing
        # consistent error messages

        unfiltered_match = glob.glob(base_dir + job_id + '.mRSL')
        match = []
        for server_path in unfiltered_match:
            real_path = os.path.abspath(server_path)
            if not valid_user_path(real_path, base_dir, True):

                # out of bounds - save user warning for later to allow
                # partial match:
                # ../*/* is technically allowed to match own files.

                logger.warning("%s tried to %s restricted path %s ! (%s)" % \
                                (client_id, op_name, real_path, job_id))

                continue

            # Insert valid job files in filelist for later treatment

            match.append(real_path)

        # Now actually process the list of allowed matches and notify if
        # there was no (allowed) match

        if not match:
            output_objects.append(
                {'object_type': 'error_text', 'text'
                 : '%s: You do not have any matching job IDs!' % job_id})
        else:
            filelist += match

    for filepath in filelist:

        # Extract job_id from filepath (replace doesn't modify filepath)

        mrsl_file = filepath.replace(base_dir, '')
        job_id = mrsl_file.replace('.mRSL', '')
        job_dict = unpickle(filepath, logger)
        if not job_dict:
            status = returnvalues.CLIENT_ERROR

            output_objects.append(
                {'object_type': 'error_text', 'text'
                 : ('You can only list status of your own jobs. '
                    'Please verify that you submitted the mRSL file '
                    'with job id "%s" (Could not unpickle mRSL file %s)'
                    ) % (job_id, filepath)})
            continue

        if job_dict['STATUS'] != 'EXECUTING':
            output_objects.append(
                {'object_type': 'text', 'text'
                 : 'Job %s is not currently being executed! Job status: %s'
                 % (job_id, job_dict['STATUS'])})
            continue

        if job_dict['UNIQUE_RESOURCE_NAME'] == 'ARC':
            output_objects.append(
                {'object_type': 'text', 'text'
                 : 'Job %s is submitted to ARC, details are not available!'
                 % job_id })
            continue

        last_live_update_dict = {}
        last_live_update_file = configuration.mig_system_files + os.sep\
             + job_id + '.last_live_update'
        if os.path.isfile(last_live_update_file):
            last_live_update_dict_unpickled = \
                unpickle(last_live_update_file, logger)
            if not last_live_update_dict_unpickled:
                output_objects.append({'object_type': 'error_text',
                        'text'
                        : 'Could not unpickle %s - skipping request!'
                         % last_live_update_file})
                continue

            if 'LAST_LIVE_UPDATE_REQUEST_TIMESTAMP' not in \
                    last_live_update_dict_unpickled:
                output_objects.append(
                    {'object_type': 'error_text',
                     'text': 'Could not find needed key in %s.'
                     % last_live_update_file})
                continue

            last_live_update_request = last_live_update_dict_unpickled[
                'LAST_LIVE_UPDATE_REQUEST_TIMESTAMP']

            difference = datetime.datetime.now() - last_live_update_request
            try:
                min_delay = \
                    int(configuration.min_seconds_between_live_update_requests)
            except:
                min_delay = 30

            # compare days as well, so waits longer than a whole day do not
            # slip back under min_delay through the seconds component alone
            if difference.days == 0 and difference.seconds < min_delay:
                output_objects.append(
                    {'object_type': 'error_text',
                     'text': ('Request not allowed, you must wait at least ' \
                              '%s seconds between live update requests!'
                              ) % min_delay})
                continue

        # save this request to file to avoid DoS from a client request loop.

        last_live_update_dict['LAST_LIVE_UPDATE_REQUEST_TIMESTAMP'] = \
            datetime.datetime.now()
        pickle_ret = pickle(last_live_update_dict,
                            last_live_update_file, logger)
        if not pickle_ret:
            output_objects.append(
                {'object_type': 'error_text', 'text'
                 : 'Error saving live io request timestamp to last_live_update '
                 'file, request not sent!'})
            continue

        # Job is being executed right now, send live io request to frontend

        # get resource_config, needed by scp_file_to_resource
        #(status, resource_config) = get_resource_configuration(
        #    resource_home, unique_resource_name, logger)

        resource_config = job_dict['RESOURCE_CONFIG']
        (status, exe) = get_resource_exe(resource_config, job_dict['EXE'],
                                         logger)
        if not status:
            output_objects.append(
                {'object_type': 'error_text', 'text'
                 : 'Could not get exe configuration for job %s' % job_id})
            continue

        local_file = '%s.%supdate' % (job_dict['LOCALJOBNAME'], action)
        if not os.path.exists(local_file):

            # create

            try:
                filehandle = open(local_file, 'w')
                filehandle.write('job_id '
                                  + job_dict['JOB_ID'] + '\n')
                filehandle.write('localjobname '
                                  + job_dict['LOCALJOBNAME'] + '\n')
                filehandle.write('execution_user '
                                  + exe['execution_user'] + '\n')
                filehandle.write('execution_node '
                                  + exe['execution_node'] + '\n')
                filehandle.write('execution_dir ' + exe['execution_dir']
                                  + '\n')
                filehandle.write('target liveio\n')

                # Leave defaults src and dst to FE script if not provided
                
                if src:
                    filehandle.write('source ' + ' '.join(src) + '\n')
                if dst:
                    filehandle.write('destination ' + dst + '\n')

                # Backward compatible test for shared_fs - fall back to scp

                if exe.get('shared_fs'):
                    filehandle.write('copy_command cp\n')
                    filehandle.write('copy_frontend_prefix \n')
                    filehandle.write('copy_execution_prefix \n')
                else:
                    filehandle.write('copy_command scp -B\n')
                    filehandle.write(
                        'copy_frontend_prefix ${frontend_user}@${frontend_node}:\n')
                    filehandle.write(
                        'copy_execution_prefix ${execution_user}@${execution_node}:\n')

                filehandle.write('### END OF SCRIPT ###\n')
                filehandle.close()
            except Exception, exc:
                # ignore write errors here; the existence check just below
                # reports the failure to the user
                pass

        if not os.path.exists(local_file):
            output_objects.append(
                {'object_type': 'error_text', 'text'
                 : '.%supdate file not available on %s server' % \
                 (action, configuration.short_title)})
            continue

        scpstatus = copy_file_to_resource(local_file, '%s.%supdate'
                                          % (job_dict['LOCALJOBNAME'], action),
                                          resource_config, logger)
        if not scpstatus:
            output_objects.append(
                {'object_type': 'error_text', 'text'
                 : 'Error sending request for live io to resource!'})
            continue
        else:
            output_objects.append(
                {'object_type': 'text', 'text'
                 : 'Request for live io was successfully sent to the resource!'
                 })
            output_objects.append(
                {'object_type': 'text', 'text'
                 : '%s %s and should become available in %s in a minute.' % \
                 (src_text, action_desc, dst_text)
                 })
            if action == 'send':
                if not dst:
                    target_path = '%s/%s/*' % (job_output_dir, job_id)
                else:
                    target_path = dst
                output_objects.append({'object_type': 'link', 'destination'
                                       : 'ls.py?path=%s' % target_path,
                                       'text': 'View uploaded files'})
            else:
                output_objects.append({'object_type': 'link', 'destination'
                                       : 'ls.py?path=%s' % ';path='.join(src),
                                       'text': 'View files for download'})

        try:
            os.remove(local_file)
        except Exception, exc:
            pass
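
The timestamp guard in the loop above generalizes to a small rate-limit
helper; a minimal sketch, assuming a plain float timestamp file instead of
the pickled dict (allow_request is hypothetical, not part of the original
code):

import os
import time

def allow_request(stamp_file, min_delay=30):
    """Return True and refresh stamp_file if at least min_delay seconds
    passed since the last recorded request; otherwise return False."""
    now = time.time()
    if os.path.isfile(stamp_file):
        try:
            last = float(open(stamp_file).read().strip())
        except ValueError:
            last = 0.0
        if now - last < min_delay:
            return False
    # record this request to throttle tight client request loops
    stamp_fd = open(stamp_file, 'w')
    stamp_fd.write('%f\n' % now)
    stamp_fd.close()
    return True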
Example no. 7
if not is_resource(unique_resource_name, configuration.resource_home):
    o.out('requestinteractivejob error! Your unique_resource_name '
          + 'is not recognized as a %s resource!' % configuration.short_title)
    o.reply_and_exit(o.ERROR)

(status, resource_conf) = \
    get_resource_configuration(configuration.resource_home,
                               unique_resource_name, logger)
if not status:
    o.out("No resouce_conf for: '" + unique_resource_name + "'\n")
    o.reply_and_exit(o.ERROR)

logger.info('getting exe')
(status, exe_conf) = get_resource_exe(resource_conf, exe, logger)
if not status:
    o.out("No EXE config for: '" + unique_resource_name + "' EXE: '"
           + exe + "'")
    o.reply_and_exit(o.ERROR)

# ################################################
# ## SSH to resource and start interactive job ###
# ################################################

# set the correct DISPLAY before calling SSH

display_number = get_users_display_number(job_submitter_client_id,
        configuration, logger)

if not display_number:
Example no. 8
unique_resource_name = sys.argv[1]
if sys.argv[2:]:
    exe_name = sys.argv[2]
print "running ssh unit tests against %s" % unique_resource_name
configuration = get_configuration_object()
logger = configuration.logger
filename = '/tmp/localdummy'
dummy_fd = open(filename, "w")
dummy_fd.write("sample text\n")
dummy_fd.close()
res_path = 'res-' + os.path.basename(filename)
exe_path = 'exe-' + os.path.basename(filename)
(res_status, resource_config) = \
    get_resource_configuration(configuration.resource_home,
                               unique_resource_name, logger)
# check the resource lookup before feeding its result to the exe lookup
if not res_status:
    print "Failed to extract resource config for %s: %s" % \
          (unique_resource_name, resource_config)
    sys.exit(1)
(exe_status, exe_config) = get_resource_exe(resource_config, exe_name,
                                            logger)
if not exe_status:
    print "Failed to extract exe config for %s: %s" % \
          (exe_name, exe_config)
    sys.exit(1)
copy_res = copy_file_to_resource(filename, res_path, resource_config,
                                 logger)
print "copy %s to %s on %s success: %s" % (filename, res_path,
                                           unique_resource_name, copy_res)
command = "ls"
print "Execute %s on %s" % (command, unique_resource_name)
(exec_res, exec_msg) = execute_on_resource(command, False, resource_config,