Example #1
def get_ps3_resource():
    log_msg = 'ps3live'

    # Identify sandboxkey

    sandboxkey = fieldstorage.getfirst('sandboxkey', None)
    if not sandboxkey:

        # No sandboxkey provided

        log_msg = log_msg + ', Remote IP: %s, provided no sandboxkey.'\
             % os.getenv('REMOTE_ADDR')

        return (False, log_msg)

    if not os.path.exists(configuration.sandbox_home + sandboxkey):

        # Create resource

        unique_resource_name = create_ps3_resource(sandboxkey)
        log_msg = log_msg + ' Created resource: %s'\
             % unique_resource_name

        # Make symbolic link from
        # sandbox_home/sandboxkey to resource_home/resource_name

        sandbox_link = configuration.sandbox_home + sandboxkey
        resource_path = os.path.abspath(os.path.join(configuration.resource_home,
                                                     unique_resource_name))

        make_symlink(resource_path, sandbox_link, logger)
    else:
        (status, unique_resource_name) = get_resource_name(sandboxkey,
                logger)
        if not status:
            return (False, unique_resource_name)

    # If resource has a jobrequest pending, remove it.

    job_pending_file = os.path.join(configuration.resource_home,
                                    unique_resource_name,
                                    'jobrequest_pending.ps3')

    if os.path.exists(job_pending_file):
        os.remove(job_pending_file)

    log_msg = log_msg + ', Remote IP: %s, Key: %s'\
         % (os.getenv('REMOTE_ADDR'), sandboxkey)

    o.internal('''
%s
''' % log_msg)

    return (True, unique_resource_name)
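
Every example on this page revolves around the same make_symlink helper, whose implementation is not shown. Judging from the call sites (some pass a fourth force argument), a minimal sketch might look like the following; this is an illustration under assumed semantics, not the actual MiG helper:

import os

def make_symlink(dest, link, logger, force=False):
    """Minimal sketch: create symlink 'link' pointing at 'dest'.
    With force=True an existing link is replaced first. Semantics
    are inferred from the call sites above, not from MiG itself."""
    try:
        if force and os.path.lexists(link):
            # lexists also catches dangling links that exists misses
            os.remove(link)
        os.symlink(dest, link)
        return True
    except OSError, err:
        logger.error('could not symlink %s -> %s: %s' % (link, dest, err))
        return False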
Example #2
def get_ps3_resource(configuration):
    log_msg = 'ps3live'

    # Identify sandboxkey

    sandboxkey = fieldstorage.getfirst('sandboxkey', None)
    if not sandboxkey:

        # No sandboxkey provided

        log_msg = log_msg + ', Remote IP: %s, provided no sandboxkey.'\
             % os.getenv('REMOTE_ADDR')

        return (False, log_msg)

    if not os.path.exists(configuration.sandbox_home + sandboxkey):

        # Create resource

        unique_resource_name = create_ps3_resource(configuration, sandboxkey)
        log_msg = log_msg + ' Created resource: %s'\
             % unique_resource_name

        # Make symbolic link from
        # sandbox_home/sandboxkey to resource_home/resource_name

        sandbox_link = configuration.sandbox_home + sandboxkey
        resource_path = os.path.abspath(
            os.path.join(configuration.resource_home, unique_resource_name))

        make_symlink(resource_path, sandbox_link, logger)
    else:
        (status, unique_resource_name) = get_resource_name(sandboxkey, logger)
        if not status:
            return (False, unique_resource_name)

    # If resource has a jobrequest pending, remove it.

    job_pending_file = os.path.join(configuration.resource_home,
                                    unique_resource_name,
                                    'jobrequest_pending.ps3')

    if os.path.exists(job_pending_file):
        os.remove(job_pending_file)

    log_msg = log_msg + ', Remote IP: %s, Key: %s'\
         % (os.getenv('REMOTE_ADDR'), sandboxkey)

    o.internal('''
%s
''' % log_msg)

    return (True, unique_resource_name)
Example #3
def save_twofactor_session(configuration,
                           client_id,
                           session_key,
                           user_addr,
                           user_agent,
                           session_start,
                           session_end=-1):
    """Save twofactor session dict for client_id"""
    _logger = configuration.logger
    if configuration.site_enable_gdp:
        client_id = get_base_client_id(configuration,
                                       client_id,
                                       expand_oid_alias=False)
    session_path = os.path.join(configuration.twofactor_home, session_key)
    if session_end < 0:
        session_end = session_start + twofactor_cookie_ttl
    session_data = {
        'client_id': client_id,
        'session_key': session_key,
        'user_addr': user_addr,
        'user_agent': user_agent,
        'session_start': session_start,
        'session_end': session_end
    }
    status = pickle(session_data, session_path, configuration.logger)
    if status and configuration.site_twofactor_strict_address:
        session_path_link = os.path.join(configuration.twofactor_home,
                                         "%s_%s" % (user_addr, session_key))
        status = \
            make_symlink(session_key, session_path_link, _logger, force=False)
        if not status:
            delete_file(session_path, _logger)
    return status
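
A hypothetical call might look like this; all values are invented for illustration, and the twofactor_cookie_ttl fallback kicks in because session_end is left at its default:

import time

# Invented example values purely for illustration
status = save_twofactor_session(configuration,
                                'john@example.org',  # client_id
                                'deadbeefcafe',      # session_key
                                '10.0.0.1',          # user_addr
                                'Mozilla/5.0',       # user_agent
                                time.time())         # session_start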
Example #4
            output_objects.append(
                {'object_type': 'error_text', 'text'
                 : ('Could not create needed dirs on %s server! %s'
                    % (configuration.short_title, exc))})
            logger.error('%s when looking for dir %s.' % (exc, share_dir))
            return (output_objects, returnvalues.SYSTEM_ERROR)

    # create symlink from users home directory to vgrid file directory
    # unless member of parent vgrid so that it is included already

    link_src = os.path.abspath(configuration.vgrid_files_home + os.sep
                                + vgrid_name) + os.sep
    link_dst = user_dir + vgrid_name

    if not inherit_vgrid_member and \
           not make_symlink(link_src, link_dst, logger):
        output_objects.append({'object_type': 'error_text', 'text'
                              : 'Could not create link to %s share!' % \
                               configuration.site_vgrid_label})
        logger.error('Could not create link to %s files (%s -> %s)'
                     % (configuration.site_vgrid_label, link_src, link_dst))
        return (output_objects, returnvalues.SYSTEM_ERROR)

    public_base_dst = user_public_base + vgrid_name

    # create symlink for public_base files

    if not make_symlink(public_base_dir, public_base_dst, logger):
        output_objects.append({'object_type': 'error_text', 'text'
                              : 'Could not create link to public_base dir!'
                              })
Example #5
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    defaults = signature()[1]
    title_entry = find_entry(output_objects, 'title')
    label = "%s" % configuration.site_vgrid_label
    title_entry['text'] = "Add %s Member" % label
    output_objects.append({'object_type': 'header', 'text':
                           'Add %s Member(s)' % label})
    status = returnvalues.OK
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    vgrid_name = accepted['vgrid_name'][-1].strip()
    cert_id_list = accepted['cert_id']
    request_name = unhexlify(accepted['request_name'][-1])
    rank_list = accepted['rank'] + ['' for _ in cert_id_list]

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append(
            {'object_type': 'error_text', 'text': '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
             })
        return (output_objects, returnvalues.CLIENT_ERROR)

    user_map = get_full_user_map(configuration)
    user_dict = user_map.get(client_id, None)
    # Optional site-wide limitation of manage vgrid permission
    if not user_dict or \
            not vgrid_manage_allowed(configuration, user_dict):
        logger.warning("user %s is not allowed to manage vgrids!" % client_id)
        output_objects.append(
            {'object_type': 'error_text', 'text':
             'Only privileged users can manage %ss' % label})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # make sure vgrid settings allow this owner to edit members

    (allow_status, allow_msg) = allow_members_adm(configuration, vgrid_name,
                                                  client_id)
    if not allow_status:
        output_objects.append({'object_type': 'error_text', 'text': allow_msg})
        return (output_objects, returnvalues.CLIENT_ERROR)

    cert_id_added = []
    for (cert_id, rank_str) in zip(cert_id_list, rank_list):
        cert_id = cert_id.strip()
        cert_dir = client_id_dir(cert_id)
        try:
            rank = int(rank_str)
        except ValueError:
            rank = None

        # Allow openid alias as subject if openid with alias is enabled
        if configuration.user_openid_providers and configuration.user_openid_alias:
            cert_id = expand_openid_alias(cert_id, configuration)

        # Validity of user and vgrid names is checked in this init function so
        # no need to worry about illegal directory traversal through variables

        (ret_val, msg, _) = \
            init_vgrid_script_add_rem(vgrid_name, client_id, cert_id,
                                      'member', configuration)
        if not ret_val:
            output_objects.append({'object_type': 'error_text', 'text':
                                   msg})
            status = returnvalues.CLIENT_ERROR
            continue

        # don't add if already an owner

        if vgrid_is_owner(vgrid_name, cert_id, configuration):
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 '%s is already an owner of %s or a parent %s.' %
                 (cert_id, vgrid_name, label)})
            status = returnvalues.CLIENT_ERROR
            continue

        # don't add if already a member unless rank is given

        if rank is None and vgrid_is_member(vgrid_name, cert_id, configuration):
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 '''%s is already a member of %s or a parent %s. Please remove
the person first and then try this operation again.''' %
                 (cert_id, vgrid_name, label)
                 })
            status = returnvalues.CLIENT_ERROR
            continue

        # owner or member of subvgrid?

        (list_status, subvgrids) = vgrid_list_subvgrids(vgrid_name,
                                                        configuration)
        if not list_status:
            output_objects.append({'object_type': 'error_text', 'text':
                                   'Error getting list of sub%ss: %s' %
                                   (label, subvgrids)})
            status = returnvalues.SYSTEM_ERROR
            continue

        # TODO: we DO allow ownership of sub vgrids with parent membership so we
        # should support the (cumbersome) relinking of vgrid shares here. Leave it
        # to user to do it manually for now with temporary removal of ownership

        skip_entity = False
        for subvgrid in subvgrids:
            if vgrid_is_owner(subvgrid, cert_id, configuration, recursive=False):
                output_objects.append(
                    {'object_type': 'error_text', 'text':
                     """%(cert_id)s is already an owner of a
sub-%(vgrid_label)s ('%(subvgrid)s'). While we DO support members being owners
of sub-%(vgrid_label)ss, we do not support adding parent %(vgrid_label)s
members at the moment. Please (temporarily) remove the person as owner of all
sub-%(vgrid_label)ss first and then try this operation again.""" %
                     {'cert_id': cert_id, 'subvgrid': subvgrid,
                      'vgrid_label': label}})
                status = returnvalues.CLIENT_ERROR
                skip_entity = True
                break
            if vgrid_is_member(subvgrid, cert_id, configuration, recursive=False):
                output_objects.append(
                    {'object_type': 'error_text', 'text':
                     """%s is already a member of a sub-%s ('%s'). Please
remove the person first and then try this operation again.""" %
                     (cert_id, label, subvgrid)})
                status = returnvalues.CLIENT_ERROR
                skip_entity = True
                break
        if skip_entity:
            continue

        # Check if only rank change was requested and apply if so

        if rank is not None:
            (add_status, add_msg) = vgrid_add_members(configuration,
                                                      vgrid_name,
                                                      [cert_id], rank=rank)
            if not add_status:
                output_objects.append(
                    {'object_type': 'error_text', 'text': add_msg})
                status = returnvalues.SYSTEM_ERROR
            else:
                output_objects.append({'object_type': 'text', 'text':
                                       'changed %s to member %d' % (cert_id,
                                                                    rank)})
            # No further action after rank change as everything else exists
            continue

        # Getting here means cert_id is neither owner nor member of any parent
        # or sub-vgrid.

        # Please note that base_dir must end in slash to avoid access to other
        # vgrid dirs when own name is a prefix of another name

        base_dir = os.path.abspath(os.path.join(configuration.vgrid_home,
                                                vgrid_name)) + os.sep
        user_dir = os.path.abspath(os.path.join(configuration.user_home,
                                                cert_dir)) + os.sep

        # make sure all dirs can be created (i.e. that a file or directory
        # with the same name does not exist prior to adding the member)

        if os.path.exists(user_dir + vgrid_name):
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 '''Could not add member, a file or directory in the home
directory called %s exists! (%s)''' % (vgrid_name, user_dir + vgrid_name)})
            status = returnvalues.CLIENT_ERROR
            continue

        # Add

        (add_status, add_msg) = vgrid_add_members(configuration, vgrid_name,
                                                  [cert_id])
        if not add_status:
            output_objects.append(
                {'object_type': 'error_text', 'text': add_msg})
            status = returnvalues.SYSTEM_ERROR
            continue

        vgrid_name_parts = vgrid_name.split('/')
        is_subvgrid = len(vgrid_name_parts) > 1

        if is_subvgrid:
            try:

                # vgrid_name = IMADA/STUD/BACH
                # vgrid_name_last_fragment = BACH

                vgrid_name_last_fragment = vgrid_name_parts[-1].strip()

                # vgrid_name_without_last_fragment = IMADA/STUD/

                vgrid_name_without_last_fragment = \
                    ('/'.join(vgrid_name_parts[:-1]) + os.sep).strip()

                # create dirs if they do not exist

                dir1 = user_dir + vgrid_name_without_last_fragment
                if not os.path.isdir(dir1):
                    os.makedirs(dir1)
            except Exception, exc:

                # out of range? should not be possible due to is_subvgrid check

                output_objects.append(
                    {'object_type': 'error_text', 'text':
                     ('Could not create needed dirs on %s server! %s'
                      % (configuration.short_title, exc))})
                logger.error('%s when looking for dir %s.' % (exc, dir1))
                status = returnvalues.SYSTEM_ERROR
                continue

        # create symlink from users home directory to vgrid file directory

        link_src = os.path.abspath(configuration.vgrid_files_home + os.sep
                                   + vgrid_name) + os.sep
        link_dst = user_dir + vgrid_name

        # create symlink to vgrid files

        if not make_symlink(link_src, link_dst, logger):
            output_objects.append({'object_type': 'error_text', 'text':
                                   'Could not create link to %s files!' %
                                   label
                                   })
            logger.error('Could not create link to %s files! (%s -> %s)' %
                         (label, link_src, link_dst))
            status = returnvalues.SYSTEM_ERROR
            continue
        cert_id_added.append(cert_id)
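
The comment above about base_dir needing a trailing slash guards a classic path-prefix pitfall: without the separator, a vgrid named 'project' would also match a sibling named 'projectX' in any startswith-style check. A small demonstration with invented paths:

import os

base = os.path.abspath('/srv/vgrid_home/project')
print ('/srv/vgrid_home/projectX/file').startswith(base)           # True: wrong match
print ('/srv/vgrid_home/projectX/file').startswith(base + os.sep)  # False: correct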
Example #6
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Could not update sandbox database: %s' % exc
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    logger.debug('building resource specific files for %s' % unique_host_name)

    # create sandboxlink

    sandbox_link = configuration.sandbox_home + sandboxkey
    resource_path = os.path.abspath(configuration.resource_home +
                                    unique_host_name)
    make_symlink(resource_path, sandbox_link, logger)

    # change dir to sss_home

    old_path = os.getcwd()

    # log_dir = "log/"

    # read pickled resource conf file (needed to create
    # master_node_script.sh)

    msg = ''
    (status, resource_config) = \
             get_resource_configuration(configuration.resource_home,
                                        unique_host_name, logger)
    logger.debug('got resource conf %s' % resource_config)
Example #7
def create_arc_job(job, configuration, logger):
    """Analog to create_job_script for ARC jobs:
    Creates symlinks for receiving result files, translates the job dict to
    ARC xrsl, and stores the resulting job script (xrsl + sh script) for
    submission.

    Unlike create_job_script, we do _not_ create a separate job_dict with
    copies and SESSIONID inside; all we need is the link from
    webserver_home / sessionID into the user's home directory
    ("job_output/job['JOB_ID']" is added to the result upload URLs in the
    translation).

    Returns message (ARC job ID if no error) and sessionid (None if error)
    """

    if not configuration.arc_clusters:
        return (None, "No ARC support!")
    if not job["JOBTYPE"] == "arc":
        return (None, "Error. This is not an ARC job")

    # Deep copy job for local changes
    job_dict = deepcopy(job)
    # Finally expand reserved job variables like +JOBID+ and +JOBNAME+
    job_dict = expand_variables(job_dict)
    # ... no more changes to job_dict from here on
    client_id = str(job_dict["USER_CERT"])

    # we do not want to see empty jobs here. Test as done in create_job_script.
    if client_id == configuration.empty_job_name:
        return (None, "Error. empty job for ARC?")

    # generate random session ID:
    sessionid = hexlify(open("/dev/urandom").read(32))
    logger.debug("session ID (for creating links): %s" % sessionid)

    client_dir = client_id_dir(client_id)

    # make symbolic links inside webserver_home:
    #
    # we need: link to owner's dir. to receive results,
    #          job mRSL inside sessid_to_mrsl_link_home
    linklist = [
        (configuration.user_home + client_dir, configuration.webserver_home + sessionid),
        (
            configuration.mrsl_files_dir + client_dir + "/" + str(job_dict["JOB_ID"]) + ".mRSL",
            configuration.sessid_to_mrsl_link_home + sessionid + ".mRSL",
        ),
    ]

    for (dest, loc) in linklist:
        make_symlink(dest, loc, logger)

    # the translation generates an xRSL object which specifies to execute
    # a shell script with script_name. If sessionid != None, results will
    # be uploaded to sid_redirect/sessionid/job_output/job_id

    try:
        (xrsl, script, script_name) = mrsltoxrsl.translate(job_dict, sessionid)
        logger.debug("translated to xRSL: %s" % xrsl)
        logger.debug("script:\n %s" % script)

    except Exception, err:
        # error during translation, pass a message
        logger.error("Error during xRSL translation: %s" % err.__str__())
        return (None, err.__str__())
Example #8
def create_job_script(unique_resource_name, exe, job, resource_config, localjobname, configuration, logger):
    """Helper to create actual jobs for handout to a resource.

    Returns tuple with job dict on success and None otherwise.
    The job dict includes random generated sessionid and a I/O session id.
    """

    job_dict = {"": ""}
    sessionid = hexlify(open("/dev/urandom").read(32))
    iosessionid = hexlify(open("/dev/urandom").read(32))
    helper_dict_filename = os.path.join(
        configuration.resource_home, unique_resource_name, "empty_job_helper_dict.%s" % exe
    )

    # Deep copy job for local changes
    job_dict = deepcopy(job)

    job_dict["SESSIONID"] = sessionid
    job_dict["IOSESSIONID"] = iosessionid

    # Create ssh rsa keys and known_hosts for job mount

    mount_private_key = ""
    mount_public_key = ""
    mount_known_hosts = ""

    if job_dict.get("MOUNT", []) != []:

        # Generate public/private key pair for sshfs

        (mount_private_key, mount_public_key) = generate_ssh_rsa_key_pair()

        # Generate known_hosts

        if not os.path.exists(configuration.user_sftp_key_pub):
            msg = "job generation failed:"
            msg = "%s user_sftp_key_pub: '%s' -> File _NOT_ found" % (msg, configuration.user_sftp_key_pub)
            print msg
            logger.error(msg)
            return (None, msg)

        sftp_address = configuration.user_sftp_show_address
        sftp_addresses = socket.gethostbyname_ex(sftp_address or socket.getfqdn())
        sftp_port = configuration.user_sftp_show_port

        mount_known_hosts = "%s,[%s]:%s" % (sftp_addresses[0], sftp_addresses[0], sftp_port)
        for list_idx in xrange(1, len(sftp_addresses)):
            for sftp_address in sftp_addresses[list_idx]:
                mount_known_hosts += ",%s,[%s]:%s" % (sftp_address, sftp_address, sftp_port)

        fd = open(configuration.user_sftp_key_pub, "r")
        mount_known_hosts = "%s %s" % (mount_known_hosts, fd.read())
        fd.close()

    job_dict["MOUNTSSHPUBLICKEY"] = mount_public_key
    job_dict["MOUNTSSHPRIVATEKEY"] = mount_private_key
    job_dict["MOUNTSSHKNOWNHOSTS"] = mount_known_hosts

    if not job_dict.has_key("MAXPRICE"):
        job_dict["MAXPRICE"] = "0"
    # Finally expand reserved job variables like +JOBID+ and +JOBNAME+
    job_dict = expand_variables(job_dict)
    # ... no more changes to job_dict from here on
    client_id = str(job_dict["USER_CERT"])
    client_dir = client_id_dir(client_id)

    # if not job:

    if client_id == configuration.empty_job_name:

        # create link to empty job

        linkdest_empty_job = helper_dict_filename
        linkloc_empty_job = configuration.sessid_to_mrsl_link_home + sessionid + ".mRSL"
        make_symlink(linkdest_empty_job, linkloc_empty_job, logger)
    else:

        # link sessionid to mrsl file

        linkdest1 = configuration.mrsl_files_dir + client_dir + "/" + str(job_dict["JOB_ID"]) + ".mRSL"
        linkloc1 = configuration.sessid_to_mrsl_link_home + sessionid + ".mRSL"
        make_symlink(linkdest1, linkloc1, logger)

    # link sessionid to job owners home directory

    linkdest2 = configuration.user_home + client_dir
    linkloc2 = configuration.webserver_home + sessionid
    make_symlink(linkdest2, linkloc2, logger)

    # link iosessionid to job owners home directory

    linkdest3 = configuration.user_home + client_dir
    linkloc3 = configuration.webserver_home + iosessionid
    make_symlink(linkdest3, linkloc3, logger)

    # link sessionid to .job file

    linkdest4 = configuration.mig_system_files + str(job_dict["JOB_ID"]) + ".job"
    linkloc4 = configuration.webserver_home + sessionid + ".job"
    make_symlink(linkdest4, linkloc4, logger)

    # link sessionid to .getupdatefiles file

    linkdest5 = configuration.mig_system_files + str(job_dict["JOB_ID"]) + ".getupdatefiles"
    linkloc5 = configuration.webserver_home + sessionid + ".getupdatefiles"
    make_symlink(linkdest5, linkloc5, logger)

    # link sessionid to .sendoutputfiles file

    linkdest4 = configuration.mig_system_files + str(job_dict["JOB_ID"]) + ".sendoutputfiles"
    linkloc4 = configuration.webserver_home + sessionid + ".sendoutputfiles"
    make_symlink(linkdest4, linkloc4, logger)

    # link sessionid to .sendupdatefiles file

    linkdest5 = configuration.mig_system_files + str(job_dict["JOB_ID"]) + ".sendupdatefiles"
    linkloc5 = configuration.webserver_home + sessionid + ".sendupdatefiles"
    make_symlink(linkdest5, linkloc5, logger)

    path_without_extension = os.path.join(configuration.resource_home, unique_resource_name, localjobname)
    gen_res = gen_job_script(
        job_dict, resource_config, configuration, localjobname, path_without_extension, client_dir, exe, logger
    )
    if not gen_res:
        msg = "job scripts were not generated. Perhaps you have specified " + "an invalid SCRIPTLANGUAGE ? "
        print msg
        logger.error(msg)
        return (None, msg)

    inputfiles_path = path_without_extension + ".getinputfiles"

    # hack to ensure that a resource has a sandbox keyword

    if resource_config.get("SANDBOX", False):

        # Move file to webserver_home for download as we can't push it to
        # sandboxes

        try:

            # RA TODO: change download filename to something that
            # includes sessionid

            webserver_path = os.path.join(configuration.webserver_home, localjobname + ".getinputfiles")
            os.rename(inputfiles_path, webserver_path)

            # ########## ATTENTION HACK TO MAKE JVM SANDBOXES WORK ############
            # This should be changed to use the (to be developed) RE pre/post
            # processing framework. For now the user must have a jvm dir in
            # his home dir where the class files are located; this should be
            # changed so that the execution homepath can be specified in the
            # mRSL jobfile.
            # Martin Rehr 08/09/06

            # If this is a oneclick job, link the user's jvm dir to
            # webserver_home/sandboxkey.oneclick
            # This is done because the client applet uses the
            # codebase from which it is originally loaded.
            # Therefore the codebase must be dynamically changed
            # for every job.

            if resource_config.has_key("PLATFORM") and resource_config["PLATFORM"] == "ONE-CLICK":

                # A two-step link is made.
                # First sandboxkey.oneclick is made to point to
                # sessionid.jvm
                # Second sessionid.jvm is set to point to
                # USER_HOME/jvm
                # This is done for security and easy cleanup:
                # sessionid.jvm is cleaned up by the server upon job
                # finish/timeout, thereby leaving no open entries to
                # the user's jvm dir.

                linkintermediate = configuration.webserver_home + sessionid + ".jvm"

                if client_dir == configuration.empty_job_name:
                    linkdest = os.path.abspath(configuration.javabin_home)
                else:
                    linkdest = configuration.user_home + client_dir + os.sep + "jvm"

                # Make link sessionid.jvm -> USER_HOME/jvm

                make_symlink(linkdest, linkintermediate, logger)

                linkloc = configuration.webserver_home + resource_config["SANDBOXKEY"] + ".oneclick"

                # Remove previous symlink
                # This must be done in a try/catch as the symlink
                # may be a dead link and 'if os.path.exists(linkloc):'
                # will then return false, even though the link exists.

                try:
                    os.remove(linkloc)
                except:
                    pass

                # Make link sandboxkey.oneclick -> sessionid.jvm

                make_symlink(linkintermediate, linkloc, logger)
        except Exception, err:

            # ######### End JVM SANDBOX HACK ###########

            msg = "File '%s' was not copied to the webserver home." % inputfiles_path
            print "\nERROR: " + str(err)
            logger.error(msg)
            return (None, msg)

        return (job_dict, "OK")
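
The 'dead link' comments in this example point at a general pitfall: os.path.exists follows the link and reports False when the target is gone, while os.path.lexists checks the link itself. A short demonstration with an invented path:

import os

os.symlink('/nonexistent/target', '/tmp/dangling.lnk')
print os.path.exists('/tmp/dangling.lnk')   # False: target is missing
print os.path.lexists('/tmp/dangling.lnk')  # True: the link itself exists
os.remove('/tmp/dangling.lnk')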
Example #9
def modify_share_links(action,
                       share_dict,
                       client_id,
                       configuration,
                       share_map=None):
    """Modify share links with given action and share_dict for client_id.
    In practice this a shared helper to add or remove share links from the
    saved dictionary. The optional share_map argument can be used to pass an
    already loaded dictionary of saved share links to avoid reloading.
    """
    logger = configuration.logger
    share_id = share_dict['share_id']
    if share_map is None:
        (load_status, share_map) = load_share_links(configuration, client_id)
        if not load_status:
            logger.error("modify_share_links failed in load: %s" % share_map)
            return (load_status, share_map)

    share_dict.update(share_map.get(share_id, {}))
    rel_path = share_dict['path'].lstrip(os.sep)
    access = share_dict['access']
    if 'read' in access and 'write' in access:
        access_dir = 'read-write'
    elif 'read' in access:
        access_dir = 'read-only'
    elif 'write' in access:
        access_dir = 'write-only'
    else:
        logger.error("modify_share_links invalid access: %s" % access)
        return (False, share_map)
    symlink_path = os.path.join(configuration.sharelink_home, access_dir,
                                share_id)
    target_path = os.path.join(configuration.user_home,
                               client_id_dir(client_id), rel_path)
    if action == "create":
        if not make_symlink(target_path, symlink_path, configuration.logger,
                            False):
            logger.error("could not make share symlink: %s (already exists?)" %
                         symlink_path)
            return (False, share_map)
        share_dict.update({
            'created_timestamp': datetime.datetime.now(),
            'owner': client_id,
        })
        share_map[share_id] = share_dict
    elif action == "modify":
        if not make_symlink(target_path, symlink_path, configuration.logger,
                            True):
            logger.error("could not update share symlink: %s" % symlink_path)
            return (False, share_map)
        share_dict['created_timestamp'] = datetime.datetime.now()
        share_map[share_id].update(share_dict)
    elif action == "delete":
        if not delete_symlink(symlink_path, configuration.logger):
            logger.error("could not delete share symlink: %s (missing?)" %
                         symlink_path)
            return (False, share_map)
        del share_map[share_id]
    else:
        return (False, "Invalid action %s on share links" % action)

    try:
        sharelinks_path = os.path.join(configuration.user_settings,
                                       client_id_dir(client_id),
                                       sharelinks_filename)
        dump(share_map, sharelinks_path)
    except Exception, err:
        logger.error("modify_share_links failed: %s" % err)
        return (False, 'Error updating share links: %s' % err)
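
A hypothetical create call; the share_dict fields follow the lookups inside modify_share_links above, but the concrete values are invented:

# Invented example values; 'path' is taken relative to the user home
# after the lstrip(os.sep) above
share_dict = {'share_id': 'AbCdEf123',
              'path': '/projects/demo',
              'access': ['read']}
(status, share_map) = modify_share_links('create', share_dict, client_id,
                                         configuration)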
Example #10
        save_sandbox_db(userdb, configuration)
    except Exception, exc:
        output_objects.append({'object_type': 'error_text', 'text':
                               'Could not update sandbox database: %s' % exc
                               })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    logger.debug('building resource specific files for %s'
                 % unique_host_name)

    # create sandboxlink

    sandbox_link = configuration.sandbox_home + sandboxkey
    resource_path = os.path.abspath(configuration.resource_home
                                    + unique_host_name)
    make_symlink(resource_path, sandbox_link, logger)

    # change dir to sss_home
    
    old_path = os.getcwd()

    # log_dir = "log/"

    # read pickled resource conf file (needed to create
    # master_node_script.sh)

    msg = ''
    (status, resource_config) = \
             get_resource_configuration(configuration.resource_home,
                                        unique_host_name, logger)
    logger.debug('got resource conf %s' % resource_config)
Example #11
def create_arc_job(
    job,
    configuration,
    logger,
):
    """Analog to create_job_script for ARC jobs:
    Creates symlinks for receiving result files, translates the job dict to
    ARC xrsl, and stores the resulting job script (xrsl + sh script) for
    submission.

    Unlike create_job_script, we do _not_ create a separate job_dict with
    copies and SESSIONID inside; all we need is the link from
    webserver_home / sessionID into the user's home directory
    ("job_output/job['JOB_ID']" is added to the result upload URLs in the
    translation).

    Returns message (ARC job ID if no error) and sessionid (None if error)
    """

    if not configuration.arc_clusters:
        return (None, 'No ARC support!')
    if not job['JOBTYPE'] == 'arc':
        return (None, 'Error. This is not an ARC job')

    # Deep copy job for local changes
    job_dict = deepcopy(job)
    # Finally expand reserved job variables like +JOBID+ and +JOBNAME+
    job_dict = expand_variables(job_dict)
    # ... no more changes to job_dict from here on
    client_id = str(job_dict['USER_CERT'])

    # we do not want to see empty jobs here. Test as done in create_job_script.
    if client_id == configuration.empty_job_name:
        return (None, 'Error. empty job for ARC?')

    # generate random session ID:
    sessionid = hexlify(open('/dev/urandom').read(session_id_bytes))
    logger.debug('session ID (for creating links): %s' % sessionid)

    client_dir = client_id_dir(client_id)

    # make symbolic links inside webserver_home:
    #
    # we need: link to owner's dir. to receive results,
    #          job mRSL inside sessid_to_mrsl_link_home
    linklist = [(configuration.user_home + client_dir,
                 configuration.webserver_home + sessionid),
                (configuration.mrsl_files_dir + client_dir + '/' +
                 str(job_dict['JOB_ID']) + '.mRSL',
                 configuration.sessid_to_mrsl_link_home + sessionid + '.mRSL')
                ]

    for (dest, loc) in linklist:
        make_symlink(dest, loc, logger)

    # the translation generates an xRSL object which specifies to execute
    # a shell script with script_name. If sessionid != None, results will
    # be uploaded to sid_redirect/sessionid/job_output/job_id

    try:
        (xrsl, script, script_name) = mrsltoxrsl.translate(job_dict, sessionid)
        logger.debug('translated to xRSL: %s' % xrsl)
        logger.debug('script:\n %s' % script)

    except Exception, err:
        # error during translation, pass a message
        logger.error('Error during xRSL translation: %s' % err.__str__())
        return (None, err.__str__())
Example #12
def create_job_script(
    unique_resource_name,
    exe,
    job,
    resource_config,
    localjobname,
    configuration,
    logger,
):
    """Helper to create actual jobs for handout to a resource.

    Returns tuple with job dict on success and None otherwise.
    The job dict includes random generated sessionid and a I/O session id.
    """

    job_dict = {'': ''}
    # TODO: hexlify is an awfully space wasting URL-safe encoding.
    #       We should just use something like the proposed secure method from
    #       http://stackoverflow.com/a/23728630/2213647
    sessionid = hexlify(open('/dev/urandom').read(session_id_bytes))
    iosessionid = hexlify(open('/dev/urandom').read(session_id_bytes))
    helper_dict_filename = os.path.join(configuration.resource_home,
                                        unique_resource_name,
                                        'empty_job_helper_dict.%s' % exe)

    # Deep copy job for local changes
    job_dict = deepcopy(job)
    # Bump requested values to any resource specs requested in MAXFILL
    job_maxfill = job_dict.get('MAXFILL', [])
    if keyword_all in job_maxfill:
        job_maxfill = maxfill_fields
    for name in maxfill_fields:
        if name in job_maxfill:
            job_dict[name] = resource_config[name]

    job_dict['SESSIONID'] = sessionid
    job_dict['IOSESSIONID'] = iosessionid

    # Create ssh rsa keys and known_hosts for job mount

    mount_private_key = ""
    mount_public_key = ""
    mount_known_hosts = ""

    if job_dict.get('MOUNT', []) != []:

        # Generate public/private key pair for sshfs

        (mount_private_key, mount_public_key) = generate_ssh_rsa_key_pair()

        # Generate known_hosts

        if not os.path.exists(configuration.user_sftp_key_pub):
            msg = "job generation failed:"
            msg = "%s user_sftp_key_pub: '%s' -> File NOT found" % \
                  (msg, configuration.user_sftp_key_pub)
            print msg
            logger.error(msg)
            return (None, msg)

        # Use best available sftp implementation - configuration picks it
        sftp_address = configuration.user_sftp_show_address
        sftp_port = configuration.user_sftp_show_port
        sftp_addresses = socket.gethostbyname_ex(sftp_address or
                                                 socket.getfqdn())
        mount_known_hosts = "%s,[%s]:%s" % (sftp_addresses[0],
                                            sftp_addresses[0], sftp_port)
        for list_idx in xrange(1, len(sftp_addresses)):
            for sftp_address in sftp_addresses[list_idx]:
                mount_known_hosts += ",%s,[%s]:%s" % (sftp_address,
                                                      sftp_address,
                                                      sftp_port)

        fd = open(configuration.user_sftp_key_pub, 'r')
        mount_known_hosts = "%s %s" % (mount_known_hosts, fd.read())
        fd.close()

    job_dict['MOUNTSSHPUBLICKEY'] = mount_public_key
    job_dict['MOUNTSSHPRIVATEKEY'] = mount_private_key
    job_dict['MOUNTSSHKNOWNHOSTS'] = mount_known_hosts

    if not job_dict.has_key('MAXPRICE'):
        job_dict['MAXPRICE'] = '0'
    # Finally expand reserved job variables like +JOBID+ and +JOBNAME+
    job_dict = expand_variables(job_dict)
    # ... no more changes to job_dict from here on
    client_id = str(job_dict['USER_CERT'])
    client_dir = client_id_dir(client_id)

    # if not job:

    if client_id == configuration.empty_job_name:

        # create link to empty job

        linkdest_empty_job = helper_dict_filename
        linkloc_empty_job = configuration.sessid_to_mrsl_link_home + \
            sessionid + '.mRSL'
        make_symlink(linkdest_empty_job, linkloc_empty_job, logger)
    else:

        # link sessionid to mrsl file

        linkdest1 = configuration.mrsl_files_dir + client_dir + '/' + \
            str(job_dict['JOB_ID']) + '.mRSL'
        linkloc1 = configuration.sessid_to_mrsl_link_home + sessionid + '.mRSL'
        make_symlink(linkdest1, linkloc1, logger)

    # link sessionid to job owners home directory

    linkdest2 = configuration.user_home + client_dir
    linkloc2 = configuration.webserver_home + sessionid
    make_symlink(linkdest2, linkloc2, logger)

    # link iosessionid to job owners home directory

    linkdest3 = configuration.user_home + client_dir
    linkloc3 = configuration.webserver_home + iosessionid
    make_symlink(linkdest3, linkloc3, logger)

    # link sessionid to .job file

    linkdest4 = configuration.mig_system_files + str(job_dict['JOB_ID']) + \
        '.job'
    linkloc4 = configuration.webserver_home + sessionid + '.job'
    make_symlink(linkdest4, linkloc4, logger)

    # link sessionid to .getupdatefiles file

    linkdest5 = configuration.mig_system_files + str(job_dict['JOB_ID']) + \
        '.getupdatefiles'
    linkloc5 = configuration.webserver_home + sessionid + \
        '.getupdatefiles'
    make_symlink(linkdest5, linkloc5, logger)

    # link sessionid to .sendoutputfiles file

    linkdest4 = configuration.mig_system_files + str(job_dict['JOB_ID']) + \
        '.sendoutputfiles'
    linkloc4 = configuration.webserver_home + sessionid + \
        '.sendoutputfiles'
    make_symlink(linkdest4, linkloc4, logger)

    # link sessionid to .sendupdatefiles file

    linkdest5 = configuration.mig_system_files + str(job_dict['JOB_ID']) + \
        '.sendupdatefiles'
    linkloc5 = configuration.webserver_home + sessionid + \
        '.sendupdatefiles'
    make_symlink(linkdest5, linkloc5, logger)

    path_without_extension = os.path.join(configuration.resource_home,
                                          unique_resource_name, localjobname)
    gen_res = gen_job_script(
        job_dict,
        resource_config,
        configuration,
        localjobname,
        path_without_extension,
        client_dir,
        exe,
        logger,
    )
    if not gen_res:
        msg = \
            'job scripts were not generated. Perhaps you have specified ' + \
            'an invalid SCRIPTLANGUAGE ? '
        print msg
        logger.error(msg)
        return (None, msg)

    inputfiles_path = path_without_extension + '.getinputfiles'

    # hack to ensure that a resource has a sandbox keyword

    if resource_config.get('SANDBOX', False):

        # Move file to webserver_home for download as we can't push it to
        # sandboxes

        try:

            # RA TODO: change download filename to something that
            # includes sessionid

            webserver_path = os.path.join(configuration.webserver_home,
                                          localjobname + '.getinputfiles')
            os.rename(inputfiles_path, webserver_path)

            # ########## ATTENTION HACK TO MAKE JVM SANDBOXES WORK ############
            # This should be changed to use the (to be developed) RE pre/post
            # processing framework. For now the user must have a jvm dir in
            # his home dir where the class files are located; this should be
            # changed so that the execution homepath can be specified in the
            # mRSL jobfile.
            # Martin Rehr 08/09/06

            # If this is a oneclick job, link the user's jvm dir to
            # webserver_home/sandboxkey.oneclick
            # This is done because the client applet uses the
            # codebase from which it is originally loaded.
            # Therefore the codebase must be dynamically changed
            # for every job.

            if resource_config.has_key('PLATFORM') and \
                    resource_config['PLATFORM'] == 'ONE-CLICK':

                # A two-step link is made.
                # First sandboxkey.oneclick is made to point to
                # sessionid.jvm
                # Second sessionid.jvm is set to point to
                # USER_HOME/jvm
                # This is done for security and easy cleanup:
                # sessionid.jvm is cleaned up by the server upon job
                # finish/timeout, thereby leaving no open entries to
                # the user's jvm dir.

                linkintermediate = configuration.webserver_home + \
                    sessionid + '.jvm'

                if client_dir == configuration.empty_job_name:
                    linkdest = os.path.abspath(configuration.javabin_home)
                else:
                    linkdest = configuration.user_home + client_dir + \
                        os.sep + 'jvm'

                # Make link sessionid.jvm -> USER_HOME/jvm

                make_symlink(linkdest, linkintermediate, logger)

                linkloc = configuration.webserver_home + \
                    resource_config['SANDBOXKEY'] + '.oneclick'

                # Remove previous symlink
                # This must be done in a try/catch as the symlink
                # may be a dead link and 'if os.path.exists(linkloc):'
                # will then return false, even though the link exists.

                try:
                    os.remove(linkloc)
                except:
                    pass

                # Make link sandboxkey.oneclick -> sessionid.jvm

                make_symlink(linkintermediate, linkloc, logger)
        except Exception, err:

            # ######### End JVM SANDBOX HACK ###########

            msg = "File '%s' was not copied to the webserver home." % \
                  inputfiles_path
            print '\nERROR: ' + str(err)
            logger.error(msg)
            return (None, msg)

        return (job_dict, 'OK')
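
The TODO above calls hexlify a space-wasting encoding; opening /dev/urandom directly also leaks a file handle and is not portable. A leaner sketch using os.urandom and URL-safe base64 (note the shorter IDs use a different alphabet than hex, so any strict hex validation elsewhere would need adjusting):

import base64
import os

# session_id_bytes is assumed defined as in the example above
sessionid = base64.urlsafe_b64encode(os.urandom(session_id_bytes)).rstrip('=')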
Example #13
                                                          vgrid_name, [])
    if not trigger_status:
        output_objects.append({'object_type': 'error_text', 'text'
                              : 'Could not save trigger list: %s' % \
                               trigger_msg})
        return (output_objects, returnvalues.SYSTEM_ERROR)

    if new_base_vgrid:

        # create sym link from creators (client_id) home directory to directory
        # containing the vgrid files

        src = vgrid_files_dir
        dst = os.path.join(configuration.user_home, client_dir,
                           vgrid_name)
        if not make_symlink(src, dst, logger):
            output_objects.append({'object_type': 'error_text', 'text'
                                  : 'Could not create link to %s files!' % \
                                   configuration.site_vgrid_label
                                  })
            return (output_objects, returnvalues.SYSTEM_ERROR)

        # make sure public_base dir exists in users home dir

        user_public_base = os.path.join(configuration.user_home,
                                        client_dir, 'public_base')
        try:
            os.mkdir(user_public_base)
        except:
            logger.warning("could not create %s. Probably already exists." %
                           user_public_base)
Example #14
def get_resource(client_id, configuration, logger):
    cookie = None
    sandboxkey = None
    cputime = 1000000
    log_msg = 'oneclick:'

    __MIG_ONECLICK_COOKIE__ = 'MiGOneClickSandboxKey'

    # If the user has an identifying cookie, use the cookie information

    if os.getenv('HTTP_COOKIE') and os.getenv('HTTP_COOKIE'
            ).count(__MIG_ONECLICK_COOKIE__) > 0:
        cookie_arr = os.getenv('HTTP_COOKIE').split(';')
        for elm in cookie_arr:
            if elm.count(__MIG_ONECLICK_COOKIE__) > 0:
                sandboxkey = elm.split('=')[1]
                break

    # If we don't know the user, generate an identification key
    # and a new resource for him.
    # The key is sent to him as a cookie

    if not sandboxkey or not os.path.exists(configuration.sandbox_home
             + sandboxkey):

        # Generate key, and set cookie

        sandboxkey = hexlify(open('/dev/urandom').read(32))
        cookie = 'Set-Cookie: ' + __MIG_ONECLICK_COOKIE__ + '='\
             + sandboxkey + '; '\
             + 'expires=Thu 31-Jan-2099 12:00:00 GMT; path=/; '\
             + 'domain=' + configuration.server_fqdn + '; secure'

        # Create resource

        (status, msg) = create_oneclick_resource(sandboxkey, cputime,
                configuration, logger)
        if not status:
            return (status, msg)
        resource_name = msg
        log_msg += ' Created resource: %s' % resource_name

        # Make symbolic link from
        # sandbox_home/sandboxkey to resource_home/resource_name

        sandbox_link = configuration.sandbox_home + sandboxkey
        resource_path = os.path.abspath(configuration.resource_home
                 + resource_name)

        make_symlink(resource_path, sandbox_link, logger)
    else:

        # Retrieve resource_name from sandboxkey symbolic link

        sandbox_link = configuration.sandbox_home + sandboxkey
        resource_name = os.path.basename(os.path.realpath(sandbox_link))

    # If resource has a jobrequest pending, remove it.

    job_pending_file = configuration.resource_home + resource_name\
         + os.sep + 'jobrequest_pending.jvm'
    if os.path.exists(job_pending_file):
        os.remove(job_pending_file)

    log_msg += ', Remote IP: %s, Key: %s' % (os.getenv('REMOTE_ADDR'),
            sandboxkey)

    # Make symbolic link from webserver_home to javabin_home

    codebase_link = configuration.webserver_home + sandboxkey\
         + '.oneclick'
    codebase_path = os.path.abspath(configuration.javabin_home)

    # Remove symbolic link if it already exists.
    # This must be done in a try/catch as the symlink
    # may be a dead link and 'if os.path.exists(linkloc):'
    # will then return false, even though the link exists.

    try:
        os.remove(codebase_link)
    except:
        pass

    make_symlink(codebase_path, codebase_link, logger)

    logger.info(log_msg)

    return (True, (sandboxkey, resource_name, cookie, cputime))
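
The manual HTTP_COOKIE splitting above can be replaced by the standard library Cookie module, which handles quoting and multiple cookies. A sketch:

import os
from Cookie import SimpleCookie  # http.cookies in Python 3

cookies = SimpleCookie(os.getenv('HTTP_COOKIE', ''))
morsel = cookies.get('MiGOneClickSandboxKey')
sandboxkey = morsel.value if morsel else None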
Example #15
def get_resource(client_id, configuration, logger):
    cookie = None
    sandboxkey = None
    cputime = 1000000
    log_msg = 'oneclick:'

    __MIG_ONECLICK_COOKIE__ = 'MiGOneClickSandboxKey'

    # If the user has an identifying cookie, use the cookie information

    if os.getenv('HTTP_COOKIE') and os.getenv('HTTP_COOKIE').count(
            __MIG_ONECLICK_COOKIE__) > 0:
        cookie_arr = os.getenv('HTTP_COOKIE').split(';')
        for elm in cookie_arr:
            if elm.count(__MIG_ONECLICK_COOKIE__) > 0:
                sandboxkey = elm.split('=')[1]
                break

    # If we don't know the user, generate an identification key
    # and a new resource for him.
    # The key is sent to him as a cookie

    if not sandboxkey or not os.path.exists(configuration.sandbox_home +
                                            sandboxkey):

        # Generate key, and set cookie

        sandboxkey = hexlify(open('/dev/urandom').read(32))
        cookie = 'Set-Cookie: ' + __MIG_ONECLICK_COOKIE__ + '='\
             + sandboxkey + '; '\
             + 'expires=Thu 31-Jan-2099 12:00:00 GMT; path=/; '\
             + 'domain=' + configuration.server_fqdn + '; secure'

        # Create resource

        (status, msg) = create_oneclick_resource(sandboxkey, cputime,
                                                 configuration, logger)
        if not status:
            return (status, msg)
        resource_name = msg
        log_msg += ' Created resource: %s' % resource_name

        # Make symbolic link from
        # sandbox_home/sandboxkey to resource_home/resource_name

        sandbox_link = configuration.sandbox_home + sandboxkey
        resource_path = os.path.abspath(configuration.resource_home +
                                        resource_name)

        make_symlink(resource_path, sandbox_link, logger)
    else:

        # Retrieve resource_name from sandboxkey symbolic link

        sandbox_link = configuration.sandbox_home + sandboxkey
        resource_name = os.path.basename(os.path.realpath(sandbox_link))

    # If resource has a jobrequest pending, remove it.

    job_pending_file = configuration.resource_home + resource_name\
         + os.sep + 'jobrequest_pending.jvm'
    if os.path.exists(job_pending_file):
        os.remove(job_pending_file)

    log_msg += ', Remote IP: %s, Key: %s' % (os.getenv('REMOTE_ADDR'),
                                             sandboxkey)

    # Make symbolic link from webserver_home to javabin_home

    codebase_link = configuration.webserver_home + sandboxkey\
         + '.oneclick'
    codebase_path = os.path.abspath(configuration.javabin_home)

    # Remove symbolic link if it already exists.
    # This must be done in a try/catch as the symlink
    # may be a dead link and 'if os.path.exists(linkloc):'
    # will then return false, even though the link exists.

    try:
        os.remove(codebase_link)
    except:
        pass

    make_symlink(codebase_path, codebase_link, logger)

    logger.info(log_msg)

    return (True, (sandboxkey, resource_name, cookie, cputime))
Example #16
def main(client_id, user_arguments_dict):
    """Main function used by front end"""
    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )

    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    logger.debug("User: %s executing %s" % (client_id, op_name))
    if not configuration.site_enable_jupyter:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'The Jupyter service is not enabled on the system'
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    if not configuration.site_enable_sftp_subsys and not \
            configuration.site_enable_sftp:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'The required sftp service is not enabled on the system'
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    if configuration.site_enable_sftp:
        sftp_port = configuration.user_sftp_port

    if configuration.site_enable_sftp_subsys:
        sftp_port = configuration.user_sftp_subsys_port

    requested_service = accepted['service'][-1]
    service = {
        k: v
        for options in configuration.jupyter_services
        for k, v in options.items()
        if options['service_name'] == requested_service
    }

    if not service:
        valid_services = [
            options['service_name']
            for options in configuration.jupyter_services
        ]
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '%s is not a valid jupyter service, '
            'allowed services include %s' % (requested_service, valid_services)
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    valid_service = valid_jupyter_service(configuration, service)
    if not valid_service:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'The service %s appears to be misconfigured, '
            'please contact a system administrator about this issue' %
            requested_service
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    # Get an active jupyter host
    host = get_host_from_service(configuration, service)
    if host is None:
        logger.error("No active jupyterhub host could be found")
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Failed to establish connection to the %s Jupyter service' %
            service['service_name']
        })
        output_objects.append({
            'object_type': 'link',
            'destination': 'jupyter.py',
            'text': 'Back to Jupyter services overview'
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    remote_user = unescape(os.environ.get('REMOTE_USER', '')).strip()
    if not remote_user:
        logger.error("Can't connect to jupyter with an empty REMOTE_USER "
                     "environment variable")
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Failed to establish connection to the Jupyter service'
        })
        return (output_objects, returnvalues.CLIENT_ERROR)
    # Ensure the remote_user value can be HTTP POSTed
    remote_user = str(remote_user)

    # TODO, activate admin info
    # remote_user = {'USER': username, 'IS_ADMIN': is_admin(client_id,
    #                                                      configuration,
    # logger)}

    # Regular sftp path
    mnt_path = os.path.join(configuration.jupyter_mount_files_dir, client_dir)
    # Subsys sftp path
    subsys_path = os.path.join(configuration.mig_system_files, 'jupyter_mount')
    # sftp session path
    link_home = configuration.sessid_to_jupyter_mount_link_home

    user_home_dir = os.path.join(configuration.user_home, client_dir)

    # Preparing prerequisites
    if not os.path.exists(mnt_path):
        os.makedirs(mnt_path)

    if not os.path.exists(link_home):
        os.makedirs(link_home)

    if configuration.site_enable_sftp_subsys:
        if not os.path.exists(subsys_path):
            os.makedirs(subsys_path)

    # Make sure ssh daemon does not complain
    tighten_key_perms(configuration, client_id)

    url_base = '/' + service['service_name']
    url_home = url_base + '/home'
    url_auth = host + url_base + '/hub/login'
    url_data = host + url_base + '/hub/user-data'

    # Does the client home dir contain an active mount key?
    # If so, just keep on using it.
    jupyter_mount_files = [
        os.path.join(mnt_path, jfile) for jfile in os.listdir(mnt_path)
        if jfile.endswith('.jupyter_mount')
    ]

    logger.info("User: %s mount files: %s" %
                (client_id, "\n".join(jupyter_mount_files)))
    logger.debug("Remote-User %s" % remote_user)
    active_mounts = []
    for jfile in jupyter_mount_files:
        jupyter_dict = unpickle(jfile, logger)
        if not jupyter_dict:
            # Remove failed unpickle
            logger.error("Failed to unpickle %s removing it" % jfile)
            remove_jupyter_mount(jfile, configuration)
        else:
            # Mount has timed out
            if not is_active(jupyter_dict):
                remove_jupyter_mount(jfile, configuration)
            else:
                # Valid mount
                active_mounts.append({'path': jfile, 'state': jupyter_dict})

    logger.debug(
        "User: %s active keys: %s" %
        (client_id, "\n".join([mount['path'] for mount in active_mounts])))

    # If multiple mounts are active, keep the newest and remove the rest
    active_mount, old_mounts = get_newest_mount(active_mounts)
    for mount in old_mounts:
        remove_jupyter_mount(mount['path'], configuration)

    # A valid active key is already present, so redirect straight to the
    # jupyter service and pass the most recent mount information
    if active_mount is not None:
        mount_dict = mig_to_mount_adapt(active_mount['state'])
        user_dict = mig_to_user_adapt(active_mount['state'])
        logger.debug("Existing header values, Mount: %s User: %s" %
                     (mount_dict, user_dict))

        auth_header = {'Remote-User': remote_user}
        json_data = {'data': {'Mount': mount_dict, 'User': user_dict}}

        if configuration.site_enable_workflows:
            workflows_dict = mig_to_workflows_adapt(active_mount['state'])
            if not workflows_dict:
                # No cached workflows session could be found -> refresh with
                # a new one
                workflow_session_id = get_workflow_session_id(
                    configuration, client_id)
                if not workflow_session_id:
                    workflow_session_id = create_workflow_session_id(
                        configuration, client_id)
                # TODO get this dynamically
                url = configuration.migserver_https_sid_url + \
                    '/cgi-sid/workflowsjsoninterface.py?output_format=json'
                workflows_dict = {
                    'WORKFLOWS_URL': url,
                    'WORKFLOWS_SESSION_ID': workflow_session_id
                }

            logger.debug("Existing header values, Workflows: %s" %
                         workflows_dict)
            json_data['workflows_data'] = {'Session': workflows_dict}

        with requests.Session() as session:
            # Authenticate and submit data
            response = session.post(url_auth, headers=auth_header)
            if response.status_code == 200:
                response = session.post(url_data, json=json_data)
                if response.status_code != 200:
                    logger.error(
                        "Jupyter: User %s failed to submit data %s to %s" %
                        (client_id, json_data, url_data))
            else:
                logger.error(
                    "Jupyter: User %s failed to authenticate against %s" %
                    (client_id, url_auth))

        # Redirect client to jupyterhub
        return jupyter_host(configuration, output_objects, remote_user,
                            url_home)

    # Create a new keyset
    # Create login session id
    session_id = generate_random_ascii(2 * session_id_bytes,
                                       charset='0123456789abcdef')

    # Generate private/public keys
    (mount_private_key,
     mount_public_key) = generate_ssh_rsa_key_pair(encode_utf8=True)

    # Resolve sftp host; gethostbyname_ex returns (hostname, aliases, ips)
    sftp_addresses = socket.gethostbyname_ex(
        configuration.user_sftp_show_address or socket.getfqdn())

    # Subsys sftp support
    if configuration.site_enable_sftp_subsys:
        # Restrict what the mount agent key is allowed to do
        auth_content = []
        restrict_opts = 'no-agent-forwarding,no-port-forwarding,no-pty,'
        restrict_opts += 'no-user-rc,no-X11-forwarding'
        auth_content.append('%s %s\n' % (restrict_opts, mount_public_key))
        # Write auth file
        write_file('\n'.join(auth_content),
                   os.path.join(subsys_path, session_id + '.authorized_keys'),
                   logger,
                   umask=027)

    logger.debug("User: %s - Creating a new jupyter mount keyset - "
                 "private_key: %s public_key: %s " %
                 (client_id, mount_private_key, mount_public_key))

    jupyter_dict = {
        'MOUNT_HOST': configuration.short_title,
        'SESSIONID': session_id,
        'USER_CERT': client_id,
        # Fraction precision is not needed here, and not all systems
        # provide it anyway.
        'CREATED_TIMESTAMP': int(time.time()),
        'MOUNTSSHPRIVATEKEY': mount_private_key,
        'MOUNTSSHPUBLICKEY': mount_public_key,
        # Used by the jupyterhub to know which host to mount against
        'TARGET_MOUNT_ADDR': "@" + sftp_addresses[0] + ":",
        'PORT': sftp_port
    }
    client_email = extract_field(client_id, 'email')
    if client_email:
        jupyter_dict.update({'USER_EMAIL': client_email})

    if configuration.site_enable_workflows:
        workflow_session_id = get_workflow_session_id(configuration, client_id)
        if not workflow_session_id:
            workflow_session_id = create_workflow_session_id(
                configuration, client_id)
        # TODO get this dynamically
        url = configuration.migserver_https_sid_url + \
            '/cgi-sid/workflowsjsoninterface.py?output_format=json'
        jupyter_dict.update({
            'WORKFLOWS_URL': url,
            'WORKFLOWS_SESSION_ID': workflow_session_id
        })

    # Only post the required keys, adapt to API expectations
    mount_dict = mig_to_mount_adapt(jupyter_dict)
    user_dict = mig_to_user_adapt(jupyter_dict)
    workflows_dict = mig_to_workflows_adapt(jupyter_dict)
    logger.debug("User: %s Mount header: %s" % (client_id, mount_dict))
    logger.debug("User: %s User header: %s" % (client_id, user_dict))
    if workflows_dict:
        logger.debug("User: %s Workflows header: %s" %
                     (client_id, workflows_dict))

    # Auth and pass a new set of valid mount keys
    auth_header = {'Remote-User': remote_user}
    json_data = {'data': {'Mount': mount_dict, 'User': user_dict}}
    if workflows_dict:
        json_data['workflows_data'] = {'Session': workflows_dict}

    # First login
    with requests.Session() as session:
        # Authenticate
        response = session.post(url_auth, headers=auth_header)
        if response.status_code == 200:
            response = session.post(url_data, json=json_data)
            if response.status_code != 200:
                logger.error(
                    "Jupyter: User %s failed to submit data %s to %s" %
                    (client_id, json_data, url_data))
        else:
            logger.error("Jupyter: User %s failed to authenticate against %s" %
                         (client_id, url_auth))

    # Update pickle with the new valid key
    jupyter_mount_state_path = os.path.join(mnt_path,
                                            session_id + '.jupyter_mount')

    pickle(jupyter_dict, jupyter_mount_state_path, logger)

    # Link the jupyter pickle state file into the session link home
    linkloc_new_jupyter_mount = os.path.join(link_home,
                                             session_id + '.jupyter_mount')
    make_symlink(jupyter_mount_state_path, linkloc_new_jupyter_mount, logger)

    # Link userhome
    linkloc_user_home = os.path.join(link_home, session_id)
    make_symlink(user_home_dir, linkloc_user_home, logger)

    return jupyter_host(configuration, output_objects, remote_user, url_home)
Example #17
0
            output_objects.append(
                {'object_type': 'error_text', 'text':
                 'Could not create needed dirs on %s server! %s'
                 % (configuration.short_title, exc)})
            logger.error('%s when looking for dir %s.' % (exc, dir1))
            return (output_objects, returnvalues.SYSTEM_ERROR)

    # create symlink from users home directory to vgrid file directory

    link_src = os.path.abspath(configuration.vgrid_files_home + os.sep
                               + vgrid_name) + os.sep
    link_dst = user_dir + vgrid_name

    # create symlink to vgrid files

    if not make_symlink(link_src, link_dst, logger):
        output_objects.append({'object_type': 'error_text', 'text':
                               'Could not create link to %s files!'
                               % configuration.site_vgrid_label})
        logger.error('Could not create link to %s files! (%s -> %s)'
                     % (configuration.site_vgrid_label, link_src, link_dst))
        return (output_objects, returnvalues.SYSTEM_ERROR)

    output_objects.append({'object_type': 'text', 'text':
                           'New member %s successfully added to %s %s!'
                           % (cert_id, vgrid_name,
                              configuration.site_vgrid_label)})
    output_objects.append({'object_type': 'html_form', 'text': """
<form method='post' action='sendrequestaction.py'>
Example #18
0
        for (meta_key, meta_label) in public_meta:
            meta_value = freeze_dict.get(meta_key, '')
            if meta_value:
                contents += """%s: %s<br/>
""" % (meta_label, meta_value)
        contents += """
<h2 class='staticpage'>Archive Files</h2>
        """
        for rel_path in frozen_files:
            contents += """<a href='%s'>%s</a><br/>
""" % (rel_path, rel_path)
        contents += """
</div>
%s
        """ % get_cgi_html_footer(configuration, widgets=False)
        if not make_symlink(frozen_dir, real_pub_dir, logger) or \
               not write_file(contents, real_pub_index, configuration.logger):
            logger.error("create_frozen_archive: publish failed")
            remove_rec(frozen_dir, configuration)
            return (False, 'Error publishing frozen archive')
    return (True, freeze_id)

def delete_frozen_archive(freeze_id, configuration):
    """Delete an existing frozen archive without checking ownership or
    persistance of frozen archives.
    """
    frozen_dir = os.path.join(configuration.freeze_home, freeze_id)
    if remove_rec(frozen_dir, configuration):
        return (True, '')
    else:
        return (False, 'Error deleting frozen archive "%s"' % freeze_id)