Example #1
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    output_objects.append(
        {'object_type': 'text',
         'text': '--------- Trying to Clean front end ----------'})
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
        )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    unique_resource_name = accepted['unique_resource_name'][-1]

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append(
            {'object_type': 'error_text', 'text': '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
             })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not is_owner(client_id, unique_resource_name,
                    configuration.resource_home, logger):
        output_objects.append(
            {'object_type': 'error_text',
             'text': 'Failure: You must be an owner of %s to clean the '
             'front end!' % unique_resource_name})
        return (output_objects, returnvalues.CLIENT_ERROR)

    exit_status = returnvalues.OK
    (status, msg) = stop_resource_frontend(unique_resource_name,
                                           configuration, logger)
    if not status:
        output_objects.append(
            {'object_type': 'error_text',
             'text': 'Problems stopping front end during clean: %s' % msg})
        return (output_objects, returnvalues.CLIENT_ERROR)

    (status2, msg2) = clean_resource_frontend(unique_resource_name,
                                              configuration.resource_home,
                                              logger)
    if not status2:
        output_objects.append(
            {'object_type': 'error_text',
             'text': 'Problems cleaning front end during clean: %s' % msg2})
        exit_status = returnvalues.SYSTEM_ERROR
    if status and status2:
        output_objects.append(
            {'object_type': 'text',
             'text': 'Clean front end success: Stop output: %s Clean output %s'
             % (msg, msg2)})

    return (output_objects, exit_status)
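
All of these handlers share the same return convention: a list of output objects (dicts keyed by 'object_type') plus a returnvalues status code. A minimal, hypothetical harness for exercising such a handler is sketched below; render_text and run_handler are illustration-only helpers, not part of the original module.

def render_text(output_objects):
    """Flatten handler output objects to plain text for quick inspection."""
    lines = []
    for entry in output_objects:
        if entry.get('object_type') in ('header', 'text', 'warning',
                                        'error_text'):
            lines.append(entry.get('text', ''))
    return '\n'.join(lines)


def run_handler(handler_main, client_id, user_arguments_dict):
    """Call a handler the way a front end dispatcher would and print the
    outcome. Purely a sketch for local experimentation."""
    (output_objects, status) = handler_main(client_id, user_arguments_dict)
    print("exit status: %s" % status)
    print(render_text(output_objects))
    return status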
Example #2
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    title_entry = find_entry(output_objects, 'title')
    label = "%s" % configuration.site_vgrid_label
    title_entry['text'] = "Add/Update %s Trigger" % label
    output_objects.append({
        'object_type': 'header',
        'text': 'Add/Update %s Trigger' % label
    })
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
        typecheck_overrides={'path': valid_path_pattern},
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    # NOTE: strip leftmost slashes from all fields used in file paths to avoid
    # interference with os.path.join calls. Furthermore we strip and normalize
    # the path variable first to make sure it does not point outside the vgrid.
    # In practice any such directory traversal attempts will generally be moot
    # since the grid_events daemon only starts a listener for each top-level
    # vgrid and in there only reacts to events that match trigger rules from
    # that particular vgrid. Thus only subvgrid access to parent vgrids might
    # be a concern and still of limited consequence.
    # NOTE: merge multi args into one string and split again to get flat array
    rule_id = accepted['rule_id'][-1].strip()
    vgrid_name = accepted['vgrid_name'][-1].strip().lstrip(os.sep)
    path = os.path.normpath(accepted['path'][-1].strip()).lstrip(os.sep)
    changes = [i.strip() for i in ' '.join(accepted['changes']).split()]
    action = accepted['action'][-1].strip()
    arguments = [
        i.strip() for i in shlex.split(' '.join(accepted['arguments']))
    ]
    rate_limit = accepted['rate_limit'][-1].strip()
    settle_time = accepted['settle_time'][-1].strip()
    match_files = accepted['match_files'][-1].strip() == 'True'
    match_dirs = accepted['match_dirs'][-1].strip() == 'True'
    match_recursive = accepted['match_recursive'][-1].strip() == 'True'
    rank_str = accepted['rank'][-1]
    try:
        rank = int(rank_str)
    except ValueError:
        rank = None

    logger.debug("addvgridtrigger with args: %s" % user_arguments_dict)

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name

    base_dir = os.path.abspath(
        os.path.join(configuration.user_home, client_dir)) + os.sep

    # we just use a high res timestamp as automatic rule_id

    if rule_id == keyword_auto:
        rule_id = "%d" % (time.time() * 1E8)

    if action == keyword_auto:
        action = valid_trigger_actions[0]

    if any_state in changes:
        changes = valid_trigger_changes

    logger.info("addvgridtrigger %s" % vgrid_name)

    # Validity of user and vgrid names is checked in this init function so
    # no need to worry about illegal directory traversal through variables

    (ret_val, msg, ret_variables) = \
        init_vgrid_script_add_rem(vgrid_name, client_id,
                                  rule_id, 'trigger',
                                  configuration)
    if not ret_val:
        output_objects.append({'object_type': 'error_text', 'text': msg})
        return (output_objects, returnvalues.CLIENT_ERROR)
    elif msg:

        # In case of warnings, msg is non-empty while ret_val remains True

        output_objects.append({'object_type': 'warning', 'text': msg})

    # if we get here user is either vgrid owner or allowed to add rule

    # don't add if already in vgrid or parent vgrid - but update if owner

    update_id = None
    if vgrid_is_trigger(vgrid_name, rule_id, configuration):
        if vgrid_is_trigger_owner(vgrid_name, rule_id, client_id,
                                  configuration):
            update_id = 'rule_id'
        else:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                '%s is already a trigger owned by somebody else in the %s' %
                (rule_id, label)
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

    # don't add if already in subvgrid

    (list_status, subvgrids) = vgrid_list_subvgrids(vgrid_name, configuration)
    if not list_status:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Error getting list of sub%ss: %s' % (label, subvgrids)
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)
    for subvgrid in subvgrids:
        if vgrid_is_trigger(subvgrid, rule_id, configuration, recursive=False):
            output_objects.append({
                'object_type': 'error_text',
                'text': '''%(rule_id)s is already in a
sub-%(vgrid_label)s (%(subvgrid)s). Please remove the trigger from the
sub-%(vgrid_label)s and try again''' % {
                    'rule_id': rule_id,
                    'subvgrid': subvgrid,
                    'vgrid_label': label
                }
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

    if not action in valid_trigger_actions:
        output_objects.append({
            'object_type': 'error_text',
            'text': "invalid action value %s" % action
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if keyword_all in changes:
        changes = valid_trigger_changes
    for change in changes:
        if not change in valid_trigger_changes:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                "found invalid change value %s" % change
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

    # Check if we should load saved trigger for rank change or update

    rule_dict = None
    if rank is not None or update_id is not None:
        (load_status, all_triggers) = vgrid_triggers(vgrid_name, configuration)
        if not load_status:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Failed to load triggers for %s: %s' %
                (vgrid_name, all_triggers)
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)
        for saved_dict in all_triggers:
            if saved_dict['rule_id'] == rule_id:
                rule_dict = saved_dict
                break
        if rule_dict is None:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'No such trigger %s for %s: %s' %
                (rule_id, vgrid_name, all_triggers)
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
    elif not path:
        # New trigger with missing path
        output_objects.append({
            'object_type': 'error_text',
            'text': '''Either path or rank must
be set.'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)
    elif action == "submit" and not arguments:
        # New submit trigger with missing mrsl arguments
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Submit triggers must give
a job description file path as argument.'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Handle create and update (i.e. new, update all or just refresh mRSL)

    if rank is None:

        # IMPORTANT: we save the job template contents to avoid potential abuse
        # Otherwise someone else in the VGrid could tamper with the template
        # and make the next trigger execute arbitrary code on behalf of the
        # rule owner.

        templates = []

        # Merge current and saved values

        req_dict = {
            'rule_id': rule_id,
            'vgrid_name': vgrid_name,
            'path': path,
            'changes': changes,
            'run_as': client_id,
            'action': action,
            'arguments': arguments,
            'rate_limit': rate_limit,
            'settle_time': settle_time,
            'match_files': match_files,
            'match_dirs': match_dirs,
            'match_recursive': match_recursive,
            'templates': templates
        }
        if rule_dict is None:
            rule_dict = req_dict
        else:
            for field in user_arguments_dict:
                if field in req_dict:
                    rule_dict[field] = req_dict[field]

        # Now refresh template contents

        if rule_dict['action'] == "submit":
            for rel_path in rule_dict['arguments']:
                # IMPORTANT: path must be expanded to abs for proper chrooting
                abs_path = os.path.abspath(os.path.join(base_dir, rel_path))
                try:
                    if not valid_user_path(configuration, abs_path, base_dir,
                                           True):
                        logger.warning(
                            '%s tried to %s restricted path %s ! (%s)' %
                            (client_id, op_name, abs_path, rel_path))
                        raise ValueError('invalid submit path argument: %s' %
                                         rel_path)
                    temp_fd = open(abs_path)
                    templates.append(temp_fd.read())
                    temp_fd.close()
                except Exception as err:
                    logger.error("read submit argument file failed: %s" % err)
                    output_objects.append({
                        'object_type':
                        'error_text',
                        'text':
                        'failed to read submit argument file "%s"' % rel_path
                    })
                    return (output_objects, returnvalues.CLIENT_ERROR)

        # Save updated template contents here
        rule_dict['templates'] = templates

    # Add to list and pickle

    (add_status, add_msg) = vgrid_add_triggers(configuration, vgrid_name,
                                               [rule_dict], update_id, rank)
    if not add_status:
        logger.error('%s failed to add/update trigger: %s' %
                     (client_id, add_msg))
        output_objects.append({
            'object_type': 'error_text',
            'text': '%s' % add_msg
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    if rank is not None:
        logger.info('%s moved trigger %s to %d' % (client_id, rule_id, rank))
        output_objects.append({
            'object_type':
            'text',
            'text':
            'moved %s trigger %s to position %d' % (vgrid_name, rule_id, rank)
        })
    elif update_id:
        logger.info('%s updated trigger: %s' % (client_id, rule_dict))
        output_objects.append({
            'object_type':
            'text',
            'text':
            'Existing trigger %s successfully updated in %s %s!' %
            (rule_id, vgrid_name, label)
        })
    else:
        logger.info('%s added new trigger: %s' % (client_id, rule_dict))
        output_objects.append({
            'object_type':
            'text',
            'text':
            'New trigger %s successfully added to %s %s!' %
            (rule_id, vgrid_name, label)
        })

    output_objects.append({
        'object_type': 'link',
        'destination': 'vgridworkflows.py?vgrid_name=%s' % vgrid_name,
        'text': 'Back to workflows for %s' % vgrid_name
    })
    return (output_objects, returnvalues.OK)
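
One detail worth noting above: repeated 'arguments' form values are merged into a single string and re-split with shlex, so quoted arguments survive as single items. A standalone illustration of that flattening step, with made-up sample values:

import shlex

# Repeated form fields arrive as a list of strings; joining and re-splitting
# with shlex flattens them while keeping quoted arguments intact.
accepted_arguments = ['job.mrsl "file with spaces.mrsl"', 'extra.mrsl']
arguments = [i.strip() for i in shlex.split(' '.join(accepted_arguments))]
print(arguments)  # ['job.mrsl', 'file with spaces.mrsl', 'extra.mrsl']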
Example #3
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    defaults = signature()[1]
    title_entry = find_entry(output_objects, 'title')
    label = "%s" % configuration.site_vgrid_label
    title_entry['text'] = "Remove %s Trigger" % label
    output_objects.append({'object_type': 'header',
                           'text': 'Remove %s Trigger' % label})
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
        )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    vgrid_name = accepted['vgrid_name'][-1]
    rule_id = accepted['rule_id'][-1]

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append(
            {'object_type': 'error_text', 'text': '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
             })
        return (output_objects, returnvalues.CLIENT_ERROR)

    logger.info("rmvgridtrigger %s %s" % (vgrid_name, rule_id))

    # Validity of user and vgrid names is checked in this init function so
    # no need to worry about illegal directory traversal through variables

    (ret_val, msg, ret_variables) = \
        init_vgrid_script_add_rem(vgrid_name, client_id,
                                  rule_id, 'trigger',
                                  configuration)
    if not ret_val:
        output_objects.append({'object_type': 'error_text', 'text': msg})
        return (output_objects, returnvalues.CLIENT_ERROR)
    elif msg:

        # In case of warnings, msg is non-empty while ret_val remains True

        output_objects.append({'object_type': 'warning', 'text': msg})

    # if we get here user is either vgrid owner or has rule ownership

    # can't remove if not a participant

    if not vgrid_is_trigger(vgrid_name, rule_id, configuration, recursive=False):
        output_objects.append({'object_type': 'error_text', 'text':
                               '%s is not a trigger in %s %s.' % \
                               (rule_id, vgrid_name, label)})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # remove

    (rm_status, rm_msg) = vgrid_remove_triggers(configuration, vgrid_name,
                                                 [rule_id])
    if not rm_status:
        logger.error('%s failed to remove trigger: %s' % (client_id, rm_msg))
        output_objects.append({'object_type': 'error_text', 'text': rm_msg})
        output_objects.append({'object_type': 'error_text', 'text':
                               '''%(rule_id)s might be listed as a trigger of
this %(vgrid_label)s because it is a trigger of a parent %(vgrid_label)s.
Removal must be performed from the most significant %(vgrid_label)s
possible.''' % {'rule_id': rule_id, 'vgrid_label': label}})
        return (output_objects, returnvalues.SYSTEM_ERROR)

    logger.info('%s removed trigger: %s' % (client_id, rule_id))
    output_objects.append({'object_type': 'text', 'text':
                           'Trigger %s successfully removed from %s %s!'
                           % (rule_id, vgrid_name, label)})
    output_objects.append({'object_type': 'link', 'destination':
                           'vgridworkflows.py?vgrid_name=%s' % vgrid_name,
                           'text': 'Back to workflows for %s' % vgrid_name})
    return (output_objects, returnvalues.OK)
Example #4
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    defaults = signature()[1]
    title_entry = find_entry(output_objects, 'title')
    label = "%s" % configuration.site_vgrid_label
    title_entry['text'] = "Reject %s Request" % label
    output_objects.append({
        'object_type': 'header',
        'text': 'Reject %s Request' % label
    })
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    vgrid_name = accepted['vgrid_name'][-1].strip()
    request_name = unhexlify(accepted['request_name'][-1])

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Validity of user and vgrid names is checked in this init function so
    # no need to worry about illegal directory traversal through variables

    (ret_val, msg, ret_variables) = \
        init_vgrid_script_add_rem(vgrid_name, client_id, request_name,
                                  'request', configuration)
    if not ret_val:
        output_objects.append({'object_type': 'error_text', 'text': msg})
        return (output_objects, returnvalues.CLIENT_ERROR)
    elif msg:

        # In case of warnings, msg is non-empty while ret_val remains True

        output_objects.append({'object_type': 'warning', 'text': msg})

    # NOTE: guard against an empty request_name so the checks below cannot
    # hit an undefined req
    request_dir = os.path.join(configuration.vgrid_home, vgrid_name)
    req = None
    if request_name:
        req = load_access_request(configuration, request_dir, request_name)
    if not req or not delete_access_request(configuration, request_dir,
                                            request_name):
        logger.error("failed to delete access request %s for %s" %
                     (request_name, vgrid_name))
        output_objects.append({
            'object_type': 'error_text', 'text':
            'Failed to remove saved access request %s for %s!' %
            (request_name, vgrid_name)})
        return (output_objects, returnvalues.CLIENT_ERROR)
    output_objects.append({
        'object_type':
        'text',
        'text':
        '''
Deleted %(request_type)s access request to %(target)s for %(entity)s.
''' % req
    })
    if req['request_type'] == 'vgridresource':
        id_field = "unique_resource_name"
    else:
        id_field = "cert_id"
    form_method = 'post'
    csrf_limit = get_csrf_limit(configuration)
    fill_helpers = {
        'protocol': any_protocol,
        'id_field': id_field,
        'vgrid_label': label,
        'form_method': form_method,
        'csrf_field': csrf_field,
        'csrf_limit': csrf_limit
    }
    fill_helpers.update(req)
    target_op = 'sendrequestaction'
    csrf_token = make_csrf_token(configuration, form_method, target_op,
                                 client_id, csrf_limit)
    fill_helpers.update({'target_op': target_op, 'csrf_token': csrf_token})

    output_objects.append({
        'object_type':
        'html_form',
        'text':
        """
<p>
You can use the reply form below if you want to additionally send an
explanation for rejecting the request.
</p>
<form method='%(form_method)s' action='%(target_op)s.py'>
<input type='hidden' name='%(csrf_field)s' value='%(csrf_token)s' />
<input type=hidden name=request_type value='vgridreject' />
<input type=hidden name=vgrid_name value='%(target)s' />
<input type=hidden name=%(id_field)s value='%(entity)s' />
<input type=hidden name=protocol value='%(protocol)s' />
<table>
<tr>
<td class='title'>Optional reject message to requestor(s)</td>
</tr><tr>
<td><textarea name=request_text cols=72 rows=10>
We have decided to reject your %(request_type)s request to our %(target)s
%(vgrid_label)s.

Regards, the %(target)s %(vgrid_label)s owners
</textarea></td>
</tr>
<tr>
<td><input type='submit' value='Inform requestor(s)' /></td>
</tr>
</table>
</form>
<br />
""" % fill_helpers
    })
    output_objects.append({
        'object_type': 'link',
        'destination': 'adminvgrid.py?vgrid_name=%s' % vgrid_name,
        'text': 'Back to administration for %s' % vgrid_name
    })
    return (output_objects, returnvalues.OK)
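
The request_name above arrives hex-encoded so it can be passed safely as a form value, and unhexlify restores the original file name. A small round-trip illustration with a made-up name:

from binascii import hexlify, unhexlify

# A saved request file name is hex-encoded before being embedded in the
# form and decoded again server-side, as in the handler above.
original = b'req-1234567890.1234'
encoded = hexlify(original)
assert unhexlify(encoded) == original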
Example #5
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    patterns = accepted['job_id']

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not configuration.site_enable_jobs:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Job execution is not enabled on this system'''
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    if not patterns:
        output_objects.append({
            'object_type': 'error_text',
            'text': 'No job_id specified!'
        })
        return (output_objects, returnvalues.NO_SUCH_JOB_ID)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name

    base_dir = \
        os.path.abspath(os.path.join(configuration.mrsl_files_dir,
                        client_dir)) + os.sep

    filelist = []
    keywords_dict = mrslkeywords.get_keywords_dict(configuration)
    for pattern in patterns:
        pattern = pattern.strip()

        # Backward compatibility - all_jobs keyword should match all jobs

        if pattern == all_jobs:
            pattern = '*'

        # Check directory traversal attempts before actual handling to avoid
        # leaking information about file system layout while allowing
        # consistent error messages

        unfiltered_match = glob.glob(base_dir + pattern + '.mRSL')
        match = []
        for server_path in unfiltered_match:
            # IMPORTANT: path must be expanded to abs for proper chrooting
            abs_path = os.path.abspath(server_path)
            if not valid_user_path(configuration, abs_path, base_dir, True):

                # out of bounds - save user warning for later to allow
                # partial match:
                # ../*/* is technically allowed to match own files.

                logger.warning('%s tried to %s restricted path %s ! (%s)' %
                               (client_id, op_name, abs_path, pattern))
                continue

            # Insert valid job files in filelist for later treatment

            match.append(abs_path)

        # Now actually treat list of allowed matchings and notify if no
        # (allowed) match

        if not match:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                '%s: You do not have any matching job IDs!' % pattern
            })
            status = returnvalues.CLIENT_ERROR
        else:
            filelist += match

    # resubmit is hard on the server

    if len(filelist) > 100:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Too many matching jobs (%s)!' % len(filelist)
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    resubmitobjs = []
    status = returnvalues.OK
    for filepath in filelist:
        mrsl_file = filepath.replace(base_dir, '')
        job_id = mrsl_file.replace('.mRSL', '')

        # ("Resubmitting job with job_id: %s" % job_id)

        resubmitobj = {'object_type': 'resubmitobj', 'job_id': job_id}

        mrsl_dict = unpickle(filepath, logger)
        if not mrsl_dict:
            resubmitobj['message'] = "No such job: %s (%s)" % (job_id,
                                                               mrsl_file)
            status = returnvalues.CLIENT_ERROR
            resubmitobjs.append(resubmitobj)
            continue

        resubmit_items = keywords_dict.keys()

        # loop selected keywords and create mRSL string

        resubmit_job_string = ''

        for dict_elem in resubmit_items:
            value = ''
            # Extract job value with fallback to default to support optional
            # fields
            job_value = mrsl_dict.get(dict_elem,
                                      keywords_dict[dict_elem]['Value'])
            if keywords_dict[dict_elem]['Type'].startswith(
                    'multiplekeyvalues'):
                for (elem_key, elem_val) in job_value:
                    if elem_key:
                        value += '%s=%s\n' % (str(elem_key).strip(),
                                              str(elem_val).strip())
            elif keywords_dict[dict_elem]['Type'].startswith('multiple'):
                for elem in job_value:
                    if elem:
                        value += '%s\n' % str(elem).rstrip()
            else:
                if str(job_value):
                    value += '%s\n' % str(job_value).rstrip()

            # Only insert keywords with an associated value

            if value:
                if value.rstrip() != '':
                    resubmit_job_string += '''::%s::
%s

''' % (dict_elem, value.rstrip())

        # save tempfile

        (filehandle, tempfilename) = \
            tempfile.mkstemp(dir=configuration.mig_system_files,
                             text=True)
        os.write(filehandle, resubmit_job_string)
        os.close(filehandle)

        # submit job the usual way

        (new_job_status, msg, new_job_id) = new_job(tempfilename, client_id,
                                                    configuration, False, True)
        if not new_job_status:
            resubmitobj['status'] = False
            resubmitobj['message'] = msg
            status = returnvalues.SYSTEM_ERROR
            resubmitobjs.append(resubmitobj)
            continue

            # o.out("Resubmit failed: %s" % msg)
            # o.reply_and_exit(o.ERROR)

        resubmitobj['status'] = True
        resubmitobj['new_job_id'] = new_job_id
        resubmitobjs.append(resubmitobj)

        # o.out("Resubmit successful: %s" % msg)
        # o.out("%s" % msg)

    output_objects.append({
        'object_type': 'resubmitobjs',
        'resubmitobjs': resubmitobjs
    })

    return (output_objects, status)
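
The resubmit loop above rebuilds a job description in the ::KEYWORD:: block format of mRSL. A standalone sketch of the same string construction, using made-up fields and values:

# Rebuild a minimal mRSL string from keyword/value pairs, mirroring the
# ::KEYWORD:: block format used in the resubmit loop above.
fields = [('EXECUTE', 'echo hello\necho done'), ('CPUTIME', '600')]
resubmit_job_string = ''
for (keyword, value) in fields:
    if value.rstrip():
        resubmit_job_string += '::%s::\n%s\n\n' % (keyword, value.rstrip())
print(resubmit_job_string)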
Example #6
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
        # NOTE: path can use wildcards, dst and current_dir cannot
        typecheck_overrides={'path': valid_path_pattern},
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    flags = ''.join(accepted['flags'])
    algo_list = accepted['hash_algo']
    max_chunks = int(accepted['max_chunks'][-1])
    pattern_list = accepted['path']
    dst = accepted['dst'][-1]
    current_dir = accepted['current_dir'][-1].lstrip(os.sep)

    # All paths are relative to current_dir

    pattern_list = [os.path.join(current_dir, i) for i in pattern_list]
    if dst:
        dst = os.path.join(current_dir, dst)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name

    base_dir = os.path.abspath(
        os.path.join(configuration.user_home, client_dir)) + os.sep

    status = returnvalues.OK

    if verbose(flags):
        for flag in flags:
            output_objects.append({
                'object_type': 'text',
                'text': '%s using flag: %s' % (op_name, flag)
            })

    # IMPORTANT: path must be expanded to abs for proper chrooting
    abs_dir = os.path.abspath(
        os.path.join(base_dir, current_dir.lstrip(os.sep)))
    if not valid_user_path(configuration, abs_dir, base_dir, True):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            "You're not allowed to work in %s!" % current_dir
        })
        logger.warning('%s tried to %s restricted path %s ! (%s)' %
                       (client_id, op_name, abs_dir, current_dir))
        return (output_objects, returnvalues.CLIENT_ERROR)

    if verbose(flags):
        output_objects.append({
            'object_type': 'text',
            'text': "working in %s" % current_dir
        })

    if dst:
        if not safe_handler(configuration, 'post', op_name, client_id,
                            get_csrf_limit(configuration), accepted):
            output_objects.append({
                'object_type': 'error_text',
                'text': '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        # NOTE: dst already incorporates current_dir prefix here
        # IMPORTANT: path must be expanded to abs for proper chrooting
        abs_dest = os.path.abspath(os.path.join(base_dir, dst))
        logger.info('chksum in %s' % abs_dest)

        # Don't use abs_path in output as it may expose underlying
        # fs layout.

        relative_dest = abs_dest.replace(base_dir, '')
        if not valid_user_path(configuration, abs_dest, base_dir, True):
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                "Invalid path! (%s expands to an illegal path)" % dst
            })
            logger.warning('%s tried to %s restricted path %s !(%s)' %
                           (client_id, op_name, abs_dest, dst))
            return (output_objects, returnvalues.CLIENT_ERROR)
        if not check_write_access(abs_dest, parent_dir=True):
            logger.warning('%s called without write access: %s' %
                           (op_name, abs_dest))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'cannot checksum to "%s": inside a read-only location!' %
                relative_dest
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

    all_lines = []
    for pattern in pattern_list:

        # Check directory traversal attempts before actual handling to avoid
        # leaking information about file system layout while allowing
        # consistent error messages

        unfiltered_match = glob.glob(base_dir + pattern)
        match = []
        for server_path in unfiltered_match:
            # IMPORTANT: path must be expanded to abs for proper chrooting
            abs_path = os.path.abspath(server_path)
            if not valid_user_path(configuration, abs_path, base_dir, True):

                # out of bounds - save user warning for later to allow
                # partial match:
                # ../*/* is technically allowed to match own files.

                logger.warning('%s tried to %s restricted path %s ! (%s)' %
                               (client_id, op_name, abs_path, pattern))
                continue
            match.append(abs_path)

        # Now actually treat list of allowed matchings and notify if no
        # (allowed) match

        if not match:
            output_objects.append({
                'object_type': 'file_not_found',
                'name': pattern
            })
            status = returnvalues.FILE_NOT_FOUND

        for abs_path in match:
            relative_path = abs_path.replace(base_dir, '')
            output_lines = []
            for hash_algo in algo_list:
                try:
                    chksum_helper = _algo_map.get(hash_algo, _algo_map["md5"])
                    checksum = chksum_helper(abs_path, max_chunks=max_chunks)
                    line = "%s %s\n" % (checksum, relative_path)
                    logger.info("%s %s of %s: %s" %
                                (op_name, hash_algo, abs_path, checksum))
                    output_lines.append(line)
                except Exception as exc:
                    output_objects.append({
                        'object_type':
                        'error_text',
                        'text':
                        "%s: '%s': %s" % (op_name, relative_path, exc)
                    })
                    logger.error("%s: failed on '%s': %s" %
                                 (op_name, relative_path, exc))
                    status = returnvalues.SYSTEM_ERROR
                    continue
            entry = {'object_type': 'file_output', 'lines': output_lines}
            output_objects.append(entry)
            all_lines += output_lines

    if dst and not write_file(''.join(all_lines), abs_dest, logger):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            "failed to write checksums to %s" % relative_dest
        })
        logger.error("writing checksums to %s for %s failed" %
                     (abs_dest, client_id))
        status = returnvalues.SYSTEM_ERROR

    return (output_objects, status)
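
The _algo_map checksum helpers used above are defined elsewhere in the module and are not shown here. A plausible chunked, hashlib-based helper with the same call shape might look like the sketch below; the chunk size and the meaning of a negative max_chunks are assumptions for illustration only.

import hashlib


def _chunked_checksum(path, algo='md5', chunk_size=1024 * 1024,
                      max_chunks=-1):
    """Hash up to max_chunks blocks of the file at path; a negative
    max_chunks is taken to mean the whole file. Only a sketch of what a
    helper behind _algo_map might do, not the module's actual code."""
    digest = hashlib.new(algo)
    chunks_read = 0
    with open(path, 'rb') as fd:
        while max_chunks < 0 or chunks_read < max_chunks:
            block = fd.read(chunk_size)
            if not block:
                break
            digest.update(block)
            chunks_read += 1
    return digest.hexdigest()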
Example #7
def main(client_id, user_arguments_dict, environ=None):
    """Main function used by front end"""

    if environ is None:
        environ = os.environ

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False,
                                  op_menu=client_id)
    client_dir = client_id_dir(client_id)
    status = returnvalues.OK
    defaults = signature()[1]
    (validate_status, accepted) = validate_input(
        user_arguments_dict,
        defaults,
        output_objects,
        allow_rejects=False,
        # NOTE: path cannot use wildcards here
        typecheck_overrides={},
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    flags = ''.join(accepted['flags'])
    patterns = accepted['path']
    current_dir = accepted['current_dir'][-1]
    share_id = accepted['share_id'][-1]

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Either authenticated user client_id set or sharelink ID
    if client_id:
        user_id = client_id
        target_dir = client_id_dir(client_id)
        base_dir = configuration.user_home
        id_query = ''
        page_title = 'Create User Directory'
        userstyle = True
        widgets = True
    elif share_id:
        try:
            (share_mode, _) = extract_mode_id(configuration, share_id)
        except ValueError as err:
            logger.error('%s called with invalid share_id %s: %s' %
                         (op_name, share_id, err))
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Invalid sharelink ID: %s' % share_id
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        # TODO: load and check sharelink pickle (currently requires client_id)
        user_id = 'anonymous user through share ID %s' % share_id
        if share_mode == 'read-only':
            logger.error('%s called without write access: %s' %
                         (op_name, accepted))
            output_objects.append({
                'object_type': 'error_text',
                'text': 'No write access!'
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        target_dir = os.path.join(share_mode, share_id)
        base_dir = configuration.sharelink_home
        id_query = '?share_id=%s' % share_id
        page_title = 'Create Shared Directory'
        userstyle = False
        widgets = False
    else:
        logger.error('%s called without proper auth: %s' % (op_name, accepted))
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Authentication is missing!'
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name

    base_dir = os.path.abspath(os.path.join(base_dir, target_dir)) + os.sep

    title_entry = find_entry(output_objects, 'title')
    title_entry['text'] = page_title
    title_entry['skipwidgets'] = not widgets
    title_entry['skipuserstyle'] = not userstyle
    output_objects.append({'object_type': 'header', 'text': page_title})

    # Input validation assures target_dir can't escape base_dir
    if not os.path.isdir(base_dir):
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Invalid client/sharelink id!'
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if verbose(flags):
        for flag in flags:
            output_objects.append({
                'object_type': 'text',
                'text': '%s using flag: %s' % (op_name, flag)
            })

    for pattern in patterns:

        # Check directory traversal attempts before actual handling to avoid
        # leaking information about file system layout while allowing
        # consistent error messages
        # NB: Globbing disabled on purpose here

        unfiltered_match = [base_dir + os.sep + current_dir + os.sep + pattern]
        match = []
        for server_path in unfiltered_match:
            # IMPORTANT: path must be expanded to abs for proper chrooting
            abs_path = os.path.abspath(server_path)
            if not valid_user_path(configuration, abs_path, base_dir, True):

                # out of bounds - save user warning for later to allow
                # partial match:
                # ../*/* is technically allowed to match own files.

                logger.warning('%s tried to %s restricted path %s ! (%s)' %
                               (client_id, op_name, abs_path, pattern))
                continue
            match.append(abs_path)

        # Now actually treat list of allowed matchings and notify if no
        # (allowed) match

        if not match:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                "%s: cannot create directory '%s': Permission denied" %
                (op_name, pattern)
            })
            status = returnvalues.CLIENT_ERROR

        for abs_path in match:
            relative_path = abs_path.replace(base_dir, '')
            if verbose(flags):
                output_objects.append({
                    'object_type': 'file',
                    'name': relative_path
                })
            if not parents(flags) and os.path.exists(abs_path):
                output_objects.append({
                    'object_type': 'error_text',
                    'text': '%s: path exists!' % pattern
                })
                status = returnvalues.CLIENT_ERROR
                continue
            if not check_write_access(abs_path, parent_dir=True):
                logger.warning('%s called without write access: %s' %
                               (op_name, abs_path))
                output_objects.append({
                    'object_type':
                    'error_text',
                    'text':
                    'cannot create "%s": inside a read-only location!' %
                    pattern
                })
                status = returnvalues.CLIENT_ERROR
                continue
            try:
                gdp_iolog(configuration, client_id, environ['REMOTE_ADDR'],
                          'created', [relative_path])
                if parents(flags):
                    if not os.path.isdir(abs_path):
                        os.makedirs(abs_path)
                else:
                    os.mkdir(abs_path)
                logger.info('%s %s done' % (op_name, abs_path))
            except Exception as exc:
                if not isinstance(exc, GDPIOLogError):
                    gdp_iolog(configuration,
                              client_id,
                              environ['REMOTE_ADDR'],
                              'created', [relative_path],
                              failed=True,
                              details=exc)
                output_objects.append({
                    'object_type':
                    'error_text',
                    'text':
                    "%s: '%s' failed!" % (op_name, relative_path)
                })
                logger.error("%s: failed on '%s': %s" %
                             (op_name, relative_path, exc))

                status = returnvalues.SYSTEM_ERROR
                continue
            output_objects.append({
                'object_type':
                'text',
                'text':
                "created directory %s" % (relative_path)
            })
            if id_query:
                open_query = "%s;current_dir=%s" % (id_query, relative_path)
            else:
                open_query = "?current_dir=%s" % relative_path
            output_objects.append({
                'object_type': 'link',
                'destination': 'ls.py%s' % open_query,
                'text': 'Open %s' % relative_path
            })
            output_objects.append({'object_type': 'text', 'text': ''})

    output_objects.append({
        'object_type': 'link',
        'destination': 'ls.py%s' % id_query,
        'text': 'Return to files overview'
    })
    return (output_objects, status)
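
valid_user_path itself is not shown in these examples; conceptually it verifies that the resolved path stays inside the caller's base directory. A simplified stand-in for that chroot-style check is sketched below; the real helper also covers symlinked vgrid and sharelink folders, so treat this purely as an illustration.

import os


def inside_base(abs_path, base_dir):
    """Return True if the resolved path stays under base_dir. Simplified
    illustration only; the real valid_user_path covers more cases."""
    real_base = os.path.realpath(base_dir).rstrip(os.sep) + os.sep
    real_path = os.path.realpath(abs_path)
    return real_path + os.sep == real_base or real_path.startswith(real_base)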
Example #8
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    client_dir = client_id_dir(client_id)
    status = returnvalues.OK
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
        )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append(
            {'object_type': 'error_text', 'text': '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
             })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not configuration.site_enable_jobs:
        output_objects.append({'object_type': 'error_text', 'text':
            '''Job execution is not enabled on this system'''})
        return (output_objects, returnvalues.SYSTEM_ERROR)

    external_dict = get_keywords_dict(configuration)
    mrsl = fields_to_mrsl(configuration, user_arguments_dict, external_dict)

    tmpfile = None

    # save to temporary file

    try:
        (filehandle, real_path) = tempfile.mkstemp(text=True)
        relative_path = os.path.basename(real_path)
        os.write(filehandle, mrsl)
        os.close(filehandle)
    except Exception as err:
        output_objects.append(
            {'object_type': 'error_text',
             'text': 'Failed to write temporary mRSL file: %s' % err})
        return (output_objects, returnvalues.SYSTEM_ERROR)

    # submit it

    (submit_status, newmsg, job_id) = new_job(real_path, client_id,
                                              configuration, False, True)
    if not submit_status:
        output_objects.append({'object_type': 'error_text', 'text': newmsg})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name

    base_dir = \
        os.path.abspath(os.path.join(configuration.mrsl_files_dir,
                        client_dir)) + os.sep

    # job = Job()

    filepath = os.path.join(base_dir, job_id)
    filepath += '.mRSL'

    (new_job_obj_status, new_job_obj) = \
        create_job_object_from_pickled_mrsl(filepath, logger, external_dict)
    if not new_job_obj_status:
        output_objects.append({'object_type': 'error_text',
                               'text': new_job_obj})
        status = returnvalues.CLIENT_ERROR
    else:

        # return new_job_obj

        output_objects.append({'object_type': 'jobobj',
                               'jobobj': new_job_obj})
    return (output_objects, status)
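
tempfile.mkstemp, as used above, returns a raw file descriptor, so the subsequent os.write expects bytes on Python 3 while the mRSL text is built as a str (these examples do not state which Python versions the project targets). A small, self-contained version of the temporary-file write pattern with an explicit encode:

import os
import tempfile

# Write text to a secure temporary file through the low-level descriptor
# that mkstemp returns; the explicit encode keeps it working on Python 3.
# The content here is made up for illustration.
(filehandle, real_path) = tempfile.mkstemp(text=True)
try:
    os.write(filehandle, '::EXECUTE::\necho hello\n'.encode('utf8'))
finally:
    os.close(filehandle)
print("wrote temporary mRSL file: %s" % real_path)
os.remove(real_path)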
Example #9
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False, op_menu=False)
    defaults = signature()[1]
    client_dir = client_id_dir(client_id)
    logger.debug('in peersaction: %s' % user_arguments_dict)
    (validate_status, accepted) = validate_input(user_arguments_dict,
                                                 defaults,
                                                 output_objects,
                                                 allow_rejects=False)
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    title_entry = find_entry(output_objects, 'title')
    title_entry['text'] = 'Save Peers'
    output_objects.append({'object_type': 'header', 'text': 'Save Peers'})

    admin_email = configuration.admin_email
    smtp_server = configuration.smtp_server
    user_pending = os.path.abspath(configuration.user_pending)

    user_map = get_full_user_map(configuration)
    user_dict = user_map.get(client_id, None)
    # Optional site-wide limitation of peers permission
    if not user_dict or \
            not peers_permit_allowed(configuration, user_dict):
        logger.warning("user %s is not allowed to permit peers!" % client_id)
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Only privileged users can permit external peers!'
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    action = accepted['action'][-1].strip()
    label = accepted['peers_label'][-1].strip()
    kind = accepted['peers_kind'][-1].strip()
    raw_expire = accepted['peers_expire'][-1].strip()
    peers_content = accepted['peers_content']
    peers_format = accepted['peers_format'][-1].strip()
    peers_invite = accepted['peers_invite'][-1].strip()
    do_invite = (peers_invite.lower() in ['on', 'true', 'yes', '1'])

    try:
        expire = datetime.datetime.strptime(raw_expire, '%Y-%m-%d')
        if datetime.datetime.now() > expire:
            raise ValueError("specified expire value is in the past!")
    except Exception as exc:
        logger.error("expire %r could not be parsed into a (future) date" %
                     raw_expire)
        output_objects.append({
            'object_type':
            'text',
            'text':
            'No valid expire provided - using default: %d days' %
            default_expire_days
        })
        expire = datetime.datetime.now()
        expire += datetime.timedelta(days=default_expire_days)
    expire = expire.date().isoformat()

    if not action in peer_actions:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Unsupported peer action %r - only %s are allowed' %
            (action, ', '.join(peer_actions))
        })
        return (output_objects, returnvalues.CLIENT_ERROR)
    if not kind in peer_kinds:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Unsupported peer kind %r - only %s are allowed' %
            (kind, ', '.join(peer_kinds))
        })
        return (output_objects, returnvalues.CLIENT_ERROR)
    # TODO: implement and enable more formats?
    if peers_format not in ("csvform", 'userid'):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Only Import Peers is implemented so far!'
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    peers_path = os.path.join(configuration.user_settings, client_dir,
                              peers_filename)
    try:
        all_peers = load(peers_path)
    except Exception as exc:
        logger.warning("could not load peers from: %s" % exc)
        all_peers = {}

    # Extract peer(s) from request
    (peers, err) = parse_peers(configuration, peers_content, peers_format)
    if not err and not peers:
        err = ["No valid peers provided"]
    if err:
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Parsing failed: %s' % '.\n '.join(err)
        })
        output_objects.append({
            'object_type': 'link',
            'destination': 'peers.py',
            'text': 'Back to peers'
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # NOTE: general cases of operation here:
    # * import multiple peers in one go (add new, update existing)
    # * add one or more new peers
    # * update one or more existing peers
    # * remove one or more existing peers
    # * accept one or more pending requests
    # * reject one or more pending requests
    # The kind and expire values are generally applied for all included peers.

    # NOTE: we check all peers before any action
    for user in peers:
        fill_distinguished_name(user)
        peer_id = user['distinguished_name']
        cur_peer = all_peers.get(peer_id, {})
        if 'add' == action and cur_peer:
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Peer %r already exists!' % peer_id
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        elif 'update' == action and not cur_peer:
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Peer %r does not exist!' % peer_id
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        elif 'remove' == action and not cur_peer:
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Peer %r does not exist!' % peer_id
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        elif 'accept' == action and cur_peer:
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Peer %r already accepted!' % peer_id
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        elif 'reject' == action and cur_peer:
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Peer %r already accepted!' % peer_id
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        elif 'import' == action and cur_peer:
            # Only warn on import with existing match
            output_objects.append({
                'object_type': 'text',
                'text': 'Updating existing peer %r' % peer_id
            })

    # Now apply changes
    for user in peers:
        peer_id = user['distinguished_name']
        cur_peer = all_peers.get(peer_id, {})
        user.update({'label': label, 'kind': kind, 'expire': expire})
        if 'add' == action:
            all_peers[peer_id] = user
        elif 'update' == action:
            all_peers[peer_id] = user
        elif 'remove' == action:
            del all_peers[peer_id]
        elif 'accept' == action:
            all_peers[peer_id] = user
        elif 'reject' == action:
            pass
        elif 'import' == action:
            all_peers[peer_id] = user
        logger.info("%s peer %s" % (action, peer_id))

    try:
        dump(all_peers, peers_path)
        logger.debug('%s %s peers %s in %s' %
                     (client_id, action, all_peers, peers_path))
        output_objects.append({
            'object_type': 'text',
            'text': "Completed %s peers" % action
        })
        for user in peers:
            output_objects.append({
                'object_type': 'text',
                'text': "%(distinguished_name)s" % user
            })
        if action in ['import', 'add', 'update']:
            client_name = extract_field(client_id, 'full_name')
            client_email = extract_field(client_id, 'email')

            if do_invite:
                succeeded, failed = [], []
                email_header = '%s Invitation' % configuration.short_title
                email_msg_template = """Hi %%s,
This is an automatic email sent on behalf of %s who vouched for you to get a
user account on %s. You can accept the invitation by going to
%%s
entering a password of your choice and submitting the form.
If you do not want a user account you can safely ignore this email.

We would be grateful if you report any abuse of the invitation system to the
site administrators (%s).
""" % (client_name, configuration.short_title, admin_email)
                for peer_user in peers:
                    peer_name = peer_user['full_name']
                    peer_email = peer_user['email']
                    peer_url = os.path.join(
                        configuration.migserver_https_sid_url, 'cgi-sid',
                        'reqoid.py')
                    peer_req = {}
                    for field in peers_fields:
                        peer_req[field] = peer_user.get(field, '')
                    peer_req['comment'] = 'Invited by %s (%s) for %s purposes' \
                                          % (client_name, client_email, kind)
                    # Mark ID fields as readonly in the form to limit errors
                    peer_req['ro_fields'] = keyword_auto
                    peer_url += '?%s' % urllib.urlencode(peer_req)
                    email_msg = email_msg_template % (peer_name, peer_url)
                    logger.info(
                        'Sending invitation: to: %s, header: %s, msg: %s, smtp_server: %s'
                        % (peer_email, email_header, email_msg, smtp_server))
                    if send_email(peer_email, email_header, email_msg, logger,
                                  configuration):
                        succeeded.append(peer_email)
                    else:
                        failed.append(peer_email)

                if failed:
                    output_objects.append({
                        'object_type':
                        'error_text',
                        'text':
                        """An error occured trying to email the peer
invitation to %s . Please inform the site admins (%s) if the problem persists.
""" % (', '.join(failed), admin_email)
                    })
                if succeeded:
                    output_objects.append({
                        'object_type':
                        'text',
                        'text':
                        """Sent invitation to %s with a link to a mostly pre-filled %s account request
form with the exact ID fields you provided here.""" %
                        (', '.join(succeeded), configuration.short_title)
                    })
            else:
                output_objects.append({
                    'object_type':
                    'text',
                    'text':
                    """Please tell your peers
to request an account at %s with the exact ID fields you provided here and
importantly mentioning the purpose and your email (%s) in the sign up Comment
field. Alternatively you can use the invite button to send out an email with a
link to a mostly prefilled request form.""" %
                    (configuration.short_title, client_email)
                })
    except Exception as exc:
        logger.error('Failed to save %s peers to %s: %s' %
                     (client_id, peers_path, exc))
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''
Could not %s peers %r. Please contact the site admins on %s if this error
persists.
''' % (action, label, admin_email)
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    if action in ["accept", "reject"]:
        changed = [(i['distinguished_name'], i) for i in peers]
        if not manage_pending_peers(configuration, client_id, "remove",
                                    changed):
            logger.warning('could not update pending peers for %s after %s' %
                           (client_id, action))

    logger.info('%s completed for %s peers for %s in %s' %
                (action, label, client_id, peers_path))

    user_lines = []
    pretty_peers = {'label': label, 'kind': kind, 'expire': expire}
    for user in peers:
        user_lines.append(user['distinguished_name'])
    pretty_peers['user_lines'] = '\n'.join(user_lines)
    email_header = '%s Peers %s' % (configuration.short_title, action)
    email_msg = """Received %s peers from %s
""" % (action, client_id)
    email_msg += """
Kind: %(kind)s , Expire: %(expire)s, Label: %(label)s , Peers:
%(user_lines)s
""" % pretty_peers

    logger.info('Sending email: to: %s, header: %s, msg: %s, smtp_server: %s' %
                (admin_email, email_header, email_msg, smtp_server))
    if not send_email(admin_email, email_header, email_msg, logger,
                      configuration):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''
An error occurred trying to send the email about your %s peers to the site
administrators. Please manually inform them (%s) if the problem persists.
''' % (action, admin_email)
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    output_objects.append({
        'object_type':
        'text',
        'text':
        '''
Informed the site admins about your %s peers action to let them accept peer
account requests you already validated.''' % action
    })

    output_objects.append({
        'object_type': 'link',
        'destination': 'peers.py',
        'text': 'Back to peers'
    })
    return (output_objects, returnvalues.OK)
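The peers handler above deliberately splits the work into a validation pass and an apply pass, so a single bad entry can never leave the saved peer store half-updated. The following minimal sketch illustrates that two-pass pattern in isolation; the function and variable names are purely illustrative and not part of the real module.

def apply_peer_action(action, peer_ids, all_peers):
    """Validate every entry first, then mutate the store."""
    # Pass 1: validation only - no side effects yet
    for peer_id in peer_ids:
        exists = peer_id in all_peers
        if action == 'add' and exists:
            raise ValueError('peer %r already exists' % peer_id)
        if action in ('update', 'remove') and not exists:
            raise ValueError('peer %r does not exist' % peer_id)
    # Pass 2: apply the changes
    for peer_id in peer_ids:
        if action == 'remove':
            del all_peers[peer_id]
        else:
            all_peers[peer_id] = {'distinguished_name': peer_id}
    return all_peers

# Example: apply_peer_action('add', ['/C=DK/CN=Jane Doe'], {}) adds one entry,
# while a later 'add' of the same ID raises before anything is modified.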
Example #10
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)

    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
        )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    resource_list = accepted['unique_resource_name']
    resource_id = resource_list.pop()

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append(
            {'object_type': 'error_text', 'text': '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
             })
        return (output_objects, returnvalues.CLIENT_ERROR)

    res_dir = os.path.join(configuration.resource_home, resource_id)

    # Prevent unauthorized access
    
    (owner_status, owner_list) = resource_owners(configuration, resource_id)
    if not owner_status:
        output_objects.append(
            {'object_type': 'error_text', 'text'
             : "Could not look up '%s' owners - no such resource?" % resource_id
             })
        return (output_objects, returnvalues.CLIENT_ERROR)
    elif client_id not in owner_list:
        logger.warning('user %s tried to delete resource "%s" not owned' % \
                       (client_id, resource_id))
        output_objects.append({'object_type': 'error_text', 'text'
                               : "You can't delete '%s' - you don't own it!"
                               % resource_id})
        output_objects.append({'object_type': 'link', 'destination':
                               'resman.py', 'class': 'infolink iconspace', 'title':
                               'Show resources', 'text': 'Show resources'})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Locking the access to resources and vgrids.
    lock_path_vgrid = os.path.join(configuration.resource_home, "vgrid.lock")
    lock_handle_vgrid = open(lock_path_vgrid, 'a')

    fcntl.flock(lock_handle_vgrid.fileno(), fcntl.LOCK_EX)

    lock_path_res = os.path.join(configuration.resource_home, "resource.lock")
    lock_handle_res = open(lock_path_res, 'a')

    fcntl.flock(lock_handle_res.fileno(), fcntl.LOCK_EX)

    # Only resources that are down may be deleted.
    # A "FE.PGID" file with a PGID in the resource's home directory means that
    # the FE is running.

    pgid_path = os.path.join(res_dir, 'FE.PGID')
    fe_running = True
    try:

        # determine if fe runs by finding out if pgid is numerical

        pgid_file = open(pgid_path, 'r')
        fcntl.flock(pgid_file, fcntl.LOCK_EX)
        pgid = pgid_file.readline().strip()
        fcntl.flock(pgid_file, fcntl.LOCK_UN)
        pgid_file.close()
        if not pgid.isdigit():
            raise Exception('FE already stopped')
    except Exception:
        fe_running = False

    if fe_running:
        output_objects.append({'object_type': 'error_text', 'text'
                               : "Can't delete the running resource %s!"
                               % resource_id})
        output_objects.append({'object_type': 'link', 'destination':
                               'resman.py', 'class': 'infolink iconspace',
                               'title': 'Show resources', 'text':
                               'Show resources'})
        lock_handle_vgrid.close()
        lock_handle_res.close()
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Deleting the resource files, but not the resource directory itself.
    # The resource directory is kept to prevent hijacking of resource IDs

    try:
        for name in os.listdir(res_dir):
            file_path = os.path.join(res_dir, name)
            if os.path.isfile(file_path):
                os.unlink(file_path)
    except Exception as err:
        output_objects.append({'object_type': 'error_text', 'text'
                               : 'Deletion exception: ' + str(err)})
        output_objects.append({'object_type': 'link', 'destination':
                               'resman.py', 'class': 'infolink iconspace',
                               'title': 'Show resources', 'text':
                               'Show resources'})
        lock_handle_vgrid.close()
        lock_handle_res.close()
        return (output_objects, returnvalues.CLIENT_ERROR)

    # The resource has been deleted, and OK is returned.
    title_entry = find_entry(output_objects, 'title')
    title_entry['text'] = 'Resource Deletion'
    output_objects.append({'object_type': 'header', 'text'
                          : 'Deleting resource'})
    output_objects.append({'object_type': 'text', 'text'
                           : 'Successfully deleted resource: ' + resource_id})
    output_objects.append({'object_type': 'link', 'destination': 'resman.py',
                           'class': 'infolink iconspace', 'title':
                           'Show resources', 'text': 'Show resources'})
    
    # Releasing locks
    lock_handle_vgrid.close()
    lock_handle_res.close()

    # Remove resource from resource and vgrid caches (after releasing locks)
    unmap_resource(configuration, resource_id)

    return (output_objects, returnvalues.OK)
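The resource deletion above serializes access by taking exclusive fcntl locks on shared lock files before touching anything on disk. A small context-manager wrapper for that pattern is sketched below; the lock file path in the usage comment is only an example.

import fcntl
from contextlib import contextmanager

@contextmanager
def exclusive_lock(lock_path):
    """Hold an exclusive advisory flock on lock_path for the with-block."""
    handle = open(lock_path, 'a')
    try:
        fcntl.flock(handle.fileno(), fcntl.LOCK_EX)
        yield handle
    finally:
        # closing the file descriptor releases the flock
        handle.close()

# Example usage mirroring the handler above:
# with exclusive_lock('/path/to/resource_home/vgrid.lock'):
#     with exclusive_lock('/path/to/resource_home/resource.lock'):
#         ...  # safe to inspect FE.PGID and delete resource files here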
Example #11
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    defaults = signature()[1]
    output_objects.append({
        'object_type': 'header',
        'text': 'Reject Resource Request'
    })
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    unique_resource_name = accepted['unique_resource_name'][-1].strip()
    request_name = unhexlify(accepted['request_name'][-1])

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not is_owner(client_id, unique_resource_name,
                    configuration.resource_home, logger):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'You must be an owner of %s to reject requests!' %
            unique_resource_name
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Please note that base_dir must end in slash to avoid access to other
    # resource dirs when own name is a prefix of another user name

    base_dir = \
        os.path.abspath(os.path.join(configuration.resource_home,
                        unique_resource_name)) + os.sep

    # IMPORTANT: path must be expanded to abs for proper chrooting
    abs_path = os.path.abspath(os.path.join(base_dir, request_name))
    if not valid_user_path(
            configuration, abs_path, base_dir, allow_equal=False):
        logger.warning('%s tried to access restricted path %s ! (%s)' % \
                       (client_id, abs_path, request_name))
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Illegal request name "%s":
you can only reject requests to your own resources.''' % request_name
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    req = None
    request_dir = os.path.join(configuration.resource_home,
                               unique_resource_name)
    if request_name:
        req = load_access_request(configuration, request_dir, request_name)
    if not req or not delete_access_request(configuration, request_dir,
                                            request_name):
        logger.error("failed to delete owner request for %s in %s" % \
                     (unique_resource_name, request_name))
        output_objects.append({
            'object_type': 'error_text', 'text':
            'Failed to remove saved resource request for %s in %s!'\
            % (unique_resource_name, request_name)})
        return (output_objects, returnvalues.CLIENT_ERROR)
    output_objects.append({
        'object_type':
        'text',
        'text':
        '''
Deleted %(request_type)s access request to %(target)s for %(entity)s .
''' % req
    })
    form_method = 'post'
    csrf_limit = get_csrf_limit(configuration)
    fill_helpers = {
        'protocol': any_protocol,
        'unique_resource_name': unique_resource_name,
        'form_method': form_method,
        'csrf_field': csrf_field,
        'csrf_limit': csrf_limit
    }
    fill_helpers.update(req)
    target_op = 'sendrequestaction'
    csrf_token = make_csrf_token(configuration, form_method, target_op,
                                 client_id, csrf_limit)
    fill_helpers.update({'target_op': target_op, 'csrf_token': csrf_token})

    output_objects.append({
        'object_type':
        'html_form',
        'text':
        """
<p>
You can use the reply form below if you want to additionally send an
explanation for rejecting the request.
</p>
<form method='%(form_method)s' action='%(target_op)s.py'>
<input type='hidden' name='%(csrf_field)s' value='%(csrf_token)s' />
<input type=hidden name=request_type value='resourcereject' />
<input type=hidden name=unique_resource_name value='%(target)s' />
<input type=hidden name=cert_id value='%(entity)s' />
<input type=hidden name=protocol value='%(protocol)s' />
<table>
<tr>
<td class='title'>Optional reject message to requestor(s)</td>
</tr><tr>
<td><textarea name=request_text cols=72 rows=10>
We have decided to reject your %(request_type)s request to our %(target)s
resource.

Regards, the %(target)s resource owners
</textarea></td>
</tr>
<tr>
<td><input type='submit' value='Inform requestor(s)' /></td>
</tr>
</table>
</form>
<br />
""" % fill_helpers
    })
    output_objects.append({
        'object_type':
        'link',
        'destination':
        'resadmin.py?unique_resource_name=%s' % unique_resource_name,
        'text':
        'Back to administration for %s' % unique_resource_name
    })
    return (output_objects, returnvalues.OK)
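The reject handler relies on valid_user_path to keep the request name inside the resource's own directory. A rough sketch of that abspath-plus-prefix check is shown below; the real helper presumably also resolves symlinks and covers more corner cases, so this is only an illustration of the idea.

import os

def inside_base(base_dir, name, allow_equal=False):
    """Return True if name resolves to a path inside base_dir.
    base_dir is expected to end in a path separator, as above."""
    abs_path = os.path.abspath(os.path.join(base_dir, name))
    if abs_path + os.sep == base_dir:
        return allow_equal
    return abs_path.startswith(base_dir)

# inside_base('/srv/mig/resource_home/res.0/', '../res.1/secret') -> False
# inside_base('/srv/mig/resource_home/res.0/', 'accessrequest-42') -> True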
Example #12
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    client_dir = client_id_dir(client_id)

    valid_langs = {'sh': 'shell', 'python': 'python'}
    valid_flavors = {'user': 'userscriptgen',
                     'resource': 'vgridscriptgen'}
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    flags = ''.join(accepted['flags'])
    langs = accepted['lang']
    flavor_list = accepted['flavor']
    sh_cmd = accepted['sh_cmd'][-1]
    python_cmd = accepted['python_cmd'][-1]
    script_dir = accepted['script_dir'][-1]

    flavors = []

    title_entry = find_entry(output_objects, 'title')
    title_entry['text'] = 'Script generator'
    output_objects.append(
        {'object_type': 'header', 'text': 'Script generator'})

    status = returnvalues.OK

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name

    base_dir = os.path.abspath(os.path.join(configuration.user_home,
                                            client_dir)) + os.sep

    if 'h' in flags:
        output_objects = usage(output_objects, valid_langs,
                               valid_flavors)
        return (output_objects, status)

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append(
            {'object_type': 'error_text', 'text': '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
             })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Filter out any invalid flavors to avoid illegal filenames, etc.

    for f in flavor_list:
        if f in valid_flavors.keys():
            flavors.append(f)

    # Default to user scripts

    if not flavors:
        if flavor_list:
            output_objects.append({'object_type': 'text', 'text': 'No valid flavors specified - falling back to user scripts'
                                   })
        flavors = ['user']

    if not langs or keyword_all in langs:

        # Add new languages here

        languages = [(userscriptgen.sh_lang, sh_cmd, userscriptgen.sh_ext),
                     (userscriptgen.python_lang, python_cmd,
                      userscriptgen.python_ext)]
    else:
        languages = []

        # check arguments

        for lang in langs:
            if lang == 'sh':
                interpreter = sh_cmd
                extension = userscriptgen.sh_ext
            elif lang == 'python':
                interpreter = python_cmd
                extension = userscriptgen.python_ext
            else:
                output_objects.append({'object_type': 'warning', 'text': 'Unknown script language: %s - ignoring!'
                                       % lang})
                continue

            languages.append((lang, interpreter, extension))

    if not languages:
        output_objects.append({'object_type': 'error_text', 'text': 'No valid languages specified - aborting script generation'
                               })
        return (output_objects, returnvalues.CLIENT_ERROR)

    for flavor in flavors:
        if not script_dir or script_dir == keyword_auto:
            # Generate scripts in a "unique" destination directory
            # gmtime([seconds]) -> (tm_year, tm_mon, tm_day, tm_hour, tm_min,
            #                       tm_sec, tm_wday, tm_yday, tm_isdst)
            now = time.gmtime()
            timestamp = '%.2d%.2d%.2d-%.2d%.2d%.2d' % (
                now[2],
                now[1],
                now[0],
                now[3],
                now[4],
                now[5],
            )
            script_dir = '%s-%s-scripts-%s' % (configuration.short_title,
                                               flavor, timestamp)
        else:
            # Avoid problems from especially trailing slash (zip recursion)
            script_dir = script_dir.strip(os.sep)

        # IMPORTANT: path must be expanded to abs for proper chrooting
        abs_dir = os.path.abspath(os.path.join(base_dir, script_dir))
        if not valid_user_path(configuration, abs_dir, base_dir, True):

            # out of bounds

            output_objects.append({'object_type': 'error_text', 'text': "You're not allowed to work in %s!"
                                   % script_dir})
            logger.warning('%s tried to %s restricted path %s ! (%s)'
                           % (client_id, op_name, abs_dir, script_dir))
            return (output_objects, returnvalues.CLIENT_ERROR)

        if not os.path.isdir(abs_dir):
            try:
                os.mkdir(abs_dir)
            except Exception as exc:
                output_objects.append({'object_type': 'error_text',
                                       'text': 'Failed to create destination directory (%s) - aborting script generation'
                                       % exc})
                return (output_objects, returnvalues.SYSTEM_ERROR)

        for (lang, _, _) in languages:
            output_objects.append({'object_type': 'text', 'text': 'Generating %s %s scripts in the %s subdirectory of your %s home directory'
                                   % (lang, flavor, script_dir, configuration.short_title)})

        logger.debug('generate %s scripts in %s' % (flavor, abs_dir))

        # Generate all scripts

        if flavor == 'user':
            for op in userscriptgen.script_ops:
                generator = getattr(userscriptgen, 'generate_%s' % op)
                generator(configuration, languages, abs_dir)

            if userscriptgen.shared_lib:
                userscriptgen.generate_lib(configuration, userscriptgen.script_ops,
                                           languages, abs_dir)

            if userscriptgen.test_script:
                userscriptgen.generate_test(configuration, languages, abs_dir)
        elif flavor == 'resource':
            for op in vgridscriptgen.script_ops_single_arg:
                vgridscriptgen.generate_single_argument(configuration, op[0], op[1],
                                                        languages, abs_dir)
            for op in vgridscriptgen.script_ops_single_upload_arg:
                vgridscriptgen.generate_single_argument_upload(configuration, op[0],
                                                               op[1], op[2],
                                                               languages, abs_dir)
            for op in vgridscriptgen.script_ops_two_args:
                vgridscriptgen.generate_two_arguments(configuration, op[0], op[1],
                                                      op[2], languages, abs_dir)
            for op in vgridscriptgen.script_ops_ten_args:
                vgridscriptgen.generate_ten_arguments(configuration, op[0], op[1],
                                                      op[2], op[3], op[4], op[5],
                                                      op[6], op[7], op[8], op[9],
                                                      op[10], languages, abs_dir)
        else:
            output_objects.append(
                {'object_type': 'warning_text', 'text': 'Unknown flavor: %s' % flavor})
            continue

        # Always include license conditions file

        userscriptgen.write_license(configuration, abs_dir)

        output_objects.append({'object_type': 'text', 'text': '... Done'
                               })
        output_objects.append({'object_type': 'text', 'text': '%s %s scripts are now available in your %s home directory:'
                               % (configuration.short_title, flavor, configuration.short_title)})
        output_objects.append({'object_type': 'link', 'text': 'View directory',
                               'destination': 'fileman.py?path=%s/' % script_dir})

        # Create zip from generated dir

        output_objects.append({'object_type': 'text', 'text': 'Generating zip archive of the %s %s scripts'
                               % (configuration.short_title, flavor)})

        script_zip = script_dir + '.zip'
        dest_zip = '%s%s' % (base_dir, script_zip)
        logger.debug('packing generated scripts from %s in %s' % (abs_dir,
                                                                  dest_zip))

        # Force compression
        zip_file = zipfile.ZipFile(dest_zip, 'w', zipfile.ZIP_DEFLATED)

        # Directory write is not supported - add each file manually

        for script in os.listdir(abs_dir):
            zip_file.write(abs_dir + os.sep + script, script_dir
                           + os.sep + script)

        # Preserve executable flag in accordance with:
        # http://mail.python.org/pipermail/pythonmac-sig/2005-March/013491.html

        for zinfo in zip_file.filelist:
            zinfo.create_system = 3

        zip_file.close()

        # Verify CRC

        zip_file = zipfile.ZipFile(dest_zip, 'r')
        err = zip_file.testzip()
        zip_file.close()
        if err:
            output_objects.append({'object_type': 'error_text', 'text': 'Zip file integrity check failed! (%s)'
                                   % err})
            status = returnvalues.SYSTEM_ERROR
            continue

        output_objects.append({'object_type': 'text', 'text': '... Done'
                               })
        output_objects.append({'object_type': 'text', 'text': 'Zip archive of the %s %s scripts is now available in your %s home directory'
                               % (configuration.short_title, flavor, configuration.short_title)})
        output_objects.append({'object_type': 'link', 'text': 'Download zip archive %s' % script_zip, 'destination': os.path.join('..', client_dir,
                                                                                                                                  script_zip)})
        output_objects.append({'object_type': 'upgrade_info', 'text': '''
You can upgrade from an existing user scripts folder with the commands:''',
                               'commands': ["./migget.sh '%s' ../" % script_zip,
                                            "cd ..", "unzip '%s'" % script_zip,
                                            "cd '%s'" % script_dir]
                               })

    return (output_objects, status)
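The script generator ends by packing the generated directory into a compressed zip, marking the entries as Unix-created so executable bits survive extraction, and running the archive's CRC self-test. A condensed sketch of that pack-and-verify step follows; the paths in the usage comment are illustrative.

import os
import zipfile

def pack_and_verify(src_dir, dest_zip):
    """Zip every file in src_dir under its basename and CRC-check the result."""
    archive_root = os.path.basename(src_dir.rstrip(os.sep))
    with zipfile.ZipFile(dest_zip, 'w', zipfile.ZIP_DEFLATED) as zip_file:
        for name in os.listdir(src_dir):
            zip_file.write(os.path.join(src_dir, name),
                           os.path.join(archive_root, name))
        for zinfo in zip_file.filelist:
            zinfo.create_system = 3  # 3 marks Unix origin to keep mode bits
    with zipfile.ZipFile(dest_zip, 'r') as zip_file:
        bad_member = zip_file.testzip()  # first corrupt member or None
    return bad_member is None

# pack_and_verify('/tmp/generated-scripts', '/tmp/generated-scripts.zip')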
Example #13
def main(client_id, user_arguments_dict):
    """Main function used by front end"""
    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )

    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    logger.debug("User: %s executing %s" % (client_id, op_name))
    if not configuration.site_enable_cloud:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'The cloud service is not enabled on the system'
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    output_objects.append({
        'object_type': 'header',
        'text': 'Cloud Instance Management'
    })

    user_map = get_full_user_map(configuration)
    user_dict = user_map.get(client_id, None)
    # Optional site-wide limitation of cloud access permission
    if not user_dict or not cloud_access_allowed(configuration, user_dict):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            "You don't have permission to access the cloud facilities on "
            "this site"
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    return_status = returnvalues.OK
    action = accepted['action'][-1]
    # NOTE: instance_X may be empty list - fall back to empty string
    instance_id = ([''] + accepted['instance_id'])[-1]
    instance_label = ([''] + accepted['instance_label'])[-1]
    instance_image = ([''] + accepted['instance_image'])[-1]
    accept_terms = (([''] + accepted['accept_terms'])[-1] in ('yes', 'on'))
    cloud_id = accepted['service'][-1]
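    # Flatten the options of the configured cloud service whose service_name
    # matches the requested cloud_id into one lookup dict (empty if no match)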
    service = {
        k: v
        for options in configuration.cloud_services
        for k, v in options.items() if options['service_name'] == cloud_id
    }

    if not service:
        valid_services = [
            options['service_name'] for options in configuration.cloud_services
        ]
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '%s is not among the valid cloud services: %s' %
            (cloud_id, ', '.join(valid_services))
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    valid_service = valid_cloud_service(configuration, service)
    if not valid_service:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'The service %s appears to be misconfigured, '
            'please contact a system administrator about this issue' % cloud_id
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    service_title = service['service_title']
    if not action in valid_actions:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '%s is not a valid action - '
            'allowed actions include %s' % (action, ', '.join(valid_actions))
        })
        return (output_objects, returnvalues.CLIENT_ERROR)
    elif action in cloud_edit_actions:
        if not safe_handler(configuration, 'post', op_name, client_id,
                            get_csrf_limit(configuration), accepted):
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                '''Only accepting
                CSRF-filtered POST requests to prevent unintended updates'''
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

    cloud_flavor = service.get("service_flavor", "openstack")
    user_home_dir = os.path.join(configuration.user_home, client_dir)

    client_email = extract_field(client_id, 'email')
    if not client_email:
        logger.error("could not extract client email for %s!" % client_id)
        output_objects.append({
            'object_type': 'error_text',
            'text': "No client ID found - can't continue"
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    ssh_auth_msg = "Login requires your private key for your public key:"
    instance_missing_msg = "Found no '%s' instance at %s. Please contact a " \
                           + "site administrator if it should be there."

    _label = instance_label
    if instance_id and not _label:
        _, _label, _ = cloud_split_instance_id(configuration, client_id,
                                               instance_id)

    if "create" == action:
        if not accept_terms:
            logger.error("refusing create without accepting terms for %s!" %
                         client_id)
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                "You MUST accept the cloud user terms to create instances"
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        # Load all instances and make sure none contains label in ID
        saved_instances = cloud_load_instance(configuration, client_id,
                                              cloud_id, keyword_all)
        for (saved_id, instance) in saved_instances.items():
            if instance_label == instance.get('INSTANCE_LABEL', saved_id):
                logger.error("Refused %s re-create %s cloud instance %s!" %
                             (client_id, cloud_id, instance_label))
                output_objects.append({
                    'object_type':
                    'error_text',
                    'text':
                    "You already have an instance with the label '%s'!" %
                    instance_label
                })
                return (output_objects, returnvalues.CLIENT_ERROR)

        max_instances = lookup_user_service_value(
            configuration, client_id, service, 'service_max_user_instances')
        max_user_instances = int(max_instances)
        # NOTE: a negative max value means unlimited but 0 or more is enforced
        if max_user_instances >= 0 and \
                len(saved_instances) >= max_user_instances:
            logger.error("Refused %s create additional %s cloud instances!" %
                         (client_id, cloud_id))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                "You already have the maximum allowed %s instances (%d)!" %
                (service_title, max_user_instances)
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        if not instance_label:
            logger.error("Refused %s create unlabelled %s cloud instance!" %
                         (client_id, cloud_id))
            output_objects.append({
                'object_type': 'error_text',
                'text': "No instance label provided!"
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        # Lookup user-specific allowed images (colon-separated image names)
        allowed_images = allowed_cloud_images(configuration, client_id,
                                              cloud_id, cloud_flavor)
        if not allowed_images:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                "No valid / allowed cloud images found!"
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        if not instance_image:
            instance_image = allowed_images[0]
            logger.info("No image specified - using first for %s in %s: %s" %
                        (client_id, cloud_id, instance_image))

        image_id = None
        for (img_name, img_id, img_alias) in allowed_images:
            if instance_image == img_name:
                image_id = img_id
                break

        if not image_id:
            logger.error("No matching image ID found for %s in %s: %s" %
                         (client_id, cloud_id, instance_image))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                "No such  image found: %s" % instance_image
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        # TODO: remove this direct key injection if we can delay it
        cloud_settings = load_cloud(client_id, configuration)
        raw_keys = cloud_settings.get('authkeys', '').split('\n')
        auth_keys = [i.split('#', 1)[0].strip() for i in raw_keys]
        auth_keys = [i for i in auth_keys if i]
        if not auth_keys:
            logger.error("No cloud pub keys setup for %s - refuse create" %
                         client_id)
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                """
You haven't provided any valid ssh pub key(s) for cloud instance login, which
is strictly required for all use. Please do so before you try again.
            """
            })
            output_objects.append({
                'object_type': 'link',
                'destination': 'setup.py?topic=cloud',
                'text': 'Open cloud setup',
                'class': 'cloudsetuplink iconspace',
                'title': 'open cloud setup',
                'target': '_blank'
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        logger.debug("Continue create for %s with auth_keys: %s" %
                     (client_id, auth_keys))

        # Create a new internal keyset and session id
        (priv_key, pub_key) = generate_ssh_rsa_key_pair(encode_utf8=True)
        session_id = generate_random_ascii(session_id_bytes,
                                           charset='0123456789abcdef')
        # We make sure to create instance with a globally unique ID on the
        # cloud while only showing the requested instance_label to the user.
        instance_id = cloud_build_instance_id(configuration, client_email,
                                              instance_label, session_id)
        # TODO: make more fields flexible/conf
        cloud_dict = {
            'INSTANCE_ID': instance_id,
            'INSTANCE_LABEL': instance_label,
            'INSTANCE_IMAGE': instance_image,
            'IMAGE_ID': image_id,
            'AUTH_KEYS': auth_keys,
            'USER_CERT': client_id,
            'INSTANCE_PRIVATE_KEY': priv_key,
            'INSTANCE_PUBLIC_KEY': pub_key,
            # don't need fraction precision, and not all systems provide it
            'CREATED_TIMESTAMP': int(time.time()),
            # Init unset ssh address and leave for floating IP assignment below
            'INSTANCE_SSH_IP': '',
            'INSTANCE_SSH_PORT': 22,
        }
        (action_status,
         action_msg) = create_cloud_instance(configuration, client_id,
                                             cloud_id, cloud_flavor,
                                             instance_id, image_id, auth_keys)
        if not action_status:
            logger.error(
                "%s %s cloud instance %s for %s failed: %s" %
                (action, cloud_id, instance_id, client_id, action_msg))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Your %s instance %s at %s did not succeed: %s' %
                (action, instance_label, service_title, action_msg)
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)

        # On success the action_msg contains the assigned floating IP address
        instance_ssh_fqdn = action_msg
        cloud_dict['INSTANCE_SSH_IP'] = instance_ssh_fqdn
        if not cloud_save_instance(configuration, client_id, cloud_id,
                                   instance_id, cloud_dict):
            logger.error("save new %s cloud instance %s for %s failed" %
                         (cloud_id, instance_id, client_id))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Error saving your %s cloud instance setup' % service_title
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)

        output_objects.append({
            'object_type':
            'text',
            'text':
            "%s instance %s at %s: %s" %
            (action, instance_label, service_title, "success")
        })
        output_objects.append({
            'object_type':
            'html_form',
            'text':
            _ssh_help(configuration, client_id, cloud_id, cloud_dict,
                      instance_id)
        })

    elif "delete" == action:
        saved_instance = cloud_load_instance(configuration, client_id,
                                             cloud_id, instance_id)
        if not saved_instance:
            logger.error("no saved %s cloud instance %s for %s to delete" %
                         (cloud_id, instance_id, client_id))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                instance_missing_msg % (_label, service_title)
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        (action_status,
         action_msg) = delete_cloud_instance(configuration, client_id,
                                             cloud_id, cloud_flavor,
                                             instance_id)
        if not action_status:
            logger.error(
                "%s %s cloud instance %s for %s failed: %s" %
                (action, cloud_id, instance_id, client_id, action_msg))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Your %s instance %s at %s did not succeed: %s' %
                (action, _label, service_title, action_msg)
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)

        if not cloud_purge_instance(configuration, client_id, cloud_id,
                                    instance_id):
            logger.error("purge %s cloud instance %s for %s failed" %
                         (cloud_id, instance_id, client_id))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Error deleting your %s cloud instance setup' % service_title
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)

        output_objects.append({
            'object_type':
            'text',
            'text':
            "%s instance %s at %s: %s" %
            (action, _label, service_title, "success")
        })

    elif "status" == action:
        saved_instance = cloud_load_instance(configuration, client_id,
                                             cloud_id, instance_id)
        if not saved_instance:
            logger.error("no saved %s cloud instance %s for %s to query" %
                         (cloud_id, instance_id, client_id))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                instance_missing_msg % (_label, service_title)
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        (action_status,
         action_msg) = status_of_cloud_instance(configuration, client_id,
                                                cloud_id, cloud_flavor,
                                                instance_id)
        if not action_status:
            logger.error(
                "%s %s cloud instance %s for %s failed: %s" %
                (action, cloud_id, instance_id, client_id, action_msg))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Your %s instance %s at %s did not succeed: %s' %
                (action, _label, service_title, action_msg)
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)

        output_objects.append({
            'object_type':
            'text',
            'text':
            "%s instance %s at %s: %s" %
            (action, _label, service_title, action_msg)
        })

        # Show instance access details if running
        if action_msg in ('ACTIVE', 'RUNNING'):
            # Only include web console if explicitly configured
            if configuration.user_cloud_console_access:
                (console_status, console_msg) = web_access_cloud_instance(
                    configuration, client_id, cloud_id, cloud_flavor,
                    instance_id)
                if not console_status:
                    logger.error(
                        "%s cloud instance %s console for %s failed: %s" % \
                        (cloud_id, instance_id, client_id, console_msg))
                    output_objects.append({
                        'object_type':
                        'error_text',
                        'text':
                        'Failed to get instance %s at %s console: %s' %
                        (_label, service_title, console_msg)
                    })
                    return (output_objects, returnvalues.SYSTEM_ERROR)
                logger.info("%s cloud instance %s console for %s: %s" %
                            (cloud_id, instance_id, client_id, console_msg))
                output_objects.append({
                    'object_type': 'link',
                    'destination': console_msg,
                    'text': 'Open web console',
                    'class': 'consolelink iconspace',
                    'title': 'open web console',
                    'target': '_blank'
                })
                output_objects.append({'object_type': 'text', 'text': ''})

            output_objects.append({
                'object_type':
                'html_form',
                'text':
                _ssh_help(configuration, client_id, cloud_id, saved_instance,
                          instance_id)
            })

        output_objects.append({'object_type': 'text', 'text': ''})

    elif "start" == action:
        saved_instance = cloud_load_instance(configuration, client_id,
                                             cloud_id, instance_id)
        if not saved_instance:
            logger.error("no saved %s cloud instance %s for %s to start" %
                         (cloud_id, instance_id, client_id))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                instance_missing_msg % (_label, service_title)
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        (action_status,
         action_msg) = start_cloud_instance(configuration, client_id, cloud_id,
                                            cloud_flavor, instance_id)
        if not action_status:
            logger.error(
                "%s %s cloud instance %s for %s failed: %s" %
                (action, cloud_id, instance_id, client_id, action_msg))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Your %s instance %s at %s did not succeed: %s' %
                (action, _label, service_title, action_msg)
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)

        output_objects.append({
            'object_type':
            'text',
            'text':
            "%s instance %s at %s: %s" %
            (action, _label, service_title, "success")
        })
        output_objects.append({
            'object_type':
            'html_form',
            'text':
            _ssh_help(configuration, client_id, cloud_id, saved_instance,
                      instance_id)
        })

    elif action in ("softrestart", "hardrestart"):
        boot_strength = action.replace("restart", "").upper()
        saved_instance = cloud_load_instance(configuration, client_id,
                                             cloud_id, instance_id)
        if not saved_instance:
            logger.error("no saved %s cloud instance %s for %s to restart" %
                         (cloud_id, instance_id, client_id))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                instance_missing_msg % (_label, service_title)
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        (action_status,
         action_msg) = restart_cloud_instance(configuration, client_id,
                                              cloud_id, cloud_flavor,
                                              instance_id, boot_strength)
        if not action_status:
            logger.error(
                "%s %s cloud instance %s for %s failed: %s" %
                (action, cloud_id, instance_id, client_id, action_msg))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Your %s instance %s at %s did not succeed: %s' %
                (action, _label, service_title, action_msg)
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)

        output_objects.append({
            'object_type':
            'text',
            'text':
            "%s instance %s at %s: %s" %
            (action, _label, service_title, "success")
        })
        output_objects.append({
            'object_type':
            'html_form',
            'text':
            _ssh_help(configuration, client_id, cloud_id, saved_instance,
                      instance_id)
        })

    elif "stop" == action:
        saved_instance = cloud_load_instance(configuration, client_id,
                                             cloud_id, instance_id)
        if not saved_instance:
            logger.error("no saved %s cloud instance %s for %s to %s" %
                         (cloud_id, instance_id, client_id, action))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                instance_missing_msg % (_label, service_title)
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        (action_status,
         action_msg) = stop_cloud_instance(configuration, client_id, cloud_id,
                                           cloud_flavor, instance_id)
        if not action_status:
            logger.error(
                "%s %s cloud instance %s for %s failed: %s" %
                (action, cloud_id, instance_id, client_id, action_msg))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Your %s instance %s at %s did not succeed: %s' %
                (action, _label, service_title, action_msg)
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)

        output_objects.append({
            'object_type':
            'text',
            'text':
            "%s instance %s at %s: %s" %
            (action, _label, service_title, "success")
        })

    elif "webaccess" == action:
        saved_instance = cloud_load_instance(configuration, client_id,
                                             cloud_id, instance_id)
        if not saved_instance:
            logger.error("no saved %s cloud instance %s for %s to query" %
                         (cloud_id, instance_id, client_id))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                instance_missing_msg % (_label, service_title)
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        if not configuration.user_cloud_console_access:
            logger.error("web console not enabled in conf!")
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Site does not expose cloud web console!'
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        (action_status,
         action_msg) = web_access_cloud_instance(configuration, client_id,
                                                 cloud_id, cloud_flavor,
                                                 instance_id)
        if not action_status:
            logger.error(
                "%s %s cloud instance %s for %s failed: %s" %
                (action, service_title, instance_id, client_id, action_msg))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Your %s instance %s at %s did not succeed: %s' %
                (action, _label, service_title, action_msg)
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)

        output_objects.append({
            'object_type':
            'text',
            'text':
            "%s instance %s at %s" % (action, _label, service_title)
        })
        output_objects.append({
            'object_type': 'link',
            'destination': action_msg,
            'text': 'Open web console',
            'class': 'consolelink iconspace',
            'title': 'open web console',
            'target': '_blank'
        })
        output_objects.append({'object_type': 'text', 'text': ''})
        output_objects.append({
            'object_type':
            'html_form',
            'text':
            _ssh_help(configuration, client_id, cloud_id, saved_instance,
                      instance_id)
        })

    elif "updatekeys" == action:
        saved_instance = cloud_load_instance(configuration, client_id,
                                             cloud_id, instance_id)
        if not saved_instance:
            logger.error("no saved %s cloud instance %s for %s to update" %
                         (cloud_id, instance_id, client_id))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                instance_missing_msg % (_label, service_title)
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        cloud_settings = load_cloud(client_id, configuration)
        auth_keys = cloud_settings.get('authkeys', '').split('\n')
        (action_status,
         action_msg) = update_cloud_instance_keys(configuration, client_id,
                                                  cloud_id, cloud_flavor,
                                                  instance_id, auth_keys)
        if not action_status:
            logger.error(
                "%s %s cloud instance %s for %s failed: %s" %
                (action, cloud_id, instance_id, client_id, action_msg))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Your %s instance %s at %s did not succeed: %s' %
                (action, _label, service_title, action_msg)
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)

        output_objects.append({
            'object_type':
            'text',
            'text':
            "%s instance %s at %s: %s" %
            (action, _label, service_title, "success")
        })
        output_objects.append({
            'object_type':
            'html_form',
            'text':
            _ssh_help(configuration, client_id, cloud_id, saved_instance,
                      instance_id)
        })
        output_objects.append({'object_type': 'text', 'text': ssh_auth_msg})
        for pub_key in auth_keys:
            output_objects.append({'object_type': 'text', 'text': pub_key})

    else:
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Unknown action: %s' % action
        })
        return_status = returnvalues.CLIENT_ERROR

    output_objects.append({
        'object_type': 'link',
        'destination': 'cloud.py',
        'class': 'backlink iconspace',
        'title': 'Go back to cloud management',
        'text': 'Back to cloud management'
    })
    return (output_objects, return_status)
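Each action branch in the cloud handler above repeats the same load-check-act-report cycle. The sketch below shows a table-driven way to organise such a cycle; the handler names and return convention are hypothetical and only meant to illustrate the idea, while the real module keeps the explicit if/elif chain.

def _start_instance(instance_id):
    # placeholder standing in for e.g. a start_cloud_instance(...) call
    return (True, 'started %s' % instance_id)

def _stop_instance(instance_id):
    # placeholder standing in for e.g. a stop_cloud_instance(...) call
    return (True, 'stopped %s' % instance_id)

ACTION_TABLE = {
    'start': _start_instance,
    'stop': _stop_instance,
}

def run_action(action, instance_id):
    """Dispatch action to its handler, reporting unknown actions uniformly."""
    handler = ACTION_TABLE.get(action)
    if handler is None:
        return (False, 'unknown action: %s' % action)
    return handler(instance_id)

# run_action('start', 'vm-42') -> (True, 'started vm-42')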
Example #14
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    defaults = signature()[1]
    title_entry = find_entry(output_objects, 'title')
    label = "%s" % configuration.site_vgrid_label
    title_entry['text'] = "Add %s Resource" % label
    output_objects.append({
        'object_type': 'header',
        'text': 'Add %s Resource(s)' % label
    })
    status = returnvalues.OK
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    vgrid_name = accepted['vgrid_name'][-1].strip()
    res_id_list = accepted['unique_resource_name']
    request_name = unhexlify(accepted['request_name'][-1])
    rank_list = accepted['rank'] + ['' for _ in res_id_list]

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    user_map = get_full_user_map(configuration)
    user_dict = user_map.get(client_id, None)
    # Optional site-wide limitation of manage vgrid permission
    if not user_dict or \
            not vgrid_manage_allowed(configuration, user_dict):
        logger.warning("user %s is not allowed to manage vgrids!" % client_id)
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Only privileged users can manage %ss' % label
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # make sure vgrid settings allow this owner to edit resources

    (allow_status, allow_msg) = allow_resources_adm(configuration, vgrid_name,
                                                    client_id)
    if not allow_status:
        output_objects.append({'object_type': 'error_text', 'text': allow_msg})
        return (output_objects, returnvalues.CLIENT_ERROR)

    res_id_added = []
    for (res_id, rank_str) in zip(res_id_list, rank_list):
        unique_resource_name = res_id.lower().strip()
        try:
            rank = int(rank_str)
        except ValueError:
            rank = None

        # Validity of user and vgrid names is checked in this init function so
        # no need to worry about illegal directory traversal through variables

        (ret_val, msg, ret_variables) = \
            init_vgrid_script_add_rem(vgrid_name, client_id,
                                      unique_resource_name, 'resource',
                                      configuration)
        if not ret_val:
            output_objects.append({'object_type': 'error_text', 'text': msg})
            status = returnvalues.CLIENT_ERROR
            continue
        elif msg:

            # In case of warnings, msg is non-empty while ret_val remains True

            output_objects.append({'object_type': 'warning', 'text': msg})

        # don't add if already in vgrid or parent vgrid unless rank is given

        if rank is None and vgrid_is_resource(vgrid_name, unique_resource_name,
                                              configuration):
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                '%s is already a resource in the %s' %
                (unique_resource_name, label)
            })
            status = returnvalues.CLIENT_ERROR
            continue

        # don't add if already in subvgrid

        (list_status,
         subvgrids) = vgrid_list_subvgrids(vgrid_name, configuration)
        if not list_status:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Error getting list of sub%ss: %s' % (label, subvgrids)
            })
            status = returnvalues.SYSTEM_ERROR
            continue
        skip_entity = False
        for subvgrid in subvgrids:
            if vgrid_is_resource(subvgrid,
                                 unique_resource_name,
                                 configuration,
                                 recursive=False):
                output_objects.append({
                    'object_type': 'error_text',
                    'text': '''%(res_name)s is already in a
sub-%(vgrid_label)s (%(subvgrid)s). Please remove the resource from the
sub-%(vgrid_label)s and try again''' % {
                        'res_name': unique_resource_name,
                        'subvgrid': subvgrid,
                        'vgrid_label': label
                    }
                })
                status = returnvalues.CLIENT_ERROR
                skip_entity = True
                break
        if skip_entity:
            continue

        # Check if only rank change was requested and apply if so

        if rank is not None:
            (add_status, add_msg) = vgrid_add_resources(configuration,
                                                        vgrid_name,
                                                        [unique_resource_name],
                                                        rank=rank)
            if not add_status:
                output_objects.append({
                    'object_type': 'error_text',
                    'text': add_msg
                })
                status = returnvalues.SYSTEM_ERROR
            else:
                output_objects.append({
                    'object_type':
                    'text',
                    'text':
                    'changed %s to resource %d' % (res_id, rank)
                })
            # No further action after rank change as everything else exists
            continue

        # Getting here means res_id is not a resource of any parent or
        # sub-vgrid.

        # Please note that base_dir must end in slash to avoid access to other
        # vgrid dirs when own name is a prefix of another name

        base_dir = os.path.abspath(configuration.vgrid_home + os.sep +
                                   vgrid_name) + os.sep
        resources_file = base_dir + 'resources'

        # Add to list and pickle

        (add_status, add_msg) = vgrid_add_resources(configuration, vgrid_name,
                                                    [unique_resource_name])
        if not add_status:
            output_objects.append({
                'object_type': 'error_text',
                'text': '%s' % add_msg
            })
            status = returnvalues.SYSTEM_ERROR
            continue
        res_id_added.append(unique_resource_name)

    if request_name:
        request_dir = os.path.join(configuration.vgrid_home, vgrid_name)
        if not delete_access_request(configuration, request_dir, request_name):
            logger.error("failed to delete res request for %s in %s" %
                         (vgrid_name, request_name))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Failed to remove saved request for %s in %s!' %
                (vgrid_name, request_name)
            })

    if res_id_added:
        output_objects.append({
            'object_type':
            'html_form',
            'text':
            'New resource(s)<br />%s<br />successfully added to %s %s!'
            '' % ('<br />'.join(res_id_added), vgrid_name, label)
        })
        res_id_fields = ''
        for res_id in res_id_added:
            res_id_fields += """
<input type=hidden name=unique_resource_name value='%s' />""" % res_id

        form_method = 'post'
        csrf_limit = get_csrf_limit(configuration)
        fill_helpers = {
            'vgrid_name': vgrid_name,
            'unique_resource_name': unique_resource_name,
            'protocol': any_protocol,
            'short_title': configuration.short_title,
            'vgrid_label': label,
            'res_id_fields': res_id_fields,
            'form_method': form_method,
            'csrf_field': csrf_field,
            'csrf_limit': csrf_limit
        }
        target_op = 'sendrequestaction'
        csrf_token = make_csrf_token(configuration, form_method, target_op,
                                     client_id, csrf_limit)
        fill_helpers.update({'target_op': target_op, 'csrf_token': csrf_token})
        output_objects.append({
            'object_type':
            'html_form',
            'text':
            """
<form method='%(form_method)s' action='%(target_op)s.py'>
<input type='hidden' name='%(csrf_field)s' value='%(csrf_token)s' />
<input type=hidden name=request_type value='vgridaccept' />
<input type=hidden name=vgrid_name value='%(vgrid_name)s' />
%(res_id_fields)s
<input type=hidden name=protocol value='%(protocol)s' />
<table>
<tr>
<td class='title'>Custom message to resource owners</td>
</tr><tr>
<td><textarea name=request_text cols=72 rows=10>
We have granted your %(unique_resource_name)s resource access to our
%(vgrid_name)s %(vgrid_label)s.
You can assign it to accept jobs from the %(vgrid_name)s %(vgrid_label)s from
your Resources page on %(short_title)s.

Regards, the %(vgrid_name)s %(vgrid_label)s owners
</textarea></td>
</tr>
<tr>
<td><input type='submit' value='Inform owners' /></td>
</tr>
</table>
</form>
<br />
""" % fill_helpers
        })

    output_objects.append({
        'object_type': 'link',
        'destination': 'adminvgrid.py?vgrid_name=%s' % vgrid_name,
        'text': 'Back to administration for %s' % vgrid_name
    })
    return (output_objects, returnvalues.OK)
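Example #14 ends by building a sendrequestaction form that carries a hidden CSRF field generated with make_csrf_token, the same anti-forgery pattern every POST backend here enforces through safe_handler. The sketch below shows the general idea with a plain HMAC standing in for the real token helper; the field name, secret and token recipe are illustrative assumptions, not the MiG implementation.

# Hedged sketch of the hidden-field CSRF pattern used by the form above.
# A plain HMAC over (method, target_op, client_id, limit) stands in for the
# real make_csrf_token helper; the field name and secret are assumptions.
import hashlib
import hmac

CSRF_FIELD = '_csrf_token'  # hypothetical field name for illustration

def make_token(secret, method, target_op, client_id, limit):
    msg = '%s:%s:%s:%s' % (method, target_op, client_id, limit)
    return hmac.new(secret.encode(), msg.encode(), hashlib.sha256).hexdigest()

def build_form(secret, target_op, client_id, limit=3600):
    """Return a POST form with the token embedded as a hidden input."""
    token = make_token(secret, 'post', target_op, client_id, limit)
    return """
<form method='post' action='%s.py'>
<input type='hidden' name='%s' value='%s' />
<input type='submit' value='Inform owners' />
</form>
""" % (target_op, CSRF_FIELD, token)

def verify(secret, target_op, client_id, limit, submitted):
    """Server-side check: recompute the token and compare in constant time."""
    expected = make_token(secret, 'post', target_op, client_id, limit)
    return hmac.compare_digest(expected, submitted)

if __name__ == '__main__':
    print(build_form('site-secret', 'sendrequestaction', '/C=DK/CN=Jane Doe'))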
Example #15
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False, op_menu=False)
    defaults = signature()[1]
    logger.debug('in extoidaction: %s' % user_arguments_dict)
    (validate_status, accepted) = validate_input(user_arguments_dict,
                                                 defaults,
                                                 output_objects,
                                                 allow_rejects=False)
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    # Unfortunately OpenID does not use POST
    # if not safe_handler(configuration, 'post', op_name, client_id,
    #                    get_csrf_limit(configuration), accepted):
    #    output_objects.append(
    #        {'object_type': 'error_text', 'text': '''Only accepting
# CSRF-filtered POST requests to prevent unintended updates'''
#         })
#    return (output_objects, returnvalues.CLIENT_ERROR)

    title_entry = find_entry(output_objects, 'title')
    title_entry[
        'text'] = '%s OpenID account sign up' % configuration.short_title
    title_entry['skipmenu'] = True
    output_objects.append({
        'object_type':
        'header',
        'text':
        '%s OpenID account sign up' % configuration.short_title
    })

    admin_email = configuration.admin_email
    smtp_server = configuration.smtp_server
    user_pending = os.path.abspath(configuration.user_pending)

    # force name to capitalized form (henrik karlsen -> Henrik Karlsen)

    id_url = os.environ['REMOTE_USER'].strip()
    openid_prefix = configuration.user_ext_oid_provider.rstrip('/') + '/'
    raw_login = id_url.replace(openid_prefix, '')
    full_name = accepted['openid.sreg.full_name'][-1].strip().title()
    country = accepted['openid.sreg.country'][-1].strip().upper()
    state = accepted['state'][-1].strip().title()
    organization = accepted['openid.sreg.organization'][-1].strip()
    organizational_unit = accepted['openid.sreg.organizational_unit'][
        -1].strip()
    locality = accepted['openid.sreg.locality'][-1].strip()

    # lower case email address

    email = accepted['openid.sreg.email'][-1].strip().lower()
    password = accepted['password'][-1]
    #verifypassword = accepted['verifypassword'][-1]

    # keep comment to a single line

    comment = accepted['comment'][-1].replace('\n', '   ')

    # single quotes break command line format - remove

    comment = comment.replace("'", ' ')
    accept_terms = (accepted['accept_terms'][-1].strip().lower()
                    in ('1', 'o', 'y', 't', 'on', 'yes', 'true'))

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not accept_terms:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'You must accept the terms of use in sign up!'
        })
        output_objects.append({
            'object_type': 'link',
            'destination': 'javascript:history.back();',
            'class': 'genericbutton',
            'text': "Try again"
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    user_dict = {
        'full_name': full_name,
        'organization': organization,
        'organizational_unit': organizational_unit,
        'locality': locality,
        'state': state,
        'country': country,
        'email': email,
        'password': password,
        'comment': comment,
        'expire': default_account_expire(configuration, 'oid'),
        'openid_names': [raw_login],
        'auth': ['extoid'],
    }
    fill_distinguished_name(user_dict)
    user_id = user_dict['distinguished_name']
    if configuration.user_openid_providers and configuration.user_openid_alias:
        user_dict['openid_names'].append(
            user_dict[configuration.user_openid_alias])

    req_path = None
    try:
        (os_fd, req_path) = tempfile.mkstemp(dir=user_pending)
        os.write(os_fd, dumps(user_dict))
        os.close(os_fd)
    except Exception as err:
        logger.error('Failed to write OpenID account request to %s: %s' %
                     (req_path, err))
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Request could not be sent to site administrators. Please
contact them manually on %s if this error persists.''' % admin_email
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    logger.info('Wrote OpenID account request to %s' % req_path)
    tmp_id = req_path.replace(user_pending, '')
    user_dict['tmp_id'] = tmp_id

    # TODO: remove cert generation or generate pw for it
    mig_user = os.environ.get('USER', 'mig')
    helper_commands = user_manage_commands(configuration, mig_user, req_path,
                                           user_id, user_dict, 'oid')
    user_dict.update(helper_commands)
    user_dict['site'] = configuration.short_title
    user_dict['vgrid_label'] = configuration.site_vgrid_label
    user_dict['vgridman_links'] = generate_https_urls(
        configuration, '%(auto_base)s/%(auto_bin)s/vgridman.py', {})
    email_header = '%s OpenID request for %s' % \
                   (configuration.short_title, full_name)
    email_msg = """
Received an OpenID account sign up with user data
 * Full Name: %(full_name)s
 * Organization: %(organization)s
 * State: %(state)s
 * Country: %(country)s
 * Email: %(email)s
 * Comment: %(comment)s
 * Expire: %(expire)s

Command to create user on %(site)s server:
%(command_user_create)s

Optional command to create matching certificate:
%(command_cert_create)s

Finally add the user
%(distinguished_name)s
to any relevant %(vgrid_label)ss using one of the management links:
%(vgridman_links)s


--- If user must be denied access or deleted at some point ---

Command to reject user account request on %(site)s server:
%(command_user_reject)s

Remove the user
%(distinguished_name)s
from any relevant %(vgrid_label)ss using one of the management links:
%(vgridman_links)s

Optional command to revoke any user certificates:
%(command_cert_revoke)s
You need to copy the resulting signed certificate revocation list (crl.pem)
to the web server(s) for the revocation to take effect.

Command to suspend user on %(site)s server:
%(command_user_suspend)s

Command to delete user again on %(site)s server:
%(command_user_delete)s

---

""" % user_dict

    logger.info('Sending email: to: %s, header: %s, msg: %s, smtp_server: %s' %
                (admin_email, email_header, email_msg, smtp_server))
    if not send_email(admin_email, email_header, email_msg, logger,
                      configuration):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''An error occurred trying to send the email requesting your new
user account. Please email the site administrators (%s) manually and include
the session ID: %s''' % (admin_email, tmp_id)
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    output_objects.append({
        'object_type':
        'text',
        'text':
        """Request sent to site administrators: Your user account will
be created as soon as possible, so please be patient. Once handled an email
will be sent to the account you have specified ('%s') with further information.
In case of inquiries about this request, please email the site administrators
(%s) and include the session ID: %s""" %
        (email, configuration.admin_email, tmp_id)
    })
    return (output_objects, returnvalues.OK)
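The sign-up handler above persists the pending account request by writing a serialized user dictionary to a unique file created with tempfile.mkstemp under user_pending, then mails the resulting file id to the admins. Below is a stripped-down sketch of just that persistence step, using pickle.dumps as a stand-in for the dumps helper the handler imports; the directory and dictionary contents are illustrative only.

# Minimal sketch of the request-persistence step used above: serialize the
# user dictionary into a unique file in a pending directory and return the
# path. pickle.dumps stands in for the dumps helper imported by the handler.
import os
import tempfile
from pickle import dumps

def save_pending_request(user_dict, pending_dir):
    """Write user_dict to a unique file and return its path; the caller logs
    and reports any exception, as the handler above does."""
    os.makedirs(pending_dir, exist_ok=True)
    (os_fd, req_path) = tempfile.mkstemp(dir=pending_dir)
    try:
        os.write(os_fd, dumps(user_dict))
    finally:
        os.close(os_fd)
    return req_path

if __name__ == '__main__':
    demo = {'full_name': 'Jane Doe', 'email': 'jane@example.org'}
    req_path = save_pending_request(demo, '/tmp/user_pending_demo')
    # The handler derives a session id by stripping the pending dir prefix
    print('wrote request %s' % req_path.replace('/tmp/user_pending_demo', ''))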
Example #16
def main(client_id, user_arguments_dict, environ=None):
    """Main function used by front end"""

    if environ is None:
        environ = os.environ

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    status = returnvalues.OK
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
        # NOTE: path can use wildcards, dst cannot
        typecheck_overrides={'path': valid_path_pattern},
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    flags = ''.join(accepted['flags'])
    patterns = accepted['path']
    dst = accepted['dst'][-1].lstrip(os.sep)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name

    base_dir = os.path.abspath(
        os.path.join(configuration.user_home, client_dir)) + os.sep

    if verbose(flags):
        for flag in flags:
            output_objects.append({
                'object_type': 'text',
                'text': '%s using flag: %s' % (op_name, flag)
            })
    if dst:
        if not safe_handler(configuration, 'post', op_name, client_id,
                            get_csrf_limit(configuration), accepted):
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        dst_mode = "wb"
        # IMPORTANT: path must be expanded to abs for proper chrooting
        abs_dest = os.path.abspath(os.path.join(base_dir, dst))
        relative_dst = abs_dest.replace(base_dir, '')
        if not valid_user_path(configuration, abs_dest, base_dir, True):
            logger.warning('%s tried to %s into restricted path %s ! (%s)' %
                           (client_id, op_name, abs_dest, dst))
            output_objects.append({
                'object_type': 'error_text',
                'text': "invalid destination: '%s'" % dst
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

    for pattern in patterns:

        # Check directory traversal attempts before actual handling to avoid
        # leaking information about file system layout while allowing
        # consistent error messages

        unfiltered_match = glob.glob(base_dir + pattern)
        match = []
        for server_path in unfiltered_match:
            # IMPORTANT: path must be expanded to abs for proper chrooting
            abs_path = os.path.abspath(server_path)
            if not valid_user_path(configuration, abs_path, base_dir, True):

                # out of bounds - save user warning for later to allow
                # partial match:
                # ../*/* is technically allowed to match own files.

                logger.warning('%s tried to %s restricted path %s ! (%s)' %
                               (client_id, op_name, abs_path, pattern))
                continue
            match.append(abs_path)

        # Now actually treat list of allowed matchings and notify if no
        # (allowed) match

        if not match:
            output_objects.append({
                'object_type': 'file_not_found',
                'name': pattern
            })
            status = returnvalues.FILE_NOT_FOUND

        for abs_path in match:
            output_lines = []
            relative_path = abs_path.replace(base_dir, '')
            try:
                gdp_iolog(configuration, client_id, environ['REMOTE_ADDR'],
                          'accessed', [relative_path])
                fd = open(abs_path, 'r')

                # use file directly as iterator for efficiency

                for line in fd:
                    output_lines.append(line)
                fd.close()
            except Exception as exc:
                if not isinstance(exc, GDPIOLogError):
                    gdp_iolog(configuration,
                              client_id,
                              environ['REMOTE_ADDR'],
                              'accessed', [relative_path],
                              failed=True,
                              details=exc)
                output_objects.append({
                    'object_type':
                    'error_text',
                    'text':
                    "%s: '%s': %s" % (op_name, relative_path, exc)
                })
                logger.error("%s: failed on '%s': %s" %
                             (op_name, relative_path, exc))

                status = returnvalues.SYSTEM_ERROR
                continue
            if dst:
                try:
                    gdp_iolog(configuration, client_id, environ['REMOTE_ADDR'],
                              'modified', [dst])
                    out_fd = open(abs_dest, dst_mode)
                    out_fd.writelines(output_lines)
                    out_fd.close()
                    logger.info('%s %s %s done' %
                                (op_name, abs_path, abs_dest))
                except Exception as exc:
                    if not isinstance(exc, GDPIOLogError):
                        gdp_iolog(configuration,
                                  client_id,
                                  environ['REMOTE_ADDR'],
                                  'modified', [dst],
                                  failed=True,
                                  details=exc)
                    output_objects.append({
                        'object_type': 'error_text',
                        'text': "write failed: '%s'" % exc
                    })
                    logger.error("%s: write failed on '%s': %s" %
                                 (op_name, abs_dest, exc))
                    status = returnvalues.SYSTEM_ERROR
                    continue
                output_objects.append({
                    'object_type':
                    'text',
                    'text':
                    "wrote %s to %s" % (relative_path, relative_dst)
                })
                # Prevent truncate after first write
                dst_mode = "ab+"
            else:
                entry = {
                    'object_type': 'file_output',
                    'lines': output_lines,
                    'wrap_binary': binary(flags),
                    'wrap_targets': ['lines']
                }
                if verbose(flags):
                    entry['path'] = relative_path
                output_objects.append(entry)

                # TODO: rip this hack out into real download handler?
                # Force download of files when output_format == 'file'
                # This will only work for the first file matching a glob when
                # using the file output format, and it is only supposed to
                # work for a single file anyway.
                if 'output_format' in user_arguments_dict:
                    output_format = user_arguments_dict['output_format'][0]
                    if output_format == 'file':
                        output_objects.append({
                            'object_type':
                            'start',
                            'headers': [('Content-Disposition',
                                         'attachment; filename="%s";' %
                                         os.path.basename(abs_path))]
                        })

    return (output_objects, status)
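Example #16 guards every file operation by expanding the user-supplied path to an absolute path and checking it against a base_dir that ends in a separator, so a user whose directory name is a prefix of another user's (e.g. alice vs alicia) cannot reach outside their own home. The sketch below isolates that prefix check; the real valid_user_path helper additionally resolves symlinks and shared folders, so treat this only as the core idea.

# Hedged sketch of the chroot-style prefix check used above. The real
# valid_user_path also resolves symlinks and vgrid/job shares; this only
# demonstrates why base_dir must end in a separator before comparing.
import os

def inside_home(base_dir, user_path):
    """Return the absolute path if it stays under base_dir, else None."""
    if not base_dir.endswith(os.sep):
        # '/home/alice' must become '/home/alice/' so it cannot match
        # a sibling like '/home/alicia'
        base_dir += os.sep
    abs_path = os.path.abspath(os.path.join(base_dir, user_path))
    if not (abs_path + os.sep).startswith(base_dir):
        return None
    return abs_path

if __name__ == '__main__':
    home = '/home/alice'
    for candidate in ('notes.txt', 'sub/dir/data.csv', '../alicia/secret',
                      '../../etc/passwd'):
        print('%-20s -> %s' % (candidate, inside_home(home, candidate)))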
Example #17
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    action = accepted['action'][-1]
    transfer_id = accepted['transfer_id'][-1]
    protocol = accepted['protocol'][-1]
    fqdn = accepted['fqdn'][-1]
    port = accepted['port'][-1]
    src_list = accepted['transfer_src']
    dst = accepted['transfer_dst'][-1]
    username = accepted['username'][-1]
    password = accepted['transfer_pw'][-1]
    key_id = accepted['key_id'][-1]
    # Skip empty exclude entries as they break backend calls
    exclude_list = [i for i in accepted['exclude'] if i]
    notify = accepted['notify'][-1]
    compress = accepted['compress'][-1]
    flags = accepted['flags']

    anon_checked, pw_checked, key_checked = '', '', ''
    if username:
        if key_id:
            key_checked = 'checked'
            init_login = "******"
        else:
            pw_checked = 'checked'
            init_login = "******"
    else:
        anon_checked = 'checked'
        init_login = "******"
    use_compress = False
    if compress.lower() in ("true", "1", "yes", "on"):
        use_compress = True

    title_entry = find_entry(output_objects, 'title')
    title_entry['text'] = 'Background Data Transfers'
    title_entry['container_class'] = 'fillwidth'

    # jquery support for tablesorter and confirmation on delete/redo:
    # datatransfer and key tables initially sorted by 0 (id) */

    datatransfer_spec = {
        'table_id': 'datatransferstable',
        'pager_id': 'datatransfers_pager',
        'sort_order': '[[0,0]]'
    }
    transferkey_spec = {
        'table_id': 'transferkeystable',
        'pager_id': 'transferkeys_pager',
        'sort_order': '[[0,0]]'
    }
    (add_import, add_init,
     add_ready) = man_base_js(configuration,
                              [datatransfer_spec, transferkey_spec])
    add_init += '''
    var fields = 0;
    var max_fields = 20;
    var src_input = "<label for=\'transfer_src\'>Source path(s)</label>";
    src_input += "<input id=\'src_FIELD\' type=text size=60 name=transfer_src value=\'PATH\' title=\'relative source path: local for exports and remote for imports\' />";
    src_input += "<input id=\'src_file_FIELD\' type=radio onclick=\'setSrcDir(FIELD, false);\' checked />Source file";
    src_input += "<input id=\'src_dir_FIELD\' type=radio onclick=\'setSrcDir(FIELD, true);\' />Source directory (recursive)";
    src_input += "<br />";
    var exclude_input = "<label for=\'exclude\'>Exclude path(s)</label>";
    exclude_input += "<input type=text size=60 name=exclude value=\'PATH\' title=\'relative path or regular expression to exclude\' />";
    exclude_input += "<br />";
    function addSource(path, is_dir) {
        if (path === undefined) {
            path = "";
        }
        if (is_dir === undefined) {
            is_dir = false;
        }
        if (fields < max_fields) {
            $("#srcfields").append(src_input.replace(/FIELD/g, fields).replace(/PATH/g, path));
            setSrcDir(fields, is_dir);
            fields += 1;
        } else {
            alert("Maximum " + max_fields + " source fields allowed!");
        }
    }
    function addExclude(path) {
        if (path === undefined) {
            path = "";
        }
        $("#excludefields").append(exclude_input.replace(/PATH/g, path));
    }
    function setDir(target, field_no, is_dir) {
        var id_prefix = "#"+target+"_";
        var input_id = id_prefix+field_no;
        var file_id = id_prefix+"file_"+field_no;
        var dir_id = id_prefix+"dir_"+field_no;
        var value = $(input_id).val();
        $(file_id).prop("checked", "");
        $(dir_id).prop("checked", "");
        if (is_dir) {
            $(dir_id).prop("checked", "checked");
            if(value.substr(-1) != "/") {
                value += "/";
            }
        } else {
            $(file_id).prop("checked", "checked");
            if(value.substr(-1) == "/") {
                value = value.substr(0, value.length - 1);
            }
        }
        $(input_id).val(value);
        return false;
    }
    function setSrcDir(field_no, is_dir) {
        return setDir("src", field_no, is_dir);
    }
    function setDstDir(field_no, is_dir) {
        return setDir("dst", field_no, is_dir);
    }
    function refreshSrcDir(field_no) {
        var dir_id = "#src_dir_"+field_no;
        var is_dir = $(dir_id).prop("checked");
        return setSrcDir(field_no, is_dir);
    }
    function refreshDstDir(field_no) {
        var dir_id = "#dst_dir_"+field_no;
        var is_dir = $(dir_id).prop("checked");
        return setDstDir(field_no, is_dir);
    }
    function setDefaultPort() {
        port_map = {"http": 80, "https": 443, "sftp": 22, "scp": 22, "ftp": 21,
                    "ftps": 21, "webdav": 80, "webdavs": 443, "rsyncssh": 22,
                    "rsyncd": 873};
        var protocol = $("#protocol_select").val();
        var port = port_map[protocol]; 
        if (port != undefined) {
            $("#port_input").val(port);
        } else {
            alert("no default port provided for "+protocol);
        }
    }
    function beforeSubmit() {
        for(var i=0; i < fields; i++) {
            refreshSrcDir(i);
        }
        refreshDstDir(0);
        // Proceed with submit
        return true;
    }
    function doSubmit() {
        $("#submit-request-transfer").click();
    }
    function enableLogin(method) {
        $("#anonymous_choice").prop("checked", "");
        $("#userpassword_choice").prop("checked", "");
        $("#userkey_choice").prop("checked", "");
        $("#username").prop("disabled", false);
        $("#password").prop("disabled", true);
        $("#key").prop("disabled", true);
        $("#login_fields").show();
        $("#password_entry").hide();
        $("#key_entry").hide();
        
        if (method == "password") {
            $("#userpassword_choice").prop("checked", "checked");
            $("#password").prop("disabled", false);
            $("#password_entry").show();
        } else if (method == "key") {
            $("#userkey_choice").prop("checked", "checked");
            $("#key").prop("disabled", false);
            $("#key_entry").show();
        } else {
            $("#anonymous_choice").prop("checked", "checked");
            $("#username").prop("disabled", true);
            $("#login_fields").hide();
        }
    }
    '''
    # Mangle ready handling to begin with dynamic init and end with tab init
    pre_ready = '''
        enableLogin("%s");
        ''' % init_login
    for src in src_list or ['']:
        pre_ready += '''
        addSource("%s", %s);
        ''' % (src, ("%s" % src.endswith('/')).lower())
    for exclude in exclude_list or ['']:
        pre_ready += '''
        addExclude("%s");
        ''' % exclude
    add_ready = '''
        %s
        %s
        /* NOTE: requires managers CSS fix for proper tab bar height */      
        $(".datatransfer-tabs").tabs();
        $("#logarea").scrollTop($("#logarea")[0].scrollHeight);
    ''' % (pre_ready, add_ready)
    title_entry['script']['advanced'] += add_import
    title_entry['script']['init'] += add_init
    title_entry['script']['ready'] += add_ready
    output_objects.append({
        'object_type': 'html_form',
        'text': man_base_html(configuration)
    })

    output_objects.append({
        'object_type': 'header',
        'text': 'Manage background data transfers'
    })

    if not configuration.site_enable_transfers:
        output_objects.append({
            'object_type':
            'text',
            'text':
            '''Background data transfers are disabled on
this site.
Please contact the site admins %s if you think they should be enabled.
''' % configuration.admin_email
        })
        return (output_objects, returnvalues.OK)

    logger.info('datatransfer %s from %s' % (action, client_id))

    if not action in valid_actions:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Invalid action "%s" (supported: %s)' %
            (action, ', '.join(valid_actions))
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if action in post_actions:
        if not safe_handler(configuration, 'post', op_name, client_id,
                            get_csrf_limit(configuration), accepted):
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                '''Only accepting
                CSRF-filtered POST requests to prevent unintended updates'''
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

    (load_status, transfer_map) = load_data_transfers(configuration, client_id)
    if not load_status:
        transfer_map = {}

    restrict_list = []
    for from_fqdn in configuration.site_transfers_from:
        restrict_list += [from_fqdn, socket.gethostbyname(from_fqdn)]
    restrict_str = 'from="%s",no-pty,' % ','.join(restrict_list)
    restrict_str += 'no-port-forwarding,no-agent-forwarding,no-X11-forwarding'
    restrict_template = '''
As usual it is a good security measure to prepend a <em>from</em> restriction
when you know the key will only be used from a single location.<br/>
In this case the keys will only ever be used from %s and will not need much
else, so the public key can be inserted in your authorized_keys file as:
<br/>
<p>
<textarea class="publickey" rows="5" readonly="readonly">%s %%s</textarea>
</p>
''' % (configuration.short_title, restrict_str)

    form_method = 'post'
    csrf_limit = get_csrf_limit(configuration)
    target_op = 'datatransfer'
    csrf_token = make_csrf_token(configuration, form_method, target_op,
                                 client_id, csrf_limit)
    if action in get_actions:
        datatransfers = []
        for (saved_id, transfer_dict) in transfer_map.items():
            transfer_item = build_transferitem_object(configuration,
                                                      transfer_dict)
            transfer_item['status'] = transfer_item.get('status', 'NEW')
            data_url = ''
            # NOTE: we need to urlencode any exotic chars in paths here
            if transfer_item['action'] == 'import':
                enc_path = quote(("%(dst)s" % transfer_item))
                data_url = "fileman.py?path=%s" % enc_path
            elif transfer_item['action'] == 'export':
                enc_paths = [quote(i) for i in transfer_item['src']]
                data_url = "fileman.py?path=" + ';path='.join(enc_paths)
            if data_url:
                transfer_item['viewdatalink'] = {
                    'object_type': 'link',
                    'destination': data_url,
                    'class': 'viewlink iconspace',
                    'title': 'View local component of %s' % saved_id,
                    'text': ''
                }
            transfer_item['viewoutputlink'] = {
                'object_type': 'link',
                'destination':
                "fileman.py?path=transfer_output/%s/" % saved_id,
                'class': 'infolink iconspace',
                'title': 'View status files for %s' % saved_id,
                'text': ''
            }
            # Edit is just a call to self with fillimport set
            args = [('action', 'fill%(action)s' % transfer_dict),
                    ('key_id', '%(key)s' % transfer_dict),
                    ('transfer_dst', '%(dst)s' % transfer_dict)]
            for src in transfer_dict['src']:
                args.append(('transfer_src', src))
            for exclude in transfer_dict.get('exclude', []):
                args.append(('exclude', exclude))
            for field in edit_fields:
                val = transfer_dict.get(field, '')
                args.append((field, val))
            transfer_args = urlencode(args, True)
            transfer_item['edittransferlink'] = {
                'object_type': 'link',
                'destination': "%s.py?%s" % (target_op, transfer_args),
                'class': 'editlink iconspace',
                'title': 'Edit or duplicate transfer %s' % saved_id,
                'text': ''
            }
            js_name = 'delete%s' % hexlify(saved_id)
            helper = html_post_helper(
                js_name, '%s.py' % target_op, {
                    'transfer_id': saved_id,
                    'action': 'deltransfer',
                    csrf_field: csrf_token
                })
            output_objects.append({'object_type': 'html_form', 'text': helper})
            transfer_item['deltransferlink'] = {
                'object_type':
                'link',
                'destination':
                "javascript: confirmDialog(%s, '%s');" %
                (js_name, 'Really remove %s?' % saved_id),
                'class':
                'removelink iconspace',
                'title':
                'Remove %s' % saved_id,
                'text':
                ''
            }
            js_name = 'redo%s' % hexlify(saved_id)
            helper = html_post_helper(
                js_name, '%s.py' % target_op, {
                    'transfer_id': saved_id,
                    'action': 'redotransfer',
                    csrf_field: csrf_token
                })
            output_objects.append({'object_type': 'html_form', 'text': helper})
            transfer_item['redotransferlink'] = {
                'object_type':
                'link',
                'destination':
                "javascript: confirmDialog(%s, '%s');" %
                (js_name, 'Really reschedule %s?' % saved_id),
                'class':
                'refreshlink iconspace',
                'title':
                'Reschedule %s' % saved_id,
                'text':
                ''
            }
            datatransfers.append(transfer_item)
        #logger.debug("found datatransfers: %s" % datatransfers)
        log_path = os.path.join(configuration.user_home,
                                client_id_dir(client_id), "transfer_output",
                                configuration.site_transfer_log)
        show_lines = 40
        log_lines = read_tail(log_path, show_lines, logger)
        available_keys = load_user_keys(configuration, client_id)
        if available_keys:
            key_note = ''
        else:
            key_note = '''No keys available - you can add a key for use in
transfers below.'''

        if action.endswith('import'):
            transfer_action = 'import'
        elif action.endswith('export'):
            transfer_action = 'export'
        else:
            transfer_action = 'unknown'

        import_checked, export_checked = 'checked', ''
        toggle_quiet, scroll_to_create = '', ''
        if action in ['fillimport', 'fillexport']:
            if quiet(flags):
                toggle_quiet = '''
<script>
 $("#wrap-tabs").hide();
 $("#quiet-mode-content").show();
</script>
'''
            scroll_to_create = '''
<script>
 $("html, body").animate({
  scrollTop: $("#createtransfer").offset().top
   }, 2000);
</script>
            '''
            if action == 'fillimport':
                import_checked = 'checked'
            elif action == 'fillexport':
                export_checked = 'checked'
                import_checked = ''

        fill_helpers = {
            'import_checked': import_checked,
            'export_checked': export_checked,
            'anon_checked': anon_checked,
            'pw_checked': pw_checked,
            'key_checked': key_checked,
            'transfer_id': transfer_id,
            'protocol': protocol,
            'fqdn': fqdn,
            'port': port,
            'username': username,
            'password': password,
            'key_id': key_id,
            'transfer_src_string': ', '.join(src_list),
            'transfer_src': src_list,
            'transfer_dst': dst,
            'exclude': exclude_list,
            'compress': use_compress,
            'notify': notify,
            'toggle_quiet': toggle_quiet,
            'scroll_to_create': scroll_to_create,
            'transfer_action': transfer_action,
            'form_method': form_method,
            'csrf_field': csrf_field,
            'csrf_limit': csrf_limit,
            'target_op': target_op,
            'csrf_token': csrf_token
        }

        # Make page with manage transfers tab and manage keys tab

        output_objects.append({
            'object_type':
            'html_form',
            'text':
            '''
    <div id="quiet-mode-content" class="hidden">
        <p>
        Accept data %(transfer_action)s of %(transfer_src_string)s from
        %(protocol)s://%(fqdn)s:%(port)s/ into %(transfer_dst)s ?
        </p>
        <p>
            <input type=button onClick="doSubmit();"
            value="Accept %(transfer_action)s" />
        </p>
    </div>
    <div id="wrap-tabs" class="datatransfer-tabs">
<ul>
<li><a href="#transfer-tab">Manage Data Transfers</a></li>
<li><a href="#keys-tab">Manage Transfer Keys</a></li>
</ul>
''' % fill_helpers
        })

        # Display external transfers, log and form to add new ones

        output_objects.append({
            'object_type': 'html_form',
            'text': '''
<div id="transfer-tab">
'''
        })

        output_objects.append({
            'object_type': 'sectionheader',
            'text': 'External Data Transfers'
        })
        output_objects.append({
            'object_type': 'table_pager',
            'id_prefix': 'datatransfers_',
            'entry_name': 'transfers',
            'default_entries': default_pager_entries
        })
        output_objects.append({
            'object_type': 'datatransfers',
            'datatransfers': datatransfers
        })
        output_objects.append({
            'object_type': 'sectionheader',
            'text': 'Latest Transfer Results'
        })
        output_objects.append({
            'object_type':
            'html_form',
            'text':
            '''
<textarea id="logarea" class="fillwidth" rows=5 readonly="readonly">%s</textarea>
''' % (''.join(log_lines))
        })
        output_objects.append({
            'object_type': 'sectionheader',
            'text': 'Create External Data Transfer'
        })
        transfer_html = '''
<table class="addexttransfer">
<tr><td>
Fill in the import/export data transfer details below to request a new
background data transfer task.<br/>  
Source must be a path without wildcard characters, and you must explicitly mark
whether it is a directory. Directories are transferred recursively; otherwise
the source is treated as a single file and the transfer fails if it is not
one.<br/>
Destination is a single directory to transfer the data into. It is interpreted
relative to your user home for <em>import</em> requests, and source is
similarly interpreted relative to your user home for <em>export</em>
requests.<br/>
Destination is always handled as a directory path to transfer source files
into.<br/>
<form method="%(form_method)s" action="%(target_op)s.py"
    onSubmit="return beforeSubmit();">
<input type="hidden" name="%(csrf_field)s" value="%(csrf_token)s" />
<fieldset id="transferbox">
<table id="createtransfer" class="addexttransfer">
<tr><td>
<label for="action">Action</label>
<input type=radio name=action %(import_checked)s value="import" />import data
<input type=radio name=action %(export_checked)s value="export" />export data
</td></tr>
<tr><td>
<label for="transfer_id">Optional Transfer ID / Name </label>
<input type=text size=60 name=transfer_id value="%(transfer_id)s"
    pattern="[a-zA-Z0-9._-]*"
    title="Optional ID string containing only ASCII letters and digits possibly with separators like hyphen, underscore and period" />
</td></tr>
<tr><td>
<label for="protocol">Protocol</label>
<select id="protocol_select" class="protocol-select themed-select html-select"
    name="protocol" onblur="setDefaultPort();">
'''
        # select requested protocol
        for (key, val) in valid_proto:
            if protocol == key:
                selected = 'selected'
            else:
                selected = ''
            transfer_html += '<option %s value="%s">%s</option>' % \
                             (selected, key, val)
        transfer_html += '''
</select>
</td></tr>
<tr><td>
<label for="fqdn">Host and port</label>
<input type=text size=37 name=fqdn value="%(fqdn)s" required
    pattern="[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)+"
    title="A fully qualified domain name or Internet IP address for the remote location"/>
<input id="port_input" type=number step=1 min=1 max=65535 name=port
    value="%(port)s" required />
</td></tr>
<tr><td>
<label for="">Login method</label>
<input id="anonymous_choice" type=radio %(anon_checked)s
    onclick="enableLogin(\'anonymous\');" />
anonymous access
<input id="userpassword_choice" type=radio %(pw_checked)s
    onclick="enableLogin(\'password\');" />
login with password
<input id="userkey_choice" type=radio %(key_checked)s
    onclick="enableLogin(\'key\');" />
login with key
</td></tr>
<tr id="login_fields" style="display: none;"><td>
<label for="username">Username</label>
<input id="username" type=text size=60 name=username value="%(username)s"
    pattern="[a-zA-Z0-9._-]*"
    title="Optional username used to login on the remote site, if required" />
<br/>
<span id="password_entry">
<label for="transfer_pw">Password</label>
<input id="password" type=password size=60 name=transfer_pw value="" />
</span>
<span id="key_entry">
<label for="key_id">Key</label>
<select id="key" class="key-select themed-select html-select" name=key_id />
'''
        # select requested key
        for key_dict in available_keys:
            if key_dict['key_id'] == key_id:
                selected = 'selected'
            else:
                selected = ''
            transfer_html += '<option %s value="%s">%s</option>' % \
                             (selected, key_dict['key_id'], key_dict['key_id'])
            selected = ''
        transfer_html += '''
</select> %s
''' % key_note
        transfer_html += '''
</span>
</td></tr>
<tr><td>
<div id="srcfields">
<!-- NOTE: automatically filled by addSource function -->
</div>
<input id="addsrcbutton" type="button" onclick="addSource(); return false;"
    value="Add another source field" />
</td></tr>
<tr><td>
<label for="transfer_dst">Destination path</label>
<input id=\'dst_0\' type=text size=60 name=transfer_dst
    value="%(transfer_dst)s" required
    title="relative destination path: local for imports and remote for exports" />
<input id=\'dst_dir_0\' type=radio checked />Destination directory
<input id=\'dst_file_0\' type=radio disabled />Destination file<br />
</td></tr>
<tr><td>
<div id="excludefields">
<!-- NOTE: automatically filled by addExclude function -->
</div>
<input id="addexcludebutton" type="button" onclick="addExclude(); return false;"
    value="Add another exclude field" />
</td></tr>
<tr><td>
<label for="compress">Enable compression (leave unset except for <em>slow</em> sites)</label>
<input type=checkbox name=compress>
</td></tr>
<tr><td>
<label for="notify">Optional notify on completion (e.g. email address)</label>
<input type=text size=60 name=notify value=\'%(notify)s\'>
</td></tr>
<tr><td>
<span>
<input id="submit-request-transfer" type=submit value="Request transfer" />
<input type=reset value="Clear" />
</span>
</td></tr>
</table>
</fieldset>
</form>
</td>
</tr>
</table>
%(toggle_quiet)s
%(scroll_to_create)s
'''
        output_objects.append({
            'object_type': 'html_form',
            'text': transfer_html % fill_helpers
        })
        output_objects.append({
            'object_type': 'html_form',
            'text': '''
</div>
'''
        })

        # Display key management

        output_objects.append({
            'object_type': 'html_form',
            'text': '''
<div id="keys-tab">
'''
        })
        output_objects.append({
            'object_type': 'sectionheader',
            'text': 'Manage Data Transfer Keys'
        })
        key_html = '''
<form method="%(form_method)s" action="%(target_op)s.py">
<input type="hidden" name="%(csrf_field)s" value="%(csrf_token)s" />
<table class="managetransferkeys">
<tr><td>
'''
        transferkeys = []
        for key_dict in available_keys:
            key_item = build_keyitem_object(configuration, key_dict)
            saved_id = key_item['key_id']
            js_name = 'delete%s' % hexlify(saved_id)
            helper = html_post_helper(js_name, '%s.py' % target_op, {
                'key_id': saved_id,
                'action': 'delkey',
                csrf_field: csrf_token
            })
            output_objects.append({'object_type': 'html_form', 'text': helper})
            key_item['delkeylink'] = {
                'object_type':
                'link',
                'destination':
                "javascript: confirmDialog(%s, '%s');" %
                (js_name, 'Really remove %s?' % saved_id),
                'class':
                'removelink iconspace',
                'title':
                'Remove %s' % saved_id,
                'text':
                ''
            }
            transferkeys.append(key_item)

        output_objects.append({
            'object_type': 'table_pager',
            'id_prefix': 'transferkeys_',
            'entry_name': 'keys',
            'default_entries': default_pager_entries
        })
        output_objects.append({
            'object_type': 'transferkeys',
            'transferkeys': transferkeys
        })

        key_html += '''
Please copy the public key to your ~/.ssh/authorized_keys or
~/.ssh/authorized_keys2 file on systems where you want to login with the
corresponding key.<br/>
%s
</td></tr>
<tr><td>
Select a name below to create a new key for use in future transfers. The key is
generated and stored in a private storage area on %s, so that only the transfer
service can access and use it for your transfers.
</td></tr>
<tr><td>
<input type=hidden name=action value="generatekey" />
Key name:<br/>
<input type=text size=60 name=key_id value="" required pattern="[a-zA-Z0-9._-]+"
    title="internal name for the key when used in transfers. I.e. letters and digits separated only by underscores, periods and hyphens" />
<br/>
<input type=submit value="Generate key" />
</td></tr>
</table>
</form>
''' % (restrict_template % 'ssh-rsa AAAAB3NzaC...', configuration.short_title)
        output_objects.append({
            'object_type': 'html_form',
            'text': key_html % fill_helpers
        })
        output_objects.append({
            'object_type': 'html_form',
            'text': '''
</div>
'''
        })

        output_objects.append({
            'object_type': 'html_form',
            'text': '''
</div>
'''
        })
        return (output_objects, returnvalues.OK)
    elif action in transfer_actions:
        # NOTE: all path validation is done at run-time in grid_transfers
        transfer_dict = transfer_map.get(transfer_id, {})
        if action == 'deltransfer':
            if transfer_dict is None:
                output_objects.append({
                    'object_type':
                    'error_text',
                    'text':
                    'existing transfer_id is required for delete'
                })
                return (output_objects, returnvalues.CLIENT_ERROR)
            (save_status, _) = delete_data_transfer(configuration, client_id,
                                                    transfer_id, transfer_map)
            desc = "delete"
        elif action == 'redotransfer':
            if transfer_dict is None:
                output_objects.append({
                    'object_type':
                    'error_text',
                    'text':
                    'existing transfer_id is required for reschedule'
                })
                return (output_objects, returnvalues.CLIENT_ERROR)
            transfer_dict['status'] = 'NEW'
            (save_status, _) = update_data_transfer(configuration, client_id,
                                                    transfer_dict,
                                                    transfer_map)
            desc = "reschedule"
        else:
            if not fqdn:
                output_objects.append({
                    'object_type': 'error_text',
                    'text': 'No host address provided!'
                })
                return (output_objects, returnvalues.CLIENT_ERROR)
            if not [src for src in src_list if src] or not dst:
                output_objects.append({
                    'object_type':
                    'error_text',
                    'text':
                    'transfer_src and transfer_dst parameters '
                    'required for all data transfers!'
                })
                return (output_objects, returnvalues.CLIENT_ERROR)
            if protocol == "rsyncssh" and not key_id:
                output_objects.append({
                    'object_type':
                    'error_text',
                    'text':
                    'RSYNC over SSH is only supported with key!'
                })
                return (output_objects, returnvalues.CLIENT_ERROR)
            if not password and not key_id and protocol in warn_anon:
                output_objects.append({
                    'object_type':
                    'warning',
                    'text':
                    '''
%s transfers usually require explicit authentication with your credentials.
Proceeding as requested with anonymous login, but the transfer is likely to
fail.''' % valid_proto_map[protocol]
                })
            if key_id and protocol in warn_key:
                output_objects.append({
                    'object_type':
                    'warning',
                    'text':
                    '''
%s transfers usually only support authentication with username and password
rather than key. Proceeding as requested, but the transfer is likely to
fail if it really requires login.''' % valid_proto_map[protocol]
                })

            # Make pseudo-unique ID based on msec time since epoch if not given
            if not transfer_id:
                transfer_id = "transfer-%d" % (time.time() * 1000)
            if transfer_dict:
                desc = "update"
            else:
                desc = "create"

            if password:
                # We don't want to store password in plain text on disk
                password_digest = make_digest('datatransfer', client_id,
                                              password,
                                              configuration.site_digest_salt)
            else:
                password_digest = ''
            transfer_dict.update({
                'transfer_id': transfer_id,
                'action': action,
                'protocol': protocol,
                'fqdn': fqdn,
                'port': port,
                'username': username,
                'password_digest': password_digest,
                'key': key_id,
                'src': src_list,
                'dst': dst,
                'exclude': exclude_list,
                'compress': use_compress,
                'notify': notify,
                'status': 'NEW'
            })
            (save_status, _) = create_data_transfer(configuration, client_id,
                                                    transfer_dict,
                                                    transfer_map)
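            # NOTE: create_data_transfer is used for both the create and the
            # update case here; presumably it simply overwrites any existing
            # entry with the same transfer_id in transfer_map.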
        if not save_status:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Error in %s data transfer %s: ' % (desc, transfer_id) +
                'saving the updated transfers failed!'
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        output_objects.append({
            'object_type':
            'text',
            'text':
            '%sd transfer request %s.' % (desc.title(), transfer_id)
        })
        if action != 'deltransfer':
            output_objects.append({
                'object_type':
                'link',
                'destination':
                "fileman.py?path=transfer_output/%s/" % transfer_id,
                'title':
                'Transfer status and output',
                'text':
                'Transfer status and output folder'
            })
            output_objects.append({
                'object_type':
                'text',
                'text':
                '''
Please note that the status files only appear once the transfer starts, so the
folder may be empty for now.
'''
            })
        logger.debug('datatransfer %s from %s done: %s' %
                     (action, client_id, transfer_dict))
    elif action in key_actions:
        if action == 'generatekey':
            (gen_status, pub) = generate_user_key(configuration, client_id,
                                                  key_id)
            if gen_status:
                output_objects.append({
                    'object_type':
                    'html_form',
                    'text':
                    '''
Generated new key with name %s and associated public key:<br/>
<textarea class="publickey" rows="5" readonly="readonly">%s</textarea>
<p>
Please copy it to your ~/.ssh/authorized_keys or ~/.ssh/authorized_keys2 file
on the host(s) where you want to use this key for background transfer login.
<br/>
%s
</p>
''' % (key_id, pub, restrict_template % pub)
                })
            else:
                output_objects.append({
                    'object_type':
                    'error_text',
                    'text':
                    '''
Key generation for name %s failed with error: %s''' % (key_id, pub)
                })
                return (output_objects, returnvalues.CLIENT_ERROR)
        elif action == 'delkey':
            pubkey = '[unknown]'
            available_keys = load_user_keys(configuration, client_id)
            for key_dict in available_keys:
                if key_dict['key_id'] == key_id:
                    pubkey = key_dict.get('public_key', pubkey)
            (del_status, msg) = delete_user_key(configuration, client_id,
                                                key_id)
            if del_status:
                output_objects.append({
                    'object_type':
                    'html_form',
                    'text':
                    '''
<p>
Deleted the key "%s" and the associated public key:<br/>
</p>
<textarea class="publickey" rows="5" readonly="readonly">%s</textarea>
<p>
You will no longer be able to use it in your data transfers and can safely
remove the public key from your ~/.ssh/authorized_keys* files on any hosts
where you may have previously added it.
</p>
''' % (key_id, pubkey)
                })
            else:
                output_objects.append({
                    'object_type':
                    'error_text',
                    'text':
                    '''
Key removal for name %s failed with error: %s''' % (key_id, msg)
                })
                return (output_objects, returnvalues.CLIENT_ERROR)
    else:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Invalid data transfer action: %s' % action
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    output_objects.append({
        'object_type': 'link',
        'destination': 'datatransfer.py',
        'text': 'Return to data transfers overview'
    })

    return (output_objects, returnvalues.OK)
Example #18
0
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False, op_menu=False)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input(user_arguments_dict,
                                                 defaults,
                                                 output_objects,
                                                 allow_rejects=False)
    if not validate_status:
        logger.warning('%s invalid input: %s' % (op_name, accepted))
        return (accepted, returnvalues.CLIENT_ERROR)

    if not configuration.site_enable_openid or \
            not 'migoid' in configuration.site_signup_methods:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Local OpenID login is not enabled on this site'''
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    title_entry = find_entry(output_objects, 'title')
    title_entry['text'] = '%s OpenID account request' % \
                          configuration.short_title
    title_entry['skipmenu'] = True
    output_objects.append({
        'object_type':
        'header',
        'text':
        '%s OpenID account request' % configuration.short_title
    })

    admin_email = configuration.admin_email
    smtp_server = configuration.smtp_server
    user_pending = os.path.abspath(configuration.user_pending)

    # TODO: switch to canonical_user from mig.shared.base instead?
    # force name to capitalized form (henrik karlsen -> Henrik Karlsen)
    # please note that we get utf8 coded bytes here and title() treats such
    # chars as word termination. Temporarily force to unicode.

    raw_name = accepted['cert_name'][-1].strip()
    try:
        cert_name = force_utf8(force_unicode(raw_name).title())
    except Exception:
        cert_name = raw_name.title()
    country = accepted['country'][-1].strip().upper()
    state = accepted['state'][-1].strip().upper()
    org = accepted['org'][-1].strip()

    # lower case email address

    email = accepted['email'][-1].strip().lower()
    password = accepted['password'][-1]
    verifypassword = accepted['verifypassword'][-1]
    # The checkbox typically returns value 'on' if selected
    passwordrecovery = (accepted['passwordrecovery'][-1].strip().lower()
                        in ('1', 'o', 'y', 't', 'on', 'yes', 'true'))

    # keep comment to a single line

    comment = accepted['comment'][-1].replace('\n', '   ')

    # single quotes break command line format - remove

    comment = comment.replace("'", ' ')
    accept_terms = (accepted['accept_terms'][-1].strip().lower()
                    in ('1', 'o', 'y', 't', 'on', 'yes', 'true'))

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not accept_terms:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'You must accept the terms of use in sign up!'
        })
        output_objects.append({
            'object_type': 'link',
            'destination': 'javascript:history.back();',
            'class': 'genericbutton',
            'text': "Try again"
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if password != verifypassword:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Password and verify password are not identical!'
        })
        output_objects.append({
            'object_type': 'link',
            'destination': 'javascript:history.back();',
            'class': 'genericbutton',
            'text': "Try again"
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    try:
        assure_password_strength(configuration, password)
    except Exception as exc:
        logger.warning(
            "%s invalid password for '%s' (policy %s): %s" %
            (op_name, cert_name, configuration.site_password_policy, exc))
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Invalid password requested: %s.' % exc
        })
        output_objects.append({
            'object_type': 'link',
            'destination': 'javascript:history.back();',
            'class': 'genericbutton',
            'text': "Try again"
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not existing_country_code(country, configuration):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Illegal country code:
Please read and follow the instructions shown in the help bubble when filling
the country field on the request page!
Specifically, if you are from the U.K. you need to use GB as the country code
in line with the ISO-3166 standard.
'''
        })
        output_objects.append({
            'object_type': 'link',
            'destination': 'javascript:history.back();',
            'class': 'genericbutton',
            'text': "Try again"
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # TODO: move this check to conf?

    if not forced_org_email_match(org, email, configuration):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Illegal email and organization combination:
Please read and follow the instructions in red on the request page!
If you are a student with only a @*.ku.dk address, please just use KU as the
organization. As long as you state that you want the account for course
purposes in the comment field, you will be given access to the necessary
resources anyway.
'''
        })
        output_objects.append({
            'object_type': 'link',
            'destination': 'javascript:history.back();',
            'class': 'genericbutton',
            'text': "Try again"
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # NOTE: we save password on scrambled form only if explicitly requested
    if passwordrecovery:
        logger.info('saving %s scrambled password to enable recovery' % email)
        scrambled_pw = scramble_password(configuration.site_password_salt,
                                         password)
    else:
        logger.info('only saving %s password hash' % email)
        scrambled_pw = ''
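    # Both forms may end up in the user entry below: the hash is presumably
    # what login checks use, while the scrambled copy is only kept when the
    # user opted in to password recovery above.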
    user_dict = {
        'full_name': cert_name,
        'organization': org,
        'state': state,
        'country': country,
        'email': email,
        'comment': comment,
        'password': scrambled_pw,
        'password_hash': make_hash(password),
        'expire': default_account_expire(configuration, 'oid'),
        'openid_names': [],
        'auth': ['migoid'],
    }

    fill_distinguished_name(user_dict)
    user_id = user_dict['distinguished_name']
    user_dict['authorized'] = (user_id == client_id)
    if configuration.user_openid_providers and configuration.user_openid_alias:
        user_dict['openid_names'] += \
            [user_dict[configuration.user_openid_alias]]
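    # Assumption: registering the alias value (e.g. the email) as an OpenID
    # name lets the new account log in through the local OpenID provider
    # right away once it is created.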
    logger.info('got account request from reqoid: %s' % user_dict)

    # For testing only

    if cert_name.upper().find('DO NOT SEND') != -1:
        output_objects.append({
            'object_type': 'text',
            'text': "Test request ignored!"
        })
        return (output_objects, returnvalues.OK)

    req_path = None
    try:
        (os_fd, req_path) = tempfile.mkstemp(dir=user_pending)
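        # mkstemp gives a unique file in user_pending without races; its
        # basename is reused below as the session ID quoted in mails.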
        os.write(os_fd, dumps(user_dict))
        os.close(os_fd)
    except Exception as err:
        logger.error('Failed to write OpenID account request to %s: %s' %
                     (req_path, err))
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Request could not be sent to site
administrators. Please contact them manually on %s if this error persists.''' %
            admin_email
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    logger.info('Wrote OpenID account request to %s' % req_path)
    tmp_id = os.path.basename(req_path)
    user_dict['tmp_id'] = tmp_id

    mig_user = os.environ.get('USER', 'mig')
    helper_commands = user_manage_commands(configuration, mig_user, req_path,
                                           user_id, user_dict, 'oid')
    user_dict.update(helper_commands)
    user_dict['site'] = configuration.short_title
    user_dict['vgrid_label'] = configuration.site_vgrid_label
    user_dict['vgridman_links'] = generate_https_urls(
        configuration, '%(auto_base)s/%(auto_bin)s/vgridman.py', {})
    email_header = '%s OpenID request for %s' % \
                   (configuration.short_title, cert_name)
    email_msg = \
        """
Received an OpenID request with account data
 * Full Name: %(full_name)s
 * Organization: %(organization)s
 * State: %(state)s
 * Country: %(country)s
 * Email: %(email)s
 * Comment: %(comment)s
 * Expire: %(expire)s

Command to create user on %(site)s server:
%(command_user_create)s

Command to inform user and %(site)s admins:
%(command_user_notify)s

Optional command to create matching certificate:
%(command_cert_create)s

Finally add the user
%(distinguished_name)s
to any relevant %(vgrid_label)ss using one of the management links:
%(vgridman_links)s


--- If user must be denied access or deleted at some point ---

Command to reject user account request on %(site)s server:
%(command_user_reject)s

Remove the user
%(distinguished_name)s
from any relevant %(vgrid_label)ss using one of the management links:
%(vgridman_links)s

Optional command to revoke any matching user certificate:
%(command_cert_revoke)s
You need to copy the resulting signed certificate revocation list (crl.pem)
to the web server(s) for the revocation to take effect.

Command to suspend user on %(site)s server:
%(command_user_suspend)s

Command to delete user again on %(site)s server:
%(command_user_delete)s

---

""" % user_dict

    logger.info('Sending email: to: %s, header: %s, msg: %s, smtp_server: %s' %
                (admin_email, email_header, email_msg, smtp_server))
    if not send_email(admin_email, email_header, email_msg, logger,
                      configuration):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''An error occurred trying to send the email
requesting the site administrators to create a new OpenID login and account. Please
email them (%s) manually and include the session ID: %s''' %
            (admin_email, tmp_id)
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    output_objects.append({
        'object_type':
        'text',
        'text':
        """Request sent to site administrators:
Your OpenID account request will be verified and handled as soon as possible,
so please be patient.
Once handled, an email will be sent to the address you specified ('%s') with
further information. In case of inquiries about this request, please email
the site administrators (%s) and include the session ID: %s""" %
        (email, configuration.admin_email, tmp_id)
    })
    return (output_objects, returnvalues.OK)
Example #19
0
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    output_objects.append({
        'object_type': 'text',
        'text': '--------- Trying to RESTART exe ----------'
    })

    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    unique_resource_name = accepted['unique_resource_name'][-1]
    cputime = accepted['cputime'][-1]
    exe_name_list = accepted['exe_name']
    all_exes = accepted['all'][-1].lower() == 'true'
    parallel = accepted['parallel'][-1].lower() == 'true'

    if not configuration.site_enable_resources:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Resources are not enabled on this system'''
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not is_owner(client_id, unique_resource_name,
                    configuration.resource_home, logger):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Failure: You must be an owner of ' + unique_resource_name +
            ' to restart the exe!'
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    exit_status = returnvalues.OK

    if all_exes:
        exe_name_list = get_all_exe_names(unique_resource_name)

    # take action based on supplied list of exes

    if len(exe_name_list) == 0:
        output_objects.append({
            'object_type':
            'text',
            'text':
            "No exes specified and 'all' argument not set to true: Nothing to do!"
        })

    workers = []
    task_list = []
    for exe_name in exe_name_list:
        task = Worker(target=stop_resource_exe,
                      args=(unique_resource_name, exe_name,
                            configuration.resource_home, logger))
        workers.append((exe_name, [task]))
        task_list.append(task)
        throttle_max_concurrent(task_list)
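        # throttle_max_concurrent presumably blocks until the number of live
        # workers drops below a site-defined limit before the next one starts.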
        task.start()
        if not parallel:
            task.join()

    # Complete each stop thread before launching corresponding start threads

    for (exe_name, task_list) in workers:

        # We could optimize with a non-blocking join here, but keep it simple
        # for now as the final result has to wait for the slowest member anyway

        task_list[0].join()
        task = Worker(target=start_resource_exe,
                      args=(unique_resource_name, exe_name,
                            configuration.resource_home, int(cputime), logger))
        task_list.append(task)
        throttle_max_concurrent(task_list)
        task.start()
        if not parallel:
            task.join()

    for (exe_name, task_list) in workers:
        (status, msg) = task_list[0].finish()
        output_objects.append({
            'object_type': 'header',
            'text': 'Restart exe output:'
        })
        if not status:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Problems stopping exe during restart: %s' % msg
            })

        (status2, msg2) = task_list[1].finish()
        if not status2:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Problems starting exe during restart: %s' % msg2
            })
            exit_status = returnvalues.SYSTEM_ERROR
        if status and status2:
            output_objects.append({
                'object_type':
                'text',
                'text':
                'Restart exe success: Stop output: %s ; Start output: %s' %
                (msg, msg2)
            })

    return (output_objects, exit_status)
Example #20
0
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    title_entry = find_entry(output_objects, 'title')
    title_entry['text'] = 'Delete runtime environment'
    output_objects.append({
        'object_type': 'header',
        'text': 'Delete runtime environment'
    })
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    re_name = accepted['re_name'][-1]

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not valid_dir_input(configuration.re_home, re_name):
        logger.warning(
            "possible illegal directory traversal attempt re_name '%s'" %
            re_name)
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Illegal runtime environment name: "%s"' % re_name
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Check whether re_name represents a runtime environment
    if not is_runtime_environment(re_name, configuration):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            "No such runtime environment: '%s'" % re_name
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    (re_dict, load_msg) = get_re_dict(re_name, configuration)
    if not re_dict:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Could not read runtime environment details for %s: %s' %
            (re_name, load_msg)
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    # Make sure the runtime environment belongs to the user trying to delete it
    if client_id != re_dict['CREATOR']:
        output_objects.append({
            'object_type': 'error_text',
            'text': 'You are not the owner of runtime environment "%s"' % re_name
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Prevent delete if the runtime environment is used by any resources
    actives = resources_using_re(configuration, re_name)
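    # Assumption: resources_using_re returns the names of all resources whose
    # configuration advertises this runtime environment.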

    # If the runtime environment is active, an error message is printed, along
    # with a list of the resources using the runtime environment
    if actives:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            "Can't delete runtime environment '%s' in use by resources:" %
            re_name
        })
        output_objects.append({'object_type': 'list', 'list': actives})
        output_objects.append({
            'object_type': 'link',
            'destination': 'redb.py',
            'class': 'infolink iconspace',
            'title': 'Show runtime environments',
            'text': 'Show runtime environments'
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Delete the runtime environment
    (del_status, msg) = delete_runtimeenv(re_name, configuration)

    # If something goes wrong when trying to delete runtime environment
    # re_name, an error is displayed.
    if not del_status:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Could not remove %s runtime environment: %s' % (re_name, msg)
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    # If deletion of runtime environment re_name is successful, we just
    # return OK
    else:
        output_objects.append({
            'object_type':
            'text',
            'text':
            'Successfully deleted runtime environment: "%s"' % re_name
        })
        output_objects.append({
            'object_type': 'link',
            'destination': 'redb.py',
            'class': 'infolink iconspace',
            'title': 'Show runtime environments',
            'text': 'Show runtime environments'
        })
        return (output_objects, returnvalues.OK)
Example #21
0
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    patterns = accepted['job_id']

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not configuration.site_enable_jobs:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Job execution is not enabled on this system'''
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name

    base_dir = \
        os.path.abspath(os.path.join(configuration.mrsl_files_dir,
                        client_dir)) + os.sep

    status = returnvalues.OK
    filelist = []
    for pattern in patterns:
        pattern = pattern.strip()

        # Backward compatibility - all_jobs keyword should match all jobs

        if pattern == all_jobs:
            pattern = '*'

        # Check directory traversal attempts before actual handling to
        # avoid leaking information about file system layout while
        # allowing consistent error messages

        unfiltered_match = glob.glob(base_dir + pattern + '.mRSL')
        match = []
        for server_path in unfiltered_match:
            # IMPORTANT: path must be expanded to abs for proper chrooting
            abs_path = os.path.abspath(server_path)
            if not valid_user_path(configuration, abs_path, base_dir, True):

                # out of bounds - save user warning for later to allow
                # partial match:
                # ../*/* is technically allowed to match own files.

                logger.warning('%s tried to %s restricted path %s ! (%s)' %
                               (client_id, op_name, abs_path, pattern))
                continue

            # Insert valid job files in filelist for later treatment

            match.append(abs_path)

        # Now actually treat list of allowed matchings and notify if no
        # (allowed) match

        if not match:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                '%s: You do not have any matching job IDs!' % pattern
            })
            status = returnvalues.CLIENT_ERROR
        else:
            filelist += match

    # job feasibility is hard on the server, limit

    if len(filelist) > 100:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Too many matching jobs (%s)!' % len(filelist)
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    checkcondjobs = []

    for filepath in filelist:

        # Extract job_id from filepath (replace doesn't modify filepath)

        mrsl_file = filepath.replace(base_dir, '')
        job_id = mrsl_file.replace('.mRSL', '')

        checkcondjob = {'object_type': 'checkcondjob', 'job_id': job_id}

        job_dict = unpickle(filepath, logger)
        if not job_dict:
            checkcondjob['message'] = \
                ('The file containing the information for job id %s could '
                 'not be opened! You can only check feasibility of '
                 'your own jobs!') % job_id
            checkcondjobs.append(checkcondjob)
            status = returnvalues.CLIENT_ERROR
            continue

        # Is the job status pending?

        possible_check_states = ['QUEUED', 'RETRY', 'FROZEN']
        if job_dict['STATUS'] not in possible_check_states:
            checkcondjob['message'] = \
                'You can only check feasibility of jobs with status: %s.'\
                 % ' or '.join(possible_check_states)
            checkcondjobs.append(checkcondjob)
            continue

        # Actually check feasibility
        feasible_res = job_feasibility(configuration, job_dict)
        checkcondjob.update(feasible_res)
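        # job_feasibility presumably returns a dict of per-requirement
        # verdicts, merged into the per-job result object above.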
        checkcondjobs.append(checkcondjob)

    output_objects.append({
        'object_type': 'checkcondjobs',
        'checkcondjobs': checkcondjobs
    })
    return (output_objects, status)
Example #22
0
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    patterns = accepted['job_id']
    action = accepted['action'][-1]

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not configuration.site_enable_jobs:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Job execution is not enabled on this system'''
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    if action not in valid_actions:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Invalid job action "%s" (only %s supported)' %
            (action, ', '.join(valid_actions.keys()))
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    new_state = valid_actions[action]
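    # Assumption: valid_actions maps each action to the resulting job state,
    # e.g. something like 'cancel' -> 'CANCELED', 'freeze' -> 'FROZEN' and
    # 'thaw' -> 'QUEUED'; the exact mapping is defined elsewhere.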

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name

    base_dir = \
        os.path.abspath(os.path.join(configuration.mrsl_files_dir,
                        client_dir)) + os.sep

    status = returnvalues.OK
    filelist = []
    for pattern in patterns:
        pattern = pattern.strip()

        # Backward compatibility - all_jobs keyword should match all jobs

        if pattern == all_jobs:
            pattern = '*'

        # Check directory traversal attempts before actual handling to avoid
        # leaking information about file system layout while allowing
        # consistent error messages

        unfiltered_match = glob.glob(base_dir + pattern + '.mRSL')
        match = []
        for server_path in unfiltered_match:
            # IMPORTANT: path must be expanded to abs for proper chrooting
            abs_path = os.path.abspath(server_path)
            if not valid_user_path(configuration, abs_path, base_dir, True):

                # out of bounds - save user warning for later to allow
                # partial match:
                # ../*/* is technically allowed to match own files.

                logger.error(
                    '%s tried to use %s %s outside own home! (pattern %s)' %
                    (client_id, op_name, abs_path, pattern))
                continue

            # Insert valid job files in filelist for later treatment

            match.append(abs_path)

        # Now actually treat list of allowed matchings and notify if no
        # (allowed) match

        if not match:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                '%s: You do not have any matching job IDs!' % pattern
            })
            status = returnvalues.CLIENT_ERROR
        else:
            filelist += match

    # job state change is hard on the server, limit

    if len(filelist) > 500:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Too many matching jobs (%s)!' % len(filelist)
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    changedstatusjobs = []

    for filepath in filelist:

        # Extract job_id from filepath (replace doesn't modify filepath)

        mrsl_file = filepath.replace(base_dir, '')
        job_id = mrsl_file.replace('.mRSL', '')

        changedstatusjob = {
            'object_type': 'changedstatusjob',
            'job_id': job_id
        }

        job_dict = unpickle(filepath, logger)
        if not job_dict:
            changedstatusjob['message'] = '''The file containing the
information for job id %s could not be opened! You can only %s your own
jobs!''' % (job_id, action)
            changedstatusjobs.append(changedstatusjob)
            status = returnvalues.CLIENT_ERROR
            continue

        changedstatusjob['oldstatus'] = job_dict['STATUS']

        # Is the job status compatible with action?

        possible_cancel_states = [
            'PARSE', 'QUEUED', 'RETRY', 'EXECUTING', 'FROZEN'
        ]
        if action == 'cancel' and \
               not job_dict['STATUS'] in possible_cancel_states:
            changedstatusjob['message'] = \
                'You can only cancel jobs with status: %s.'\
                 % ' or '.join(possible_cancel_states)
            status = returnvalues.CLIENT_ERROR
            changedstatusjobs.append(changedstatusjob)
            continue
        possible_freeze_states = ['QUEUED', 'RETRY']
        if action == 'freeze' and \
               not job_dict['STATUS'] in possible_freeze_states:
            changedstatusjob['message'] = \
                'You can only freeze jobs with status: %s.'\
                 % ' or '.join(possible_freeze_states)
            status = returnvalues.CLIENT_ERROR
            changedstatusjobs.append(changedstatusjob)
            continue
        possible_thaw_states = ['FROZEN']
        if action == 'thaw' and \
               not job_dict['STATUS'] in possible_thaw_states:
            changedstatusjob['message'] = \
                'You can only thaw jobs with status: %s.'\
                 % ' or '.join(possible_thaw_states)
            status = returnvalues.CLIENT_ERROR
            changedstatusjobs.append(changedstatusjob)
            continue

        # job action is handled by changing the STATUS field, notifying the
        # job queue and making sure the server never submits jobs with status
        # FROZEN or CANCELED.

        # file is repickled to ensure newest information is used, job_dict
        # might be old if another script has modified the file.

        if not unpickle_and_change_status(filepath, new_state, logger):
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Job status could not be changed to %s!' % new_state
            })
            status = returnvalues.SYSTEM_ERROR

        # Avoid key error and make sure grid_script gets expected number of
        # arguments

        if 'UNIQUE_RESOURCE_NAME' not in job_dict:
            job_dict['UNIQUE_RESOURCE_NAME'] = \
                'UNIQUE_RESOURCE_NAME_NOT_FOUND'
        if 'EXE' not in job_dict:
            job_dict['EXE'] = 'EXE_NAME_NOT_FOUND'

        # notify queue
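        # The JOBACTION line tells grid_script which job changed, from which
        # status to which, and on which resource/exe, so the queue can be
        # updated accordingly (assumption based on the message format below).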

        if not send_message_to_grid_script(
                'JOBACTION ' + job_id + ' ' + job_dict['STATUS'] + ' ' +
                new_state + ' ' + job_dict['UNIQUE_RESOURCE_NAME'] + ' ' +
                job_dict['EXE'] + '\n', logger, configuration):
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                '''Error sending message to grid_script,
job may still be in the job queue.'''
            })
            status = returnvalues.SYSTEM_ERROR
            continue

        changedstatusjob['newstatus'] = new_state
        changedstatusjobs.append(changedstatusjob)

    output_objects.append({
        'object_type': 'changedstatusjobs',
        'changedstatusjobs': changedstatusjobs
    })
    return (output_objects, status)
Example #23
0
def main(client_id, user_arguments_dict, environ=None):
    """Main function used by front end"""

    if environ is None:
        environ = os.environ
    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False,
                                  op_menu=False)
    logger = configuration.logger
    logger.info('%s: args: %s' % (op_name, user_arguments_dict))
    prefilter_map = {}

    output_objects.append({'object_type': 'header',
                           'text': 'Automatic %s sign up'
                           % configuration.short_title})
    (auth_type, auth_flavor) = detect_client_auth(configuration, environ)
    identity = extract_client_id(configuration, environ, lookup_dn=False)
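    # auth_type is the overall mechanism (certificate, OpenID 2.0 or OpenID
    # Connect) while auth_flavor distinguishes the local and external
    # provider variants handled in the branches below.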
    if client_id and auth_type == AUTH_CERTIFICATE:
        if auth_flavor == AUTH_MIG_CERT:
            base_url = configuration.migserver_https_mig_cert_url
        elif auth_flavor == AUTH_EXT_CERT:
            base_url = configuration.migserver_https_ext_cert_url
        else:
            logger.warning('no matching sign up auth flavor %s' % auth_flavor)
            output_objects.append({'object_type': 'error_text', 'text':
                                   '%s sign up not supported' % auth_flavor})
            return (output_objects, returnvalues.SYSTEM_ERROR)
    elif identity and auth_type == AUTH_OPENID_V2:
        if auth_flavor == AUTH_MIG_OID:
            base_url = configuration.migserver_https_mig_oid_url
        elif auth_flavor == AUTH_EXT_OID:
            base_url = configuration.migserver_https_ext_oid_url
        else:
            logger.warning('no matching sign up auth flavor %s' % auth_flavor)
            output_objects.append({'object_type': 'error_text', 'text':
                                   '%s sign up not supported' % auth_flavor})
            return (output_objects, returnvalues.SYSTEM_ERROR)
        for name in ('openid.sreg.cn', 'openid.sreg.fullname',
                     'openid.sreg.full_name'):
            prefilter_map[name] = filter_commonname
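        # filter_commonname presumably strips characters that are not allowed
        # in a Distinguished Name before the shared validation below runs.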
    elif identity and auth_type == AUTH_OPENID_CONNECT:
        if auth_flavor == AUTH_MIG_OIDC:
            base_url = configuration.migserver_https_mig_oidc_url
        elif auth_flavor == AUTH_EXT_OIDC:
            base_url = configuration.migserver_https_ext_oidc_url
        else:
            logger.warning('no matching sign up auth flavor %s' % auth_flavor)
            output_objects.append({'object_type': 'error_text', 'text':
                                   '%s sign up not supported' % auth_flavor})
            return (output_objects, returnvalues.SYSTEM_ERROR)
        oidc_keys = signature(AUTH_OPENID_CONNECT)[1].keys()
        # NOTE: again we lowercase to avoid case sensitivity in validation
        for key in environ:
            low_key = key.replace('OIDC_CLAIM_', 'oidc.claim.').lower()
            if low_key in oidc_keys:
                user_arguments_dict[low_key] = [environ[key]]
    else:
        logger.error('autocreate without ID rejected for %s' % client_id)
        output_objects.append({'object_type': 'error_text',
                               'text': 'Missing user credentials'})
        return (output_objects, returnvalues.CLIENT_ERROR)
    defaults = signature(auth_type)[1]
    (validate_status, accepted) = validate_input(user_arguments_dict,
                                                 defaults, output_objects,
                                                 allow_rejects=False,
                                                 prefilter_map=prefilter_map)
    if not validate_status:
        logger.warning('%s from %s got invalid input: %s' %
                       (op_name, client_id, accepted))
        return (accepted, returnvalues.CLIENT_ERROR)

    logger.debug('Accepted arguments: %s' % accepted)
    # logger.debug('with environ: %s' % environ)

    admin_email = configuration.admin_email
    smtp_server = configuration.smtp_server
    (openid_names, oid_extras) = ([], {})
    tmp_id = 'tmp%s' % time.time()

    logger.info('Received autocreate from %s with ID %s' % (client_id, tmp_id))

    # Extract raw values

    if auth_type == AUTH_CERTIFICATE:
        uniq_id = accepted['cert_id'][-1].strip()
        raw_name = accepted['cert_name'][-1].strip()
        country = accepted['country'][-1].strip()
        state = accepted['state'][-1].strip()
        org = accepted['org'][-1].strip()
        org_unit = ''
        # NOTE: leave role and association alone here
        role = ''
        association = ''
        locality = ''
        timezone = ''
        email = accepted['email'][-1].strip()
    elif auth_type == AUTH_OPENID_V2:
        uniq_id = accepted['openid.sreg.nickname'][-1].strip() \
            or accepted['openid.sreg.short_id'][-1].strip()
        raw_name = accepted['openid.sreg.fullname'][-1].strip() \
            or accepted['openid.sreg.full_name'][-1].strip()
        country = accepted['openid.sreg.country'][-1].strip()
        state = accepted['openid.sreg.state'][-1].strip()
        org = accepted['openid.sreg.o'][-1].strip() \
            or accepted['openid.sreg.organization'][-1].strip()
        org_unit = accepted['openid.sreg.ou'][-1].strip() \
            or accepted['openid.sreg.organizational_unit'][-1].strip()

        # We may receive multiple roles and associations

        role = ','.join([i for i in accepted['openid.sreg.role'] if i])
        association = ','.join([i for i in
                                accepted['openid.sreg.association']
                                if i])
        locality = accepted['openid.sreg.locality'][-1].strip()
        timezone = accepted['openid.sreg.timezone'][-1].strip()

        # We may encounter results without an email, fall back to uniq_id then

        email = accepted['openid.sreg.email'][-1].strip() or uniq_id
    elif auth_type == AUTH_OPENID_CONNECT:
        uniq_id = accepted['oidc.claim.upn'][-1].strip() \
            or accepted['oidc.claim.sub'][-1].strip()
        raw_name = accepted['oidc.claim.fullname'][-1].strip()
        country = accepted['oidc.claim.country'][-1].strip()
        state = accepted['oidc.claim.state'][-1].strip()
        org = accepted['oidc.claim.o'][-1].strip() \
            or accepted['oidc.claim.organization'][-1].strip()
        org_unit = accepted['oidc.claim.ou'][-1].strip() \
            or accepted['oidc.claim.organizational_unit'][-1].strip()

        # We may receive multiple roles and associations

        role = ','.join([i for i in accepted['oidc.claim.role'] if i])
        association = ','.join([i for i in
                                accepted['oidc.claim.association']
                                if i])
        locality = accepted['oidc.claim.locality'][-1].strip()
        timezone = accepted['oidc.claim.timezone'][-1].strip()

        # We may encounter results without an email, fall back to uniq_id then

        email = accepted['oidc.claim.email'][-1].strip() or uniq_id

    # TODO: switch to canonical_user from mig.shared.base instead?
    # Fix case of values:
    # force name to capitalized form (henrik karlsen -> Henrik Karlsen)
    # please note that we get utf8 coded bytes here and title() treats such
    # chars as word termination. Temporarily force to unicode.

    try:
        full_name = force_utf8(force_unicode(raw_name).title())
    except Exception:
        logger.warning('could not use unicode form to capitalize full name')
        full_name = raw_name.title()
    country = country.upper()
    state = state.upper()
    email = email.lower()
    accept_terms = (accepted['accept_terms'][-1].strip().lower() in
                    ('1', 'o', 'y', 't', 'on', 'yes', 'true'))

    if auth_type in (AUTH_OPENID_V2, AUTH_OPENID_CONNECT):

        # KU OpenID sign up does not deliver accept_terms so we implicitly
        # let it imply acceptance for now
        accept_terms = True

        # Remap some oid attributes if on KIT format with faculty in
        # organization and institute in organizational_unit. We can add them
        # as different fields as long as we make sure the x509 fields are
        # preserved.
        # Additionally in the special case with unknown institute (ou=ukendt)
        # we force organization to KU to align with cert policies.
        # We do that to allow autocreate updating existing cert users.

        if org_unit not in ('', 'NA'):
            org_unit = org_unit.upper()
            oid_extras['faculty'] = org
            oid_extras['institute'] = org_unit
            org = org_unit.upper()
            org_unit = 'NA'
            if org == 'UKENDT':
                org = 'KU'
                logger.info('unknown affiliation, set organization to %s'
                            % org)

        # Stay on virtual host - extra useful while we test dual OpenID

        base_url = environ.get('REQUEST_URI', base_url).split('?')[0]
        backend = 'home.py'
        if configuration.site_enable_gdp:
            backend = 'gdpman.py'
        elif configuration.site_autolaunch_page:
            backend = os.path.basename(configuration.site_autolaunch_page)
        elif configuration.site_landing_page:
            backend = os.path.basename(configuration.site_landing_page)
        base_url = base_url.replace('autocreate.py', backend)
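        # i.e. redirect back to the same virtual host the user signed up on,
        # swapping the autocreate backend for the configured landing or
        # autolaunch page.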

        raw_login = ''
        if auth_type == AUTH_OPENID_V2:
            # OpenID 2.0 provides user ID on URL format - only add plain ID
            for oid_provider in configuration.user_openid_providers:
                openid_prefix = oid_provider.rstrip('/') + '/'
                if identity.startswith(openid_prefix):
                    raw_login = identity.replace(openid_prefix, '')
                    break
        elif auth_type == AUTH_OPENID_CONNECT:
            raw_login = identity

        if raw_login and not raw_login in openid_names:
            openid_names.append(raw_login)
        if email and not email in openid_names:
            openid_names.append(email)
        # TODO: Add additional ext oid/oidc provider ID aliases here?

    # By now the uploaded proxy file contents should be available in the form data

    proxy_content = accepted['proxy_upload'][-1]

    # keep comment to a single line

    comment = accepted['comment'][-1].replace('\n', '   ')

    # single quotes break command line format - remove

    comment = comment.replace("'", ' ')

    # TODO: improve and enforce full authsig from extoid/extoidc provider
    authsig_list = accepted.get('authsig', [])
    # if len(authsig_list) != 1:
    #    logger.warning('%s from %s got invalid authsig: %s' %
    #                   (op_name, client_id, authsig_list))

    user_dict = {
        'short_id': uniq_id,
        'full_name': full_name,
        'organization': org,
        'organizational_unit': org_unit,
        'locality': locality,
        'state': state,
        'country': country,
        'email': email,
        'role': role,
        'association': association,
        'timezone': timezone,
        'password': '',
        'comment': 'Signed up through autocreate with %s' % auth_type,
        'openid_names': openid_names,
    }
    user_dict.update(oid_extras)

    # We must receive some ID from the provider; otherwise we probably hit the
    # already-logged-in situation and must force an autologout first

    if not uniq_id and not email:
        if auth_type == AUTH_OPENID_V2 and identity and \
                accepted.get('openid.sreg.required', ''):
            logger.warning('autocreate forcing autologout for %s' % client_id)
            output_objects.append({'object_type': 'html_form',
                                   'text': '''<p class="spinner iconleftpad">
Auto log out first to avoid sign up problems ...
</p>'''})
            req_url = environ['SCRIPT_URI']
            html = \
                """
            <a id='autologout' href='%s'></a>
            <script type='text/javascript'>
                document.getElementById('autologout').click();
            </script>""" \
                % openid_autologout_url(configuration, identity,
                                        client_id, req_url, user_arguments_dict)
            output_objects.append({'object_type': 'html_form',
                                   'text': html})
        else:
            logger.warning('%s autocreate without ID refused for %s' %
                           (auth_type, client_id))

        return (output_objects, returnvalues.CLIENT_ERROR)

    # NOTE: Unfortunately external OpenID 2.0 redirect does not enforce POST
    # Extract helper environments from Apache to verify request authenticity

    redirector = environ.get('HTTP_REFERER', '')
    extoid_prefix = configuration.user_ext_oid_provider.replace('id/', '')
    # TODO: extend redirector check to match the full signup request?
    #       may not work with recent browser policy changes to limit referrer
    #       details on cross site requests.
    # NOTE: redirector check breaks for FF default policy so disabled again!
    if auth_flavor == AUTH_EXT_OID and redirector and \
            not redirector.startswith(extoid_prefix) and \
            not redirector.startswith(configuration.migserver_https_sid_url) \
            and not redirector.startswith(configuration.migserver_http_url) \
            and not redirector.startswith(get_site_base_url(configuration)):
        logger.error('stray %s autocreate rejected for %r (ref: %r)' %
                     (auth_flavor, client_id, redirector))
        output_objects.append({'object_type': 'error_text', 'text': '''Only
accepting authentic requests through %s OpenID 2.0''' %
                               configuration.user_ext_oid_title})
        return (output_objects, returnvalues.CLIENT_ERROR)
    elif auth_flavor != AUTH_EXT_OID and not safe_handler(
            configuration, 'post', op_name, client_id,
            get_csrf_limit(configuration), accepted):
        logger.error('unsafe %s autocreate rejected for %s' % (auth_flavor,
                                                               client_id))
        output_objects.append({'object_type': 'error_text', 'text': '''Only
accepting CSRF-filtered POST requests to prevent unintended updates'''})
        return (output_objects, returnvalues.CLIENT_ERROR)

    if auth_flavor == AUTH_EXT_CERT:
        ext_login_title = "%s certificate" % configuration.user_ext_cert_title
        personal_page_url = configuration.migserver_https_ext_cert_url
        # TODO: consider limiting expire to real cert expire if before default?
        user_dict['expire'] = default_account_expire(configuration,
                                                     AUTH_CERTIFICATE)
        try:
            distinguished_name_to_user(uniq_id)
            user_dict['distinguished_name'] = uniq_id
        except Exception:
            logger.error('%s autocreate with bad DN refused for %s' %
                         (auth_flavor, client_id))
            output_objects.append({'object_type': 'error_text',
                                   'text': '''Illegal Distinguished name:
Please note that the distinguished name must be a valid certificate DN with
multiple "key=val" fields separated by "/".
'''})
            return (output_objects, returnvalues.CLIENT_ERROR)
    elif auth_flavor == AUTH_EXT_OID:
        ext_login_title = "%s login" % configuration.user_ext_oid_title
        personal_page_url = configuration.migserver_https_ext_oid_url
        user_dict['expire'] = default_account_expire(configuration,
                                                     AUTH_OPENID_V2)
        fill_distinguished_name(user_dict)
        uniq_id = user_dict['distinguished_name']
    elif auth_flavor == AUTH_EXT_OIDC:
        ext_login_title = "%s login" % configuration.user_ext_oid_title
        personal_page_url = configuration.migserver_https_ext_oidc_url
        user_dict['expire'] = default_account_expire(configuration,
                                                     AUTH_OPENID_CONNECT)
        fill_distinguished_name(user_dict)
        uniq_id = user_dict['distinguished_name']
    else:
        # Reject the migX sign up methods through this handler
        logger.error('%s autocreate not supported for %s - only ext auth' %
                     (auth_flavor, client_id))
        output_objects.append({'object_type': 'error_text', 'text': '''
Unsupported %s sign up method - you should sign up through the official
sign up wrappers or go through the dedicated web form for %s.''' %
                               (auth_type, auth_flavor)})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # IMPORTANT: do NOT let a user create with ID different from client_id
    if auth_type == AUTH_CERTIFICATE and client_id != uniq_id:
        logger.error('refusing autocreate invalid user for %s: %s' %
                     (client_id, user_dict))
        output_objects.append({'object_type': 'error_text', 'text': '''Only
accepting create matching supplied ID!'''})
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not accept_terms:
        output_objects.append({'object_type': 'error_text', 'text':
                               'You must accept the terms of use in sign up!'})
        output_objects.append(
            {'object_type': 'link', 'destination': 'javascript:history.back();',
             'class': 'genericbutton', 'text': "Try again"})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Save auth access method

    user_dict['auth'] = user_dict.get('auth', None)
    if not user_dict['auth']:
        user_dict['auth'] = []
    elif isinstance(user_dict['auth'], basestring):
        user_dict['auth'] = [user_dict['auth']]
    user_dict['auth'].append(auth_flavor)

    fill_helper = {'short_title': configuration.short_title,
                   'base_url': base_url, 'admin_email': admin_email,
                   'ext_login_title': ext_login_title,
                   'front_page_url': get_site_base_url(configuration),
                   'personal_page_url': personal_page_url}
    fill_helper.update(user_dict)

    # If the server allows automatic addition of users with a CA-validated
    # cert we create the user immediately and skip the request mail to admins

    if auth_type == AUTH_CERTIFICATE and configuration.auto_add_cert_user \
            or auth_type == AUTH_OPENID_V2 and \
            configuration.auto_add_oid_user \
            or auth_type == AUTH_OPENID_CONNECT and \
            configuration.auto_add_oid_user:
        fill_user(user_dict)

        logger.info('create user: %s' % user_dict)

        # Now all user fields are set and we can begin adding the user

        db_path = os.path.join(configuration.mig_server_home,
                               user_db_filename)
        try:
            create_user(user_dict, configuration.config_file, db_path,
                        ask_renew=False, default_renew=True)
            if configuration.site_enable_griddk \
                    and accepted['proxy_upload'] != ['']:

                # save the file, display expiration date

                proxy_out = handle_proxy(proxy_content, uniq_id,
                                         configuration)
                output_objects.extend(proxy_out)
        except Exception as err:
            logger.error('create failed for %s: %s' % (uniq_id, err))
            output_objects.append({'object_type': 'error_text', 'text': '''
Could not create the user account for you:
Please report this problem to the site administrators (%(admin_email)s).'''
                                   % fill_helper})
            return (output_objects, returnvalues.SYSTEM_ERROR)

        logger.info('created user account for %s' % uniq_id)

        email_header = 'Welcome to %s' % configuration.short_title
        email_msg = """Hi and welcome to %(short_title)s!

Your account sign up succeeded and you can now log in to your account using
your %(ext_login_title)s from
%(front_page_url)s
There you'll also find further information about making the most of
%(short_title)s, including a user guide and answers to Frequently Asked
Questions, plus site status and support information.
You're welcome to contact us with questions or comments using the contact
details there and in the footer of your personal %(short_title)s pages.

Please note that by signing up and using %(short_title)s you also formally
accept the site Terms of Use, which you'll always find in the current form at
%(front_page_url)s/terms.html

All the best,
The %(short_title)s Admins
""" % fill_helper

        logger.info('Send email: to: %s, header: %s, msg: %s, smtp_server: %s'
                    % (email, email_header, email_msg, smtp_server))
        if not send_email(email, email_header, email_msg, logger,
                          configuration):
            output_objects.append({
                'object_type': 'error_text', 'text': """An error occured trying
to send your account welcome email. Please inform the site admins (%s) manually
and include the session ID: %s""" % (admin_email, tmp_id)})
            return (output_objects, returnvalues.SYSTEM_ERROR)

        logger.info('sent welcome email for %s to %s' % (uniq_id, email))

        output_objects.append({'object_type': 'html_form', 'text': """
<p>Creating your %(short_title)s user account and sending welcome email ... </p>
<p class='spinner iconleftpad'>
redirecting to your <a href='%(personal_page_url)s'> personal pages </a> in a
moment.
</p>
<script type='text/javascript'>
    setTimeout(function() {location.href='%(personal_page_url)s';}, 3000);
</script>
""" % fill_helper})
        return (output_objects, returnvalues.OK)

    else:
        logger.warning('autocreate disabled and refused for %s' % client_id)
        output_objects.append({
            'object_type': 'error_text', 'text': """Automatic user creation
disabled on this site. Please contact the site admins (%(admin_email)s) if you
think it should be enabled.
""" % fill_helper})
        return (output_objects, returnvalues.ERROR)
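
The account auto-creation branch above fills a single fill_helper dictionary, merges in the user record, and then resolves every %(key)s placeholder in the welcome mail and the HTML redirect in one pass. A minimal sketch of that template pattern, using hypothetical field values rather than a real configuration, looks like this:

# Minimal sketch of the fill_helper pattern above (hypothetical values).
fill_helper = {'short_title': 'MySite',
               'ext_login_title': 'university login',
               'front_page_url': 'https://www.example.org',
               'personal_page_url': 'https://ext.example.org'}
# Merge in the user record so its fields can be used in the templates too.
fill_helper.update({'full_name': 'Jane Doe', 'email': 'jane@example.org'})
email_msg = """Hi and welcome to %(short_title)s!

Your account sign up succeeded and you can now log in using your
%(ext_login_title)s from
%(front_page_url)s
""" % fill_helper
print(email_msg)
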
Example #24
0
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    defaults = signature()[1]
    status = returnvalues.OK
    output_objects.append({
        'object_type': 'header',
        'text': 'Add Resource Owner(s)'
    })
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    unique_resource_name = accepted['unique_resource_name'][-1].strip()
    cert_id_list = accepted['cert_id']
    request_name = unhexlify(accepted['request_name'][-1])

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not is_owner(client_id, unique_resource_name,
                    configuration.resource_home, logger):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'You must be an owner of %s to add a new owner!' %
            unique_resource_name
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # is_owner incorporates unique_resource_name verification - no need to
    # specifically check for illegal directory traversal

    cert_id_added = []
    for cert_id in cert_id_list:
        cert_id = cert_id.strip()
        if not cert_id:
            continue
        if not is_user(cert_id, configuration.mig_server_home):
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                '%s is not a valid %s user!' %
                (cert_id, configuration.short_title)
            })
            status = returnvalues.CLIENT_ERROR
            continue

        # don't add if already an owner

        if resource_is_owner(unique_resource_name, cert_id, configuration):
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                '%s is already an owner of %s.' %
                (cert_id, unique_resource_name)
            })
            status = returnvalues.CLIENT_ERROR
            continue

        # Add owner

        (add_status, add_msg) = resource_add_owners(configuration,
                                                    unique_resource_name,
                                                    [cert_id])
        if not add_status:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Could not add new owner, reason: %s' % add_msg
            })
            status = returnvalues.SYSTEM_ERROR
            continue
        cert_id_added.append(cert_id)

    if request_name:
        request_dir = os.path.join(configuration.resource_home,
                                   unique_resource_name)
        if not delete_access_request(configuration, request_dir, request_name):
            logger.error("failed to delete owner request for %s in %s" % \
                         (unique_resource_name, request_name))
            output_objects.append({
                'object_type': 'error_text', 'text':
                'Failed to remove saved request for %s in %s!' % \
                (unique_resource_name, request_name)})

    if cert_id_added:
        output_objects.append({
            'object_type':
            'html_form',
            'text':
            'New owner(s)<br/>%s<br/>successfully added to %s!' %
            ('<br />'.join(cert_id_added), unique_resource_name)
        })
        cert_id_fields = ''
        for cert_id in cert_id_added:
            cert_id_fields += """<input type=hidden name=cert_id value='%s' />
""" % cert_id

        form_method = 'post'
        csrf_limit = get_csrf_limit(configuration)
        fill_helpers = {
            'res_id': unique_resource_name,
            'cert_id_fields': cert_id_fields,
            'any_protocol': any_protocol,
            'form_method': form_method,
            'csrf_field': csrf_field,
            'csrf_limit': csrf_limit
        }
        target_op = 'sendrequestaction'
        csrf_token = make_csrf_token(configuration, form_method, target_op,
                                     client_id, csrf_limit)
        fill_helpers.update({'target_op': target_op, 'csrf_token': csrf_token})
        output_objects.append({
            'object_type':
            'html_form',
            'text':
            """
<form method='%(form_method)s' action='%(target_op)s.py'>
<input type='hidden' name='%(csrf_field)s' value='%(csrf_token)s' />
<input type=hidden name=request_type value='resourceaccept' />
<input type=hidden name=unique_resource_name value='%(res_id)s' />
%(cert_id_fields)s
<input type=hidden name=protocol value='%(any_protocol)s' />
<table>
<tr>
<td class='title'>Custom message to user(s)</td>
</tr>
<tr>
<td><textarea name=request_text cols=72 rows=10>
We have granted you ownership access to our %(res_id)s resource.
You can access the resource administration page from the Resources page.

Regards, the %(res_id)s resource owners
</textarea></td>
</tr>
<tr>
<td><input type='submit' value='Inform user(s)' /></td>
</tr>
</table>
</form>
<br />
""" % fill_helpers
        })

    output_objects.append({'object_type': 'link', 'destination':
                           'resadmin.py?unique_resource_name=%s' % \
                           unique_resource_name, 'class':
                           'adminlink iconspace', 'title':
                           'Administrate resource', 'text': 'Manage resource'})
    return (output_objects, status)
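
Example #24 ends by emitting a follow-up sendrequestaction form that carries a hidden CSRF token produced by make_csrf_token from the form method, target operation, client ID and CSRF limit, and later verified by safe_handler on the POST. The underlying idea can be illustrated with a simplified stand-in based on plain hmac; the secret, inputs and function below are illustrative only, not the project's actual helper:

import hashlib
import hmac

def make_token_sketch(secret, method, target_op, client_id, limit):
    """Simplified stand-in for make_csrf_token: bind the token to the HTTP
    method, destination script, client identity and a rotating limit."""
    msg = '%s:%s:%s:%s' % (method, target_op, client_id, limit)
    return hmac.new(secret.encode(), msg.encode(), hashlib.sha256).hexdigest()

# Identical inputs yield an identical token, so the receiving handler can
# recompute and compare it when the form comes back.
print(make_token_sketch('site secret', 'post', 'sendrequestaction',
                        '/C=DK/CN=Jane Doe', 3600))
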
Example #25
0
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
        # NOTE: path cannot use wildcards here
        typecheck_overrides={},
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    path = accepted['path'][-1]
    chosen_newline = accepted['newline'][-1]
    submitjob = accepted['submitjob'][-1]

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not configuration.site_enable_jobs and submitjob:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Job execution is not enabled on this system'''
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name

    base_dir = os.path.abspath(
        os.path.join(configuration.user_home, client_dir)) + os.sep

    # HTML spec dictates newlines in forms to be MS style (\r\n)
    # rather than un*x style (\n): change if requested.

    form_newline = '\r\n'
    allowed_newline = {'unix': '\n', 'mac': '\r', 'windows': '\r\n'}
    output_objects.append({
        'object_type': 'header',
        'text': 'Saving changes to edited file'
    })

    if not chosen_newline in allowed_newline.keys():
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Unsupported newline style supplied: %s (must be one of %s)' %
            (chosen_newline, ', '.join(allowed_newline.keys()))
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    saved_newline = allowed_newline[chosen_newline]

    # Check directory traversal attempts before actual handling to avoid
    # leaking information about file system layout while allowing consistent
    # error messages

    abs_path = ''
    unfiltered_match = glob.glob(base_dir + path)
    for server_path in unfiltered_match:
        # IMPORTANT: path must be expanded to abs for proper chrooting
        abs_path = os.path.abspath(server_path)
        if not valid_user_path(configuration, abs_path, base_dir, True):
            logger.warning('%s tried to %s restricted path %s ! (%s)' %
                           (client_id, op_name, abs_path, path))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                "Invalid path! (%s expands to an illegal path)" % path
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

    if abs_path == '':
        # IMPORTANT: path must be expanded to abs for proper chrooting
        abs_path = os.path.abspath(os.path.join(base_dir, path.lstrip(os.sep)))
        if not valid_user_path(configuration, abs_path, base_dir, True):
            logger.warning('%s tried to %s restricted path %s ! (%s)' %
                           (client_id, op_name, abs_path, path))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                "Invalid path! (%s expands to an illegal path)" % path
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

    if not check_write_access(abs_path, parent_dir=True):
        logger.warning('%s called without write access: %s' %
                       (op_name, abs_path))
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'cannot edit "%s": inside a read-only location!' % path
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    (owner, time_left) = acquire_edit_lock(abs_path, client_id)
    if owner != client_id:
        output_objects.append({
            'object_type': 'error_text',
            'text': "You don't have the lock for %s!" % path
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    try:
        fh = open(abs_path, 'w+')
        fh.write(user_arguments_dict['editarea'][0].replace(
            form_newline, saved_newline))
        fh.close()

        # everything ok

        output_objects.append({
            'object_type': 'text',
            'text': 'Saved changes to %s.' % path
        })
        logger.info('saved changes to %s' % path)
        release_edit_lock(abs_path, client_id)
    except Exception as exc:

        # Don't give away information about actual fs layout

        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '%s could not be written! (%s)' %
            (path, str(exc).replace(base_dir, ''))
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)
    if submitjob:
        output_objects.append({
            'object_type': 'text',
            'text': 'Submitting saved file to parser'
        })
        submitstatus = {'object_type': 'submitstatus', 'name': path}
        (new_job_status, msg, job_id) = new_job(abs_path, client_id,
                                                configuration, False, True)
        if not new_job_status:
            submitstatus['status'] = False
            submitstatus['message'] = msg
        else:
            submitstatus['status'] = True
            submitstatus['job_id'] = job_id

        output_objects.append({
            'object_type': 'submitstatuslist',
            'submitstatuslist': [submitstatus]
        })

    output_objects.append({
        'object_type': 'link',
        'destination': 'javascript:history.back()',
        'class': 'backlink iconspace',
        'title': 'Go back to previous page',
        'text': 'Back to previous page'
    })

    return (output_objects, returnvalues.OK)
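
Browsers submit textarea content with MS-style \r\n line endings, so Example #25 rewrites the form newlines to whichever style the user selected before saving the file. Stripped of the surrounding validation, the conversion is a single string replace against the allowed_newline table:

# Convert the CRLF newlines a browser sends into the user's chosen style.
form_newline = '\r\n'
allowed_newline = {'unix': '\n', 'mac': '\r', 'windows': '\r\n'}
submitted = 'first line\r\nsecond line\r\n'
chosen_newline = 'unix'
saved = submitted.replace(form_newline, allowed_newline[chosen_newline])
assert saved == 'first line\nsecond line\n'
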
Example #26
0
def main(client_id, user_arguments_dict, environ=None):
    """Main function used by front end"""

    if environ is None:
        environ = os.environ

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False,
                                  op_menu=client_id)
    client_dir = client_id_dir(client_id)
    status = returnvalues.OK
    defaults = signature()[1]
    (validate_status, accepted) = validate_input(
        user_arguments_dict,
        defaults,
        output_objects,
        allow_rejects=False,
        # NOTE: path can use wildcards
        typecheck_overrides={'path': valid_path_pattern},
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    flags = ''.join(accepted['flags'])
    pattern_list = accepted['path']
    iosessionid = accepted['iosessionid'][-1]
    share_id = accepted['share_id'][-1]

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Either authenticated user client_id set or sharelink ID
    if client_id:
        user_id = client_id
        target_dir = client_id_dir(client_id)
        base_dir = configuration.user_home
        id_query = ''
        page_title = 'Remove User File'
        if force(flags):
            rm_helper = delete_path
        else:
            rm_helper = remove_path
        userstyle = True
        widgets = True
    elif share_id:
        try:
            (share_mode, _) = extract_mode_id(configuration, share_id)
        except ValueError as err:
            logger.error('%s called with invalid share_id %s: %s' %
                         (op_name, share_id, err))
            output_objects.append({
                'object_type': 'error_text',
                'text': 'Invalid sharelink ID: %s' % share_id
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        # TODO: load and check sharelink pickle (currently requires client_id)
        user_id = 'anonymous user through share ID %s' % share_id
        if share_mode == 'read-only':
            logger.error('%s called without write access: %s' %
                         (op_name, accepted))
            output_objects.append({
                'object_type': 'error_text',
                'text': 'No write access!'
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        target_dir = os.path.join(share_mode, share_id)
        base_dir = configuration.sharelink_home
        id_query = '?share_id=%s' % share_id
        page_title = 'Remove Shared File'
        rm_helper = delete_path
        userstyle = False
        widgets = False
    elif iosessionid.strip() and iosessionid.isalnum():
        user_id = iosessionid
        base_dir = configuration.webserver_home
        target_dir = iosessionid
        page_title = 'Remove Session File'
        rm_helper = delete_path
        userstyle = False
        widgets = False
    else:
        logger.error('%s called without proper auth: %s' % (op_name, accepted))
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Authentication is missing!'
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name

    base_dir = os.path.abspath(os.path.join(base_dir, target_dir)) + os.sep

    title_entry = find_entry(output_objects, 'title')
    title_entry['text'] = page_title
    title_entry['skipwidgets'] = not widgets
    title_entry['skipuserstyle'] = not userstyle
    output_objects.append({'object_type': 'header', 'text': page_title})

    logger.debug("%s: with paths: %s" % (op_name, pattern_list))

    # Input validation assures target_dir can't escape base_dir
    if not os.path.isdir(base_dir):
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Invalid client/sharelink/session id!'
        })
        logger.warning('%s used %s with invalid base dir: %s' %
                       (user_id, op_name, base_dir))
        return (output_objects, returnvalues.CLIENT_ERROR)

    if verbose(flags):
        for flag in flags:
            output_objects.append({
                'object_type': 'text',
                'text': '%s using flag: %s' % (op_name, flag)
            })

    for pattern in pattern_list:

        # Check directory traversal attempts before actual handling to avoid
        # leaking information about file system layout while allowing
        # consistent error messages

        unfiltered_match = glob.glob(base_dir + pattern)
        match = []
        for server_path in unfiltered_match:
            # IMPORTANT: path must be expanded to abs for proper chrooting
            abs_path = os.path.abspath(server_path)
            if not valid_user_path(configuration, abs_path, base_dir, True):

                # out of bounds - save user warning for later to allow
                # partial match:
                # ../*/* is technically allowed to match own files.

                logger.warning('%s tried to %s restricted path %s ! (%s)' %
                               (client_id, op_name, abs_path, pattern))
                continue
            match.append(abs_path)

        # Now actually treat list of allowed matchings and notify if no
        # (allowed) match

        if not match:
            logger.warning("%s: no matching paths: %s" %
                           (op_name, pattern_list))
            output_objects.append({
                'object_type': 'file_not_found',
                'name': pattern
            })
            status = returnvalues.FILE_NOT_FOUND

        for abs_path in match:
            real_path = os.path.realpath(abs_path)
            relative_path = abs_path.replace(base_dir, '')
            if verbose(flags):
                output_objects.append({
                    'object_type': 'file',
                    'name': relative_path
                })

            # Make it harder to accidentally delete too much - e.g. do not
            # delete VGrid files without explicit selection of subdir contents

            if abs_path == os.path.abspath(base_dir):
                logger.error("%s: refusing rm home dir: %s" %
                             (op_name, abs_path))
                output_objects.append({
                    'object_type':
                    'warning',
                    'text':
                    "You're not allowed to delete your entire home directory!"
                })
                status = returnvalues.CLIENT_ERROR
                continue
            # Generally refuse handling symlinks including root vgrid shares
            elif os.path.islink(abs_path):
                logger.error("%s: refusing rm link: %s" % (op_name, abs_path))
                output_objects.append({
                    'object_type':
                    'warning',
                    'text':
                    """
You're not allowed to delete entire special folders like %s shares and %s
""" % (configuration.site_vgrid_label, trash_linkname)
                })
                status = returnvalues.CLIENT_ERROR
                continue
            # Additionally refuse operations on inherited subvgrid share roots
            elif in_vgrid_share(configuration, abs_path) == relative_path:
                output_objects.append({
                    'object_type':
                    'warning',
                    'text':
                    """You're not allowed to
remove entire %s shared folders!""" % configuration.site_vgrid_label
                })
                status = returnvalues.CLIENT_ERROR
                continue
            elif os.path.isdir(abs_path) and not recursive(flags):
                logger.error("%s: non-recursive call on dir '%s'" %
                             (op_name, abs_path))
                output_objects.append({
                    'object_type':
                    'error_text',
                    'text':
                    "cannot remove '%s': is a direcory" % relative_path
                })
                status = returnvalues.CLIENT_ERROR
                continue
            trash_base = get_trash_location(configuration, abs_path)
            if not trash_base and not force(flags):
                logger.error("%s: no trash for dir '%s'" % (op_name, abs_path))
                output_objects.append({
                    'object_type':
                    'error_text',
                    'text':
                    "No trash enabled for '%s' - read-only?" % relative_path
                })
                status = returnvalues.CLIENT_ERROR
                continue
            try:
                if rm_helper == remove_path and \
                    os.path.commonprefix([real_path, trash_base]) \
                        == trash_base:
                    logger.warning("%s: already in trash: '%s'" %
                                   (op_name, real_path))
                    output_objects.append({
                        'object_type':
                        'error_text',
                        'text':
                        """
'%s' is already in trash - no action: use force flag to permanently delete""" %
                        relative_path
                    })
                    status = returnvalues.CLIENT_ERROR
                    continue
            except Exception as err:
                logger.error("%s: check trash failed: %s" % (op_name, err))
                continue
            if not check_write_access(abs_path):
                logger.warning('%s called without write access: %s' %
                               (op_name, abs_path))
                output_objects.append({
                    'object_type':
                    'error_text',
                    'text':
                    'cannot remove "%s": inside a read-only location!' %
                    pattern
                })
                status = returnvalues.CLIENT_ERROR
                continue

            # TODO: limit delete in vgrid share trash to vgrid owners / conf?
            #       ... malicious members can still e.g. truncate all files.
            #       we could consider removing write bit on move to trash.
            # TODO: user setting to switch on/off trash?
            # TODO: add direct delete checkbox in fileman move to trash dialog?
            # TODO: add empty trash option for Trash?
            # TODO: user settings to define read-only and auto-expire in trash?
            # TODO: add trash support for sftp/ftps/webdavs?

            gdp_iolog_action = 'deleted'
            gdp_iolog_paths = [relative_path]
            if rm_helper == remove_path:
                gdp_iolog_action = 'moved'
                trash_base_path = \
                    get_trash_location(configuration, abs_path, True)
                trash_relative_path = \
                    trash_base_path.replace(configuration.user_home, '')
                trash_relative_path = \
                    trash_relative_path.replace(
                        configuration.vgrid_files_home, '')
                gdp_iolog_paths.append(trash_relative_path)
            try:
                gdp_iolog(configuration, client_id, environ['REMOTE_ADDR'],
                          gdp_iolog_action, gdp_iolog_paths)
                gdp_iolog_status = True
            except GDPIOLogError as exc:
                gdp_iolog_status = False
                rm_err = [str(exc)]
            rm_status = False
            if gdp_iolog_status:
                (rm_status, rm_err) = rm_helper(configuration, abs_path)
            if not rm_status or not gdp_iolog_status:
                if gdp_iolog_status:
                    gdp_iolog(configuration,
                              client_id,
                              environ['REMOTE_ADDR'],
                              gdp_iolog_action,
                              gdp_iolog_paths,
                              failed=True,
                              details=rm_err)
                logger.error("%s: failed on '%s': %s" %
                             (op_name, abs_path, ', '.join(rm_err)))
                output_objects.append({
                    'object_type':
                    'error_text',
                    'text':
                    "remove '%s' failed: %s" %
                    (relative_path, '. '.join(rm_err))
                })
                status = returnvalues.SYSTEM_ERROR
                continue
            logger.info("%s: successfully (re)moved %s" % (op_name, abs_path))
            output_objects.append({
                'object_type': 'text',
                'text': "removed %s" % (relative_path)
            })

    output_objects.append({
        'object_type': 'link',
        'destination': 'ls.py%s' % id_query,
        'text': 'Return to files overview'
    })
    return (output_objects, status)
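
Example #26 above, like several of the other handlers here, repeats the same chroot-style check: base_dir is forced to end in a slash, every glob match is expanded with os.path.abspath, and anything resolving outside base_dir is rejected. Without the trailing separator a home like /home/jane would also match /home/janedoe. A simplified stand-in for valid_user_path (prefix check only, hypothetical paths) illustrates the point:

import os

def inside_home_sketch(abs_path, base_dir):
    """Simplified stand-in for valid_user_path: only accept paths that
    stay below the user's own home directory."""
    return abs_path.startswith(base_dir)

# The trailing os.sep is what keeps /home/jane from matching /home/janedoe.
base_dir = os.path.abspath('/home/jane') + os.sep
print(inside_home_sketch(os.path.abspath('/home/jane/file.txt'), base_dir))
print(inside_home_sketch(os.path.abspath('/home/janedoe/file.txt'), base_dir))
print(inside_home_sketch(os.path.abspath('/home/jane/../janedoe/x'), base_dir))
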
Example #27
0
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id)
    client_dir = client_id_dir(client_id)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
        # NOTE: path can use wildcards, dst and current_dir cannot
        typecheck_overrides={'path': valid_path_pattern},
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    flags = ''.join(accepted['flags'])
    pattern_list = accepted['path']
    dst = accepted['dst'][-1]
    current_dir = accepted['current_dir'][-1].lstrip(os.sep)

    # All paths are relative to current_dir

    pattern_list = [os.path.join(current_dir, i) for i in pattern_list]
    if dst:
        dst = os.path.join(current_dir, dst)

    # Please note that base_dir must end in slash to avoid access to other
    # user dirs when own name is a prefix of another user name

    base_dir = os.path.abspath(
        os.path.join(configuration.user_home, client_dir)) + os.sep

    status = returnvalues.OK

    if verbose(flags):
        for flag in flags:
            output_objects.append({
                'object_type': 'text',
                'text': '%s using flag: %s' % (op_name, flag)
            })

    # IMPORTANT: path must be expanded to abs for proper chrooting
    abs_dir = os.path.abspath(
        os.path.join(base_dir, current_dir.lstrip(os.sep)))
    if not valid_user_path(configuration, abs_dir, base_dir, True):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            "You're not allowed to work in %s!" % current_dir
        })
        logger.warning('%s tried to %s restricted path %s ! (%s)' %
                       (client_id, op_name, abs_dir, current_dir))
        return (output_objects, returnvalues.CLIENT_ERROR)

    if verbose(flags):
        output_objects.append({
            'object_type': 'text',
            'text': "working in %s" % current_dir
        })

    if dst:
        if not safe_handler(configuration, 'post', op_name, client_id,
                            get_csrf_limit(configuration), accepted):
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                '''Only accepting
                CSRF-filtered POST requests to prevent unintended updates'''
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        # NOTE: dst already incorporates current_dir prefix here
        # IMPORTANT: path must be expanded to abs for proper chrooting
        abs_dest = os.path.abspath(os.path.join(base_dir, dst))
        logger.info('write disk use summary in %s' % abs_dest)

        # Don't use abs_path in output as it may expose underlying
        # fs layout.

        relative_dest = abs_dest.replace(base_dir, '')
        if not valid_user_path(configuration, abs_dest, base_dir, True):
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                "Invalid path! (%s expands to an illegal path)" % dst
            })
            logger.warning('%s tried to %s restricted path %s ! (%s)' %
                           (client_id, op_name, abs_dest, dst))
            return (output_objects, returnvalues.CLIENT_ERROR)
        if not check_write_access(abs_dest, parent_dir=True):
            logger.warning('%s called without write access: %s' %
                           (op_name, abs_dest))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'cannot write disk use to "%s": inside a read-only location!' %
                relative_dest
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

    all_lines = []
    for pattern in pattern_list:

        # Check directory traversal attempts before actual handling to avoid
        # leaking information about file system layout while allowing
        # consistent error messages

        unfiltered_match = glob.glob(base_dir + pattern)
        match = []
        for server_path in unfiltered_match:
            # IMPORTANT: path must be expanded to abs for proper chrooting
            abs_path = os.path.abspath(server_path)
            if not valid_user_path(configuration, abs_path, base_dir, True):

                # out of bounds - save user warning for later to allow
                # partial match:
                # ../*/* is technically allowed to match own files.

                logger.warning('%s tried to %s restricted path %s ! (%s)' %
                               (client_id, op_name, abs_path, pattern))
                continue
            match.append(abs_path)

        # Now actually treat list of allowed matchings and notify if no
        # (allowed) match

        if not match:
            output_objects.append({
                'object_type': 'file_not_found',
                'name': pattern
            })
            status = returnvalues.FILE_NOT_FOUND

        # NOTE: we produce output matching an invocation of:
        # du -aL --apparent-size --block-size=1 PATH [PATH ...]
        filedus = []
        summarize_output = summarize(flags)
        for abs_path in match:
            if invisible_path(abs_path):
                continue
            relative_path = abs_path.replace(base_dir, '')
            # cache accumulated sub dir sizes - du sums into parent dir size
            dir_sizes = {}
            try:
                # Assume a directory to walk
                for (root, dirs, files) in walk(abs_path,
                                                topdown=False,
                                                followlinks=True):
                    if invisible_path(root):
                        continue
                    dir_bytes = 0
                    for name in files:
                        real_file = os.path.join(root, name)
                        if invisible_path(real_file):
                            continue
                        relative_file = real_file.replace(base_dir, '')
                        size = os.path.getsize(real_file)
                        dir_bytes += size
                        if not summarize_output:
                            filedus.append({
                                'object_type': 'filedu',
                                'name': relative_file,
                                'bytes': size
                            })
                    for name in dirs:
                        real_dir = os.path.join(root, name)
                        if invisible_path(real_dir):
                            continue
                        dir_bytes += dir_sizes[real_dir]
                    relative_root = root.replace(base_dir, '')
                    dir_bytes += os.path.getsize(root)
                    dir_sizes[root] = dir_bytes
                    if root == abs_path or not summarize_output:
                        filedus.append({
                            'object_type': 'filedu',
                            'name': relative_root,
                            'bytes': dir_bytes
                        })
                if os.path.isfile(abs_path):
                    # Fall back to plain file where walk is empty
                    size = os.path.getsize(abs_path)
                    filedus.append({
                        'object_type': 'filedu',
                        'name': relative_path,
                        'bytes': size
                    })
            except Exception as exc:
                output_objects.append({
                    'object_type':
                    'error_text',
                    'text':
                    "%s: '%s': %s" % (op_name, relative_path, exc)
                })
                logger.error("%s: failed on '%s': %s" %
                             (op_name, relative_path, exc))
                status = returnvalues.SYSTEM_ERROR
                continue
        if dst:
            all_lines += [
                '%(bytes)d\t\t%(name)s\n' % entry for entry in filedus
            ]
        else:
            output_objects.append({
                'object_type': 'filedus',
                'filedus': filedus
            })

    if dst and not write_file(''.join(all_lines), abs_dest, logger):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            "failed to write disk use to %s" % relative_dest
        })
        logger.error("writing disk use to %s for %s failed" %
                     (abs_dest, client_id))
        status = returnvalues.SYSTEM_ERROR

    return (output_objects, status)
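
The du-style accounting in Example #27 walks each matched path bottom-up (topdown=False), so when a directory is visited the sizes of all its subdirectories are already cached in dir_sizes and can be folded into the parent total. A rough standalone sketch of that accumulation, without the invisibility filtering and output objects, could look like:

import os

def du_sketch(top):
    """Bottom-up directory size accumulation in the style used above."""
    dir_sizes = {}
    for root, dirs, files in os.walk(top, topdown=False, followlinks=True):
        dir_bytes = 0
        for name in files:
            dir_bytes += os.path.getsize(os.path.join(root, name))
        for name in dirs:
            # Subdirectories were already visited thanks to topdown=False
            dir_bytes += dir_sizes.get(os.path.join(root, name), 0)
        dir_bytes += os.path.getsize(root)
        dir_sizes[root] = dir_bytes
    # Fall back to plain file size when the walk produced nothing
    return dir_sizes.get(top, os.path.getsize(top))

print(du_sketch('.'))
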
Example #28
0
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    defaults = signature()[1]
    title_entry = find_entry(output_objects, 'title')
    label = "%s" % configuration.site_vgrid_label
    title_entry['text'] = '%s send request' % configuration.short_title
    output_objects.append({
        'object_type': 'header',
        'text': '%s send request' % configuration.short_title
    })
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    target_id = client_id
    vgrid_name = accepted['vgrid_name'][-1].strip()
    visible_user_names = accepted['cert_id']
    visible_res_names = accepted['unique_resource_name']
    request_type = accepted['request_type'][-1].strip().lower()
    request_text = accepted['request_text'][-1].strip()
    protocols = [proto.strip() for proto in accepted['protocol']]
    use_any = False
    if any_protocol in protocols:
        use_any = True
        protocols = configuration.notify_protocols
    protocols = [proto.lower() for proto in protocols]

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    valid_request_types = [
        'resourceowner', 'resourceaccept', 'resourcereject', 'vgridowner',
        'vgridmember', 'vgridresource', 'vgridaccept', 'vgridreject', 'plain'
    ]
    if not request_type in valid_request_types:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '%s is not a valid request_type (valid types: %s)!' %
            (request_type.lower(), valid_request_types)
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not protocols:
        output_objects.append({
            'object_type': 'error_text',
            'text': 'No protocol specified!'
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    user_map = get_user_map(configuration)
    reply_to = user_map[client_id][USERID]
    # Try to point replies to client_id email
    client_email = extract_field(reply_to, 'email')

    if request_type == "plain":
        if not visible_user_names:
            output_objects.append({
                'object_type': 'error_text',
                'text': 'No user ID specified!'
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        user_id = visible_user_names[-1].strip()
        anon_map = anon_to_real_user_map(configuration)
        if user_id in anon_map:
            user_id = anon_map[user_id]
        if user_id not in user_map:
            output_objects.append({
                'object_type': 'error_text',
                'text': 'No such user: %s' % user_id
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        target_name = user_id
        user_dict = user_map[user_id]
        vgrid_access = user_vgrid_access(configuration, client_id)
        vgrids_allow_email = user_dict[CONF].get('VGRIDS_ALLOW_EMAIL', [])
        vgrids_allow_im = user_dict[CONF].get('VGRIDS_ALLOW_IM', [])
        if any_vgrid in vgrids_allow_email:
            email_vgrids = vgrid_access
        else:
            email_vgrids = set(vgrids_allow_email).intersection(vgrid_access)
        if any_vgrid in vgrids_allow_im:
            im_vgrids = vgrid_access
        else:
            im_vgrids = set(vgrids_allow_im).intersection(vgrid_access)
        if use_any:
            # Do not try disabled protocols if ANY was requested
            if not email_vgrids:
                protocols = [
                    proto for proto in protocols
                    if proto not in email_keyword_list
                ]
            if not im_vgrids:
                protocols = [
                    proto for proto in protocols if proto in email_keyword_list
                ]
        if not email_vgrids and [
                proto for proto in protocols if proto in email_keyword_list
        ]:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'You are not allowed to send emails to %s!' % user_id
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        if not im_vgrids and [
                proto for proto in protocols if proto not in email_keyword_list
        ]:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'You are not allowed to send instant messages to %s!' % user_id
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        for proto in protocols:
            if not user_dict[CONF].get(proto.upper(), False):
                if use_any:
                    # Remove missing protocols if ANY protocol was requested
                    protocols = [i for i in protocols if i != proto]
                else:
                    output_objects.append({
                        'object_type':
                        'error_text',
                        'text':
                        'User %s does not accept %s messages!' %
                        (user_id, proto)
                    })
                    return (output_objects, returnvalues.CLIENT_ERROR)
        if not protocols:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'User %s does not accept requested protocol(s) messages!' %
                user_id
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        target_list = [user_id]
    elif request_type in ["vgridaccept", "vgridreject"]:
        # Always allow accept messages but only between owners/members
        if not visible_user_names and not visible_res_names:
            output_objects.append({
                'object_type': 'error_text',
                'text': 'No user or resource ID specified!'
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        if not vgrid_name:
            output_objects.append({
                'object_type': 'error_text',
                'text': 'No vgrid_name specified!'
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        if vgrid_name.upper() == default_vgrid.upper():
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'No requests for %s are allowed!' % default_vgrid
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        if not vgrid_is_owner(vgrid_name, client_id, configuration):
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'You are not an owner of %s or a parent %s!' %
                (vgrid_name, label)
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        # NOTE: we support exactly one vgrid but multiple users/resources here
        if visible_user_names:
            logger.info("setting user recipients: %s" % visible_user_names)
            target_list = [user_id.strip() for user_id in visible_user_names]
        elif visible_res_names:
            # vgrid resource accept - lookup and notify resource owners
            logger.info("setting res owner recipients: %s" % visible_res_names)
            target_list = []
            for unique_resource_name in visible_res_names:
                logger.info("loading res owners for %s" % unique_resource_name)
                (load_status,
                 res_owners) = resource_owners(configuration,
                                               unique_resource_name)
                if not load_status:
                    output_objects.append({
                        'object_type':
                        'error_text',
                        'text':
                        'Could not lookup owners of %s!' % unique_resource_name
                    })
                    continue
                logger.info("adding res owners to recipients: %s" % res_owners)
                target_list += [user_id for user_id in res_owners]

        target_id = '%s %s owners' % (vgrid_name, label)
        target_name = vgrid_name
    elif request_type in ["resourceaccept", "resourcereject"]:
        # Always allow accept messages between actual resource owners
        if not visible_user_names:
            output_objects.append({
                'object_type': 'error_text',
                'text': 'No user ID specified!'
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        if not visible_res_names:
            output_objects.append({
                'object_type': 'error_text',
                'text': 'No resource ID specified!'
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        # NOTE: we support exactly one resource but multiple users here
        unique_resource_name = visible_res_names[-1].strip()
        target_name = unique_resource_name
        res_map = get_resource_map(configuration)
        if unique_resource_name not in res_map:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'No such resource: %s' % unique_resource_name
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        owners_list = res_map[unique_resource_name][OWNERS]
        if not client_id in owners_list:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'You are not an owner of %s!' % unique_resource_name
            })
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Invalid resource %s message!' % request_type
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        target_id = '%s resource owners' % unique_resource_name
        target_name = unique_resource_name
        target_list = [user_id.strip() for user_id in visible_user_names]
    elif request_type == "resourceowner":
        if not visible_res_names:
            output_objects.append({
                'object_type': 'error_text',
                'text': 'No resource ID specified!'
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        # NOTE: we support exactly one resource but multiple users here
        unique_resource_name = visible_res_names[-1].strip()
        anon_map = anon_to_real_res_map(configuration.resource_home)
        if unique_resource_name in anon_map:
            unique_resource_name = anon_map[unique_resource_name]
        target_name = unique_resource_name
        res_map = get_resource_map(configuration)
        if unique_resource_name not in res_map:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'No such resource: %s' % unique_resource_name
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        target_list = res_map[unique_resource_name][OWNERS]
        if client_id in target_list:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'You are already an owner of %s!' % unique_resource_name
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        request_dir = os.path.join(configuration.resource_home,
                                   unique_resource_name)
        access_request = {
            'request_type': request_type,
            'entity': client_id,
            'target': unique_resource_name,
            'request_text': request_text
        }
        if not save_access_request(configuration, request_dir, access_request):
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Could not save request - owners may still manually add you'
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)
    elif request_type in ["vgridmember", "vgridowner", "vgridresource"]:
        if not vgrid_name:
            output_objects.append({
                'object_type': 'error_text',
                'text': 'No vgrid_name specified!'
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        # default vgrid is read-only

        if vgrid_name.upper() == default_vgrid.upper():
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'No requests for %s are allowed!' % default_vgrid
            })
            return (output_objects, returnvalues.CLIENT_ERROR)

        # stop owner or member request if already an owner
        # and prevent repeated resource access requests

        if request_type == 'vgridresource':
            # NOTE: we support exactly one resource here
            unique_resource_name = visible_res_names[-1].strip()
            target_id = entity = unique_resource_name
            if vgrid_is_resource(vgrid_name, unique_resource_name,
                                 configuration):
                output_objects.append({
                    'object_type':
                    'error_text',
                    'text':
                    'You already have access to %s or a parent %s.' %
                    (vgrid_name, label)
                })
                return (output_objects, returnvalues.CLIENT_ERROR)
        else:
            target_id = entity = client_id
            if vgrid_is_owner(vgrid_name, client_id, configuration):
                output_objects.append({
                    'object_type':
                    'error_text',
                    'text':
                    'You are already an owner of %s or a parent %s!' %
                    (vgrid_name, label)
                })
                return (output_objects, returnvalues.CLIENT_ERROR)

        # only ownership requests are allowed for existing members

        if request_type == 'vgridmember':
            if vgrid_is_member(vgrid_name, client_id, configuration):
                output_objects.append({
                    'object_type':
                    'error_text',
                    'text':
                    'You are already a member of %s or a parent %s.' %
                    (vgrid_name, label)
                })
                return (output_objects, returnvalues.CLIENT_ERROR)

        # Find all VGrid owners configured to receive notifications

        target_name = vgrid_name
        (settings_status, settings_dict) = vgrid_settings(vgrid_name,
                                                          configuration,
                                                          recursive=True,
                                                          as_dict=True)
        if not settings_status:
            settings_dict = {}
        request_recipients = settings_dict.get('request_recipients',
                                               default_vgrid_settings_limit)
        # We load and use direct owners first if any - otherwise inherited
        owners_list = []
        for inherited in (False, True):
            (owners_status, owners_list) = vgrid_owners(vgrid_name,
                                                        configuration,
                                                        recursive=inherited)
            if not owners_status:
                output_objects.append({
                    'object_type':
                    'error_text',
                    'text':
                    'Failed to lookup owners for %s %s - sure it exists?' %
                    (vgrid_name, label)
                })
                return (output_objects, returnvalues.CLIENT_ERROR)
            elif owners_list:
                break
        if not owners_list:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Failed to lookup owners for %s %s - sure it exists?' %
                (vgrid_name, label)
            })
            return (output_objects, returnvalues.CLIENT_ERROR)
        # Now we have direct or inherited owners to notify
        target_list = owners_list[:request_recipients]

        request_dir = os.path.join(configuration.vgrid_home, vgrid_name)
        access_request = {
            'request_type': request_type,
            'entity': entity,
            'target': vgrid_name,
            'request_text': request_text
        }
        if not save_access_request(configuration, request_dir, access_request):
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Could not save request - owners may still manually add you'
            })
            return (output_objects, returnvalues.SYSTEM_ERROR)

    else:
        output_objects.append({
            'object_type': 'error_text',
            'text': 'Invalid request type: %s' % request_type
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Now send request to all targets in turn
    # TODO: inform requestor if no owners have mail/IM set in their settings

    logger.debug("sending notification to recipients: %s" % target_list)

    for target in target_list:

        if not target:
            logger.warning("skipping empty notify target: %s" % target_list)
            continue

        # USER_CERT entry is destination

        notify = []
        for proto in protocols:
            notify.append('%s: SETTINGS' % proto)
        job_dict = {
            'NOTIFY': notify,
            'JOB_ID': 'NOJOBID',
            'USER_CERT': target,
            'EMAIL_SENDER': client_email
        }

        notifier = notify_user_thread(
            job_dict,
            [target_id, target_name, request_type, request_text, reply_to],
            'SENDREQUEST',
            logger,
            '',
            configuration,
        )

        # Try finishing delivery but do not block forever on one message
        notifier.join(30)
    output_objects.append({
        'object_type':
        'text',
        'text':
        'Sent %s message to %d people' % (request_type, len(target_list))
    })
    im_notify_protocols = [
        i for i in configuration.notify_protocols if i != 'email'
    ]
    if im_notify_protocols:
        enabled_notify = 'IM / email'
    else:
        enabled_notify = 'email'
    output_objects.append({
        'object_type':
        'text',
        'text':
        """Please make sure you have %s notifications
configured on your Setings page if you expect a reply to this message""" %
        enabled_notify
    })

    return (output_objects, returnvalues.OK)
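The owner lookup in the example above prefers direct owners, only falls back to the recursively inherited ones when none exist, and then caps the notification fan-out at the request_recipients setting. A minimal sketch of that selection logic, assuming a hypothetical lookup_owners callable in place of vgrid_owners:

def pick_notify_targets(lookup_owners, request_recipients):
    """Return at most request_recipients owners, preferring direct ones."""
    owners = []
    for inherited in (False, True):
        (ok, owners) = lookup_owners(recursive=inherited)
        if not ok:
            raise LookupError('owner lookup failed - sure the vgrid exists?')
        if owners:
            break
    return owners[:request_recipients]

# Example: a vgrid with no direct owners falls back to the inherited ones.
def fake_lookup(recursive):
    return (True, ['/C=DK/CN=parent owner'] if recursive else [])

print(pick_notify_targets(fake_lookup, 42))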
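The delivery loop then starts one notifier thread per recipient and joins it with a 30 second timeout, so a single stuck message cannot stall the whole request. A rough stand-alone sketch of that pattern, with a hypothetical deliver function standing in for notify_user_thread:

import threading
import time

def deliver(target, message):
    """Stand-in for the real per-protocol notification delivery."""
    time.sleep(0.1)
    print('notified %s: %s' % (target, message))

def notify_all(targets, message, per_message_timeout=30):
    """Send one message per target, waiting at most per_message_timeout each."""
    for target in targets:
        worker = threading.Thread(target=deliver, args=(target, message))
        worker.daemon = True
        worker.start()
        # try finishing delivery but do not block forever on one message
        worker.join(per_message_timeout)

notify_all(['owner one', 'owner two'], 'access request received')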
Example #29
0
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    defaults = signature()[1]
    output_objects.append({
        'object_type': 'header',
        'text': 'Remove Resource Owner'
    })
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    unique_resource_name = accepted['unique_resource_name'][-1]
    cert_id = accepted['cert_id'][-1]

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    if not is_owner(client_id, unique_resource_name,
                    configuration.resource_home, logger):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'You must be an owner of %s to remove another owner!' %
            unique_resource_name
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # is_owner incorporates unique_resource_name verification - no need to
    # specifically check for illegal directory traversal

    if not is_user(cert_id, configuration.mig_server_home):
        output_objects.append({
            'object_type': 'error_text',
            'text': '%s is not a valid %s user!' %
            (cert_id, configuration.short_title)
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # reject remove if cert_id is not an owner

    if not resource_is_owner(unique_resource_name, cert_id, configuration):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '%s is not an owner of %s.' % (cert_id, unique_resource_name)
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # Remove owner

    (rm_status, rm_msg) = resource_remove_owners(configuration,
                                                 unique_resource_name,
                                                 [cert_id])
    if not rm_status:
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Could not remove owner, reason: %s' % rm_msg
        })
        return (output_objects, returnvalues.SYSTEM_ERROR)

    output_objects.append({
        'object_type':
        'text',
        'text':
        '%s was successfully removed and is no longer an owner of %s!' %
        (cert_id, unique_resource_name)
    })
    output_objects.append({
        'object_type': 'link',
        'destination': 'resadmin.py?unique_resource_name=%s' %
        unique_resource_name,
        'class': 'adminlink iconspace',
        'title': 'Administrate resource',
        'text': 'Manage resource'
    })
    return (output_objects, returnvalues.OK)
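Both this and the other backends here follow the same gate sequence: validate input, reject anything that is not a CSRF-filtered POST, check authorization, then perform the operation and report back via output_objects. A minimal, hypothetical sketch of that skeleton (the helper callables are stand-ins, not the real MiG functions):

OK, CLIENT_ERROR, SYSTEM_ERROR = 0, 1, 2

def run_backend(validate, csrf_ok, authorized, action):
    """Skeleton of the validate / CSRF-check / authorize / act flow."""
    output_objects = []
    if not validate():
        return (output_objects, CLIENT_ERROR)
    if not csrf_ok():
        output_objects.append({'object_type': 'error_text',
                               'text': 'Only accepting CSRF-filtered POSTs'})
        return (output_objects, CLIENT_ERROR)
    if not authorized():
        output_objects.append({'object_type': 'error_text',
                               'text': 'Permission denied'})
        return (output_objects, CLIENT_ERROR)
    (ok, msg) = action()
    if not ok:
        output_objects.append({'object_type': 'error_text', 'text': msg})
        return (output_objects, SYSTEM_ERROR)
    output_objects.append({'object_type': 'text', 'text': msg})
    return (output_objects, OK)

# Example run where every gate passes:
print(run_backend(lambda: True, lambda: True, lambda: True,
                  lambda: (True, 'owner removed')))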
Example #30
0
def main(client_id, user_arguments_dict):
    """Main function used by front end"""

    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    defaults = signature()[1]
    title_entry = find_entry(output_objects, 'title')
    label = "%s" % configuration.site_vgrid_label
    title_entry['text'] = "Add %s Owner" % label
    output_objects.append({
        'object_type': 'header',
        'text': 'Add %s Owner(s)' % label
    })
    status = returnvalues.OK
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
    )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    vgrid_name = accepted['vgrid_name'][-1].strip()
    cert_id_list = accepted['cert_id']
    request_name = unhexlify(accepted['request_name'][-1])
    rank_list = accepted['rank'] + ['' for _ in cert_id_list]
    # inherited vgrid membership
    inherit_vgrid_member = False

    if not safe_handler(configuration, 'post', op_name, client_id,
                        get_csrf_limit(configuration), accepted):
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            '''Only accepting
CSRF-filtered POST requests to prevent unintended updates'''
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    user_map = get_full_user_map(configuration)
    user_dict = user_map.get(client_id, None)
    # Optional site-wide limitation of manage vgrid permission
    if not user_dict or \
            not vgrid_manage_allowed(configuration, user_dict):
        logger.warning("user %s is not allowed to manage vgrids!" % client_id)
        output_objects.append({
            'object_type':
            'error_text',
            'text':
            'Only privileged users can manage %ss' % label
        })
        return (output_objects, returnvalues.CLIENT_ERROR)

    # make sure vgrid settings allow this owner to edit owners

    (allow_status, allow_msg) = allow_owners_adm(configuration, vgrid_name,
                                                 client_id)
    if not allow_status:
        output_objects.append({'object_type': 'error_text', 'text': allow_msg})
        return (output_objects, returnvalues.CLIENT_ERROR)

    cert_id_added = []
    for (cert_id, rank_str) in zip(cert_id_list, rank_list):
        cert_id = cert_id.strip()
        cert_dir = client_id_dir(cert_id)
        try:
            rank = int(rank_str)
        except ValueError:
            rank = None

        # Allow openid alias as subject if openid with alias is enabled
        if configuration.user_openid_providers and configuration.user_openid_alias:
            cert_id = expand_openid_alias(cert_id, configuration)

        # Validity of user and vgrid names is checked in this init function so
        # no need to worry about illegal directory traversal through variables

        (ret_val, msg, _) = \
            init_vgrid_script_add_rem(vgrid_name, client_id, cert_id,
                                      'owner', configuration)
        if not ret_val:
            output_objects.append({'object_type': 'error_text', 'text': msg})
            status = returnvalues.CLIENT_ERROR
            continue

        # don't add if already an owner unless rank is given

        if rank is None and vgrid_is_owner(vgrid_name, cert_id, configuration):
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                '%s is already an owner of %s or a parent %s.' %
                (cert_id, vgrid_name, label)
            })
            status = returnvalues.CLIENT_ERROR
            continue

        # don't add if already a direct member

        if vgrid_is_member(vgrid_name, cert_id, configuration,
                           recursive=False):
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                '%s is already a member of %s - please remove first.' %
                (cert_id, vgrid_name)
            })
            status = returnvalues.CLIENT_ERROR
            continue

        # owner of subvgrid?

        (list_status,
         subvgrids) = vgrid_list_subvgrids(vgrid_name, configuration)
        if not list_status:
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Error getting list of sub%ss: %s' % (label, subvgrids)
            })
            status = returnvalues.SYSTEM_ERROR
            continue

        skip_entity = False
        for subvgrid in subvgrids:
            if vgrid_is_owner(subvgrid,
                              cert_id,
                              configuration,
                              recursive=False):
                output_objects.append({
                    'object_type':
                    'error_text',
                    'text':
                    """%s is already an owner of a sub-%s ('%s'). Please
remove the person first and then try this operation again.""" %
                    (cert_id, label, subvgrid)
                })
                status = returnvalues.CLIENT_ERROR
                skip_entity = True
                break
            if vgrid_is_member(subvgrid,
                               cert_id,
                               configuration,
                               recursive=False):
                output_objects.append({
                    'object_type':
                    'error_text',
                    'text':
                    """%s is already a member of a sub-%s ('%s'). Please
remove the person first and then try this operation again.""" %
                    (cert_id, label, subvgrid)
                })
                status = returnvalues.CLIENT_ERROR
                skip_entity = True
                break
        if skip_entity:
            continue

        # we DO allow ownership for members of a parent vgrid - just handle it with care

        if vgrid_is_member(vgrid_name, cert_id, configuration):
            # list is in top-down order
            parent_vgrids = vgrid_list_parents(vgrid_name, configuration)
            inherit_vgrid_member = vgrid_name
            for parent in parent_vgrids:
                if vgrid_is_member(parent,
                                   cert_id,
                                   configuration,
                                   recursive=False):
                    inherit_vgrid_member = parent
                    break
            output_objects.append({
                'object_type':
                'text',
                'text':
                '''NOTE: %s is already a member of parent %s %s.''' %
                (cert_id, label, inherit_vgrid_member)
            })

        # Check if only rank change was requested and apply if so

        if rank is not None:
            (add_status, add_msg) = vgrid_add_owners(configuration,
                                                     vgrid_name, [cert_id],
                                                     rank=rank)
            if not add_status:
                output_objects.append({
                    'object_type': 'error_text',
                    'text': add_msg
                })
                status = returnvalues.SYSTEM_ERROR
            else:
                output_objects.append({
                    'object_type':
                    'text',
                    'text':
                    'changed %s to owner %d' % (cert_id, rank)
                })
            # No further action after rank change as everything else exists
            continue

        # Getting here means cert_id is not an owner of any parent or child
        # vgrid. They may still be a member of a parent vgrid, but not of a
        # child vgrid.

        public_base_dir = \
            os.path.abspath(os.path.join(configuration.vgrid_public_base,
                                         vgrid_name)) + os.sep
        private_base_dir = \
            os.path.abspath(os.path.join(configuration.vgrid_private_base,
                                         vgrid_name)) + os.sep

        # Please note that base_dir must end in slash to avoid access to other
        # user dirs when own name is a prefix of another user name

        user_dir = os.path.abspath(
            os.path.join(configuration.user_home, cert_dir)) + os.sep

        user_public_base = os.path.abspath(
            os.path.join(user_dir, 'public_base')) + os.sep
        user_private_base = os.path.abspath(
            os.path.join(user_dir, 'private_base')) + os.sep

        # make sure all dirs can be created (i.e. that no file or directory
        # with the same name exists prior to adding the owner)

        if os.path.exists(user_public_base + vgrid_name):
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                '''Could not add owner, a file or directory in public_base
exists with the same name! %s''' % (user_public_base + vgrid_name)
            })
            status = returnvalues.CLIENT_ERROR
            continue

        if os.path.exists(user_private_base + vgrid_name):
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                '''Could not add owner, a file or directory in private_base
exists with the same name!'''
            })
            status = returnvalues.CLIENT_ERROR
            continue

        # vgrid share already exists if user is a member of parent vgrid

        if not inherit_vgrid_member and os.path.exists(user_dir + vgrid_name):
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                '''Could not add owner, a file or directory in the home
directory exists with the same name!'''
            })
            status = returnvalues.CLIENT_ERROR
            continue

        # Add

        (add_status, add_msg) = vgrid_add_owners(configuration, vgrid_name,
                                                 [cert_id])
        if not add_status:
            output_objects.append({
                'object_type': 'error_text',
                'text': add_msg
            })
            status = returnvalues.SYSTEM_ERROR
            continue

        vgrid_name_parts = vgrid_name.split('/')
        is_subvgrid = len(vgrid_name_parts) > 1

        # create public_base in cert_id's home dir if it does not exist

        try:
            os.mkdir(user_public_base)
        except Exception as exc:
            # probably exists already - just reuse it
            pass

        # create private_base in cert_id's home dir if it does not exist

        try:
            os.mkdir(user_private_base)
        except Exception as exc:
            # probably exists already - just reuse it
            pass

        if is_subvgrid:
            share_dir = None
            try:

                # Example:
                #    vgrid_name = IMADA/STUD/BACH
                #    vgrid_name_last_fragment = BACH
                #    vgrid_name_without_last_fragment = IMADA/STUD/

                vgrid_name_last_fragment = vgrid_name_parts[-1].strip()

                vgrid_name_without_last_fragment = \
                    ('/'.join(vgrid_name_parts[:-1]) + os.sep).strip()

                # create dirs if they do not exist

                share_dir = user_dir + vgrid_name_without_last_fragment
                if not os.path.isdir(share_dir):
                    os.makedirs(share_dir)
                pub_dir = user_public_base + vgrid_name_without_last_fragment
                if not os.path.isdir(pub_dir):
                    os.makedirs(pub_dir)
                priv_dir = user_private_base + vgrid_name_without_last_fragment
                if not os.path.isdir(priv_dir):
                    os.makedirs(priv_dir)
            except Exception as exc:

                # out of range? should not be possible due to is_subvgrid check

                output_objects.append({
                    'object_type':
                    'error_text',
                    'text': ('Could not create needed dirs on %s server! %s' %
                             (configuration.short_title, exc))
                })
                logger.error('%s when looking for dir %s.' % (exc, share_dir))
                status = returnvalues.SYSTEM_ERROR
                continue

        # create symlink from users home directory to vgrid file directory
        # unless member of parent vgrid so that it is included already

        link_src = os.path.abspath(configuration.vgrid_files_home + os.sep +
                                   vgrid_name) + os.sep
        link_dst = user_dir + vgrid_name

        if not inherit_vgrid_member and \
                not make_symlink(link_src, link_dst, logger):
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Could not create link to %s share!' % label
            })
            logger.error('Could not create link to %s files (%s -> %s)' %
                         (label, link_src, link_dst))
            status = returnvalues.SYSTEM_ERROR
            continue

        public_base_dst = user_public_base + vgrid_name

        # create symlink for public_base files

        if not make_symlink(public_base_dir, public_base_dst, logger):
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Could not create link to public_base dir!'
            })
            logger.error(
                'Could not create link to public_base dir (%s -> %s)' %
                (public_base_dir, public_base_dst))
            status = returnvalues.SYSTEM_ERROR
            continue

        private_base_dst = user_private_base + vgrid_name

        # create symlink for private_base files

        if not make_symlink(private_base_dir, private_base_dst, logger):
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Could not create link to private_base dir!'
            })
            status = returnvalues.SYSTEM_ERROR
            continue

        if configuration.trac_admin_path:
            public_tracker_dir = \
                os.path.abspath(os.path.join(
                    configuration.vgrid_public_base, vgrid_name, '.vgridtracker'))
            private_tracker_dir = \
                os.path.abspath(os.path.join(
                    configuration.vgrid_private_base, vgrid_name, '.vgridtracker'))
            vgrid_tracker_dir = \
                os.path.abspath(os.path.join(
                    configuration.vgrid_files_home, vgrid_name, '.vgridtracker'))
            for tracker_dir in [
                    public_tracker_dir, private_tracker_dir, vgrid_tracker_dir
            ]:
                if not add_tracker_admin(configuration, cert_id, vgrid_name,
                                         tracker_dir, output_objects):
                    status = returnvalues.SYSTEM_ERROR
                    continue
        cert_id_added.append(cert_id)

    if request_name:
        request_dir = os.path.join(configuration.vgrid_home, vgrid_name)
        if not delete_access_request(configuration, request_dir, request_name):
            logger.error("failed to delete owner request for %s in %s" %
                         (vgrid_name, request_name))
            output_objects.append({
                'object_type':
                'error_text',
                'text':
                'Failed to remove saved request for %s in %s!' %
                (vgrid_name, request_name)
            })

    if cert_id_added:
        output_objects.append({
            'object_type':
            'html_form',
            'text':
            'New owner(s)<br />%s<br />successfully added to %s %s!'
            '' % ('<br />'.join(cert_id_added), vgrid_name, label)
        })
        cert_id_fields = ''
        for cert_id in cert_id_added:
            cert_id_fields += """<input type=hidden name=cert_id value='%s' />
""" % cert_id

        form_method = 'post'
        csrf_limit = get_csrf_limit(configuration)
        fill_helpers = {
            'vgrid_name': vgrid_name,
            'cert_id': cert_id,
            'protocol': any_protocol,
            'short_title': configuration.short_title,
            'vgrid_label': label,
            'cert_id_fields': cert_id_fields,
            'form_method': form_method,
            'csrf_field': csrf_field,
            'csrf_limit': csrf_limit
        }
        target_op = 'sendrequestaction'
        csrf_token = make_csrf_token(configuration, form_method, target_op,
                                     client_id, csrf_limit)
        fill_helpers.update({'target_op': target_op, 'csrf_token': csrf_token})
        output_objects.append({
            'object_type':
            'html_form',
            'text':
            """
<form method='%(form_method)s' action='%(target_op)s.py'>
<input type='hidden' name='%(csrf_field)s' value='%(csrf_token)s' />
<input type=hidden name=request_type value='vgridaccept' />
<input type=hidden name=vgrid_name value='%(vgrid_name)s' />
%(cert_id_fields)s
<input type=hidden name=protocol value='%(protocol)s' />
<table>
<tr>
<td class='title'>Custom message to user(s)</td>
</tr><tr>
<td><textarea name=request_text cols=72 rows=10>
We have granted you ownership access to our %(vgrid_name)s %(vgrid_label)s.
You can access the %(vgrid_label)s administration page from the
%(vgrid_label)ss page on %(short_title)s.

Regards, the %(vgrid_name)s %(vgrid_label)s owners
</textarea></td>
</tr>
<tr>
<td><input type='submit' value='Inform user(s)' /></td>
</tr>
</table>
</form>
<br />
""" % fill_helpers
        })

    output_objects.append({
        'object_type': 'link',
        'destination': 'adminvgrid.py?vgrid_name=%s' % vgrid_name,
        'text': 'Back to administration for %s' % vgrid_name
    })
    return (output_objects, status)
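The share-linking step in the example above first makes sure the sub-vgrid parent directories exist in the user's home and then symlinks the vgrid files share into it, skipping the link when membership of a parent vgrid already provides access. A rough sketch under those assumptions, using plain os calls instead of MiG's make_symlink helper (all paths are hypothetical):

import os

def link_vgrid_share(user_home, files_home, vgrid_name, inherited=False):
    """Symlink the vgrid files share into the user's home directory."""
    link_src = os.path.join(files_home, vgrid_name)
    link_dst = os.path.join(user_home, vgrid_name)
    if inherited or os.path.islink(link_dst):
        # already reachable through a parent vgrid share or linked previously
        return link_dst
    parent_dir = os.path.dirname(link_dst)
    if not os.path.isdir(parent_dir):
        # e.g. create IMADA/STUD/ before linking the sub-vgrid IMADA/STUD/BACH
        os.makedirs(parent_dir)
    os.symlink(link_src, link_dst)
    return link_dst

# Purely illustrative call:
# link_vgrid_share('/home/user', '/srv/mig/vgrid_files', 'IMADA/STUD/BACH')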