Example #1
def vgrid_set_entities(configuration, vgrid_name, kind, id_list, allow_empty):
    """Set kind list to provided id_list for given vgrid. The allow_empty
    argument can be used to e.g. prevent empty owners lists.
    """

    if kind == 'owners':
        entity_filename = configuration.vgrid_owners
    elif kind == 'members':
        entity_filename = configuration.vgrid_members
    elif kind == 'resources':
        entity_filename = configuration.vgrid_resources
    elif kind == 'triggers':
        entity_filename = configuration.vgrid_triggers
    else:
        return (False, "vgrid_set_entities: unknown kind: '%s'" % kind)

    entity_filepath = os.path.join(configuration.vgrid_home, vgrid_name, 
                                   entity_filename)

    try:
        if not id_list and not allow_empty:
            raise ValueError("not allowed to set empty list of %s" % kind)
        dump(id_list, entity_filepath)
        mark_vgrid_modified(configuration, vgrid_name)
        return (True, '')
    except Exception, exc:
        return (False, "could not set %s for %s: %s" % (kind, vgrid_name, exc))
Example #2
def update_pickled_dict(path, changes):
    """Update pickled dictionary on disk with provided changes"""

    saved_dict = load(path)
    saved_dict.update(changes)
    dump(saved_dict, path)
    return saved_dict
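
The helper above is a plain load-update-dump cycle. Below is a minimal, self-contained sketch of the same pattern using only the standard pickle module; the function name and the settings.pck path are illustrative assumptions, not part of the original code.

import pickle

def update_pickled_dict_sketch(path, changes):
    """Illustrative stand-in: read a pickled dict, merge changes, write back"""
    with open(path, 'rb') as handle:
        saved_dict = pickle.load(handle)
    saved_dict.update(changes)
    with open(path, 'wb') as handle:
        pickle.dump(saved_dict, handle)
    return saved_dict

# Hypothetical usage: bump a cached counter in settings.pck
# update_pickled_dict_sketch('settings.pck', {'runs': 42})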
Example #3
def vgrid_add_entities(configuration, vgrid_name, kind, id_list):
    """Append list of IDs to pickled list of kind for vgrid_name"""

    if kind == 'owners':
        entity_filename = configuration.vgrid_owners
    elif kind == 'members':
        entity_filename = configuration.vgrid_members
    elif kind == 'resources':
        entity_filename = configuration.vgrid_resources
    elif kind == 'triggers':
        entity_filename = configuration.vgrid_triggers
    else:
        return (False, "vgrid_add_entities: unknown kind: '%s'" % kind)

    entity_filepath = os.path.join(configuration.vgrid_home, vgrid_name, 
                                   entity_filename)
    try:
        if os.path.exists(entity_filepath):
            entities = load(entity_filepath)
        else:
            entities = []
            log_msg = "creating missing file: '%s'" % (entity_filepath)
            configuration.logger.info(log_msg)

        entities += [i for i in id_list if not i in entities]
        dump(entities, entity_filepath)
        mark_vgrid_modified(configuration, vgrid_name)
        return (True, '')
    except Exception, exc:
        return (False, "could not add %s for %s: %s" % (kind, vgrid_name, exc))
Example #4
def refresh_job_stats(configuration, client_id):
    """Refresh job stats for specified user"""
    dirty = False
    client_dir = client_id_dir(client_id)
    job_base = os.path.join(configuration.mrsl_files_dir, client_dir)
    stats_base = os.path.join(configuration.user_cache, client_dir)
    stats_path = os.path.join(stats_base, "job-stats.pck")
    lock_path = stats_path + ".lock"

    try:
        os.makedirs(stats_base)
    except:
        pass

    lock_handle = open(lock_path, 'a')

    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)

    job_stats = {PARSE: 0, QUEUED: 0, EXECUTING: 0, FINISHED: 0, RETRY: 0,
                 CANCELED: 0, EXPIRED: 0, FAILED: 0, FROZEN: 0}
    try:
        stats = load(stats_path)
        stats_stamp = os.path.getmtime(stats_path)
        # Backwards compatible update
        job_stats.update(stats[JOBS])
        stats[JOBS] = job_stats
    except IOError:
        configuration.logger.warn("No job stats to load - ok first time")
        stats = {JOBS: job_stats}
        stats_stamp = -1

    now = time.time()
    if now < stats_stamp + JOB_REFRESH_DELAY:
        lock_handle.close()
        return stats        

    # Inspect all jobs in user job dir and update the ones that changed
    # since last stats run
    for name in os.listdir(job_base):
        if stats.has_key(name) and stats[name]["STATUS"] in FINAL_STATES:
            continue

        job_path = os.path.join(job_base, name)
        job_stamp = os.path.getmtime(job_path)
        
        if stats.has_key(name) and job_stamp < stats_stamp:
            continue

        dirty = True
        job = load(job_path)
        update_job_stats(stats, name, job)

    if dirty:
        try:
            dump(stats, stats_path)
            stats_stamp = os.path.getmtime(stats_path)
        except Exception, exc:
            configuration.logger.error("Could not save stats cache: %s" % exc)
Example #5
def pickle(data_object, path, logger):
    """Pack data_object as pickled object in path"""
    try:
        dump(data_object, path)
        logger.debug('pickle success: %s' % path)
        return True
    except Exception, err:
        logger.error('could not pickle: %s %s' % (path, err))
        return False
Example #6
def filter_pickled_list(path, changes):
    """Filter pickled list on disk with provided changes where changes is a
    dictionary mapping existing list entries and the value to replace it with.
    """

    saved_list = load(path)
    saved_list = [changes.get(entry, entry) for entry in saved_list]
    dump(saved_list, path)
    return saved_list
Example #7
def pickle(job_dict, path, logger):
    """Pack job_dict as pickled object in path"""
    try:
        dump(job_dict, path)
        logger.debug('pickle success: %s' % path)
        return True
    except Exception, err:
        logger.error('could not pickle: %s %s' % (path, err))
        return False
Example #8
def refresh_user_map(configuration):
    """Refresh map of users and their configuration. Uses a pickled
    dictionary for efficiency. 
    User IDs are stored in their raw (non-anonymized form).
    Only update map for users that updated conf after last map save.
    """
    dirty = []
    map_path = os.path.join(configuration.mig_system_files, "user.map")
    lock_path = os.path.join(configuration.mig_system_files, "user.lock")
    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    user_map, map_stamp = load_user_map(configuration, do_lock=False)

    # Find all users and their configurations
    
    all_users = list_users(configuration.user_home)
    real_map = real_to_anon_user_map(configuration.user_home)
    for user in all_users:
        settings_path = os.path.join(configuration.user_settings,
                                     client_id_dir(user), settings_filename)
        profile_path = os.path.join(configuration.user_settings,
                                    client_id_dir(user), profile_filename)
        settings_mtime, profile_mtime = 0, 0
        if os.path.isfile(settings_path):
            settings_mtime = os.path.getmtime(settings_path)
        if os.path.isfile(profile_path):
            profile_mtime = os.path.getmtime(profile_path)

        if settings_mtime + profile_mtime > 0:
            conf_mtime = max(settings_mtime, profile_mtime)
        else:
            conf_mtime = -1
        # init first time
        user_map[user] = user_map.get(user, {})
        if not user_map[user].has_key(CONF) or conf_mtime >= map_stamp:
            user_conf = get_user_conf(user, configuration, True)
            if not user_conf:
                user_conf = {}
            user_map[user][CONF] = user_conf
            public_id = user
            if user_conf.get('ANONYMOUS', True):
                public_id = real_map[user]
            user_map[user][USERID] = public_id
            user_map[user][MODTIME] = map_stamp
            dirty += [user]
    # Remove any missing users from map
    missing_user = [user for user in user_map.keys() \
                   if not user in all_users]
    for user in missing_user:
        del user_map[user]
        dirty += [user]

    if dirty:
        try:
            dump(user_map, map_path)
        except Exception, exc:
            configuration.logger.error("Could not save user map: %s" % exc)
Example #9
def save_sandbox_db(sandbox_db, configuration=None):
    """Read in the sandbox DB dictionary:
    Format is {username: (password, [list_of_resources])}
    """

    if not configuration:
        configuration = get_configuration_object()
    sandbox_db_path = os.path.join(configuration.sandbox_home, sandbox_db_name)
    dump(sandbox_db, sandbox_db_path)
Example #10
def resource_set_owners(configuration, unique_resource_name, clients):
    """Set list of owners for given resource"""
    owners_file = os.path.join(configuration.resource_home,
                               unique_resource_name, 'owners')
    try:
        dump(clients, owners_file)
        mark_resource_modified(configuration, unique_resource_name)
        return (True, '')
    except Exception, exc:
        return (False, "could not set owners for %s: %s" % \
                (unique_resource_name, exc))
Example #11
def filter_pickled_dict(path, changes):
    """Filter pickled dictionary on disk with provided changes where changes
    is a dictionary mapping existing dictionary values to a value to replace
    it with.
    """

    saved_dict = load(path)
    for (key, val) in saved_dict.items():
        if val in changes.keys():
            saved_dict[key] = changes[val]
    dump(saved_dict, path)
    return saved_dict
Example #12
def reset_entities_modified(configuration, kind):
    """Reset all modified entity marks of given kind"""
    home_map = home_paths(configuration)
    modified_path = os.path.join(home_map[kind], "%s.modified" % kind)
    lock_path = os.path.join(configuration.mig_system_files, "%s.lock" % kind)
    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    try:
        dump([], modified_path)
    except Exception, exc:
        configuration.logger.error("Could not reset %s modified mark: %s" % \
                                   (kind, exc))
Example #13
def reset_entities_modified(configuration, kind):
    """Reset all modified entity marks of given kind"""
    modified_path = os.path.join(configuration.mig_system_files,
                                 "%s.modified" % kind)
    lock_path = os.path.join(configuration.mig_system_files, "%s.lock" % kind)
    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    try:
        dump([], modified_path)
    except Exception, exc:
        configuration.logger.error("Could not reset %s modified mark: %s" % \
                                   (kind, exc))
Example #14
def resource_add_owners(configuration, unique_resource_name, clients):
    """Append list of clients to pickled list of resource owners"""
    owners_file = os.path.join(configuration.resource_home,
                               unique_resource_name, 'owners')
    try:
        owners = load(owners_file)
        owners += [i for i in clients if not i in owners]
        dump(owners, owners_file)
        mark_resource_modified(configuration, unique_resource_name)
        return (True, '')
    except Exception, exc:
        return (False, "could not add owners for %s: %s" % \
                (unique_resource_name, exc))
Example #15
def refresh_re_map(configuration):
    """Refresh map of runtime environments and their configuration. Uses a
    pickled dictionary for efficiency. 
    Only update map for runtime environments that appeared or disappeared after
    last map save.
    NOTE: Save start time so that any concurrent updates get caught next time.
    """
    start_time = time.time()
    dirty = []
    map_path = os.path.join(configuration.mig_system_files, "runtimeenvs.map")
    lock_path = map_path.replace('.map', '.lock')
    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    re_map, map_stamp = load_re_map(configuration, do_lock=False)

    # Find all runtimeenvs and their configurations

    (load_status, all_res) = list_runtime_environments(configuration)
    if not load_status:
        configuration.logger.error("failed to load runtimeenv list: %s" %
                                   all_res)
        return re_map
    for re_name in all_res:
        re_path = os.path.join(configuration.re_home, re_name)
        re_mtime = 0
        if os.path.isfile(re_path):
            re_mtime = os.path.getmtime(re_path)

        # init first time
        re_map[re_name] = re_map.get(re_name, {})
        if not re_map[re_name].has_key(CONF) or re_mtime >= map_stamp:
            re_conf = get_re_conf(re_name, configuration)
            if not re_conf:
                re_conf = {}
            re_map[re_name][CONF] = re_conf
            re_map[re_name][MODTIME] = map_stamp
            dirty += [re_name]
    # Remove any missing runtimeenvs from map
    missing_re = [re_name for re_name in re_map.keys() \
                   if not re_name in all_res]
    for re_name in missing_re:
        del re_map[re_name]
        dirty += [re_name]

    if dirty:
        try:
            dump(re_map, map_path)
            os.utime(map_path, (start_time, start_time))
        except Exception, exc:
            configuration.logger.error("Could not save re map: %s" % exc)
Example #16
def modify_data_transfers(configuration,
                          client_id,
                          transfer_dict,
                          action,
                          transfers=None):
    """Modify data transfers with given action and transfer_dict for client_id.
    In practice this is a shared helper to add or remove transfers from the saved
    data transfers. The optional transfers argument can be used to pass an
    already loaded dictionary of saved transfers to avoid reloading.
    """
    logger = configuration.logger
    transfer_id = transfer_dict['transfer_id']
    if transfers is None:
        (load_status, transfers) = load_data_transfers(configuration,
                                                       client_id)
        if not load_status:
            logger.error("modify_data_transfers failed in load: %s" % \
                         transfers)
            return (load_status, transfers)

    if action == "create":
        now = datetime.datetime.now()
        transfer_dict.update({
            'created_timestamp': now,
            'updated_timestamp': now,
            'owner': client_id,
        })
        transfers[transfer_id] = transfer_dict
    elif action == "modify":
        transfer_dict['updated_timestamp'] = datetime.datetime.now()
        transfers[transfer_id].update(transfer_dict)
    elif action == "delete":
        del transfers[transfer_id]
    else:
        return (False, "Invalid action %s on data transfers" % action)

    try:
        transfers_path = os.path.join(configuration.user_settings,
                                      client_id_dir(client_id),
                                      datatransfers_filename)
        dump(transfers, transfers_path)
        res_dir = get_status_dir(configuration, client_id, transfer_id)
        makedirs_rec(res_dir, configuration)
    except Exception, err:
        logger.error("modify_data_transfers failed: %s" % err)
        return (False, 'Error updating data transfers: %s' % err)
Example #17
def resource_remove_owners(configuration, unique_resource_name, clients,
                           allow_empty=False):
    """Remove list of clients from pickled list of resource owners. The
    optional allow_empty argument is used to prevent or allow removal of the last
    owner.
    """
    owners_file = os.path.join(configuration.resource_home,
                               unique_resource_name, 'owners')
    try:
        owners = load(owners_file)
        owners = [i for i in owners if not i in clients]
        if not owners and not allow_empty:
            raise ValueError("not allowed to remove last owner")
        dump(owners, owners_file)
        mark_resource_modified(configuration, unique_resource_name)
        return (True, '')
    except Exception, exc:
        return (False, "could not remove owners for %s: %s" % \
                (unique_resource_name, exc))
Example #18
def vgrid_remove_entities(configuration, vgrid_name, kind, id_list,
                          allow_empty, dict_field=False):
    """Remove list of IDs from pickled list of kind for vgrid_name.
    The allow_empty argument can be used to prevent removal of e.g. the last
    owner.
    Use the dict_field if the entries are dictionaries and the id_list should
    be matched against dict_field in each of them. 
    """

    if kind == 'owners':
        entity_filename = configuration.vgrid_owners
    elif kind == 'members':
        entity_filename = configuration.vgrid_members
    elif kind == 'resources':
        entity_filename = configuration.vgrid_resources
    elif kind == 'triggers':
        entity_filename = configuration.vgrid_triggers
    else:
        return (False, "vgrid_remove_entities: unknown kind: '%s'" % kind)
    
    entity_filepath = os.path.join(configuration.vgrid_home, vgrid_name, 
                                   entity_filename)

    # Force raw string to list to avoid nasty silent substring matching below
    # I.e. removing abc.def.0 would also remove def.0
    
    if isinstance(id_list, basestring):
        id_list = [id_list]
        
    try:
        entities = load(entity_filepath)
        if dict_field:
            entities = [i for i in entities if not i[dict_field] in id_list]
        else:
            entities = [i for i in entities if not i in id_list]
        if not entities and not allow_empty:
            raise ValueError("not allowed to remove last entry of %s" % kind)
        dump(entities, entity_filepath)
        mark_vgrid_modified(configuration, vgrid_name)
        return (True, '')
    except Exception, exc:
        return (False, "could not remove %s for %s: %s" % (kind, vgrid_name,
                                                           exc))
Example #19
def mark_entity_modified(configuration, kind, name):
    """Mark name of given kind modified to signal reload before use from other
    locations.
    """
    home_map = home_paths(configuration)
    modified_path = os.path.join(home_map[kind], "%s.modified" % kind)
    lock_path = os.path.join(configuration.mig_system_files, "%s.lock" % kind)
    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    try:
        if os.path.exists(modified_path):
            modified_list = load(modified_path)
        else:
            modified_list = []
        if not name in modified_list:
            modified_list.append(name)
        dump(modified_list, modified_path)
    except Exception, exc:
        configuration.logger.error("Could not update %s modified mark: %s" % \
                                   (kind, exc))
Example #20
def mark_entity_modified(configuration, kind, name):
    """Mark name of given kind modified to signal reload before use from other
    locations.
    """
    modified_path = os.path.join(configuration.mig_system_files,
                                 "%s.modified" % kind)
    lock_path = os.path.join(configuration.mig_system_files, "%s.lock" % kind)
    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    try:
        if os.path.exists(modified_path):
            modified_list = load(modified_path)
        else:
            modified_list = []
        if not name in modified_list:
            modified_list.append(name)
        dump(modified_list, modified_path)
    except Exception, exc:
        configuration.logger.error("Could not update %s modified mark: %s" % \
                                   (kind, exc))
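
Both variants of mark_entity_modified serialize access with an exclusive fcntl lock around a read-modify-write of the pickled list. A minimal, self-contained sketch of that locking pattern using the standard fcntl and pickle modules directly; the paths and the function name are illustrative, not from the original code.

import fcntl
import os
import pickle

def append_with_lock_sketch(modified_path, lock_path, name):
    """Illustrative flock-protected read-modify-write of a pickled list"""
    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    try:
        if os.path.exists(modified_path):
            with open(modified_path, 'rb') as handle:
                modified_list = pickle.load(handle)
        else:
            modified_list = []
        if name not in modified_list:
            modified_list.append(name)
        with open(modified_path, 'wb') as handle:
            pickle.dump(modified_list, handle)
    finally:
        # Closing the lock file handle releases the flock
        lock_handle.close()
    return modified_list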
Example #21
def update_runtimeenv_owner(re_name, old_owner, new_owner, configuration):
    """Update owner on an existing runtime environment if existing owner
    matches old_owner.
    """
    status, msg = True, ""
    # Lock the access to the runtime env files, so that edit is done
    # with exclusive access.
    lock_path = os.path.join(configuration.re_home, WRITE_LOCK)
    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    re_filename = os.path.join(configuration.re_home, re_name)
    try:
        re_dict = load(re_filename)
        if re_dict['CREATOR'] == old_owner:
            re_dict['CREATOR'] = new_owner
            dump(re_dict, re_filename)
        else:
            status = False
    except Exception, err:
        msg = "Failed to edit owner of runtime enviroment '%s': %s" % \
              (re_name, err)
        configuration.logger.warning(msg)
        status = False
Example #22
def update_runtimeenv_owner(re_name, old_owner, new_owner, configuration):
    """Update owner on an existing runtime environment if existing owner
    matches old_owner.
    """
    status, msg = True, ""
    # Lock the access to the runtime env files, so that edit is done
    # with exclusive access.
    lock_path = os.path.join(configuration.re_home, WRITE_LOCK)
    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    re_filename = os.path.join(configuration.re_home, re_name)
    try:
        re_dict = load(re_filename)
        if re_dict['CREATOR'] == old_owner:
            re_dict['CREATOR'] = new_owner
            dump(re_dict, re_filename)
            mark_re_modified(configuration, re_name)
        else:
            status = False
    except Exception, err:
        msg = "Failed to edit owner of runtime enviroment '%s': %s" % \
              (re_name, err)
        configuration.logger.warning(msg)
        status = False
Example #23
            job_stamp = -1

        if stats.has_key(name) and job_stamp < stats_stamp:
            continue

        dirty = True
        try:
            job = load(job_path)
        except Exception, exc:
            _logger.warning("unpickle failed on %s: %s" % (job_path, exc))
            continue
        update_job_stats(stats, name, job)

    if dirty:
        try:
            dump(stats, stats_path)
            stats_stamp = os.path.getmtime(stats_path)
        except Exception, exc:
            _logger.error("Could not save stats cache: %s" % exc)

    lock_handle.close()

    stats['time_stamp'] = stats_stamp
    return stats


if "__main__" == __name__:
    import sys
    from shared.conf import get_configuration_object
    conf = get_configuration_object()
    raw_stats = refresh_disk_stats(conf, sys.argv[1])
Example #24
def refresh_disk_stats(configuration, client_id):
    """Refresh disk use stats for specified user"""
    _logger = configuration.logger
    dirty = False
    client_dir = client_id_dir(client_id)
    user_base = os.path.join(configuration.user_home, client_dir)
    stats_base = os.path.join(configuration.user_cache, client_dir)
    stats_path = os.path.join(stats_base, "disk-stats.pck")
    lock_path = stats_path + ".lock"

    try:
        os.makedirs(stats_base)
    except:
        pass

    lock_handle = open(lock_path, 'a')

    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)

    try:
        stats = load(stats_path)
        stats_stamp = os.path.getmtime(stats_path)
    except IOError:
        _logger.warning("No disk stats to load - ok first time")
        stats = {
            OWN: {
                FILES: 0,
                DIRECTORIES: 0,
                BYTES: 0
            },
            VGRID: {
                FILES: 0,
                DIRECTORIES: 0,
                BYTES: 0
            }
        }
        stats_stamp = -1

    now = time.time()
    if now < stats_stamp + DISK_REFRESH_DELAY:
        lock_handle.close()
        return stats

    # Walk entire home dir and update any parts that changed
    # Please note that walk doesn't follow symlinks so we have
    # to additionally walk vgrid dir symlinks explicitly
    cur_roots = []
    vgrid_dirs = []
    total = OWN
    for (root, dirs, files) in os.walk(user_base):
        rel_root = root.replace(user_base, '').lstrip(os.sep)
        cur_roots.append(rel_root)
        for dir_name in dirs:
            dir_path = os.path.join(root, dir_name)
            if os.path.islink(dir_path):
                vgrid_dirs.append(dir_path)

        # Directory and contents unchanged - ignore

        if stats.has_key(rel_root) and \
                not contents_changed(configuration, root, files, stats_stamp):
            continue

        dirty = True

        update_disk_stats(configuration, stats, root, rel_root, dirs, files,
                          total)

    # Now walk vgrid dir symlinks explicitly
    total = VGRID
    for vgrid_base in vgrid_dirs:
        for (root, dirs, files) in os.walk(vgrid_base):
            # Still use path relative to user base!
            rel_root = root.replace(user_base, '').lstrip(os.sep)
            cur_roots.append(rel_root)

            # Directory and contents unchanged - ignore

            if stats.has_key(rel_root) and \
                not contents_changed(configuration, root, files,
                                     stats_stamp):
                continue

            dirty = True

            update_disk_stats(configuration, stats, root, rel_root, dirs,
                              files, total)

    # Update stats for any roots no longer there

    for rel_root in stats.keys():
        if rel_root in list(TOTALS) + cur_roots:
            continue
        root = os.path.join(user_base, rel_root)
        # NOTE: legacy stats may lack KIND field - just ignore and delete
        total = stats[rel_root].get(KIND, None)
        if total:
            stats[total][FILES] -= stats[rel_root][FILES]
            stats[total][DIRECTORIES] -= stats[rel_root][DIRECTORIES]
            stats[total][BYTES] -= stats[rel_root][BYTES]
        else:
            _logger.warning("Ignoring outdated stat entry for %s: %s" %
                            (root, stats[rel_root]))
        del stats[rel_root]
        dirty = True

    if dirty:
        try:
            dump(stats, stats_path)
            stats_stamp = os.path.getmtime(stats_path)
        except Exception, exc:
            _logger.error("Could not save stats cache: %s" % exc)
Example #25
def refresh_resource_map(configuration):
    """Refresh map of resources and their configuration. Uses a pickled
    dictionary for efficiency. 
    Resource IDs are stored in their raw (non-anonymized form).
    Only update map for resources that updated conf after last map save.
    """
    dirty = []
    map_path = os.path.join(configuration.mig_system_files, "resource.map")
    lock_path = os.path.join(configuration.mig_system_files, "resource.lock")
    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    resource_map, map_stamp = load_resource_map(configuration, do_lock=False)

    # Find all resources and their configurations
    
    all_resources = list_resources(configuration.resource_home,
                                   only_valid=True)
    real_map = real_to_anon_res_map(configuration.resource_home)
    for res in all_resources:
        # Sandboxes do not change their configuration
        if resource_map.has_key(res) and sandbox_resource(res):
            continue
        conf_path = os.path.join(configuration.resource_home, res, "config")
        if not os.path.isfile(conf_path):
            continue
        conf_mtime = os.path.getmtime(conf_path)
        owners_path = os.path.join(configuration.resource_home, res, "owners")
        if not os.path.isfile(owners_path):
            continue
        owners_mtime = os.path.getmtime(owners_path)
        # init first time
        resource_map[res] = resource_map.get(res, {})
        if not resource_map[res].has_key(CONF) or conf_mtime >= map_stamp:
            (status, res_conf) = get_resource_configuration(
                configuration.resource_home, res, configuration.logger)
            if not status:
                continue
            resource_map[res][CONF] = res_conf
            public_id = res
            if res_conf.get('ANONYMOUS', True):
                public_id = real_map[res]
            resource_map[res][RESID] = public_id
            resource_map[res][MODTIME] = map_stamp
            dirty += [res]
        if not resource_map[res].has_key(OWNERS) or owners_mtime >= map_stamp:
            owners = load(owners_path)
            resource_map[res][OWNERS] = owners
            resource_map[res][MODTIME] = map_stamp
            dirty += [res]
    # Remove any missing resources from map
    missing_res = [res for res in resource_map.keys() \
                   if not res in all_resources]
    for res in missing_res:
        del resource_map[res]
        dirty += [res]

    if dirty:
        try:
            dump(resource_map, map_path)
        except Exception, exc:
            configuration.logger.error("Could not save resource map: %s" % exc)
Example #26
from shared.conf import get_configuration_object

configuration = get_configuration_object()

sandboxdb_file = configuration.sandbox_home + os.sep\
     + 'sandbox_users.pkl'

PW = 0
RESOURCES = 1

try:
    username = sys.argv[1]
except:
    print 'You must specify a username.'
    sys.exit(1)

# Load the user file

userdb = load(sandboxdb_file)

if userdb.has_key(username):

    # Open the user file in write-mode - this deletes the file!

    del userdb[username]
    dump(userdb, sandboxdb_file)
    print 'Username %s has now been deleted!' % username
else:
    print 'Sorry, username does not exist: %s' % username
    sys.exit(0)
Example #27
    re_filename = os.path.join(configuration.re_home, re_name)

    # Lock the access to the runtime env files, so that creation is done
    # with exclusive access.
    lock_path = os.path.join(configuration.re_home, WRITE_LOCK)
    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)

    status, msg = True, ''
    if os.path.exists(re_filename):
        status = False
        msg = \
            "can not recreate existing runtime environment '%s'!" % re_name

    try:
        dump(new_dict, re_filename)
    except Exception, err:
        status = False
        msg = 'Internal error saving new runtime environment: %s' % err

    lock_handle.close()
    return (status, msg)

def update_runtimeenv_owner(re_name, old_owner, new_owner, configuration):
    """Update owner on an existing runtime environment if existing owner
    matches old_owner.
    """
    status, msg = True, ""
    # Lock the access to the runtime env files, so that edit is done
    # with exclusive access.
    lock_path = os.path.join(configuration.re_home, WRITE_LOCK)
Example #28
    re_filename = os.path.join(configuration.re_home, re_name)

    # Lock the access to the runtime env files, so that creation is done
    # with exclusive access.
    lock_path = os.path.join(configuration.re_home, WRITE_LOCK)
    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)

    status, msg = True, ''
    if os.path.exists(re_filename):
        status = False
        msg = \
            "can not recreate existing runtime environment '%s'!" % re_name

    try:
        dump(new_dict, re_filename)
        mark_re_modified(configuration, re_name)
    except Exception, err:
        status = False
        msg = 'Internal error saving new runtime environment: %s' % err

    lock_handle.close()
    return (status, msg)


def update_runtimeenv_owner(re_name, old_owner, new_owner, configuration):
    """Update owner on an existing runtime environment if existing owner
    matches old_owner.
    """
    status, msg = True, ""
    # Lock the access to the runtime env files, so that edit is done
Example #29
def refresh_vgrid_map(configuration):
    """Refresh map of users and resources with their direct vgrid
    participation. That is, without inheritance. Uses a pickled dictionary for
    efficiency. 
    Resource and user IDs are stored in their raw (non-anonymized form).
    Only update map for users and resources that updated conf after last map
    save.
    """
    dirty = {}
    vgrid_changes = {}
    map_path = os.path.join(configuration.mig_system_files, "vgrid.map")
    lock_path = os.path.join(configuration.mig_system_files, "vgrid.lock")
    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    vgrid_map, map_stamp = load_vgrid_map(configuration, do_lock=False)
    
    vgrid_helper = {default_vgrid: {RESOURCES: '*', OWNERS: '', MEMBERS: '*'}}
    if not vgrid_map.has_key(VGRIDS):
        vgrid_map[VGRIDS] = vgrid_helper
        dirty[VGRIDS] = dirty.get(VGRIDS, []) + [default_vgrid]
    if not vgrid_map.has_key(RESOURCES):
        vgrid_map[RESOURCES] = {}
        dirty[RESOURCES] = dirty.get(RESOURCES, [])
    if not vgrid_map.has_key(USERS):
        vgrid_map[USERS] = {}
        dirty[USERS] = dirty.get(USERS, [])

    # Find all vgrids and their allowed users and resources

    (status, all_vgrids) = vgrid_list_vgrids(configuration)
    if not status:
        all_vgrids = []

    conf_read = [(RESOURCES, configuration.vgrid_resources, vgrid_resources),
                 (OWNERS, configuration.vgrid_owners, vgrid_owners),
                 (MEMBERS, configuration.vgrid_members, vgrid_members)]

    for vgrid in all_vgrids:
        for (field, name, list_call) in conf_read:
            conf_path = os.path.join(configuration.vgrid_home, vgrid, name)
            if not os.path.isfile(conf_path):
                configuration.logger.warning('missing file: %s' % (conf_path)) 
                # Make sure vgrid dict exists before filling it
                vgrid_map[VGRIDS][vgrid] = vgrid_map[VGRIDS].get(vgrid, {})
                vgrid_map[VGRIDS][vgrid][field] = []
                dirty[VGRIDS] = dirty.get(VGRIDS, []) + [vgrid]

            elif not vgrid_map[VGRIDS].has_key(vgrid) or \
                   os.path.getmtime(conf_path) >= map_stamp:
                (status, entries) = list_call(vgrid, configuration,
                                              recursive=False)
                if not status:
                    entries = []
                vgrid_changes[vgrid] = (vgrid_map[VGRIDS].get(vgrid, []),
                                        entries)
                vgrid_map[VGRIDS][vgrid] = vgrid_map[VGRIDS].get(vgrid, {})
                vgrid_map[VGRIDS][vgrid][field] = entries
                dirty[VGRIDS] = dirty.get(VGRIDS, []) + [vgrid]
    # Remove any missing vgrids from map
    missing_vgrids = [vgrid for vgrid in vgrid_map[VGRIDS].keys() \
                   if not vgrid in all_vgrids]
    for vgrid in missing_vgrids:
        vgrid_changes[vgrid] = (vgrid_map[VGRIDS][vgrid], [])
        del vgrid_map[VGRIDS][vgrid]
        dirty[VGRIDS] = dirty.get(VGRIDS, []) + [vgrid]

    # Find all resources and their vgrid assignments
    
    # TODO: use get_resource_map output instead?
    all_resources = list_resources(configuration.resource_home, only_valid=True)
    real_map = real_to_anon_res_map(configuration.resource_home)
    for res in all_resources:
        # Sandboxes do not change their vgrid participation
        if vgrid_map[RESOURCES].has_key(res) and sandbox_resource(res):
            continue
        conf_path = os.path.join(configuration.resource_home, res, "config")
        if not os.path.isfile(conf_path):
            continue
        if os.path.getmtime(conf_path) >= map_stamp:
            vgrid_map[RESOURCES][res] = get_all_exe_vgrids(res)
            assigned = []
            all_exes = [i for i in vgrid_map[RESOURCES][res].keys() \
                        if not i in RES_SPECIALS]
            for exe in all_exes:
                exe_vgrids = vgrid_map[RESOURCES][res][exe]
                assigned += [i for i in exe_vgrids if i and i not in assigned]
            vgrid_map[RESOURCES][res][ASSIGN] = assigned
            vgrid_map[RESOURCES][res][ALLOW] = vgrid_map[RESOURCES][res].get(ALLOW, [])
            public_id = res
            anon_val = get_resource_fields(configuration.resource_home, res,
                                           ['ANONYMOUS'], configuration.logger)
            if anon_val.get('ANONYMOUS', True):
                public_id = real_map[res]
            vgrid_map[RESOURCES][res][RESID] = public_id
            dirty[RESOURCES] = dirty.get(RESOURCES, []) + [res]
    # Remove any missing resources from map
    missing_res = [res for res in vgrid_map[RESOURCES].keys() \
                   if not res in all_resources]
    for res in missing_res:
        del vgrid_map[RESOURCES][res]
        dirty[RESOURCES] = dirty.get(RESOURCES, []) + [res]

    # Update list of mutually agreed vgrid participations for dirty resources
    # and resources assigned to dirty vgrids
    configuration.logger.info("update res vgrid participations: %s" % vgrid_changes)
    update_res = [i for i in dirty.get(RESOURCES, []) if i not in MAP_SECTIONS]
    # configuration.logger.info("update vgrid allow res")
    for (vgrid, (old, new)) in vgrid_changes.items():
        # configuration.logger.info("update res vgrid %s" % vgrid)
        for res in [i for i in vgrid_map[RESOURCES].keys() \
                    if i not in update_res]:
            # Sandboxes do not change their vgrid participation
            if sandbox_resource(res):
                continue
            # configuration.logger.info("update res vgrid %s for res %s" % (vgrid, res))
            if vgrid_allowed(res, old) != vgrid_allowed(res, new):
                update_res.append(res)
    # configuration.logger.info("update res assign vgrid")
    for res in [i for i in update_res if i not in missing_res]:
        allow = []
        for vgrid in vgrid_map[RESOURCES][res][ASSIGN]:
            if vgrid_allowed(res, vgrid_map[VGRIDS][vgrid][RESOURCES]):
                allow.append(vgrid)
            vgrid_map[RESOURCES][res][ALLOW] = allow

    configuration.logger.info("done updating vgrid res participations")

    # Find all users and their vgrid assignments
    
    # TODO: use get_user_map output instead?
    all_users = list_users(configuration.user_home)
    real_map = real_to_anon_user_map(configuration.user_home)
    for user in all_users:
        settings_path = os.path.join(configuration.user_settings,
                                     client_id_dir(user), settings_filename)
        profile_path = os.path.join(configuration.user_settings,
                                    client_id_dir(user), profile_filename)
        settings_mtime, profile_mtime = 0, 0
        if os.path.isfile(settings_path):
            settings_mtime = os.path.getmtime(settings_path)
        if os.path.isfile(profile_path):
            profile_mtime = os.path.getmtime(profile_path)

        if settings_mtime + profile_mtime > 0:
            conf_mtime = max(settings_mtime, profile_mtime)
            user_conf = get_user_conf(user, configuration)
        else:
            conf_mtime = -1
            user_conf = {}
        if conf_mtime >= map_stamp:
            vgrid_map[USERS][user] = user_conf
            vgrid_map[USERS][user][ASSIGN] = vgrid_map[USERS][user].get(ASSIGN,
                                                                        [])
            vgrid_map[USERS][user][ALLOW] = vgrid_map[USERS][user].get(ALLOW,
                                                                       [])
            public_id = user
            if user_conf.get('ANONYMOUS', True):
                public_id = real_map[user]
            vgrid_map[USERS][user][USERID] = public_id
            dirty[USERS] = dirty.get(USERS, []) + [user]
    # Remove any missing users from map
    missing_user = [user for user in vgrid_map[USERS].keys() \
                   if not user in all_users]
    for user in missing_user:
        del vgrid_map[USERS][user]
        dirty[USERS] = dirty.get(USERS, []) + [user]

    # Update list of mutually agreed vgrid participations for dirty users
    # and users assigned to dirty vgrids
    update_user = [i for i in dirty.get(USERS, []) if i not in MAP_SECTIONS]
    for (vgrid, (old, new)) in vgrid_changes.items():
        for user in [i for i in vgrid_map[USERS].keys() \
                    if i not in update_user]:
            if vgrid_allowed(user, old) != vgrid_allowed(user, new):
                update_user.append(user)
    for user in [i for i in update_user if i not in missing_user]:
        allow = []
        for vgrid in vgrid_map[USERS][user][ASSIGN]:
            if vgrid_allowed(user, vgrid_map[VGRIDS][vgrid][OWNERS]) or \
                   vgrid_allowed(user, vgrid_map[VGRIDS][vgrid][MEMBERS]):
                allow.append(vgrid)
            # users implicitly assign all vgrids
            vgrid_map[USERS][user][ASSIGN] = allow
            vgrid_map[USERS][user][ALLOW] = allow

    if dirty:
        try:
            dump(vgrid_map, map_path)
        except Exception, exc:
            configuration.logger.error("Could not save vgrid map: %s" % exc)
Example #30
def refresh_disk_stats(configuration, client_id):
    """Refresh disk use stats for specified user"""
    dirty = False
    client_dir = client_id_dir(client_id)
    user_base = os.path.join(configuration.user_home, client_dir)
    stats_base = os.path.join(configuration.user_cache, client_dir)
    stats_path = os.path.join(stats_base, "disk-stats.pck")
    lock_path = stats_path + ".lock"

    try:
        os.makedirs(stats_base)
    except:
        pass

    lock_handle = open(lock_path, 'a')

    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)

    try:
        stats = load(stats_path)
        stats_stamp = os.path.getmtime(stats_path)
    except IOError:
        configuration.logger.warn("No disk stats to load - ok first time")
        stats = {OWN: {FILES: 0, DIRECTORIES: 0, BYTES: 0},
                 VGRID: {FILES: 0, DIRECTORIES: 0, BYTES: 0}}
        stats_stamp = -1

    now = time.time()
    if now < stats_stamp + DISK_REFRESH_DELAY:
        lock_handle.close()
        return stats        

    # Walk entire home dir and update any parts that changed
    # Please note that walk doesn't follow symlinks so we have
    # to additionally walk vgrid dir symlinks explicitly
    cur_roots = []
    vgrid_dirs = []
    total = OWN
    for (root, dirs, files) in os.walk(user_base):
        rel_root = root.replace(user_base, '').lstrip(os.sep)
        cur_roots.append(rel_root)
        for dir_name in dirs:
            dir_path = os.path.join(root, dir_name)
            if os.path.islink(dir_path):
                vgrid_dirs.append(dir_path)

        # Directory and contents unchanged - ignore

        if stats.has_key(rel_root) and \
               not contents_changed(root, files, stats_stamp):
            continue

        dirty = True
        
        update_disk_stats(stats, root, rel_root, dirs, files, total)

    # Now walk vgrid dir symlinks explicitly
    total = VGRID
    for vgrid_base in vgrid_dirs:
        for (root, dirs, files) in os.walk(vgrid_base):
            # Still use path relative to user base!
            rel_root = root.replace(user_base, '').lstrip(os.sep)
            cur_roots.append(rel_root)

            # Directory and contents unchanged - ignore

            if stats.has_key(rel_root) and \
                   not contents_changed(root, files, stats_stamp):
                continue

            dirty = True
        
            update_disk_stats(stats, root, rel_root, dirs, files, total)

    # Update stats for any roots no longer there

    for rel_root in stats.keys():
        if rel_root in list(TOTALS) + cur_roots:
            continue
        root = os.path.join(user_base, rel_root)
        total = stats[rel_root][KIND]
        stats[total][FILES] -= stats[rel_root][FILES]
        stats[total][DIRECTORIES] -= stats[rel_root][DIRECTORIES]
        stats[total][BYTES] -= stats[rel_root][BYTES]
        del stats[rel_root]
        dirty = True

    if dirty:
        try:
            dump(stats, stats_path)
            stats_stamp = os.path.getmtime(stats_path)
        except Exception, exc:
            configuration.logger.error("Could not save stats cache: %s" % exc)
Example #31
    freeze_id = os.path.basename(frozen_dir)
    
    freeze_dict = {
        'ID': freeze_id,
        'CREATED_TIMESTAMP': datetime.datetime.now(),
        'CREATOR': client_id,
        }
    freeze_dict.update(freeze_meta)
    if freeze_meta['PUBLISH']:
        real_pub_dir = published_dir(freeze_dict, configuration)
        real_pub_index = os.path.join(real_pub_dir, public_archive_index)
        freeze_dict['PUBLISH_URL'] = published_url(freeze_dict, configuration)
    frozen_files = []
    logger.info("create_frozen_archive: save meta for %s" % freeze_id)
    try:
        dump(freeze_dict, os.path.join(frozen_dir, freeze_meta_filename))
    except Exception, err:
        logger.error("create_frozen_archive: failed: %s" % err)
        remove_rec(frozen_dir, configuration)
        return (False, 'Error writing frozen archive info: %s' % err)

    logger.info("create_frozen_archive: copy %s for %s" % \
                              (freeze_copy, freeze_id))
    for (real_source, rel_dst) in freeze_copy:
        freeze_path = os.path.join(frozen_dir, rel_dst)
        frozen_files.append(rel_dst)
        logger.debug("create_frozen_archive: copy %s" % freeze_path)
        if os.path.isdir(real_source):
            (status, msg) = copy_rec(real_source, freeze_path, configuration)
            if not status:
                logger.error("create_frozen_archive: failed: %s" % msg)
Example #32
def modify_share_links(action,
                       share_dict,
                       client_id,
                       configuration,
                       share_map=None):
    """Modify share links with given action and share_dict for client_id.
    In practice this is a shared helper to add or remove share links from the
    saved dictionary. The optional share_map argument can be used to pass an
    already loaded dictionary of saved share links to avoid reloading.
    """
    logger = configuration.logger
    share_id = share_dict['share_id']
    if share_map is None:
        (load_status, share_map) = load_share_links(configuration, client_id)
        if not load_status:
            logger.error("modify_share_links failed in load: %s" % share_map)
            return (load_status, share_map)

    share_dict.update(share_map.get(share_id, {}))
    rel_path = share_dict['path'].lstrip(os.sep)
    access = share_dict['access']
    if 'read' in access and 'write' in access:
        access_dir = 'read-write'
    elif 'read' in access:
        access_dir = 'read-only'
    elif 'write' in access:
        access_dir = 'write-only'
    else:
        logger.error("modify_share_links invalid access: %s" % access)
        return (False, share_map)
    symlink_path = os.path.join(configuration.sharelink_home, access_dir,
                                share_id)
    target_path = os.path.join(configuration.user_home,
                               client_id_dir(client_id), rel_path)
    if action == "create":
        if not make_symlink(target_path, symlink_path, configuration.logger,
                            False):
            logger.error("could not make share symlink: %s (already exists?)" %
                         symlink_path)
            return (False, share_map)
        share_dict.update({
            'created_timestamp': datetime.datetime.now(),
            'owner': client_id,
        })
        share_map[share_id] = share_dict
    elif action == "modify":
        if not make_symlink(target_path, symlink_path, configuration.logger,
                            True):
            logger.error("could not update share symlink: %s" % symlink_path)
            return (False, share_map)
        share_dict['created_timestamp'] = datetime.datetime.now()
        share_map[share_id].update(share_dict)
    elif action == "delete":
        if not delete_symlink(symlink_path, configuration.logger):
            logger.error("could not delete share symlink: %s (missing?)" %
                         symlink_path)
            return (False, share_map)
        del share_map[share_id]
    else:
        return (False, "Invalid action %s on share links" % action)

    try:
        sharelinks_path = os.path.join(configuration.user_settings,
                                       client_id_dir(client_id),
                                       sharelinks_filename)
        dump(share_map, sharelinks_path)
    except Exception, err:
        logger.error("modify_share_links failed: %s" % err)
        return (False, 'Error updating share links: %s' % err)