Пример #1
0
def refresh_job_stats(configuration, client_id):
    """Refresh the cached per-user job stats for client_id.

    Takes an exclusive lock on the pickled stats file, reloads it and then
    rescans the user's mRSL job directory for jobs that changed since the
    last refresh. Returns the (possibly updated) stats dictionary.
    """
    dirty = False
    client_dir = client_id_dir(client_id)
    job_base = os.path.join(configuration.mrsl_files_dir, client_dir)
    stats_base = os.path.join(configuration.user_cache, client_dir)
    stats_path = os.path.join(stats_base, "job-stats.pck")
    lock_path = stats_path + ".lock"

    try:
        os.makedirs(stats_base)
    except OSError:
        # Cache dir already exists or cannot be created - load/dump below
        # will surface any real problem
        pass

    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)

    job_stats = {PARSE: 0, QUEUED: 0, EXECUTING: 0, FINISHED: 0, RETRY: 0,
                 CANCELED: 0, EXPIRED: 0, FAILED: 0, FROZEN: 0}
    try:
        stats = load(stats_path)
        stats_stamp = os.path.getmtime(stats_path)
        # Backwards compatible update
        job_stats.update(stats[JOBS])
        stats[JOBS] = job_stats
    except IOError:
        configuration.logger.warn("No job stats to load - ok first time")
        stats = {JOBS: job_stats}
        stats_stamp = -1

    now = time.time()
    # Cache still fresh - skip the expensive directory scan
    if now < stats_stamp + JOB_REFRESH_DELAY:
        lock_handle.close()
        return stats

    # Inspect all jobs in user job dir and update the ones that changed
    # since last stats run
    for name in os.listdir(job_base):
        # Jobs in a final state never change again
        if name in stats and stats[name]["STATUS"] in FINAL_STATES:
            continue

        job_path = os.path.join(job_base, name)
        job_stamp = os.path.getmtime(job_path)

        if name in stats and job_stamp < stats_stamp:
            continue

        dirty = True
        job = load(job_path)
        update_job_stats(stats, name, job)

    if dirty:
        try:
            dump(stats, stats_path)
            stats_stamp = os.path.getmtime(stats_path)
        except Exception as exc:
            configuration.logger.error("Could not save stats cache: %s" % exc)
    # NOTE(review): the scraped original ended above without releasing the
    # lock or returning stats; this mirrors the early-return path - confirm
    # against upstream.
    lock_handle.close()
    return stats
Пример #2
0
def vgrid_add_entities(configuration, vgrid_name, kind, id_list):
    """Append list of IDs to pickled list of kind for vgrid_name.

    IDs already present are skipped so the stored list stays duplicate-free.
    Returns a (status, message) tuple.
    """
    # Map the supported entity kinds to their configured file names
    if kind == 'owners':
        entity_filename = configuration.vgrid_owners
    elif kind == 'members':
        entity_filename = configuration.vgrid_members
    elif kind == 'resources':
        entity_filename = configuration.vgrid_resources
    elif kind == 'triggers':
        entity_filename = configuration.vgrid_triggers
    else:
        return (False, "vgrid_add_entities: unknown kind: '%s'" % kind)

    entity_filepath = os.path.join(configuration.vgrid_home, vgrid_name,
                                   entity_filename)
    try:
        if os.path.exists(entity_filepath):
            entities = load(entity_filepath)
        else:
            # First entity of this kind for the vgrid - start from scratch
            entities = []
            log_msg = "creating missing file: '%s'" % (entity_filepath)
            configuration.logger.info(log_msg)

        # Only append IDs not already present to avoid duplicates
        entities += [i for i in id_list if not i in entities]
        dump(entities, entity_filepath)
        mark_vgrid_modified(configuration, vgrid_name)
        return (True, '')
    except Exception as exc:
        return (False, "could not add %s for %s: %s" % (kind, vgrid_name, exc))
Пример #3
0
def get_re_dict(name, configuration):
    """Load the pickled runtime environment *name* from re_home."""
    re_dict = load(os.path.join(configuration.re_home, name))
    if re_dict:
        return (re_dict, '')
    return (False, 'Could not open runtime environment %s' % name)
Пример #4
0
def load_re_map(configuration, do_lock=True):
    """Load map of runtime environments. Uses a pickled dictionary for
    efficiency. The do_lock option is used to enable and disable locking
    during load.
    Returns tuple with map and time stamp of last map modification.
    Please note that time stamp is explicitly set to start of last update
    to make sure any concurrent updates get caught in next run.
    """
    map_path = os.path.join(configuration.mig_system_files, "runtimeenvs.map")
    lock_path = map_path.replace('.map', '.lock')
    lock_handle = None
    if do_lock:
        lock_handle = open(lock_path, 'a')
        fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    try:
        configuration.logger.info("before re map load")
        re_map = load(map_path)
        configuration.logger.info("after re map load")
        map_stamp = os.path.getmtime(map_path)
    except IOError:
        configuration.logger.warning("No re map to load")
        re_map = {}
        map_stamp = -1
    finally:
        # Always release the lock, even if load raises something unexpected
        if lock_handle:
            lock_handle.close()
    return (re_map, map_stamp)
Пример #5
0
def load_entity_map(configuration, kind, do_lock):
    """Load map of given entities and their configuration. Uses a pickled
    dictionary for efficiency. The do_lock option is used to enable and
    disable locking during load.
    Entity IDs are stored in their raw (non-anonymized form).
    Returns tuple with map and time stamp of last map modification.
    """
    # NOTE: dropped unused home_paths() lookup present in the original
    map_path = os.path.join(configuration.mig_system_files, "%s.map" % kind)
    lock_path = os.path.join(configuration.mig_system_files, "%s.lock" % kind)
    lock_handle = None
    if do_lock:
        lock_handle = open(lock_path, 'a')
        fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    try:
        configuration.logger.info("before %s map load" % kind)
        entity_map = load(map_path)
        configuration.logger.info("after %s map load" % kind)
        map_stamp = os.path.getmtime(map_path)
    except IOError:
        configuration.logger.warn("No %s map to load" % kind)
        entity_map = {}
        map_stamp = -1
    finally:
        # Always release the lock, even if load raises something unexpected
        if lock_handle:
            lock_handle.close()
    return (entity_map, map_stamp)
Пример #6
0
def load_system_map(configuration, kind, do_lock):
    """Load map of given kind and their configuration from the
    mig_system_files directory.
    Here the kind maps to what the mapfile is named.
    Uses a pickled dictionary for efficiency.
    The do_lock option is used to enable and disable locking during load.
    Returns tuple with map and time stamp of last map modification.
    Please note that time stamp is explicitly set to start of last update
    to make sure any concurrent updates get caught in next run.
    """
    map_path = os.path.join(configuration.mig_system_files, "%s.map" % kind)
    lock_path = os.path.join(configuration.mig_system_files, "%s.lock" % kind)
    lock_handle = None
    if do_lock:
        lock_handle = open(lock_path, 'a')
        fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    try:
        entity_map = load(map_path)
        map_stamp = os.path.getmtime(map_path)
    except IOError:
        configuration.logger.warn("No %s map to load" % kind)
        entity_map = {}
        map_stamp = -1
    finally:
        # Always release the lock, even if load raises something unexpected
        if lock_handle:
            lock_handle.close()
    return (entity_map, map_stamp)
Пример #7
0
def update_pickled_dict(path, changes):
    """Update pickled dictionary on disk with provided changes"""
    current = load(path)
    current.update(changes)
    dump(current, path)
    return current
Пример #8
0
def get_re_dict(name, configuration):
    """Helper to extract a saved runtime environment."""
    re_path = os.path.join(configuration.re_home, name)
    re_dict = load(re_path)
    if not re_dict:
        return (False, 'Could not open runtime environment %s' % name)
    return (re_dict, '')
Пример #9
0
def update_pickled_dict(path, changes):
    """Merge changes into the pickled dictionary stored at path and save."""
    on_disk = load(path)
    on_disk.update(changes)
    dump(on_disk, path)
    return on_disk
Пример #10
0
def filter_pickled_list(path, changes):
    """Filter pickled list on disk with provided changes where changes is a
    dictionary mapping existing list entries and the value to replace it with.
    """
    entries = load(path)
    rewritten = []
    for entry in entries:
        # Keep entry as-is unless a replacement is mapped for it
        rewritten.append(changes.get(entry, entry))
    dump(rewritten, path)
    return rewritten
Пример #11
0
def filter_pickled_list(path, changes):
    """Rewrite the pickled list at path, substituting entries found in the
    changes mapping with their replacement values.
    """
    saved = load(path)
    updated = [changes.get(item, item) for item in saved]
    dump(updated, path)
    return updated
Пример #12
0
def get_account_req(req_id, configuration):
    """Fetch the pending account request req_id as a dictionary."""
    req_path = os.path.join(configuration.user_pending, req_id)
    req_dict = load(req_path)
    if not req_dict:
        return (False, 'Could not open account request %s' % req_id)
    # Annotate with ID and creation time from the file itself
    req_dict['id'] = req_id
    req_dict['created'] = os.path.getctime(req_path)
    return (True, req_dict)
Пример #13
0
def load_sandbox_db(configuration=None):
    """Read in the sandbox DB dictionary:
    Format is {username: (password, [list_of_resources])}
    """
    active_conf = configuration or get_configuration_object()
    db_path = os.path.join(active_conf.sandbox_home, sandbox_db_name)
    return load(db_path)
Пример #14
0
def get_frozen_meta(freeze_id, configuration):
    """Fetch the metadata dictionary for the frozen archive freeze_id."""
    meta_path = os.path.join(configuration.freeze_home, freeze_id,
                             freeze_meta_filename)
    freeze_dict = load(meta_path)
    if freeze_dict:
        return (True, freeze_dict)
    return (False, 'Could not open metadata for frozen archive %s' %
            freeze_id)
Пример #15
0
def get_cert_req(req_id, configuration):
    """Fetch the pending certificate request req_id as a dictionary."""
    req_path = os.path.join(configuration.user_pending, req_id)
    req_dict = load(req_path)
    if not req_dict:
        return (False, 'Could not open certificate request %s' % req_id)
    # Annotate with ID and creation time from the file itself
    req_dict['id'] = req_id
    req_dict['created'] = os.path.getctime(req_path)
    return (True, req_dict)
Пример #16
0
def resource_owners(configuration, unique_resource_name):
    """Load list of resource owners for unique_resource_name.

    Returns (True, owners) on success or (False, error message) on failure.
    """
    owners_file = os.path.join(configuration.resource_home,
                               unique_resource_name, 'owners')
    try:
        owners = load(owners_file)
        return (True, owners)
    except Exception as exc:
        return (False, "could not load owners for %s: %s" % \
                (unique_resource_name, exc))
Пример #17
0
def unpickle(path, logger, allow_missing=False):
    """Unpack pickled object in path.

    Returns the loaded object, or False on failure. With allow_missing the
    failure is considered expected and is not logged as an error.
    """
    try:
        data_object = load(path)
        logger.debug('%s was unpickled successfully' % path)
        return data_object
    except Exception as err:
        if not allow_missing:
            logger.error('%s could not be opened/unpickled! %s' % (path, err))
        return False
Пример #18
0
def resource_owners(configuration, unique_resource_name):
    """Load list of resource owners for unique_resource_name.

    Returns (True, owners) on success or (False, error message) on failure.
    """
    owners_file = os.path.join(configuration.resource_home,
                               unique_resource_name, 'owners')
    try:
        owners = load(owners_file)
        return (True, owners)
    except Exception as exc:
        return (False, "could not load owners for %s: %s" % \
                (unique_resource_name, exc))
Пример #19
0
def unpickle(path, logger):
    """Unpack pickled object in path.

    Returns the loaded object, or False on failure (logged as an error).
    """
    try:
        job_dict = load(path)
        logger.debug('%s was unpickled successfully' % path)
        return job_dict
    except Exception as err:
        logger.error('%s could not be opened/unpickled! %s'
                      % (path, err))
        return False
Пример #20
0
def load_sandbox_db(configuration=None):
    """Read in the sandbox DB dictionary:
    Format is {username: (password, [list_of_resources])}
    """
    if configuration is None or not configuration:
        configuration = get_configuration_object()
    db_location = os.path.join(configuration.sandbox_home,
                               sandbox_db_name)
    return load(db_location)
Пример #21
0
def filter_pickled_dict(path, changes):
    """Filter pickled dictionary on disk with provided changes where changes
    is a dictionary mapping existing dictionary values to a value to replace
    it with.
    """
    saved_dict = load(path)
    for (key, val) in saved_dict.items():
        # Direct membership test instead of rebuilding changes.keys() on
        # every iteration
        if val in changes:
            saved_dict[key] = changes[val]
    dump(saved_dict, path)
    return saved_dict
Пример #22
0
def filter_pickled_dict(path, changes):
    """Filter pickled dictionary on disk with provided changes where changes
    is a dictionary mapping existing dictionary values to a value to replace
    it with.
    """
    saved_dict = load(path)
    for (key, val) in saved_dict.items():
        # Direct membership test instead of rebuilding changes.keys() on
        # every iteration
        if val in changes:
            saved_dict[key] = changes[val]
    dump(saved_dict, path)
    return saved_dict
Пример #23
0
def load_json(path, logger, allow_missing=False, convert_utf8=True):
    """Unpack json object in path.

    Returns the loaded object (optionally recursively forced to utf8), or
    False on failure. With allow_missing the failure is expected and not
    logged as an error.
    """
    try:
        data_object = load(path, serializer='json')
        logger.debug('%s was loaded successfully' % path)
        if convert_utf8:
            data_object = force_utf8_rec(data_object)
        return data_object
    except Exception as err:
        if not allow_missing:
            logger.error('%s could not be opened/loaded! %s' % (path, err))
        return False
Пример #24
0
def resource_add_owners(configuration, unique_resource_name, clients):
    """Append list of clients to pickled list of resource owners.

    Clients already registered as owners are skipped. Returns a
    (status, message) tuple.
    """
    owners_file = os.path.join(configuration.resource_home,
                               unique_resource_name, 'owners')
    try:
        owners = load(owners_file)
        # Only append clients not already present to avoid duplicates
        owners += [i for i in clients if not i in owners]
        dump(owners, owners_file)
        mark_resource_modified(configuration, unique_resource_name)
        return (True, '')
    except Exception as exc:
        return (False, "could not add owners for %s: %s" % \
                (unique_resource_name, exc))
Пример #25
0
def resource_add_owners(configuration, unique_resource_name, clients):
    """Append list of clients to pickled list of resource owners.

    Clients already registered as owners are skipped. Returns a
    (status, message) tuple.
    """
    owners_file = os.path.join(configuration.resource_home,
                               unique_resource_name, 'owners')
    try:
        owners = load(owners_file)
        # Only append clients not already present to avoid duplicates
        owners += [i for i in clients if not i in owners]
        dump(owners, owners_file)
        mark_resource_modified(configuration, unique_resource_name)
        return (True, '')
    except Exception as exc:
        return (False, "could not add owners for %s: %s" % \
                (unique_resource_name, exc))
Пример #26
0
def is_user(entity_id, mig_server_home):
    """Check if user exists in database"""

    result = False

    db_path = os.path.join(mig_server_home, user_db_filename)
    try:
        user_db = load(db_path)
        if entity_id in user_db:
            result = True
    except Exception:
        # Missing or unreadable user DB - treat as no such user
        pass

    return result
Пример #27
0
def is_user(entity_id, mig_server_home):
    """Check if user exists in database"""

    result = False

    db_path = os.path.join(mig_server_home, user_db_filename)
    try:
        user_db = load(db_path)
        if entity_id in user_db:
            result = True
    except Exception:
        # Missing or unreadable user DB - treat as no such user
        pass

    return result
Пример #28
0
def load_access_request(configuration, request_dir, req_name):
    """Load request req_name with predefined file extension for given
    request_dir.

    Returns the loaded request, or None on failure (logged as an error).
    """
    request = None
    req_path = os.path.join(request_dir, req_name)
    try:
        # Reject names outside the expected request file naming scheme
        if not req_name.startswith(request_prefix) or \
               not req_name.endswith(request_ext):
            raise ValueError("invalid request name: %s" % req_name)
        request = load(req_path)
    except Exception as err:
        configuration.logger.error("could not load request in %s: %s" % \
                                   (req_path, err))
    # NOTE(review): the scraped original ended without a return statement;
    # returning request (None on failure) matches the init above - confirm
    # against upstream.
    return request
Пример #29
0
def load_data_transfers(configuration, client_id):
    """Find all data transfers owned by user.

    Returns (True, transfers dict) on success or (False, error message).
    """
    logger = configuration.logger
    logger.debug("load transfers for %s" % client_id)
    try:
        transfers_path = os.path.join(configuration.user_settings,
                                      client_id_dir(client_id),
                                      datatransfers_filename)
        logger.debug("load transfers from %s" % transfers_path)
        if os.path.isfile(transfers_path):
            transfers = load(transfers_path)
        else:
            # No saved transfers yet - start with an empty dict
            transfers = {}
    except Exception as exc:
        return (False, "could not load saved data transfers: %s" % exc)
    # NOTE(review): the scraped original ended without a success return;
    # returning the loaded dict mirrors the (status, value) error path -
    # confirm against upstream.
    return (True, transfers)
Пример #30
0
def load_share_links(configuration, client_id):
    """Find all share links owned by user.

    Returns (True, sharelinks dict) on success or (False, error message).
    """
    logger = configuration.logger
    logger.debug("load share links for %s" % client_id)
    try:
        sharelinks_path = os.path.join(configuration.user_settings,
                                       client_id_dir(client_id),
                                       sharelinks_filename)
        logger.debug("load sharelinks from %s" % sharelinks_path)
        if os.path.isfile(sharelinks_path):
            sharelinks = load(sharelinks_path)
        else:
            # No saved share links yet - start with an empty dict
            sharelinks = {}
    except Exception as exc:
        return (False, "could not load saved share links: %s" % exc)
    # NOTE(review): the scraped original ended without a success return;
    # returning the loaded dict mirrors the (status, value) error path -
    # confirm against upstream.
    return (True, sharelinks)
Пример #31
0
def check_entities_modified(configuration, kind):
    """Check and return any name of given kind that are marked as modified
    along with a time stamp for the latest modification.
    """
    home_map = home_paths(configuration)
    modified_path = os.path.join(home_map[kind], "%s.modified" % kind)
    lock_path = os.path.join(configuration.mig_system_files, "%s.lock" % kind)
    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    try:
        modified_list = load(modified_path)
        modified_stamp = os.path.getmtime(modified_path)
    except Exception:
        # No modified list - probably first time so force update
        modified_list = ['UNKNOWN']
        modified_stamp = time.time()
    # NOTE(review): the scraped original ended without releasing the lock or
    # returning a value; this matches the obvious intent - confirm upstream.
    lock_handle.close()
    return (modified_list, modified_stamp)
Пример #32
0
def vgrid_remove_entities(configuration, vgrid_name, kind, id_list,
                          allow_empty, dict_field=False):
    """Remove list of IDs from pickled list of kind for vgrid_name.
    The allow_empty argument can be used to prevent removal of e.g. the last
    owner.
    Use the dict_field if the entries are dictionaries and the id_list should
    be matched against dict_field in each of them.
    """
    # Map the supported entity kinds to their configured file names
    if kind == 'owners':
        entity_filename = configuration.vgrid_owners
    elif kind == 'members':
        entity_filename = configuration.vgrid_members
    elif kind == 'resources':
        entity_filename = configuration.vgrid_resources
    elif kind == 'triggers':
        entity_filename = configuration.vgrid_triggers
    else:
        return (False, "vgrid_remove_entities: unknown kind: '%s'" % kind)

    entity_filepath = os.path.join(configuration.vgrid_home, vgrid_name,
                                   entity_filename)

    # Force raw string to list to avoid nasty silent substring matching below
    # I.e. removing abc.def.0 would also remove def.0

    if isinstance(id_list, basestring):
        id_list = [id_list]

    try:
        entities = load(entity_filepath)
        if dict_field:
            # Entries are dicts - match id_list against the given field
            entities = [i for i in entities if not i[dict_field] in id_list]
        else:
            entities = [i for i in entities if not i in id_list]
        if not entities and not allow_empty:
            raise ValueError("not allowed to remove last entry of %s" % kind)
        dump(entities, entity_filepath)
        mark_vgrid_modified(configuration, vgrid_name)
        return (True, '')
    except Exception as exc:
        return (False, "could not remove %s for %s: %s" % (kind, vgrid_name,
                                                           exc))
Пример #33
0
def resource_remove_owners(configuration, unique_resource_name, clients,
                           allow_empty=False):
    """Remove list of clients from pickled list of resource owners. The
    optional allow_empty option is used to prevent or allow removal of last
    owner.
    """
    owners_file = os.path.join(configuration.resource_home,
                               unique_resource_name, 'owners')
    try:
        owners = load(owners_file)
        owners = [i for i in owners if not i in clients]
        if not owners and not allow_empty:
            raise ValueError("not allowed to remove last owner")
        dump(owners, owners_file)
        mark_resource_modified(configuration, unique_resource_name)
        return (True, '')
    except Exception as exc:
        return (False, "could not remove owners for %s: %s" % \
                (unique_resource_name, exc))
Пример #34
0
def check_entities_modified(configuration, kind):
    """Check and return any name of given kind that are marked as modified
    along with a time stamp for the latest modification.
    """
    modified_path = os.path.join(configuration.mig_system_files,
                                 "%s.modified" % kind)
    map_path = os.path.join(configuration.mig_system_files, "%s.map" % kind)
    lock_path = os.path.join(configuration.mig_system_files, "%s.lock" % kind)
    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    try:
        # Missing map means a fresh install - force a full update below
        if not os.path.isfile(map_path):
            configuration.logger.warning("%s map doesn't exist, new install?" \
                                         % kind)
            raise Exception("%s map does not exist" % kind)
        modified_list = load(modified_path)
        modified_stamp = os.path.getmtime(modified_path)
    except Exception:
        # No modified list - probably first time so force update
        modified_list = [keyword_all]
        modified_stamp = time.time()
    # NOTE(review): the scraped original ended without releasing the lock or
    # returning a value; this matches the obvious intent - confirm upstream.
    lock_handle.close()
    return (modified_list, modified_stamp)
Пример #35
0
def mark_entity_modified(configuration, kind, name):
    """Mark name of given kind modified to signal reload before use from other
    locations.
    """
    modified_path = os.path.join(configuration.mig_system_files,
                                 "%s.modified" % kind)
    lock_path = os.path.join(configuration.mig_system_files, "%s.lock" % kind)
    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    try:
        if os.path.exists(modified_path):
            modified_list = load(modified_path)
        else:
            modified_list = []
        # Avoid duplicate marks for the same name
        if not name in modified_list:
            modified_list.append(name)
        dump(modified_list, modified_path)
    except Exception as exc:
        configuration.logger.error("Could not update %s modified mark: %s" % \
                                   (kind, exc))
    # NOTE(review): the scraped original never released the lock; closing the
    # handle here matches the locking pattern elsewhere - confirm upstream.
    lock_handle.close()
Пример #36
0
def mark_entity_modified(configuration, kind, name):
    """Mark name of given kind modified to signal reload before use from other
    locations.
    """
    home_map = home_paths(configuration)
    modified_path = os.path.join(home_map[kind], "%s.modified" % kind)
    lock_path = os.path.join(configuration.mig_system_files, "%s.lock" % kind)
    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    try:
        if os.path.exists(modified_path):
            modified_list = load(modified_path)
        else:
            modified_list = []
        # Avoid duplicate marks for the same name
        if not name in modified_list:
            modified_list.append(name)
        dump(modified_list, modified_path)
    except Exception as exc:
        configuration.logger.error("Could not update %s modified mark: %s" % \
                                   (kind, exc))
    # NOTE(review): the scraped original never released the lock; closing the
    # handle here matches the locking pattern elsewhere - confirm upstream.
    lock_handle.close()
Пример #37
0
def resource_remove_owners(configuration,
                           unique_resource_name,
                           clients,
                           allow_empty=False):
    """Remove list of clients from pickled list of resource owners. The
    optional allow_empty option is used to prevent or allow removal of last
    owner.
    """
    owners_file = os.path.join(configuration.resource_home,
                               unique_resource_name, 'owners')
    try:
        owners = load(owners_file)
        owners = [i for i in owners if not i in clients]
        if not owners and not allow_empty:
            raise ValueError("not allowed to remove last owner")
        dump(owners, owners_file)
        mark_resource_modified(configuration, unique_resource_name)
        return (True, '')
    except Exception as exc:
        return (False, "could not remove owners for %s: %s" % \
                (unique_resource_name, exc))
Пример #38
0
def update_runtimeenv_owner(re_name, old_owner, new_owner, configuration):
    """Update owner on an existing runtime environment if existing owner
    matches old_owner.
    """
    status, msg = True, ""
    # Lock the access to the runtime env files, so that edit is done
    # with exclusive access.
    lock_path = os.path.join(configuration.re_home, WRITE_LOCK)
    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    re_filename = os.path.join(configuration.re_home, re_name)
    try:
        re_dict = load(re_filename)
        if re_dict['CREATOR'] == old_owner:
            re_dict['CREATOR'] = new_owner
            dump(re_dict, re_filename)
        else:
            # Current owner does not match - refuse the update
            status = False
    except Exception as err:
        msg = "Failed to edit owner of runtime enviroment '%s': %s" % \
              (re_name, err)
        configuration.logger.warning(msg)
        status = False
    # NOTE(review): the scraped original ended without releasing the lock or
    # returning; returning (status, msg) matches the init above - confirm
    # against upstream.
    lock_handle.close()
    return (status, msg)
Пример #39
0
def update_runtimeenv_owner(re_name, old_owner, new_owner, configuration):
    """Update owner on an existing runtime environment if existing owner
    matches old_owner.
    """
    status, msg = True, ""
    # Lock the access to the runtime env files, so that edit is done
    # with exclusive access.
    lock_path = os.path.join(configuration.re_home, WRITE_LOCK)
    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    re_filename = os.path.join(configuration.re_home, re_name)
    try:
        re_dict = load(re_filename)
        if re_dict['CREATOR'] == old_owner:
            re_dict['CREATOR'] = new_owner
            dump(re_dict, re_filename)
            mark_re_modified(configuration, re_name)
        else:
            # Current owner does not match - refuse the update
            status = False
    except Exception as err:
        msg = "Failed to edit owner of runtime enviroment '%s': %s" % \
              (re_name, err)
        configuration.logger.warning(msg)
        status = False
    # NOTE(review): the scraped original ended without releasing the lock or
    # returning; returning (status, msg) matches the init above - confirm
    # against upstream.
    lock_handle.close()
    return (status, msg)
Пример #40
0
def refresh_disk_stats(configuration, client_id):
    """Refresh the cached disk use stats for client_id.

    Takes an exclusive lock on the pickled stats file, then walks the user
    home dir (and vgrid symlinks separately, since os.walk does not follow
    symlinks) to update any entries that changed since the cached time
    stamp. Returns the stats dictionary.
    """
    dirty = False
    client_dir = client_id_dir(client_id)
    user_base = os.path.join(configuration.user_home, client_dir)
    stats_base = os.path.join(configuration.user_cache, client_dir)
    stats_path = os.path.join(stats_base, "disk-stats.pck")
    lock_path = stats_path + ".lock"

    try:
        os.makedirs(stats_base)
    except OSError:
        # Cache dir already exists or cannot be created - load/dump below
        # will surface any real problem
        pass

    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)

    try:
        stats = load(stats_path)
        stats_stamp = os.path.getmtime(stats_path)
    except IOError:
        configuration.logger.warn("No disk stats to load - ok first time")
        stats = {OWN: {FILES: 0, DIRECTORIES: 0, BYTES: 0},
                 VGRID: {FILES: 0, DIRECTORIES: 0, BYTES: 0}}
        stats_stamp = -1

    now = time.time()
    # Cache still fresh - skip the expensive walk
    if now < stats_stamp + DISK_REFRESH_DELAY:
        lock_handle.close()
        return stats

    # Walk entire home dir and update any parts that changed
    # Please note that walk doesn't follow symlinks so we have
    # to additionally walk vgrid dir symlinks explicitly
    cur_roots = []
    vgrid_dirs = []
    total = OWN
    for (root, dirs, files) in os.walk(user_base):
        rel_root = root.replace(user_base, '').lstrip(os.sep)
        cur_roots.append(rel_root)
        for dir_name in dirs:
            dir_path = os.path.join(root, dir_name)
            if os.path.islink(dir_path):
                vgrid_dirs.append(dir_path)

        # Directory and contents unchanged - ignore

        if rel_root in stats and \
               not contents_changed(root, files, stats_stamp):
            continue

        dirty = True

        update_disk_stats(stats, root, rel_root, dirs, files, total)

    # Now walk vgrid dir symlinks explicitly
    total = VGRID
    for vgrid_base in vgrid_dirs:
        for (root, dirs, files) in os.walk(vgrid_base):
            # Still use path relative to user base!
            rel_root = root.replace(user_base, '').lstrip(os.sep)
            cur_roots.append(rel_root)

            # Directory and contents unchanged - ignore

            if rel_root in stats and \
                   not contents_changed(root, files, stats_stamp):
                continue

            dirty = True

            update_disk_stats(stats, root, rel_root, dirs, files, total)

    # Update stats for any roots no longer there

    # Copy keys to a list since entries are deleted during iteration
    for rel_root in list(stats.keys()):
        if rel_root in list(TOTALS) + cur_roots:
            continue
        total = stats[rel_root][KIND]
        stats[total][FILES] -= stats[rel_root][FILES]
        stats[total][DIRECTORIES] -= stats[rel_root][DIRECTORIES]
        stats[total][BYTES] -= stats[rel_root][BYTES]
        del stats[rel_root]
        dirty = True

    if dirty:
        try:
            dump(stats, stats_path)
            stats_stamp = os.path.getmtime(stats_path)
        except Exception as exc:
            configuration.logger.error("Could not save stats cache: %s" % exc)
    # NOTE(review): the scraped original ended above without releasing the
    # lock or returning stats; this mirrors the early-return path - confirm
    # against upstream.
    lock_handle.close()
    return stats
Пример #41
0
def refresh_job_stats(configuration, client_id):
    """Refresh job stats for specified user"""
    _logger = configuration.logger
    dirty = False
    client_dir = client_id_dir(client_id)
    job_base = os.path.join(configuration.mrsl_files_dir, client_dir)
    stats_base = os.path.join(configuration.user_cache, client_dir)
    stats_path = os.path.join(stats_base, "job-stats.pck")
    lock_path = stats_path + ".lock"

    # Make sure the per-user cache dir exists; any error here shows up in
    # the load/dump calls below
    try:
        os.makedirs(stats_base)
    except:
        pass

    # Exclusive lock around the whole refresh so concurrent refreshers
    # serialize on the stats file
    lock_handle = open(lock_path, 'a')

    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)

    # Zeroed counters for every known job state
    job_stats = {
        PARSE: 0,
        QUEUED: 0,
        EXECUTING: 0,
        FINISHED: 0,
        RETRY: 0,
        CANCELED: 0,
        EXPIRED: 0,
        FAILED: 0,
        FROZEN: 0
    }
    try:
        stats = load(stats_path)
        stats_stamp = os.path.getmtime(stats_path)
        # Backwards compatible update
        job_stats.update(stats[JOBS])
        stats[JOBS] = job_stats
    except IOError:
        _logger.warning("No job stats to load - ok first time")
        stats = {JOBS: job_stats}
        stats_stamp = -1

    now = time.time()
    # Cache still fresh - skip the expensive rescan
    if now < stats_stamp + JOB_REFRESH_DELAY:
        lock_handle.close()
        return stats

    # Inspect all jobs in user job dir and update the ones that changed
    # since last stats run
    for name in os.listdir(job_base):
        # Jobs in a final state never change again
        if stats.has_key(name) and stats[name]["STATUS"] in FINAL_STATES:
            continue

        job_path = os.path.join(job_base, name)
        try:
            job_stamp = os.path.getmtime(job_path)
        except Exception, exc:
            _logger.warning("getmtime failed on %s: %s" % (job_path, exc))
            job_stamp = -1

        # Known job unchanged since last refresh - skip it
        if stats.has_key(name) and job_stamp < stats_stamp:
            continue

        dirty = True
        try:
            job = load(job_path)
        except Exception, exc:
            _logger.warning("unpickle failed on %s: %s" % (job_path, exc))
            continue
        # NOTE(review): the scraped source is truncated here - the loaded
        # job is never applied to stats in this view; confirm against
        # upstream before relying on this copy.
Пример #42
0
def refresh_disk_stats(configuration, client_id):
    """Refresh disk use stats for specified user.

    Loads the pickled per-user disk stats cache ("disk-stats.pck" in
    the user_cache dir) under an exclusive flock and returns it
    unchanged if it was saved within the last DISK_REFRESH_DELAY
    seconds. Otherwise walks the user home dir - and vgrid dir symlinks
    explicitly, since os.walk does not follow symlinks - updating the
    entries for any directories whose contents changed since the last
    cache save, and dropping entries for directories no longer present.
    """
    _logger = configuration.logger
    dirty = False
    client_dir = client_id_dir(client_id)
    user_base = os.path.join(configuration.user_home, client_dir)
    stats_base = os.path.join(configuration.user_cache, client_dir)
    stats_path = os.path.join(stats_base, "disk-stats.pck")
    lock_path = stats_path + ".lock"

    # Best-effort create of the cache dir - it typically exists already
    try:
        os.makedirs(stats_base)
    except:
        pass

    # Exclusive lock serializes cache refresh across processes
    lock_handle = open(lock_path, 'a')

    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)

    try:
        stats = load(stats_path)
        stats_stamp = os.path.getmtime(stats_path)
    except IOError:
        _logger.warning("No disk stats to load - ok first time")
        # Separate zeroed totals for directly owned data (OWN) and data
        # reached through vgrid symlinks (VGRID)
        stats = {
            OWN: {
                FILES: 0,
                DIRECTORIES: 0,
                BYTES: 0
            },
            VGRID: {
                FILES: 0,
                DIRECTORIES: 0,
                BYTES: 0
            }
        }
        # Force a full refresh below on first run
        stats_stamp = -1

    # Throttle: reuse recently saved stats without walking the home dir
    now = time.time()
    if now < stats_stamp + DISK_REFRESH_DELAY:
        lock_handle.close()
        return stats

    # Walk entire home dir and update any parts that changed
    # Please note that walk doesn't follow symlinks so we have
    # to additionally walk vgrid dir symlinks explicitly
    cur_roots = []
    vgrid_dirs = []
    total = OWN
    for (root, dirs, files) in os.walk(user_base):
        # Record every visited root (relative to user home) so that the
        # stale-entry cleanup further down knows which entries survive
        rel_root = root.replace(user_base, '').lstrip(os.sep)
        cur_roots.append(rel_root)
        for dir_name in dirs:
            dir_path = os.path.join(root, dir_name)
            # Symlinked dirs are vgrid shares - queue them for the
            # explicit second walk below
            if os.path.islink(dir_path):
                vgrid_dirs.append(dir_path)

        # Directory and contents unchanged - ignore

        if stats.has_key(rel_root) and \
                not contents_changed(configuration, root, files, stats_stamp):
            continue

        dirty = True

        update_disk_stats(configuration, stats, root, rel_root, dirs, files,
                          total)

    # Now walk vgrid dir symlinks explicitly
    total = VGRID
    for vgrid_base in vgrid_dirs:
        for (root, dirs, files) in os.walk(vgrid_base):
            # Still use path relative to user base!
            rel_root = root.replace(user_base, '').lstrip(os.sep)
            cur_roots.append(rel_root)

            # Directory and contents unchanged - ignore

            if stats.has_key(rel_root) and \
                not contents_changed(configuration, root, files,
                                     stats_stamp):
                continue

            dirty = True

            update_disk_stats(configuration, stats, root, rel_root, dirs,
                              files, total)

    # Update stats for any roots no longer there

    for rel_root in stats.keys():
        if rel_root in list(TOTALS) + cur_roots:
            continue
        root = os.path.join(user_base, rel_root)
        # NOTE: legacy stats may lack KIND field - just ignore and delete
        total = stats[rel_root].get(KIND, None)
        if total:
            # Subtract the removed tree from the matching aggregate totals
            stats[total][FILES] -= stats[rel_root][FILES]
            stats[total][DIRECTORIES] -= stats[rel_root][DIRECTORIES]
            stats[total][BYTES] -= stats[rel_root][BYTES]
        else:
            _logger.warning("Ignoring outdated stat entry for %s: %s" %
                            (root, stats[rel_root]))
        del stats[rel_root]
        dirty = True

    if dirty:
        try:
            dump(stats, stats_path)
            stats_stamp = os.path.getmtime(stats_path)
        except Exception, exc:
            _logger.error("Could not save stats cache: %s" % exc)
# Example #43 (snippet separator artifact)
# 0
     try:
         user_dict['full_name'] = args[0]
         user_dict['organization'] = args[1]
         user_dict['state'] = args[2]
         user_dict['country'] = args[3]
         user_dict['email'] = args[4]
         user_dict['comment'] = args[5]
         user_dict['password'] = args[6]
     except IndexError:
         print 'Error: too few arguments given (expected 7 got %d)'\
              % len(args)
         usage()
         sys.exit(1)
 elif user_file:
     try:
         user_dict = load(user_file)
     except Exception, err:
         print 'Error in user name extraction: %s' % err
         usage()
         sys.exit(1)
 elif default_renew and user_id:
     saved = load_user_dict(user_id, db_path, verbose)
     if not saved:
         print 'Error: no such user in user db: %s' % user_id
         usage()
         sys.exit(1)
     user_dict.update(saved)
     del user_dict['expire']
 else:
     if verbose:
         print '''Entering interactive mode
# Example #44 (snippet separator artifact)
# 0
def refresh_resource_map(configuration):
    """Refresh map of resources and their configuration. Uses a pickled
    dictionary for efficiency.
    Resource IDs are stored in their raw (non-anonymized form).
    Only update map for resources that updated conf after last map save.
    """
    # Collect IDs of entries that changed so we only save when needed
    dirty = []
    map_path = os.path.join(configuration.mig_system_files, "resource.map")
    lock_path = os.path.join(configuration.mig_system_files, "resource.lock")
    # Exclusive lock serializes map refresh across processes; loader is
    # told not to lock again to avoid dead-locking on the same file
    lock_handle = open(lock_path, 'a')
    fcntl.flock(lock_handle.fileno(), fcntl.LOCK_EX)
    resource_map, map_stamp = load_resource_map(configuration, do_lock=False)

    # Find all resources and their configurations
    
    all_resources = list_resources(configuration.resource_home,
                                   only_valid=True)
    # Map from raw resource IDs to their anonymized public aliases
    real_map = real_to_anon_res_map(configuration.resource_home)
    for res in all_resources:
        # Sandboxes do not change their configuration
        if resource_map.has_key(res) and sandbox_resource(res):
            continue
        conf_path = os.path.join(configuration.resource_home, res, "config")
        if not os.path.isfile(conf_path):
            continue
        conf_mtime = os.path.getmtime(conf_path)
        owners_path = os.path.join(configuration.resource_home, res, "owners")
        if not os.path.isfile(owners_path):
            continue
        owners_mtime = os.path.getmtime(owners_path)
        # init first time
        resource_map[res] = resource_map.get(res, {})
        # Reload conf if missing from map or changed since last map save
        if not resource_map[res].has_key(CONF) or conf_mtime >= map_stamp:
            (status, res_conf) = get_resource_configuration(
                configuration.resource_home, res, configuration.logger)
            if not status:
                continue
            resource_map[res][CONF] = res_conf
            public_id = res
            # Expose the anonymized alias unless the resource opted out
            if res_conf.get('ANONYMOUS', True):
                public_id = real_map[res]
            resource_map[res][RESID] = public_id
            resource_map[res][MODTIME] = map_stamp
            dirty += [res]
        # Reload owners if missing from map or changed since last map save
        if not resource_map[res].has_key(OWNERS) or owners_mtime >= map_stamp:
            owners = load(owners_path)
            resource_map[res][OWNERS] = owners
            resource_map[res][MODTIME] = map_stamp
            dirty += [res]
    # Remove any missing resources from map
    missing_res = [res for res in resource_map.keys() \
                   if not res in all_resources]
    for res in missing_res:
        del resource_map[res]
        dirty += [res]

    if dirty:
        try:
            dump(resource_map, map_path)
        except Exception, exc:
            configuration.logger.error("Could not save resource map: %s" % exc)
# Example #45 (snippet separator artifact)
# 0
from shared.conf import get_configuration_object

configuration = get_configuration_object()

sandboxdb_file = configuration.sandbox_home + os.sep\
     + 'sandbox_users.pkl'

PW = 0
RESOURCES = 1

try:
    username = sys.argv[1]
except:
    print 'You must specify a username.'
    sys.exit(1)

# Load the user file

userdb = load(sandboxdb_file)

if userdb.has_key(username):

    # Open the user file in write-mode - this deletes the file!

    del userdb[username]
    dump(userdb, sandboxdb_file)
    print 'Username %s has now been deleted!' % username
else:
    print 'Sorry, username does not exist: %s' % username
    sys.exit(0)
# Example #46 (snippet separator artifact)
# 0
     try:
         user_dict['full_name'] = args[0]
         user_dict['organization'] = args[1]
         user_dict['state'] = args[2]
         user_dict['country'] = args[3]
         user_dict['email'] = args[4]
         user_dict['comment'] = args[5]
         user_dict['password'] = args[6]
     except IndexError:
         print 'Error: too few arguments given (expected 7 got %d)'\
             % len(args)
         usage()
         sys.exit(1)
 elif user_file:
     try:
         user_dict = load(user_file)
     except Exception, err:
         print 'Error in user name extraction: %s' % err
         usage()
         sys.exit(1)
 elif default_renew and user_id:
     saved = load_user_dict(logger, user_id, db_path, verbose)
     if not saved:
         print 'Error: no such user in user db: %s' % user_id
         usage()
         sys.exit(1)
     user_dict.update(saved)
     del user_dict['expire']
 elif not configuration.site_enable_gdp:
     if verbose:
         print '''Entering interactive mode