示例#1
0
 def __new_ids(self, passwd_db, group_db):
     # Allocate a single fresh numeric id that is unused as both a uid and
     # a gid, then bind it to this user via __use_ids().
     #
     # Raises ENameInUse if this user's name already exists as a user in
     # passwd_db or as a group in group_db.
     if self.user() in passwd_db:
         raise ENameInUse("User Manager",
                          "%r already exists as a user." % self.user())
     if self.user() in group_db:
         # BUGFIX: message source said "User Manger"; corrected to match
         # the "User Manager" source used in the user-collision case above.
         raise ENameInUse("User Manager",
                          "%r already exists as a group." % self.user())
     # Probe for a uid that is free in BOTH databases so the same number
     # can safely serve as the uid and the gid.
     new_id = passwd_db.new_uid()
     while new_id in passwd_db or new_id in group_db:
         new_id = passwd_db.new_uid(new_id)
     self.__use_ids(passwd_db, group_db, new_id, new_id)
     return
示例#2
0
 def __init__(self, name, hash_it=1):
     # Bind this persistent-data object to a uniquely named backing file
     # and claim the name by registering a per-name Lock in the
     # module-level _locks map (guarded by _locks_lock).
     #
     # @param name: Identifier for this persistent data object.
     # @param hash_it: If true (default), the md5 hexdigest of name is used
     #                 as the on-disk key so arbitrary names map to safe
     #                 filenames.
     # Raises ENameInUse if another live object already owns this name.
     self._md5 = name
     if hash_it:
         self._md5 = md5.new(name).hexdigest()
     self.path = properties.PDO_DIRECTORY
     if not os.path.isdir(self.path):
         os.makedirs(self.path)
     self.filename = os.path.join(self.path, self._md5 + '.dat')
     _locks_lock.acquire()
     try:
         # There should only be one lock object for pdo's using this name:
         if _locks.has_key(self._md5):
             # The use of the WeakValueDict to track name use in PDOs can be
             # susceptible to circular references.  Force a gc.collect() IFF
             # there is a name in use collision, which will break any
             # circular references.  THIS IS A BIG HAMMER, BUT IN THE REAL
             # WORLD IT SHOULD BE SUPER RARE.
             gc.collect()
             if _locks.has_key(self._md5):
                 # OK, the LockMap still has an entry, the name REALLY is in
                 # use.
                 raise ENameInUse(name)
         self._lock = Lock()
         _locks[self._md5] = self._lock
     finally:
         _locks_lock.release()
     # Names queued for deletion from the persistent store.
     self._delete = []
示例#3
0
 def create_logger_alias(self, parent, name):
     # Ensure an Alias named `name` exists under /services/logger pointing
     # at the like-named node beneath `parent`.  Raises ENameInUse when
     # `name` is already taken by a non-alias child, or by an alias whose
     # target does not live under this node.
     logger_service = as_internal_node('/services/logger')
     if not logger_service.has_child(name):
         # No collision: build a fresh alias under the logger service.
         alias = Alias()
         parent_url = as_node(parent).as_node_url()
         escaped = urllib.quote(name, '')
         alias.configure({
             'name': name,
             'parent': logger_service,
             'node_url': os.path.join(parent_url, escaped, escaped)
         })
         return
     # get_child returns the actual child, as_node would follow an alias.
     existing = logger_service.get_child(name)
     if not isinstance(existing, Alias):
         raise ENameInUse(existing.as_node_url())
     # An alias is acceptable only when its target's parent is this node.
     if as_internal_node(os.path.dirname(existing.node_url)) is not self:
         raise ENameInUse(existing.as_node_url())
     return
示例#4
0
    def __init__(self, name):
        # Create a named log: claim the name in the module-level _logs map
        # (guarded by _logs_lock), open the backing file through
        # _LogDataManager, and resynchronize the sequence counter with the
        # last row actually present on disk.
        # Raises ENameInUse if a log with this name is already registered.
        _logs_lock.acquire()
        try:
            if _logs.has_key(name):
                from mpx.lib.msglog import types
                raise ENameInUse(name, logtype=types.ERR)
            self._lock = threading.Lock()
            # Hold our own lock for the remainder of construction so any
            # thread that finds this instance in _logs blocks until it is
            # fully initialized.
            self._lock.acquire()
            _logs[name] = self
        finally:
            _logs_lock.release()
        try:
            self.name = name
            # NOTE(review): restricted-execution evaluator; presumably used
            # to parse stored values safely -- confirm against callers.
            self.eval = RExec().r_eval
            self.path = properties.LOGFILE_DIRECTORY
            self.filename = os.path.join(self.path, self.name + ".log")
            self.data_manager = _LogDataManager(self.filename)

            self._slice_position = 0
            # Cap on data returned per query (units per data_manager).
            self.max_return_length = 100000
            self.last_values = self._get_last_row()
            # It is likely that the log contains a higher seq number than
            #  the one that was last saved by DataManager, if so, update it.
            # NOTE: the data manager will have a higher number if a trim_g* was
            #       done and then the log was closed.
            if self.last_values:
                if self.last_values[-1] >= self.data_manager.inspect_seq():
                    self.data_manager.set_seq(self.last_values[-1] + 1)
                else:
                    # Stale last row (older than the manager's own
                    # sequence); discard it.
                    self.last_values = []
        finally:
            self._lock.release()
示例#5
0
def force_outside_log_object(name, instance):
    # Bind `instance` into the module-level _logs registry under `name`
    # and flag it as an externally managed ("outside") log.
    # Raises ENameInUse when the name is already bound to a different
    # object.
    _logs_lock.acquire()
    try:
        if _logs.has_key(name):
            if _logs[name] is not instance:
                raise ENameInUse(name)
        else:
            _logs[name] = instance
        instance.__outside_log = 1
    finally:
        _logs_lock.release()
    return
示例#6
0
 def __init__(self, name, locker=None, hash_it=1, context=None, path=None):
     # Bind this persistent-storage object to its '.dat.1' backing file,
     # claim the (possibly hashed) name in the module-level _locks map,
     # and transparently upgrade data written by the previous on-disk
     # format if our file does not exist yet.
     #
     # @param name: Identifier for this persistent data.
     # @param locker: Optional pre-existing lock to share; when given, the
     #                name-collision check is skipped.
     # @param hash_it: If true (default), the md5 hexdigest of name is the
     #                 on-disk key.
     # @param context: Optional context mapping (default: empty dict).
     # @param path: Optional storage directory; defaults to
     #              properties.PDO_DIRECTORY.
     # Raises ENameInUse if another live object already owns this name.
     if context is None:
         context = {}
     if path is None:
         self.path = properties.PDO_DIRECTORY
     else:
         self.path = path
     self._name = name
     self._context = context
     if hash_it:
         self._name = md5.new(self._name).hexdigest()
     # '.1' suffix marks the current storage-format version.
     self._name = self._name + '.dat.1'
     if not os.path.isdir(self.path):
         os.makedirs(self.path)
     self.filename = os.path.join(self.path, self._name)
     _locks_lock.acquire()
     try:
         # There should only be one lock object for pdo's using this name:
         if not locker and _locks.has_key(self._name):
             # The use of the WeakValueDict to track name use in PDOs can be
             # susceptible to circular references.  Force a gc.collect() IFF
             # there is a name in use collision, which will break any
             # circular references.  THIS IS A BIG HAMMER, BUT IN THE REAL
             # WORLD IT SHOULD BE SUPER RARE.
             gc.collect()
             if _locks.has_key(self._name):
                 # OK, the LockMap still has an entry, the name REALLY is in
                 # use.
                 raise ENameInUse(name)
         if locker:
             self._lock = locker
         else:
             new_lock = Lock()
             self._lock = new_lock
         _locks[self._name] = self._lock
         # if our version does not exist, but a previous
         # version of this persistent data does, do an
         # upgrade by instantiating the previous version.
         if not os.path.exists(self.filename) and \
            _prior_version_exists(name, self.path, hash_it):
             from mpx.upgrade.persistent.persistent_0 import \
                  _PersistentStorage_0
             old = _PersistentStorage_0(name, hash_it)
             old_name = old.filename
             # Adopt the old file under the new name, then let the old
             # object clean up its remaining artifacts.
             os.rename(old_name, self.filename)
             old.destroy()
             del (old)
     finally:
         _locks_lock.release()
     # Names queued for deletion from the persistent store.
     self._delete = []
     return
示例#7
0
def _setup_new_log(name, class_ref):
    # Return the shared log registered under `name`, creating it via
    # class_ref(name) on first use.
    # Raises ENameInUse when the name is bound to an incompatible object:
    # one that is neither flagged as an outside log nor an instance of
    # class_ref (or a subclass thereof).
    _logs_lock.acquire()
    try:
        if not _logs.has_key(name):
            _logs[name] = class_ref(name)
        else:
            existing = _logs[name]
            is_outside = (hasattr(existing, '__outside_log')
                          and existing.__outside_log)
            if not is_outside and not isinstance(existing, class_ref):
                # The existing instance is not an instantiation of
                # class_ref, nor of a class derived from class_ref.
                raise ENameInUse(name)
        log = _logs[name]
    finally:
        _logs_lock.release()
    return log
示例#8
0
 def _create_group(self, entity_path, group_name, config):
     # Create, configure, and start a GlobalSetpointGroup named
     # `group_name` under the entity's group container, building the
     # container on demand.  Raises ENameInUse when the container already
     # has a child by that name.
     # NOTE(review): the incoming `config` argument is never read -- it is
     # overwritten before first use; confirm callers expect that.
     group_container = self._get_group_container(entity_path)
     if group_container is None:
         # No container yet: normalize the path and build one.
         if not entity_path.startswith(EM):
             entity_path = EM + entity_path
         group_container = GlobalSetpointGroupContainer()
         group_container.configure({
             'name': 'Global Setpoints',
             'parent': entity_path
         })
     elif group_container.has_child(group_name):
         raise ENameInUse()
     group = GlobalSetpointGroup()
     group.configure({
         'name': group_name,
         'parent': group_container,
         'entity_path': entity_path
     })
     group.start()
     return group
示例#9
0
 def _configure(self, columns, max_return_length):
     # Install the column configuration and result-size cap for this log.
     # `max_return_length` is given in KB and stored in bytes.
     self.max_return_length = max_return_length * 1024
     columns = list(columns)
     # Backward compatibility: a list of plain strings is promoted to a
     # list of ColumnConfiguration objects with 'none' sorting.
     self._string_configured = 0
     if columns and type(columns[0]) == types.StringType:
         self._string_configured = 1
         column_names = columns
         columns = []
         position = 0
         for column_name in column_names:
             columns.append(
                 ColumnConfiguration(column_name, position, 'none'))
             position = position + 1
     # Reject any user column whose name collides with the hidden
     # sequence column appended below.
     for column in columns:
         if column.name == '_seq':
             raise ENameInUse(column.name)
     columns.append(ColumnConfiguration('_seq', len(columns), 'ascending'))
     self.last_values = []
     self.columns = columns
     self.data_manager.configure(columns)
示例#10
0
         try:
             sched = self._get_schedule_node(sched)
             self.move_schedule(sched, destination, force)
         except Exception, e:
             exceptions[sched.as_node_url()] = str(current_exception())
     if exceptions:
         raise EBatchedException('remove_schedule', exceptions)
     return
 if self._is_rename(source, destination):
     return self._rename_schedule(source, destination, force)
 source_sched = self._get_schedule_node(source)
 source_sched.set_override(True)
 orig_sched_path = source_sched.as_node_url()
 dest_sched = self._get_schedule_node(destination)
 if dest_sched.has_child(source_sched.name):
     raise ENameInUse(source_sched.name)
 dest_sched_path = dest_sched.as_node_url()
 cd = source_sched.configuration()
 cd['parent'] = dest_sched
 source_sched.configure(cd)
 try:
     PERSISTANCE_MANAGER.move_sched(orig_sched_path,
                                    dest_sched.as_node_url(),
                                    serialize_node(source_sched))
 except:
     msglog.exception()
 if dest_sched is self:
     source_sched.set_override(True)
 source_sched.stop()
 source_sched.start()
 self.event_generate(