Example 1
from weakref import WeakValueDictionary


class Signal(object):
    # Uses the Python 2 bound-method attributes im_func/im_self
    # (__func__/__self__ in Python 3; compare the later examples).
    def __init__(self):
        self.__slots = WeakValueDictionary()

    def __call__(self, *args, **kargs):
        for key in self.__slots:
            func, selfid = key
            if selfid is not None:
                func(self.__slots[key], *args, **kargs)
            else:
                func(*args, **kargs)

    def __get_key(self, slot):
        if hasattr(slot, 'im_func'):
            return (slot.im_func, id(slot.im_self))
        else:
            return (slot, None)

    def connect(self, slot):
        key = self.__get_key(slot)
        if hasattr(slot, 'im_func'):
            self.__slots[key] = slot.im_self
        else:
            self.__slots[key] = slot

    def disconnect(self, slot):
        key = self.__get_key(slot)
        if key in self.__slots:
            self.__slots.pop(key)

    def clear(self):
        self.__slots.clear()
Example 2
from weakref import WeakValueDictionary


class Signal(object):
    """
    A Signal is callable. When called, it calls all the callables in its slots.
    Uses the Python 2 bound-method attributes im_func/im_self.
    """
    def __init__(self):
        self._slots = WeakValueDictionary()

    def __call__(self, *args, **kargs):
        for key in self._slots:
            func, _ = key
            func(self._slots[key], *args, **kargs)

    def connect(self, slot):
        """
        Slots must call this to register a callback method.
        :param slot: callable
        """
        key = (slot.im_func, id(slot.im_self))
        self._slots[key] = slot.im_self

    def disconnect(self, slot):
        """
        They can also unregister their callbacks here.
        :param slot: callable
        """
        key = (slot.im_func, id(slot.im_self))
        if key in self._slots:
            self._slots.pop(key)

    def clear(self):
        """
        Clears all slots
        """
        self._slots.clear()
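
A minimal usage sketch for the Signal above (Python 2, since it relies on im_func/im_self). The receiver is only weakly referenced, so disconnection happens automatically once the last strong reference to it goes away (immediately in CPython):

class Receiver(object):
    def on_event(self, value):
        self.last = value

sig = Signal()
r = Receiver()
sig.connect(r.on_event)
sig(42)         # calls r.on_event(42)
assert r.last == 42
del r           # receiver is garbage collected (CPython refcounting)...
sig(43)         # ...so no slot is called any more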
Example 3
from weakref import WeakValueDictionary


class Signal(object):
    def __init__(self):
        self.__slots = WeakValueDictionary()

    def __call__(self, *args, **kargs):
        for key in self.__slots:
            func, selfid = key
            if selfid is not None:
                func(self.__slots[key], *args, **kargs)
            else:
                func(*args, **kargs)

    def __get_key(self, slot):
        if hasattr(slot, 'im_func'):
            return (slot.im_func, id(slot.im_self))
        else:
            return (slot, None)

    def connect(self, slot):
        key = self.__get_key(slot)
        if hasattr(slot, 'im_func'):
            self.__slots[key] = slot.im_self
        else:
            self.__slots[key] = slot

    def disconnect(self, slot):
        key = self.__get_key(slot)
        if key in self.__slots:
            self.__slots.pop(key)

    def clear(self):
        self.__slots.clear()
Example 4
from weakref import WeakValueDictionary


class Signal(object):
    def __init__(self):
        self.__slots = WeakValueDictionary()

    def __call__(self, *args, **kargs):
        for key in self.__slots:
            func, _ = key
            func(self.__slots[key], *args, **kargs)

    def connect(self, slot):
        key = (slot.__func__, id(slot.__self__))
        self.__slots[key] = slot.__self__

    def disconnect(self, slot):
        key = (slot.__func__, id(slot.__self__))
        if key in self.__slots:
            self.__slots.pop(key)

    def clear(self):
        self.__slots.clear()

        ## Sample usage:
        #class Model(object):
        #  def __init__(self, value):
        #    self.__value = value
        #    self.changed = Signal()
        #
        #  def set_value(self, value):
        #    self.__value = value
        #    self.changed() # Emit signal
        #
        #  def get_value(self):
        #    return self.__value
        #
        #
        #class View(object):
        #  def __init__(self, model):
        #    self.model = model
        #    model.changed.connect(self.model_changed)
        #
        #  def model_changed(self):
        #    print "New value:", self.model.get_value()
        #
        #
        #model = Model(10)
        #view1 = View(model)
        #view2 = View(model)
        #view3 = View(model)
        #
        #model.set_value(20)
        #
        #del view1
        #model.set_value(30)
        #
        #model.changed.clear()
        #model.set_value(40)
        ### end of http://code.activestate.com/recipes/576477/ }}}
Example 5
from weakref import WeakValueDictionary

# `logger` and `get_new_uuid` are assumed to come from the surrounding module.
class ObjectPool(object):
    """
        This class allows fetching MVC model objects by their UUID.
        It requires the model to have a property called "uuid"; all
        classes inheriting from the base 'Model' class have this.
        When implementing a custom model, the UUID property is responsible
        for removing and re-adding the object to the pool when its value
        changes. Also see the UUIDProperty descriptor for an example
        implementation. This can be used to store complex relations between
        objects, where direct references to each other are replaced with
        the UUID. For a multi-threaded version see ThreadedObjectPool.
    """
    def __init__(self, *args, **kwargs):
        object.__init__(self)
        self._objects = WeakValueDictionary()

    def add_or_get_object(self, obj):
        try:
            # fail_on_duplicate=True makes add_object raise KeyError when
            # the uuid is already taken, which we catch below.
            self.add_object(obj, force=False, fail_on_duplicate=True)
            return obj
        except KeyError:
            return self.get_object(obj.uuid)

    def add_object(self, obj, force=False, fail_on_duplicate=False):
        if obj.uuid not in self._objects or force:
            self._objects[obj.uuid] = obj
        elif fail_on_duplicate:
            raise KeyError(
                "UUID %s is already taken by another object %s, cannot add object %s"
                % (obj.uuid, self._objects[obj.uuid], obj))
        else:
            # Just change the objects uuid, will break refs, but
            # it prevents issues with inherited properties etc.
            logger.warning(
                "A duplicate UUID was passed to an ObjectPool for a %s object."
                % obj)
            obj.uuid = get_new_uuid()

    def change_all_uuids(self):
        # first get a copy of all uuids & objects:
        items = list(self._objects.items())
        for uuid, obj in items:  # @UnusedVariable
            obj.uuid = get_new_uuid()

    def remove_object(self, obj):
        if obj.uuid in self._objects and self._objects[obj.uuid] == obj:
            del self._objects[obj.uuid]

    def get_object(self, uuid):
        obj = self._objects.get(uuid, None)
        return obj

    def clear(self):
        self._objects.clear()
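
A hypothetical usage sketch (Item is a stand-in; a real model would manage its uuid property as described in the docstring):

class Item(object):
    def __init__(self, uuid):
        self.uuid = uuid

pool = ObjectPool()
item = Item("a-uuid")
pool.add_object(item)
assert pool.get_object("a-uuid") is item
del item                                    # only the weak reference remains...
assert pool.get_object("a-uuid") is None    # ...so the pool forgets it (CPython)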
Example 6
from weakref import WeakValueDictionary

# `logger` and `get_new_uuid` are assumed to come from the surrounding module.
class ObjectPool(object):
    """
        This class allows fetching MVC model objects by their UUID.
        It requires the model to have a property called "uuid"; all
        classes inheriting from the base 'Model' class have this.
        When implementing a custom model, the UUID property is responsible
        for removing and re-adding the object to the pool when its value
        changes. Also see the UUIDPropIntel class for an example
        implementation. This can be used to store complex relations between
        objects, where direct references to each other are replaced with
        the UUID. For a multi-threaded version see ThreadedObjectPool.
    """

    def __init__(self, *args, **kwargs):
        object.__init__(self)
        self._objects = WeakValueDictionary()

    def add_or_get_object(self, obj):
        try:
            # fail_on_duplicate=True makes add_object raise KeyError when
            # the uuid is already taken, which we catch below.
            self.add_object(obj, force=False, fail_on_duplicate=True)
            return obj
        except KeyError:
            return self.get_object(obj.uuid)

    def add_object(self, obj, force=False, fail_on_duplicate=False):
        if obj.uuid not in self._objects or force:
            self._objects[obj.uuid] = obj
        elif fail_on_duplicate:
            raise KeyError(
                "UUID %s is already taken by another object %s, cannot add object %s"
                % (obj.uuid, self._objects[obj.uuid], obj))
        else:
            # Just change the objects uuid, will break refs, but
            # it prevents issues with inherited properties etc.
            logger.warning("A duplicate UUID was passed to an ObjectPool for a %s object." % obj)
            obj.uuid = get_new_uuid()

    def change_all_uuids(self):
        # First get a copy of all uuids & objects: changing a uuid mutates
        # the underlying WeakValueDictionary while we iterate.
        items = list(self._objects.items())
        for uuid, obj in items:  # @UnusedVariable
            obj.uuid = get_new_uuid()

    def remove_object(self, obj):
        if obj.uuid in self._objects and self._objects[obj.uuid] == obj:
            del self._objects[obj.uuid]

    def get_object(self, uuid):
        obj = self._objects.get(uuid, None)
        return obj

    def clear(self):
        self._objects.clear()
Example 7
from weakref import WeakValueDictionary

# `package` is assumed to be imported from the surrounding module.
class factory:
    """package generator

    does weakref caching per repository

    :cvar child_class: callable to generate packages
    """

    child_class = package

    def __init__(self, parent_repo):
        self._parent_repo = parent_repo
        self._cached_instances = WeakValueDictionary()

    def new_package(self, *args):
        """generate a new package instance"""
        inst = self._cached_instances.get(args)
        if inst is None:
            inst = self._cached_instances[args] = self.child_class(self, *args)
        return inst

    def __call__(self, *args, **kwds):
        return self.new_package(*args, **kwds)

    def clear(self):
        """wipe the weakref cache of packages instances"""
        self._cached_instances.clear()

    def _get_metadata(self, *args):
        """Pulls metadata from the repo/cache/wherever.

        Must be overridden in derivatives.
        """
        raise NotImplementedError

    def _update_metadata(self, *args):
        """Updates metadata in the repo/cache/wherever.

        Must be overridden in derivatives.
        """
        raise NotImplementedError

    def __getstate__(self):
        d = self.__dict__.copy()
        del d['_cached_instances']
        return d

    def __setstate__(self, state):
        self.__dict__ = state.copy()
        self.__dict__['_cached_instances'] = WeakValueDictionary()
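
A hypothetical sketch of the caching behaviour (DummyPackage stands in for the real package class):

class DummyPackage(object):
    def __init__(self, repo, *args):
        self.repo = repo
        self.args = args

class dummy_factory(factory):
    child_class = DummyPackage

f = dummy_factory(parent_repo=None)
p1 = f.new_package("cat", "pkg", "1.0")
p2 = f.new_package("cat", "pkg", "1.0")
assert p1 is p2     # same args -> same cached instance, while referenced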
Example 8
from weakref import WeakValueDictionary


class DictRegistryBase(RegistryBase, MappingRegistryMixin):
    """
    A registry class with dict-based item storage.

    Note that the registry only holds weak references to its items.
    """

    key_error_class = KeyError

    def __init__(self):
        self.mapping = WeakValueDictionary()

    def perform_cache_clear(self):
        self.mapping.clear()

    def register(self, key, value):
        """
        Registration.

        If the given key already exists, the function raises a
        KeyError.
        """

        if key in self.mapping:
            raise self.key_error_class(key)

        self.mapping[key] = value
        self.perform_register(key, value)

    def perform_register(self, key, value):
        pass

    def unregister(self, key):
        """
        Unregistration.

        If the given key does not exist, the function will fail silently.
        """
        self.mapping.pop(key, None)

        self.perform_unregister(key)

    def perform_unregister(self, key):
        pass

    def __getitem__(self, key):
        """
        Shortcuts to access registered items.
        """
        return self.mapping[key]
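
A hypothetical sketch of the weak-reference semantics, assuming trivial stand-ins for the two base classes (they would have to exist before the class definition above):

class RegistryBase(object):
    pass

class MappingRegistryMixin(object):
    pass

class Thing(object):
    pass

reg = DictRegistryBase()
t = Thing()
reg.register("t", t)
assert reg["t"] is t
del t                           # no strong references left...
assert "t" not in reg.mapping   # ...so the entry evaporates (CPython)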
Example 9
from weakref import WeakValueDictionary


class Signal(object):
    def __init__(self):
        self.__slots = WeakValueDictionary()

    def __call__(self, *args, **kargs):
        for key in self.__slots:
            func, _ = key
            func(self.__slots[key], *args, **kargs)

    def connect(self, slot):
        key = (slot.__func__, id(slot.__self__))
        self.__slots[key] = slot.__self__

    def disconnect(self, slot):
        key = (slot.__func__, id(slot.__self__))
        if key in self.__slots:
            self.__slots.pop(key)

    def clear(self):
        self.__slots.clear()
Example 10
from threading import Lock, local
from weakref import WeakValueDictionary


class ThreadLocalEntityCache(local):
    def __init__(self):
        self.lock = Lock()
        self._dict = WeakValueDictionary()

    def __contains__(self, key):
        return key in self._dict

    def __getitem__(self, key):
        return self._dict[key]

    def get(self, key, default=None):
        return self._dict.get(key, default)

    def clear(self):
        self._dict.clear()

    def keys(self):
        return self._dict.keys()

    def update(self, key, value):
        """ Extract, insert or remove a value for a given key.
        """
        with self.lock:
            if value is None:
                # remove
                try:
                    del self._dict[key]
                except KeyError:
                    pass
                else:
                    return None
            elif callable(value):
                try:
                    # extract
                    return self._dict[key]
                except KeyError:
                    # construct and insert
                    new_value = value()
                    self._dict[key] = new_value
                    return new_value
            else:
                # insert or replace
                self._dict[key] = value
                return value
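
The three update() modes in one sketch: None removes, a callable acts as get-or-construct, and any other value inserts or replaces:

class Entity(object):
    pass

cache = ThreadLocalEntityCache()
e = Entity()
cache.update("k", e)                    # insert
assert cache["k"] is e
assert cache.update("k", Entity) is e   # callable: the existing entry wins
cache.update("k", None)                 # remove
assert "k" not in cache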
Example 11
from weakref import WeakValueDictionary


class Signal(object):
    def __init__(self):
        self.__slots = WeakValueDictionary()

    def __call__(self, *args, **kargs):
        for key in self.__slots:
            func, _ = key
            func(self.__slots[key], *args, **kargs)

    def connect(self, slot):
        key = (slot.__func__, id(slot.__self__))
        self.__slots[key] = slot.__self__

    def disconnect(self, slot):
        key = (slot.__func__, id(slot.__self__))
        if key in self.__slots:
            self.__slots.pop(key)

    def clear(self):
        self.__slots.clear()
Example 12
from weakref import WeakValueDictionary

# "catched" in the names below presumably means "cached"; the original
# identifiers are kept as-is.
class SpamManagerCatched:
    def __init__(self):
        self._catched = WeakValueDictionary()
        self.name = None

    def _get_spam(self, name):
        self.name = name

        if name in self._catched:
            return self._catched[name]
        else:
            obj = SpamManager(name)
            self._catched[name] = obj
            return obj

    def _clear(self):
        self._catched.clear()

    def __repr__(self):
        # str() guards against self.name still being None.
        return str(self.name)
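
Hypothetical usage, with a stand-in for the SpamManager class defined elsewhere:

class SpamManager(object):
    def __init__(self, name):
        self.name = name

mgr = SpamManagerCatched()
a = mgr._get_spam("alpha")
assert mgr._get_spam("alpha") is a  # cached while a strong reference exists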
Example 13
from threading import Lock, local
from weakref import WeakValueDictionary


class ThreadLocalEntityCache(local):

    def __init__(self):
        self.lock = Lock()
        self._dict = WeakValueDictionary()

    def __contains__(self, key):
        return key in self._dict

    def __getitem__(self, key):
        return self._dict[key]

    def clear(self):
        self._dict.clear()

    def update(self, key, value):
        """ Extract, insert or remove a value for a given key.
        """
        with self.lock:
            if value is None:
                # remove
                try:
                    del self._dict[key]
                except KeyError:
                    pass
                else:
                    return None
            elif callable(value):
                try:
                    # extract
                    return self._dict[key]
                except KeyError:
                    # construct and insert
                    new_value = value()
                    self._dict[key] = new_value
                    return new_value
            else:
                # insert or replace
                self._dict[key] = value
                return value
Example 14
from weakref import WeakValueDictionary


class Signal(object):
    """
    Signal/slot object, used for handling events passed to objects in the GUI.
    Uses the Python 2 bound-method attributes im_func/im_self.
    """
    def __init__(self):
        self.__slots = WeakValueDictionary()

    def __call__(self, *args, **kargs):
        for key in self.__slots:
            func, _ = key
            func(self.__slots[key], *args, **kargs)

    def connect(self, slot):
        key = (slot.im_func, id(slot.im_self))
        self.__slots[key] = slot.im_self

    def disconnect(self, slot):
        key = (slot.im_func, id(slot.im_self))
        if key in self.__slots:
            self.__slots.pop(key)

    def clear(self):
        self.__slots.clear()
Example 15
import inspect
from weakref import WeakValueDictionary


class Signal(object):

    def __init__(self):
        self.__slots = WeakValueDictionary()

        # For keeping references to _FuncHost objects.
        self.__funchosts = {}

    def __call__(self, *args, **kargs):
        for key in self.__slots:
            func, _ = key
            func(self.__slots[key], *args, **kargs)

    def connect(self, slot):
        if inspect.ismethod(slot):
            key = (slot.im_func, id(slot.im_self))
            self.__slots[key] = slot.im_self
        else:
            host = _FuncHost(slot)
            self.connect(host.meth)
            # We stick a copy in here just to keep the instance alive.
            self.__funchosts[slot] = host

    def disconnect(self, slot):
        if inspect.ismethod(slot):
            key = (slot.im_func, id(slot.im_self))
            if key in self.__slots:
                self.__slots.pop(key)
        else:
            if slot in self.__funchosts:
                self.disconnect(self.__funchosts[slot].meth)
                self.__funchosts.pop(slot)

    def clear(self):
        self.__slots.clear()
        self.__funchosts.clear()
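
A plausible sketch of _FuncHost (an assumption; the real class is defined elsewhere): it wraps a plain function in an instance so the function can be connected like a bound method, and the Signal's __funchosts dict keeps the host alive:

class _FuncHost(object):
    def __init__(self, func):
        self._func = func

    def meth(self, *args, **kargs):
        self._func(*args, **kargs)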
Example 16
    def clear(self):
        self.queue.clear()
        return WeakValueDictionary.clear(self)
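
For context, the fragment reads like a method of a WeakValueDictionary subclass that also maintains a queue; a hypothetical enclosing class:

from collections import deque
from weakref import WeakValueDictionary

class QueueBackedCache(WeakValueDictionary):
    def __init__(self):
        WeakValueDictionary.__init__(self)
        self.queue = deque()

    def clear(self):
        self.queue.clear()
        return WeakValueDictionary.clear(self)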
Example 17
import bz2
import json
import os
from weakref import WeakValueDictionary

# Type, Attribute, Effect, Modifier and the *FetchError exceptions are
# assumed to be imported from the surrounding package.
class JsonCacheHandler:
    """
    Each time Eos is initialized, it loads data from packed JSON
    (disk cache) into memory data cache, and uses it to instantiate
    objects, which are stored in an in-memory weakref object cache.

    Positional arguments:
    diskCacheFolder -- folder where on-disk cache files are stored
    name -- unique identifier of cache, e.g. Eos instance name
    logger -- logger to use for errors
    """

    def __init__(self, diskCacheFolder, name, logger):
        self._diskCacheFile = os.path.join(diskCacheFolder, '{}.json.bz2'.format(name))
        self._logger = logger
        # Initialize memory data cache
        self.__typeDataCache = {}
        self.__attributeDataCache = {}
        self.__effectDataCache = {}
        self.__modifierDataCache = {}
        self.__fingerprint = None
        # Initialize weakref object cache
        self.__typeObjCache = WeakValueDictionary()
        self.__attributeObjCache = WeakValueDictionary()
        self.__effectObjCache = WeakValueDictionary()
        self.__modifierObjCache = WeakValueDictionary()

        # If cache doesn't exist, silently finish initialization
        if not os.path.exists(self._diskCacheFile):
            return
        # Read JSON into local variable
        try:
            with bz2.BZ2File(self._diskCacheFile, 'r') as file:
                jsonData = file.read().decode('utf-8')
                data = json.loads(jsonData)
        # If file doesn't exist, JSON load errors occur, or
        # anything else bad happens, do not load anything
        # and leave values as initialized
        except Exception:
            msg = 'error during reading cache'
            self._logger.error(msg, childName='cacheHandler')
        # Load data into data cache, if no errors occurred
        # during JSON reading/parsing
        else:
            self.__updateMemCache(data)

    def getType(self, typeId):
        try:
            type_ = self.__typeObjCache[typeId]
        except KeyError:
            # We do str(int(id)) here because JSON dictionaries
            # always have strings as keys
            jsonTypeId = str(int(typeId))
            try:
                data = self.__typeDataCache[jsonTypeId]
            except KeyError as e:
                raise TypeFetchError(typeId) from e
            groupId, catId, duration, discharge, optimal, falloff, tracking, fittable, effects, attribs = data
            type_ = Type(typeId=typeId,
                         groupId=groupId,
                         categoryId=catId,
                         durationAttributeId=duration,
                         dischargeAttributeId=discharge,
                         rangeAttributeId=optimal,
                         falloffAttributeId=falloff,
                         trackingSpeedAttributeId=tracking,
                         fittableNonSingleton=fittable,
                         attributes={attrId: attrVal for attrId, attrVal in attribs},
                         effects=tuple(self.getEffect(effectId) for effectId in effects))
            self.__typeObjCache[typeId] = type_
        return type_

    def getAttribute(self, attrId):
        try:
            attribute = self.__attributeObjCache[attrId]
        except KeyError:
            jsonAttrId = str(int(attrId))
            try:
                data = self.__attributeDataCache[jsonAttrId]
            except KeyError as e:
                raise AttributeFetchError(attrId) from e
            maxAttributeId, defaultValue, highIsGood, stackable = data
            attribute = Attribute(attributeId=attrId,
                                  maxAttributeId=maxAttributeId,
                                  defaultValue=defaultValue,
                                  highIsGood=highIsGood,
                                  stackable=stackable)
            self.__attributeObjCache[attrId] = attribute
        return attribute

    def getEffect(self, effectId):
        try:
            effect = self.__effectObjCache[effectId]
        except KeyError:
            jsonEffectId = str(int(effectId))
            try:
                data = self.__effectDataCache[jsonEffectId]
            except KeyError as e:
                raise EffectFetchError(effectId) from e
            effCategoryId, isOffence, isAssist, fitChanceId, buildStatus, modifiers = data
            effect = Effect(effectId=effectId,
                            categoryId=effCategoryId,
                            isOffensive=isOffence,
                            isAssistance=isAssist,
                            fittingUsageChanceAttributeId=fitChanceId,
                            buildStatus=buildStatus,
                            modifiers=tuple(self.getModifier(modifierId) for modifierId in modifiers))
            self.__effectObjCache[effectId] = effect
        return effect

    def getModifier(self, modifierId):
        try:
            modifier = self.__modifierObjCache[modifierId]
        except KeyError:
            jsonModifierId = str(int(modifierId))
            try:
                data = self.__modifierDataCache[jsonModifierId]
            except KeyError as e:
                raise ModifierFetchError(modifierId) from e
            state, context, srcAttrId, operator, tgtAttrId, location, filType, filValue = data
            modifier = Modifier(modifierId=modifierId,
                                state=state,
                                context=context,
                                sourceAttributeId=srcAttrId,
                                operator=operator,
                                targetAttributeId=tgtAttrId,
                                location=location,
                                filterType=filType,
                                filterValue=filValue)
            self.__modifierObjCache[modifierId] = modifier
        return modifier

    def getFingerprint(self):
        """Get disk cache fingerprint."""
        return self.__fingerprint

    def updateCache(self, data, fingerprint):
        """
        Updates on-disk and memory caches.

        Positional arguments:
        data -- dictionary with data to update
        fingerprint -- string with fingerprint
        """
        # Make light version of data and add fingerprint
        # to it
        data = self.__stripData(data)
        data['fingerprint'] = fingerprint
        # Update disk cache
        os.makedirs(os.path.dirname(self._diskCacheFile), mode=0o755, exist_ok=True)
        with bz2.BZ2File(self._diskCacheFile, 'w') as file:
            jsonData = json.dumps(data)
            file.write(jsonData.encode('utf-8'))
        # Update data cache; encode to JSON and decode back
        # to make sure form of data is the same as after
        # loading it from cache
        data = json.loads(jsonData)
        self.__updateMemCache(data)

    def __stripData(self, data):
        """
        Rework passed data, stripping dictionary
        keys from it to reduce space needed to store it.
        """
        slimData = {}

        slimTypes = {}
        for typeRow in data['types']:
            typeId = typeRow['typeId']
            slimTypes[typeId] = (typeRow['groupId'],
                                 typeRow['categoryId'],
                                 typeRow['durationAttributeId'],
                                 typeRow['dischargeAttributeId'],
                                 typeRow['rangeAttributeId'],
                                 typeRow['falloffAttributeId'],
                                 typeRow['trackingSpeedAttributeId'],
                                 typeRow['fittableNonSingleton'],
                                 tuple(typeRow['effects']),  # List -> tuple
                                 tuple(typeRow['attributes'].items()))  # Dictionary -> tuple
        slimData['types'] = slimTypes

        slimAttribs = {}
        for attrRow in data['attributes']:
            attrId = attrRow['attributeId']
            slimAttribs[attrId] = (attrRow['maxAttributeId'],
                                   attrRow['defaultValue'],
                                   attrRow['highIsGood'],
                                   attrRow['stackable'])
        slimData['attributes'] = slimAttribs

        slimEffects = {}
        for effectRow in data['effects']:
            effectId = effectRow['effectId']
            slimEffects[effectId] = (effectRow['effectCategory'],
                                     effectRow['isOffensive'],
                                     effectRow['isAssistance'],
                                     effectRow['fittingUsageChanceAttributeId'],
                                     effectRow['buildStatus'],
                                     tuple(effectRow['modifiers']))  # List -> tuple
        slimData['effects'] = slimEffects

        slimModifiers = {}
        for modifierRow in data['modifiers']:
            modifierId = modifierRow['modifierId']
            slimModifiers[modifierId] = (modifierRow['state'],
                                         modifierRow['context'],
                                         modifierRow['sourceAttributeId'],
                                         modifierRow['operator'],
                                         modifierRow['targetAttributeId'],
                                         modifierRow['location'],
                                         modifierRow['filterType'],
                                         modifierRow['filterValue'])
        slimData['modifiers'] = slimModifiers

        return slimData

    def __updateMemCache(self, data):
        """
        Loads data into memory data cache.

        Positional arguments:
        data -- dictionary with data to load
        """
        self.__typeDataCache = data['types']
        self.__attributeDataCache = data['attributes']
        self.__effectDataCache = data['effects']
        self.__modifierDataCache = data['modifiers']
        self.__fingerprint = data['fingerprint']
        # Also clear object cache to make sure objects composed
        # from old data are gone
        self.__typeObjCache.clear()
        self.__attributeObjCache.clear()
        self.__effectObjCache.clear()
        self.__modifierObjCache.clear()
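
The four getters above share one pattern; distilled here with hypothetical names:

from weakref import WeakValueDictionary

_obj_cache = WeakValueDictionary()

def get_cached(key, build):
    """Return the cached object for key, constructing it on a cache miss."""
    try:
        return _obj_cache[key]
    except KeyError:
        obj = _obj_cache[key] = build(key)  # the obj local keeps a strong ref
        return obj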
Example 18
from weakref import WeakValueDictionary

# FolderNode, TaskNode, Task, TaskGroup, Pool, PoolShare, Command, RenderNode,
# BaseNode, ObjectListener, FifoStrategy, StatDB, RuleError, loadStrategyClass,
# splitpath, CMD_BLOCKED, CMD_READY and logger are assumed to be imported from
# the surrounding project.
class DispatchTree(object):
    def __init__(self):
        # core data
        self.root = FolderNode(0, "root", None, "root", 1, 1, 0, FifoStrategy())
        self.nodes = WeakValueDictionary()
        self.nodes[0] = self.root
        self.pools = {}
        self.renderNodes = {}
        self.tasks = {}
        self.rules = []
        self.poolShares = {}
        self.commands = {}
        # deduced properties
        self.nodeMaxId = 0
        self.poolMaxId = 0
        self.renderNodeMaxId = 0
        self.taskMaxId = 0
        self.commandMaxId = 0
        self.poolShareMaxId = 0
        self.toCreateElements = []
        self.toModifyElements = []
        self.toArchiveElements = []
        # listeners
        self.nodeListener = ObjectListener(self.onNodeCreation, self.onNodeDestruction, self.onNodeChange)
        self.taskListener = ObjectListener(self.onTaskCreation, self.onTaskDestruction, self.onTaskChange)
        # # JSA
        # self.taskGroupListener = ObjectListener(self.onTaskCreation, self.onTaskDestruction, self.onTaskGroupChange)
        self.renderNodeListener = ObjectListener(
            self.onRenderNodeCreation, self.onRenderNodeDestruction, self.onRenderNodeChange
        )
        self.poolListener = ObjectListener(self.onPoolCreation, self.onPoolDestruction, self.onPoolChange)
        self.commandListener = ObjectListener(
            onCreationEvent=self.onCommandCreation, onChangeEvent=self.onCommandChange
        )
        self.poolShareListener = ObjectListener(self.onPoolShareCreation)
        self.modifiedNodes = []

    def registerModelListeners(self):
        BaseNode.changeListeners.append(self.nodeListener)
        Task.changeListeners.append(self.taskListener)
        TaskGroup.changeListeners.append(self.taskListener)
        RenderNode.changeListeners.append(self.renderNodeListener)
        Pool.changeListeners.append(self.poolListener)
        Command.changeListeners.append(self.commandListener)
        PoolShare.changeListeners.append(self.poolShareListener)

    def destroy(self):
        BaseNode.changeListeners.remove(self.nodeListener)
        Task.changeListeners.remove(self.taskListener)
        RenderNode.changeListeners.remove(self.renderNodeListener)
        Pool.changeListeners.remove(self.poolListener)
        Command.changeListeners.remove(self.commandListener)
        PoolShare.changeListeners.remove(self.poolShareListener)
        self.root = None
        self.nodes.clear()
        self.pools.clear()
        self.renderNodes.clear()
        self.tasks.clear()
        self.rules = None
        self.commands.clear()
        self.poolShares = None
        self.modifiedNodes = None
        self.toCreateElements = None
        self.toModifyElements = None
        self.toArchiveElements = None

    def findNodeByPath(self, path, default=None):
        nodenames = splitpath(path)
        node = self.root
        for name in nodenames:
            for child in node.children:
                if child.name == name:
                    node = child
                    break
            else:
                return default
        return node

    def updateCompletionAndStatus(self):
        self.root.updateCompletionAndStatus()

    def validateDependencies(self):
        nodes = set()
        for dependency in self.modifiedNodes:
            for node in dependency.reverseDependencies:
                nodes.add(node)
        del self.modifiedNodes[:]
        for node in nodes:
            # logger.debug("Dependencies on %r = %r"% (node.name, node.checkDependenciesSatisfaction() ) )
            if not hasattr(node, "task") or node.task is None:
                continue
            if isinstance(node, TaskNode):
                if node.checkDependenciesSatisfaction():
                    for cmd in node.task.commands:
                        if cmd.status == CMD_BLOCKED:
                            cmd.status = CMD_READY
                else:
                    for cmd in node.task.commands:
                        if cmd.status == CMD_READY:
                            cmd.status = CMD_BLOCKED

            # TODO: it may be needed to check dependencies on task groups.
            #       So far, a hack is done on the client side when submitting:
            #       dependencies of a taskgroup are reported on each task of its hierarchy
            #
            # elif isinstance(node, FolderNode):
            #
            #     if node.checkDependenciesSatisfaction():
            #         for cmd in node.getAllCommands():
            #             if cmd.status == CMD_BLOCKED:
            #                 cmd.status = CMD_READY
            #     else:
            #         for cmd in node.getAllCommands():
            #             if cmd.status == CMD_READY:
            #                 cmd.status = CMD_BLOCKED

    def registerNewGraph(self, graph):
        user = graph["user"]
        taskDefs = graph["tasks"]
        poolName = graph["poolName"]
        if "maxRN" in graph.items():
            maxRN = int(graph["maxRN"])
        else:
            maxRN = -1

        #
        # Create objects.
        #
        tasks = [None] * len(taskDefs)
        for (index, taskDef) in enumerate(taskDefs):
            if taskDef["type"] == "Task":
                # logger.debug("taskDef.watcherPackages = %s" % taskDef["watcherPackages"])
                # logger.debug("taskDef.runnerPackages = %s" % taskDef["runnerPackages"])
                task = self._createTaskFromJSON(taskDef, user)
            elif taskDef["type"] == "TaskGroup":
                task = self._createTaskGroupFromJSON(taskDef, user)
            tasks[index] = task
        root = tasks[graph["root"]]

        # get the pool
        try:
            pool = self.pools[poolName]
        except KeyError:
            pool = Pool(None, poolName)
            self.pools[poolName] = pool
        #
        # Rebuild full job hierarchy
        #
        for (taskDef, task) in zip(taskDefs, tasks):
            if taskDef["type"] == "TaskGroup":
                for taskIndex in taskDef["tasks"]:
                    task.addTask(tasks[taskIndex])
                    tasks[taskIndex].parent = task
        #
        # Compute dependencies for each created task or taskgroup object.
        #
        dependencies = {}
        for (taskDef, task) in zip(taskDefs, tasks):
            taskDependencies = {}
            if not isinstance(taskDef["dependencies"], list):
                raise SyntaxError(
                    "Dependencies must be a list of (taskId, [status-list]), got %r." % taskDef["dependencies"]
                )
            if not all(
                (
                    (isinstance(i, int) and isinstance(sl, list) and all((isinstance(s, int) for s in sl)))
                    for (i, sl) in taskDef["dependencies"]
                )
            ):
                raise SyntaxError(
                    "Dependencies must be a list of (taskId, [status-list]), got %r." % taskDef["dependencies"]
                )
            for (taskIndex, statusList) in taskDef["dependencies"]:
                taskDependencies[tasks[taskIndex]] = statusList
            dependencies[task] = taskDependencies
        #
        # Apply rules to generate dispatch tree nodes.
        #
        if not self.rules:
            logger.warning("graph submitted but no rule has been defined")

        unprocessedTasks = [root]
        nodes = []
        while unprocessedTasks:
            unprocessedTask = unprocessedTasks.pop(0)
            for rule in self.rules:
                try:
                    nodes += rule.apply(unprocessedTask)
                except RuleError:
                    logger.warning("rule %s failed for graph %s" % (rule, graph))
                    raise
            if isinstance(unprocessedTask, TaskGroup):
                for task in unprocessedTask:
                    unprocessedTasks.append(task)

        # create the poolshare, if any, and affect it to the node
        if pool:
            # FIXME nodes[0] may not be the root node of the graph...
            ps = PoolShare(None, pool, nodes[0], maxRN)
            # if maxRN is not -1 (e.g not default) set the userDefinedMaxRN to true
            if maxRN != -1:
                ps.userDefinedMaxRN = True

        #
        # Process dependencies
        #
        for rule in self.rules:
            rule.processDependencies(dependencies)

        for node in nodes:
            assert isinstance(node.id, int)
            self.nodes[node.id] = node

        # Init number of command in hierarchy
        self.populateCommandCounts(nodes[0])
        return nodes

    def populateCommandCounts(self, node):
        """
        Updates "commandCount" over a whole hierarchy starting from the given node.
        """
        res = 0
        if isinstance(node, FolderNode):
            for child in node.children:
                res += self.populateCommandCounts(child)
        elif isinstance(node, TaskNode):
            res = len(node.task.commands)

        node.commandCount = res
        return res

    def _createTaskGroupFromJSON(self, taskGroupDefinition, user):
        # name, parent, arguments, environment, priority, dispatchKey, strategy
        id = None
        name = taskGroupDefinition["name"]
        parent = None
        arguments = taskGroupDefinition["arguments"]
        environment = taskGroupDefinition["environment"]
        requirements = taskGroupDefinition["requirements"]
        maxRN = taskGroupDefinition["maxRN"]
        priority = taskGroupDefinition["priority"]
        dispatchKey = taskGroupDefinition["dispatchKey"]
        strategy = taskGroupDefinition["strategy"]
        strategy = loadStrategyClass(strategy.encode())
        strategy = strategy()
        tags = taskGroupDefinition["tags"]
        timer = None
        if "timer" in taskGroupDefinition.keys():
            timer = taskGroupDefinition["timer"]
        return TaskGroup(
            id,
            name,
            parent,
            user,
            arguments,
            environment,
            requirements,
            maxRN,
            priority,
            dispatchKey,
            strategy,
            tags=tags,
            timer=timer,
        )

    def _createTaskFromJSON(self, taskDefinition, user):
        # id, name, parent, user, priority, dispatchKey, runner, arguments,
        # validationExpression, commands, requirements=[], minNbCores=1,
        # maxNbCores=0, ramUse=0, environment={}
        name = taskDefinition["name"]
        runner = taskDefinition["runner"]
        arguments = taskDefinition["arguments"]
        environment = taskDefinition["environment"]
        requirements = taskDefinition["requirements"]
        maxRN = taskDefinition["maxRN"]
        priority = taskDefinition["priority"]
        dispatchKey = taskDefinition["dispatchKey"]
        validationExpression = taskDefinition["validationExpression"]
        minNbCores = taskDefinition["minNbCores"]
        maxNbCores = taskDefinition["maxNbCores"]
        ramUse = taskDefinition["ramUse"]
        lic = taskDefinition["lic"]
        tags = taskDefinition["tags"]
        runnerPackages = taskDefinition.get("runnerPackages", "")
        watcherPackages = taskDefinition.get("watcherPackages", "")
        timer = None
        if "timer" in taskDefinition.keys():
            timer = taskDefinition["timer"]

        maxAttempt = taskDefinition.get("maxAttempt", 1)

        task = Task(
            None,
            name,
            None,
            user,
            maxRN,
            priority,
            dispatchKey,
            runner,
            arguments,
            validationExpression,
            [],
            requirements,
            minNbCores,
            maxNbCores,
            ramUse,
            environment,
            lic=lic,
            tags=tags,
            timer=timer,
            maxAttempt=maxAttempt,
            runnerPackages=runnerPackages,
            watcherPackages=watcherPackages,
        )

        for commandDef in taskDefinition["commands"]:
            description = commandDef["description"]
            arguments = commandDef["arguments"]
            cmd = Command(
                None, description, task, arguments, runnerPackages=runnerPackages, watcherPackages=watcherPackages
            )
            task.commands.append(cmd)
            # import sys
            # logger.warning("cmd creation : %s" % str(sys.getrefcount(cmd)))

        return task

    ## Resets the lists of elements to create or update in the database.
    #
    def resetDbElements(self):
        self.toCreateElements = []
        self.toModifyElements = []
        self.toArchiveElements = []

    ## Recalculates the max ids of all elements. Generally called after a reload from db.
    #
    def recomputeMaxIds(self):
        self.nodeMaxId = max([n.id for n in self.nodes.values()]) if self.nodes else 0
        self.nodeMaxId = max(self.nodeMaxId, StatDB.getFolderNodesMaxId(), StatDB.getTaskNodesMaxId())
        self.poolMaxId = max([p.id for p in self.pools.values()]) if self.pools else 0
        self.poolMaxId = max(self.poolMaxId, StatDB.getPoolsMaxId())
        self.renderNodeMaxId = max([rn.id for rn in self.renderNodes.values()]) if self.renderNodes else 0
        self.renderNodeMaxId = max(self.renderNodeMaxId, StatDB.getRenderNodesMaxId())
        self.taskMaxId = max([t.id for t in self.tasks.values()]) if self.tasks else 0
        self.taskMaxId = max(self.taskMaxId, StatDB.getTasksMaxId(), StatDB.getTaskGroupsMaxId())
        self.commandMaxId = max([c.id for c in self.commands.values()]) if self.commands else 0
        self.commandMaxId = max(self.commandMaxId, StatDB.getCommandsMaxId())
        self.poolShareMaxId = max([ps.id for ps in self.poolShares.values()]) if self.poolShares else 0
        self.poolShareMaxId = max(self.poolShareMaxId, StatDB.getPoolSharesMaxId())

    ## Removes from the dispatchtree the provided element and all its parents and children.
    #
    def unregisterElementsFromTree(self, element):
        # /////////////// Handling of the Task
        if isinstance(element, Task):
            del self.tasks[element.id]
            self.toArchiveElements.append(element)
            for cmd in element.commands:
                self.unregisterElementsFromTree(cmd)
            for node in element.nodes.values():
                self.unregisterElementsFromTree(node)
        # /////////////// Handling of the TaskGroup
        elif isinstance(element, TaskGroup):
            del self.tasks[element.id]
            self.toArchiveElements.append(element)
            for task in element.tasks:
                self.unregisterElementsFromTree(task)
            for node in element.nodes.values():
                self.unregisterElementsFromTree(node)
        # /////////////// Handling of the TaskNode
        elif isinstance(element, TaskNode):
            # remove the element from the children of the parent
            if element.parent:
                element.parent.removeChild(element)
            if element.poolShares:
                for poolShare in element.poolShares.values():
                    del poolShare.pool.poolShares[poolShare.node]
                    del self.poolShares[poolShare.id]
                    self.toArchiveElements.append(poolShare)

            if element.additionnalPoolShares:
                for poolShare in element.additionnalPoolShares.values():
                    del poolShare.pool.poolShares[poolShare.node]
                    del self.poolShares[poolShare.id]
                    self.toArchiveElements.append(poolShare)

            del self.nodes[element.id]
            self.toArchiveElements.append(element)
            for dependency in element.dependencies:
                self.unregisterElementsFromTree(dependency)
        # /////////////// Handling of the FolderNode
        elif isinstance(element, FolderNode):
            if element.parent:
                element.parent.removeChild(element)
            if element.poolShares:
                for poolShare in element.poolShares.values():
                    del poolShare.pool.poolShares[poolShare.node]
                    del self.poolShares[poolShare.id]
                    self.toArchiveElements.append(poolShare)

            if element.additionnalPoolShares:
                for poolShare in element.additionnalPoolShares.values():
                    del poolShare.pool.poolShares[poolShare.node]
                    del self.poolShares[poolShare.id]
                    self.toArchiveElements.append(poolShare)

            del self.nodes[element.id]
            self.toArchiveElements.append(element)
            for dependency in element.dependencies:
                self.unregisterElementsFromTree(dependency)
        # /////////////// Handling of the Command
        elif isinstance(element, Command):
            del self.commands[element.id]
            self.toArchiveElements.append(element)

    ### methods called after interaction with a Task

    def onTaskCreation(self, task):
        # logger.info("  -- on task creation: %s" % task)

        if task.id is None:
            self.taskMaxId += 1
            task.id = self.taskMaxId
            self.toCreateElements.append(task)
        else:
            self.taskMaxId = max(self.taskMaxId, task.id, StatDB.getTasksMaxId(), StatDB.getTaskGroupsMaxId())
        self.tasks[task.id] = task

    def onTaskDestruction(self, task):
        # logger.info("  -- on task destruction: %s" % task)
        self.unregisterElementsFromTree(task)

    def onTaskChange(self, task, field, oldvalue, newvalue):
        """
        Normally, taskgroup should not be updated to DB, there would be too manby updates due to command/state changes
        However in order to keep track of comments (stored in task's tags[comment] field), we make the following change:
        - enable task/taskgroups update in DB (cf pulidb.py)
        - disable changeEvent (append an event in dispatchTree.toModifyElements array) for all fields of tasks and TGs
          BUT the only field we want to update: "tags"
        """
        if field == "tags":
            self.toModifyElements.append(task)

    ### methods called after interaction with a BaseNode

    def onNodeCreation(self, node):
        # logger.info("  -- on node creation: %s" % node)
        if node.id is None:
            self.nodeMaxId += 1
            node.id = self.nodeMaxId
            self.toCreateElements.append(node)
        else:
            self.nodeMaxId = max(self.nodeMaxId, node.id, StatDB.getFolderNodesMaxId(), StatDB.getTaskNodesMaxId())
        if node.parent is None:
            node.parent = self.root

    def onNodeDestruction(self, node):
        # logger.info("  -- on node destruction: %s" % node)
        del self.nodes[node.id]

    def onNodeChange(self, node, field, oldvalue, newvalue):
        # logger.info("  -- on node change: %s [ %s = %s -> %s ]" % (node,field, oldvalue, newvalue) )
        # FIXME: do something when nodes are reparented from or to the root node
        if node.id is not None:
            self.toModifyElements.append(node)
            if field == "status" and node.reverseDependencies:
                self.modifiedNodes.append(node)

    ### methods called after interaction with a RenderNode

    def onRenderNodeCreation(self, renderNode):
        if renderNode.id is None:
            self.renderNodeMaxId += 1
            renderNode.id = self.renderNodeMaxId
            self.toCreateElements.append(renderNode)
        else:
            self.renderNodeMaxId = max(self.renderNodeMaxId, renderNode.id, StatDB.getRenderNodesMaxId())
        self.renderNodes[renderNode.name] = renderNode

    def onRenderNodeDestruction(self, rendernode):
        try:
            del self.renderNodes[rendernode.name]
            self.toArchiveElements.append(rendernode)
        except KeyError:
            # TOFIX: use of class method vs obj method in changeListener might generate a duplicate call
            logger.warning("RN %s seems to have been deleted already." % rendernode.name)

    def onRenderNodeChange(self, rendernode, field, oldvalue, newvalue):
        if field == "performance":
            self.toModifyElements.append(rendernode)

    ### methods called after interaction with a Pool

    def onPoolCreation(self, pool):
        if pool.id is None:
            self.poolMaxId += 1
            pool.id = self.poolMaxId
            self.toCreateElements.append(pool)
        else:
            self.poolMaxId = max(self.poolMaxId, pool.id, StatDB.getPoolsMaxId())
        self.pools[pool.name] = pool

    def onPoolDestruction(self, pool):
        del self.pools[pool.name]
        self.toArchiveElements.append(pool)

    def onPoolChange(self, pool, field, oldvalue, newvalue):
        if pool not in self.toModifyElements:
            self.toModifyElements.append(pool)

    ### methods called after interaction with a Command

    def onCommandCreation(self, command):
        if command.id is None:
            self.commandMaxId += 1
            command.id = self.commandMaxId
            self.toCreateElements.append(command)
        else:
            self.commandMaxId = max(self.commandMaxId, command.id, StatDB.getCommandsMaxId())
        self.commands[command.id] = command

    def onCommandChange(self, command, field, oldvalue, newvalue):
        self.toModifyElements.append(command)
        if command.task is not None:
            for node in command.task.nodes.values():
                node.invalidate()

    ### methods called after interaction with a Pool

    def onPoolShareCreation(self, poolShare):
        if poolShare.id is None:
            self.poolShareMaxId += 1
            poolShare.id = self.poolShareMaxId
            self.toCreateElements.append(poolShare)
        else:
            self.poolShareMaxId = max(self.poolShareMaxId, poolShare.id, StatDB.getPoolSharesMaxId())
        self.poolShares[poolShare.id] = poolShare
Example 19
File: memory.py Project: oeway/WorQ
from queue import Empty, Queue  # on Python 2: from Queue import Queue, Empty
from threading import Lock
from weakref import WeakValueDictionary

# AbstractTaskQueue and const come from the surrounding WorQ package;
# _REFS is assumed to be a module-level registry of queue instances:
_REFS = WeakValueDictionary()


class TaskQueue(AbstractTaskQueue):
    """Simple in-memory task queue implementation"""

    @classmethod
    def factory(cls, url, name=const.DEFAULT, *args, **kw):
        obj = _REFS.get((url, name))
        if obj is None:
            obj = _REFS[(url, name)] = cls(url, name, *args, **kw)
        return obj

    def __init__(self, *args, **kw):
        super(TaskQueue, self).__init__(*args, **kw)
        self.queue = Queue()
        self.results = WeakValueDictionary()
        self.results_lock = Lock()

    def _init_result(self, result, status, message):
        with self.results_lock:
            if result.id in self.results:
                return False
            self.results[result.id] = result
        result.__status = status
        result.__value = Queue()
        result.__task = message
        result.__args = {}
        result.__lock = Lock()
        result.__for = None
        return True

    def enqueue_task(self, result, message):
        if self._init_result(result, const.ENQUEUED, message):
            self.queue.put(result)
            return True
        return False

    def defer_task(self, result, message, args):
        if self._init_result(result, const.PENDING, message):
            results = self.results
            # keep references to results to prevent GC
            result.__refs = [results.get(arg) for arg in args]
            return True
        return False

    def undefer_task(self, task_id):
        result = self.results[task_id]
        self.queue.put(result)

    def get(self, timeout=None):
        try:
            result = self.queue.get(timeout=timeout)
        except Empty:
            return None
        result.__status = const.PROCESSING
        return result.id, result.__task

    def size(self):
        return len(self.results)

    def discard_pending(self):
        with self.results_lock:
            while True:
                try:
                    self.queue.get_nowait()
                except Empty:
                    break
            self.results.clear()

    def reserve_argument(self, argument_id, deferred_id):
        result = self.results.get(argument_id)
        if result is None:
            return (False, None)
        with result.__lock:
            if result.__for is not None:
                return (False, None)
            result.__for = deferred_id
            try:
                message = result.__value.get_nowait()
            except Empty:
                message = None
            if message is not None:
                with self.results_lock:
                    self.results.pop(argument_id, None)
            return (True, message)

    def set_argument(self, task_id, argument_id, message):
        result = self.results[task_id]
        with self.results_lock:
            self.results.pop(argument_id, None)
        with result.__lock:
            result.__args[argument_id] = message
            return len(result.__args) == len(result.__refs)

    def get_arguments(self, task_id):
        try:
            return self.results[task_id].__args
        except KeyError:
            return {}

    def set_task_timeout(self, task_id, timeout):
        pass

    def get_status(self, task_id):
        result = self.results.get(task_id)
        return None if result is None else result.__status

    def set_result(self, task_id, message, timeout):
        result = self.results.get(task_id)
        if result is not None:
            with result.__lock:
                result.__value.put(message)
                return result.__for

    def pop_result(self, task_id, timeout):
        result = self.results.get(task_id)
        if result is None:
            return const.TASK_EXPIRED
#        with result.__lock:
#            if result.__for is not None:
#                raise NotImplementedError
#                #return const.RESERVED
#            result.__for = task_id
        try:
            if timeout == 0:
                value = result.__value.get_nowait()
            else:
                value = result.__value.get(timeout=timeout)
        except Empty:
            value = None
        else:
            self.results.pop(task_id, None)
        return value

    def discard_result(self, task_id, task_expired_token):
        result = self.results.pop(task_id, None)
        if result is not None:
            result.__value.put(task_expired_token)
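
A note on the double-underscore attributes above: because they are assigned inside the TaskQueue class body, name mangling stores them on the result objects as _TaskQueue__status, _TaskQueue__value, and so on; all access happens within the class, which makes them private per-queue state. A tiny demonstration of the mechanism:

class Owner(object):
    def tag(self, obj):
        obj.__mark = 1  # actually stored as obj._Owner__mark

o = type("O", (), {})()
Owner().tag(o)
assert o._Owner__mark == 1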
Example 20
import sys
from weakref import WeakValueDictionary

# PYTHON3 is assumed to be defined at module level, for example:
PYTHON3 = sys.version_info[0] >= 3


class Signal(object):
    def __init__(self):
        self.__slots = WeakValueDictionary()

    def __call__(self, *args, **kargs):
        for key in self.__slots:
            func, _ = key
            func(self.__slots[key], *args, **kargs)

    def connect(self, slot):
        if PYTHON3:
            key = (slot.__func__, id(slot.__self__))
            self.__slots[key] = slot.__self__
        else:
            key = (slot.im_func, id(slot.im_self))
            self.__slots[key] = slot.im_self

    def disconnect(self, slot):
        if PYTHON3:
            key = (slot.__func__, id(slot.__self__))
            if key in self.__slots:
                self.__slots.pop(key)
        else:
            key = (slot.im_func, id(slot.im_self))
            if key in self.__slots:
                self.__slots.pop(key)

    def clear(self):
        self.__slots.clear()

        ## Sample usage:
        #class Model(object):
        #  def __init__(self, value):
        #    self.__value = value
        #    self.changed = Signal()
        #
        #  def set_value(self, value):
        #    self.__value = value
        #    self.changed() # Emit signal
        #
        #  def get_value(self):
        #    return self.__value
        #
        #
        #class View(object):
        #  def __init__(self, model):
        #    self.model = model
        #    model.changed.connect(self.model_changed)
        #
        #  def model_changed(self):
        #    print "New value:", self.model.get_value()
        #
        #
        #model = Model(10)
        #view1 = View(model)
        #view2 = View(model)
        #view3 = View(model)
        #
        #model.set_value(20)
        #
        #del view1
        #model.set_value(30)
        #
        #model.changed.clear()
        #model.set_value(40)
        ### end of http://code.activestate.com/recipes/576477/ }}}
Example 21
class CommandDispatcher(object):

    count = 0

    def __init__(self, dispatcher_id=None):

        try:
            self.dispatcher_id = dispatcher_id
        except AttributeError:
            # subclasses may expose dispatcher_id as a read-only property;
            # in that case keep whatever the property already provides
            pass

        self.main_window = None
        """:type: QtGui.QWidget"""

        self._undo_stack = UndoStack()
        self._action_history = []

        self._actions = {}
        self._commands = {}

        self._parent_dispatcher = None
        """:type: ref[CommandDispatcher]"""

        self._children_dispatchers = WeakValueDictionary()

        self._action_added = MrSignal()

        self._main_data = None

        self.main_data_can_change = True

    @property
    def main_data(self):
        return self._main_data

    @main_data.setter
    def main_data(self, value):
        if self.main_data_can_change:
            self._main_data = value

    @property
    def undo_stack(self):
        try:
            return self._parent_dispatcher().undo_stack
        except (TypeError, AttributeError):
            return self._undo_stack

    @property
    def action_history(self):
        try:
            return self._parent_dispatcher().action_history
        except (TypeError, AttributeError):
            return self._action_history

    @property
    def action_added(self):
        try:
            return self._parent_dispatcher().action_added
        except (TypeError, AttributeError):
            return self._action_added

    def clear_children(self):
        self._children_dispatchers.clear()

    def add_child(self, dispatcher):

        if dispatcher.dispatcher_id is None:
            CommandDispatcher.count += 1
            dispatcher.dispatcher_id = str(CommandDispatcher.count)

        if dispatcher.dispatcher_id in self._children_dispatchers:
            assert self._children_dispatchers[dispatcher.dispatcher_id] is dispatcher
            return

        self._children_dispatchers[dispatcher.dispatcher_id] = dispatcher

        dispatcher.set_parent(self)

    def set_parent(self, parent_dispatcher):
        self._parent_dispatcher = ref(parent_dispatcher)

    def multiple_dispatch(self, actions):
        for action in actions:
            self.dispatch(action)

    def _get_command(self, action):

        if isinstance(action, Action):
            action_name = action.action_name

            try:
                command = self._commands[action_name](action, main_window=self.main_window)
            except KeyError:
                raise TypeError('CommandDispatcher4: Command %s not found in defined actions!' % str(action_name))

        elif isinstance(action, (Command, ChildCommand)):
            command = action
            command.main_window = self.main_window

        else:
            raise TypeError('CommandDispatcher4: Action type not valid! %s' % str(action))

        return command

    def _try_undo_redo(self, action):
        if isinstance(action, str):
            tmp = action.upper()

            if tmp == 'UNDO':
                self.action_history.append('Undo')
                self.action_added.emit(action)
                self.undo_stack.undo()
                return True
            elif tmp == 'REDO':
                self.action_history.append('Redo')
                self.action_added.emit(action)
                self.undo_stack.redo()
                return True

        return False

    def undo(self):
        self.dispatch('Undo')

    def redo(self):
        self.dispatch('Redo')

    def _subdata(self, data):
        return self.main_data.subdata(data)

    def _action_str(self, action, action_data=None):

        assert isinstance(action, str)

        if action.upper() in ('REDO', 'UNDO'):
            return action

        # debuginfo(self.dispatcher_id, self._parent_dispatcher)

        if None not in (self.dispatcher_id, self._parent_dispatcher):
            if action_data is not None:
                data = action_data.split()

                # debuginfo(22222, data)

                if data[0] is None and data[1] is None:
                    action = '%s.%s()' % (self.dispatcher_id, action)
                elif data[0] is None:
                    action = '%s.%s%s' % (self.dispatcher_id, action, data[1])
                elif data[1] is None:
                    action = '%s[%s].%s()' % (self.dispatcher_id, data[0], action)
                else:
                    action = '%s[%s].%s%s' % (self.dispatcher_id, data[0], action, data[1])

            else:
                action = '%s.%s' % (self.dispatcher_id, action)

        return action

    def dispatch(self, action, tracking=True):

        if self._try_undo_redo(action):
            return True

        try:
            action_name, action_data = action
            action_name = action_name.replace('.', '_')
        except (TypeError, ValueError):
            assert isinstance(action, str)
            action_info = self.parse_action(action)
            # debuginfo(action_info)
            return self._dispatch(action_info, tracking, action)

        if not isinstance(action_data, tuple):
            action_data = (action_data,)

        try:
            action_cls = self._actions[action_name]
        except KeyError:
            raise TypeError('CommandDispatcher4: Action type not valid! %s' % str(action_name))

        action_data = action_cls.ActionDataCls(*action_data)

        try:
            # debuginfo('getting action_str')
            action_str = self._action_str(action_name, action_data)
            # debuginfo(1111111, action_str)
            return self._parent_dispatcher()._traceback(action_str, tracking, action_data)
        except (TypeError, AttributeError):
            action_str = '%s%s' % (action_name, str(action_data))
            action_info = self.parse_action(action_str)
            return self._dispatch(action_info, tracking, action_str, action_data)

    def _traceback(self, action, tracking=True, action_data=None):

        action_str = self._action_str(action)

        try:
            return self._parent_dispatcher()._traceback(action_str, tracking, action_data)
        except (TypeError, AttributeError):
            action_info = self.parse_action(action_str)
            # debuginfo(action_str)
            return self._dispatch(action_info, tracking, action_str, action_data)

    def _dispatch(self, action_info, tracking, action_str, action_data=None):
        _dispatches, _action = action_info

        # debuginfo(action_info)

        if len(_dispatches) == 0:
            action_name, action_data_ = _action

            if action_data is None:
                action_data = action_data_

            # if not isinstance(action_data, tuple):
            #     action_data = (action_data,)

            return self._final_dispatch(action_name, action_data, action_str, tracking)

        dispatcher_id, dispatcher_data = _dispatches[0]

        # debuginfo(action_info)

        if self.dispatcher_id is not None:
            try:
                assert dispatcher_id == self.dispatcher_id
            except AssertionError:
                print('This dispatcher = %s, other dispatcher = %s' % (self.dispatcher_id, dispatcher_id))
                raise

            try:
                dispatcher_id = _dispatches[1][0]
            except IndexError:
                _action_info = [], _action
                return self._dispatch(_action_info, tracking, action_str, action_data)

        else:
            return self._children_dispatchers[dispatcher_id]._dispatch(action_info, tracking, action_str, action_data)

        # debuginfo(self.dispatcher_id, list(self._children_dispatchers.keys()))
        dispatcher = self._children_dispatchers[dispatcher_id]

        # FIXME: should the dispatcher be responsible for this?  might be taken care of by the commands

        if dispatcher_data is not None:

            subdata = self._subdata(dispatcher_data)
        else:
            subdata = None

        old_main_data = dispatcher.get_model

        if subdata is not None:
            dispatcher.get_model = subdata

        _action_info = _dispatches[1:], _action

        # noinspection PyProtectedMember
        dispatch_result = dispatcher._dispatch(_action_info, tracking, action_str, action_data)

        dispatcher.get_model = old_main_data

        return dispatch_result

    def _final_dispatch(self, action_name, action_data, action_str, tracking=True):

        # debuginfo(action_str)

        if self._try_undo_redo(action_name):
            return True

        try:
            action_cls = self._actions[action_name]
        except KeyError:
            raise TypeError('CommandDispatcher4: Action type not valid! %s' % str(action_name))

        if isinstance(action_data, tuple):
            action_data = action_cls.ActionDataCls(*action_data)

        assert isinstance(action_data, action_cls.ActionDataCls)

        action = action_cls(action_data)
        action.get_model = self.main_data

        command = self._get_command(action)

        if command is None:
            return False

        command = self._wrap_command(command)

        command.skip_first = False
        command.redo()
        command_result = command.command_result
        command.skip_first = True

        # TODO: not sure if this is the desired behavior
        if command_result is False:
            command.finalize()
            return False

        action_ = command.action

        if action_.log_action is True and tracking is True:
            if action_str is None:
                action_str = str(action_)
            self.action_history.append(action_str)
            # this notifies the main window that an action has been added, so that it can update the log
            self.action_added.emit(action_str)

        # if the action is successful, push it to the stack (it will be skipped on first push)
        if command_result is True:
            if command.push_to_stack and tracking is True:

                self.undo_stack.push(command)

                if command.set_clean is True:
                    self.undo_stack.setClean()

            return True
        else:
            return False

    def _wrap_command(self, command):
        try:
            return self._parent_dispatcher()._wrap_command(command)
        except (TypeError, AttributeError):
            return command

    def verify(self):

        action_keys = set(self._actions.keys())
        command_keys = set(self._commands.keys())

        if action_keys != command_keys:
            if len(action_keys) > len(command_keys):
                raise Exception("CommandDispatcher4: Missing commands! %s" % str(action_keys - command_keys))
            else:
                raise Exception("CommandDispatcher4: Missing actions! %s" % str(command_keys - action_keys))

        for key, child in iteritems(self._children_dispatchers):
            child.verify()

    def finalize(self):
        self._parent_dispatcher = None
        self._actions.clear()
        self._commands.clear()

        for key, child in iteritems(self._children_dispatchers):
            child.finalize()

        self._children_dispatchers.clear()

    def __call__(self, action_name):
        action_name = action_name.replace('.', '_')

        def add_action(cls):

            if issubclass(cls, Action):
                self._actions[action_name] = cls
            elif issubclass(cls, Command):
                self._commands[action_name] = cls
            else:
                raise TypeError("CommandDispatcher4: %s is not an Action or Command!" % cls.__name__)

            cls.action_name = action_name

            return cls

        return add_action

    @staticmethod
    def parse_action(s):

        # debuginfo(s)

        tmp = s
        data = ''

        if s[-1] == ')':
            count = 1

            for i in range(1, len(s)):
                a = s[-i-1]

                if a == ')':
                    count += 1

                elif a == '(':
                    count -= 1

                if count == 0:
                    j = len(s) - i - 1

                    data_ = s[j + 1:-1]
                    if data_ != '':
                        data = literal_eval(data_)
                    else:
                        data = []

                    tmp = s[:j]

                    break

        tmp = tmp.split('.')

        # debuginfo(tmp, data)

        tmp_ = tmp[:-1]

        _tmp = []

        for i in tmp_:
            a = i.split('[')
            b = a[0]
            try:
                c = literal_eval(a[1][:-1])
            except IndexError:
                c = None

            _tmp.append((b, c))

        try:
            insert_data = _tmp[-1][1]
            _tmp[-1] = _tmp[-1][0], None

            if insert_data is not None:
                try:
                    data.insert(0, insert_data)
                    data = tuple(data)
                except AttributeError:
                    data = tuple([insert_data, data])

            else:
                data = tuple([data])

        except IndexError:
            data = tuple(data)

        # debuginfo(_tmp, (tmp[-1], data))

        return _tmp, (tmp[-1], data)
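
To make the accepted string grammar concrete, here are two hedged traces of the static parser (they assume the class above plus `from ast import literal_eval`; note how the innermost dispatcher's bracket index is folded into the front of the action data):

assert CommandDispatcher.parse_action("save()") == \
    ([], ('save', ()))
assert CommandDispatcher.parse_action("grid[2].set_cell((3, 4))") == \
    ([('grid', None)], ('set_cell', (2, (3, 4))))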
Esempio n. 23
0
    def clear(self):
        self._head = self._tail = None
        self._keepDict.clear()
        WeakValueDictionary.clear(self)
Esempio n. 24
0
class JsonCacheHandler(BaseCacheHandler):
    """
    This cache handler implements an on-disk cache store in the form
    of compressed JSON. To improve performance further, it also loads
    data from the on-disk cache into memory, and uses a weakref
    object cache for assembled objects.

    Required arguments:
    cache_path -- file name where on-disk cache will be stored (.json.bz2)
    """
    def __init__(self, cache_path):
        self._cache_path = os.path.abspath(cache_path)
        # Initialize memory data cache
        self.__type_data_cache = {}
        self.__attribute_data_cache = {}
        self.__effect_data_cache = {}
        self.__modifier_data_cache = {}
        self.__fingerprint = None
        # Initialize weakref object cache
        self.__type_obj_cache = WeakValueDictionary()
        self.__attribute_obj_cache = WeakValueDictionary()
        self.__effect_obj_cache = WeakValueDictionary()
        self.__modifier_obj_cache = WeakValueDictionary()

        # If cache doesn't exist, silently finish initialization
        if not os.path.exists(self._cache_path):
            return
        # Read JSON into local variable
        try:
            with bz2.BZ2File(self._cache_path, 'r') as file:
                json_data = file.read().decode('utf-8')
                data = json.loads(json_data)
        except KeyboardInterrupt:
            raise
        # If file doesn't exist, JSON load errors occur, or
        # anything else bad happens, do not load anything
        # and leave values as initialized
        except:
            msg = 'error during reading cache'
            logger.error(msg)
        # Load data into data cache, if no errors occurred
        # during JSON reading/parsing
        else:
            self.__update_mem_cache(data)

    def get_type(self, type_id):
        try:
            type_id = int(type_id)
        except TypeError as e:
            raise TypeFetchError(type_id) from e
        try:
            type_ = self.__type_obj_cache[type_id]
        except KeyError:
            # We do str(int(id)) here because JSON dictionaries
            # always have strings as key
            json_type_id = str(type_id)
            try:
                type_data = self.__type_data_cache[json_type_id]
            except KeyError as e:
                raise TypeFetchError(type_id) from e
            type_ = Type(type_id=type_id,
                         group=type_data[0],
                         category=type_data[1],
                         attributes={
                             attr_id: attr_val
                             for attr_id, attr_val in type_data[2]
                         },
                         effects=tuple(
                             self.get_effect(effect_id)
                             for effect_id in type_data[3]),
                         default_effect=None if type_data[4] is None else
                         self.get_effect(type_data[4]))
            self.__type_obj_cache[type_id] = type_
        return type_

    def get_attribute(self, attr_id):
        try:
            attr_id = int(attr_id)
        except TypeError as e:
            raise AttributeFetchError(attr_id) from e
        try:
            attribute = self.__attribute_obj_cache[attr_id]
        except KeyError:
            json_attr_id = str(attr_id)
            try:
                attr_data = self.__attribute_data_cache[json_attr_id]
            except KeyError as e:
                raise AttributeFetchError(attr_id) from e
            attribute = Attribute(attribute_id=attr_id,
                                  max_attribute=attr_data[0],
                                  default_value=attr_data[1],
                                  high_is_good=attr_data[2],
                                  stackable=attr_data[3])
            self.__attribute_obj_cache[attr_id] = attribute
        return attribute

    def get_effect(self, effect_id):
        try:
            effect_id = int(effect_id)
        except TypeError as e:
            raise EffectFetchError(effect_id) from e
        try:
            effect = self.__effect_obj_cache[effect_id]
        except KeyError:
            json_effect_id = str(effect_id)
            try:
                effect_data = self.__effect_data_cache[json_effect_id]
            except KeyError as e:
                raise EffectFetchError(effect_id) from e
            effect = Effect(effect_id=effect_id,
                            category=effect_data[0],
                            is_offensive=effect_data[1],
                            is_assistance=effect_data[2],
                            duration_attribute=effect_data[3],
                            discharge_attribute=effect_data[4],
                            range_attribute=effect_data[5],
                            falloff_attribute=effect_data[6],
                            tracking_speed_attribute=effect_data[7],
                            fitting_usage_chance_attribute=effect_data[8],
                            build_status=effect_data[9],
                            modifiers=tuple(
                                self.get_modifier(modifier_id)
                                for modifier_id in effect_data[10]))
            self.__effect_obj_cache[effect_id] = effect
        return effect

    def get_modifier(self, modifier_id):
        try:
            modifier_id = int(modifier_id)
        except TypeError as e:
            raise ModifierFetchError(modifier_id) from e
        try:
            modifier = self.__modifier_obj_cache[modifier_id]
        except KeyError:
            json_modifier_id = str(modifier_id)
            try:
                modifier_data = self.__modifier_data_cache[json_modifier_id]
            except KeyError as e:
                raise ModifierFetchError(modifier_id) from e
            modifier = Modifier(modifier_id=modifier_id,
                                state=modifier_data[0],
                                scope=modifier_data[1],
                                src_attr=modifier_data[2],
                                operator=modifier_data[3],
                                tgt_attr=modifier_data[4],
                                domain=modifier_data[5],
                                filter_type=modifier_data[6],
                                filter_value=modifier_data[7])
            self.__modifier_obj_cache[modifier_id] = modifier
        return modifier

    def get_fingerprint(self):
        return self.__fingerprint

    def update_cache(self, data, fingerprint):
        # Make light version of data and add fingerprint
        # to it
        data = self.__strip_data(data)
        data['fingerprint'] = fingerprint
        # Update disk cache
        cache_folder = os.path.dirname(self._cache_path)
        if not os.path.isdir(cache_folder):
            os.makedirs(cache_folder, mode=0o755)
        with bz2.BZ2File(self._cache_path, 'w') as file:
            json_data = json.dumps(data)
            file.write(json_data.encode('utf-8'))
        # Update data cache; encode to JSON and decode back
        # to make sure form of data is the same as after
        # loading it from cache (e.g. dictionary keys are
        # stored as strings in JSON)
        data = json.loads(json_data)
        self.__update_mem_cache(data)

    def __strip_data(self, data):
        """
        Rework passed data, keying it and stripping dictionary
        keys from rows for performance.
        """
        slim_data = {}

        slim_types = {}
        for type_row in data['types']:
            type_id = type_row['type_id']
            slim_types[type_id] = (
                type_row['group'],
                type_row['category'],
                tuple(type_row['attributes'].items()),  # Dictionary -> tuple
                tuple(type_row['effects']),  # List -> tuple
                type_row['default_effect'])
        slim_data['types'] = slim_types

        slim_attribs = {}
        for attr_row in data['attributes']:
            attribute_id = attr_row['attribute_id']
            slim_attribs[attribute_id] = (attr_row['max_attribute'],
                                          attr_row['default_value'],
                                          attr_row['high_is_good'],
                                          attr_row['stackable'])
        slim_data['attributes'] = slim_attribs

        slim_effects = {}
        for effect_row in data['effects']:
            effect_id = effect_row['effect_id']
            slim_effects[effect_id] = (
                effect_row['effect_category'],
                effect_row['is_offensive'],
                effect_row['is_assistance'],
                effect_row['duration_attribute'],
                effect_row['discharge_attribute'],
                effect_row['range_attribute'],
                effect_row['falloff_attribute'],
                effect_row['tracking_speed_attribute'],
                effect_row['fitting_usage_chance_attribute'],
                effect_row['build_status'],
                tuple(effect_row['modifiers'])  # List -> tuple
            )
        slim_data['effects'] = slim_effects

        slim_modifiers = {}
        for modifier_row in data['modifiers']:
            modifier_id = modifier_row['modifier_id']
            slim_modifiers[modifier_id] = (modifier_row['state'],
                                           modifier_row['scope'],
                                           modifier_row['src_attr'],
                                           modifier_row['operator'],
                                           modifier_row['tgt_attr'],
                                           modifier_row['domain'],
                                           modifier_row['filter_type'],
                                           modifier_row['filter_value'])
        slim_data['modifiers'] = slim_modifiers

        return slim_data

    def __update_mem_cache(self, data):
        """
        Loads data into memory data cache.

        Required arguments:
        data -- dictionary with data to load
        """
        self.__type_data_cache = data['types']
        self.__attribute_data_cache = data['attributes']
        self.__effect_data_cache = data['effects']
        self.__modifier_data_cache = data['modifiers']
        self.__fingerprint = data['fingerprint']
        # Also clear object cache to make sure objects composed
        # from old data are gone
        self.__type_obj_cache.clear()
        self.__attribute_obj_cache.clear()
        self.__effect_obj_cache.clear()
        self.__modifier_obj_cache.clear()

    def __repr__(self):
        spec = [['cache_path', '_cache_path']]
        return make_repr_str(self, spec)
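
The handler's core trick is the weakref object cache: assembled objects stay shared while anyone uses them and vanish once the last consumer drops them. A self-contained sketch of that pattern (the Type stand-in is illustrative; immediate eviction on del assumes CPython's reference counting):

from weakref import WeakValueDictionary

class Type(object):              # minimal stand-in for the real Type class
    def __init__(self, type_id):
        self.type_id = type_id

obj_cache = WeakValueDictionary()

def get_type(type_id):
    try:
        return obj_cache[type_id]
    except KeyError:
        type_ = Type(type_id)
        obj_cache[type_id] = type_
        return type_

t1 = get_type(587)
assert get_type(587) is t1   # same assembled object while referenced
del t1
assert 587 not in obj_cache  # evicted once the last strong ref is gone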
Esempio n. 25
0
class RpcService(object):
    """ service for one socket """
    UID_LEN = 32

    def __init__(self, svr, sock, uid, size=None):
        if 0:
            # dead branch: only gives IDEs a type hint for self.svr
            self.svr = RpcServer()
        self.svr = svr
        #self._pool = Pool(size=size)
        self.sock = sock
        if isinstance(svr, RpcClient):
            self.sock_addr = svr.addr
        else:
            self.sock_addr = self.sock.getpeername()
        self.uid = str(uid)
        if len(self.uid) != self.UID_LEN:
            raise ValueError('uid length error: len(uid)=%d <> %d' % (
                len(uid), self.UID_LEN))

        self._slock = Semaphore()
        self._reconnected = None
        self.reconnect_timeout = RECONNECT_TIMEOUT
        #self.iter_id = itertools.cycle(xrange(MAX_INDEX))
        self._next_id = 0
        self._resps = {}
        self._proxys = WeakValueDictionary()
        self.stoped = True
        self.sock_error = False
        if HEARTBEAT_TIME > 0:
            self._heart_time = time.time()
            self._heart_task = spawn(self.heartbeat)
        self.shells = {}

    def next_id(self):
        self._next_id += 1
        if self._next_id >= MAX_INDEX:
            self._next_id = 1
        return self._next_id

    def start(self):
        if not self.stoped:
            return
        self.stoped = False
        self._recv_task = spawn(self._recver)
        self._recver_on_error = False
        #_services_.append(self.sock_addr)

    def remote_stop(self):
        #printf('remote_stop:%s', self.sock_addr)
        self.sock_error = True
        self.stop()

    def close(self):
        if not self.sock:
            return
        try:
            self.sock._sock.close()
        except socket_error:
            pass
        self.sock.close()
        self.sock = None

    def stop(self):
        if self.stoped:
            return
        self.stoped = True
        self._recv_task.kill(block=0)
        self._recv_task = None
        if not self.sock_error:
            try:
                #printf('remote_stop:%s', self.sock_addr)
                self.call('', 'remote_stop', tuple(), None, no_result=True)
                sleep(0.01)
            except:
                pass
        self.svr.svc_stop(self)
        if getattr(self, '_heart_task', None):
            self._heart_task.kill(block=False)
            self._heart_task = None
        try:
            self._stop_resps()
            self._stop_proxys()
        finally:
            self.close()
            #_services_.append('-%s' % str(self.sock_addr))

    def _stop_resps(self):
        error = RpcRuntimeError('service stopped')
        for k, v in self._resps.iteritems():
            v.set_exception(error)
        self._resps.clear()

    def _stop_proxys(self):
        if not len(self._proxys):
            return
        proxys = self._proxys.values()
        self._proxys.clear()
        for p in proxys:
            p.on_close()

##    def _sender(self):
##        running = True
##        _send = self.sock.sendall
##        try:
##            for data in self._send_queue:
##                _send('%s%s' %(pack('I', len(data)), data))
##        except GreenletExit:
##            pass

    def _recver(self):
        """ 接收处理数据 """
        recv_func = self.sock.recv

        def _read(c):
            d = recv_func(c)
            if d:
                return d
            if self.stoped:
                raise GreenletExit
            self._recver_on_error = True
            self._on_socket_error(None)
            self._recver_on_error = False
            return None

        try:
            sio = StringIO()
            while not self.stoped:
                dlen = 4
                d = ''
                while dlen > 0:
                    data = _read(dlen)
                    if data is None:
                        continue
                    d += data
                    dlen -= len(data)
                dlen = unpack('I', d)[0]
                #rs = []
                sio.seek(0)
                sio.truncate()
                while dlen > 0:
                    data = _read(dlen)
                    if data is None:
                        continue
                    #rs.append(data)
                    sio.write(data)
                    dlen -= len(data)
                #spawn(self._handle, loads(''.join(rs)))
                sio.seek(0)
                self._handle(load(sio))
                #self._pool.spawn(self._handle, loads(''.join(rs)))
        except GreenletExit:
            pass
        except Exception as err:
            printf('[RpcService._recver]%s', err)
        finally:
            self.stop()

    def _on_socket_error(self, err):
        if self.stoped or self.reconnect_timeout <= 0:
            self.sock_error = True
            self.stop()
            return

        def _reconnect():
            # try to reconnect, or wait until reconnected
            while not self.stoped:
                try:
                    self.svr.reconnect()
                    break
                except socket_error:
                    pass
                sleep(0.5)

        if self._reconnected is None:
            self._reconnected = AsyncResult()
            printf('socket error:%s,  RpcService try reconnect', err)
            self.send = self.send_wait
            if hasattr(self.svr, 'reconnect'):  #RpcClient.reconnect
                spawn(_reconnect)

        self._wait_reconnect()

    def _wait_reconnect(self):
        _reconnected = self._reconnected
        try:
            _reconnected.get(timeout=self.reconnect_timeout)
        except Timeout:
            pass
        if not _reconnected.successful():
            self.stop()
            if self.sock_error or _reconnected.exception is None:
                return
            self.sock_error = True
            raise _reconnected.exception

    def reconnect(self, sock):
        ##        if not self._recver_on_error:
        ##            self._recv_task.kill(exception=socket_error('reconnect'))
        self.sock = sock
        self.send = self.send_imme
        if self._reconnected is not None:
            self._reconnected.set(True)
            self._reconnected = None

    def send_imme(self, *args):
        data = dumps(args)
        with self._slock:
            try:
                self.sock.sendall('%s%s' % (pack('I', len(data)), data))
            except socket_error as err:
                self._on_socket_error(err)
                # resend once the connection is back
                self.sock.sendall('%s%s' % (pack('I', len(data)), data))


##        self._send_queue.put(dumps(args))

    def send_wait(self, *args):
        if self._reconnected is not None:
            self._wait_reconnect()
        self.send_imme(*args)

    send = send_imme

    def _read_response(self, index, timeout):
        rs = AsyncResult()
        self._resps[index] = rs
        resp = rs.wait(timeout)
        self._resps.pop(index, None)
        if not rs.successful():
            error = rs.exception
            if error is None:
                error = Timeout
            raise error
        return resp

    def _reg_obj(self, obj):
        if hasattr(obj, 'proxy_pack'):
            return obj.proxy_pack(), False
        if isinstance(obj, RpcProxy):
            return obj._id, False
        if hasattr(obj, '_rpc_proxy_'):
            return obj._rpc_proxy_(), True
        return self.svr.register(obj), False

    def call(self,
             obj_id,
             name,
             args,
             kw,
             no_result=False,
             timeout=CALL_TIMEORUT,
             pickle=False,
             proxy=False):
        dtype = RT_REQUEST
        if proxy:
            objs = args[0]  # first arg is a proxy (str, RpcProxy or list)
            if isinstance(objs, (tuple, list)):
                obj_ids = []
                for o in objs:
                    obj, is_pickle = self._reg_obj(o)
                    pickle = pickle or is_pickle
                    obj_ids.append(obj)
            else:
                obj, is_pickle = self._reg_obj(objs)
                pickle = pickle or is_pickle
                obj_ids = obj
            args = (obj_ids, ) + args[1:]
            dtype |= DT_PROXY
        if pickle:
            dtype |= DT_PICKLE
            argkw = pickle_dumps((args, kw), PICKLE_PROTOCOL)
        else:
            argkw = dumps((args, kw))
        if len(argkw) >= ZIP_LENGTH:
            dtype |= DT_ZIP
            argkw = zlib.compress(argkw, ZIP_LEVEL)
        if no_result:
            dtype |= ST_NO_RESULT
        index = self.next_id()  #iter_id.next()
        self.send(dtype, obj_id, index, name, argkw)
        if no_result:
            return
        result = self._read_response(index, timeout)
        return result

    def _handle_request(self, parts):
        dtype, obj_id, index, name, argkw = parts
        try:
            obj = self.get_export(obj_id)
            if obj is None:
                raise RpcExportNoFound(obj_id)
            func = getattr(obj, name)
            if not callable(func):
                raise RpcFuncNoFound(name)

            if dtype & DT_ZIP:
                argkw = zlib.decompress(argkw)
            if dtype & DT_PICKLE:
                args, kw = pickle_loads(argkw)
            else:
                args, kw = loads(argkw)

            if dtype & DT_PROXY:
                export_ids = args[0]
                if isinstance(export_ids, (tuple, list)):
                    proxys = []
                    for e in export_ids:
                        proxys.append(self.get_proxy(e))
                else:
                    proxys = self.get_proxy(export_ids)
                args = (proxys, ) + tuple(args[1:])

            if getattr(func, "_block_", True):
                spawn(self._handle_request_call, func, args, kw, dtype, index,
                      obj_id, name, argkw)
            else:
                self._handle_request_call(func, args, kw, dtype, index, obj_id,
                                          name, argkw)
        except Exception as e:
            log_except('export(%s).%s(%s)', obj_id, name, repr(argkw))
            if dtype & ST_NO_RESULT or self.svr.stoped:
                return
            self.send(RT_EXCEPTION, index, str(e))

    def _handle_request_call(self, func, args, kw, dtype, index, obj_id, name,
                             argkw):
        try:
            let = getcurrent()
            setattr(let, _service_name_, self)
            if args is None:
                rs = func()
            else:
                rs = func(*args, **kw) if kw is not None else func(*args)
            if dtype & ST_NO_RESULT:
                return
            if not getattr(func, '_rpc_pickle_result_', False):
                self.send(RT_RESPONSE, index, dumps(rs))
            else:
                self.send(RT_RESPONSE | DT_PICKLE, index,
                          pickle_dumps(rs, PICKLE_PROTOCOL))
        except Exception as e:
            log_except('export(%s).%s(%s)', obj_id, name, repr(argkw))
            if dtype & ST_NO_RESULT or self.svr.stoped:
                return
            self.send(RT_EXCEPTION, index, str(e))

    def _handle_response(self, parts):
        dtype, index, argkw = parts
        try:
            rs = self._resps.pop(index)
            if dtype & DT_PICKLE:
                result = pickle_loads(argkw)
            else:
                result = loads(argkw)
            rs.set(result)
        except KeyError:
            pass

    def _handle_exception(self, parts):
        _dtype, index, error = parts  # avoid shadowing the RT_EXCEPTION constant
        #try:
        #    error = pickle_loads(error)
        #except:
        error = RpcCallError(str(error))
        try:
            rs = self._resps.pop(index)
            rs.set_exception(error)
        except KeyError:
            pass

    def _handle(self, parts):
        #parts = (parts[0], ) + loads(parts[1]) if len(parts) ==2 else loads(parts[0])
        rt = parts[0] & RT_MARK
        if rt == RT_REQUEST:
            self._handle_request(parts)
        elif rt == RT_RESPONSE:
            self._handle_response(parts)
        elif rt == RT_EXCEPTION:
            self._handle_exception(parts)
        elif rt == RT_HEARTBEAT:
            self._heart_time = time.time()
        else:
            raise ValueError('unknown data:%s' % str(rt))

    def heartbeat(self):
        beat = RT_HEARTBEAT
        btime = HEARTBEAT_TIME
        check_times = HEARTBEAT_TIME * max(3, RECONNECT_TIMEOUT)
        try:
            while not self.stoped:
                self.send(beat)
                sleep(btime)
                if (self._heart_time + check_times) < time.time():
                    printf('heartbeat timeout!!!!!!!!')
                    self.sock_error = True
                    break
        finally:
            self.stop()

    @classmethod
    def handshake_svr(cls, sock):
        uid = sock.recv(cls.UID_LEN)
        return uid

    def handshake_cli(self):
        self.sock.sendall(self.uid)

    #######remote call##############
    def get_export(self, export_id):
        """ get export obj by export_name """
        if not export_id:
            return self
        return self.svr.get_export(export_id)

    def get_proxy(self, export_id, proxy_cls=None):
        """ remote call: get export obj by id """
        if isinstance(export_id, RpcProxy):
            return export_id
        if isinstance(export_id, (tuple, list)):
            export_cls = PROXYS[export_id[0]]
            return export_cls.proxy_unpack(export_id, svc=self)

        if proxy_cls in (None, RpcProxy):
            try:
                return self._proxys[export_id]
            except KeyError:
                proxy_cls = RpcProxy
                proxy = proxy_cls(export_id, svc=self)
                self.reg_proxy(export_id, proxy)
                return proxy
        else:
            p = proxy_cls(export_id, svc=self)
            self.reg_proxy(id(p), p)
            return p

    def reg_proxy(self, key, proxy):
        self._proxys[key] = proxy

    def stop_shell(self, shell_id):
        shell = self.shells.pop(shell_id, None)
        if not shell:
            return
        shell.stop()

    def start_shell(self, console_proxy, pre_prompt):
        from rpc_shell import RpcShell
        if self.svr.access and not self.svr.access.access_shell(self):
            printf('[rpc]shell deny:%s', self.sock_addr)
            return 0
        printf('[rpc]shell start:%s', self.sock_addr)
        shell = RpcShell(self, console_proxy, pre_prompt=pre_prompt)
        shell.start()
        #shell.stop remove shell from self.shells
        shell_id = id(shell)
        self.shells[shell_id] = shell
        return shell_id

    def stop_console(self, shell_id):
        self.call('',
                  'stop_shell', (shell_id, ),
                  None,
                  no_result=True,
                  timeout=20)

    def start_console(self, pre_prompt='', shell=None):
        from rpc_shell import RpcLocalConsole, RpcProxyConsole
        console = RpcLocalConsole(self) if shell is None else RpcProxyConsole(
            self, shell)
        shell_id = self.call('',
                             'start_shell', (console, pre_prompt),
                             None,
                             proxy=True,
                             timeout=20)
        try:
            console.wait(shell_id)
        finally:
            pass

    def execute(self, func, args, kw, **others):
        return self.svr.execute(func, args, kw)

    def valid_proxy(self, export_id):
        return self.get_export(export_id) is not None
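
Both send_imme and _recver speak the same wire format: every message is prefixed with a 4-byte length header. A self-contained sketch of that framing (names are illustrative; 'I' is the native unsigned int format the snippet itself uses, 4 bytes on common platforms):

from io import BytesIO
from struct import pack, unpack

def frame(payload):
    # 4-byte length header, matching pack('I', len(data)) in send_imme
    return pack('I', len(payload)) + payload

def read_frame(stream):
    (dlen,) = unpack('I', stream.read(4))
    return stream.read(dlen)

stream = BytesIO(frame(b'hello') + frame(b'world'))
assert read_frame(stream) == b'hello'
assert read_frame(stream) == b'world'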
Esempio n. 26
0
class FilesystemStore(Store):
    """
    Save and load objects in a given directory.  Uses Python's
    standard `pickle` module to serialize objects onto files.

    All objects are saved as files in the given directory (default:
    `gc3libs.defaults.JOBS_DIR`).  The file name is the object ID.

    If an object contains references to other `Persistable` objects,
    these are saved in the file they would have been saved if the
    `save` method was called on them in the first place, and only an
    'external reference' is saved in the pickled container. This
    ensures that: (1) only one copy of a shared object is ever saved,
    and (2) any shared reference to `Persistable` objects is correctly
    restored when restoring the container.

    The default `idfactory` assigns object IDs by appending a
    sequential number to the class name; see class `Id` for
    details.

    The `protocol` argument specifies the serialization protocol to use,
    if different from `gc3libs.persistence.serialization.DEFAULT_PROTOCOL`.

    Any extra keyword arguments are ignored for compatibility with
    `SqlStore`.
    """
    def __init__(self,
                 directory=gc3libs.defaults.JOBS_DIR,
                 idfactory=IdFactory(),
                 protocol=DEFAULT_PROTOCOL,
                 **extra_args):
        if isinstance(directory, Url):
            super(FilesystemStore, self).__init__(directory)
            directory = directory.path
        else:
            super(FilesystemStore, self).__init__(
                Url(scheme='file', path=os.path.abspath(directory)))
        self._directory = directory

        self.idfactory = idfactory
        self._loaded = WeakValueDictionary()
        self._protocol = protocol

    @same_docstring_as(Store.invalidate_cache)
    def invalidate_cache(self):
        self._loaded.clear()

    @same_docstring_as(Store.list)
    def list(self):
        if not os.path.exists(self._directory):
            return []
        return [
            id_ for id_ in os.listdir(self._directory)
            if not id_.endswith('.OLD')
        ]

    def _load_from_file(self, path):
        """Auxiliary method for `load`."""
        # gc3libs.log.debug("Loading object from file '%s' ...", path)
        with open(path, 'rb') as src:
            unpickler = make_unpickler(self, src)
            obj = unpickler.load()
            return obj

    @same_docstring_as(Store.load)
    def load(self, id_):
        # return cached copy, if any
        try:
            return self._loaded[str(id_)]
        except KeyError:
            pass

        # no cached copy, load from disk
        filename = os.path.join(self._directory, id_)

        sources = [filename, filename + '.OLD']
        for source in sources:
            if not os.path.exists(source):
                gc3libs.log.debug(
                    "Cannot load object %s from '%s':"
                    " file does not exist", id_, source)
                continue
            try:
                obj = self._load_from_file(source)
                break  # exit `for source in sources` loop ...
            except Exception as ex:
                gc3libs.log.warning("Failed loading file '%s': %s: %s",
                                    filename,
                                    ex.__class__.__name__,
                                    ex,
                                    exc_info=True)
        else:
            # complain loudly
            raise gc3libs.exceptions.LoadError(
                "Failed loading object %s from file(s) %r."
                " (Earlier log lines may provide more details.)" %
                (id_, sources))

        # minimal sanity check
        if not hasattr(obj, 'persistent_id'):
            raise gc3libs.exceptions.LoadError(
                "Invalid format in file '%s':"
                " missing 'persistent_id' attribute" % (filename, ))
        if str(obj.persistent_id) != str(id_):
            raise gc3libs.exceptions.LoadError(
                "Retrieved persistent ID '%s' %s"
                " does not match given ID '%s' %s" %
                (obj.persistent_id, type(obj.persistent_id), id_, type(id_)))

        # maybe update object after GC3Pie update?
        super(FilesystemStore, self)._update_to_latest_schema()

        # update cache
        assert str(id_) not in self._loaded
        self._loaded[str(id_)] = obj

        return obj

    @same_docstring_as(Store.remove)
    def remove(self, id_):
        filename = os.path.join(self._directory, id_)
        os.remove(filename)
        try:
            del self._loaded[str(id_)]
        except KeyError:
            pass

    @same_docstring_as(Store.replace)
    def replace(self, id_, obj):
        self._save_or_replace(id_, obj)

    @same_docstring_as(Store.save)
    def save(self, obj):
        if not hasattr(obj, 'persistent_id'):
            obj.persistent_id = self.idfactory.new(obj)
        self._save_or_replace(obj.persistent_id, obj)
        return obj.persistent_id

    def _save_or_replace(self, id_, obj):
        """
        Save `obj` into file identified by `id_`; if no such
        destination file exists, create it.  Ensure that the
        destination file is kept intact in case dumping `obj` fails.
        """
        filename = os.path.join(self._directory, id_)
        # gc3libs.log.debug("Storing job '%s' into file '%s'", obj, filename)

        if not os.path.exists(self._directory):
            try:
                os.makedirs(self._directory)
            except Exception as ex:
                # raise same exception but add context message
                gc3libs.log.error("Could not create jobs directory '%s': %s" %
                                  (self._directory, str(ex)))
                raise

        backup = None
        if os.path.exists(filename):
            backup = filename + '.OLD'
            os.rename(filename, backup)

        with open(filename, 'w+b') as tgt:
            try:
                pickler = make_pickler(self, tgt, obj)
                pickler.dump(obj)
            except Exception as err:
                gc3libs.log.error(
                    "Error saving task '%s' to file '%s': %s: %s", obj,
                    filename, err.__class__.__name__, err)
                # move backup file back in place
                if backup is not None:
                    try:
                        os.rename(backup, filename)
                    except:
                        pass  # ignore errors
                raise
            if hasattr(obj, 'changed'):
                obj.changed = False
            # remove backup file, if one was made
            if backup is not None:
                try:
                    os.remove(backup)
                except:
                    pass  # ignore errors
            # update cache
            if str(id_) in self._loaded:
                old = self._loaded[str(id_)]
                if old is not obj:
                    self._loaded[str(id_)] = obj
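
The key safety property of _save_or_replace is that the old file survives until the new dump succeeds. A minimal sketch of that backup-then-replace discipline (function and file names here are illustrative, not part of GC3Pie):

import os

def write_with_backup(filename, payload):
    backup = None
    if os.path.exists(filename):
        backup = filename + '.OLD'
        os.rename(filename, backup)        # keep the previous version around
    try:
        with open(filename, 'wb') as tgt:
            tgt.write(payload)
    except Exception:
        if backup is not None:
            os.rename(backup, filename)    # dump failed: restore the old file
        raise
    if backup is not None:
        os.remove(backup)                  # dump succeeded: drop the stale copy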
Esempio n. 27
0
class Storage(object):
	def __init__(self, cache=None):
		# {(InstanceInfo(instance).model_info.model, instance.pk): InstanceInfo, ...}
		self._alive = WeakValueDictionary()

		# {InstanceInfo(instance): instance, ...}
		self._dirty = {}

		if cache is not None:
			self._cache = cache
		else:
			self._cache = MRUCache(settings.MAX_CACHE)

		signals.register(
			(
				"instance-deleted",
	
				"start-tracking-changes",
				"stop-tracking-changes",
	
				"model-pre-init",
				"model-post-init",
				"model-pre-save",
				"model-post-save",
				"model-pre-delete",
				"model-post-delete",
				"model-pre-update",
				"model-post-update",
				
				# "model-history-reset",
				# "model-history-redo",
				# "model-history-undo",
				
				"relation-pre-get",
				"relation-post-get",
				"relation-pre-set",
				"relation-post-set",
				"relation-pre-delete",
				"relation-post-delete",
				"relation-pre-add",
				"relation-post-add",
				"relation-pre-remove",
				"relation-post-remove",

				"model-do-cache",
				"model-do-not-cache",
			),
			self
		)
		signals.register_with_callback("cache-rollback", self, "rollback")

	def get(self, query):
		return QuerySetIterator(query, self)

	def clear(self):
		self._dirty.clear()
		self._alive.clear()
		self._cache.clear()

	def set_dirty(self, inst_info):
		self._dirty[inst_info] = inst_info.get_inst()

	def cache(self, inf):
		self._alive.pop((inf.model_info.model, inf._lazypkval), None)
		self._alive[(inf.model_info.model, inf.get_pk_as_key())] = inf
		self._cache.add(inf)

	def uncache(self, inf):
		if inf in self._cache:
			self._cache.remove(inf)

		self._dirty.pop(inf, None)
		self._alive.pop((inf.model_info.model, inf.get_pk_as_key()), None)
		self._alive.pop((inf.model_info.model, inf._lazypkval), None)

	### signals ###

	#??? what is this used for?
	def instance_deleted(self, inst_info):
		print "instance_deleted", inst_info

	def model_pre_init(self, instance, **kwargs):
		signals.fire("stop-tracking-changes", instance=instance)

	def model_post_init(self, instance, **kwargs):
		inf = get_inst_info(instance)
		signals.fire("start-tracking-changes", instance=instance)
		self._alive[(inf.model_info.model, inf.get_pk_as_key())] = inf

		# If instance was not initialized with a value for primary key,
		# then it has not been saved yet and goes into self._dirty.
		if instance.pk is None:
			self.set_dirty(inf)

	def start_tracking_changes(self, instance):
		get_inst_info(instance)._meta["track-changes"] = True

	def stop_tracking_changes(self, instance):
		get_inst_info(instance)._meta["track-changes"] = False

	def model_pre_save(self, instance):
		pass

	def model_post_save(self, instance, created):
		inf = get_inst_info(instance)
		if not inf._meta["do-cache"]:
			return

		self.cache(inf)
		self._dirty.pop(inf, None)

		# On calling Model.delete, tracking of changes is stopped, so start
		# tracking now.
		signals.fire("start-tracking-changes", instance=instance)

	def model_pre_update(self, instance, value, fieldname):
		# instance._inst_info.record_change(fieldname, value)
		pass

	def model_post_update(self, instance, value, fieldname):
		inf = get_inst_info(instance)

		if inf not in self._dirty:
			self.set_dirty(inf)

	def model_pre_delete(self, instance):
		signals.fire("stop-tracking-changes", instance=instance)

	def model_post_delete(self, instance, deleted):
		if not deleted:
			return

		inf = get_inst_info(instance)

		self.uncache(inf)

		instance.id = None

	def model_do_cache(self, instance):
		inf = get_inst_info(instance)
		self.cache(inf)
		self.set_dirty(inf)
		inf._meta["do-cache"] = True
		signals.fire("start-tracking-changes", instance=instance)

	def model_do_not_cache(self, instance):
		inf = get_inst_info(instance)
		self.uncache(inf)
		inf._meta["do-cache"] = False
		signals.fire("stop-tracking-changes", instance=instance)

	def model_history_reset(self, instance, **kwargs):
		raise NotImplementedError

	def model_history_undo(self, instance, fieldname, **kwargs):
		raise NotImplementedError

	def model_history_redo(self, instance, fieldname, **kwargs):
		raise NotImplementedError

	def relation_pre_get(self, manager, **kwargs):
		pass
	
	def relation_post_get(self, manager, **kwargs):
		pass

	def relation_pre_set(self, manager, values, **kwargs):
		pass
	
	def relation_post_set(self, manager, values, **kwargs):
		pass

	def relation_pre_delete(self, manager, **kwargs):
		pass

	def relation_post_delete(self, manager, **kwargs):
		pass

	def relation_pre_add(self, manager, values, **kwargs):
		pass

	def relation_post_add(self, manager, values, **kwargs):
		pass

	def relation_pre_remove(self, manager, values, **kwargs):
		pass

	def relation_post_remove(self, manager, values, **kwargs):
		pass
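
Storage._alive is an identity map: at most one live wrapper per (model, pk), and dead wrappers drop out automatically because WeakValueDictionary holds them only weakly. A self-contained sketch of that idea (the Wrapper class and keys are illustrative):

from weakref import WeakValueDictionary

class Wrapper(object):
    def __init__(self, model, pk):
        self.model, self.pk = model, pk

alive = WeakValueDictionary()

def get_wrapper(model, pk):
    try:
        return alive[(model, pk)]
    except KeyError:
        w = Wrapper(model, pk)
        alive[(model, pk)] = w
        return w

a = get_wrapper('User', 1)
assert get_wrapper('User', 1) is a   # the same live instance comes back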
Esempio n. 28
0
class JsonCacheHandler(BaseCacheHandler):
    """
    This cache handler implements an on-disk cache store in the form
    of compressed JSON. To improve performance further, it also loads
    data from the on-disk cache into memory, and uses a weakref
    object cache for assembled objects.

    Required arguments:
    cache_path -- file name where on-disk cache will be stored (.json.bz2)
    """

    def __init__(self, cache_path):
        self._cache_path = os.path.abspath(cache_path)
        # Initialize memory data cache
        self.__type_data_cache = {}
        self.__attribute_data_cache = {}
        self.__effect_data_cache = {}
        self.__modifier_data_cache = {}
        self.__fingerprint = None
        # Initialize weakref object cache
        self.__type_obj_cache = WeakValueDictionary()
        self.__attribute_obj_cache = WeakValueDictionary()
        self.__effect_obj_cache = WeakValueDictionary()
        self.__modifier_obj_cache = WeakValueDictionary()

        # If cache doesn't exist, silently finish initialization
        if not os.path.exists(self._cache_path):
            return
        # Read JSON into local variable
        try:
            with bz2.BZ2File(self._cache_path, 'r') as file:
                json_data = file.read().decode('utf-8')
                data = json.loads(json_data)
        except KeyboardInterrupt:
            raise
        # If file doesn't exist, JSON load errors occur, or
        # anything else bad happens, do not load anything
        # and leave values as initialized
        except:
            msg = 'error during reading cache'
            logger.error(msg)
        # Load data into data cache, if no errors occurred
        # during JSON reading/parsing
        else:
            self.__update_mem_cache(data)

    def get_type(self, type_id):
        try:
            type_id = int(type_id)
        except TypeError as e:
            raise TypeFetchError(type_id) from e
        try:
            type_ = self.__type_obj_cache[type_id]
        except KeyError:
            # We do str(int(id)) here because JSON dictionaries
            # always have strings as key
            json_type_id = str(type_id)
            try:
                type_data = self.__type_data_cache[json_type_id]
            except KeyError as e:
                raise TypeFetchError(type_id) from e
            type_ = Type(
                type_id=type_id,
                group=type_data[0],
                category=type_data[1],
                attributes={attr_id: attr_val for attr_id, attr_val in type_data[2]},
                effects=tuple(self.get_effect(effect_id) for effect_id in type_data[3]),
                default_effect=None if type_data[4] is None else self.get_effect(type_data[4])
            )
            self.__type_obj_cache[type_id] = type_
        return type_

    def get_attribute(self, attr_id):
        try:
            attr_id = int(attr_id)
        except TypeError as e:
            raise AttributeFetchError(attr_id) from e
        try:
            attribute = self.__attribute_obj_cache[attr_id]
        except KeyError:
            json_attr_id = str(attr_id)
            try:
                attr_data = self.__attribute_data_cache[json_attr_id]
            except KeyError as e:
                raise AttributeFetchError(attr_id) from e
            attribute = Attribute(
                attribute_id=attr_id,
                max_attribute=attr_data[0],
                default_value=attr_data[1],
                high_is_good=attr_data[2],
                stackable=attr_data[3]
            )
            self.__attribute_obj_cache[attr_id] = attribute
        return attribute

    def get_effect(self, effect_id):
        try:
            effect_id = int(effect_id)
        except TypeError as e:
            raise EffectFetchError(effect_id) from e
        try:
            effect = self.__effect_obj_cache[effect_id]
        except KeyError:
            json_effect_id = str(effect_id)
            try:
                effect_data = self.__effect_data_cache[json_effect_id]
            except KeyError as e:
                raise EffectFetchError(effect_id) from e
            effect = Effect(
                effect_id=effect_id,
                category=effect_data[0],
                is_offensive=effect_data[1],
                is_assistance=effect_data[2],
                duration_attribute=effect_data[3],
                discharge_attribute=effect_data[4],
                range_attribute=effect_data[5],
                falloff_attribute=effect_data[6],
                tracking_speed_attribute=effect_data[7],
                fitting_usage_chance_attribute=effect_data[8],
                build_status=effect_data[9],
                modifiers=tuple(self.get_modifier(modifier_id) for modifier_id in effect_data[10])
            )
            self.__effect_obj_cache[effect_id] = effect
        return effect

    def get_modifier(self, modifier_id):
        try:
            modifier_id = int(modifier_id)
        except TypeError as e:
            raise ModifierFetchError(modifier_id) from e
        try:
            modifier = self.__modifier_obj_cache[modifier_id]
        except KeyError:
            json_modifier_id = str(modifier_id)
            try:
                modifier_data = self.__modifier_data_cache[json_modifier_id]
            except KeyError as e:
                raise ModifierFetchError(modifier_id) from e
            modifier = Modifier(
                modifier_id=modifier_id,
                state=modifier_data[0],
                scope=modifier_data[1],
                src_attr=modifier_data[2],
                operator=modifier_data[3],
                tgt_attr=modifier_data[4],
                domain=modifier_data[5],
                filter_type=modifier_data[6],
                filter_value=modifier_data[7]
            )
            self.__modifier_obj_cache[modifier_id] = modifier
        return modifier

    def get_fingerprint(self):
        return self.__fingerprint

    def update_cache(self, data, fingerprint):
        # Make light version of data and add fingerprint
        # to it
        data = self.__strip_data(data)
        data['fingerprint'] = fingerprint
        # Update disk cache
        cache_folder = os.path.dirname(self._cache_path)
        if not os.path.isdir(cache_folder):
            os.makedirs(cache_folder, mode=0o755)
        with bz2.BZ2File(self._cache_path, 'w') as file:
            json_data = json.dumps(data)
            file.write(json_data.encode('utf-8'))
        # Update data cache; encode to JSON and decode back
        # to make sure form of data is the same as after
        # loading it from cache (e.g. dictionary keys are
        # stored as strings in JSON)
        data = json.loads(json_data)
        self.__update_mem_cache(data)

    def __strip_data(self, data):
        """
        Rework the passed data: key rows by their id and replace
        per-row dictionaries with tuples for performance.
        """
        slim_data = {}

        slim_types = {}
        for type_row in data['types']:
            type_id = type_row['type_id']
            slim_types[type_id] = (
                type_row['group'],
                type_row['category'],
                tuple(type_row['attributes'].items()),  # Dictionary -> tuple
                tuple(type_row['effects']),  # List -> tuple
                type_row['default_effect']
            )
        slim_data['types'] = slim_types

        slim_attribs = {}
        for attr_row in data['attributes']:
            attribute_id = attr_row['attribute_id']
            slim_attribs[attribute_id] = (
                attr_row['max_attribute'],
                attr_row['default_value'],
                attr_row['high_is_good'],
                attr_row['stackable']
            )
        slim_data['attributes'] = slim_attribs

        slim_effects = {}
        for effect_row in data['effects']:
            effect_id = effect_row['effect_id']
            slim_effects[effect_id] = (
                effect_row['effect_category'],
                effect_row['is_offensive'],
                effect_row['is_assistance'],
                effect_row['duration_attribute'],
                effect_row['discharge_attribute'],
                effect_row['range_attribute'],
                effect_row['falloff_attribute'],
                effect_row['tracking_speed_attribute'],
                effect_row['fitting_usage_chance_attribute'],
                effect_row['build_status'],
                tuple(effect_row['modifiers'])  # List -> tuple
            )
        slim_data['effects'] = slim_effects

        slim_modifiers = {}
        for modifier_row in data['modifiers']:
            modifier_id = modifier_row['modifier_id']
            slim_modifiers[modifier_id] = (
                modifier_row['state'],
                modifier_row['scope'],
                modifier_row['src_attr'],
                modifier_row['operator'],
                modifier_row['tgt_attr'],
                modifier_row['domain'],
                modifier_row['filter_type'],
                modifier_row['filter_value']
            )
        slim_data['modifiers'] = slim_modifiers

        return slim_data

    def __update_mem_cache(self, data):
        """
        Loads data into memory data cache.

        Required arguments:
        data -- dictionary with data to load
        """
        self.__type_data_cache = data['types']
        self.__attribute_data_cache = data['attributes']
        self.__effect_data_cache = data['effects']
        self.__modifier_data_cache = data['modifiers']
        self.__fingerprint = data['fingerprint']
        # Also clear object cache to make sure objects composed
        # from old data are gone
        self.__type_obj_cache.clear()
        self.__attribute_obj_cache.clear()
        self.__effect_obj_cache.clear()
        self.__modifier_obj_cache.clear()

    def __repr__(self):
        spec = [['cache_path', '_cache_path']]
        return make_repr_str(self, spec)
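
The cache handler above (and the one in the next example) follows a two-level pattern: a plain dict keeps the raw rows loaded from disk, while a WeakValueDictionary keeps assembled objects alive only as long as callers still reference them. A minimal, self-contained sketch of that pattern (Item and get_item are illustrative names, not part of the handler above):

from weakref import WeakValueDictionary

class Item(object):
    """Stand-in for an assembled object such as Type or Attribute."""
    def __init__(self, item_id, row):
        self.item_id = item_id
        self.row = row

data_cache = {'1': ('group', 'category')}  # raw rows; JSON keys are strings
obj_cache = WeakValueDictionary()          # assembled objects, weakly held

def get_item(item_id):
    try:
        return obj_cache[item_id]          # fast path while the object is alive
    except KeyError:
        row = data_cache[str(item_id)]     # a KeyError here means "unknown id"
        item = obj_cache[item_id] = Item(item_id, row)
        return item

first = get_item(1)
assert get_item(1) is first                # same object while strongly referenced
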
Esempio n. 29
0
class JsonCacheHandler(CacheHandler):
    """
    This cache handler implements on-disk cache store in the form
    of compressed JSON. To improve performance further, it also
    keeps loads data from on-disk cache to memory, and uses weakref
    object cache for assembled objects.

    Positional arguments:
    diskCacheFolder -- folder where on-disk cache files are stored
    name -- unique indentifier of cache, e.g. Eos instance name
    logger -- logger to use for errors
    """
    def __init__(self, diskCacheFolder, name, logger):
        self._diskCacheFile = os.path.join(diskCacheFolder,
                                           '{}.json.bz2'.format(name))
        self._logger = logger
        # Initialize memory data cache
        self.__typeDataCache = {}
        self.__attributeDataCache = {}
        self.__effectDataCache = {}
        self.__modifierDataCache = {}
        self.__fingerprint = None
        # Initialize weakref object cache
        self.__typeObjCache = WeakValueDictionary()
        self.__attributeObjCache = WeakValueDictionary()
        self.__effectObjCache = WeakValueDictionary()
        self.__modifierObjCache = WeakValueDictionary()

        # If cache doesn't exist, silently finish initialization
        if not os.path.exists(self._diskCacheFile):
            return
        # Read JSON into local variable
        try:
            with bz2.BZ2File(self._diskCacheFile, 'r') as file:
                jsonData = file.read().decode('utf-8')
                data = json.loads(jsonData)
        except KeyboardInterrupt:
            raise
        # If file doesn't exist, JSON load errors occur, or
        # anything else bad happens, do not load anything
        # and leave values as initialized
        except:
            msg = 'error during reading cache'
            self._logger.error(msg, childName='cacheHandler')
        # Load data into data cache, if no errors occurred
        # during JSON reading/parsing
        else:
            self.__updateMemCache(data)

    def getType(self, typeId):
        try:
            type_ = self.__typeObjCache[typeId]
        except KeyError:
            # We do str(int(id)) here because JSON dictionaries
            # always have strings as keys
            jsonTypeId = str(int(typeId))
            try:
                data = self.__typeDataCache[jsonTypeId]
            except KeyError as e:
                raise TypeFetchError(typeId) from e
            groupId, catId, duration, discharge, optimal, falloff, tracking, fittable, effects, attribs = data
            type_ = Type(
                typeId=typeId,
                groupId=groupId,
                categoryId=catId,
                durationAttributeId=duration,
                dischargeAttributeId=discharge,
                rangeAttributeId=optimal,
                falloffAttributeId=falloff,
                trackingSpeedAttributeId=tracking,
                fittableNonSingleton=fittable,
                attributes={attrId: attrVal
                            for attrId, attrVal in attribs},
                effects=tuple(
                    self.getEffect(effectId) for effectId in effects))
            self.__typeObjCache[typeId] = type_
        return type_

    def getAttribute(self, attrId):
        try:
            attribute = self.__attributeObjCache[attrId]
        except KeyError:
            jsonAttrId = str(int(attrId))
            try:
                data = self.__attributeDataCache[jsonAttrId]
            except KeyError as e:
                raise AttributeFetchError(attrId) from e
            maxAttributeId, defaultValue, highIsGood, stackable = data
            attribute = Attribute(attributeId=attrId,
                                  maxAttributeId=maxAttributeId,
                                  defaultValue=defaultValue,
                                  highIsGood=highIsGood,
                                  stackable=stackable)
            self.__attributeObjCache[attrId] = attribute
        return attribute

    def getEffect(self, effectId):
        try:
            effect = self.__effectObjCache[effectId]
        except KeyError:
            jsonEffectId = str(int(effectId))
            try:
                data = self.__effectDataCache[jsonEffectId]
            except KeyError as e:
                raise EffectFetchError(effectId) from e
            effCategoryId, isOffence, isAssist, fitChanceId, buildStatus, modifiers = data
            effect = Effect(effectId=effectId,
                            categoryId=effCategoryId,
                            isOffensive=isOffence,
                            isAssistance=isAssist,
                            fittingUsageChanceAttributeId=fitChanceId,
                            buildStatus=buildStatus,
                            modifiers=tuple(
                                self.getModifier(modifierId)
                                for modifierId in modifiers))
            self.__effectObjCache[effectId] = effect
        return effect

    def getModifier(self, modifierId):
        try:
            modifier = self.__modifierObjCache[modifierId]
        except KeyError:
            jsonModifierId = str(int(modifierId))
            try:
                data = self.__modifierDataCache[jsonModifierId]
            except KeyError as e:
                raise ModifierFetchError(modifierId) from e
            state, context, srcAttrId, operator, tgtAttrId, location, filType, filValue = data
            modifier = Modifier(modifierId=modifierId,
                                state=state,
                                context=context,
                                sourceAttributeId=srcAttrId,
                                operator=operator,
                                targetAttributeId=tgtAttrId,
                                location=location,
                                filterType=filType,
                                filterValue=filValue)
            self.__modifierObjCache[modifierId] = modifier
        return modifier

    def getFingerprint(self):
        return self.__fingerprint

    def updateCache(self, data, fingerprint):
        # Make light version of data and add fingerprint
        # to it
        data = self.__stripData(data)
        data['fingerprint'] = fingerprint
        # Update disk cache
        os.makedirs(os.path.dirname(self._diskCacheFile),
                    mode=0o755,
                    exist_ok=True)
        with bz2.BZ2File(self._diskCacheFile, 'w') as file:
            jsonData = json.dumps(data)
            file.write(jsonData.encode('utf-8'))
        # Update data cache; encode to JSON and decode back
        # to make sure form of data is the same as after
        # loading it from cache (e.g. dictionary keys are
        # stored as strings in JSON)
        data = json.loads(jsonData)
        self.__updateMemCache(data)

    def __stripData(self, data):
        """
        Rework the passed data: key rows by their id and replace
        per-row dictionaries with tuples for performance.
        """
        slimData = {}

        slimTypes = {}
        for typeRow in data['types']:
            typeId = typeRow['typeId']
            slimTypes[typeId] = (
                typeRow['groupId'],
                typeRow['categoryId'],
                typeRow['durationAttributeId'],
                typeRow['dischargeAttributeId'],
                typeRow['rangeAttributeId'],
                typeRow['falloffAttributeId'],
                typeRow['trackingSpeedAttributeId'],
                typeRow['fittableNonSingleton'],
                tuple(typeRow['effects']),  # List -> tuple
                tuple(typeRow['attributes'].items()))  # Dictionary -> tuple
        slimData['types'] = slimTypes

        slimAttribs = {}
        for attrRow in data['attributes']:
            attrId = attrRow['attributeId']
            slimAttribs[attrId] = (attrRow['maxAttributeId'],
                                   attrRow['defaultValue'],
                                   attrRow['highIsGood'], attrRow['stackable'])
        slimData['attributes'] = slimAttribs

        slimEffects = {}
        for effectRow in data['effects']:
            effectId = effectRow['effectId']
            slimEffects[effectId] = (
                effectRow['effectCategory'], effectRow['isOffensive'],
                effectRow['isAssistance'],
                effectRow['fittingUsageChanceAttributeId'],
                effectRow['buildStatus'], tuple(effectRow['modifiers'])
            )  # List -> tuple
        slimData['effects'] = slimEffects

        slimModifiers = {}
        for modifierRow in data['modifiers']:
            modifierId = modifierRow['modifierId']
            slimModifiers[modifierId] = (modifierRow['state'],
                                         modifierRow['context'],
                                         modifierRow['sourceAttributeId'],
                                         modifierRow['operator'],
                                         modifierRow['targetAttributeId'],
                                         modifierRow['location'],
                                         modifierRow['filterType'],
                                         modifierRow['filterValue'])
        slimData['modifiers'] = slimModifiers

        return slimData

    def __updateMemCache(self, data):
        """
        Loads data into memory data cache.

        Positional arguments:
        data -- dictionary with data to load
        """
        self.__typeDataCache = data['types']
        self.__attributeDataCache = data['attributes']
        self.__effectDataCache = data['effects']
        self.__modifierDataCache = data['modifiers']
        self.__fingerprint = data['fingerprint']
        # Also clear object cache to make sure objects composed
        # from old data are gone
        self.__typeObjCache.clear()
        self.__attributeObjCache.clear()
        self.__effectObjCache.clear()
        self.__modifierObjCache.clear()
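
The on-disk format used by updateCache and __init__ above is plain bz2-compressed JSON. A minimal round-trip sketch of that format (the path and payload here are made up for illustration):

import bz2
import json
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'cache.json.bz2')
payload = {'types': {}, 'fingerprint': 'abc'}

with bz2.BZ2File(path, 'w') as f:   # write compressed JSON, as updateCache does
    f.write(json.dumps(payload).encode('utf-8'))

with bz2.BZ2File(path, 'r') as f:   # read it back, as __init__ does
    restored = json.loads(f.read().decode('utf-8'))

assert restored == payload
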
Esempio n. 30
0
class Signal(object):
    def __init__(self, *args):
        self.__slots = WeakValueDictionary()
        for slot in args:
            self.connect(slot)

    def __call__(self, slot, *args, **kwargs):
        """
        Emit the signal. If a slot is passed, the signal is emitted only
        for that slot; otherwise it is emitted for all connected slots.

        Calling this method directly leads to immediate signal processing,
        which may not be thread-safe. Use the emit method from this module
        for delayed calling of signals.
        """
        if slot is not None:
            slots = (self.__slots[self.key(slot)],)
        else:
            slots = self.__slots.values()
        for func in slots:
            func(*args, **kwargs)

    def key(self, slot):
        """
        Get local key name for slot.
        """
        if type(slot) == types.FunctionType:
            key = (slot.__module__, slot.__name__)
        elif type(slot) == types.MethodType:
            key = (slot.__func__, id(slot.__self__))
        elif isinstance(slot, basestring):
            if slot not in registred_slots:
                raise ValueError('Slot {0} does not exist.'.format(slot))
            key = slot
        else:
            raise ValueError('Slot {0} has non-slot type'.format(slot))
        return key

    def connect(self, slot):
        """
        Connect the signal to a slot. The slot may be a function, an
        instance method, or the name of a function previously registered
        by the `slot` decorator.
        """
        key = self.key(slot)
        if type(slot) == types.FunctionType:
            self.__slots[key] = slot
        elif type(slot) == types.MethodType:
            self.__slots[key] = partial(slot.__func__, slot.__self__)
        elif isinstance(slot, basestring):
            self.__slots[key] = registred_slots[slot]

    def disconnect(self, slot):
        """
        Remove the slot from the signal's connections.
        """
        key = self.key(slot)
        del self.__slots[key]

    def clear(self):
        """
        Disconnect all slots from signal.
        """
        self.__slots.clear()
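
A caveat worth knowing when reading the Signal above: a WeakValueDictionary holds its values only weakly, so a value with no other strong reference can disappear as soon as it is stored. Module-level functions survive because their module references them, but the partial objects created in connect for bound methods have no other owner, so on CPython they can be collected immediately and the connection silently lost. A short, self-contained demonstration of the underlying behavior:

from functools import partial
from weakref import WeakValueDictionary

d = WeakValueDictionary()

def greet(who):
    print('hello', who)

d['f'] = greet               # module-level function: the module keeps it alive
p = partial(greet, 'world')
d['p'] = p                   # survives only while `p` is referenced
del p
print(sorted(d.keys()))      # ['f'] on CPython: the partial entry is gone
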
Esempio n. 31
0
    def clear(self):
        self._keepDict.clear()
        WeakValueDictionary.clear(self)
class DispatchTree(object):

    def _display_(self):
        '''
        Debug-purpose method: returns a basic display of the dispatch tree as HTML.
        '''
        startTimer = time.time()
        timeout = 2.0

        result="<html><head><style>table,th,td { margin: 5px; border-collapse:collapse; border:1px solid black; }</style></head><body font-family='verdana'>"

        result +="<h3>Pools: %r</h3><table>" % len(self.pools)
        for i,curr in enumerate(self.pools):
            result += "<tr><td>%r</td><td>%s</td></tr>" % (i, self.pools[curr])

            if (time.time()-startTimer) > timeout:
                raise TimeoutException("TimeoutException occurred: the dispatchTree might be too large to dump")
        result += "</table>"

        result +="<h3>Rendernodes: %r</h3><table>" % len(self.renderNodes)
        for i,curr in enumerate(self.renderNodes):
            result += "<tr><td>%r</td><td>%r</td></tr>" % (i, self.renderNodes[curr])

            if (time.time()-startTimer) > timeout:
                raise TimeoutException("TimeoutException occurred: the dispatchTree might be too large to dump")
        result += "</table>"

        result +="<h3>PoolShares: (attribution de parc pour une tache fille du root, on attribue pas de poolshare aux autres)</h3><table>"
        for i,curr in enumerate(self.poolShares):
            result += "<tr><td>%r</td><td>%s</td></tr>" % (i, self.poolShares[curr])

            if (time.time()-startTimer) > timeout:
                raise TimeoutException("TimeoutException occurred: the dispatchTree might be too large to dump")
        result += "</table>"


        result +="<h3>Main level nodes (proxy info only):</h3><table>"
        result +="<tr><th>id</th><th>name</th><th>readyCommandCount</th><th>commandCount</th><th>completion</th><th>poolshares</th></tr>"
        for i,curr in enumerate(self.nodes[1].children):
            result += "<tr><td>%r</td><td>%s</td><td>%d</td><td>%d</td><td>%.2f</td><td>%s</td></tr>" % (i, curr.name, curr.readyCommandCount, curr.commandCount, curr.completion, curr.poolShares.values())

            if (time.time()-startTimer) > timeout:
                raise TimeoutException("TimeoutException occurred: the dispatchTree might be too large to dump")
        result += "</table>"


        result +="<h3>All nodes:</h3><table>"
        for i,curr in enumerate(self.nodes):
            result += "<tr><td>%d</td><td>%s</td><td>%r</td></tr>" % (i, curr, self.nodes[curr].name)

            if (time.time()-startTimer) > timeout:
                raise TimeoutException("TimeoutException occurred: the dispatchTree might be too large to dump")
        result += "</table>"


        result +="<h3>Tasks:</h3><table>"
        for i,curr in enumerate(self.tasks):
            result += "<tr><td>%r</td><td>%s</td></tr>" % (i, repr(self.tasks[curr]) )
            if (time.time()-startTimer) > timeout:
                raise TimeoutException("TimeoutException occurred: the dispatchTree might be too large to dump")
        result += "</table>"


        result +="<h3>Commands:</h3><table>"
        for i,curr in enumerate(self.commands):
            result += "<tr><td>%r</td><td>%s</td></tr>" % (i, self.commands[curr] )
            if (time.time()-startTimer) > timeout:
                raise TimeoutException("TimeoutException occurred: the dispatchTree might be too large to dump")
        result += "</table>"


        result +="<h3>Rules:</h3><table>"
        for i,curr in enumerate(self.rules):
            result += "<tr><td>%r</td><td>%s</td></tr>" % (i, curr )
            if (time.time()-startTimer) > timeout:
                raise TimeoutException("TimeoutException occurred: the dispatchTree might be too large to dump")
        result += "</table>"


        result +="</body></html>"
        logger.info("DispatchTree printed in %.6f s" % (time.time()-startTimer) )
        return result


    def __init__(self):
        # core data
        self.root = FolderNode(0, "root", None, "root", 1, 1, 0, FifoStrategy())
        self.nodes = WeakValueDictionary()
        self.nodes[0] = self.root
        self.pools = {}
        self.renderNodes = {}
        self.tasks = {}
        self.rules = []
        self.poolShares = {}
        self.commands = {}
        # deduced properties
        self.nodeMaxId = 0
        self.poolMaxId = 0
        self.renderNodeMaxId = 0
        self.taskMaxId = 0
        self.commandMaxId = 0
        self.poolShareMaxId = 0
        self.toCreateElements = []
        self.toModifyElements = []
        self.toArchiveElements = []
        # listeners
        self.nodeListener = ObjectListener(self.onNodeCreation, self.onNodeDestruction, self.onNodeChange)
        self.taskListener = ObjectListener(self.onTaskCreation, self.onTaskDestruction, self.onTaskChange)
        # # JSA
        # self.taskGroupListener = ObjectListener(self.onTaskCreation, self.onTaskDestruction, self.onTaskGroupChange)
        self.renderNodeListener = ObjectListener(self.onRenderNodeCreation, self.onRenderNodeDestruction, self.onRenderNodeChange)
        self.poolListener = ObjectListener(self.onPoolCreation, self.onPoolDestruction, self.onPoolChange)
        self.commandListener = ObjectListener(onCreationEvent=self.onCommandCreation, onChangeEvent=self.onCommandChange)
        self.poolShareListener = ObjectListener(self.onPoolShareCreation)
        self.modifiedNodes = []

    def registerModelListeners(self):
        BaseNode.changeListeners.append(self.nodeListener)
        Task.changeListeners.append(self.taskListener)
        TaskGroup.changeListeners.append(self.taskListener)
        RenderNode.changeListeners.append(self.renderNodeListener)
        Pool.changeListeners.append(self.poolListener)
        Command.changeListeners.append(self.commandListener)
        PoolShare.changeListeners.append(self.poolShareListener)

    def destroy(self):
        BaseNode.changeListeners.remove(self.nodeListener)
        Task.changeListeners.remove(self.taskListener)
        RenderNode.changeListeners.remove(self.renderNodeListener)
        Pool.changeListeners.remove(self.poolListener)
        Command.changeListeners.remove(self.commandListener)
        PoolShare.changeListeners.remove(self.poolShareListener)
        self.root = None
        self.nodes.clear()
        self.pools.clear()
        self.renderNodes.clear()
        self.tasks.clear()
        self.rules = None
        self.commands.clear()
        self.poolShares = None
        self.modifiedNodes = None
        self.toCreateElements = None
        self.toModifyElements = None
        self.toArchiveElements = None

    def findNodeByPath(self, path, default=None):
        nodenames = splitpath(path)
        node = self.root
        for name in nodenames:
            for child in node.children:
                if child.name == name:
                    node = child
                    break
            else:
                return default
        return node

    def updateCompletionAndStatus(self):
        self.root.updateCompletionAndStatus()



    def validateDependencies(self):
        nodes = set()
        for dependency in self.modifiedNodes:
            for node in dependency.reverseDependencies:
                nodes.add(node)
        del self.modifiedNodes[:]
        for node in nodes:
            # logger.debug("Dependencies on %r = %r"% (node.name, node.checkDependenciesSatisfaction() ) )
            if not hasattr(node,"task"):
                continue
            if isinstance(node, TaskNode):
                if node.checkDependenciesSatisfaction():
                    for cmd in node.task.commands:
                        if cmd.status == CMD_BLOCKED:
                            cmd.status = CMD_READY
                else:
                    for cmd in node.task.commands:
                        if cmd.status == CMD_READY:
                            cmd.status = CMD_BLOCKED

            # TODO: we may need to check dependencies on task groups.
            #       So far, a hack is done on the client side when submitting:
            #       dependencies of a taskgroup are reported on each task of its hierarchy
            #
            # elif isinstance(node, FolderNode):
            #
            #     if node.checkDependenciesSatisfaction():
            #         for cmd in node.getAllCommands():
            #             if cmd.status == CMD_BLOCKED:
            #                 cmd.status = CMD_READY
            #     else:
            #         for cmd in node.getAllCommands():
            #             if cmd.status == CMD_READY:
            #                 cmd.status = CMD_BLOCKED


    def registerNewGraph(self, graph):
        user = graph['user']
        taskDefs = graph['tasks']
        poolName = graph['poolName']
        if 'maxRN' in graph:
            maxRN = int(graph['maxRN'])
        else:
            maxRN = -1

        #
        # Create objects.
        #
        tasks = [None for i in xrange(len(taskDefs))]
        for (index, taskDef) in enumerate(taskDefs):
            if taskDef['type'] == 'Task':
                task = self._createTaskFromJSON(taskDef, user)
            elif taskDef['type'] == 'TaskGroup':
                task = self._createTaskGroupFromJSON(taskDef, user)
            tasks[index] = task
        root = tasks[graph['root']]

        # get the pool
        try:
            pool = self.pools[poolName]
        except KeyError:
            pool = Pool(None, poolName)
            self.pools[poolName] = pool
        #
        # Rebuild full job hierarchy
        #
        for (taskDef, task) in zip(taskDefs, tasks):
            if taskDef['type'] == 'TaskGroup':
                for taskIndex in taskDef['tasks']:
                    task.addTask(tasks[taskIndex])
                    tasks[taskIndex].parent = task
        #
        # Compute dependencies for each created task or taskgroup object.
        #
        dependencies = {}
        for (taskDef, task) in zip(taskDefs, tasks):
            taskDependencies = {}
            if not isinstance(taskDef['dependencies'], list):
                raise SyntaxError("Dependencies must be a list of (taskId, [status-list]), got %r." % taskDef['dependencies'])
            if not all(((isinstance(i, int) and
                         isinstance(sl, list) and
                         all((isinstance(s, int) for s in sl))) for (i, sl) in taskDef['dependencies'])):
                raise SyntaxError("Dependencies must be a list of (taskId, [status-list]), got %r." % taskDef['dependencies'])
            for (taskIndex, statusList) in taskDef['dependencies']:
                taskDependencies[tasks[taskIndex]] = statusList
            dependencies[task] = taskDependencies
        #
        # Apply rules to generate dispatch tree nodes.
        #
        if not self.rules:
            logger.warning("graph submitted but no rule has been defined")

        unprocessedTasks = [root]
        nodes = []
        while unprocessedTasks:
            unprocessedTask = unprocessedTasks.pop(0)
            for rule in self.rules:
                try:
                    nodes += rule.apply(unprocessedTask)
                except RuleError:
                    logger.warning("rule %s failed for graph %s" % (rule, graph))
                    raise
            if isinstance(unprocessedTask, TaskGroup):
                for task in unprocessedTask:
                    unprocessedTasks.append(task)

        # create the poolshare, if any, and affect it to the node
        if pool:
            # FIXME nodes[0] may not be the root node of the graph...
            ps = PoolShare(None, pool, nodes[0], maxRN)
            # if maxRN is not -1 (i.e. not the default), set userDefinedMaxRN to True
            if maxRN != -1:
                ps.userDefinedMaxRN = True

        #
        # Process dependencies
        #
        for rule in self.rules:
            rule.processDependencies(dependencies)

        for node in nodes:
            assert isinstance(node.id, int)
            self.nodes[node.id] = node

        # Init number of command in hierarchy
        self.populateCommandCounts(nodes[0])
        return nodes

    def populateCommandCounts(self, node):
        """
        Updates "commandCount" over a whole hierarchy starting from the given node.
        """
        res = 0
        if isinstance(node, FolderNode):
            for child in node.children:
                res += self.populateCommandCounts(child)
        elif isinstance(node, TaskNode):
            res = len(node.task.commands)

        node.commandCount = res
        return res


    def _createTaskGroupFromJSON(self, taskGroupDefinition, user):
        # name, parent, arguments, environment, priority, dispatchKey, strategy
        id = None
        name = taskGroupDefinition['name']
        parent = None
        arguments = taskGroupDefinition['arguments']
        environment = taskGroupDefinition['environment']
        requirements = taskGroupDefinition['requirements']
        maxRN = taskGroupDefinition['maxRN']
        priority = taskGroupDefinition['priority']
        dispatchKey = taskGroupDefinition['dispatchKey']
        strategy = taskGroupDefinition['strategy']
        strategy = loadStrategyClass(strategy.encode())
        strategy = strategy()
        tags = taskGroupDefinition['tags']
        timer = None
        if 'timer' in taskGroupDefinition:
            timer = taskGroupDefinition['timer']
        return TaskGroup(id, name, parent, user, arguments, environment, requirements,
                         maxRN, priority, dispatchKey, strategy, tags=tags, timer=timer)

    def _createTaskFromJSON(self, taskDefinition, user):
        # id, name, parent, user, priority, dispatchKey, runner, arguments,
        # validationExpression, commands, requirements=[], minNbCores=1,
        # maxNbCores=0, ramUse=0, environment={}
        name = taskDefinition['name']
        runner = taskDefinition['runner']
        arguments = taskDefinition['arguments']
        environment = taskDefinition['environment']
        requirements = taskDefinition['requirements']
        maxRN = taskDefinition['maxRN']
        priority = taskDefinition['priority']
        dispatchKey = taskDefinition['dispatchKey']
        validationExpression = taskDefinition['validationExpression']
        minNbCores = taskDefinition['minNbCores']
        maxNbCores = taskDefinition['maxNbCores']
        ramUse = taskDefinition['ramUse']
        lic = taskDefinition['lic']
        tags = taskDefinition['tags']
        timer = None
        if 'timer' in taskDefinition:
            timer = taskDefinition['timer']
        task = Task(None, name, None, user, maxRN, priority, dispatchKey, runner,
                    arguments, validationExpression, [], requirements, minNbCores,
                    maxNbCores, ramUse, environment, lic=lic, tags=tags, timer=timer)

        for commandDef in taskDefinition['commands']:
            description = commandDef['description']
            arguments = commandDef['arguments']
            cmd = Command(None, description, task, arguments)
            task.commands.append(cmd)
            # import sys
            # logger.warning("cmd creation : %s" % str(sys.getrefcount(cmd)))

        return task

    ## Resets the lists of elements to create or update in the database.
    #
    def resetDbElements(self):
        self.toCreateElements = []
        self.toModifyElements = []
        self.toArchiveElements = []

    ## Recalculates the max ids of all elements. Generally called after a reload from db.
    #
    def recomputeMaxIds(self):
        self.nodeMaxId = max([n.id for n in self.nodes.values()]) if self.nodes else 0
        self.poolMaxId = max([p.id for p in self.pools.values()]) if self.pools else 0
        self.renderNodeMaxId = max([rn.id for rn in self.renderNodes.values()]) if self.renderNodes else 0
        self.taskMaxId = max([t.id for t in self.tasks.values()]) if self.tasks else 0
        self.commandMaxId = max([c.id for c in self.commands.values()]) if self.commands else 0
        self.poolShareMaxId = max([ps.id for ps in self.poolShares.values()]) if self.poolShares else 0

    ## Removes from the dispatchtree the provided element and all its parents and children.
    #
    def unregisterElementsFromTree(self, element):
        # /////////////// Handling of the Task
        if isinstance(element, Task):
            del self.tasks[element.id]
            self.toArchiveElements.append(element)
            for cmd in element.commands:
                self.unregisterElementsFromTree(cmd)
            for node in element.nodes.values():
                self.unregisterElementsFromTree(node)
        # /////////////// Handling of the TaskGroup
        elif isinstance(element, TaskGroup):
            del self.tasks[element.id]
            self.toArchiveElements.append(element)
            for task in element.tasks:
                self.unregisterElementsFromTree(task)
            for node in element.nodes.values():
                self.unregisterElementsFromTree(node)
        # /////////////// Handling of the TaskNode
        elif isinstance(element, TaskNode):
            # remove the element from the children of the parent
            if element.parent:
                element.parent.removeChild(element)
            if element.poolShares:
                for poolShare in element.poolShares.values():
                    del poolShare.pool.poolShares[poolShare.node]
                    del self.poolShares[poolShare.id]
                    self.toArchiveElements.append(poolShare)
            
            if element.additionnalPoolShares:
                for poolShare in element.additionnalPoolShares.values():
                    del poolShare.pool.poolShares[poolShare.node]
                    del self.poolShares[poolShare.id]
                    self.toArchiveElements.append(poolShare)

            del self.nodes[element.id]
            self.toArchiveElements.append(element)
            for dependency in element.dependencies:
                self.unregisterElementsFromTree(dependency)
        # /////////////// Handling of the FolderNode
        elif isinstance(element, FolderNode):
            if element.parent:
                element.parent.removeChild(element)
            if element.poolShares:
                for poolShare in element.poolShares.values():
                    del poolShare.pool.poolShares[poolShare.node]
                    del self.poolShares[poolShare.id]
                    self.toArchiveElements.append(poolShare)

            if element.additionnalPoolShares:
                for poolShare in element.additionnalPoolShares.values():
                    del poolShare.pool.poolShares[poolShare.node]
                    del self.poolShares[poolShare.id]
                    self.toArchiveElements.append(poolShare)

            del self.nodes[element.id]
            self.toArchiveElements.append(element)
            for dependency in element.dependencies:
                self.unregisterElementsFromTree(dependency)
        # /////////////// Handling of the Command
        elif isinstance(element, Command):
            del self.commands[element.id]
            self.toArchiveElements.append(element)

    ### methods called after interaction with a Task

    def onTaskCreation(self, task):
        # logger.info("  -- on task creation: %s" % task)

        if task.id is None:
            self.taskMaxId += 1
            task.id = self.taskMaxId
            self.toCreateElements.append(task)
        else:
            self.taskMaxId = max(self.taskMaxId, task.id)
        self.tasks[task.id] = task

    def onTaskDestruction(self, task):
        # logger.info("  -- on task destruction: %s" % task)
        self.unregisterElementsFromTree(task)

    def onTaskChange(self, task, field, oldvalue, newvalue):
        """
        Normally, taskgroups should not be written back to the DB: there would be too many updates due to command/state changes.
        However, in order to keep track of comments (stored in the task's tags["comment"] field), we make the following change:
        - enable task/taskgroup updates in the DB (cf. pulidb.py)
        - disable the changeEvent (appending an event to the dispatchTree.toModifyElements array) for all fields of tasks and TGs
          EXCEPT the only field we want to update: "tags"
        """
        if field == "tags":
            self.toModifyElements.append(task)

    ### methods called after interaction with a BaseNode

    def onNodeCreation(self, node):
        # logger.info("  -- on node creation: %s" % node)
        if node.id is None:
            self.nodeMaxId += 1
            node.id = self.nodeMaxId
            self.toCreateElements.append(node)
        else:
            self.nodeMaxId = max(self.nodeMaxId, node.id)
        if node.parent is None:
            node.parent = self.root

    def onNodeDestruction(self, node):
        # logger.info("  -- on node destruction: %s" % node)
        del self.nodes[node.id]

    def onNodeChange(self, node, field, oldvalue, newvalue):
        # logger.info("  -- on node change: %s [ %s = %s -> %s ]" % (node,field, oldvalue, newvalue) )
        # FIXME: do something when nodes are reparented from or to the root node
        if node.id is not None:
            self.toModifyElements.append(node)
            if field == "status" and node.reverseDependencies:
                self.modifiedNodes.append(node)

    ### methods called after interaction with a RenderNode

    def onRenderNodeCreation(self, renderNode):
        if renderNode.id is None:
            self.renderNodeMaxId += 1
            renderNode.id = self.renderNodeMaxId
            self.toCreateElements.append(renderNode)
        else:
            self.renderNodeMaxId = max(self.renderNodeMaxId, renderNode.id)
        self.renderNodes[renderNode.name] = renderNode

    def onRenderNodeDestruction(self, rendernode):
        try:
            del self.renderNodes[rendernode.name]
            self.toArchiveElements.append(rendernode)
        except KeyError:
            # TOFIX: use of class method vs obj method in changeListener might generate a duplicate call
            logger.warning("RN %s seems to have been deleted already." % rendernode.name)
class DispatchTree(object):

    def __init__(self):
        # core data
        self.root = FolderNode(0, "root", None, "root", 1, 1, 0, FifoStrategy())
        self.nodes = WeakValueDictionary()
        self.nodes[0] = self.root
        self.pools = {}
        self.renderNodes = {}
        self.tasks = {}
        self.rules = []
        self.poolShares = {}
        self.commands = {}
        # deduced properties
        self.nodeMaxId = 0
        self.poolMaxId = 0
        self.renderNodeMaxId = 0
        self.taskMaxId = 0
        self.commandMaxId = 0
        self.poolShareMaxId = 0
        self.toCreateElements = []
        self.toModifyElements = []
        self.toArchiveElements = []
        # listeners
        self.nodeListener = ObjectListener(self.onNodeCreation, self.onNodeDestruction, self.onNodeChange)
        self.taskListener = ObjectListener(self.onTaskCreation, self.onTaskDestruction, self.onTaskChange)
        self.renderNodeListener = ObjectListener(self.onRenderNodeCreation, self.onRenderNodeDestruction, self.onRenderNodeChange)
        self.poolListener = ObjectListener(self.onPoolCreation, self.onPoolDestruction, self.onPoolChange)
        self.commandListener = ObjectListener(onCreationEvent=self.onCommandCreation, onChangeEvent=self.onCommandChange)
        self.poolShareListener = ObjectListener(self.onPoolShareCreation)
        self.modifiedNodes = []

    def registerModelListeners(self):
        BaseNode.changeListeners.append(self.nodeListener)
        Task.changeListeners.append(self.taskListener)
        TaskGroup.changeListeners.append(self.taskListener)
        RenderNode.changeListeners.append(self.renderNodeListener)
        Pool.changeListeners.append(self.poolListener)
        Command.changeListeners.append(self.commandListener)
        PoolShare.changeListeners.append(self.poolShareListener)

    def destroy(self):
        BaseNode.changeListeners.remove(self.nodeListener)
        Task.changeListeners.remove(self.taskListener)
        RenderNode.changeListeners.remove(self.renderNodeListener)
        Pool.changeListeners.remove(self.poolListener)
        Command.changeListeners.remove(self.commandListener)
        PoolShare.changeListeners.remove(self.poolShareListener)
        self.root = None
        self.nodes.clear()
        self.pools.clear()
        self.renderNodes.clear()
        self.tasks.clear()
        self.rules = None
        self.commands.clear()
        self.poolShares = None
        self.modifiedNodes = None
        self.toCreateElements = None
        self.toModifyElements = None
        self.toArchiveElements = None

    def findNodeByPath(self, path, default=None):
        nodenames = splitpath(path)
        node = self.root
        for name in nodenames:
            for child in node.children:
                if child.name == name:
                    node = child
                    break
            else:
                return default
        return node

    def updateCompletionAndStatus(self):
        self.root.updateCompletionAndStatus()

    def validateDependencies(self):
        nodes = set()
        for dependency in self.modifiedNodes:
            for node in dependency.reverseDependencies:
                nodes.add(node)
        del self.modifiedNodes[:]
        for node in nodes:
            if isinstance(node, TaskNode):
                if node.checkDependenciesSatisfaction():
                    for cmd in node.task.commands:
                        if cmd.status == CMD_BLOCKED:
                            cmd.status = CMD_READY
                else:
                    for cmd in node.task.commands:
                        if cmd.status == CMD_READY:
                            cmd.status = CMD_BLOCKED

    def registerNewGraph(self, graph):
        user = graph['user']
        taskDefs = graph['tasks']
        poolName = graph['poolName']
        if 'maxRN' in graph:
            maxRN = int(graph['maxRN'])
        else:
            maxRN = -1

        #
        # Create objects.
        #
        tasks = [None for i in xrange(len(taskDefs))]
        for (index, taskDef) in enumerate(taskDefs):
            if taskDef['type'] == 'Task':
                task = self._createTaskFromJSON(taskDef, user)
            elif taskDef['type'] == 'TaskGroup':
                task = self._createTaskGroupFromJSON(taskDef, user)
            tasks[index] = task
        root = tasks[graph['root']]

        # get the pool
        try:
            pool = self.pools[poolName]
        except KeyError:
            pool = Pool(None, poolName)
            self.pools[poolName] = pool
        #
        # Rebuild full job hierarchy
        #
        for (taskDef, task) in zip(taskDefs, tasks):
            if taskDef['type'] == 'TaskGroup':
                for taskIndex in taskDef['tasks']:
                    task.addTask(tasks[taskIndex])
                    tasks[taskIndex].parent = task
        #
        # Compute dependencies for each created task or taskgroup object.
        #
        dependencies = {}
        for (taskDef, task) in zip(taskDefs, tasks):
            taskDependencies = {}
            if not isinstance(taskDef['dependencies'], list):
                raise SyntaxError("Dependencies must be a list of (taskId, [status-list]), got %r." % taskDef['dependencies'])
            if not all(((isinstance(i, int) and
                         isinstance(sl, list) and
                         all((isinstance(s, int) for s in sl))) for (i, sl) in taskDef['dependencies'])):
                raise SyntaxError("Dependencies must be a list of (taskId, [status-list]), got %r." % taskDef['dependencies'])
            for (taskIndex, statusList) in taskDef['dependencies']:
                taskDependencies[tasks[taskIndex]] = statusList
            dependencies[task] = taskDependencies
        #
        # Apply rules to generate dispatch tree nodes.
        #
        if not self.rules:
            logger.warning("graph submitted but no rule has been defined")

        unprocessedTasks = [root]
        nodes = []
        while unprocessedTasks:
            unprocessedTask = unprocessedTasks.pop(0)
            for rule in self.rules:
                try:
                    nodes += rule.apply(unprocessedTask)
                except RuleError:
                    logger.warning("rule %s failed for graph %s" % (rule, graph))
                    raise
            if isinstance(unprocessedTask, TaskGroup):
                for task in unprocessedTask:
                    unprocessedTasks.append(task)

        # create the poolshare, if any, and affect it to the node
        if pool:
            # FIXME nodes[0] may not be the root node of the graph...
            PoolShare(None, pool, nodes[0], maxRN)

        #
        # Process dependencies
        #
        for rule in self.rules:
            rule.processDependencies(dependencies)

        for node in nodes:
            assert isinstance(node.id, int)
            self.nodes[node.id] = node

        return nodes

    def _createTaskGroupFromJSON(self, taskGroupDefinition, user):
        # name, parent, arguments, environment, priority, dispatchKey, strategy
        id = None
        name = taskGroupDefinition['name']
        parent = None
        arguments = taskGroupDefinition['arguments']
        environment = taskGroupDefinition['environment']
        requirements = taskGroupDefinition['requirements']
        maxRN = taskGroupDefinition['maxRN']
        priority = taskGroupDefinition['priority']
        dispatchKey = taskGroupDefinition['dispatchKey']
        strategy = taskGroupDefinition['strategy']
        strategy = loadStrategyClass(strategy.encode())
        strategy = strategy()
        tags = taskGroupDefinition['tags']
        return TaskGroup(id, name, parent, user, arguments, environment, requirements,
                         maxRN, priority, dispatchKey, strategy, tags=tags)

    def _createTaskFromJSON(self, taskDefinition, user):
        # id, name, parent, user, priority, dispatchKey, runner, arguments,
        # validationExpression, commands, requirements=[], minNbCores=1,
        # maxNbCores=0, ramUse=0, environment={}
        name = taskDefinition['name']
        runner = taskDefinition['runner']
        arguments = taskDefinition['arguments']
        environment = taskDefinition['environment']
        requirements = taskDefinition['requirements']
        maxRN = taskDefinition['maxRN']
        priority = taskDefinition['priority']
        dispatchKey = taskDefinition['dispatchKey']
        validationExpression = taskDefinition['validationExpression']
        minNbCores = taskDefinition['minNbCores']
        maxNbCores = taskDefinition['maxNbCores']
        ramUse = taskDefinition['ramUse']
        lic = taskDefinition['lic']
        tags = taskDefinition['tags']
        task = Task(None, name, None, user, maxRN, priority, dispatchKey, runner,
                    arguments, validationExpression, [], requirements, minNbCores,
                    maxNbCores, ramUse, environment, lic=lic, tags=tags)

        for commandDef in taskDefinition['commands']:
            description = commandDef['description']
            arguments = commandDef['arguments']
            task.commands.append(Command(None, description, task, arguments))

        return task

    ## Resets the lists of elements to create or update in the database.
    #
    def resetDbElements(self):
        self.toCreateElements = []
        self.toModifyElements = []
        self.toArchiveElements = []

    ## Recalculates the max ids of all elements. Generally called after a reload from db.
    #
    def recomputeMaxIds(self):
        self.nodeMaxId = max([n.id for n in self.nodes.values()]) if self.nodes else 0
        self.poolMaxId = max([p.id for p in self.pools.values()]) if self.pools else 0
        self.renderNodeMaxId = max([rn.id for rn in self.renderNodes.values()]) if self.renderNodes else 0
        self.taskMaxId = max([t.id for t in self.tasks.values()]) if self.tasks else 0
        self.commandMaxId = max([c.id for c in self.commands.values()]) if self.commands else 0
        self.poolShareMaxId = max([ps.id for ps in self.poolShares.values()]) if self.poolShares else 0

    ## Removes from the dispatchtree the provided element and all its parents and children.
    #
    def unregisterElementsFromTree(self, element):
        # /////////////// Handling of the Task
        if isinstance(element, Task):
            del self.tasks[element.id]
            self.toArchiveElements.append(element)
            for cmd in element.commands:
                self.unregisterElementsFromTree(cmd)
            for node in element.nodes.values():
                self.unregisterElementsFromTree(node)
        # /////////////// Handling of the TaskGroup
        elif isinstance(element, TaskGroup):
            del self.tasks[element.id]
            self.toArchiveElements.append(element)
            for task in element.tasks:
                self.unregisterElementsFromTree(task)
            for node in element.nodes.values():
                self.unregisterElementsFromTree(node)
        # /////////////// Handling of the TaskNode
        elif isinstance(element, TaskNode):
            # remove the element from the children of the parent
            if element.parent:
                element.parent.removeChild(element)
            if element.poolShares:
                for poolShare in element.poolShares.values():
                    self.toArchiveElements.append(poolShare)
            del self.nodes[element.id]
            self.toArchiveElements.append(element)
            for dependency in element.dependencies:
                self.unregisterElementsFromTree(dependency)
        # /////////////// Handling of the FolderNode
        elif isinstance(element, FolderNode):
            if element.parent:
                element.parent.removeChild(element)
            if element.poolShares:
                for poolShare in element.poolShares.values():
                    self.toArchiveElements.append(poolShare)
            del self.nodes[element.id]
            self.toArchiveElements.append(element)
            for dependency in element.dependencies:
                self.unregisterElementsFromTree(dependency)
        # /////////////// Handling of the Command
        elif isinstance(element, Command):
            del self.commands[element.id]
            self.toArchiveElements.append(element)

    ### methods called after interaction with a Task

    def onTaskCreation(self, task):
        if task.id is None:
            self.taskMaxId += 1
            task.id = self.taskMaxId
            self.toCreateElements.append(task)
        else:
            self.taskMaxId = max(self.taskMaxId, task.id)
        self.tasks[task.id] = task

    def onTaskDestruction(self, task):
        self.unregisterElementsFromTree(task)

    def onTaskChange(self, task, field, oldvalue, newvalue):
        self.toModifyElements.append(task)

    ### methods called after interaction with a BaseNode

    def onNodeCreation(self, node):
        if node.id is None:
            self.nodeMaxId += 1
            node.id = self.nodeMaxId
            self.toCreateElements.append(node)
        else:
            self.nodeMaxId = max(self.nodeMaxId, node.id)
        if node.parent is None:
            node.parent = self.root

    def onNodeDestruction(self, node):
        del self.nodes[node.id]

    def onNodeChange(self, node, field, oldvalue, newvalue):
        # FIXME: do something when nodes are reparented from or to the root node
        if node.id is not None:
            self.toModifyElements.append(node)
            if field == "status" and node.reverseDependencies:
                self.modifiedNodes.append(node)

    ### methods called after interaction with a RenderNode

    def onRenderNodeCreation(self, renderNode):
        if renderNode.id is None:
            self.renderNodeMaxId += 1
            renderNode.id = self.renderNodeMaxId
            self.toCreateElements.append(renderNode)
        else:
            self.renderNodeMaxId = max(self.renderNodeMaxId, renderNode.id)
        self.renderNodes[renderNode.name] = renderNode

    def onRenderNodeDestruction(self, rendernode):
        del self.renderNodes[rendernode.name]
        self.toArchiveElements.append(rendernode)

    def onRenderNodeChange(self, rendernode, field, oldvalue, newvalue):
        self.toModifyElements.append(rendernode)

    ### methods called after interaction with a Pool

    def onPoolCreation(self, pool):
        if pool.id is None:
            self.poolMaxId += 1
            pool.id = self.poolMaxId
            self.toCreateElements.append(pool)
        else:
            self.poolMaxId = max(self.poolMaxId, pool.id)
        self.pools[pool.name] = pool

    def onPoolDestruction(self, pool):
        del self.pools[pool.name]
        self.toArchiveElements.append(pool)

    def onPoolChange(self, pool, field, oldvalue, newvalue):
        if pool not in self.toModifyElements:
            self.toModifyElements.append(pool)

    ### methods called after interaction with a Command

    def onCommandCreation(self, command):
        if command.id is None:
            self.commandMaxId += 1
            command.id = self.commandMaxId
            self.toCreateElements.append(command)
        else:
            self.commandMaxId = max(self.commandMaxId, command.id)
        self.commands[command.id] = command

    def onCommandChange(self, command, field, oldvalue, newvalue):
        self.toModifyElements.append(command)
        for node in command.task.nodes.values():
            node.invalidate()

    ### methods called after interaction with a Pool

    def onPoolShareCreation(self, poolShare):
        if poolShare.id is None:
            self.poolShareMaxId += 1
            poolShare.id = self.poolShareMaxId
            self.toCreateElements.append(poolShare)
        else:
            self.poolShareMaxId = max(self.poolShareMaxId, poolShare.id)
        self.poolShares[poolShare.id] = poolShare
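
registerNewGraph above validates that each task's 'dependencies' field is a list of (taskId, [status-list]) pairs before using it. The same shape check, pulled out into a standalone sketch (check_dependencies is an illustrative name, not part of the class above):

def check_dependencies(deps):
    """Return True if deps is a list of (int, [int, ...]) pairs."""
    if not isinstance(deps, list):
        return False
    return all(isinstance(i, int)
               and isinstance(sl, list)
               and all(isinstance(s, int) for s in sl)
               for (i, sl) in deps)

assert check_dependencies([(1, [0, 2]), (3, [])])
assert not check_dependencies([(1, 'DONE')])
assert not check_dependencies('not-a-list')
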
Esempio n. 34
0
    def clear(self):
        self.queue.clear()
        return WeakValueDictionary.clear(self)
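
The clear() fragments in Esempio 31 and 34 both come from WeakValueDictionary subclasses that pair the weak mapping with a strong-reference container (_keepDict, queue) that has to be emptied first. A minimal sketch of that idea; the KeepAliveDict name and pin method are inventions for illustration, not the original classes:

from weakref import WeakValueDictionary

class KeepAliveDict(WeakValueDictionary):
    """WeakValueDictionary that can pin chosen values so they never expire."""
    def __init__(self, *args, **kwargs):
        WeakValueDictionary.__init__(self, *args, **kwargs)
        self._keep = {}                    # strong refs for pinned keys

    def pin(self, key):
        self._keep[key] = self[key]        # hold a strong reference

    def clear(self):
        self._keep.clear()                 # drop the pins first...
        WeakValueDictionary.clear(self)    # ...then the weak entries

class Value(object):
    pass

d = KeepAliveDict()
v = d['a'] = Value()
d.pin('a')
del v
assert 'a' in d                            # survives: pinned by the strong ref
d.clear()
assert 'a' not in d
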
Esempio n. 35
0
class TaskQueue(AbstractTaskQueue):
    """Simple in-memory task queue implementation"""

    @classmethod
    def factory(cls, url, name=const.DEFAULT, *args, **kw):
        obj = _REFS.get((url, name))
        if obj is None:
            obj = _REFS[(url, name)] = cls(url, name, *args, **kw)
        return obj

    def __init__(self, *args, **kw):
        super(TaskQueue, self).__init__(*args, **kw)
        self.queue = Queue()
        self.results = WeakValueDictionary()
        self.results_lock = Lock()

    def _init_result(self, result, status, message):
        with self.results_lock:
            if result.id in self.results:
                return False
            self.results[result.id] = result
        result.__status = status
        result.__value = Queue()
        result.__task = message
        result.__args = {}
        result.__lock = Lock()
        result.__for = None
        return True

    def enqueue_task(self, result, message):
        if self._init_result(result, const.ENQUEUED, message):
            self.queue.put(result)
            return True
        return False

    def defer_task(self, result, message, args):
        if self._init_result(result, const.PENDING, message):
            results = self.results
            # keep references to results to prevent GC
            result.__refs = [results.get(arg) for arg in args]
            return True
        return False

    def undefer_task(self, task_id):
        result = self.results[task_id]
        self.queue.put(result)

    def get(self, timeout=None):
        try:
            result = self.queue.get(timeout=timeout)
        except Empty:
            return None
        result.__status = const.PROCESSING
        return result.id, result.__task

    def size(self):
        return len(self.results)

    def discard_pending(self):
        with self.results_lock:
            while True:
                try:
                    self.queue.get_nowait()
                except Empty:
                    break
            self.results.clear()

    def reserve_argument(self, argument_id, deferred_id):
        result = self.results.get(argument_id)
        if result is None:
            return (False, None)
        with result.__lock:
            if result.__for is not None:
                return (False, None)
            result.__for = deferred_id
            try:
                message = result.__value.get_nowait()
            except Empty:
                message = None
            if message is not None:
                with self.results_lock:
                    self.results.pop(argument_id, None)
            return (True, message)

    def set_argument(self, task_id, argument_id, message):
        result = self.results[task_id]
        with self.results_lock:
            self.results.pop(argument_id, None)
        with result.__lock:
            result.__args[argument_id] = message
            return len(result.__args) == len(result.__refs)

    def get_arguments(self, task_id):
        try:
            return self.results[task_id].__args
        except KeyError:
            return {}

    def set_task_timeout(self, task_id, timeout):
        pass

    def get_status(self, task_id):
        result = self.results.get(task_id)
        return None if result is None else result.__status

    def set_result(self, task_id, message, timeout):
        result = self.results.get(task_id)
        if result is not None:
            with result.__lock:
                result.__value.put(message)
                return result.__for

    def pop_result(self, task_id, timeout):
        result = self.results.get(task_id)
        if result is None:
            return const.TASK_EXPIRED
#        with result.__lock:
#            if result.__for is not None:
#                raise NotImplementedError
#                #return const.RESERVED
#            result.__for = task_id
        try:
            if timeout == 0:
                value = result.__value.get_nowait()
            else:
                value = result.__value.get(timeout=timeout)
        except Empty:
            value = None
        else:
            self.results.pop(task_id, None)
        return value

    def discard_result(self, task_id, task_expired_token):
        result = self.results.pop(task_id, None)
        if result is not None:
            result.__value.put(task_expired_token)
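
This queue leans on collaborators defined outside the snippet (AbstractTaskQueue, const, _REFS), so the driver below is only a sketch of the intended call flow under the assumption that those are in scope; TaskResult is a stand-in for the library's result object, which only needs an id attribute here. Because self.results is a WeakValueDictionary, the producer must hold a strong reference to the result until it has been consumed:

class TaskResult(object):
    """Stand-in for the library's result type; only .id is required here."""
    def __init__(self, id):
        self.id = id

queue = TaskQueue.factory('memory://', 'default')

result = TaskResult('task-1')                  # keep this reference alive
queue.enqueue_task(result, 'payload')          # status ENQUEUED, put on queue

task_id, message = queue.get(timeout=1)        # worker side: now PROCESSING
queue.set_result(task_id, 'done', timeout=0)   # worker publishes the outcome
print(queue.pop_result(task_id, timeout=0))    # consumer side, prints 'done'
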
Example n. 36
0
class Signal(object):
    def __init__(self, *args):
        self.__slots = WeakValueDictionary()
        for slot in args:
            self.connect(slot)

    def __call__(self, slot, *args, **kwargs):
        """
        Emit the signal. If a slot is passed, the signal is delivered only
        to that slot; otherwise it is delivered to all connected slots.

        Calling this method directly leads to immediate signal processing,
        which may not be thread-safe. Use the emit helper from this module
        for delayed delivery of signals.
        """
        if slot is not None:
            slots = (self.__slots[self.key(slot)], )
        else:
            slots = self.__slots.values()
        for func in slots:
            func(*args, **kwargs)

    def key(self, slot):
        """
        Get local key name for slot.
        """
        if type(slot) == types.FunctionType:
            key = (slot.__module__, slot.__name__)
        elif type(slot) == types.MethodType:
            key = (slot.__func__, id(slot.__self__))
        elif isinstance(slot, basestring):
            if slot not in registred_slots:
                raise ValueError('Slot {0} does not exist.'.format(slot))
            key = slot
        else:
            raise ValueError('Slot {0} has non-slot type'.format(slot))
        return key

    def connect(self, slot):
        """
        Connect the signal to a slot. A slot may be a function, an instance
        method, or the name of a function previously registered with the
        `slot` decorator.
        """
        key = self.key(slot)
        if type(slot) == types.FunctionType:
            self.__slots[key] = slot
        elif type(slot) == types.MethodType:
            self.__slots[key] = partial(slot.__func__, slot.__self__)
        elif isinstance(slot, basestring):
            self.__slots[key] = registred_slots[slot]

    def disconnect(self, slot):
        """
        Remove the slot from the signal's connections.
        """
        key = self.key(slot)
        del self.__slots[key]

    def clear(self):
        """
        Disconnect all slots from signal.
        """
        self.__slots.clear()
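
Two properties of this Signal are easy to miss: slots are stored as values of a WeakValueDictionary, so only callables with an outside strong reference (e.g. module-level functions) stay connected, and a lambda, or the partial built for a bound method, may be collected as soon as connect() returns. A small usage sketch with a plain function (the emit helper and registred_slots registry mentioned in the docstrings live elsewhere in the module):

def on_saved(path):
    print('saved to %s' % path)

saved = Signal(on_saved)         # slots may also be connected via the ctor
saved(None, '/tmp/out.txt')      # slot=None broadcasts to all connected slots
saved(on_saved, '/tmp/out.txt')  # or target one specific slot
saved.disconnect(on_saved)
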
Example n. 37
0
class SqlStore(Store):
    """
    Save and load objects in a SQL db, using Python's `pickle` module
    to serialize objects into a specific field.

    Access to the DB is done via SQLAlchemy module, therefore any
    driver supported by SQLAlchemy will be supported by this class.

    The `url` argument is used to access the store. It is expected to
    be a `gc3libs.url.Url`:class: instance, and may therefore contain
    a username, password, host and port if they are needed by the db
    used.

    The `table_name` argument is the name of the table to create. By
    default it's ``store``.  Alternatively, the table name can be
    given in the "fragment" part of the database URL, as
    ``#table=...`` (replace ``...`` with the actual table name).  The
    constructor argument takes precedence over the table name
    specified in the DB URL.

    The constructor will create the `table_name` table if it does not
    exist, but if there already is such a table it will assume that
    its schema is compatible with our needs. A minimal table schema
    is as follows::

        +-----------+--------------+------+-----+---------+
        | Field     | Type         | Null | Key | Default |
        +-----------+--------------+------+-----+---------+
        | id        | int(11)      | NO   | PRI | NULL    |
        | data      | blob         | YES  |     | NULL    |
        | state     | varchar(128) | YES  |     | NULL    |
        +-----------+--------------+------+-----+---------+

    The meaning of the fields is:

    - `id`: this is the id returned by the `save()` method and
      uniquely identifies a stored object.

    - `data`: serialized Python object.

    - `state`: if the object is a `Task`:class: instance, this will be
      its current execution state.

    The `extra_fields` constructor argument is used to extend the
    database. It must contain a mapping `*column*: *function*`
    where:

    - *column* is a `sqlalchemy.Column` object.

    - *function* is a function which takes the object to be saved as
      argument and returns the value to be stored into the
      database. Any exception raised by this function will be
      *ignored*.  Classes `GetAttribute`:class: and `GetItem`:class:
      in module `get`:mod: provide convenient helpers to save object
      attributes into table columns.

    For each extra column the `save()` method will call the
    corresponding *function* in order to get the correct value to
    store into the DB.

    Any extra keyword arguments are ignored for compatibility with
    `FilesystemStore`:class:.
    """
    def __init__(self,
                 url,
                 table_name=None,
                 idfactory=None,
                 extra_fields=None,
                 create=True,
                 **extra_args):
        """
        Open a connection to the storage database identified by `url`.

        DB backend (MySQL, psql, sqlite3) is chosen based on the
        `url.scheme` value.
        """
        super(SqlStore, self).__init__(url)
        if self.url.fragment:
            kv = parse_qs(self.url.fragment)
        else:
            kv = {}

        # init static public args
        self.idfactory = idfactory or IdFactory(id_class=IntId)

        url_table_names = kv.get('table')
        if url_table_names:
            url_table_name = url_table_names[-1]  # last wins
        else:
            url_table_name = ''
        if table_name is None:
            self.table_name = url_table_name or "store"
        else:
            if table_name != url_table_name:
                gc3libs.log.debug(
                    "DB table name given in store URL fragment,"
                    " but overridden by `table` argument to SqlStore()")
            self.table_name = table_name

        # save ctor args for lazy-initialization
        self._init_extra_fields = (extra_fields
                                   if extra_fields is not None else {})
        self._init_create = create

        # create slots for lazy-init'ed attrs
        self._real_engine = None
        self._real_extra_fields = None
        self._real_tables = None

        self._loaded = WeakValueDictionary()

    @staticmethod
    def _to_sqlalchemy_url(url):
        if url.scheme == 'sqlite':
            # rewrite ``sqlite`` URLs to be RFC compliant, see:
            # https://github.com/uzh/gc3pie/issues/261
            db_url = "%s://%s/%s" % (url.scheme, url.netloc, url.path)
        else:
            db_url = str(url)
        # remove fragment identifier, if any
        try:
            fragment_loc = db_url.index('#')
            db_url = db_url[:fragment_loc]
        except ValueError:
            pass
        return db_url

    def _delayed_init(self):
        """
        Perform initialization tasks that can interfere with
        forking/multiprocess setup.

        See `GC3Pie issue #550 <https://github.com/uzh/gc3pie/issues/550>`_
        for more details and motivation.
        """
        url = self._to_sqlalchemy_url(self.url)
        gc3libs.log.debug("Initializing SQLAlchemy engine for `%s`...", url)
        self._real_engine = sqla.create_engine(url)

        # create schema
        meta = sqla.MetaData(bind=self._real_engine)
        table = sqla.Table(
            self.table_name, meta,
            sqla.Column('id', sqla.Integer(),
                        primary_key=True, nullable=False),
            sqla.Column('data', sqla.LargeBinary()),
            sqla.Column('state', sqla.String(length=128)))

        # create internal rep of table
        self._real_extra_fields = {}
        for col, func in self._init_extra_fields.items():
            assert isinstance(col, sqla.Column)
            table.append_column(col.copy())
            self._real_extra_fields[col.name] = func

        # check if the db exists and already has a 'store' table
        current_meta = sqla.MetaData(bind=self._real_engine)
        current_meta.reflect()
        if self._init_create and self.table_name not in current_meta.tables:
            meta.create_all()

        self._real_tables = meta.tables[self.table_name]

    def pre_fork(self):
        """
        Dispose current SQLAlchemy engine (if any).
        A new SQLAlchemy engine will be initialized
        upon the next interaction with a DB.

        This method only exists to allow `SessionBasedDaemon`:class:
        and similar applications that can do DB operations after
        fork()ing to continue to operate, without running into a
        SQLAlchemy "OperationalError: (...) could not receive data
        from server: Transport endpoint is not connected"
        """
        if self._real_engine:
            self._real_engine.dispose()
        self._real_engine = None
        self._real_extra_fields = None
        self._real_tables = None

    @property
    def _engine(self):
        if self._real_engine is None:
            self._delayed_init()
        return self._real_engine

    @property
    def _tables(self):
        if self._real_tables is None:
            self._delayed_init()
        return self._real_tables

    # FIXME: Remove once the TissueMAPS code is updated not to use this any more!
    @property
    def t_store(self):
        """
        Deprecated compatibility alias for `SqlStore._tables`
        """
        warn(
            "`SqlStore.t_store` has been renamed to `SqlStore._tables`;"
            " please update your code", DeprecationWarning, 2)
        return self._tables

    @property
    def extra_fields(self):
        if self._real_extra_fields is None:
            self._delayed_init()
        return self._real_extra_fields

    @same_docstring_as(Store.invalidate_cache)
    def invalidate_cache(self):
        self._loaded.clear()

    @same_docstring_as(Store.list)
    def list(self):
        q = sql.select([self._tables.c.id])
        with self._engine.begin() as conn:
            rows = conn.execute(q)
            ids = [i[0] for i in rows.fetchall()]
        return ids

    @same_docstring_as(Store.replace)
    def replace(self, id_, obj):
        self._save_or_replace(id_, obj)

    # copied from FilesystemStore
    @same_docstring_as(Store.save)
    def save(self, obj):
        if not hasattr(obj, 'persistent_id'):
            obj.persistent_id = self.idfactory.new(obj)
        return self._save_or_replace(obj.persistent_id, obj)

    def _save_or_replace(self, id_, obj):
        # if __debug__:
        #     global _lvl
        #     _lvl += '>'
        #     gc3libs.log.debug("%s Saving %r@%x as %s ...", _lvl, obj, id(obj), id_)

        # build row to insert/update
        fields = {'id': id_}

        with closing(BytesIO()) as dstdata:
            make_pickler(self, dstdata, obj).dump(obj)
            fields['data'] = dstdata.getvalue()

        try:
            fields['state'] = obj.execution.state
        except AttributeError:
            # If we cannot determine the state of a task, consider it UNKNOWN.
            fields['state'] = Run.State.UNKNOWN

        # insert into db
        for column in self.extra_fields:
            try:
                fields[column] = self.extra_fields[column](obj)
            except Exception as ex:
                gc3libs.log.warning(
                    "Error saving DB column '%s' of object '%s': %s: %s",
                    column, obj, ex.__class__.__name__, str(ex))

        if __debug__:
            for column in fields:
                if column == 'data':
                    continue
                gc3libs.log.debug(
                    "Writing value '%s' in column '%s' for object '%s'",
                    fields[column], column, obj)

        q = sql.select([self._tables.c.id]).where(self._tables.c.id == id_)
        with self._engine.begin() as conn:
            r = conn.execute(q)
            if not r.fetchone():
                # It's an insert
                q = self._tables.insert().values(**fields)
            else:
                # it's an update
                q = self._tables.update().where(
                    self._tables.c.id == id_).values(**fields)
            conn.execute(q)
        obj.persistent_id = id_
        if hasattr(obj, 'changed'):
            obj.changed = False

        # update cache
        if str(id_) in self._loaded:
            old = self._loaded[str(id_)]
            if old is not obj:
                self._loaded[str(id_)] = obj
                # if __debug__:
                #     gc3libs.log.debug(
                #         "%s Overwriting object %s %r@%x with %r@%x",
                #         _lvl, id_, old, id(old), obj, id(obj))
                #     from traceback import format_stack
                #     gc3libs.log.debug("Traceback:\n%s", ''.join(format_stack()))

        # if __debug__:
        #     gc3libs.log.debug("%s Done saving %r@%x as %s ...", _lvl, obj, id(obj), id_)
        #     if _lvl:
        #         _lvl = _lvl[:-1]

        # return id
        return id_

    @same_docstring_as(Store.load)
    def load(self, id_):
        # if __debug__:
        #     global _lvl
        #     _lvl += '<'
        #     gc3libs.log.debug("%s Store %s: Loading task %s %r ...", _lvl, self, id_, type(id_))

        # return cached copy, if any
        try:
            obj = self._loaded[str(id_)]
            # if __debug__:
            #     if _lvl:
            #         _lvl = _lvl[:-1]
            #     gc3libs.log.debug("%s Store %s: Returning cached object %r@%x as task %s", _lvl, self, obj, id(obj), id_)
            return obj
        except KeyError:
            pass

        # no cached copy, load from disk
        q = sql.select([self._tables.c.data]).where(self._tables.c.id == id_)
        with self._engine.begin() as conn:
            rawdata = conn.execute(q).fetchone()
        if not rawdata:
            raise gc3libs.exceptions.LoadError(
                "Unable to find any object with ID '%s'" % id_)
        obj = make_unpickler(self, BytesIO(rawdata[0])).load()
        super(SqlStore, self)._update_to_latest_schema()
        assert str(id_) not in self._loaded
        self._loaded[str(id_)] = obj
        # if __debug__:
        #     if _lvl:
        #         _lvl = _lvl[:-1]
        #     gc3libs.log.debug("%s Store %s: Done loading task %s as %r@%x.", _lvl, self, id_, obj, id(obj))
        return obj

    @same_docstring_as(Store.remove)
    def remove(self, id_):
        with self._engine.begin() as conn:
            conn.execute(self._tables.delete().where(self._tables.c.id == id_))
        try:
            del self._loaded[str(id_)]
        except KeyError:
            pass
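
Putting the constructor documentation to use, a typical setup could look like the sketch below; the jobname column, the task object, and the DB path are hypothetical, and a plain lambda stands in for the GetAttribute helper the docstring mentions:

import sqlalchemy as sqla
from gc3libs.url import Url

store = SqlStore(
    Url('sqlite:////tmp/session.db#table=mystore'),
    extra_fields={
        # any callable taking the saved object works; exceptions it raises
        # are ignored by save()
        sqla.Column('jobname', sqla.String(length=255)):
            lambda obj: obj.jobname,
    })

obj_id = store.save(task)   # 'task' is a hypothetical persistable object
same = store.load(obj_id)   # later loads are served from the weak cache
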
Example n. 38
0
class DispatchTree(object):
    def __init__(self):
        # core data
        self.root = FolderNode(0, "root", None, "root", 1, 1, 0,
                               FifoStrategy())
        self.nodes = WeakValueDictionary()
        self.nodes[0] = self.root
        self.pools = {}
        self.renderNodes = {}
        self.tasks = {}
        self.rules = []
        self.poolShares = {}
        self.commands = {}
        # deduced properties
        self.nodeMaxId = 0
        self.poolMaxId = 0
        self.renderNodeMaxId = 0
        self.taskMaxId = 0
        self.commandMaxId = 0
        self.poolShareMaxId = 0
        self.toCreateElements = []
        self.toModifyElements = []
        self.toArchiveElements = []
        # listeners
        self.nodeListener = ObjectListener(self.onNodeCreation,
                                           self.onNodeDestruction,
                                           self.onNodeChange)
        self.taskListener = ObjectListener(self.onTaskCreation,
                                           self.onTaskDestruction,
                                           self.onTaskChange)
        # # JSA
        # self.taskGroupListener = ObjectListener(self.onTaskCreation, self.onTaskDestruction, self.onTaskGroupChange)
        self.renderNodeListener = ObjectListener(self.onRenderNodeCreation,
                                                 self.onRenderNodeDestruction,
                                                 self.onRenderNodeChange)
        self.poolListener = ObjectListener(self.onPoolCreation,
                                           self.onPoolDestruction,
                                           self.onPoolChange)
        self.commandListener = ObjectListener(
            onCreationEvent=self.onCommandCreation,
            onChangeEvent=self.onCommandChange)
        self.poolShareListener = ObjectListener(self.onPoolShareCreation)
        self.modifiedNodes = []

    def registerModelListeners(self):
        BaseNode.changeListeners.append(self.nodeListener)
        Task.changeListeners.append(self.taskListener)
        TaskGroup.changeListeners.append(self.taskListener)
        RenderNode.changeListeners.append(self.renderNodeListener)
        Pool.changeListeners.append(self.poolListener)
        Command.changeListeners.append(self.commandListener)
        PoolShare.changeListeners.append(self.poolShareListener)

    def destroy(self):
        BaseNode.changeListeners.remove(self.nodeListener)
        Task.changeListeners.remove(self.taskListener)
        TaskGroup.changeListeners.remove(self.taskListener)
        RenderNode.changeListeners.remove(self.renderNodeListener)
        Pool.changeListeners.remove(self.poolListener)
        Command.changeListeners.remove(self.commandListener)
        PoolShare.changeListeners.remove(self.poolShareListener)
        self.root = None
        self.nodes.clear()
        self.pools.clear()
        self.renderNodes.clear()
        self.tasks.clear()
        self.rules = None
        self.commands.clear()
        self.poolShares = None
        self.modifiedNodes = None
        self.toCreateElements = None
        self.toModifyElements = None
        self.toArchiveElements = None

    def findNodeByPath(self, path, default=None):
        nodenames = splitpath(path)
        node = self.root
        for name in nodenames:
            for child in node.children:
                if child.name == name:
                    node = child
                    break
            else:
                return default
        return node

    def updateCompletionAndStatus(self):
        self.root.updateCompletionAndStatus()

    def validateDependencies(self):
        nodes = set()
        for dependency in self.modifiedNodes:
            for node in dependency.reverseDependencies:
                nodes.add(node)
        del self.modifiedNodes[:]
        for node in nodes:
            # logger.debug("Dependencies on %r = %r"% (node.name, node.checkDependenciesSatisfaction() ) )
            if not hasattr(node, "task") or node.task is None:
                continue
            if isinstance(node, TaskNode):
                if node.checkDependenciesSatisfaction():
                    for cmd in node.task.commands:
                        if cmd.status == CMD_BLOCKED:
                            cmd.status = CMD_READY
                else:
                    for cmd in node.task.commands:
                        if cmd.status == CMD_READY:
                            cmd.status = CMD_BLOCKED

            # TODO: checking dependencies on task groups may also be needed;
            #       so far, a hack is done on the client side when submitting:
            #       dependencies of a taskgroup are reported on each task of its hierarchy
            #
            # elif isinstance(node, FolderNode):
            #
            #     if node.checkDependenciesSatisfaction():
            #         for cmd in node.getAllCommands():
            #             if cmd.status == CMD_BLOCKED:
            #                 cmd.status = CMD_READY
            #     else:
            #         for cmd in node.getAllCommands():
            #             if cmd.status == CMD_READY:
            #                 cmd.status = CMD_BLOCKED

    def registerNewGraph(self, graph):
        user = graph['user']
        taskDefs = graph['tasks']
        poolName = graph['poolName']
        if 'maxRN' in graph:
            maxRN = int(graph['maxRN'])
        else:
            maxRN = -1

        #
        # Create objects.
        #
        tasks = [None for i in xrange(len(taskDefs))]
        for (index, taskDef) in enumerate(taskDefs):
            if taskDef['type'] == 'Task':
                # logger.debug("taskDef.watcherPackages = %s" % taskDef["watcherPackages"])
                # logger.debug("taskDef.runnerPackages = %s" % taskDef["runnerPackages"])
                task = self._createTaskFromJSON(taskDef, user)
            elif taskDef['type'] == 'TaskGroup':
                task = self._createTaskGroupFromJSON(taskDef, user)
            tasks[index] = task
        root = tasks[graph['root']]

        # get the pool
        try:
            pool = self.pools[poolName]
        except KeyError:
            pool = Pool(None, poolName)
            self.pools[poolName] = pool
        #
        # Rebuild full job hierarchy
        #
        for (taskDef, task) in zip(taskDefs, tasks):
            if taskDef['type'] == 'TaskGroup':
                for taskIndex in taskDef['tasks']:
                    task.addTask(tasks[taskIndex])
                    tasks[taskIndex].parent = task
        #
        # Compute dependencies for each created task or taskgroup object.
        #
        dependencies = {}
        for (taskDef, task) in zip(taskDefs, tasks):
            taskDependencies = {}
            if not isinstance(taskDef['dependencies'], list):
                raise SyntaxError(
                    "Dependencies must be a list of (taskId, [status-list]), got %r."
                    % taskDef['dependencies'])
            if not all(((isinstance(i, int) and isinstance(sl, list) and all(
                (isinstance(s, int) for s in sl)))
                        for (i, sl) in taskDef['dependencies'])):
                raise SyntaxError(
                    "Dependencies must be a list of (taskId, [status-list]), got %r."
                    % taskDef['dependencies'])
            for (taskIndex, statusList) in taskDef['dependencies']:
                taskDependencies[tasks[taskIndex]] = statusList
            dependencies[task] = taskDependencies
        #
        # Apply rules to generate dispatch tree nodes.
        #
        if not self.rules:
            logger.warning("graph submitted but no rule has been defined")

        unprocessedTasks = [root]
        nodes = []
        while unprocessedTasks:
            unprocessedTask = unprocessedTasks.pop(0)
            for rule in self.rules:
                try:
                    nodes += rule.apply(unprocessedTask)
                except RuleError:
                    logger.warning("rule %s failed for graph %s" %
                                   (rule, graph))
                    raise
            if isinstance(unprocessedTask, TaskGroup):
                for task in unprocessedTask:
                    unprocessedTasks.append(task)

        # create the poolshare, if any, and assign it to the node
        if pool:
            # FIXME nodes[0] may not be the root node of the graph...
            ps = PoolShare(None, pool, nodes[0], maxRN)
            # if maxRN is not -1 (i.e. not the default), set userDefinedMaxRN to True
            if maxRN != -1:
                ps.userDefinedMaxRN = True

        #
        # Process dependencies
        #
        for rule in self.rules:
            rule.processDependencies(dependencies)

        for node in nodes:
            assert isinstance(node.id, int)
            self.nodes[node.id] = node

        # Init the number of commands in the hierarchy
        self.populateCommandCounts(nodes[0])
        return nodes

    def populateCommandCounts(self, node):
        """
        Updates "commandCount" over a whole hierarchy starting from the given node.
        """
        res = 0
        if isinstance(node, FolderNode):
            for child in node.children:
                res += self.populateCommandCounts(child)
        elif isinstance(node, TaskNode):
            res = len(node.task.commands)

        node.commandCount = res
        return res

    def _createTaskGroupFromJSON(self, taskGroupDefinition, user):
        # name, parent, arguments, environment, priority, dispatchKey, strategy
        id = None
        name = taskGroupDefinition['name']
        parent = None
        arguments = taskGroupDefinition['arguments']
        environment = taskGroupDefinition['environment']
        requirements = taskGroupDefinition['requirements']
        maxRN = taskGroupDefinition['maxRN']
        priority = taskGroupDefinition['priority']
        dispatchKey = taskGroupDefinition['dispatchKey']
        strategy = taskGroupDefinition['strategy']
        strategy = loadStrategyClass(strategy.encode())
        strategy = strategy()
        tags = taskGroupDefinition['tags']
        timer = None
        if 'timer' in taskGroupDefinition.keys():
            timer = taskGroupDefinition['timer']
        return TaskGroup(id,
                         name,
                         parent,
                         user,
                         arguments,
                         environment,
                         requirements,
                         maxRN,
                         priority,
                         dispatchKey,
                         strategy,
                         tags=tags,
                         timer=timer)

    def _createTaskFromJSON(self, taskDefinition, user):
        # id, name, parent, user, priority, dispatchKey, runner, arguments,
        # validationExpression, commands, requirements=[], minNbCores=1,
        # maxNbCores=0, ramUse=0, environment={}
        name = taskDefinition['name']
        runner = taskDefinition['runner']
        arguments = taskDefinition['arguments']
        environment = taskDefinition['environment']
        requirements = taskDefinition['requirements']
        maxRN = taskDefinition['maxRN']
        priority = taskDefinition['priority']
        dispatchKey = taskDefinition['dispatchKey']
        validationExpression = taskDefinition['validationExpression']
        minNbCores = taskDefinition['minNbCores']
        maxNbCores = taskDefinition['maxNbCores']
        ramUse = taskDefinition['ramUse']
        lic = taskDefinition['lic']
        tags = taskDefinition['tags']
        runnerPackages = taskDefinition.get('runnerPackages', '')
        watcherPackages = taskDefinition.get('watcherPackages', '')
        timer = None
        if 'timer' in taskDefinition.keys():
            timer = taskDefinition['timer']

        maxAttempt = taskDefinition.get('maxAttempt', 1)

        task = Task(None,
                    name,
                    None,
                    user,
                    maxRN,
                    priority,
                    dispatchKey,
                    runner,
                    arguments,
                    validationExpression, [],
                    requirements,
                    minNbCores,
                    maxNbCores,
                    ramUse,
                    environment,
                    lic=lic,
                    tags=tags,
                    timer=timer,
                    maxAttempt=maxAttempt,
                    runnerPackages=runnerPackages,
                    watcherPackages=watcherPackages)

        for commandDef in taskDefinition['commands']:
            description = commandDef['description']
            arguments = commandDef['arguments']
            cmd = Command(None,
                          description,
                          task,
                          arguments,
                          runnerPackages=runnerPackages,
                          watcherPackages=watcherPackages)
            task.commands.append(cmd)
            # import sys
            # logger.warning("cmd creation : %s" % str(sys.getrefcount(cmd)))

        return task

    ## Resets the lists of elements to create, modify or archive in the database.
    #
    def resetDbElements(self):
        self.toCreateElements = []
        self.toModifyElements = []
        self.toArchiveElements = []

    ## Recalculates the max ids of all elements. Generally called after a reload from db.
    #
    def recomputeMaxIds(self):
        self.nodeMaxId = max([n.id for n in self.nodes.values()
                              ]) if self.nodes else 0
        self.nodeMaxId = max(self.nodeMaxId, StatDB.getFolderNodesMaxId(),
                             StatDB.getTaskNodesMaxId())
        self.poolMaxId = max([p.id for p in self.pools.values()
                              ]) if self.pools else 0
        self.poolMaxId = max(self.poolMaxId, StatDB.getPoolsMaxId())
        self.renderNodeMaxId = max([rn.id for rn in self.renderNodes.values()
                                    ]) if self.renderNodes else 0
        self.renderNodeMaxId = max(self.renderNodeMaxId,
                                   StatDB.getRenderNodesMaxId())
        self.taskMaxId = max([t.id for t in self.tasks.values()
                              ]) if self.tasks else 0
        self.taskMaxId = max(self.taskMaxId, StatDB.getTasksMaxId(),
                             StatDB.getTaskGroupsMaxId())
        self.commandMaxId = max([c.id for c in self.commands.values()
                                 ]) if self.commands else 0
        self.commandMaxId = max(self.commandMaxId, StatDB.getCommandsMaxId())
        self.poolShareMaxId = max([ps.id for ps in self.poolShares.values()
                                   ]) if self.poolShares else 0
        self.poolShareMaxId = max(self.poolShareMaxId,
                                  StatDB.getPoolSharesMaxId())

    ## Removes the provided element and all its parents and children from the dispatch tree.
    #
    def unregisterElementsFromTree(self, element):
        # /////////////// Handling of the Task
        if isinstance(element, Task):
            del self.tasks[element.id]
            self.toArchiveElements.append(element)
            for cmd in element.commands:
                self.unregisterElementsFromTree(cmd)
            for node in element.nodes.values():
                self.unregisterElementsFromTree(node)
        # /////////////// Handling of the TaskGroup
        elif isinstance(element, TaskGroup):
            del self.tasks[element.id]
            self.toArchiveElements.append(element)
            for task in element.tasks:
                self.unregisterElementsFromTree(task)
            for node in element.nodes.values():
                self.unregisterElementsFromTree(node)
        # /////////////// Handling of the TaskNode
        elif isinstance(element, TaskNode):
            # remove the element from the children of the parent
            if element.parent:
                element.parent.removeChild(element)
            if element.poolShares:
                for poolShare in element.poolShares.values():
                    del poolShare.pool.poolShares[poolShare.node]
                    del self.poolShares[poolShare.id]
                    self.toArchiveElements.append(poolShare)

            if element.additionnalPoolShares:
                for poolShare in element.additionnalPoolShares.values():
                    del poolShare.pool.poolShares[poolShare.node]
                    del self.poolShares[poolShare.id]
                    self.toArchiveElements.append(poolShare)

            del self.nodes[element.id]
            self.toArchiveElements.append(element)
            for dependency in element.dependencies:
                self.unregisterElementsFromTree(dependency)
        # /////////////// Handling of the FolderNode
        elif isinstance(element, FolderNode):
            if element.parent:
                element.parent.removeChild(element)
            if element.poolShares:
                for poolShare in element.poolShares.values():
                    del poolShare.pool.poolShares[poolShare.node]
                    del self.poolShares[poolShare.id]
                    self.toArchiveElements.append(poolShare)

            if element.additionnalPoolShares:
                for poolShare in element.additionnalPoolShares.values():
                    del poolShare.pool.poolShares[poolShare.node]
                    del self.poolShares[poolShare.id]
                    self.toArchiveElements.append(poolShare)

            del self.nodes[element.id]
            self.toArchiveElements.append(element)
            for dependency in element.dependencies:
                self.unregisterElementsFromTree(dependency)
        # /////////////// Handling of the Command
        elif isinstance(element, Command):
            del self.commands[element.id]
            self.toArchiveElements.append(element)

    ### methods called after interaction with a Task

    def onTaskCreation(self, task):
        # logger.info("  -- on task creation: %s" % task)

        if task.id is None:
            self.taskMaxId += 1
            task.id = self.taskMaxId
            self.toCreateElements.append(task)
        else:
            self.taskMaxId = max(self.taskMaxId, task.id,
                                 StatDB.getTasksMaxId(),
                                 StatDB.getTaskGroupsMaxId())
        self.tasks[task.id] = task

    def onTaskDestruction(self, task):
        # logger.info("  -- on task destruction: %s" % task)
        self.unregisterElementsFromTree(task)

    def onTaskChange(self, task, field, oldvalue, newvalue):
        """
        Normally, taskgroups should not be updated in the DB: there would be
        too many updates due to command/state changes. However, in order to
        keep track of comments (stored in the task's tags["comment"] field),
        we make the following change:
        - enable task/taskgroup updates in the DB (cf. pulidb.py)
        - disable changeEvent (appending an event to the
          dispatchTree.toModifyElements array) for every field of tasks and
          TGs EXCEPT the one field we do want to update: "tags"
        """
        if field == "tags":
            self.toModifyElements.append(task)

    ### methods called after interaction with a BaseNode

    def onNodeCreation(self, node):
        # logger.info("  -- on node creation: %s" % node)
        if node.id is None:
            self.nodeMaxId += 1
            node.id = self.nodeMaxId
            self.toCreateElements.append(node)
        else:
            self.nodeMaxId = max(self.nodeMaxId, node.id,
                                 StatDB.getFolderNodesMaxId(),
                                 StatDB.getTaskNodesMaxId())
        if node.parent is None:
            node.parent = self.root

    def onNodeDestruction(self, node):
        # logger.info("  -- on node destruction: %s" % node)
        del self.nodes[node.id]

    def onNodeChange(self, node, field, oldvalue, newvalue):
        # logger.info("  -- on node change: %s [ %s = %s -> %s ]" % (node,field, oldvalue, newvalue) )
        # FIXME: do something when nodes are reparented from or to the root node
        if node.id is not None:
            self.toModifyElements.append(node)
            if field == "status" and node.reverseDependencies:
                self.modifiedNodes.append(node)

    ### methods called after interaction with a RenderNode

    def onRenderNodeCreation(self, renderNode):
        if renderNode.id is None:
            self.renderNodeMaxId += 1
            renderNode.id = self.renderNodeMaxId
            self.toCreateElements.append(renderNode)
        else:
            self.renderNodeMaxId = max(self.renderNodeMaxId, renderNode.id,
                                       StatDB.getRenderNodesMaxId())
        self.renderNodes[renderNode.name] = renderNode

    def onRenderNodeDestruction(self, rendernode):
        try:
            del self.renderNodes[rendernode.name]
            self.toArchiveElements.append(rendernode)
        except KeyError:
            # TOFIX: use of class method vs obj method in changeListener might generate a duplicate call
            logger.warning("RN %s seems to have been deleted already." %
                           rendernode.name)

    def onRenderNodeChange(self, rendernode, field, oldvalue, newvalue):
        if field == "performance":
            self.toModifyElements.append(rendernode)

    ### methods called after interaction with a Pool

    def onPoolCreation(self, pool):
        if pool.id is None:
            self.poolMaxId += 1
            pool.id = self.poolMaxId
            self.toCreateElements.append(pool)
        else:
            self.poolMaxId = max(self.poolMaxId, pool.id,
                                 StatDB.getPoolsMaxId())
        self.pools[pool.name] = pool

    def onPoolDestruction(self, pool):
        del self.pools[pool.name]
        self.toArchiveElements.append(pool)

    def onPoolChange(self, pool, field, oldvalue, newvalue):
        if pool not in self.toModifyElements:
            self.toModifyElements.append(pool)

    ### methods called after interaction with a Command

    def onCommandCreation(self, command):
        if command.id is None:
            self.commandMaxId += 1
            command.id = self.commandMaxId
            self.toCreateElements.append(command)
        else:
            self.commandMaxId = max(self.commandMaxId, command.id,
                                    StatDB.getCommandsMaxId())
        self.commands[command.id] = command

    def onCommandChange(self, command, field, oldvalue, newvalue):
        self.toModifyElements.append(command)
        if command.task is not None:
            for node in command.task.nodes.values():
                node.invalidate()

    ### methods called after interaction with a PoolShare

    def onPoolShareCreation(self, poolShare):
        if poolShare.id is None:
            self.poolShareMaxId += 1
            poolShare.id = self.poolShareMaxId
            self.toCreateElements.append(poolShare)
        else:
            self.poolShareMaxId = max(self.poolShareMaxId, poolShare.id,
                                      StatDB.getPoolSharesMaxId())
        self.poolShares[poolShare.id] = poolShare
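
Note the asymmetry among the indexes built in __init__: self.nodes is the only WeakValueDictionary, so a node silently drops out of the id index once nothing else (typically its parent's children list) references it, whereas pools, tasks and commands must be deleted explicitly. A standalone illustration of that difference:

import gc
from weakref import WeakValueDictionary

class Node(object):
    def __init__(self, id):
        self.id = id

nodes = WeakValueDictionary()
node = Node(1)
nodes[node.id] = node
print(1 in nodes)   # True: a strong reference to the node still exists
del node            # drop the last strong reference
gc.collect()        # immediate in CPython; collect() makes it portable
print(1 in nodes)   # False: the entry vanished with the node
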
Example n. 39
0
def clear(self):
    self._head = self._tail = None
    self._keepDict.clear()
    WeakValueDictionary.clear(self)
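
Like Example n. 34, this fragment is a clear() override from a WeakValueDictionary subclass; here the extra state is a _keepDict of strong references (pinning values that must survive even without outside referents) plus the _head/_tail ends of an intrusive recency list. A hypothetical minimal host class:

from weakref import WeakValueDictionary

class PinningDict(WeakValueDictionary):
    """Hypothetical host for the clear() override above."""
    def __init__(self):
        WeakValueDictionary.__init__(self)
        self._head = self._tail = None  # ends of an intrusive recency list
        self._keepDict = {}             # strong refs pinning selected values

    def clear(self):
        self._head = self._tail = None
        self._keepDict.clear()
        WeakValueDictionary.clear(self)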