def test_inheritance_row_created():
    """A listener registered on the base class must fire when a derived row is created."""
    classes = [InheritableEventTestA, InheritableEventTestB, InheritableEventTestC]
    setupClass(classes)
    # Subscribe at the root of the inheritance chain...
    events.listen(_signal, InheritableEventTestA, events.RowCreatedSignal)
    # ...and trigger the signal by instantiating the most derived class.
    InheritableEventTestC(a=1, b=2, c=3)
def test_inheritance_row_created():
    """Row creation on a subclass should reach listeners attached to its ancestor."""
    setupClass(
        [InheritableEventTestA, InheritableEventTestB, InheritableEventTestC])
    # Listener goes on the ancestor; the event source is the leaf class below.
    events.listen(_signal, InheritableEventTestA, events.RowCreatedSignal)
    InheritableEventTestC(a=1, b=2, c=3)
def __addtoclass__(self, soClass, name):
    # SQLObject hook, called when this versioning helper is assigned as an
    # attribute of soClass.  Builds a parallel "<Class>Versions" SQLObject
    # class mirroring soClass's columns, and registers listeners so version
    # rows can be archived on table creation and row updates.
    self.name = name
    self.soClass = soClass
    attrs = {'dateArchived': col.DateTimeCol(default=datetime.now),
             'master': col.ForeignKey(self.soClass.__name__),
             'masterClass': self.soClass,
             'extraCols': self.extraCols}
    # Copy soClass's column definitions into the version class namespace.
    getColumns(attrs, self.soClass)
    attrs.update(self.extraCols)
    # Dynamically create the companion versions class.
    self.versionClass = type(self.soClass.__name__ + 'Versions',
                             (Version,), attrs)
    # Propagate an explicitly-set connection (only if set on soClass itself,
    # not inherited — hence __dict__ rather than attribute lookup).
    if '_connection' in self.soClass.__dict__:
        self.versionClass._connection = \
            self.soClass.__dict__['_connection']
    events.listen(self.createTable, soClass, events.CreateTableSignal)
    events.listen(self.rowUpdate, soClass, events.RowUpdateSignal)
def test_inheritance_row_created():
    """Post-functions appended by a RowCreatedSignal listener must be callable on the new row."""
    for cls in (A, B, C):
        setupClass(cls)

    def check_get(instance):
        # Runs after creation: the freshly created row must be retrievable.
        row_id = instance.id
        A.get(row_id)

    def on_created(kwargs, postfuncs):
        postfuncs.append(check_get)

    events.listen(on_created, A, events.RowCreatedSignal)
    C(a=1, b=2, c=3)
def setupSync(): # in some situations it's not desired to have sync on right from the server boot. Like in new setups we may want # to create initial users before we start syncing. print "setup: Enabling sync" listen(usr_add_listener, model.User, RowCreatedSignal) listen(usr_updt_listener, model.User, RowUpdateSignal) listen(group_join_listener, model.UserGroup, RowCreatedSignal) listen(group_leave_listener, model.UserGroup, RowDestroySignal) thread.start_new(signonloop, ()) # with ldap disable this is required
def init_events():
    """Wire SQLObject row signals for Task and Comment to their handlers."""
    bindings = (
        (taskUpdated, Task, events.RowUpdateSignal),
        (taskCreated, Task, events.RowCreatedSignal),
        (commentCreated, Comment, events.RowCreatedSignal),
        (taskDeleted, Task, events.RowDestroySignal),
    )
    for handler, cls, signal in bindings:
        events.listen(handler, cls, signal)
def __addtoclass__(self, soClass, name):
    # SQLObject attribute-installation hook.  Creates a companion
    # "<Class>Versions" class that duplicates soClass's columns (plus an
    # archive timestamp and a foreign key back to the master row), then
    # subscribes to table-creation and row-update signals for archiving.
    self.name = name
    self.soClass = soClass
    attrs = {
        'dateArchived': col.DateTimeCol(default=datetime.now),
        'master': col.ForeignKey(self.soClass.__name__),
        'masterClass': self.soClass,
        'extraCols': self.extraCols
    }
    # Mirror the master class's column definitions into attrs.
    getColumns(attrs, self.soClass)
    attrs.update(self.extraCols)
    self.versionClass = type(self.soClass.__name__ + 'Versions',
                             (Version, ), attrs)
    # Only copy a connection that was set directly on soClass (not inherited).
    if '_connection' in self.soClass.__dict__:
        self.versionClass._connection = \
            self.soClass.__dict__['_connection']
    events.listen(self.createTable, soClass, events.CreateTableSignal)
    events.listen(self.rowUpdate, soClass, events.RowUpdateSignal)
def __init__(self, cls):
    """
    Mirror every row of the given :class:`SQLObject` class in this list
    store, kept current via SQLObject events.

    All rows are retained in memory for the store's lifetime, so use
    with care on large tables.
    """
    gtk.ListStore.__init__(self, object)
    for handler, signal in ((self.__on_updated, events.RowUpdatedSignal),
                            (self.__on_created, events.RowCreatedSignal),
                            (self.__on_destroy, events.RowDestroySignal)):
        events.listen(handler, cls, signal)
    # Seed the store with every row that already exists.
    for obj in cls.select():
        self.append((obj,))
def listen_row_update(fListener, cClass):
    """Subscribe fListener to the RowUpdateSignal fired when a card set is modified."""
    listen(fListener, cClass, RowUpdateSignal)
def listen_row_destroy(fListener, cClass):
    """Subscribe fListener to the RowDestroySignal fired when a card set is deleted."""
    listen(fListener, cClass, RowDestroySignal)
def make_listen(signal):
    """Create a watcher, subscribe it to *signal* on sqlmeta, and return it."""
    w = make_watcher()
    events.listen(w, sqlmeta, signal)
    return w
from change_scan import update_maildir_cache, scan_mail, created_listener,\
    destroy_listener, File, Directory, indexer_listener
from sqlobject.events import listen, RowDestroySignal, RowCreatedSignal

# Queue feeding message work items to the indexer worker processes.
msg_indexer_queue = pQueue()

# Spawn one indexer process per index shard.  The three copy-pasted
# Process/start stanzas differed only in the shard number, so they are
# collapsed into a loop; behaviour (names, order of starts) is unchanged.
ps = []
for shard in (1, 2, 3):
    p = Process(target=IndexerProcess,
                args=('%s%i' % (xapidx, shard), msg_indexer_queue))
    ps.append(p)
    p.start()

il = indexer_listener(msg_indexer_queue)
# Index every new File row as it is created.
listen(il.created_listener, File, RowCreatedSignal)
#listen(partial(created_listener, msg_indexer_queue=msg_indexer_queue), File, RowCreatedSignal)
#listen(destroy_listener, File, RowDestroySignal)

from time import time
t = time()
update_maildir_cache(scan_mail())
t = time() - t
print('took %r seconds to scan messages' % t)
# NOTE(review): _get_path and write_to_asset_file are instance methods — in
# the full file they presumably live inside the Asset class body, while
# on_asset_deleted and the listen() call are module level; confirm placement.
def _get_path(self, force=False):
    """ Returns the path to the asset on the filesystem or None if the asset file is being cached. """
    # Track when the asset was last requested.
    self.last_reference = datetime.now()
    if not force and self.in_flight:
        return None
    elif force:
        self.in_flight = False  # Prevent failures in the caching process to block asset in flight mode
    return os.path.join(
        'static', 'storage', str(self.plugin_channel.id),
        str(self.id) + (self.extension if self.extension is not None else ''))

def write_to_asset_file(self, content):
    """ Writes the content to the asset file. """
    asset_path = os.path.join(get_root_path(), self.path)
    # Create the storage directory tree on first write.
    os.makedirs(os.path.dirname(asset_path), exist_ok=True)
    with open(asset_path, 'wb') as f:
        f.write(content)

def on_asset_deleted(instance, kwargs):
    """ Deletes the file associated with this asset when this asset is deleted. """
    try:
        # TypeError covers instance.path being None (asset never written).
        os.remove(os.path.join(get_root_path(), instance.path))
    except (OSError, TypeError) as e:
        # TODO: log message to app.log
        print(e)

listen(on_asset_deleted, Asset, RowDestroyedSignal)
def make_listen(signal, cls=None):
    """Attach a fresh watcher to *signal* on *cls* (EventTester by default) and return it."""
    target = EventTester if cls is None else cls
    watcher = make_watcher()
    events.listen(watcher, target, signal)
    return watcher
instance_cache = cached_updates[location]['profiles'].get(user.id) public_field = kwargs.get('public_field', user.public_field) if instance_cache: if public_field: instance_cache.update(kwargs) cache_container.sort() else: cached_updates[location]['profiles'].remove(user.id) else: if public_field: cached_updates[location]['profiles'].add(user) cache_factories = dict (events=EventCacheContainer, profiles=ProfileCacheContainer) cached_updates = Cache() listen(on_add_rusage, RUsage, RowCreatedSignal) listen(on_updt_rusage, RUsage, RowUpdateSignal) listen(on_del_rusage, RUsage, RowDestroySignal) listen(on_add_user, User, RowCreatedSignal) listen(on_updt_user, User, RowUpdateSignal) def get_updates_data(location): local_updates = cached_updates[location.id] updates = {} updates['local_profiles'] = local_updates['profiles'] updates['local_events'] = local_updates['events'].get_future_events() updates['global_profiles'] = cached_updates['global']['profiles'] updates['global_events'] = cached_updates['global']['events'].get_future_events() return updates
def listen_row_created(fListener, cClass):
    """Subscribe fListener to the RowCreatedSignal fired when a new set is created."""
    listen(fListener, cClass, RowCreatedSignal)
from turbogears import config, update_config, start_server
import cherrypy
cherrypy.lowercase_api = True
# Wildcard "from os.path import *" replaced with the names actually used;
# avoids shadowing builtins and makes the dependency explicit.
from os.path import dirname, exists, join
import sys

# first look on the command line for a desired config file,
# if it's not on the command line, then
# look for setup.py in this directory. If it's not there, this script is
# probably installed
if len(sys.argv) > 1:
    update_config(configfile=sys.argv[1], modulename="ecaeps.config")
elif exists(join(dirname(__file__), "setup.py")):
    update_config(configfile="dev.cfg", modulename="ecaeps.config")
else:
    update_config(configfile="prod.cfg", modulename="ecaeps.config")
config.update(dict(package="ecaeps"))

# Add listeners to SQLObject so that the Lucene index is automatically updated
import sqlobject.events as e
from ecaeps.model import Child
import ecaeps.luceneUtil as luceneUtil
e.listen(luceneUtil.rowDeleted, Child, e.RowDestroySignal)
e.listen(luceneUtil.rowAdded, Child, e.RowCreatedSignal)
e.listen(luceneUtil.rowUpdated, Child, e.RowUpdateSignal)

from ecaeps.controllers import Root
start_server(Root())
def __init__(cls, name, bases, _dict):  # @NoSelf
    # Metaclass initialiser combining two parents: run the SQLObject
    # metaclass first, then the observable-property metaclass, and finally
    # route SQLObject row updates to the class's own update_listener.
    InheritableSQLObject.__metaclass__.__init__(cls, name, bases, _dict)
    ObservablePropertyMeta.__init__(cls, name, bases, _dict)
    listen(cls.update_listener, cls, RowUpdateSignal)
# NOTE(review): the following defs are methods of the Member class whose
# header is outside this chunk; the trailing events.listen call is module
# level — confirm placement against the full file.
def removeKey(self, key):
    # Destroy the key row; the association disappears with it.
    key.destroySelf()

def repositoryByName(self, name):
    # Return this member's repository named *name*, or None when absent.
    from Phoenix.Models import Repository
    try:
        return Repository.selectBy(member=self, name=name)[0]
    except IndexError:
        return None

def repositoryByPath(self, path):
    # Return this member's repository located at *path*, or None when absent.
    from Phoenix.Models import Repository
    try:
        return Repository.selectBy(member=self, path=path)[0]
    except IndexError:
        return None

@classmethod
def _beforedestroy(cls, member, *a):
    # Cascade: remove everything owned by the member before its row is gone.
    for role in member.ownedroles:
        role.destroySelf()
    for repository in member.repositories:
        repository.destroySelf()
    for key in member.keys:
        key.destroySelf()

events.listen(Member._beforedestroy, Member, events.RowDestroySignal)
if old_value != value and name not in skip_attrs: try: current_user = identity.current.user.id except: current_user = None u = UpdateLog( changed_by=current_user, table_name=instance.sqlmeta.table, table_id=instance.id, attrib_name=name, attrib_old_value=old_value, attrib_new_value=value ) instance.last_updated = datetime.now() listen(update_listener, Journalled, RowUpdateSignal) # # Keep track of all edits to Journalled objects, so in case of vandalism, # the changes can be reverted. # class UpdateLog(SQLObject): created = DateTimeCol(default=datetime.now) changed_by = IntCol() table_name = UnicodeCol(length=12) table_id = IntCol() attrib_name = UnicodeCol(length=20) attrib_old_value = PickleCol() attrib_new_value = PickleCol()
if not member: privileges = Privilege.selectBy(repository=self, public=True) else: privileges = Privilege.select( AND( Privilege.q.repository == self.id, OR(Privilege.q.member == member.id, IN(Privilege.q.role, member.roles), Privilege.q.public == 1))) print privileges if privileges.count() > 0: for p in privileges: if branch and match(p.branch, branch) and action in p.crud: return True if tag and match(p.tag, tag) and action in p.crud: return True return False @classmethod def _beforedestroy(cls, repository, *a): for privilege in repository.privileges: privilege.destroySelf() for hook in repository.hooks: hook.destroySelf() rmtree(repository.getFullpath()) events.listen(Repository._beforedestroy, Repository, events.RowDestroySignal)
Exception ----------------------------------------------------------------------------""" class Exception(Exception): pass class RoleException(Exception): pass """---------------------------------------------------------------------------- Class ----------------------------------------------------------------------------""" class Role(SQLObject): name = StringCol(length=255) member = ForeignKey("Member") members = RelatedJoin("Member") privileges = MultipleJoin("Privilege") @classmethod def _beforedestroy(cls, role, *a): for member in role.members: member.removeRole(role) events.listen(Role._beforedestroy, Role, events.RowDestroySignal)
if not member: privileges = Privilege.selectBy(repository=self, public=True) else: privileges = Privilege.select(AND( Privilege.q.repository == self.id, OR( Privilege.q.member == member.id, IN(Privilege.q.role, member.roles), Privilege.q.public == 1))) print privileges if privileges.count() > 0: for p in privileges: if branch and match(p.branch, branch) and action in p.crud: return True if tag and match(p.tag, tag) and action in p.crud: return True return False @classmethod def _beforedestroy(cls, repository, *a): for privilege in repository.privileges: privilege.destroySelf() for hook in repository.hooks: hook.destroySelf() rmtree(repository.getFullpath()) events.listen(Repository._beforedestroy, Repository, events.RowDestroySignal)
def acquires_groupProxy(instance): user_acquires_policyProxy(instance.user, instance.group) def acquire_groupProxy(kwargs, post_funcs): post_funcs.append(acquires_groupProxy) def lose_policyGroupProxy(instance, post_funcs): user_loses_policyProxy(instance.user, instance.policygroup) def acquires_policyGroupProxy(instance): user_acquires_policyProxy(instance.user, instance.policygroup) def acquire_policyGroupProxy(kwargs, post_funcs): post_funcs.append(acquires_policyGroupProxy) listen(lose_groupProxy, UserGroup, RowDestroySignal) listen(acquire_groupProxy, UserGroup, RowCreatedSignal) listen(lose_policyGroupProxy, UserPolicyGroup, RowDestroySignal) listen(acquire_policyGroupProxy, UserPolicyGroup, RowCreatedSignal) #b) a group/policyGroup/tariff has a accessPolicy added or removed from hubspace.tariff import tariff_users def policy_removed(instance, post_funcs): """if a policy is removed this will be called automatically """ print "removing access policy " + `instance` if instance.group: users = instance.group.users elif instance.policygroup: users = instance.policygroup.users
def listen_changed(fListener, cClass):
    """Subscribe fListener to the ChangedSignal on cClass."""
    listen(fListener, cClass, ChangedSignal)
def __init__(cls, name, bases, dict):
    """Run both parent metaclass initialisers, then route row updates to cls.update_listener."""
    for parent_init in (InheritableSQLObject.__metaclass__.__init__,
                        ObservablePropertyMeta.__init__):
        parent_init(cls, name, bases, dict)
    listen(cls.update_listener, cls, RowUpdateSignal)
def get_template(self) -> str: return self.template class AssetSlideMapping(SQLObject): asset = ForeignKey('Asset', cascade=True) slide = ForeignKey('EditorSlide', cascade=True) def on_mapping_deleted(instance, kwargs): """ Deletes the file associated with this asset when this asset is deleted. """ if AssetSlideMapping.selectBy(assetID=instance.asset.id).count() == 0: instance.asset.destroySelf() listen(on_mapping_deleted, AssetSlideMapping, RowDestroyedSignal) class EditorSlide(SQLObject, PluginSlide, metaclass=SQLObjectAndABCMeta): duration = IntCol(notNone=True) content = JSONCol(notNone=True, default={}) s_order = IntCol(notNone=True) template = StringCol(notNone=True) capsule = ForeignKey('EditorCapsule', cascade=True) asset_mappings = SQLMultipleJoin('AssetSlideMapping') @classmethod def from_slide(cls, slide: PluginSlide, capsule, slide_order=0): def create_asset_mappings(slide): for field, inputs in slide.get_content().items(): if 'file' in inputs:
if value and not value.strip(): raise KeyException("Provided key is not valid.") if self.initialized: Key._writeKey(self.id, value) else: self.keystring = value def _get_pubkey(self): return File.extractKey(Config.get("phoenix", "authorized_keys"), self.id) def getMember(self): from Phoenix.Models import Member return Member.get(self.member.id) @classmethod def _writeKey(cls, id, key): File.replaceLine(Config.get("phoenix", "authorized_keys"), "--key-id %s" % id, cls._prepareKey(id, key)) @classmethod def _prepareKey(cls, id, key): tmp = """command="phoenix serve --key-id %s",""" % id tmp += "no-port-forwarding,no-x11-forwarding,no-agent-forwarding %s" % key return tmp @classmethod def _beforedestroy(cls, key, *a): File.replaceLine(Config.get("phoenix", "authorized_keys"), "--key-id %s" % key.id) events.listen(Key._beforedestroy, Key, events.RowDestroySignal)