Example #1
    def add(self, old_path, new_path):
        old_path = self._canonical(old_path)
        new_path = self._canonical(new_path)

        if old_path == new_path:
            return

        # Forget any existing reverse paths to old_path
        existing_target = self._paths.get(old_path, None)
        if (existing_target is not None) and (existing_target in self._rpaths):
            if len(self._rpaths[existing_target]) == 1:
                del self._rpaths[existing_target]
            else:
                self._rpaths[existing_target].remove(old_path)

        # Update any references that pointed to old_path
        for p in self.redirects(old_path):
            if p != new_path:
                self._paths[p] = new_path
                self._rpaths.setdefault(new_path, OOSet()).insert(p)
            else:
                del self._paths[new_path]

        # Remove reverse paths for old_path
        if old_path in self._rpaths:
            del self._rpaths[old_path]

        self._paths[old_path] = new_path
        self._rpaths.setdefault(new_path, OOSet()).insert(old_path)
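
A minimal sketch of the same bookkeeping outside the storage class: _paths is the forward map (old path -> new path) and _rpaths keeps, per target, an OOSet of every alias pointing at it. The module-level names and the add_redirect helper are hypothetical, not the original API.

from BTrees.OOBTree import OOBTree, OOSet

_paths = OOBTree()    # forward: old_path -> new_path
_rpaths = OOBTree()   # reverse: new_path -> OOSet of old paths

def add_redirect(old_path, new_path):
    # hypothetical helper mirroring the last two lines of add() above
    _paths[old_path] = new_path
    _rpaths.setdefault(new_path, OOSet()).insert(old_path)

add_redirect('/plone/old-name', '/plone/new-name')
add_redirect('/plone/older-name', '/plone/new-name')
print(list(_rpaths['/plone/new-name']))  # ['/plone/old-name', '/plone/older-name']
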
Example #2
    def read(self):
        """Return messages added and removed from folder.

        Two sets of message objects are returned.  The first set is
        messages that were added to the folder since the last read.
        The second set is the messages that were removed from the
        folder since the last read.

        The code assumes messages are added and removed but not edited.
        """
        mbox = mailbox.UnixMailbox(open(self.path, "rb"), factory)
        self._stat()
        cur = OOSet()
        new = OOSet()
        while 1:
            msg = mbox.next()
            if msg is None:
                break
            msgid = msg["message-id"]
            cur.insert(msgid)
            if not self.messages.has_key(msgid):
                self.messages[msgid] = msg
                new.insert(msg)

        removed = difference(self.messages, cur)
        for msgid in removed.keys():
            del self.messages[msgid]

        # XXX perhaps just return the OOBTree for removed?
        return new, OOSet(removed.values())
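
difference here is the module-level set operation from BTrees.OOBTree: applied to a mapping and a set, it returns the entries of the mapping whose keys are absent from the set. A standalone sketch with made-up message ids:

from BTrees.OOBTree import OOBTree, OOSet, difference

messages = OOBTree({'<a@example>': 'msg A', '<b@example>': 'msg B', '<c@example>': 'msg C'})
cur = OOSet(['<a@example>', '<c@example>'])  # ids still present in the mailbox

removed = difference(messages, cur)          # entries of `messages` not keyed in `cur`
print(list(removed.keys()))                  # ['<b@example>']
print(list(removed.values()))                # ['msg B']
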
Example #3
    def test_Terms(self):
        # normalize term
        ki = self.ki
        obj = self.obj1
        ki.NormalizeTerm = 'python: lambda value: value-1'
        self.assertEqual(ki._evaluate(obj).keys(), OOSet((0, 1)).keys())
        # stop term
        ki.StopTermPredicate = 'python: value == 1'
        self.assertEqual(ki._evaluate(obj).keys(), OOSet((1,)).keys())
Example #4
 def tags(self, value):
     #Is this the right way to mutate objects, or should we simply clear the contents?
     if value:
         self.__tags__ = OOSet(value)
     else:
         if hasattr(self, '__tags__'):
             delattr(self, '__tags__')
Example #5
 def add_user_roles(self, userid: str, *roles):
     """ See kedja.interfaces.ISecurityAware """
     if isinstance(userid, str):
         userid = int(userid)
     if userid not in self._rolesdata:
         self._rolesdata[userid] = OOSet()
     self._rolesdata[userid].update(roles)
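
OOSet.update() accepts any iterable, so the variadic roles tuple can be folded in directly. A sketch of the same idea, assuming an IOBTree keyed by integer userid (the actual _rolesdata container type is not shown above):

from BTrees.IOBTree import IOBTree
from BTrees.OOBTree import OOSet

_rolesdata = IOBTree()  # assumed: userid (int) -> OOSet of role names

def add_user_roles(userid, *roles):
    userid = int(userid)
    if userid not in _rolesdata:
        _rolesdata[userid] = OOSet()
    _rolesdata[userid].update(roles)

add_user_roles("10", "role:Admin")
add_user_roles(10, "role:Viewer")
print(list(_rolesdata[10]))  # ['role:Admin', 'role:Viewer'] (OOSet keeps keys sorted)
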
Example #6
def marshal(obj, keys=None, marshallerName='', objs=None):
    """
    Convert an object to a dictionary. keys is an optional list of keys to
    include in the returned dictionary; if keys is None, all public
    attributes are returned. marshallerName is an optional marshalling
    adapter name; if it is an empty string, the default marshaller will be
    used.
    """
    #to prevent recursing back over something twice, keep track of seen objs
    if objs is None:
        objs = OOSet()

    # obj is itself marshallable, so make a marshaller and marshal away
    if IMarshallable.providedBy(obj):
        marshaller = component.getAdapter(obj, IMarshaller, marshallerName)
        verify.verifyObject(IMarshaller, marshaller)

        if IInfo.providedBy(obj):
            key = (obj._object._p_oid, obj.__class__)
            if key in objs:
                raise AlreadySeenException()
            else:
                objs.insert(key)
                try:
                    return marshal(marshaller.marshal(keys),
                            keys, marshallerName, objs)
                except AlreadySeenException:
                    pass
                finally:
                    objs.remove(key)
        else:
            return marshal(marshaller.marshal(keys), keys, marshallerName, objs)


    # obj is a dict, so marshal its values recursively
    # Zuul.marshal({'foo':1, 'bar':2})
    if isinstance(obj, dict):
        marshalled_dict = {}
        for k in obj:
            try:
                marshalled_dict[k] = marshal(obj[k], keys, marshallerName, objs)
            except AlreadySeenException:
                pass
        return marshalled_dict

    # obj is a non-string iterable, so marshal its members recursively
    # Zuul.marshal(set([o1, o2]))
    elif hasattr(obj, '__iter__'):
        marshalled_list = []
        for o in obj:
            try:
                marshalled_list.append(marshal(o, keys, marshallerName, objs))
            except AlreadySeenException:
                pass
        return marshalled_list
    elif isinstance(obj, DateTime):
        return str(obj)
    # Nothing matched, so it's a string or number or other unmarshallable.
    else:
        return obj
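
The OOSet in marshal() is only a recursion guard: record an identity key before descending, raise if the key is seen again, and always remove it on the way out so siblings may still reference the same object. A stripped-down sketch of just that guard, walking a hypothetical nested-dict structure instead of marshaller adapters:

from BTrees.OOBTree import OOSet

class AlreadySeenException(Exception):
    pass

def walk(node, seen=None):
    # node is a hypothetical dict: {"id": str, "children": [node, ...]}
    if seen is None:
        seen = OOSet()
    if node["id"] in seen:
        raise AlreadySeenException(node["id"])
    seen.insert(node["id"])
    children = []
    try:
        for child in node.get("children", ()):
            try:
                children.append(walk(child, seen))
            except AlreadySeenException:
                pass  # cycle detected: skip the repeated branch
    finally:
        seen.remove(node["id"])
    return {"id": node["id"], "children": children}
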
Example #7
    def html(self, suppress_entries=0):
        """ html log for viewing transactions in the ZMI """
        out = []
        keys = OOSet(self._transactions.keys())
        for t_id in keys:
            t = self._transactions[t_id]
            out.append('''
<h4>Transaction id: %s</h4>
<p>
<em>User:</em> %s<br/>
<em>Description:</em> %s<br/>
</p>
''' % (oid2str(t.id), t.user, t.description))
            if suppress_entries:
                continue
            for entry_id in t._entries.keys():
                entry = t._entries[entry_id]
                out.append('''
<p>
<em>id:</em> %(id)s<br/>
<em>obj:</em> %(path)s<br/>
<em>method:</em> %(method)s<br/>
<em>args:</em> %(args)s<br/>
</p>
''' % entry)
        out = '<hr>'.join(out)
        return '<html><body>%s</body></html>' % out
Example #8
 def unread_storage(self):
     try:
         return self.context.__unread_storage__
     except AttributeError:  #This is basically init
         self.context.__unread_storage__ = OOSet(
             find_authorized_userids(self.context, (VIEW, )))
         return self.context.__unread_storage__
Example #9
    def store(self, domain, key, value, overwrite=False):
        """Store a dictionary in the domain's storage
        """
        # Get the storage for the current URL
        storage = self.get_storage(domain=domain)
        datastore = storage["data"]
        indexstore = storage["index"]

        # already fetched
        if key in datastore and not overwrite:
            logger.info("Skipping existing key {}".format(key))
            return

        # Create some indexes
        for index in ["portal_type", "parent_id", "parent_path"]:
            index_key = "by_{}".format(index)
            if not indexstore.get(index_key):
                indexstore[index_key] = OOBTree()
            indexvalue = value.get(index)
            # Check if the index value, e.g. the portal_type="Sample", is
            # already known as a key in the index.
            if not indexstore[index_key].get(indexvalue):
                indexstore[index_key][indexvalue] = OOSet()
            indexstore[index_key][indexvalue].add(key)

        # store the data
        datastore[key] = value
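
The layout built above is a two-level index: indexstore["by_<name>"] is an OOBTree keyed by index value, and each value holds an OOSet of record keys. A lookup is then two gets plus the set contents; a sketch with made-up data and a hypothetical search_index helper:

from BTrees.OOBTree import OOBTree, OOSet

indexstore = OOBTree()
indexstore["by_portal_type"] = OOBTree()
indexstore["by_portal_type"]["Sample"] = OOSet(["key-1", "key-7"])

def search_index(indexstore, index, value):
    # hypothetical read-side counterpart of the store() method above
    bucket = indexstore.get("by_{}".format(index))
    if bucket is None:
        return []
    return list(bucket.get(value, OOSet()))

print(search_index(indexstore, "portal_type", "Sample"))  # ['key-1', 'key-7']
print(search_index(indexstore, "parent_id", "missing"))   # []
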
Example #10
    def _index_object(self, documentId, obj, threshold=None, attr=''):
        """ index an object 'obj' with integer id 'documentId'

        Ideally, we've been passed a sequence of some sort that we
        can iterate over. If, however, we haven't, we should do something
        useful with the results. In the case of a string, this means
        indexing the entire string as a keyword."""

        # First we need to see if there's anything interesting to look at
        # self.id is the name of the index, which is also the name of the
        # attribute we're interested in.  If the attribute is callable,
        # we'll do so.

        newKeywords = self._get_object_keywords(obj, attr)

        oldKeywords = self._unindex.get(documentId, None)

        if oldKeywords is None:
            # we've got a new document, let's not futz around.
            try:
                for kw in newKeywords:
                    self.insertForwardIndexEntry(kw, documentId)
                if newKeywords:
                    self._unindex[documentId] = list(newKeywords)
            except TypeError:
                return 0
        else:
            # we have an existing entry for this document, and we need
            # to figure out if any of the keywords have actually changed
            if type(oldKeywords) is not OOSet:
                oldKeywords = OOSet(oldKeywords)
            newKeywords = OOSet(newKeywords)
            fdiff = difference(oldKeywords, newKeywords)
            rdiff = difference(newKeywords, oldKeywords)
            if fdiff or rdiff:
                # if we've got forward or reverse changes
                if newKeywords:
                    self._unindex[documentId] = list(newKeywords)
                else:
                    del self._unindex[documentId]
                if fdiff:
                    self.unindex_objectKeywords(documentId, fdiff)
                if rdiff:
                    for kw in rdiff:
                        self.insertForwardIndexEntry(kw, documentId)
        return 1
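
difference is applied in both directions: old minus new gives the keywords to unindex, new minus old the keywords to add. The same calls in isolation:

from BTrees.OOBTree import OOSet, difference

old_keywords = OOSet(['blue', 'green', 'red'])
new_keywords = OOSet(['green', 'red', 'yellow'])

fdiff = difference(old_keywords, new_keywords)  # stale keywords to unindex
rdiff = difference(new_keywords, old_keywords)  # fresh keywords to index
print(list(fdiff))  # ['blue']
print(list(rdiff))  # ['yellow']
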
Example #11
 def __init__(self, name = '', desc = ''):
     """
     The BudgetGroup constructor takes a string name and desc as the Name of
     the group, and its Description.
     It initialises with an empty set.
     """
     self.Name = name
     self.Description = desc
     self.ItemSet = OOSet()
Example #12
 def __init__(self, storage, db=None, scan_interval=10):
     self.storage = storage
     self.db = db
     self.next_conn_id = 1
     self.conn_oids = IOBTree()  # IOBTree({conn_id -> OOSet([oid])})
     self.oids = OOSet()  # OOSet([oid])
     self.lock = allocate_lock()
     self.scan_interval = scan_interval
     self.next_scan = time() + scan_interval
Example #13
 def consume(buf):
     dmd._p_jar.sync()
     for docId, uid, ob, metadata in buf:
         if uid is not None and docId is None:
             global_catalog.uncatalog_object(uid)
             continue
         if uid not in catalog.uids:
             catalog._length.change(1)
         catalog.uids[uid] = docId
         catalog.paths[docId] = uid
         if metadata:
             catalog.data[docId] = metadata
         for idx, val, uval in ob:
             if val is not None:
                 idx = catalog.indexes[idx]
                 if isinstance(idx, MultiPathIndex):
                     if docId in idx._unindex:
                         unin = idx._unindex[docId]
                         if isinstance(unin, set):
                             unin = idx._unindex[docId] = OOSet(unin)
                         for oldpath in list(unin):
                             if list(oldpath.split('/')) not in val:
                                 idx.unindex_paths(docId, (oldpath, ))
                     else:
                         idx._unindex[docId] = OOSet()
                         idx._length.change(1)
                     idx.index_paths(docId, val)
                 else:
                     oldval = idx._unindex.get(docId)
                     if uval == oldval: continue
                     customEq = idx._equalValues
                     if customEq is not None:
                         if customEq(val, oldval): continue
                     update = idx._update
                     if update is None or oldval is None or val is None:
                         if oldval is not None:
                             idx._unindex_object(docId, oldval, val is None)
                         if val is None: continue
                         idx._indexValue(docId, val, None)
                         if oldval is None: idx.numObjects.change(1)
                     else:
                         rv = update(docId, val, oldval, None)
                         if isinstance(rv, tuple): continue
                     idx._unindex[docId] = uval
Example #14
 def __setitem__(self, key, value):
     if value:
         # Make sure each role exists
         roles_principals = get_roles_registry()
         if IRole.providedBy(value):
             value = [value]
         for role in value:
             assert role in roles_principals, "'%s' isn't a role" % role
         self.data[key] = OOSet(value)
     elif key in self.data:
         del self.data[key]
Example #15
 def __init__(self, name = '', desc = ''):
     """
     The Project constructor takes a string name and desc as the Name of
     the project, and its Description.
     The default values of Name and Description would be ''.
     It initialises with an empty set.
     """
     
     self.Name = name
     self.Description = desc
     self.GroupSet = OOSet()
Example #16
 def __setitem__(self, relation_id, rids):
     assert isinstance(relation_id, int)
     self.can_create_relation(rids)
     if relation_id in self:
         del self[relation_id]
     for x in rids:
         assert isinstance(x, int)
         if x not in self.rid_to_relations:
             self.rid_to_relations[x] = OOSet()
         self.rid_to_relations[x].add(relation_id)
     self.relation_to_rids[relation_id] = tuple(rids)
Example #17
    def _addToRoomDayReservationsIndex(self):
        roomDayReservationsIndexBTree = Reservation.getRoomDayReservationsIndexRoot()

        for period in self.splitToPeriods():
            day = period.startDT.date()
            key = (self.room.id, day)
            resvs = roomDayReservationsIndexBTree.get(key)
            if resvs is None:
                resvs = OOSet()
            resvs.add(self)
            roomDayReservationsIndexBTree[key] = resvs
Example #18
 def set_connection_oids(self, conn_id, oids):
     """Records the OIDs a connection is using and periodically scans.
     """
     changed = 0
     new_oids = OOSet()
     self.lock.acquire()
     try:
         if oids:
             self.conn_oids[conn_id] = OOSet(oids)
         else:
             if self.conn_oids.has_key(conn_id):
                 del self.conn_oids[conn_id]
         for oid_set in self.conn_oids.values():
             new_oids.update(oid_set)
         if self.oids != new_oids:
             self.oids = new_oids
             changed = 1
     finally:
         self.lock.release()
     if changed:
         self.storage.scanner.set_oids(new_oids)
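
Rebuilding new_oids with update() is an n-way merge of the per-connection sets; for exactly two sets the module-level union() from BTrees.OOBTree gives the same result. A standalone sketch with made-up oids:

from BTrees.IOBTree import IOBTree
from BTrees.OOBTree import OOSet, union

conn_oids = IOBTree()  # conn_id -> OOSet of oids
conn_oids[1] = OOSet([b'\x00' * 7 + b'\x01', b'\x00' * 7 + b'\x02'])
conn_oids[2] = OOSet([b'\x00' * 7 + b'\x02', b'\x00' * 7 + b'\x03'])

merged = OOSet()
for oid_set in conn_oids.values():
    merged.update(oid_set)

print(len(merged))                              # 3
print(len(union(conn_oids[1], conn_oids[2])))   # 3 (pairwise equivalent)
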
Example #19
 def __init__(self, parent=None):
     super(ItemModel, self).__init__(parent)
     self.labels = self.HEADERS.split(',')
     root = parent.conn.root()
     if not "item" in root:
         root["item"] = PersistentList()
         transaction.commit()
     if not "hash" in root:
         root["hash"] = OOSet()
         transaction.commit()
     self._data = root["item"]
     self._hash = root["hash"]
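
The persistent containers only need to be created once; on later runs they simply come back out of the ZODB root. A self-contained sketch of the same initialisation against an in-memory MappingStorage (assumed setup for illustration, not the original application wiring):

import transaction
from ZODB import DB
from ZODB.MappingStorage import MappingStorage
from persistent.list import PersistentList
from BTrees.OOBTree import OOSet

db = DB(MappingStorage())  # in-memory storage, for illustration only
conn = db.open()
root = conn.root()

if "item" not in root:
    root["item"] = PersistentList()
if "hash" not in root:
    root["hash"] = OOSet()
transaction.commit()

root["item"].append({"title": "first row"})
root["hash"].insert("first-row-hash")
transaction.commit()
print(len(root["item"]), len(root["hash"]))  # 1 1
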
Example #20
    def test_ExpressionEvaluator(self):
        ki = self.ki
        obj2 = self.obj2
        ee = ExpressionEvaluator()
        ee.id = 'ee'
        ki._setObject(ee.id, ee)
        ee = ki._getOb(ee.id)
        ee.manage_changeProperties(Expression='python: (3,4,)')
        self.assertEqual(ki._evaluate(obj2).keys(), OOSet((1, 2, 3, 4)).keys())
        # ignore
        ee.manage_changeProperties(IgnorePredicate='python: 3 in value')
        self.assertEqual(ki._evaluate(obj2).keys(), OOSet((1, 2)).keys())
        # ignore - call it
        ee.manage_changeProperties(IgnorePredicate='python: lambda v: 3 in v')
        # normalize
        ee.manage_changeProperties(Expression='python: (4,)')
        ee.manage_changeProperties(Normalizer='python: (0,) + value')
        self.assertEqual(ki._evaluate(obj2).keys(), OOSet((0, 1, 2, 4)).keys())
        # normalize - call it
        ee.manage_changeProperties(Normalizer='python: lambda v: (0,) + v')
        self.assertEqual(ki._evaluate(obj2).keys(), OOSet((0, 1, 2, 4)).keys())
        # method
        ee.manage_changeProperties(Expression="python: lambda object: object.kw")
        self.assertEqual(ki._evaluate(obj2).keys(), OOSet((0, 1, 2)).keys())
        ## combine
        # 'union' - already tested
        # 'useFirst'
        ki.CombineType = 'useFirst'
        self.assertEqual(ki._evaluate(obj2).keys(), OOSet((1, 2)).keys())
Example #21
    def index_object(self, docid, obj, threshold=100):
        """ hook for (Z)Catalog """

        f = getattr(obj, self.id, None)
        if f is not None:
            if safe_callable(f):
                try:
                    paths = f()
                except AttributeError:
                    return 0
            else:
                paths = f
        else:
            try:
                paths = obj.getPhysicalPath()
            except AttributeError:
                return 0

        if not paths: return 0
        paths = _recursivePathSplit(paths)
        if not _isSequenceOfSequences(paths):
            paths = [paths]

        if docid in self._unindex:
            if isinstance(self._unindex[docid], set):
                self._unindex[docid] = OOSet(self._unindex[docid])

            unin = set(self._unindex[docid])
            paths_set = {'/'.join(x) for x in paths}

            for oldpath in unin - paths_set:
                self.unindex_paths(docid, (oldpath, ))
        else:
            self._unindex[docid] = OOSet()
            self._length.change(1)

        self.index_paths(docid, paths)

        return 1
Example #22
    def add(self, tag, userid):
        if not isinstance(tag, basestring):
            raise TypeError('tag must be a string. Was: %s' % tag)
        if not TAG_PATTERN.match(tag):
            raise ValueError("'tag' doesn't conform to tag standard: %s" %
                             _TAG_STRING)
        if tag not in self.tags_storage:
            self.tags_storage[tag] = OOSet()

        if userid not in self.tags_storage[tag]:
            self.tags_storage[tag].add(userid)
            if tag == 'like':
                _notify(self.context)
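
Reading the structure back is a single lookup in the tag BTree. A sketch with a hypothetical userids_for_tag helper as the read-side counterpart of add():

from BTrees.OOBTree import OOBTree, OOSet

tags_storage = OOBTree()
tags_storage['like'] = OOSet(['user-1', 'user-3'])

def userids_for_tag(tags_storage, tag):
    # hypothetical read helper for the storage populated by add() above
    return frozenset(tags_storage.get(tag, ()))

print(sorted(userids_for_tag(tags_storage, 'like')))       # ['user-1', 'user-3']
print(sorted(userids_for_tag(tags_storage, 'favourite')))  # []
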
Example #23
    def test_doesnt_cause_redirect_loop_on_bogus_storage_entries(self):
        storage = queryUtility(IRedirectionStorage)
        storage._paths['/plone/same'] = '/plone/same'
        storage._rpaths['/plone/same'] = OOSet(['/plone/same'])
        transaction.commit()

        response = requests.get(
            self.portal_url + '/same/@@view',
            headers={'Accept': 'application/json'},
            auth=(SITE_OWNER_NAME, SITE_OWNER_PASSWORD),
            allow_redirects=False,
        )
        self.assertEqual(404, response.status_code)
Example #24
    def index_object(self, docid, obj, threshold=100):
        """ hook for (Z)Catalog """

        f = getattr(obj, self.id, None)
        if f is not None:
            if safe_callable(f):
                try:
                    paths = f()
                except AttributeError:
                    return 0
            else:
                paths = f
        else:
            try:
                paths = obj.getPhysicalPath()
            except AttributeError:
                return 0

        if not paths: return 0
        paths = _recursivePathSplit(paths)
        if not _isSequenceOfSequences(paths):
            paths = [paths]

        if docid in self._unindex:
            unin = self._unindex[docid]
            # Migrate old versions of the index to use OOSet
            if isinstance(unin, set):
                unin = self._unindex[docid] = OOSet(unin)
            for oldpath in list(unin):
                if list(oldpath.split('/')) not in paths:
                    self.unindex_paths(docid, (oldpath, ))
        else:
            self._unindex[docid] = OOSet()
            self._length.change(1)

        self.index_paths(docid, paths)

        return 1
Example #25
 def add(self, context):
     container = self._find_container(context)
     assert container
     if container.uid not in self:
         self[container.uid] = OOBTree()
     if context.type_name not in self[container.uid]:
         type_uids = self[container.uid][context.type_name] = OOSet()
     else:
         type_uids = self[container.uid][context.type_name]
     if context.uid not in type_uids:
         type_uids.add(context.uid)
         #counter_context = self._find_counter(container)
         #self._update_counter(counter_context.uid, context.type_name, 1)
         return context
Example #26
 def indexConf(self, conf):
     # Note: conf can be any object which has getEndDate() and getStartDate() methods
     self._idxDay._p_changed = True
     days = (conf.getEndDate().date() - conf.getStartDate().date()).days
     startDate = datetime(conf.getStartDate().year, conf.getStartDate().month, conf.getStartDate().day)
     for day in range(days + 1):
         key = int(datetimeToUnixTime(startDate + timedelta(day)))
         # skip keys past BTREE_MAX_INT (the year-2038 limit)
         if key > BTREE_MAX_INT:
             continue
         if self._idxDay.has_key(key):
             self._idxDay[key].add(conf)
         else:
             self._idxDay[key] = OOSet([conf])
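
Because the day index is keyed by integer unix timestamps, a date-range query can use the ranged values(min, max) form that BTrees provide and then union the per-day sets. A sketch assuming an IOBTree for _idxDay (the concrete container type is not shown above):

from BTrees.IOBTree import IOBTree
from BTrees.OOBTree import OOSet

idx_day = IOBTree()  # assumed: unix timestamp of day start -> OOSet of conferences
idx_day[86400 * 100] = OOSet(['conf-a'])
idx_day[86400 * 101] = OOSet(['conf-a', 'conf-b'])
idx_day[86400 * 200] = OOSet(['conf-c'])

def conferences_between(idx_day, start_ts, end_ts):
    # ranged values() only visits buckets inside [start_ts, end_ts]
    found = set()
    for per_day in idx_day.values(start_ts, end_ts):
        found.update(per_day)
    return sorted(found)

print(conferences_between(idx_day, 86400 * 100, 86400 * 150))  # ['conf-a', 'conf-b']
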
Example #27
    def _index_object(self, documentId, obj, threshold=None, attr=''):

        # get permuted keywords
        newKeywords = self._get_permuted_keywords(obj)

        oldKeywords = self._unindex.get(documentId, None)

        if oldKeywords is None:
            # we've got a new document, let's not futz around.
            try:
                for kw in newKeywords:
                    self.insertForwardIndexEntry(kw, documentId)
                if newKeywords:
                    self._unindex[documentId] = list(newKeywords)
            except TypeError:
                return 0
        else:
            # we have an existing entry for this document, and we need
            # to figure out if any of the keywords have actually changed
            if type(oldKeywords) is not OOSet:
                oldKeywords = OOSet(oldKeywords)
            newKeywords = OOSet(newKeywords)
            fdiff = difference(oldKeywords, newKeywords)
            rdiff = difference(newKeywords, oldKeywords)
            if fdiff or rdiff:
                # if we've got forward or reverse changes
                if newKeywords:
                    self._unindex[documentId] = list(newKeywords)
                else:
                    del self._unindex[documentId]
                if fdiff:
                    self.unindex_objectKeywords(documentId, fdiff)
                if rdiff:
                    for kw in rdiff:
                        self.insertForwardIndexEntry(kw, documentId)
        return 1
Example #28
    def index_paths(self, docid, paths):
        for path in paths:
            if isinstance(path, (list, tuple)):
                path = '/' + '/'.join(path[1:])
            comps = list(filter(None, path.split('/')))
            parent_path = '/' + '/'.join(comps[:-1])

            for i in range(len(comps)):
                comp = "/".join(comps[1:i + 1])
                if comp:
                    self.insertEntry(comp, docid, i)

            # Add terminator
            self.insertEntry(None, docid, len(comps) - 1, parent_path, path)

            self._unindex.setdefault(docid, OOSet()).insert(path)
Example #29
 def replicate(self):
     """ replicate log to targets """
     keys = OOSet(self._transactions.keys())
     for key in keys:
         transaction = self._transactions[key]
         try:
             transaction.replicate(self)
             del self._transactions[key]
         except:
             from sys import exc_info
             import traceback
             info = exc_info()
             zlog('Replication',
                  'Could not replicate transaction %s to %s'%(
                     oid2str(transaction.id), self.id)) 
             break
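
Both html() (Example #7) and replicate() copy the transaction keys into an OOSet before looping. The snapshot matters here because replicate() deletes entries from self._transactions inside the loop, and mutating a BTree while iterating over it directly is not reliable. The same pattern in isolation:

from BTrees.OOBTree import OOBTree, OOSet

transactions = OOBTree({'t1': 'first', 't2': 'second', 't3': 'third'})

# Iterate over a snapshot of the keys, not the live BTree,
# so deleting entries inside the loop is safe.
for key in OOSet(transactions.keys()):
    del transactions[key]

print(len(transactions))  # 0
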
Example #30
def load_all_data():
    # For first launch (if types don't exist)
    dbtype_lists = ["Account", "Stock", "GLOBAL_STOCK_SCREEN_DICT"]
    dbtype_singletons = [
        "GLOBAL_ATTRIBUTE_SET",
        "GLOBAL_TICKER_LIST",
        "SCREEN_NAME_AND_TIME_CREATED_TUPLE_LIST",
    ]
    dbtypes = dbtype_lists + dbtype_singletons
    commit = False

    for dbtype in dbtypes:
        try:
            assert (hasattr(root, dbtype))
        except:
            logging.info("Creating DB for: {}".format(dbtype))
            if dbtype in dbtype_lists:
                setattr(root, dbtype, BTrees.OOBTree.BTree())
                commit = True
            elif dbtype in dbtype_singletons:
                if dbtype.endswith("_SET"):
                    setattr(root, dbtype, OOSet())
                    commit = True
                elif dbtype.endswith("_LIST"):
                    setattr(root, dbtype, persistent.list.PersistentList())
                    commit = True
                else:
                    logging.error(
                        "DB type {} does not conform. Exiting...".format(
                            dbtype))
            else:
                logging.error("DB types have been changed")
                sys.exit()

    if commit:
        config.LAST_PACKED_DB_SIZE = os.path.getsize(
            os.path.join('DO_NOT_COPY', 'mydata.fs'))
        commit_db()
    # now load db data if they exist
    config.LAST_PACKED_DB_SIZE = os.path.getsize(
        os.path.join('DO_NOT_COPY', 'mydata.fs'))
    load_GLOBAL_STOCK_DICT()
    load_all_portfolio_objects()
    load_GLOBAL_STOCK_SCREEN_DICT()
    load_SCREEN_NAME_AND_TIME_CREATED_TUPLE_LIST()
    load_filenames_imported_files()
    pack_if_necessary()