def __init__(self):
    """Create the card container with empty stores and one initial card."""
    # Per-connection navigation state (``_v_`` attributes are volatile
    # and never persisted by ZODB).
    self._v_current_card = 0
    self._v_text = None
    self._v_cardNumber = None
    # Persistent bookkeeping: id counter, ordered card ids, pending
    # index queue, and the BTree-backed card/hint stores.
    self.last_id = 0
    self.card_list = PersistentList()
    self.to_be_indexed = PersistentList()
    self.card_store = OOBTree.OOBTree()
    self.hint_store = OOBTree.OOBTree()
    # Seed the container with a single card appended at the end.
    self.CreateCard(at_end=1, and_set=0)
def __init__(self):
    """Initialize (singleton!) tool object."""
    # User-configurable settings.
    self.dictServers = {}
    self.secProcessWindows = {}
    self.secDefaultProcessWindow = self.DEFAULT_PROCESS_TIME_WINDOW
    # Internal request queues; not exposed to the user.
    self.pendingRequests = PersistentList([])
    self.processingRequests = PersistentList([])
    # NOTE(review): original comment says this log call does not seem to
    # emit anything -- confirm the logger configuration.
    log('__init__ completed.')
def testNewFolderCriterias(self):
    """Check that changing import criteria on one bibliography folder
    leaves a second folder's criteria untouched.
    """
    duplicates_criteria = {
        'BookReference': PersistentList(('authors', 'title')),
    }
    self.second_bf = self.getEmptyBibFolder('second_bib_folder')
    old_criterias = self.bf.getCriterias()
    sec_old_criterias = self.second_bf.getCriterias()
    # Apply the new criteria to the first folder only.
    for bib_type, criteria in duplicates_criteria.items():
        self.bf.setCriteriasForType(bib_type, criteria)
    new_criterias = self.bf.getCriterias()
    sec_new_criterias = self.second_bf.getCriterias()
    self.assertNotEquals(old_criterias, new_criterias,
                         'Criterias have not been changed')
    self.assertEquals(sec_old_criterias, sec_new_criterias,
                      'Criterias have been changed')
def test_persistent_types_buglets(self):
    """Show that the persistent containers are not builtin subclasses."""
    plist = PersistentList([1, 2, 3])
    self.assertTrue(isinstance(plist, PersistentList))
    # A PersistentList is NOT a list -- dangerous for isinstance checks.
    self.assertFalse(isinstance(plist, list))
    # Slicing hands back another PersistentList, not a plain list.
    self.assertTrue(isinstance(plist[:], PersistentList))
    pmap = PersistentMapping({1: 2})
    self.assertTrue(isinstance(pmap, PersistentMapping))
    # Likewise, a PersistentMapping is NOT a dict -- dangerous.
    self.assertFalse(isinstance(pmap, dict))
    # copy() also preserves the persistent type.
    self.assertTrue(isinstance(pmap.copy(), PersistentMapping))
def make_config_persistent(kwargs): """ iterates on the given dictionnary and replace list by persistent list, dictionary by persistent mapping. """ for key, value in kwargs.items(): if type(value) == type({}): p_value = PersistentMapping(value) kwargs[key] = p_value elif type(value) in (type(()), type([])): p_value = PersistentList(value) kwargs[key] = p_value
def getSelectedCriteria(self, bib_type=None):
    """Return the selected duplicates criteria, synced with the
    available criteria.

    With *bib_type* given, return the criteria list for that reference
    type, or False when the type is unknown.  Without it, return a plain
    dict mapping every available reference type to its selected criteria.
    Lazily migrates the old v0.8 attribute names and initializes the
    persistent ``duplicates_criteria`` mapping on first call.
    """
    # Migrate CMFBAT v0.8 duplicates engine: rename the misspelled
    # 'imp_criterias'/'criteriasUpdated' attributes to the v0.9 names.
    if shasattr(self, 'imp_criterias'):
        # NOTE(review): self.getId() usually returns a string, so
        # '/'.join(self.getId()) inserts '/' between every character --
        # looks like getPhysicalPath() was intended; confirm.
        print 'CMFBibliographyAT: performing duplicates engine property update - v0.8 -> v0.9 (getSelectedCriteria of %s)' % '/'.join(
            self.getId())
        self.duplicates_criteria = PersistentMapping()
        # Deep copy so the migrated data is independent of the old attribute.
        self.duplicates_criteria = copy.deepcopy(self.imp_criterias)
        self.criteriaUpdated = self.criteriasUpdated
        # Best-effort cleanup of the legacy attributes; a failed delete
        # (e.g. class-level attribute) is deliberately ignored.
        try:
            delattr(self, 'imp_criterias')
        except:
            pass
        try:
            delattr(self, 'criteriasUpdated')
        except:
            pass
    # First call?  Initialize self.duplicates_criteria.
    bib_tool = getToolByName(self, 'portal_bibliography')
    if not shasattr(self, 'duplicates_criteria') or not self.duplicates_criteria:
        if self.getId() == bib_tool.getId():
            # We ARE the bibliography tool: seed every known reference
            # type with the default criteria.
            for reference_type in bib_tool.getReferenceTypes():
                self.duplicates_criteria[reference_type] = PersistentList(
                    self._default_duplicates_criteria)
            self.criteriaUpdated = True
        else:
            # Otherwise inherit the tool's current selection.
            self.duplicates_criteria = bib_tool.getSelectedCriteria()
            self.criteriaUpdated = True
    if not shasattr(self, '_criteria') or not self._criteria:
        self.initCriteria()
    # Make sure our selected criteria are in sync with the available
    # criteria: keep only selections that still exist in self._criteria.
    duplicates_criteria = {}
    for key in self._criteria.keys():
        duplicates_criteria[key] = [
            criterion for criterion in self._criteria[key]
            if self.duplicates_criteria.has_key(key) and (
                criterion in self.duplicates_criteria[key])
        ]
    if bib_type:
        # EAFP probe: unknown types yield False rather than raising.
        try:
            duplicates_criteria[bib_type]
        except KeyError:
            return False
        return duplicates_criteria[bib_type]
    else:
        return duplicates_criteria
def setCriteriaForType(self, bib_type, criteria):
    """Store *criteria* as the duplicates criteria for *bib_type*.

    Always marks the selection as updated and returns True.
    """
    persisted = PersistentList(criteria)
    self.duplicates_criteria[bib_type] = persisted
    # FIXME: could compare with the previous value and only flag real changes.
    self.criteriaUpdated = True
    return True
def __init__(self):
    """Set up an empty library registry and resource-type mapping."""
    self._libraries = PersistentList()
    self._res_types = PersistentMapping()
    # UID-based linking is disabled by default.
    self.linkbyuid = False
def __init__(self, folder_dir):
    """Create a classifier store rooted at *folder_dir*."""
    self._dir = folder_dir
    self.classifier = PBayes()
    # Persistent training corpora.
    self.hams = PersistentList()
    self.spams = PersistentList()
def ___test_huge_db_ghosting_system():
    """Interactive testcase, to demonstrate the behaviour of ZODB regarding
    memory management and ghost objects.

    Launch it with a "top"-like window opened next to it.

    MIGHT TRIGGER THIS WARNING:

        <...>Connection.py:550: UserWarning: The <class 'persistent.list.PersistentList'>
        object you're saving is large. (20001339 bytes.)
        Perhaps you're storing media which should be stored in blobs.
        Perhaps you're using a non-scalable data structure, such as a
        PersistentMapping or PersistentList.
        Perhaps you're storing data in objects that aren't persistent at
        all. In cases like that, the data is stored in the record of the
        containing persistent object.
        In any case, storing records this big is probably a bad idea.
        If you insist and want to get rid of this warning, use the
        large_record_size option of the ZODB.DB constructor (or the
        large-record-size option in a configuration file) to specify a larger
        size.

    Playing with this test shows that:

    - contents of persistent lists and mappings are really only loaded when
      accessed (eg. when lookup is done on them..)
    - non persistent types (list(), dict()) are badly treated, and remain in
      memory even when committed to file
    """
    # Flip to True to observe the (worse) behaviour of plain list/dict.
    use_non_persistent_types = False
    PersistentList = globals()["PersistentList"]
    PersistentMapping = globals()["PersistentMapping"]
    if use_non_persistent_types:
        PersistentList = list
        PersistentMapping = dict
    db, connection, root = _get_huge_db_root()
    # ~20k mappings of a few tens of KB each -> a deliberately huge record.
    root["data"] = PersistentList(PersistentMapping({"toto": "tata"*random.randint(5000, 6000)}) for i in range(20000))
    root["data"][0] = PersistentMapping({"toto": "tata"*5000000})  # HUGE first element
    print("We have a HUGE database filled with transient data now!")
    time.sleep(5)
    transaction.commit()
    print("We have committed the transaction!")
    time.sleep(5)
    connection.close()
    db.close()
    # --------- reopen and watch memory while touching the data ---------
    db, connection, root = _get_huge_db_root()
    print("We have reopened the huge database now!")
    time.sleep(5)
    data = root["data"]
    print("We have accessed data list!")
    time.sleep(5)
    var1 = data[0]
    print("We have accessed first element of data list!")
    time.sleep(5)
    var1 = data[0]["toto"]
    print("We have unghosted first element of data list!")
    time.sleep(5)
    for i in data:
        i  # no unghosting: merely referencing the element loads nothing
    print("We have accessed all elements of data list!")
    time.sleep(5)
    for i in data:
        i["toto"]  # THIS removes the ghosting effect on element
    print("We have unghosted all elements of data list!")
    time.sleep(15)
def test_conflict_errors(self):
    """Here we realize that conflict errors only occur when
    modifications on a particular container (with specific oid) occur
    concurrently.

    Updates can still be lost if a branch of the object tree is disconnected
    from the root while one of its leaves gets updated.

    Similarly, readCurrent() only protects a specific container of the object
    tree, which can still be disconnected from the root by a transaction,
    while its content is updated by another transaction.
    """
    conn = self.conn
    root = conn.root
    # Seed four independent persistent containers and commit them.
    root.stuff = PersistentList([9])
    root.origin = PersistentList([3])
    root.target = PersistentList([8])
    root.dummy1 = PersistentList([9])
    transaction.commit()
    # basic conflict on root #
    # Another worker mutates root (deletes dummy1) while we also touch root.
    pool.apply(delete_container, args=(self.db, "dummy1"))
    root.dummy2 = 5
    self.assertRaises(ConflictError, transaction.commit)  # conflict !!
    self.assertRaises(TransactionFailedError, transaction.commit)  # transaction broken
    transaction.abort()
    self.assertFalse(hasattr(root, "dummy2"))  # rolled back
    # no conflict when a branch gets detached while leaf is updated
    container = root.stuff
    pool.apply(delete_container, args=(self.db, "stuff"))
    container[0] = 88
    transaction.commit()
    self.assertFalse(hasattr(root, "stuff"))  # update lost
    # without readCurrent() - lost update #
    root.origin = PersistentList([13])
    value = root.origin
    pool.apply(transfer_list_value, args=(self.db, "origin", "target"))
    root.target = value
    transaction.commit()
    self.assertEqual(root.target, PersistentList([13]))  # we lost [3]
    # with readCurrent() and container update - ReadConflictError raised! #
    root.origin = PersistentList([17])
    transaction.commit()
    res = conn.readCurrent(root.target)  # container object selected !!
    assert res is None  # no return value expected
    value = root.target
    pool.apply(transfer_list_value, args=(self.db, "origin", "target"))
    root.othertarget = value
    self.assertRaises(Exception, transaction.commit)
    self.assertEqual(root.target, PersistentList([17]))  # auto refreshing occurred
    self.assertFalse(hasattr(root, "othertarget"))  # auto refreshing occurred
    self.assertRaises(Exception, transaction.commit)  # but transaction still broken
    transaction.abort()
    transaction.commit()  # now all is ok once again
    # with readCurrent() and container deletion - somehow lost update! #
    value = root.origin[0]
    res = conn.readCurrent(root.origin)  # container object selected !!
    assert res is None  # no return value expected
    pool.apply(delete_container, args=(self.db, "origin"))
    root.target[0] = value  # we use a value whose origin has now been deleted in other thread
    transaction.commit()  # here it's OK, the deleted object still remains in the DB history even if unreachable
def __init__(self, parent):
    """Initialize the base class, then create an empty persistent list."""
    Base.__init__(self, parent)
    self.__pers_list = PersistentList()