Example #1
    def checkPackLotsWhileWriting(self):
        # This is like the other pack-while-writing tests, except it packs
        # repeatedly until the client thread is done.  At the time it was
        # introduced, it reliably provoked
        #     CorruptedError:  ... transaction with checkpoint flag set
        # in the ZEO flavor of the FileStorage tests.

        db = DB(self._storage)
        conn = db.open()
        root = conn.root()

        choices = list(range(10))
        for i in choices:
            root[i] = MinPO(i)
        transaction.commit()

        snooze()
        packt = time.time()

        for dummy in choices:
            for i in choices:
                root[i].value = MinPO(i)
                transaction.commit()

        NUM_LOOP_TRIP = 100
        timer = ElapsedTimer(time.time())
        thread = ClientThread(db, choices, NUM_LOOP_TRIP, timer, 0)
        thread.start()
        while thread.is_alive():
            db.pack(packt)
            snooze()
            packt = time.time()
        thread.join()

        self._sanity_check()
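
The helpers used above (MinPO, snooze, ElapsedTimer, ClientThread, _sanity_check) come from ZODB's test-support code and are not shown here. As a self-contained illustration of the pack pattern the test exercises, here is a minimal sketch against an in-memory MappingStorage:

import time
import transaction
from ZODB import DB
from ZODB.MappingStorage import MappingStorage

db = DB(MappingStorage())
conn = db.open()
root = conn.root()
root['x'] = 1
transaction.commit()
root['x'] = 2          # leaves a non-current revision behind
transaction.commit()
db.pack(time.time())   # reclaims revisions older than the pack time
conn.close()
db.close()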
Example #2
    def __call__(self, container, name, args):
        copts, cargs = self.parser.parse_args(args)
        data = os.path.join(container.opts.data, name)

        # First, pack the ZODB
        storage = FileStorage.FileStorage("%s/var/Data.fs" % data)
        db = DB(storage)
        db.pack()

        # Can't pack an Rtree's storage in-place, so we move it away and 
        # recreate from the contents of the ZODB
        # Defined up front so the finally clause below can always reference it
        fwd = None
        rtree_filename = '%s/var/vrt1' % data
 
        try:
            shutil.move(rtree_filename + ".dat", rtree_filename + ".bkup.dat")
            shutil.move(rtree_filename + ".idx", rtree_filename + ".bkup.idx")
        
            conn = db.open()
            root = conn.root()
            keys = root['index'].keys

            bkup = Rtree('%s/var/vrt1.bkup' % data)
            pagesize = bkup.properties.pagesize

            if len(keys) == 0:
                fwd = Rtree(
                        '%s/var/vrt1' % data,
                        # Passing in copied properties doesn't work,
                        # leading to errors involving page ids
                        # properties=new_properties, 
                        pagesize=pagesize
                        )
            else:
                gen = ((intid, bbox, None) for intid, (uid, bbox) \
                      in keys.items())
                fwd = Rtree(
                        '%s/var/vrt1' % data,
                        gen, 
                        # Passing in copied properties doesn't work,
                        # leading to errors involving page ids
                        # properties=new_properties,
                        pagesize=pagesize
                        )
            
            conn.close()
            db.close()
            storage.close()
        except:
            # Restore backups
            shutil.copy(rtree_filename + ".bkup.dat", rtree_filename + ".dat")
            shutil.copy(rtree_filename + ".bkup.idx", rtree_filename + ".idx")
            raise
        finally:
            if fwd is not None:
                fwd.close()
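
A hedged sketch of the same rebuild-from-stream idea in isolation, assuming the rtree package's bulk-loading constructor and item objects with id, bbox, and object attributes (file names are illustrative):

from rtree import Rtree

old = Rtree('vrt1.bkup')
# Stream every item out of the old index and bulk-load a fresh one.
stream = ((item.id, item.bbox, item.object)
          for item in old.intersection(old.bounds, objects=True))
new = Rtree('vrt1', stream, pagesize=old.properties.pagesize)
new.close()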
Example #3
class New:
  def __init__(self, dbPath=setup.dbPath):
    self.db = DB(FileStorage.FileStorage(dbPath))

  def connect(self, app=None):
    conn = Connection(self.db.open(), db=self.db)
    return conn, conn.root()

  def close(self):
    self.db.close()

  def pack(self):
    self.db.pack()
Example #4
    class ZODB(AbstractDatabase):

        name = 'zodb'

        def __init__(self, conf=None):
            if conf is None:
                raise Exception('Path required.')
            if not os.path.exists(conf):
                os.makedirs(conf)
            storage = FileStorage.FileStorage(os.path.join(conf, 'db'),
                                              pack_keep_old=False)
            self._tmp_path = os.path.join(conf, 'db.tmp')
            self._db = DB(storage)
            self._connection = self._db.open()
            self._root = self._connection.root()
            if getattr(self._root, 'db', None) is None:
                self._root.db = BTrees.OOBTree.BTree()
            self._root_db = self._root.db
            self._transaction = transaction
            self._bytes_written = 0

        def put(self, k, v):
            self._root_db[k] = v
            self._bytes_written += len(k) + len(v)
            if self._bytes_written >= 104857600:  # pack roughly every 100 MiB written
                self.compact()

        def get(self, k):
            db = self._root_db
            return db[k] if k in db else None

        def delete(self, k):
            del self._root_db[k]

        def close(self):
            self._transaction.commit()
            self._db.close()
            try:
                os.remove(self._tmp_path)
            except OSError:
                pass

        def compact(self):
            self._transaction.commit()
            self._db.pack()
            self._bytes_written = 0

        def length(self):
            return len(self._root_db)

        def list(self):
            return self._root_db.keys()

        def savepoint(self):
            self._transaction.commit()
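
A hypothetical usage of this wrapper (the path and keys are illustrative, and AbstractDatabase is assumed to add no further requirements):

db = ZODB('/tmp/kvstore')      # creates /tmp/kvstore/db on first use
db.put('greeting', 'hello')    # heavy write volume eventually triggers compact()
print(db.get('greeting'))      # -> 'hello'
db.savepoint()                 # commits the current transaction
db.close()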
Example #5
class OurDB:

    _file_name = None

    def __init__(self, dir):
        from BTrees.OOBTree import OOBTree
        import transaction
        self.dir = dir
        self.getdb()
        conn = self.db.open()
        conn.root()['tree'] = OOBTree()
        transaction.commit()
        self.pos = self.db.storage._pos
        self.close()

    def getdb(self):
        from ZODB import DB
        from ZODB.FileStorage import FileStorage
        self._file_name = storage_filename = os.path.join(self.dir, 'Data.fs')
        storage = FileStorage(storage_filename)
        self.db = DB(storage)

    def gettree(self):
        self.getdb()
        conn = self.db.open()
        return conn.root()['tree']

    def pack(self):
        self.getdb()
        self.db.pack()

    def close(self):
        if self.db is not None:
            self.db.close()
            self.db = None

    def mutate(self):
        # Make random mutations to the btree in the database.
        import random
        import transaction
        tree = self.gettree()
        for dummy in range(100):
            if random.random() < 0.6:
                tree[random.randrange(100000)] = random.randrange(100000)
            else:
                keys = tree.keys()
                if keys:
                    del tree[keys[0]]
        transaction.commit()
        self.pos = self.db.storage._pos
        self.maxkey = self.db.storage._oid
        self.close()
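
OurDB is a small fixture used by ZODB's pack and recovery tests; a hypothetical driver for it might look like this (the temporary directory is illustrative):

import tempfile

d = OurDB(tempfile.mkdtemp())   # creates Data.fs holding an empty OOBTree
d.mutate()                      # random inserts/deletes, then closes the DB
d.pack()                        # reopens and packs away old revisions
d.close()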
Example #6
    def checkPackWithMultiDatabaseReferences(self):
        databases = {}
        db = DB(self._storage, databases=databases, database_name='')
        otherdb = ZODB.tests.util.DB(databases=databases, database_name='o')
        conn = db.open()
        root = conn.root()
        root[1] = C()
        transaction.commit()
        del root[1]
        transaction.commit()
        root[2] = conn.get_connection('o').root()
        transaction.commit()
        db.pack(time.time() + 1)
        assert len(self._storage) == 1
Example #7
    def checkPackWithMultiDatabaseReferences(self):
        databases = {}
        db = DB(self._storage, databases=databases, database_name='')
        otherdb = ZODB.tests.util.DB(databases=databases, database_name='o')
        conn = db.open()
        root = conn.root()
        root[1] = C()
        transaction.commit()
        del root[1]
        transaction.commit()
        root[2] = conn.get_connection('o').root()
        transaction.commit()
        db.pack(time.time() + 1)
        # some valid storages always return 0 for len()
        self.assertTrue(len(self._storage) in (0, 1))
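
Cross-database references like root[2] above only resolve when both databases share the same databases mapping. A minimal, self-contained wiring sketch (the database names are illustrative):

import transaction
from ZODB import DB
from ZODB.MappingStorage import MappingStorage

databases = {}
main = DB(MappingStorage(), databases=databases, database_name='main')
other = DB(MappingStorage(), databases=databases, database_name='other')

conn = main.open()
# Store a reference from 'main' to the root object of 'other'.
conn.root()['other_root'] = conn.get_connection('other').root()
transaction.commit()
conn.close()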
Example #8
    def checkRedundantPack(self):
        # It is an error to perform a pack with a packtime earlier
        # than a previous packtime.  The storage can't do a full
        # traversal as of the packtime, because the previous pack may
        # have removed revisions necessary for a full traversal.

        # It should be simple to test that a storage error is raised,
        # but this test case goes to the trouble of constructing a
        # scenario that would lose data if the earlier packtime was
        # honored.

        self._initroot()

        db = DB(self._storage)
        conn = db.open()
        root = conn.root()

        root["d"] = d = PersistentMapping()
        transaction.commit()
        snooze()

        obj = d["obj"] = C()
        obj.value = 1
        transaction.commit()
        snooze()
        packt1 = time.time()
        lost_oid = obj._p_oid

        obj = d["anotherobj"] = C()
        obj.value = 2
        transaction.commit()
        snooze()
        packt2 = time.time()

        db.pack(packt2)
        # BDBStorage allows the second pack, but doesn't lose data.
        try:
            db.pack(packt1)
        except StorageError:
            pass
        # This object would be removed by the second pack, even though
        # it is reachable.
        load_current(self._storage, lost_oid)
Example #9
    def checkRedundantPack(self):
        # It is an error to perform a pack with a packtime earlier
        # than a previous packtime.  The storage can't do a full
        # traversal as of the packtime, because the previous pack may
        # have removed revisions necessary for a full traversal.

        # It should be simple to test that a storage error is raised,
        # but this test case goes to the trouble of constructing a
        # scenario that would lose data if the earlier packtime was
        # honored.

        self._initroot()

        db = DB(self._storage)
        conn = db.open()
        root = conn.root()

        root["d"] = d = PersistentMapping()
        transaction.commit()
        snooze()

        obj = d["obj"] = C()
        obj.value = 1
        transaction.commit()
        snooze()
        packt1 = time.time()
        lost_oid = obj._p_oid

        obj = d["anotherobj"] = C()
        obj.value = 2
        transaction.commit()
        snooze()
        packt2 = time.time()

        db.pack(packt2)
        # BDBStorage allows the second pack, but doesn't lose data.
        try:
            db.pack(packt1)
        except StorageError:
            pass
        # This object would be removed by the second pack, even though
        # it is reachable.
        self._storage.load(lost_oid, "")
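
This variant is identical to Example #8 except for the last line: older storages took a version string as the second argument to load(), while newer ZODB dropped versions and the test suite grew a load_current helper. A hedged fragment, assuming the storage and lost_oid names from the test above:

from ZODB.utils import load_current   # present in recent ZODB releases

# Equivalent assertions that lost_oid is still loadable after the packs:
data, serial = load_current(storage, lost_oid)   # modern spelling
# data, serial = storage.load(lost_oid, "")      # old two-argument spelling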
Example #10
    def checkPackLotsWhileWriting(self):
        # This is like the other pack-while-writing tests, except it packs
        # repeatedly until the client thread is done.  At the time it was
        # introduced, it reliably provoked
        #     CorruptedError:  ... transaction with checkpoint flag set
        # in the ZEO flavor of the FileStorage tests.

        db = DB(self._storage)
        conn = db.open()
        root = conn.root()

        choices = range(10)
        for i in choices:
            root[i] = MinPO(i)
        transaction.commit()

        snooze()
        packt = time.time()

        for dummy in choices:
            for i in choices:
                root[i].value = MinPO(i)
                transaction.commit()

        NUM_LOOP_TRIP = 100
        timer = ElapsedTimer(time.time())
        thread = ClientThread(db, choices, NUM_LOOP_TRIP, timer, 0)
        thread.start()
        while thread.is_alive():
            db.pack(packt)
            snooze()
            packt = time.time()
        thread.join()

        # Iterate over the storage to make sure it's sane.
        if not hasattr(self._storage, "iterator"):
            return
        it = self._storage.iterator()
        for txn in it:
            for data in txn:
                pass
        it.close()
Example #11
def convert_files(fname, debug=False):
    try:
        clean_old(fname)

        storage = FileStorage.FileStorage(fname+".idx")
        db = DB(storage)
        connection = db.open()
        dbroot = connection.root()
        root = dbroot['isis']

        read_l0x_records(fname,root,10,debug)
        read_l0x_records(fname,root,30,debug)

        connection.close()
        db.pack()
        db.close()
        storage.close()

        print "\nInverted files from database %s converted to %s.idx\n" %(fname,fname)
    except Exception, e:
        print "\n** Error converting inverted file to database: %s (%s)\n" % (fname, str(e))
Example #12
class Db_zodb:
    def __init__(self, tkroot=None):
        self.pathdb = None
        self.pathdbrep = None
        self.db_tuple = None
        self.tkroot = tkroot
        self.Listes = class_listes.Listes()
        self.prefs = preferences.Preferences()

        self.storage     = None
        self.db          = None
        self.connection  = None
        self.root        = None
        self.changed = None
        self.noundo = 0
        self.UNDO = False
        
    def initialize(self, path):
        self.pathdb = path
        self.pathdbrep = os.path.normpath(os.path.split(os.path.abspath(path))[0])
        class_tree.PATHDBREP = self.pathdbrep

        self.clean()

        # Connect to DB
        self.storage     = FileStorage.FileStorage(path)
        self.db          = DB(self.storage)
        self.connection  = self.db.open()
        self.root        = self.connection.root()
        self.changed = False

        
    def open(self, path=None):
        """Open ZODB.

        Returns a db_tuple tuple consisting of (root, connection, db, storage).
        The same tuple must be passed to close_zodb() in order to close the DB.
        """

        if path is None:
            mess = 'please start by opening a database file'
            if self.tkroot:
                self.tkroot.dia.Message(mess)
            else:
                print(mess)
            return

        if self.pathdb is not None:
            self.save()
            self.close()

        self.pathdb = path
        self.dbname = os.path.splitext(os.path.split(os.path.normpath(path))[1])[0]
        self.initialize(path)
        
        # get saved lists
        if 'listes' in self.root.keys():
            self.Listes = self.root['listes']            
            self.Listes.update()
        else:
            self.root['listes'] = self.Listes

        # get preferences
        if 'preferences' in self.root.keys():
            self.prefs = self.root['preferences']
            self.prefs.checkprefs()
        else:
            self.root['preferences'] = self.prefs
        
        if 'root_reps' in self.root.keys():
            try:
                self.root['root_reps'].reps
            except AttributeError:
                # old-style container: convert to the new Rootreps style
                rootreps = class_tree.Rootreps()
                oldrootreps = self.root['root_reps']
                for rootrep in oldrootreps.values():
                    rootreps.add(rootrep)
                rootreps._p_changed = 1
                self.root['root_reps'] = rootreps
                
        if 'root_reps' not in self.root:
            self.root['root_reps'] = class_tree.Rootreps()
        
        # say db is opened
        mess = self.pathdb + ' database opened.'

        self.refresh_gui()
        self.db_tuple = (self.root, self.connection, self.db, self.storage)
        
        self.make_backup()
    
    def make_backup(self):
        platforms.copy_file(self.pathdb, self.pathdb+'_backup')

    def refresh_gui(self):
        # apply esono changes in case gui is launched
        if self.tkroot:
            # refresh title and field names
            self.tkroot.title('e-sonoclaste     version 0.'+self.tkroot.version+'  ::  '+self.pathdb)
            self.tkroot.editor.fields_buttons.change_fields_labels()
            self.tkroot.editor.fields_buttons.change_fields_states()

            # listes menu
            
    # refreshes database
    def sort_by_refs(self, rep):
        rep.sort_by_refs()
        for rep in rep.reps:
            self.sort_by_refs(rep)

    def refresh(self, tkroot=None):
        for root_rep in self.root['root_reps'].reps:
            tree_parse.parse(root_rep.path(), root_rep, refresh=True, tkroot=tkroot, db=self)
            self.sort_by_refs(root_rep)

    def add_rep(self, root_rep):
        self.root['root_reps'].add(root_rep)
        self.root['root_reps']._p_changed = 1
        # print 'add_rep and save'
        #self.save(gui=False)

    def remove_rep(self, root_rep):
        self.root['root_reps'].rm(root_rep)
        self.root['root_reps']._p_changed = 1

    def change_root(self, rootrep_path):
        self.root['root_reps'].reps[0].fields.setref(rootrep_path)
        #for root_rep in self.root['root_reps'].reps:
        #    root_rep.fields.setref(rootrep)

    def clean(self):
        # remove all database temporary files
        try:
            tmpfiles = glob.glob(self.pathdb+'.*')
            for tmpfile in tmpfiles:
                platforms.remove_file(tmpfile)
        except:
            print('could not clean temporary files for database')

    def close(self):
        """Close the ZODB.

        This function MUST be called at the end of each program !!!
        """
        try:
            self.pack()
            transaction.abort()
            self.connection.close()
            self.db.close()
            self.storage.close()
            self.clean()
        except:
            print('could not close database')

        self.pathdb = None
        self.db_tuple = None
        self.storage     = None
        self.db          = None
        self.connection  = None
        self.root        = None
        self.changed = None
        
        
    def save(self, gui=True):
        if self.pathdb is None:
            mess = 'please start by opening a database file'
            if self.tkroot and gui:
                self.tkroot.dia.message(mess)
            else:
                print(mess)
            return
        
        transaction.commit()
        
        self.changed = False

        if self.tkroot and gui:
            mess = self.tkroot.trad.saved()
            # self.tkroot.dia.message(mess)
            # print 'database saved'
        else:
            # print 'database saved'
            pass

    def auto_save(self):
        transaction.commit()
        if self.UNDO:
            self.pack()
            self.UNDO = False
            self.noundo = 0
            
    def undo_list(self, data_base):
        """List of undo log, with latest transaction first.

        The time is converted into a readable time.
        """
        undolog = data_base.undoLog(0, sys.maxsize)
        # convert the time stamp into something readable
        for transact in undolog:
            transact['time'] = time.ctime(transact['time'])
        # convert into a list of lists
        ret = []
        for transact in undolog:
            id   = transact['id']
            usr  = transact['user_name']
            tme  = transact['time']
            des  = transact['description']
            ret.append([tme,id,usr,des])
        return ret


    def undo(self):
        undo_list = self.undo_list(self.db)
        if not undo_list: return
        if self.noundo>=len(undo_list): return
        idundo = undo_list[self.noundo][1]
        self.db.undo(idundo)
        transaction.commit()
        self.noundo+=2
        self.tkroot.tree_gui.display_all()
        self.tkroot.listes_gui.display()
        undo_list = self.undo_list(self.db)
        self.UNDO = True
        
    def pack(self):
        if self.db:
            self.db.pack()
            
# =================================== SEARCH FUNCTIONS
    def search(self, key, elt=None, fields_numbers=None):
        rsl = {}
        if not elt:
            for root_rep in self.root['root_reps']:
                rsl = self.search_rec(key, root_rep, fields_numbers, rsl)
        else:
            rsl = self.search_rec(key, elt, fields_numbers, rsl)
        return rsl
    
    def key2rsl(self, rsl, elt, fieldname):
        if elt in rsl.keys():
            rsl[elt].append(str(fieldname))
        else:
            rsl[elt] = [str(fieldname)]
        return rsl
    
    def search_in(self, key, elt, fields_numbers, rsl):
        if elt.fields.name.lower().find(key.lower()) > -1:
            rsl = self.key2rsl(rsl, elt, 'name')
        if fields_numbers is None:
            fields_numbers = range(len(elt.fields.contents))
        for fieldno in fields_numbers:
            content = elt.fields.contents[fieldno]
            if content != u'':
                if content.lower().find(key.lower()) > -1:
                    rsl = self.key2rsl(rsl, elt, fieldno)
        return rsl
    
    def search_rec(self, key, elt, fields_numbers, rsl):
        rsl = self.search_in(key, elt, fields_numbers, rsl)
        if elt.type=='rep':
            for f in elt.files:
                rsl = self.search_rec(key, f, fields_numbers, rsl)
                for marker in f.markers:
                    rsl = self.search_rec(key, marker, fields_numbers, rsl)
            for rep in elt.reps:
                rsl = self.search_rec(key, rep, fields_numbers, rsl)
        return rsl
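
A hypothetical lifecycle for this class, run without a Tk GUI so that messages fall through to print (the path is illustrative):

db = Db_zodb()                # no tkroot: messages go to stdout
db.open('/data/project.fs')   # also writes a *_backup copy of the file
db.save(gui=False)            # commits the current transaction
db.close()                    # packs, aborts, closes, and cleans temp files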
Example #13
    def _PackWhileWriting(self, pack_now):
        # A storage should allow some reading and writing during
        # a pack.  This test attempts to exercise locking code
        # in the storage to test that it is safe.  It generates
        # a lot of revisions, so that pack takes a long time.

        db = DB(self._storage)
        conn = db.open()
        root = conn.root()

        for i in range(10):
            root[i] = MinPO(i)
        transaction.commit()

        snooze()
        packt = time.time()

        choices = list(range(10))
        for dummy in choices:
            for i in choices:
                root[i].value = MinPO(i)
                transaction.commit()

        # How many client threads should we run, and how long should we
        # wait for them to finish?  Hard to say.  Running 4 threads and
        # waiting 30 seconds too often left a thread still alive on Tim's
        # Win98SE box, during ZEO flavors of this test.  Those tend to
        # run one thread at a time to completion, and take about 10 seconds
        # per thread.  There doesn't appear to be a compelling reason to
        # run that many threads.  Running 3 threads and waiting up to a
        # minute seems to work well in practice.  The ZEO tests normally
        # finish faster than that, and the non-ZEO tests very much faster
        # than that.
        NUM_LOOP_TRIP = 50
        timer = ElapsedTimer(time.time())
        threads = [
            ClientThread(db, choices, NUM_LOOP_TRIP, timer, i)
            for i in range(3)
        ]
        for t in threads:
            t.start()

        if pack_now:
            db.pack(time.time())
        else:
            db.pack(packt)

        for t in threads:
            t.join(60)
        liveness = [t.is_alive() for t in threads]
        if True in liveness:
            # They should have finished by now.
            print('Liveness:', liveness)
            # Combine the outcomes, and sort by start time.
            outcomes = []
            for t in threads:
                outcomes.extend(t.outcomes)
            # each outcome list has as many of these as a loop trip got thru:
            #     thread_id
            #     elapsed millis at loop top
            #     elapsed millis at attempt to assign to self.root[index]
            #     index into self.root getting replaced
            #     elapsed millis when outcome known
            #     'OK' or 'Conflict'
            #     True if we got beyond this line, False if it raised an
            #         exception (one possible Conflict cause):
            #             self.root[index].value = MinPO(j)
            def sort_key(outcome):
                return (outcome[1], outcome[0])

            outcomes.sort(key=sort_key)
            counts = [0] * 4
            for outcome in outcomes:
                n = len(outcome)
                assert n >= 2
                tid = outcome[0]
                print('tid:%d top:%5d' % (tid, outcome[1]), end=' ')
                if n > 2:
                    print('commit:%5d' % outcome[2], end=' ')
                    if n > 3:
                        print('index:%2d' % outcome[3], end=' ')
                        if n > 4:
                            print('known:%5d' % outcome[4], end=' ')
                            if n > 5:
                                print('%8s' % outcome[5], end=' ')
                                if n > 6:
                                    print('assigned:%5s' % outcome[6], end=' ')
                counts[tid] += 1
                if counts[tid] == NUM_LOOP_TRIP:
                    print('thread %d done' % tid, end=' ')
                print()

            self.fail('a thread is still alive')

        self._sanity_check()
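
ClientThread itself lives in ZODB's test-support code and records the detailed outcome tuples described in the comment above. A minimal sketch of the same idea, using only the public ZODB API: a writer thread that loops committing updates and retries on conflicts.

import threading
import transaction
from ZODB.POSException import ConflictError

class SimpleWriter(threading.Thread):
    """Commit repeated writes while another thread packs the database."""

    def __init__(self, db, choices, trips):
        super().__init__()
        self.db, self.choices, self.trips = db, choices, trips

    def run(self):
        conn = self.db.open()          # each thread needs its own connection
        root = conn.root()
        for _ in range(self.trips):
            for i in self.choices:
                try:
                    root[i].value = i  # the real ClientThread assigns MinPO(i)
                    transaction.commit()
                except ConflictError:
                    transaction.abort()
        conn.close()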
Example #14
class DBConn(object):
    def __init__(self, db_name):
        self.db_name = db_name
        self.zdbroot = None

    def get_filepath(self):
        return "%s.%s" % (db_path, self.db_name)

    def load_db(self):
        if self.zdbroot:
            return
        self.delete_extra_files()
        fp = self.get_filepath()
        log.debug("zodb load_db 1")
        storage = FileStorage.FileStorage(fp)
        log.debug("zodb load_db 2")
        self.db = DB(storage, cache_size=1000)
        log.debug("zodb load_db 3")
        self.conn = self.db.open()
        log.debug("zodb load_db 4")
        self.zdbroot = self.conn.root()
        log.debug("zodb load_db 5")

    def get_section(self, section_key, tp=None):
        log.debug("zodb a")
        if self.zdbroot is None:
            self.load_db()
        log.debug("zodb b")
        if section_key in self.zdbroot:
            section = self.zdbroot[section_key]
        else:
            if tp is None:
                tp = OOBTree.OOBTree
            section = self.zdbroot[section_key] = tp()
        return section

    def root(self):
        if not self.zdbroot:
            self.load_db()
        return self.zdbroot

    def pack(self, *args, **kwargs):
        self.db.pack(*args, **kwargs)
        self.delete_extra_files()

    def close(self):
        try:
            self.conn.close()
            self.conn = None
            self.db.close()
        except AttributeError:
            pass

    def delete_all_dangerous(self):
        """ Only use this for test code!!!"""
        exts = ["", ".tmp", ".lock", ".index", ".old"]
        self.delete_files(exts)

    def delete_extra_files(self):
        """ Delete as yet unnecessary files """
        exts = [".tmp", ".lock", ".index", ".old"]
        self.delete_files(exts)

    def delete_files(self, exts):
        for ext in exts:
            file_path = "%s%s" % (self.get_filepath(), ext)
            try:
                os.unlink(file_path)
            except OSError:
                pass
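
The extra files deleted above are FileStorage's sidecar files: after a pack, the pre-pack contents are kept next to the data file as a .old copy unless the storage was opened with pack_keep_old=False, as Example #4 does. A one-line sketch:

storage = FileStorage.FileStorage('Data.fs', pack_keep_old=False)  # no .old left after pack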
Example #15
class StorageManager(object):
    """Persistence tool for entity instances."""

    __name__ = None

    def __init__(self, path=None, zodb_storage=None, connection=None):
        if not any([path, zodb_storage, connection]):
            zodb_storage = MappingStorage("test")
        if path is not None:
            zodb_storage = FileStorage(path)
        if zodb_storage is not None:
            self._db = DB(zodb_storage)
            self._zodb_storage = zodb_storage
            self.connection = self._db.open()
        if connection is not None:
            self.connection = connection
            self._db = self.connection._db
        self._root = self.connection.root()

    def __getitem__(self, namespace):
        """Container behavior"""
        return self._root[namespace]

    def __resource_url__(self, request, info):
        """For compatibility with pyramid traversal"""
        return info["app_url"]

    def register(self, *instances):
        """Register new instances to appropriate namespaces"""
        for instance in instances:
            namespace = instance.namespace
            if namespace not in self._root:
                self._root[namespace] = OOBTree.BTree()
            if instance.key not in self._root[namespace]:
                self._root[namespace][instance.key] = instance

    def delete(self, *instances):
        """Delete instances from appropriate namespaces"""
        for instance in instances:
            instance.delete_from(self)

    def delete_key(self, namespace, key):
        """Delete given key in the namespace"""
        try:
            del self._root[namespace][key]
            return True
        except KeyError:
            return False

    def get(self, namespace, key):
        """Get instance from appropriate namespace by the key"""
        try:
            return self._root[namespace][key]
        except KeyError:
            return None

    def get_all(self, namespace, objects_only=True):
        """Get all instances from namespace"""
        result = None
        if namespace in self._root:
            result = self._root[namespace]
        if objects_only:
            return result.values()
        else:
            return result

    def close(self):
        """Close ZODB connection and storage"""
        self.connection.close()
        self._zodb_storage.close()

    def load_fixtures(self, path):
        """Load fixtures from JSON file in path. Mostly for testing"""
        result = dict()
        with open(path) as f:
            fixture_list = json.load(f)
            for fixture in fixture_list:
                entity_class_name = fixture.pop("class")
                import sys

                entity_class = getattr(sys.modules[__name__], entity_class_name)
                instance, stats = entity_class.assemble(storage_manager=self, **fixture)
                if entity_class.namespace not in result:
                    result[entity_class.namespace] = list()
                result[entity_class.namespace].append(instance)
        return result

    def pack(self):
        """Perform ZODB pack"""
        self._db.pack()
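
A hypothetical usage of StorageManager with its in-memory default; the Note class is illustrative, standing in for whatever entity types the surrounding project defines:

class Note:
    namespace = 'notes'          # illustrative entity, not from the source
    def __init__(self, key):
        self.key = key

sm = StorageManager()            # no arguments: falls back to MappingStorage('test')
sm.register(Note('a'))
print(sm.get('notes', 'a'))      # -> the Note instance
sm.pack()
sm.close()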
Example #16
    def _PackWhileWriting(self, pack_now):
        # A storage should allow some reading and writing during
        # a pack.  This test attempts to exercise locking code
        # in the storage to test that it is safe.  It generates
        # a lot of revisions, so that pack takes a long time.

        db = DB(self._storage)
        conn = db.open()
        root = conn.root()

        for i in range(10):
            root[i] = MinPO(i)
        transaction.commit()

        snooze()
        packt = time.time()

        choices = list(range(10))
        for dummy in choices:
            for i in choices:
                root[i].value = MinPO(i)
                transaction.commit()

        # How many client threads should we run, and how long should we
        # wait for them to finish?  Hard to say.  Running 4 threads and
        # waiting 30 seconds too often left a thread still alive on Tim's
        # Win98SE box, during ZEO flavors of this test.  Those tend to
        # run one thread at a time to completion, and take about 10 seconds
        # per thread.  There doesn't appear to be a compelling reason to
        # run that many threads.  Running 3 threads and waiting up to a
        # minute seems to work well in practice.  The ZEO tests normally
        # finish faster than that, and the non-ZEO tests very much faster
        # than that.
        NUM_LOOP_TRIP = 50
        timer = ElapsedTimer(time.time())
        threads = [ClientThread(db, choices, NUM_LOOP_TRIP, timer, i)
                   for i in range(3)]
        for t in threads:
            t.start()

        if pack_now:
            db.pack(time.time())
        else:
            db.pack(packt)

        for t in threads:
            t.join(60)
        liveness = [t.is_alive() for t in threads]
        if True in liveness:
            # They should have finished by now.
            print('Liveness:', liveness)
            # Combine the outcomes, and sort by start time.
            outcomes = []
            for t in threads:
                outcomes.extend(t.outcomes)
            # each outcome list has as many of these as a loop trip got thru:
            #     thread_id
            #     elapsed millis at loop top
            #     elapsed millis at attempt to assign to self.root[index]
            #     index into self.root getting replaced
            #     elapsed millis when outcome known
            #     'OK' or 'Conflict'
            #     True if we got beyond this line, False if it raised an
            #         exception (one possible Conflict cause):
            #             self.root[index].value = MinPO(j)
            def sort_key(outcome):
                return (outcome[1], outcome[0])
            outcomes.sort(key=sort_key)
            counts = [0] * 4
            for outcome in outcomes:
                n = len(outcome)
                assert n >= 2
                tid = outcome[0]
                print('tid:%d top:%5d' % (tid, outcome[1]), end=' ')
                if n > 2:
                    print('commit:%5d' % outcome[2], end=' ')
                    if n > 3:
                        print('index:%2d' % outcome[3], end=' ')
                        if n > 4:
                            print('known:%5d' % outcome[4], end=' ')
                            if n > 5:
                                print('%8s' % outcome[5], end=' ')
                                if n > 6:
                                    print('assigned:%5s' % outcome[6], end=' ')
                counts[tid] += 1
                if counts[tid] == NUM_LOOP_TRIP:
                    print('thread %d done' % tid, end=' ')
                print()

            self.fail('a thread is still alive')

        self._sanity_check()

        db.close()
Example #17
def packing():
    storage = FileStorage.FileStorage(CUON_FS)
    db = DB(storage)
    db.pack()
    db.close()  # releases the file lock; closing the DB also closes the storage
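
DB.pack accepts either an absolute cut-off time t or a days argument counted back from the present; a sketch of the relative form (the path is illustrative):

from ZODB import DB
from ZODB.FileStorage import FileStorage

db = DB(FileStorage('Data.fs'))
db.pack(days=7)    # keep a week of old revisions, pack away the rest
db.close()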
Example #18
                post = PersistentList(['John Doe',time.time(),
                    'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
                    comment])
                dbroot[i1][3][i2][3][i3][3].append(post)
                c_commit()
                for i5 in xrange(10):
                    comment = PersistentList()
                    post = PersistentList(['John Doe',time.time(),
                        'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
                        comment])
                    dbroot[i1][3][i2][3][i3][3][i4][3].append(post)
                    c_commit()
                    for i6 in xrange(10):
                        comment = PersistentList()
                        post = PersistentList(['John Doe',time.time(),
                            'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
                            comment])
                        dbroot[i1][3][i2][3][i3][3][i4][3][i5][3].append(post)
                        c_commit()

    f.write("%s\n" % (time.time() - t))

t = time.time()
db.pack()
f.write("\n\nPackZODB time: %s" % (time.time() - t))
f.close()
# closing ZODB database
connection.close()
db.close()

Example #19
class GitHubClient:
    def __init__(self) -> None:
        """Initialize the GitHub client and load state from db"""
        self.CONFIG = load_config()
        self.storage = ClientStorage(self.CONFIG["port"])
        self.db = DB(self.storage)
        self._client = github3.login(token=self.CONFIG["token"])
        self._init_db()

    def _init_db(self):
        with self.db.transaction() as conn:
            try:
                conn.root.pull_requests
            except AttributeError:
                conn.root.pull_requests = {}  # PersistentMapping()
                conn.root.notifications = {}  # PersistentMapping()
                conn.root.codeowners = {}  # PersistentMapping()
                conn.root.team_members = {}  # PersistentMapping()
                conn.root.mentioned = set()
                conn.root.team_mentioned = set()
                conn.root.last_update = None

    @classmethod
    def run_server(cls) -> None:
        """Run the GMB server

        Spins up the ZEO database server and a background scheduler to update state
        at the configured interval. Throws an exception if the server is already running.
        """
        if os.path.exists(CONFIG["pid_file"]):
            with open(CONFIG["pid_file"], "r") as fi:
                pid = int(fi.read().strip())
            if psutil.pid_exists(pid):
                raise Exception("Server already running!")
        with open(CONFIG["pid_file"], "w") as pidfile:
            pidfile.write(str(os.getpid()))
        logging.info("Starting server...")
        config = load_config()
        ZEO.server(path=CONFIG["db_location"], port=config["port"])
        client = cls()
        sched = BackgroundScheduler(daemon=True)
        sched.add_job(
            client.update,
            "interval",
            seconds=config["update_interval"],
            next_run_time=datetime.datetime.now(),
        )
        sched.start()
        logging.info("server running")
        try:
            while True:
                time.sleep(2)
        except (KeyboardInterrupt, SystemExit):
            sched.shutdown()

    def get_state(self, complete=False):
        """Get a dictionary specifying the current database state

        By default, excludes the following:
         - muted PRs and associated notifications
         - cleared notifications
         - PRs and associated notifications to be excluded based on the `mentions_only`
           and/or `team_mentions` flags
        If `complete` is True, this will return all notifications/PRs.

        Args:
            complete: If True, return all data, ignoring the `mentions_only`
                and `team_mentions` flags
        Returns:
            A dictionary of the complete database state, with the following keys:
            {
                "notifications":
                "pull_requests":
                "codeowners":
                "team_members":
                "last_update":
                "mentioned":
                "team_mentioned":
            }

        """
        mentions_only = False if complete else self.CONFIG["mentions_only"]
        with self.db.transaction() as conn:
            pull_requests = conn.root.pull_requests
            notifications = conn.root.notifications
            if mentions_only:
                pull_requests = {
                    pr_id: pr
                    for pr_id, pr in pull_requests.items()
                    if (pr_id in conn.root.mentioned) or (
                        pr["author"] == self.CONFIG["user"]) or (
                            self.CONFIG["team_mentions"]
                            and pr_id in conn.root.team_mentioned)
                }
                notifications = {
                    notif_id: notif
                    for notif_id, notif in notifications.items()
                    if notif.get("pr_id") in conn.root.mentioned
                    or pull_requests.get(notif.get("pr_id"), {}).get(
                        "author") == self.CONFIG["user"]
                }
            if not complete:
                pull_requests = {
                    pr_id: pr
                    for pr_id, pr in pull_requests.items() if not pr["muted"]
                }
                notifications = {
                    notif_id: notif
                    for notif_id, notif in notifications.items()
                    if not pull_requests.get(notif.get("pr_id"), {}).get(
                        "muted") and not notif["cleared"]
                }

            return {
                "notifications": notifications,
                "pull_requests": pull_requests,
                "codeowners": conn.root.codeowners,
                "team_members": conn.root.team_members,
                "last_update": conn.root.last_update,
                "mentioned": conn.root.mentioned,
                "team_mentioned": conn.root.team_mentioned,
            }

    def _transform_pr_url(self, api_url):
        """Transform a pull request API URL to a browser URL"""
        return api_url.replace("api.github.com/repos",
                               "github.com").replace("/pulls/", "/pull/")

    def rate_limit(self):
        """Get rate limit information from the github3 client"""
        return self._client.rate_limit()

    def get_muted_prs(self) -> list:
        """Retrieve information on all PRs the user has muted"""
        with self.db.transaction() as conn:
            return [
                pr for pr in conn.root.pull_requests.values() if pr["muted"]
            ]

    def mute_pr(self, id_) -> None:
        """Mute a PR"""
        with self.db.transaction() as conn:
            pull_requests = conn.root.pull_requests
            pull_requests[id_]["muted"] = True
            conn.root.pull_requests = pull_requests

    def unmute_pr(self, id_):
        """Unmute a PR"""
        with self.db.transaction() as conn:
            pull_requests = conn.root.pull_requests
            pull_requests[id_]["muted"] = False
            conn.root.pull_requests = pull_requests

    def clear_notification(self, notif_id):
        with self.db.transaction() as conn:
            notifications = conn.root.notifications
            notifications[notif_id]["cleared"] = True
            conn.root.notifications = notifications

    def clear_all_notifications(self):
        with self.db.transaction() as conn:
            notifications = conn.root.notifications
            for notif_id, notif in notifications.items():
                notifications[notif_id]["cleared"] = True
            conn.root.notifications = notifications

    def open_notification(self, notif_id):
        self.clear_notification(notif_id)
        with self.db.transaction() as conn:
            webbrowser.open(conn.root.notifications[notif_id]["pr_url"])

    def get_pull_requests(self):
        """Search for all pull_requests involving the user"""
        with self.db.transaction() as conn:
            user_teams = [
                team for repo, teams in conn.root.team_members.items()
                for team, members in teams.items()
                if self.CONFIG["user"] in members
            ]
        prs = []
        issue_pr_map = {}
        for issue in self._client.search_issues(
                f"is:open is:pr involves:{self.CONFIG['user']} archived:false"
        ):
            pr = issue.issue.pull_request()
            issue_pr_map[issue.id] = pr.id
            prs.append(pr)

        for issue in self._client.search_issues(
                f"is:open is:pr mentions:{self.CONFIG['user']} archived:false"
        ):
            with self.db.transaction() as conn:
                mentioned = conn.root.mentioned
                mentioned.add(issue_pr_map[issue.id])
                conn.root.mentioned = mentioned
        for team in user_teams:
            for issue in self._client.search_issues(
                    f"is:open is:pr team:{team} archived:false"):
                if issue.id not in issue_pr_map:
                    pr = issue.issue.pull_request()
                    prs.append(pr)
                    issue_pr_map[issue.id] = pr.id
                with self.db.transaction() as conn:
                    team_mentioned = conn.root.team_mentioned
                    team_mentioned.add(issue_pr_map[issue.id])
                    conn.root.team_mentioned = team_mentioned

        return prs

    def _notify(self, **kwargs):
        """Trigger a desktop notification (if they are enabled)"""
        if self.CONFIG["desktop_notifications"]:
            pync.notify(**kwargs)

    def _parse_notification(self, notification):
        notif = notification.subject.copy()
        comment_url = notif.get("latest_comment_url", "")
        if comment_url and "comments" in comment_url:
            notif["comment"] = json.loads(
                self._client._get(comment_url).content.decode())
        notif["cleared"] = False
        notif["reason"] = notification.reason
        notif["updated_at"] = arrow.get(notification.updated_at)
        return notif

    def _parse_pr_from_notification(self, notification):
        url = notification.subject["url"]
        url_info = url.replace("https://api.github.com/repos/", "").split("/")
        pr = self._client.pull_request(url_info[0], url_info[1],
                                       int(url_info[3]))
        parsed = self.parse_pull_request(pr, get_test_status=False)
        with self.db.transaction() as conn:
            pull_requests = conn.root.pull_requests
            pull_requests[pr.id] = parsed
            conn.root.pull_requests = pull_requests
        self.current_prs.add(pr.id)
        return parsed

    def _get_full_repo(self, pull_request):
        short_repo = pull_request.repository
        return self._client.repository(short_repo.owner.login, short_repo.name)

    def _get_protection(self, pull_request):
        full_repo = self._get_full_repo(pull_request)
        return full_repo.branch(pull_request.base.ref).original_protection

    def _update_pull_requests(self):
        self.protection = {}
        for pull_request in self.get_pull_requests():
            repo = pull_request.repository
            repo_key = f"{repo.owner.login}|{repo.name}"
            if pull_request.base.ref not in self.protection:
                self.protection[pull_request.base.ref] = self._get_protection(
                    pull_request)
            with self.db.transaction() as conn:
                codeowners = conn.root.codeowners
                if repo_key not in codeowners:
                    try:
                        codeowner_file = pull_request.repository.file_contents(
                            "CODEOWNERS")
                        codeowner_file_contents = codeowner_file.decoded.decode(
                        )
                        codeowners[repo_key] = self.parse_codeowners_file(
                            codeowner_file_contents)
                    except (NotFoundError, ForbiddenError):
                        codeowners[repo_key] = None
                    conn.root.codeowners = codeowners
            with self.db.transaction() as conn:
                team_members = conn.root.team_members
                if repo.owner.login not in team_members:
                    try:
                        org = self._client.organization(repo.owner.login)
                        team_members[repo.owner.login] = self.parse_teamembers(
                            org)
                    except (NotFoundError, ForbiddenError):
                        team_members[repo.owner.login] = None
                conn.root.team_members = team_members

            parsed = self.parse_pull_request(pull_request)
            with self.db.transaction() as conn:
                conn.root.pull_requests[pull_request.id] = parsed
            self.current_prs.add(pull_request.id)

    def _should_notify(self, notif):
        id_ = notif["pr_id"]
        with self.db.transaction() as conn:
            if conn.root.pull_requests[id_]["muted"]:
                return False
        if self.CONFIG["mentions_only"] and self.CONFIG["team_mentions"]:
            with self.db.transaction() as conn:
                return id_ in conn.root.mentioned or id_ in conn.root.team_mentioned
        if self.CONFIG["mentions_only"] and not self.CONFIG["team_mentions"]:
            with self.db.transaction() as conn:
                return id_ in conn.root.mentioned
        return True

    def _update_notifications(self, mentions_only=False):
        with self.db.transaction() as conn:
            prs_by_url = {
                pr["url"]: pr
                for pr in conn.root.pull_requests.values()
            }
        for notification in self._client.notifications():
            notif_id = int(notification.id)
            self.current_notifications[int(notification.id)] = notification
            with self.db.transaction() as conn:
                new = notif_id not in conn.root.notifications
            if new:
                parsed = self._parse_notification(notification)
            else:
                with self.db.transaction() as conn:
                    parsed = conn.root.notifications[notif_id]
                    if conn.root.notifications[notif_id]["cleared"]:
                        notification.mark()

            corresponding_pr = prs_by_url.get(notification.subject["url"])
            if (corresponding_pr is None
                    or corresponding_pr["id"] not in self.current_prs
                ) and parsed["type"] == "PullRequest":
                corresponding_pr = self._parse_pr_from_notification(
                    notification)
            parsed["pr_id"] = corresponding_pr["id"] if corresponding_pr else None
            parsed["pr_url"] = (corresponding_pr["browser_url"]
                                if corresponding_pr else None)
            if new and self._should_notify(parsed):
                self._notify(
                    title="New Notification",
                    message=f"\{notification.subject['title']}",
                    open=parsed["pr_url"],
                )

            with self.db.transaction() as conn:
                conn.root.notifications[notif_id] = parsed

    def update(self):
        self.current_notifications = {}
        self.current_prs = set()
        self._update_pull_requests()
        self._update_notifications(
            mentions_only=load_config()["mentions_only"])
        # clear any old notifications
        with self.db.transaction() as conn:
            conn.root.notifications = {
                id_: notif
                for id_, notif in conn.root.notifications.items()
                if id_ in self.current_notifications
            }
            # clear any old pull requests
            conn.root.pull_requests = {
                pr["id"]: pr
                for pr in conn.root.pull_requests.values()
                if pr["id"] in self.current_prs
            }
        with self.db.transaction() as conn:
            conn.root.last_update = arrow.now()
        self.db.pack()

    def parse_codeowners_file(self, file_contents):
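        # Each non-comment line of a CODEOWNERS file has the form
        # "<path-pattern> @owner [@owner ...]"; owners are normalized
        # (leading "@" stripped, sorted) so they compare as tuples.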
        codeowners = []
        for line in file_contents.split("\n"):
            line = line.strip()
            if line and not line.startswith("#"):
                path, owners = line.split(maxsplit=1)
                codeowners.append(
                    (path, tuple(sorted(owners.replace("@", "").split()))))
        return codeowners

    def parse_teamembers(self, org):
        team_members = {}
        for team in org.teams():
            members = [member.login for member in team.members()]
            team_name = "-".join(team.name.lower().split())
            team_members[f"{org.login}/{team_name}"] = members
        return team_members

    def get_pr_codeowners(self, pr, reviews):
        all_owners = {}
        with self.db.transaction() as conn:
            codeowner_info = conn.root.codeowners.get(
                f"{pr.repository.owner.login}|{pr.repository.name}")
        if codeowner_info:
            for file in pr.files():
                file_owners = None
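                # Later entries override earlier ones, mirroring
                # CODEOWNERS last-match-wins semantics.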
                for path, owners in codeowner_info:
                    if path == "*":
                        file_owners = owners
                    if path in f"/{file.filename}":
                        file_owners = owners
                if file_owners and file_owners not in all_owners:
                    approved = False
                    if reviews:
                        for owner in file_owners:
                            if "/" in owner:
                                with self.db.transaction() as conn:
                                    if any(user in conn.root.team_members.get(
                                            pr.repository.owner.login, {}).get(
                                                owner, ())
                                           and review["state"] == "APPROVED"
                                           for user, review in
                                           reviews.items()):
                                        approved = True
                                        break
                            else:
                                if any(user == owner
                                       and review["state"] == "APPROVED"
                                       for user, review in reviews.items()):
                                    approved = True
                                    break

                    all_owners["|".join(file_owners)] = approved
        return all_owners

    def parse_reviews(self, pull_request):
        reviews = {}
        for review in pull_request.reviews():
            # if review.user.login != self.CONFIG["user"]:
            reviews[review.user.login] = {"state": review.state}
        return reviews

    def _format_pr_description(self, pull_request):
        return (
            f"{pull_request.repository.owner.login}/{pull_request.repository.name} "
            f"{pull_request.number}: {pull_request.title}")

    def parse_pull_request(self, pull_request, get_test_status=True):
        reviews = self.parse_reviews(pull_request)
        with self.db.transaction() as conn:
            previous = conn.root.pull_requests.get(pull_request.id, {})
        parsed = {
            "base": pull_request.base.ref,
            "head": pull_request.head.ref,
            "mergeable": pull_request.mergeable,
            "mergeable_state": pull_request.mergeable_state,
            "description": self._format_pr_description(pull_request),
            "state": ("MERGED" if pull_request.merged
                      else pull_request.state.upper()),
            "url": pull_request.url,
            "browser_url": pull_request.html_url,
            "author": pull_request.user.login,
            "updated_at": arrow.get(pull_request.updated_at),
            "reviews": reviews,
            "muted": previous.get("muted", False),
            "id": pull_request.id,
            "repo": pull_request.repository.name,
            "org": pull_request.repository.owner.login,
            "protected": self.protection.get(pull_request.base.ref,
                                             {}).get("enabled", False),
            "title": pull_request.title,
            "number": pull_request.number,
        }
        parsed["test_status"] = ({} if pull_request.merged or not get_test_status
                                 else self._get_test_status(pull_request))
        parsed["owners"] = self.get_pr_codeowners(pull_request, reviews)

        if (previous and parsed["author"] == self.CONFIG["user"]
                and parsed["test_status"]):
            self._state_change_notification(parsed, previous)
        return parsed

    def _state_change_notification(self, current_pr, previous_pr):
        outcome = current_pr["test_status"]["outcome"]
        if (outcome != "pending"
                and previous_pr["test_status"]["outcome"] != outcome):
            self._notify(
                title="Test status change",
                subtitle=f"{current_pr['description']}",
                message=f"{current_pr['test_status']['outcome']}",
                open=current_pr["browser_url"],
            )
        if (current_pr["mergeable_state"] == "dirty"
                and previous_pr["mergeable_state"] != "dirty"):
            self._notify(
                title="Merge conflict",
                message=f"{current_pr['description']}",
                open=current_pr["browser_url"],
            )

    def _get_test_status(self, pull_request):
        repo = self._get_full_repo(pull_request)
        commit = repo.commit(repo.branch(pull_request.head.ref).latest_sha())
        protected = self.protection[pull_request.base.ref]["enabled"]
        in_progress = False
        suite_outcome = None
        runs = {}
        conclusions = set()
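        # Only checks that branch protection marks as required feed into the
        # overall outcome; optional checks are still recorded in `runs`.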
        required_contexts = self.protection[pull_request.base.ref][
            "required_status_checks"].get("contexts", [])
        for check in commit.check_runs():
            required = check.name in required_contexts
            runs[check.name] = (check.conclusion, required)
            if required:
                if check.status == "completed":
                    conclusions.add(check.conclusion)
                else:
                    in_progress = True
        if in_progress:
            suite_outcome = "in_progress"
        if protected and conclusions:
            if ("failure" in conclusions or "timed_out" in conclusions
                    or "action_required" in conclusions):
                suite_outcome = "failure"
            elif "cancelled" in conclusions:
                suite_outcome = "cancelled"
            else:
                suite_outcome = "success"
        return {"outcome": suite_outcome if protected else None, "runs": runs}
Example #24
def __init__(self):
    storage = FileStorage.FileStorage('database.fs')
    db = DB(storage)
    self.dba = db
    db.pack()
    self.connection = db.open()
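# DB.pack() also accepts a reference time `t` and a `days` offset, so a
# variant of the constructor above could retain a window of history instead
# of discarding all old revisions on every start (a sketch; the file name
# and one-week window are just examples):
from ZODB import DB, FileStorage

storage = FileStorage.FileStorage('database.fs')
db = DB(storage)
db.pack(days=7)  # pack away revisions older than seven days
connection = db.open()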
Example #25
class Database(_uniq):
    def __init__(self, name, read_only=True, thresh=1024):
        if not hasattr(self, 'name'):
            self.read_only = read_only
            self.name = name
            self._thresh = thresh
            self._load_()

    def _load_(self):
        import BTrees.OOBTree
        from ZODB import FileStorage, DB
        name = self.name
        read_only = self.read_only
        thresh = self._thresh
        if not os.path.exists("%s/%s"%(SAGE_DATA,name)):
            try:
                os.makedirs("%s/%s"%(SAGE_DATA,name))
            except OSError:    # for online calculator...
                pass
        self._dbname = "%s/%s/%s"%(SAGE_DATA, name, name)
        if self.read_only and not os.path.exists(self._dbname):
            raise RuntimeError, "The database %s is not installed."%self._dbname
        fs = FileStorage.FileStorage(self._dbname, read_only=self.read_only)
        self._storage = sage.databases.compressed_storage.CompressedStorage(fs, thresh=self._thresh)
        self._db = DB(self._storage)
        self.conn = self._db.open()
        self._root = self.conn.root()
        if not self._root.has_key("btree"):
            self._root["btree"] = BTrees.OOBTree.OOBTree()
        self.root = self._root["btree"]

    def begin(self):
        r"""Start a new database transaction"""
        import transaction
        transaction.get().begin()

    def abort(self):
        r"""Abort the current database transaction, without committing"""
        import transaction
        transaction.get().abort()

    def commit(self):
        """
        Commit the new version of this object to the database file.
        
        Note that if a data item corresponding to a key is changed,
        you still have to tell the database that that data item
        was changed by calling the changed method with that key.
        """
        if self.read_only:
            raise RuntimeError, "Cannot commit read only database."
        self._root._p_changed = 1
        import transaction
        transaction.get().commit()

    def changed(self, key):
        """
        Informs the database that some items corresponding to
        the given key may have changed.  This does not commit
        the changes to disk (use the commit function after
        calling changed to do that).
        """
        self.root._p_changed = 1
        X = self.root[key]
        self.root[key] = X

    def pack(self):
        """
        This function is not implemented -- I couldn't get pack
        working with compressed storage.  You can use the rebuild
        function instead, though it's slower than the usual ZODB pack,
        since it completely rebuilds the database from scratch.
        """
        raise NotImplementedError
        self._db.pack()
        self.commit()
        
    def rebuild(self, thresh=None):
        """
        Completely rebuild the database from scratch, by going
        through and writing everything out to a temporary database,
        then moving the temporary database files over self's
        files.  This can take a long time.

        The main reason for this function is that unfortunately I
        can't get pack to work on compressed ZODB databases.

        A copy of the old database file is created before rebuild.

        If you specify a thresh then that threshold is used for
        recompressing all the objects.  Note that the threshold is
        not saved as part of the database, so new objects will be
        compressed using whatever threshold you use when creating
        the database object.
        """
        import BTrees.OOBTree
        from ZODB import FileStorage, DB
        if self.read_only:
            raise RuntimeError, "Cannot pack read only database."
        if thresh is None:
            thresh = self._thresh
        else:
            self._thresh = thresh
        rebuild_name = self._dbname + "_rebuild"
        shutil.copy2(self._dbname, self._dbname + ".old")
        if os.path.exists(rebuild_name):
            os.unlink(rebuild_name)
        fs = FileStorage.FileStorage(rebuild_name, read_only=False)
        storage = sage.databases.compressed_storage.CompressedStorage(fs, thresh)
        db = DB(storage)
        conn = db.open()
        _root = conn.root()
        root = BTrees.OOBTree.OOBTree()
        _root["btree"] = root
        for k, x in self.root.iteritems():
            root[k] = x
        _root._p_changed = 1
        import transaction
        transaction.get().commit()
        shutil.move(rebuild_name, self._dbname)
        os.unlink(rebuild_name + ".tmp")
        os.unlink(rebuild_name + ".index")
        os.unlink(rebuild_name + ".lock")
        self.read_only = True
        

    def __repr__(self):
        return "Database %s"%self.name

    def __setitem__(self, x, y):
        try:
            self.root[x] = y
        except AttributeError:
            self._load_()
            self.root[x] = y
            
    def __getitem__(self, x):
        try:
            if not isinstance(x, slice):
                return self.root[x]
            return [self[k] for k in range(x.start, x.stop, x.step)]
        except AttributeError:
            self._load_()
            return self.root[x]

    def __delitem__(self, x):
        del self.root[x]

    def has_key(self, x):
        return bool(self.root.has_key(x))

    def keys(self):
        return self.root.keys()

    def as_dict(self, keys=None):
        """
        Return a dict representation of the database.
        
        Since the database could be large, if the optional keys
        parameter is given then only the elements of the database
        with key in keys are listed.
        """
        X = {}
        if keys is None:
            keys = self.root.keys()
        for k in keys:
            if self.has_key(k):
                X[k] = self.root[k]
        return X

    def dump_as_dict(self, filename, keys):
        from sage.misc.misc import sage_makedirs
        X = self.as_dict(keys)
        print "Dumping %s..."%filename
        s = cPickle.dumps(X,2)
        dir = "%s/pickles/"%SAGE_DATA
        sage_makedirs(dir)
        open("%s/%s"%(dir,filename), "w").write(s)

    def dump_as_dict_intervals(self, basename, Nstart, Nstop, length):
        N = Nstart
        while N <= Nstop:
            N2 = min(Nstop, N+length)
            Z = xrange(N, N2+1)
            self.dump_as_dict("%s_%s-%s"%(basename,N,N2), Z)
            N += length
        
    def restore_from_dict(self, filename):
        """
        Restore from the filename which must store a pickled dict.

        After loading the database is committed. 
        """
        if self.read_only:
            raise RuntimeError, "%s is read only."%self
        dir = "%s/pickles/"%SAGE_DATA
        s = open("%s/%s"%(dir,filename), "rb").read()
        print "Restoring %s..."%filename
        X = cPickle.loads(s)
        for k, x in X.iteritems():
            self.root[k] = x
        self.commit()

    def restore_from_dict_all(self, basename):
        """
        Restore all files that start with the given basename.

        Each file is loaded then committed to disk before the next
        file is loaded. 
        """
        X = os.listdir("%s/pickles/"%SAGE_DATA)
        n = len(basename)
        for F in X:
            if F[:n] == basename:
                self.restore_from_dict(F)

    def delete_all(self):
        """
        Delete every entry in the database.
        """
        import BTrees.OOBTree
        del self._root["btree"]
        self._root["btree"] = BTrees.OOBTree.OOBTree()
        self.root = self._root["btree"]

    def clone(self, new_name):
        """
        Copy the database to a new database with the given new_name.
        There must not be a database with the new_name already, or a
        RuntimeError exception is raised.
        """
        if os.path.exists("%s/%s"%(SAGE_DATA,new_name)):
            raise RuntimeError, "Cannot clone to %s since that database already exists."%new_name
        os.makedirs("%s/%s"%(SAGE_DATA,new_name))
        shutil.copy2("%s/%s/%s"%(SAGE_DATA,self.name,self.name), "%s/%s/%s"%(SAGE_DATA,new_name,new_name))
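# For a plain FileStorage without the compression layer, the same
# rebuild-instead-of-pack idea can be written more directly: copy the live
# contents into a fresh storage and swap the files. A minimal sketch, under
# the assumption that the stored values are ordinary picklable objects
# (persistent cross-database references would need extra care):
import shutil

import transaction
from ZODB import DB, FileStorage


def rebuild_plain(path):
    src_db = DB(FileStorage.FileStorage(path, read_only=True))
    dst_db = DB(FileStorage.FileStorage(path + '.rebuild'))
    src_conn, dst_conn = src_db.open(), dst_db.open()
    for key, value in src_conn.root().items():
        dst_conn.root()[key] = value
    transaction.commit()
    src_db.close()
    dst_db.close()
    # As in rebuild() above, the .index/.lock/.tmp files of the temporary
    # storage are left behind and can be removed afterwards.
    shutil.move(path + '.rebuild', path)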
Example #26
class ZodbBackend(object):

    description = 'Backend that directly uses ZODB 3.7.0'
    backend_args_help = """
    (no backend options)
    """

    __test__ = False

    BTree = OOBTree
    PDict = PersistentMapping
    PList = PersistentList

    TestMethods_CreatesDatabase = TestMethods_CreatesDatabase
    TestMethods_CreatesSchema = TestMethods_CreatesSchema
    TestMethods_EvolvesSchemata = TestMethods_EvolvesSchemata

    def __init__(self, filename):
        self._filename = filename
        self._is_open = False
        self.open()

    @classmethod
    def args_from_string(cls, s):
        """Return a dictionary of keyword arguments based on a string given
        to a command-line tool."""
        kw = {}
        if s is not None:
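            # This backend accepts no options, so any name=value pair
            # supplied on the command line is rejected.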
            for arg in (p.strip() for p in s.split(',')):
                name, value = (p2.strip() for p2 in arg.split('='))
                raise KeyError(
                    '%s is not a valid name for backend args' % name)
        return kw

    @classmethod
    def usable_by_backend(cls, filename):
        """Return (True, additional_backend_args) if the named file is
        usable by this backend, or False if not."""
        # Get first 128 bytes of file.
        with open(filename, 'rb') as f:
            header = f.read(128)
        # Look for ZODB signatures.
        if header[:4] == 'FS21' and 'persistent.mapping' in header:
            return (True, {})
        return False

    def open(self):
        if not self._is_open:
            self.storage = FileStorage(self._filename)
            self.zodb = DB(self.storage)
            self.conn = self.zodb.open()
            self._is_open = True

    def get_root(self):
        """Return the connection 'root' object."""
        return self.conn.root()

    @property
    def has_db(self):
        """Return True if the backend has a schevo db."""
        return self.get_root().has_key('SCHEVO')

    def commit(self):
        """Commit the current transaction."""
        transaction.commit()

    def rollback(self):
        """Abort the current transaction."""
        transaction.abort()

    def pack(self):
        """Pack the underlying storage."""
        self.zodb.pack()

    def close(self):
        """Close the underlying storage (and the connection if needed)."""
        self.rollback()
        self.conn.close()
        self.zodb.close()
        self.storage.close()
        self._is_open = False
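# Hypothetical usage of the backend above (the file name is an assumption):
backend = ZodbBackend('example.fs')
root = backend.get_root()
root['greeting'] = 'hello'
backend.commit()  # transaction.commit()
backend.pack()    # DB.pack(): drop non-current object revisions
backend.close()   # aborts pending changes, then closes conn, DB, storage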