Example #1
class WMLoggingTest(unittest.TestCase):
    def setUp(self):
        # Make an instance of the server
        self.server = CouchServer(os.getenv("COUCHURL", 'http://*****:*****@localhost:5984'))
        testname = self.id().split('.')[-1]
        # Create a database, drop an existing one first
        self.dbname = 'cmscouch_unittest_%s' % testname.lower()

        if self.dbname in self.server.listDatabases():
            self.server.deleteDatabase(self.dbname)

        self.server.createDatabase(self.dbname)
        self.db = self.server.connectDatabase(self.dbname)

    def tearDown(self):
        # This used to test self._exc_info to only run on success. Broke in 2.7. Removed.
        self.server.deleteDatabase(self.dbname)

    def testLog(self):
        """
        Write ten log messages to the database at three different levels
        """
        my_logger = logging.getLogger('MyLogger')
        my_logger.setLevel(logging.DEBUG)
        handler = CouchHandler(self.server.url, self.dbname)
        formatter = logging.Formatter('%(message)s')
        handler.setFormatter(formatter)
        my_logger.addHandler(handler)

        for _ in range(10):
            my_logger.debug('This is probably all noise.')
            my_logger.info('Jackdaws love my big sphinx of quartz.')
            my_logger.error('HOLLY CRAP!')
        logs = self.db.allDocs()['rows']
        self.assertEqual(30, len(logs))
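
The handler wiring in testLog also works outside of a test: point a CouchHandler at any database and each log record is stored as a CouchDB document. A minimal standalone sketch, assuming CouchServer and CouchHandler come from the same WMCore modules used by the tests (the import paths and database name below are a guess; adjust them to your checkout):

import logging
import os

from WMCore.Database.CMSCouch import CouchServer   # assumed import path
from WMCore.Database.CMSCouch import CouchHandler  # assumed import path

server = CouchServer(os.getenv("COUCHURL", "http://localhost:5984"))
if "app_logs" not in server.listDatabases():
    server.createDatabase("app_logs")

logger = logging.getLogger("app")
logger.setLevel(logging.INFO)
handler = CouchHandler(server.url, "app_logs")
handler.setFormatter(logging.Formatter("%(message)s"))
logger.addHandler(handler)

logger.info("stored as a document in the app_logs database")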
Example #2
class CouchAppTestHarness:
    """
    Test Harness for installing a couch database instance with several couchapps
    in a unittest.setUp and wiping it out in a unittest.tearDown
    """
    def __init__(self, dbName, couchUrl = None):
        self.couchUrl = os.environ.get("COUCHURL", couchUrl)
        self.dbName = dbName
        if self.couchUrl is None:
            msg = "COUCHURL env var not set..."
            raise RuntimeError(msg)
        self.couchServer = CouchServer(self.couchUrl)
        self.couchappConfig = Config()


    def create(self):
        """create couch db instance"""
        if self.dbName in self.couchServer.listDatabases():
            self.drop()

        self.couchServer.createDatabase(self.dbName)

    def drop(self):
        """blow away the couch db instance"""
        self.couchServer.deleteDatabase(self.dbName)

    def pushCouchapps(self, *couchappdirs):
        """
        push a list of couchapps to the database
        """
        for couchappdir in couchappdirs:
            couchapppush(self.couchappConfig, couchappdir, "%s/%s" % (self.couchUrl, urllib.quote_plus(self.dbName)))
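
A typical way to wire this harness into a test case, matching the setUp/tearDown usage described in the docstring (the database name and couchapp directory below are illustrative):

import unittest

class MyCouchAppTest(unittest.TestCase):
    def setUp(self):
        # COUCHURL must be set in the environment, otherwise __init__ raises
        self.harness = CouchAppTestHarness("myapp_unittest")
        self.harness.create()
        # "path/to/couchapp" is a placeholder for a real couchapp directory
        self.harness.pushCouchapps("path/to/couchapp")

    def tearDown(self):
        self.harness.drop()

    def testEmptyAtStart(self):
        db = self.harness.couchServer.connectDatabase("myapp_unittest")
        self.assertEqual(0, len(db.allDocs()['rows']))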
Example #3
File: API.py Project: dmwm/DQIS
    def setUp(self):        
        couch = CouchServer(dburl=self.DB_URL)
        if self.DB_NAME in couch.listDatabases():
            couch.deleteDatabase(self.DB_NAME)
        
        cdb = couch.connectDatabase(self.DB_NAME)

        #for dq_t in test_data.demo_data:
        #    cdb.queue(dq_t)
        
        cdb.commit()
        
        self.db = Database(dbname=self.DB_NAME)
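
The commented-out loop hints at the usual bulk-load pattern: queue documents client-side and flush them with a single commit. A small sketch of that pattern (the database name and documents are made up, imports as in the surrounding tests):

couch = CouchServer(dburl=os.environ["COUCHURL"])
cdb = couch.connectDatabase("dqis_unittest_demo")
for run in range(5):
    cdb.queue({'run': run, 'status': 'ok'})   # queue() only buffers locally
cdb.commit()                                  # one bulk write for everything queued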
Example #4
class CouchAppTestHarness:
    """
    Test Harness for installing a couch database instance with several couchapps
    in a unittest.setUp and wiping it out in a unittest.tearDown
    """
    def __init__(self, dbName, couchUrl=None):
        self.couchUrl = os.environ.get("COUCHURL", couchUrl)
        self.dbName = dbName
        if self.couchUrl is None:
            msg = "COUCHURL env var not set..."
            raise RuntimeError(msg)
        if self.couchUrl.endswith('/'):
            raise RuntimeError("COUCHURL env var shouldn't end with /")
        self.couchServer = CouchServer(self.couchUrl)
        self.couchappConfig = Config()

    def create(self, dropExistingDb=True):
        """create couch db instance"""
        #import pdb
        #pdb.set_trace()
        if self.dbName in self.couchServer.listDatabases():
            if not dropExistingDb:
                return
            self.drop()

        self.couchServer.createDatabase(self.dbName)

    def drop(self):
        """blow away the couch db instance"""
        self.couchServer.deleteDatabase(self.dbName)

    def pushCouchapps(self, *couchappdirs):
        """
        push a list of couchapps to the database
        """
        for couchappdir in couchappdirs:
            couchapppush(
                self.couchappConfig, couchappdir,
                "%s/%s" % (self.couchUrl, urllib.quote_plus(self.dbName)))
Example #5
class RotatingDatabaseTest(unittest.TestCase):
    def setUp(self):
        self.couchURL = os.getenv("COUCHURL")
        self.server = CouchServer(self.couchURL)
        # Kill off any databases left over from previous runs
        for db in [
                db for db in self.server.listDatabases()
                if db.startswith('rotdb_unittest_')
        ]:
            try:
                self.server.deleteDatabase(db)
            except:
                pass
        # Create a database, drop an existing one first
        testname = self.id().split('.')[-1].lower()
        self.dbname = 'rotdb_unittest_%s' % testname
        self.arcname = 'rotdb_unittest_%s_archive' % testname
        self.seedname = 'rotdb_unittest_%s_seedcfg' % testname
        # set a long value for times, tests do operations explicitly
        self.timing = {
            'archive': timedelta(seconds=1),
            'expire': timedelta(seconds=2)
        }

        self.db = RotatingDatabase(dbname=self.dbname,
                                   url=self.couchURL,
                                   archivename=self.arcname,
                                   timing=self.timing)

    def tearDown(self):
        testname = self.id().split('.')[-1].lower()
        if sys.exc_info()[0] is None:
            # This test has passed, clean up after it
            to_go = [
                db for db in self.server.listDatabases()
                if db.startswith('rotdb_unittest_%s' % testname)
            ]
            for dbname in to_go:
                try:
                    self.server.deleteDatabase(dbname)
                except CouchNotFoundError:
                    # db has already gone
                    pass

    def testRotate(self):
        """
        Test that rotation works
        """
        start_name = self.db.name
        self.db._rotate()
        end_name = self.db.name
        databases = [
            db for db in self.server.listDatabases()
            if db.startswith('rotdb_unittest_')
        ]
        self.assertTrue(start_name in databases)
        self.assertTrue(end_name in databases)

    def testArchive(self):
        """
        Test that archiving views works
        """
        dummy_view = {
            '_id': '_design/foo',
            'language': 'javascript',
            'views': {
                'bar': {
                    'map': "function(doc) {if (doc.foo) {emit(doc.int, 1);}}",
                    'reduce': '_sum'
                }
            }
        }
        archive_view = {
            '_id': '_design/foo',
            'language': 'javascript',
            'views': {
                'bar': {
                    'map': "function(doc) {emit(doc.key, doc.value);}",
                    'reduce': '_sum'
                }
            }
        }

        seed_db = self.server.connectDatabase(self.seedname)
        seed_db.commit(dummy_view)
        # Need to have the timing long enough so the data isn't archived by accident
        self.timing = {
            'archive': timedelta(seconds=1000),
            'expire': timedelta(seconds=2000)
        }
        self.db = RotatingDatabase(dbname=self.dbname,
                                   url=self.couchURL,
                                   views=['foo/bar'],
                                   archivename=self.arcname,
                                   timing=self.timing)
        self.db.archive_db.commitOne(archive_view)
        runs = 5
        docs = 5
        for run in range(runs):
            for i in range(docs):
                self.db.queue({'foo': 'bar', 'int': i, 'run': run})
            self.db.commit()
            self.db._rotate()
        self.db._archive()
        view_result = self.db.archive_db.loadView('foo', 'bar')
        arch_sum = view_result['rows'][0]['value']
        self.assertEqual(arch_sum, runs * docs)

    def testExpire(self):
        """
        Test that expiring databases works
        """
        # rotate out the original db
        self.db._rotate()
        archived = self.db.archived_dbs()
        self.assertEqual(1, len(archived),
                         'test not starting from clean state, bail!')
        # Make sure the db has expired
        sleep(2)
        self.db._expire()
        self.assertEqual(0, len(self.db.archived_dbs()))
        self.assertFalse(archived[0] in self.server.listDatabases())

    @attr("integration")
    def testCycle(self):
        """
        Test that committing data to different databases happens
        This is a bit of a dodgy test - if timings go funny it will fail
        """
        self.timing = {
            'archive': timedelta(seconds=0.5),
            'expire': timedelta(seconds=1)
        }
        self.db = RotatingDatabase(dbname=self.dbname,
                                   url=self.couchURL,
                                   archivename=self.arcname,
                                   timing=self.timing)
        my_name = self.db.name
        self.db.commit({'foo': 'bar'})
        sleep(5)
        self.db.commit({'foo': 'bar'})
        # the initial db should have expired by now
        self.db.commit({'foo': 'bar'})
        self.assertFalse(my_name in self.server.listDatabases(), "")
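
The tests above drive the rotation steps by hand; the same calls make up the normal database lifecycle: writes go to the current database, _rotate() switches to a fresh one, _archive() rolls rotated databases into the archive through the configured view, and _expire() deletes archived databases older than the expire delta. A condensed sketch, assuming the foo/bar design documents from testArchive are installed and that the names below are illustrative:

timing = {'archive': timedelta(hours=1), 'expire': timedelta(days=1)}
db = RotatingDatabase(dbname='rotdb_example', url=os.getenv("COUCHURL"),
                      views=['foo/bar'], archivename='rotdb_example_archive',
                      timing=timing)
db.queue({'foo': 'bar', 'int': 1})
db.commit()
db._rotate()    # start writing to a fresh database
db._archive()   # summarise rotated databases into the archive via foo/bar
db._expire()    # drop archived databases older than timing['expire']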
Example #6
class CMSCouchTest(unittest.TestCase):

    test_counter = 0

    def setUp(self):
        # Make an instance of the server
        self.server = CouchServer(os.getenv("COUCHURL", 'http://*****:*****@localhost:5984'))
        self.testname = self.id().split('.')[-1]
        # Create a database, drop an existing one first
        dbname = 'cmscouch_unittest_%s' % self.testname.lower()

        if dbname in self.server.listDatabases():
            self.server.deleteDatabase(dbname)

        self.server.createDatabase(dbname)
        self.db = self.server.connectDatabase(dbname)

    def tearDown(self):
        if sys.exc_info()[0] is None:
            # This test has passed, clean up after it
            dbname = 'cmscouch_unittest_%s' % self.testname.lower()
            self.server.deleteDatabase(dbname)

    def testCommitOne(self):
        # Can I commit one dict
        doc = {'foo':123, 'bar':456}
        id = self.db.commitOne(doc)[0]['id']
        # What about a Document
        doc = Document(inputDict = doc)
        id = self.db.commitOne(doc)[0]['id']

    def testCommitOneWithQueue(self):
        """
        CommitOne bypasses the queue, but it should maintain the queue if
        present for a future call to commit.
        """
        # Queue up five docs
        doc = {'foo':123, 'bar':456}
        for i in range(1,6):
            self.db.queue(doc)
        # Commit one Document
        doc = Document(inputDict = doc)
        id = self.db.commitOne(doc)[0]['id']
        self.assertEqual(1, len(self.db.allDocs()['rows']))
        self.db.commit()
        self.assertEqual(6, len(self.db.allDocs()['rows']))

    def testTimeStamping(self):
        doc = {'foo':123, 'bar':456}
        id = self.db.commitOne(doc, timestamp=True)[0]['id']
        doc = self.db.document(id)
        self.assertTrue('timestamp' in doc.keys())

    def testDeleteDoc(self):
        doc = {'foo':123, 'bar':456}
        self.db.commitOne(doc)
        all_docs = self.db.allDocs()
        self.assertEqual(1, len(all_docs['rows']))

        # The db.delete_doc is immediate
        id = all_docs['rows'][0]['id']
        self.db.delete_doc(id)
        all_docs = self.db.allDocs()
        self.assertEqual(0, len(all_docs['rows']))

    def testDeleteQueuedDocs(self):
        doc1 = {'foo':123, 'bar':456}
        doc2 = {'foo':789, 'bar':101112}
        self.db.queue(doc1)
        self.db.queue(doc2)
        self.db.commit()

        all_docs = self.db.allDocs()
        self.assertEqual(2, len(all_docs['rows']))
        for res in all_docs['rows']:
            id = res['id']
            doc = self.db.document(id)
            self.db.queueDelete(doc)
        all_docs = self.db.allDocs()
        self.assertEqual(2, len(all_docs['rows']))

        self.db.commit()

        all_docs = self.db.allDocs()
        self.assertEqual(0, len(all_docs['rows']))

    def testReplicate(self):
        repl_db = self.server.connectDatabase(self.db.name + 'repl')

        doc_id = self.db.commitOne({'foo':123}, timestamp=True)[0]['id']
        doc_v1 = self.db.document(doc_id)

        #replicate
        self.server.replicate(self.db.name, repl_db.name)

        # wait for a few seconds for replication to be triggered.
        time.sleep(1)
        self.assertEqual(self.db.document(doc_id), repl_db.document(doc_id))
        self.server.deleteDatabase(repl_db.name)

    def testSlashInDBName(self):
        """
        Slashes are a valid character in a database name, and are useful as they
        create a directory structure for the couch data files.
        """
        db_name = 'wmcore/unittests'
        try:
            self.server.deleteDatabase(db_name)
        except:
            # Ignore this - the database shouldn't already exist
            pass

        db = self.server.createDatabase(db_name)
        info = db.info()
        assert info['db_name'] == db_name

        db_name = 'wmcore/unittests'
        db = self.server.connectDatabase(db_name)
        info = db.info()
        assert info['db_name'] == db_name

        db = Database(db_name, url = os.environ["COUCHURL"])
        info = db.info()
        assert info['db_name'] == db_name

        self.server.deleteDatabase(db_name)

    def testInvalidName(self):
        """
        Capital letters are not allowed in database names.
        """
        db_name = 'Not A Valid Name'
        self.assertRaises(ValueError, self.server.createDatabase, db_name)
        self.assertRaises(ValueError, self.server.deleteDatabase, db_name)
        self.assertRaises(ValueError, self.server.connectDatabase, db_name)
        self.assertRaises(ValueError, Database, db_name)

    def testDocumentSerialisation(self):
        """
        A document should be writable into the couchdb with a timestamp.
        """
        d = Document()
        d['foo'] = 'bar'
        doc_info = self.db.commit(doc=d, timestamp=True)[0]
        d_from_db = self.db.document(doc_info['id'])
        self.assertEqual(d['foo'], d_from_db['foo'])
        self.assertEqual(d['timestamp'], d_from_db['timestamp'])

    def testAttachments(self):
        """
        Test uploading attachments with and without checksumming
        """
        doc = self.db.commitOne({'foo':'bar'}, timestamp=True)[0]
        attachment1 = "Hello"
        attachment2 = "How are you today?"
        attachment3 = "I'm very well, thanks for asking"
        attachment4 = "Lovely weather we're having"
        attachment5 = "Goodbye"
        keyhash = hashlib.md5()
        keyhash.update(attachment5)
        attachment5_md5 = keyhash.digest()
        attachment5_md5 = base64.b64encode(attachment5_md5)
        attachment6 = "Good day to you, sir!"
        #TODO: add a binary attachment - e.g. tar.gz
        doc = self.db.addAttachment(doc['id'], doc['rev'], attachment1)
        doc = self.db.addAttachment(doc['id'], doc['rev'], attachment2, contentType="foo/bar")
        doc = self.db.addAttachment(doc['id'], doc['rev'], attachment3, name="my_greeting")
        doc = self.db.addAttachment(doc['id'], doc['rev'], attachment4, add_checksum=True)
        doc = self.db.addAttachment(doc['id'], doc['rev'], attachment5, checksum=attachment5_md5)

        self.assertRaises(CouchInternalServerError, self.db.addAttachment, doc['id'], doc['rev'], attachment6, checksum='123')

    def testRevisionHandling(self):
        # This test won't work from an existing database, conflicts will be preserved, so
        # ruthlessly remove the databases to get a clean slate.
        try:
            self.server.deleteDatabase(self.db.name)
        except CouchNotFoundError:
            pass # Must have been deleted already

        try:
            self.server.deleteDatabase(self.db.name + 'repl')
        except CouchNotFoundError:
            pass # Must have been deleted already

        # I'm going to create a conflict, so need a replica db
        self.db = self.server.connectDatabase(self.db.name)
        repl_db = self.server.connectDatabase(self.db.name + 'repl')

        doc_id = self.db.commitOne({'foo':123}, timestamp=True)[0]['id']
        doc_v1 = self.db.document(doc_id)

        #replicate
        self.server.replicate(self.db.name, repl_db.name)
        time.sleep(1)

        doc_v2 = self.db.document(doc_id)
        doc_v2['bar'] = 456
        doc_id_rev2 = self.db.commitOne(doc_v2)[0]
        doc_v2 = self.db.document(doc_id)

        #now update the replica
        conflict_doc = repl_db.document(doc_id)
        conflict_doc['bar'] = 101112
        repl_db.commitOne(conflict_doc)

        #replicate, creating the conflict
        self.server.replicate(self.db.name, repl_db.name)
        time.sleep(1)

        conflict_view = {'map':"function(doc) {if(doc._conflicts) {emit(doc._conflicts, null);}}"}
        data = repl_db.post('/%s/_temp_view' % repl_db.name, conflict_view)

        # Should have one conflict in the repl database
        self.assertEqual(data['total_rows'], 1)
        # Should have no conflicts in the source database
        self.assertEqual(self.db.post('/%s/_temp_view' % self.db.name, conflict_view)['total_rows'], 0)
        self.assertTrue(repl_db.documentExists(data['rows'][0]['id'], rev=data['rows'][0]['key'][0]))

        repl_db.delete_doc(data['rows'][0]['id'], rev=data['rows'][0]['key'][0])
        data = repl_db.post('/%s/_temp_view' % repl_db.name, conflict_view)

        self.assertEqual(data['total_rows'], 0)
        self.server.deleteDatabase(repl_db.name)

        #update it again
        doc_v3 = self.db.document(doc_id)
        doc_v3['baz'] = 789
        doc_id_rev3 = self.db.commitOne(doc_v3)[0]
        doc_v3 = self.db.document(doc_id)

        #test that I can pull out an old revision
        doc_v1_test = self.db.document(doc_id, rev=doc_v1['_rev'])
        self.assertEqual(doc_v1, doc_v1_test)

        #test that I can check a revision exists
        self.assertTrue(self.db.documentExists(doc_id, rev=doc_v2['_rev']))

        self.assertFalse(self.db.documentExists(doc_id, rev='1'+doc_v2['_rev']))

        #why you shouldn't rely on rev
        self.db.compact(blocking=True)
        self.assertFalse(self.db.documentExists(doc_id, rev=doc_v1['_rev']))
        self.assertFalse(self.db.documentExists(doc_id, rev=doc_v2['_rev']))
        self.assertTrue(self.db.documentExists(doc_id, rev=doc_v3['_rev']))

    def testCommit(self):
        """
        Test queue and commit modes
        """
        # try to commit 2 random docs
        doc = {'foo':123, 'bar':456}
        self.db.queue(doc)
        self.db.queue(doc)
        self.assertEqual(2, len(self.db.commit()))

        # committing 2 docs with the same id will fail
        self.db.queue(Document(id = "1", inputDict = {'foo':123, 'bar':456}))
        self.db.queue(Document(id = "1", inputDict = {'foo':1234, 'bar':456}))
        answer = self.db.commit()
        self.assertEqual(2, len(answer))
        self.assertEqual(answer[0]['ok'], True)
        self.assertEqual(answer[1]['error'], 'conflict')

        # all_or_nothing mode ignores conflicts
        self.db.queue(Document(id = "2", inputDict = doc))
        self.db.queue(Document(id = "2", inputDict = {'foo':1234, 'bar':456}))
        answer = self.db.commit(all_or_nothing = True)
        self.assertEqual(2, len(answer))
        self.assertEqual(answer[0].get('error'), None)
        self.assertEqual(answer[1].get('error'), None)
        self.assertEqual(answer[0]['id'], '2')
        self.assertEqual(answer[1]['id'], '2')

        # callbacks can do stuff when conflicts arise
        # this particular one just overwrites the document
        def callback(db, data, result):
            for doc in data['docs']:
                if doc['_id'] == result['id']:
                    doc['_rev'] = db.document(doc['_id'])['_rev']
                    retval = db.commitOne(doc)
            return retval[0]

        self.db.queue(Document(id = "2", inputDict = {'foo':5, 'bar':6}))
        answer = self.db.commit(callback = callback)
        self.assertEqual(1, len(answer))
        self.assertEqual(answer[0].get('error'), None)
        updatedDoc = self.db.document('2')
        self.assertEqual(updatedDoc['foo'], 5)
        self.assertEqual(updatedDoc['bar'], 6)

        return

    def testUpdateHandler(self):
        """
        Test that update function support works
        """

        update_ddoc = {
            '_id':'_design/foo',
            'language': 'javascript',
            'updates':{
                "bump-counter" : 'function(doc, req) {if (!doc.counter) {doc.counter = 0};doc.counter += 1;return [doc,"bumped it!"];}',
            }
        }
        self.db.commit(update_ddoc)
        doc = {'foo': 123, 'counter': 0}
        doc_id = self.db.commit(doc)[0]['id']
        self.assertEqual("bumped it!", self.db.updateDocument(doc_id, 'foo', 'bump-counter'))

        self.assertEqual(1, self.db.document(doc_id)['counter'])


    def testList(self):
        """
        Test list function works ok
        """
        update_ddoc = {
            '_id':'_design/foo',
            'language': 'javascript',
            'views' : {
                       'all' : {
                                'map' : 'function(doc) {emit(null, null) }'
                                },
                       },
            'lists' : {
                'errorinoutput' : 'function(doc, req) {send("A string with the word error in")}',
                'malformed' : 'function(doc, req) {somethingtoraiseanerror}',
            }
        }
        self.db.commit(update_ddoc)
        # appropriate errors raised
        self.assertRaises(CouchNotFoundError, self.db.loadList, 'foo', 'error', 'view_doesnt_exist')
        self.assertRaises(CouchInternalServerError, self.db.loadList, 'foo', 'malformed', 'all')
        # error in list output string shouldn't raise an error
        self.assertEqual(self.db.loadList('foo', 'errorinoutput', 'all'),
                         "A string with the word error in")

    def testAllDocs(self):
        """
        Test AllDocs with options
        """
        self.db.queue(Document(id = "1", inputDict = {'foo':123, 'bar':456}))
        self.db.queue(Document(id = "2", inputDict = {'foo':123, 'bar':456}))
        self.db.queue(Document(id = "3", inputDict = {'foo':123, 'bar':456}))

        self.db.commit()
        self.assertEqual(3, len(self.db.allDocs()['rows']))
        self.assertEqual(2, len(self.db.allDocs({'startkey': "2"})['rows']))
        self.assertEqual(2, len(self.db.allDocs(keys = ["1", "3"])['rows']))
        self.assertEqual(1, len(self.db.allDocs({'limit':1}, ["1", "3"])['rows']))
        self.assertTrue('error' in self.db.allDocs(keys = ["1", "4"])['rows'][1])

    def testUpdateBulkDocuments(self):
        """
        Test bulk document updates with conflict handling
        """
        self.db.queue(Document(id="1", inputDict={'foo':123, 'bar':456}))
        self.db.queue(Document(id="2", inputDict={'foo':123, 'bar':456}))
        self.db.queue(Document(id="3", inputDict={'foo':123, 'bar':456}))
        self.db.commit()

        self.db.updateBulkDocumentsWithConflictHandle(["1", "2", "3"], {'foo': 333}, 2)
        result = self.db.allDocs({"include_docs": True})['rows']
        self.assertEqual(3, len(result))
        for item in result:
            self.assertEqual(333, item['doc']['foo'])

        self.db.updateBulkDocumentsWithConflictHandle(["1", "2", "3"], {'foo': 222}, 10)
        result = self.db.allDocs({"include_docs": True})['rows']
        self.assertEqual(3, len(result))
        for item in result:
            self.assertEqual(222, item['doc']['foo'])

    def testUpdateHandlerAndBulkUpdateProfile(self):
        """
        Profile the update handler against the bulk update with conflict handling
        """
        # for actual test increase the size value: For 10000 records, 96 sec vs 4 sec
        size = 100
        for i in range(size):
            self.db.queue(Document(id="%s" % i, inputDict={'name':123, 'counter':0}))

        update_doc = {
            '_id':'_design/foo',
            'language': 'javascript',
            'updates':{
                "change-counter" : """function(doc, req) { if (doc) { var data = JSON.parse(req.body);
                                      for (var field in data) {doc.field = data.field;} return [doc, 'OK'];}}""",
            }
        }

        self.db.commit(update_doc)
        start = time.time()
        for id in range(size):
            doc_id = "%s" % id
            self.db.updateDocument(doc_id, 'foo', 'change-counter', {'counter': 1}, useBody=True)
        end = time.time()

        print("update handler: %s sec" % (end - start))

        start = time.time()
        ids = []
        for id in range(size):
            doc_id = "%s" % id
            ids.append(doc_id)
        self.db.updateBulkDocumentsWithConflictHandle(ids, {'counter': 2}, 1000)
        end = time.time()

        print("bulk update: %s sec" % (end - start))
Example #7
class LogDBBackend(object):
    """
    Represents persistent storage for LogDB
    """
    def __init__(self, db_url, db_name, identifier, thread_name, **kwds):
        self.db_url = db_url
        self.server = CouchServer(db_url)
        self.db_name = db_name
        self.dbid = identifier
        self.thread_name = thread_name
        self.agent = kwds.get('agent', 0)
        self.db = self.server.connectDatabase(db_name, create=False)
        self.design = 'LogDB'  # name of design document
        self.view = 'requests'  # name of view to look-up requests
        self.tsview = 'tstamp'  # name of tsview to look-up requests
        self.threadview = 'logByRequestAndThread'
        self.requestview = 'logByRequest'

    def deleteDatabase(self):
        """Delete back-end database"""
        if self.db_name in self.server.listDatabases():
            self.server.deleteDatabase(self.db_name)

    def check(self, request, mtype=None):
        """Check that given request name is valid"""
        # TODO: we may add some logic to check request name, etc.
        if not request:
            raise LogDBError("Request name is empty")
        if mtype and mtype not in LOGDB_MSG_TYPES:
            raise LogDBError("Unsupported message type: '%s', supported types %s" \
                    % (mtype, LOGDB_MSG_TYPES))

    def docid(self, request, mtype):
        """Generate doc id, we use double dash to avoid dashes from thread names"""
        return gen_hash('--'.join(
            (request, self.dbid, self.thread_name, mtype)))

    def prefix(self, mtype):
        """Generate agent specific prefix for given message type"""
        if self.agent:
            # we add a prefix for agent messages; all others will not have this prefix
            mtype = 'agent-%s' % mtype
        return mtype

    def agent_update(self, request, msg='', mtype="info"):
        """Update agent info in LogDB for given request"""
        self.check(request, mtype)
        mtype = self.prefix(mtype)
        rec = {"ts": tstamp(), "msg": msg}
        doc = {
            "_id": self.docid(request, mtype),
            "messages": [rec],
            "request": request,
            "identifier": self.dbid,
            "thr": self.thread_name,
            "type": mtype
        }
        try:
            exist_doc = self.db.document(doc["_id"])
            doc["_rev"] = exist_doc["_rev"]
        except CouchNotFoundError:
            # this means the document does not exist, so we will just insert it
            pass
        finally:
            res = self.db.commitOne(doc)
        return res

    def user_update(self, request, msg, mtype='comment'):
        """Update user info in LogDB for given request"""
        rec = {"ts": tstamp(), "msg": msg}
        doc = {
            "_id": self.docid(request, mtype),
            "messages": [rec],
            "request": request,
            "identifier": self.dbid,
            "thr": self.thread_name,
            "type": mtype
        }
        try:
            exist_doc = self.db.document(doc["_id"])
            doc["_rev"] = exist_doc["_rev"]
            doc["messages"] += exist_doc["messages"]
        except CouchNotFoundError:
            # this means the document does not exist, so we will just insert it
            pass
        finally:
            res = self.db.commitOne(doc)
        return res

    def get(self, request, mtype=None, detail=True, agent=True):
        """Retrieve all entries from LogDB for given request"""
        self.check(request, mtype)
        if agent and mtype:
            mtype = self.prefix(mtype)
        options = {'reduce': False}
        if mtype:
            keys = [[request, mtype]]
        else:
            keys = []
            options.update({'startkey': [request], 'endkey': [request, {}]})
        if detail:
            options.update({'include_docs': True})
        docs = self.db.loadView(self.design, self.view, options, keys=keys)
        return docs

    def get_by_thread(self, request, mtype='error', detail=False, agent=True):
        self.check(request, mtype)
        if agent and mtype:
            mtype = self.prefix(mtype)
        keys = [[request, self.dbid, self.thread_name, mtype]]
        options = {'reduce': False}
        if detail:
            options.update({'include_docs': True})
        docs = self.db.loadView(self.design, self.threadview, options, keys)
        return docs

    def get_by_request(self, request):
        keys = [request]
        options = {'reduce': False}
        docs = self.db.loadView(self.design, self.requestview, options, keys)
        return docs

    def get_all_requests(self):
        """Retrieve all entries from LogDB"""
        options = {'reduce': True, 'group_level': 1}
        docs = self.db.loadView(self.design, self.view, options)
        return docs

    def delete(self, request, mtype=None, this_thread=False, agent=True):
        """Delete entry in LogDB for given request"""
        if mtype:
            self.check(request, mtype)
        else:
            self.check(request)
        if this_thread:
            docs = self.get_by_thread(request,
                                      mtype=mtype,
                                      detail=False,
                                      agent=agent)
        else:
            docs = self.get(request, mtype=mtype, detail=False, agent=agent)
        ids = [r['id'] for r in docs.get('rows', [])]
        res = self.db.bulkDeleteByIDs(ids)
        return res

    def cleanup(self, thr):
        """
        Clean up docs older than the given threshold (thr should be specified in seconds).
        This is done via the tstamp view and endkey, e.g.
        curl "http://127.0.0.1:5984/logdb/_design/LogDB/_view/tstamp?endkey=1427912282"
        """
        cutoff = round(time.time() - thr)
        #docs = self.db.allDocs() # may need another view to look-up old docs
        spec = {'endkey': cutoff, 'reduce': False}
        docs = self.db.loadView(self.design, self.tsview, spec)
        ids = [d['id'] for d in docs.get('rows', [])]
        self.db.bulkDeleteByIDs(ids)
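
A short sketch of how an agent component might drive this backend, assuming the LogDB database already exists (connectDatabase is called with create=False) and that the identifier, thread name and request name below are illustrative:

backend = LogDBBackend(db_url=os.getenv("COUCHURL"),
                       db_name="logdb",
                       identifier="agent_host",      # illustrative agent identifier
                       thread_name="JobSubmitter",   # illustrative thread name
                       agent=1)

backend.agent_update("my_request_name", msg="submitted 100 jobs", mtype="info")
docs = backend.get("my_request_name", mtype="info")  # look up entries for the request
backend.cleanup(7 * 24 * 3600)                        # remove docs older than a week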
Example #8
class CMSCouchTest(unittest.TestCase):
    test_counter = 0

    def setUp(self):
        # Make an instance of the server
        self.server = CouchServer(
            os.getenv("COUCHURL", 'http://*****:*****@localhost:5984'))
        self.testname = self.id().split('.')[-1]
        # Create a database, drop an existing one first
        dbname = 'cmscouch_unittest_%s' % self.testname.lower()

        if dbname in self.server.listDatabases():
            self.server.deleteDatabase(dbname)

        self.server.createDatabase(dbname)
        self.db = self.server.connectDatabase(dbname)

    def tearDown(self):
        if sys.exc_info()[0] is None:
            # This test has passed, clean up after it
            dbname = 'cmscouch_unittest_%s' % self.testname.lower()
            self.server.deleteDatabase(dbname)

    def testCommitOne(self):
        # Can I commit one dict
        doc = {'foo': 123, 'bar': 456}
        id = self.db.commitOne(doc, returndocs=True)[0]['id']
        # What about a Document
        doc = Document(inputDict=doc)
        id = self.db.commitOne(doc, returndocs=True)[0]['id']

    def testCommitOneWithQueue(self):
        """
        CommitOne bypasses the queue, but it should maintain the queue if
        present for a future call to commit.
        """
        # Queue up five docs
        doc = {'foo': 123, 'bar': 456}
        for i in range(1, 6):
            self.db.queue(doc)
        # Commit one Document
        doc = Document(inputDict=doc)
        id = self.db.commitOne(doc, returndocs=True)[0]['id']
        self.assertEqual(1, len(self.db.allDocs()['rows']))
        self.db.commit()
        self.assertEqual(6, len(self.db.allDocs()['rows']))

    def testTimeStamping(self):
        doc = {'foo': 123, 'bar': 456}
        id = self.db.commitOne(doc, timestamp=True, returndocs=True)[0]['id']
        doc = self.db.document(id)
        self.assertTrue('timestamp' in doc.keys())

    def testDeleteDoc(self):
        doc = {'foo': 123, 'bar': 456}
        self.db.commitOne(doc)
        all_docs = self.db.allDocs()
        self.assertEqual(1, len(all_docs['rows']))

        # The db.delete_doc is immediate
        id = all_docs['rows'][0]['id']
        self.db.delete_doc(id)
        all_docs = self.db.allDocs()
        self.assertEqual(0, len(all_docs['rows']))

    def testDeleteQueuedDocs(self):
        doc1 = {'foo': 123, 'bar': 456}
        doc2 = {'foo': 789, 'bar': 101112}
        self.db.queue(doc1)
        self.db.queue(doc2)
        self.db.commit()

        all_docs = self.db.allDocs()
        self.assertEqual(2, len(all_docs['rows']))
        for res in all_docs['rows']:
            id = res['id']
            doc = self.db.document(id)
            self.db.queueDelete(doc)
        all_docs = self.db.allDocs()
        self.assertEqual(2, len(all_docs['rows']))

        self.db.commit()

        all_docs = self.db.allDocs()
        self.assertEqual(0, len(all_docs['rows']))

    def testWriteReadDocNoID(self):
        doc = {}

    def testReplicate(self):
        repl_db = self.server.connectDatabase(self.db.name + 'repl')

        doc_id = self.db.commitOne({'foo': 123},
                                   timestamp=True,
                                   returndocs=True)[0]['id']
        doc_v1 = self.db.document(doc_id)

        #replicate
        self.server.replicate(self.db.name, repl_db.name)

        self.assertEqual(self.db.document(doc_id), repl_db.document(doc_id))
        self.server.deleteDatabase(repl_db.name)

    def testSlashInDBName(self):
        """
        Slashes are a valid character in a database name, and are useful as they
        create a directory structure for the couch data files.
        """
        db_name = 'wmcore/unittests'
        try:
            self.server.deleteDatabase(db_name)
        except:
            # Ignore this - the database shouldn't already exist
            pass

        db = self.server.createDatabase(db_name)
        info = db.info()
        assert info['db_name'] == db_name

        db_name = 'wmcore/unittests'
        db = self.server.connectDatabase(db_name)
        info = db.info()
        assert info['db_name'] == db_name

        db = Database(db_name, url=os.environ["COUCHURL"])
        info = db.info()
        assert info['db_name'] == db_name

        self.server.deleteDatabase(db_name)

    def testInvalidName(self):
        """
        Capital letters are not allowed in database names.
        """
        db_name = 'Not A Valid Name'
        self.assertRaises(ValueError, self.server.createDatabase, db_name)
        self.assertRaises(ValueError, self.server.deleteDatabase, db_name)
        self.assertRaises(ValueError, self.server.connectDatabase, db_name)
        self.assertRaises(ValueError, Database, db_name)

    def testDocumentSerialisation(self):
        """
        A document should be writable into the couchdb with a timestamp.
        """
        d = Document()
        d['foo'] = 'bar'
        doc_info = self.db.commit(doc=d, timestamp=True)[0]
        d_from_db = self.db.document(doc_info['id'])
        self.assertEqual(d['foo'], d_from_db['foo'])
        self.assertEqual(d['timestamp'], d_from_db['timestamp'])

    def testAttachments(self):
        """
        Test uploading attachments with and without checksumming
        """
        doc = self.db.commitOne({'foo': 'bar'},
                                timestamp=True,
                                returndocs=True)[0]
        attachment1 = "Hello"
        attachment2 = "How are you today?"
        attachment3 = "I'm very well, thanks for asking"
        attachment4 = "Lovely weather we're having"
        attachment5 = "Goodbye"
        keyhash = hashlib.md5()
        keyhash.update(attachment5)
        attachment5_md5 = keyhash.digest()
        attachment5_md5 = base64.b64encode(attachment5_md5)
        attachment6 = "Good day to you, sir!"
        #TODO: add a binary attachment - e.g. tar.gz
        doc = self.db.addAttachment(doc['id'], doc['rev'], attachment1)
        doc = self.db.addAttachment(doc['id'],
                                    doc['rev'],
                                    attachment2,
                                    contentType="foo/bar")
        doc = self.db.addAttachment(doc['id'],
                                    doc['rev'],
                                    attachment3,
                                    name="my_greeting")
        doc = self.db.addAttachment(doc['id'],
                                    doc['rev'],
                                    attachment4,
                                    add_checksum=True)
        doc = self.db.addAttachment(doc['id'],
                                    doc['rev'],
                                    attachment5,
                                    checksum=attachment5_md5)

        self.assertRaises(CouchInternalServerError,
                          self.db.addAttachment,
                          doc['id'],
                          doc['rev'],
                          attachment6,
                          checksum='123')

    def testRevisionHandling(self):
        # This test won't work from an existing database, conflicts will be preserved, so
        # ruthlessly remove the databases to get a clean slate.
        try:
            self.server.deleteDatabase(self.db.name)
        except CouchNotFoundError:
            pass  # Must have been deleted already

        try:
            self.server.deleteDatabase(self.db.name + 'repl')
        except CouchNotFoundError:
            pass  # Must have been deleted already

        # I'm going to create a conflict, so need a replica db
        self.db = self.server.connectDatabase(self.db.name)
        repl_db = self.server.connectDatabase(self.db.name + 'repl')

        doc_id = self.db.commitOne({'foo': 123},
                                   timestamp=True,
                                   returndocs=True)[0]['id']
        doc_v1 = self.db.document(doc_id)

        #replicate
        self.server.replicate(self.db.name, repl_db.name)

        doc_v2 = self.db.document(doc_id)
        doc_v2['bar'] = 456
        doc_id_rev2 = self.db.commitOne(doc_v2, returndocs=True)[0]
        doc_v2 = self.db.document(doc_id)

        #now update the replica
        conflict_doc = repl_db.document(doc_id)
        conflict_doc['bar'] = 101112
        repl_db.commitOne(conflict_doc)

        #replicate, creating the conflict
        self.server.replicate(self.db.name, repl_db.name)
        conflict_view = {
            'map':
            "function(doc) {if(doc._conflicts) {emit(doc._conflicts, null);}}"
        }
        data = repl_db.post('/%s/_temp_view' % repl_db.name, conflict_view)

        # Should have one conflict in the repl database
        self.assertEqual(data['total_rows'], 1)
        # Should have no conflicts in the source database
        self.assertEqual(
            self.db.post('/%s/_temp_view' % self.db.name,
                         conflict_view)['total_rows'], 0)
        self.assertTrue(
            repl_db.documentExists(data['rows'][0]['id'],
                                   rev=data['rows'][0]['key'][0]))

        repl_db.delete_doc(data['rows'][0]['id'],
                           rev=data['rows'][0]['key'][0])
        data = repl_db.post('/%s/_temp_view' % repl_db.name, conflict_view)

        self.assertEqual(data['total_rows'], 0)
        self.server.deleteDatabase(repl_db.name)

        #update it again
        doc_v3 = self.db.document(doc_id)
        doc_v3['baz'] = 789
        doc_id_rev3 = self.db.commitOne(doc_v3, returndocs=True)[0]
        doc_v3 = self.db.document(doc_id)

        #test that I can pull out an old revision
        doc_v1_test = self.db.document(doc_id, rev=doc_v1['_rev'])
        self.assertEqual(doc_v1, doc_v1_test)

        #test that I can check a revision exists
        self.assertTrue(self.db.documentExists(doc_id, rev=doc_v2['_rev']))

        self.assertFalse(
            self.db.documentExists(doc_id, rev='1' + doc_v2['_rev']))

        #why you shouldn't rely on rev
        self.db.compact(blocking=True)
        self.assertFalse(self.db.documentExists(doc_id, rev=doc_v1['_rev']))
        self.assertFalse(self.db.documentExists(doc_id, rev=doc_v2['_rev']))
        self.assertTrue(self.db.documentExists(doc_id, rev=doc_v3['_rev']))

    def testCommit(self):
        """
        Test queue and commit modes
        """
        # try to commit 2 random docs
        doc = {'foo': 123, 'bar': 456}
        self.db.queue(doc)
        self.db.queue(doc)
        self.assertEqual(2, len(self.db.commit()))

        # committing 2 docs with the same id will fail
        self.db.queue(Document(id="1", inputDict={'foo': 123, 'bar': 456}))
        self.db.queue(Document(id="1", inputDict={'foo': 1234, 'bar': 456}))
        answer = self.db.commit()
        self.assertEqual(2, len(answer))
        self.assertEqual(answer[0]['error'], 'conflict')
        self.assertEqual(answer[1]['error'], 'conflict')

        # all_or_nothing mode ignores conflicts
        self.db.queue(Document(id="2", inputDict=doc))
        self.db.queue(Document(id="2", inputDict={'foo': 1234, 'bar': 456}))
        answer = self.db.commit(all_or_nothing=True)
        self.assertEqual(2, len(answer))
        self.assertEqual(answer[0].get('error'), None)
        self.assertEqual(answer[1].get('error'), None)
        self.assertEqual(answer[0]['id'], '2')
        self.assertEqual(answer[1]['id'], '2')

        # callbacks can do stuff when conflicts arise
        # this particular one just overwrites the document
        def callback(db, data, result):
            for doc in data['docs']:
                if doc['_id'] == result['id']:
                    doc['_rev'] = db.document(doc['_id'])['_rev']
                    retval = db.commitOne(doc)
            return retval[0]

        self.db.queue(Document(id="2", inputDict={'foo': 5, 'bar': 6}))
        answer = self.db.commit(callback=callback)
        self.assertEqual(1, len(answer))
        self.assertEqual(answer[0].get('error'), None)
        updatedDoc = self.db.document('2')
        self.assertEqual(updatedDoc['foo'], 5)
        self.assertEqual(updatedDoc['bar'], 6)

        return

    def testUpdateHandler(self):
        """
        Test that update function support works
        """

        update_ddoc = {
            '_id': '_design/foo',
            'language': 'javascript',
            'updates': {
                "bump-counter":
                'function(doc, req) {if (!doc.counter) {doc.counter = 0};doc.counter += 1;return [doc,"bumped it!"];}',
            }
        }
        self.db.commit(update_ddoc)
        doc = {'foo': 123, 'counter': 0}
        doc_id = self.db.commit(doc)[0]['id']
        self.assertEqual("bumped it!",
                         self.db.updateDocument(doc_id, 'foo', 'bump-counter'))

        self.assertEqual(1, self.db.document(doc_id)['counter'])

    def testList(self):
        """
        Test list function works ok
        """
        update_ddoc = {
            '_id': '_design/foo',
            'language': 'javascript',
            'views': {
                'all': {
                    'map': 'function(doc) {emit(null, null) }'
                },
            },
            'lists': {
                'errorinoutput':
                'function(doc, req) {send("A string with the word error in")}',
                'malformed': 'function(doc, req) {somethingtoraiseanerror}',
            }
        }
        self.db.commit(update_ddoc)
        # appropriate errors raised
        self.assertRaises(CouchNotFoundError, self.db.loadList, 'foo', 'error',
                          'view_doesnt_exist')
        self.assertRaises(CouchInternalServerError, self.db.loadList, 'foo',
                          'malformed', 'all')
        # error in list output string shouldn't raise an error
        self.assertEqual(self.db.loadList('foo', 'errorinoutput', 'all'),
                         "A string with the word error in")

    def testAllDocs(self):
        """
        Test AllDocs with options
        """
        self.db.queue(Document(id="1", inputDict={'foo': 123, 'bar': 456}))
        self.db.queue(Document(id="2", inputDict={'foo': 123, 'bar': 456}))
        self.db.queue(Document(id="3", inputDict={'foo': 123, 'bar': 456}))

        self.db.commit()
        self.assertEqual(3, len(self.db.allDocs()['rows']))
        self.assertEqual(2, len(self.db.allDocs({'startkey': "2"})['rows']))
        self.assertEqual(2, len(self.db.allDocs(keys=["1", "3"])['rows']))
        self.assertEqual(
            1, len(self.db.allDocs({'limit': 1}, ["1", "3"])['rows']))
        self.assertTrue('error' in self.db.allDocs(keys=["1", "4"])['rows'][1])
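
This variant reads the new document id from commitOne(..., returndocs=True) and calls replicate without the sleeps used in the earlier version. A compact sketch of copying one database into another and checking a document arrived (database names are illustrative; server is a CouchServer as in setUp, imports as in the tests):

source = server.connectDatabase('cmscouch_example_src')
target = server.connectDatabase('cmscouch_example_dst')

doc_id = source.commitOne({'foo': 123}, timestamp=True, returndocs=True)[0]['id']
server.replicate(source.name, target.name)
time.sleep(1)   # replication is asynchronous; the earlier variant waits like this too
assert source.document(doc_id) == target.document(doc_id)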
Example #9
class DASCouchcache(Cache):
    """
    Base DAS couchdb cache class based on CouchDB, see
    http://couchdb.apache.org/. The client API is based on
    http://wiki.apache.org/couchdb/Getting_started_with_Python;
    in particular we use the couchdb-python library,
    http://couchdb-python.googlecode.com/
    """
    def __init__(self, config):
        Cache.__init__(self, config)
        uri = config['couch_servers'] # in the future there may be several servers
        self.logger = config['logger']
        if  not self.logger:
            self.logger = DummyLogger()
        self.limit  = config['couch_lifetime']
        self.uri    = uri.replace('http://', '')
        self.server = CouchServer(self.uri)
        self.dbname = "das"
        self.cdb    = None # cached couch DB handler
        self.future = 9999999999 # unreachable timestamp
        self.logger.info('Init couchcache %s' % self.uri)

        self.views = { 
            'query': {'map': """
function(doc) {
    if(doc.hash) {
        emit([doc.hash, doc.expire], doc.results);
    }
}"""
            },
#            'incache': {'map': """
#function(doc) {
#    if(doc.hash) {
#        emit([doc.hash, doc.expire], null);
#    }
#}"""
#            },
        }

        self.adminviews = { 

            'system' : {'map': """
function(doc) {
    if(doc.results.system) {
        emit(doc.results.system, doc);
    }
}"""
            },

            'cleaner' : {'map': """
function(doc) {
    if(doc.expire) {
        emit(doc.expire, doc);
    }
}"""
            },

            'timer' : {'map': """
function(doc) {
    if(doc.timestamp) {
        emit(doc.timestamp, doc);
    }
}"""
            },
            'all_queries' : {'map': """
function(doc) {
    if (doc.query) {
        emit(doc.query, null);
    }
}""",
                        'reduce' : """
function(keys, values) {
   return null;
}"""
            },

        }

    def connect(self, url):
        """
        Connect to different Couch DB URL
        """
        self.uri    = url.replace('http://', '')
        del self.server
        self.server = CouchServer(self.uri)

    def create_view(self, dbname, design, view_dict):
        """
        Create new view in couch db.
        """
        cdb  = self.couchdb(dbname)
        # check provided view_dict that it has all keys
        for view, definition in view_dict.items():
            if  type(definition) is not dict:
                msg = 'View "%s" has improper definition' % view
                raise Exception(msg)
            if  'map' not in definition:
                msg = 'View "%s" does not have a map' % view
                raise Exception(msg)
        view = dict(_id='_design/%s' % design, language='javascript', 
                        doctype='view', views=view_dict)
        cdb.commit(view)

    def delete_view(self, dbname, design, view_name):
        """
        Delete given view in couch db
        """
        print("Delete view", dbname, design, view_name)

    def dbinfo(self, dbname='das'):
        """
        Provide couch db info
        """
        cdb = self.couchdb(dbname)
        if  cdb:
            self.logger.info(cdb.info())
        else:
            self.logger.warning("No '%s' found in couch db" % dbname)
        if  not cdb:
            return "Unable to connect to %s" % dbname
        return cdb.info()

    def delete_cache(self, dbname=None, system=None):
        """
        Delete either the couch db (dbname) or particular docs
        for the provided system, e.g. all sitedb docs.
        """
        cdb = self.couchdb(dbname)
        if  cdb:
            if  system:
                key = '"%s"' % system
                options = {'key' : key}
                results = self.get_view('dasadmin', 'system', options)
                for doc in results:
                    cdb.queueDelete(doc)
                cdb.commit()
            else:
                self.server.deleteDatabase(dbname)
        return

    def couchdb(self, dbname):
        """
        look up db in couch db server, if found give it back to user
        """
        if  self.cdb:
            return self.cdb
        couch_db_list = []
        try:
            couch_db_list = self.server.listDatabases()
        except:
            return None
        if  dbname not in couch_db_list:
            self.logger.info("DASCouchcache::couchdb, create db %s" % dbname)
            cdb = self.server.createDatabase(dbname)
            self.create_view(self.dbname, 'dasviews', self.views)
            self.create_view(self.dbname, 'dasadmin', self.adminviews)
        else:
            self.logger.info("DASCouchcache::couchdb, connect db %s" % dbname)
            cdb = self.server.connectDatabase(dbname)
        self.cdb = cdb
        return cdb

    def incache(self, query):
        """
        Check if query exists in cache
        """
        dbname = self.dbname
        cdb = self.couchdb(dbname)
        if  not cdb:
            return
        key  = genkey(query)
        #TODO:check how to query 1 result, I copied the way from get_from_cache
        skey = ["%s" % key, timestamp()]
        ekey = ["%s" % key, self.future]
        options = {'startkey': skey, 'endkey': ekey}
#        results = cdb.loadView('dasviews', 'incache', options)
        results = cdb.loadView('dasviews', 'query', options)
        try:
            res = len(results['rows'])
        except:
            traceback.print_exc()
            return
        if  res:
            return True
        return False

    def get_from_cache(self, query, idx=0, limit=0, skey=None, order='asc'):
        """
        Retrieve results from the cache, otherwise return null.
        """
        id      = 0
        idx     = int(idx)
        limit   = long(limit)
        stop    = idx + limit # get upper bound for range
        dbname  = self.dbname
        cdb     = self.couchdb(dbname)
        if  not cdb:
            return
        key     = genkey(query)

        skey    = ["%s" % key, timestamp()]
        ekey    = ["%s" % key, self.future]
        options = {'startkey': skey, 'endkey': ekey}
        results = cdb.loadView('dasviews', 'query', options)
        try:
            res = [row['value'] for row in results['rows']]
            for row in results['rows']:
                row['id'] = id
                if  limit:
                    if  id >= idx and id <= stop:
                        yield row
                else:
                    yield row
                id += 1
        except:
            traceback.print_exc()
            return
        if  res:
            self.logger.info("DASCouchcache::get_from_cache for %s" % query)
#        if  len(res) == 1:
#            return res[0]
#        return res

    def update_cache(self, query, results, expire):
        """
        Insert results into the cache. We use a bulk insert operation
        (db.update over the entire set) rather than looping over every
        single row and calling db.create. The speed-up is a factor of 10.
        """
        if  not expire:
            raise Exception('Expire parameter is null')
        self.logger.info("DASCouchcache::update_cache for %s" % query)
        if  not results:
            return
        dbname = self.dbname
        viewlist = []
        for key in self.views.keys():
            viewlist.append("/%s/_design/dasviews/_view/%s" % (dbname, key))
        cdb = self.couchdb(dbname)
        self.clean_cache()
        if  not cdb:
            if  type(results) is list or \
                type(results) is types.GeneratorType:
                for row in results:
                    yield row
            else:
                yield results
            return
        if  type(results) is list or \
            type(results) is types.GeneratorType:
            for row in results:
                res = results2couch(query, row, expire)
                cdb.queue(res, viewlist=viewlist)
                yield row
        else:
            res = results2couch(query, results, expire)
            yield results
            cdb.queue(res, viewlist=viewlist)
        cdb.commit(viewlist=viewlist)

    def remove_from_cache(self, query):
        """
        Delete query from cache
        """
        self.logger.debug('DASCouchcache::remove_from_cache(%s)' \
                % (query, ))
        return

    def get_view(self, design, view, options={}):
        """
        Retrieve results from the cache based on the provided Couchcache view
        """
        dbname = self.dbname
        cdb = self.couchdb(dbname)
        if  not cdb:
            return
        results = cdb.loadView(design, view, options)
        res = [row['value'] for row in results['rows']]
        if  len(res) == 1:
            return res[0]
        return res

    def list_views(self):
        """
        Return a list of Couchcache views
        """

    def clean_cache(self):
        """
        Clean expired docs in couch db.
        """
        dbname = self.dbname
        cdb = self.couchdb(dbname)
        if  not cdb:
            return
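        # Select docs whose expire timestamp already lies in the past: range [0, now]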
        skey = 0
        ekey = timestamp()
        options = {'startkey': skey, 'endkey': ekey}
        results = cdb.loadView('dasadmin', 'cleaner', options)

        ndocs = 0
        for doc in results['rows']:
            cdb.queueDelete(doc['value'])
            ndocs += 1

        self.logger.info("DASCouchcache::clean_couch, will remove %s doc's" \
            % ndocs )
        if  not ndocs:
            return
        cdb.commit()  # bulk delete
        cdb.compact() # remove them permanently
        
    def list_between(self, time_begin, time_end):
        """
        Retrieve results from the cache for the given time range.
        """
        dbname = self.dbname
        cdb = self.couchdb(dbname)
        if  not cdb:
            return
        skey = time_begin
        ekey = time_end
        options = {'startkey': skey, 'endkey': ekey}
        results = cdb.loadView('dasadmin', 'timer', options)
        try:
            res = [row['value'] for row in results['rows']]
        except:
            traceback.print_exc()
            return
        if  len(res) == 1:
            return res[0]
        return res

    def list_queries_in(self, system, idx=0, limit=0):
        """
        Retrieve results from cache for provided system, e.g. sitedb
        """
        idx = int(idx)
        limit = long(limit)
        dbname = self.dbname
        cdb = self.couchdb(dbname)
        if  not cdb:
            return
        skey = system
        ekey = system
        options = {'startkey': skey, 'endkey': ekey}
        results = cdb.loadView('dasadmin', 'system', options)
        try:
            res = [row['value'] for row in results['rows']]
        except:
            traceback.print_exc()
            return
        if  len(res) == 1:
            return res[0]
        return res

    def get_all_views(self, dbname=None):
        """
        Get all defined views in the couch db. CouchDB has no direct way to
        extract view documents, so we ask for _all_docs with appropriate
        start/end keys. Once the _design docs are retrieved, we loop over them
        and fetch the document of each particular view, e.g.
        http://localhost:5984/das/_design/dasviews
        """
        if  not dbname:
            dbname = self.dbname
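        # URL-encoded key range startkey="_design/" .. endkey="_design0", which
        # selects every _design/* document returned by _all_docs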
        qqq  = 'startkey=%22_design%2F%22&endkey=%22_design0%22'
        host = 'http://' + self.uri
        path = '/%s/_all_docs?%s' % (dbname, qqq)
        kwds = {}
        req  = 'GET'
        debug   = 0
        results = httplib_request(host, path, kwds, req, debug)
        designdocs = json.loads(results)
        results    = {}
        for item in designdocs['rows']:
            doc   = item['key']
#            print "design:", doc
            path  = '/%s/%s' % (dbname, doc)
            res   = httplib_request(host, path, kwds, req, debug)
            rdict = json.loads(res)
            views = []
            for view_name, view_dict in rdict['views'].items():
#                print "  view:", view_name
#                print "   map:", view_dict['map']
                if  'reduce' in view_dict:
#                    print "reduce:", view_dict['reduce']
                    rdef = view_dict['reduce']
                    defrow = dict(map=view_dict['map'], 
                                        reduce=view_dict['reduce'])
                else:
                    defrow = dict(map=view_dict['map'])
                row = {'%s' % view_name : defrow}
                views.append(row)
            results[doc] = views
        return results

    def get_all_queries(self, idx=0, limit=0):
        """
        Retrieve DAS queries from the cache.
        """
        idx = int(idx)
        limit = long(limit)
        dbname = self.dbname
        cdb = self.couchdb(dbname)
        if  not cdb:
            return

        options = {}
        results = cdb.loadView('dasadmin', 'all_queries', options)
        try:
            res = [row['value'] for row in results['rows']]
        except:
            traceback.print_exc()
            return
        if  len(res) == 1:
            return res[0]
        return res
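
# --- Usage sketch (not part of the original listing) -------------------------
# A minimal, hedged example of driving the cache above. It relies only on the
# configuration keys visible in DASCouchcache.__init__ ('couch_servers',
# 'logger', 'couch_lifetime'); the CouchDB URL, the query string and the
# results payload are illustrative assumptions.
def _dascouchcache_usage_sketch():
    config = {'couch_servers': 'http://localhost:5984',  # assumed local couch
              'logger': None,             # DASCouchcache falls back to DummyLogger
              'couch_lifetime': 600}
    cache = DASCouchcache(config)
    query = 'find dataset where dataset=/a/b/c'           # illustrative query
    results = [{'dataset': '/a/b/c', 'nevents': 10}]      # illustrative payload
    # update_cache and get_from_cache are generators, so they must be consumed
    stored = list(cache.update_cache(query, results, expire=3600))
    cached = list(cache.get_from_cache(query, idx=0, limit=10))
    return stored, cached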
Exemplo n.º 11
0
class CMSCouchTest(unittest.TestCase):
    test_counter = 0
    def setUp(self):
        # Make an instance of the server
        self.server = CouchServer(os.getenv("COUCHURL", 'http://*****:*****@localhost:5984'))
        self.testname = self.id().split('.')[-1]
        # Create a database, drop an existing one first
        dbname = 'cmscouch_unittest_%s' % self.testname.lower()

        if dbname in self.server.listDatabases():
            self.server.deleteDatabase(dbname)

        self.server.createDatabase(dbname)
        self.db = self.server.connectDatabase(dbname)

    def tearDown(self):
        if sys.exc_info()[0] == None:
            # This test has passed, clean up after it
            dbname = 'cmscouch_unittest_%s' % self.testname.lower()
            self.server.deleteDatabase(dbname)

    def testCommitOne(self):
        # Can I commit one dict
        doc = {'foo':123, 'bar':456}
        id = self.db.commitOne(doc, returndocs=True)[0]['id']
        # What about a Document
        doc = Document(inputDict = doc)
        id = self.db.commitOne(doc, returndocs=True)[0]['id']

    def testCommitOneWithQueue(self):
        """
        CommitOne bypasses the queue, but it should maintain the queue if
        present for a future call to commit.
        """
        # Queue up five docs
        doc = {'foo':123, 'bar':456}
        for i in range(1,6):
            self.db.queue(doc)
        # Commit one Document
        doc = Document(inputDict = doc)
        id = self.db.commitOne(doc, returndocs=True)[0]['id']
        self.assertEqual(1, len(self.db.allDocs()['rows']))
        self.db.commit()
        self.assertEqual(6, len(self.db.allDocs()['rows']))

    def testTimeStamping(self):
        doc = {'foo':123, 'bar':456}
        id = self.db.commitOne(doc, timestamp=True, returndocs=True)[0]['id']
        doc = self.db.document(id)
        self.assertTrue('timestamp' in doc.keys())

    def testDeleteDoc(self):
        doc = {'foo':123, 'bar':456}
        self.db.commitOne(doc)
        all_docs = self.db.allDocs()
        self.assertEqual(1, len(all_docs['rows']))

        # The db.delete_doc is immediate
        id = all_docs['rows'][0]['id']
        self.db.delete_doc(id)
        all_docs = self.db.allDocs()
        self.assertEqual(0, len(all_docs['rows']))

    def testDeleteQueuedDocs(self):
        doc1 = {'foo':123, 'bar':456}
        doc2 = {'foo':789, 'bar':101112}
        self.db.queue(doc1)
        self.db.queue(doc2)
        self.db.commit()

        all_docs = self.db.allDocs()
        self.assertEqual(2, len(all_docs['rows']))
        for res in all_docs['rows']:
            id = res['id']
            doc = self.db.document(id)
            self.db.queueDelete(doc)
        all_docs = self.db.allDocs()
        self.assertEqual(2, len(all_docs['rows']))

        self.db.commit()

        all_docs = self.db.allDocs()
        self.assertEqual(0, len(all_docs['rows']))

    def testWriteReadDocNoID(self):
        doc = {}

    def testReplicate(self):
        repl_db = self.server.connectDatabase(self.db.name + 'repl')

        doc_id = self.db.commitOne({'foo':123}, timestamp=True, returndocs=True)[0]['id']
        doc_v1 = self.db.document(doc_id)

        #replicate
        self.server.replicate(self.db.name, repl_db.name)

        self.assertEqual(self.db.document(doc_id), repl_db.document(doc_id))
        self.server.deleteDatabase(repl_db.name)

    def testSlashInDBName(self):
        """
        Slashes are a valid character in a database name, and are useful as they
        create a directory structure for the couch data files.
        """
        db_name = 'wmcore/unittests'
        try:
            self.server.deleteDatabase(db_name)
        except:
            # Ignore this - the database shouldn't already exist
            pass

        db = self.server.createDatabase(db_name)
        info = db.info()
        assert info['db_name'] == db_name

        db_name = 'wmcore/unittests'
        db = self.server.connectDatabase(db_name)
        info = db.info()
        assert info['db_name'] == db_name

        db = Database(db_name)
        info = db.info()
        assert info['db_name'] == db_name

        self.server.deleteDatabase(db_name)

    def testInvalidName(self):
        """
        Capital letters are not allowed in database names.
        """
        db_name = 'Not A Valid Name'
        self.assertRaises(ValueError, self.server.createDatabase, db_name)
        self.assertRaises(ValueError, self.server.deleteDatabase, db_name)
        self.assertRaises(ValueError, self.server.connectDatabase, db_name)
        self.assertRaises(ValueError, Database, db_name)

    def testDocumentSerialisation(self):
        """
        A document should be writable into the couchdb with a timestamp.
        """
        d = Document()
        d['foo'] = 'bar'
        doc_info = self.db.commit(doc=d, timestamp=True)[0]
        d_from_db = self.db.document(doc_info['id'])
        self.assertEquals(d['foo'], d_from_db['foo'])
        self.assertEquals(d['timestamp'], d_from_db['timestamp'])

    def testAttachments(self):
        """
        Test uploading attachments with and without checksumming
        """
        doc = self.db.commitOne({'foo':'bar'}, timestamp=True, returndocs=True)[0]
        attachment1 = "Hello"
        attachment2 = "How are you today?"
        attachment3 = "I'm very well, thanks for asking"
        attachment4 = "Lovely weather we're having"
        attachment5 = "Goodbye"
        keyhash = hashlib.md5()
        keyhash.update(attachment5)
        attachment5_md5 = keyhash.digest()
        attachment5_md5 = base64.b64encode(attachment5_md5)
        attachment6 = "Good day to you, sir!"
        #TODO: add a binary attachment - e.g. tar.gz
        doc = self.db.addAttachment(doc['id'], doc['rev'], attachment1)
        doc = self.db.addAttachment(doc['id'], doc['rev'], attachment2, contentType="foo/bar")
        doc = self.db.addAttachment(doc['id'], doc['rev'], attachment3, name="my_greeting")
        doc = self.db.addAttachment(doc['id'], doc['rev'], attachment4, add_checksum=True)
        doc = self.db.addAttachment(doc['id'], doc['rev'], attachment5, checksum=attachment5_md5)

        self.assertRaises(CouchInternalServerError, self.db.addAttachment, doc['id'], doc['rev'], attachment6, checksum='123')

    def testRevisionHandling(self):
        # This test won't work against an existing database, since conflicts would be preserved, so
        # ruthlessly remove the databases to get a clean slate.
        try:
            self.server.deleteDatabase(self.db.name)
        except CouchNotFoundError:
            pass # Must have been deleted already

        try:
            self.server.deleteDatabase(self.db.name + 'repl')
        except CouchNotFoundError:
            pass # Must have been deleted already

        # I'm going to create a conflict, so need a replica db
        self.db = self.server.connectDatabase(self.db.name)
        repl_db = self.server.connectDatabase(self.db.name + 'repl')

        doc_id = self.db.commitOne({'foo':123}, timestamp=True, returndocs=True)[0]['id']
        doc_v1 = self.db.document(doc_id)

        #replicate
        self.server.replicate(self.db.name, repl_db.name)

        doc_v2 = self.db.document(doc_id)
        doc_v2['bar'] = 456
        doc_id_rev2 = self.db.commitOne(doc_v2, returndocs=True)[0]
        doc_v2 = self.db.document(doc_id)

        #now update the replica
        conflict_doc = repl_db.document(doc_id)
        conflict_doc['bar'] = 101112
        repl_db.commitOne(conflict_doc)

        #replicate, creating the conflict
        self.server.replicate(self.db.name, repl_db.name)
        conflict_view = {'map':"function(doc) {if(doc._conflicts) {emit(doc._conflicts, null);}}"}
        data = repl_db.post('/%s/_temp_view' % repl_db.name, conflict_view)

        # Should have one conflict in the repl database
        self.assertEquals(data['total_rows'], 1)
        # Should have no conflicts in the source database
        self.assertEquals(self.db.post('/%s/_temp_view' % self.db.name, conflict_view)['total_rows'], 0)
        self.assertTrue(repl_db.documentExists(data['rows'][0]['id'], rev=data['rows'][0]['key'][0]))

        repl_db.delete_doc(data['rows'][0]['id'], rev=data['rows'][0]['key'][0])
        data = repl_db.post('/%s/_temp_view' % repl_db.name, conflict_view)

        self.assertEquals(data['total_rows'], 0)
        self.server.deleteDatabase(repl_db.name)

        #update it again
        doc_v3 = self.db.document(doc_id)
        doc_v3['baz'] = 789
        doc_id_rev3 = self.db.commitOne(doc_v3, returndocs=True)[0]
        doc_v3 = self.db.document(doc_id)

        #test that I can pull out an old revision
        doc_v1_test = self.db.document(doc_id, rev=doc_v1['_rev'])
        self.assertEquals(doc_v1, doc_v1_test)

        #test that I can check a revision exists
        self.assertTrue(self.db.documentExists(doc_id, rev=doc_v2['_rev']))

        self.assertFalse(self.db.documentExists(doc_id, rev='1'+doc_v2['_rev']))

        # why you shouldn't rely on rev: compaction discards old revisions
        self.db.compact(blocking=True)
        self.assertFalse(self.db.documentExists(doc_id, rev=doc_v1['_rev']))
        self.assertFalse(self.db.documentExists(doc_id, rev=doc_v2['_rev']))
        self.assertTrue(self.db.documentExists(doc_id, rev=doc_v3['_rev']))

    def testCommit(self):
        """
        Test queue and commit modes
        """
        # try to commit 2 random docs
        doc = {'foo':123, 'bar':456}
        self.db.queue(doc)
        self.db.queue(doc)
        self.assertEqual(2, len(self.db.commit()))

        # committing 2 docs with the same id will fail
        self.db.queue(Document(id = "1", inputDict = {'foo':123, 'bar':456}))
        self.db.queue(Document(id = "1", inputDict = {'foo':1234, 'bar':456}))
        answer = self.db.commit()
        self.assertEqual(2, len(answer))
        self.assertEqual(answer[0]['error'], 'conflict')
        self.assertEqual(answer[1]['error'], 'conflict')

        # all_or_nothing mode ignores conflicts
        self.db.queue(Document(id = "2", inputDict = doc))
        self.db.queue(Document(id = "2", inputDict = {'foo':1234, 'bar':456}))
        answer = self.db.commit(all_or_nothing = True)
        self.assertEqual(2, len(answer))
        self.assertEqual(answer[0].get('error'), None)
        self.assertEqual(answer[1].get('error'), None)
        self.assertEqual(answer[0]['id'], '2')
        self.assertEqual(answer[1]['id'], '2')

    def testUpdateHandler(self):
        """
        Test that update function support works
        """

        update_ddoc = {
            '_id':'_design/foo',
            'language': 'javascript',
            'updates':{
                "bump-counter" : 'function(doc, req) {if (!doc.counter) {doc.counter = 0};doc.counter += 1;return [doc,"bumped it!"];}',
            }
        }
        self.db.commit(update_ddoc)
        doc = {'foo': 123, 'counter': 0}
        doc_id = self.db.commit(doc)[0]['id']
        self.assertEquals("bumped it!", self.db.updateDocument(doc_id, 'foo', 'bump-counter'))

        self.assertEquals(1, self.db.document(doc_id)['counter'])
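
# --- Usage sketch (not part of the original listing) -------------------------
# A hedged illustration of the queue/commit pattern exercised by the tests
# above: queue() only buffers documents client-side, commit() flushes the
# buffer in one bulk request, and commitOne() writes a single document
# immediately while leaving the queue untouched. The database name and the
# document contents are illustrative; COUCHURL must point to a running CouchDB.
def _queue_commit_sketch():
    server = CouchServer(os.getenv("COUCHURL"))
    dbname = 'cmscouch_usage_sketch'
    if dbname in server.listDatabases():
        server.deleteDatabase(dbname)
    server.createDatabase(dbname)
    db = server.connectDatabase(dbname)
    for i in range(5):
        db.queue({'foo': i})                      # buffered client-side only
    db.commitOne({'bar': 1}, returndocs=True)     # written now, queue preserved
    db.commit()                                   # bulk-writes the five queued docs
    assert len(db.allDocs()['rows']) == 6
    server.deleteDatabase(dbname)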
Exemplo n.º 12
0
class AsyncTransfer_t(unittest.TestCase):
    """
    TestCase for TestAsyncTransfer module
    """

    def setUp(self):
        """
        setup for test.
        """
        self.myThread = threading.currentThread()
        self.myThread.dbFactory = None
        self.myThread.logger = None
        self.database_interface = None
        if hasattr(self.myThread, 'dbi'):
            self.database_interface = self.myThread.dbi
        self.testInit = TestInitCouchApp(__file__)
        self.testInit.setLogging()
        self.config = self.getConfig()
        self.testInit.setupCouch("wmagent_jobdump/fwjrs", "FWJRDump")
        self.testInit.setupCouch("agent_database", "Agent")
        couchapps = "../../../src/couchapp"
        self.async_couchapp = "%s/AsyncTransfer" % couchapps
        self.publication_couchapp = "%s/DBSPublisher" % couchapps
        self.monitor_couchapp = "%s/monitor" % couchapps
        self.user_monitoring_couchapp = "%s/UserMonitoring" % couchapps
        self.stat_couchapp = "%s/stat" % couchapps
        harness = CouchAppTestHarness("asynctransfer")
        harness.create()
        harness.pushCouchapps(self.async_couchapp)
        harness.pushCouchapps(self.publication_couchapp)
        harness.pushCouchapps(self.monitor_couchapp)
        harness_user_mon = CouchAppTestHarness("user_monitoring_asynctransfer")
        harness_user_mon.create()
        harness_user_mon.pushCouchapps(self.user_monitoring_couchapp)
        harness_stat = CouchAppTestHarness("asynctransfer_stat")
        harness_stat.create()
        harness_stat.pushCouchapps(self.stat_couchapp)

        # Connect to db
        self.async_server = CouchServer( os.getenv("COUCHURL") )
        self.db = self.async_server.connectDatabase( "asynctransfer" )
        self.monitoring_db = self.async_server.connectDatabase( "user_monitoring_asynctransfer" )
        self.dbStat = self.async_server.connectDatabase( "asynctransfer_stat" )
        self.dbSource = self.async_server.connectDatabase( "wmagent_jobdump/fwjrs" )
        doc = {"_id": "T1_IT_INFN",
               "state": "running",
               "countries": [
                   "IT",
                   "AT",
                   "HU",
                   "PL"
               ],
               "url": "https://fts.cr.cnaf.infn.it:8443/glite-data-transfer-fts/services/FileTransfer",
               "couchapp": {
               }
            }
        self.db.queue(doc, True)
        self.db.commit()
        doc = {
           "_id": "MONITORING_DB_UPDATE",
           "db_update": 1,
           "couchapp": {
           }
        }
        self.monitoring_db.queue(doc, True)
        self.monitoring_db.commit()
        self.config = self.getConfig()
        self.testConfig = self.getTestConfig()
        self.users = ['fred', 'barney', 'wilma', 'betty']
        self.sites = ['T2_IT_Pisa', 'T2_IT_Rome', 'T2_IT_Bari']
        self.lfn = ['/this/is/a/lfnA', '/this/is/a/lfnB', '/this/is/a/lfnC', '/this/is/a/lfnD', '/this/is/a/lfnE']

        return

    def tearDown(self):
        """
        Delete database
        """
        self.testInit.tearDownCouch(  )
        if  self.database_interface:
            self.myThread.dbi = self.database_interface
        self.async_server.deleteDatabase( "asynctransfer" )
        self.async_server.deleteDatabase( "asynctransfer_stat" )
        self.async_server.deleteDatabase( "user_monitoring_asynctransfer" )


    def getConfig(self):
        """
        _createConfig_
        General config file
        """
        config = loadConfigurationFile('../../../configuration/Example.py')
        config.CoreDatabase.connectUrl = os.getenv("COUCHURL") + "/agent_database"
        config.AsyncTransfer.couch_instance = os.getenv("COUCHURL")
        config.AsyncTransfer.couch_statinstance = os.getenv("COUCHURL")
        config.AsyncTransfer.couch_user_monitoring_instance = os.getenv("COUCHURL")
        config.AsyncTransfer.files_database = "asynctransfer"
        config.AsyncTransfer.statitics_database = "asynctransfer_stat"
        config.AsyncTransfer.user_monitoring_db = "user_monitoring_asynctransfer"
        config.AsyncTransfer.data_source = os.getenv("COUCHURL")
        config.AsyncTransfer.couch_statinstance = os.getenv("COUCHURL")
        config.AsyncTransfer.db_source = "wmagent_jobdump/fwjrs"
        config.AsyncTransfer.log_level = logging.DEBUG
        config.AsyncTransfer.pluginName = "JSM"
        config.AsyncTransfer.max_retry = 2
        config.AsyncTransfer.expiration_days = 1
        config.AsyncTransfer.pluginDir = "AsyncStageOut.Plugins"
        config.AsyncTransfer.serviceCert = os.getenv('X509_USER_PROXY')
        config.AsyncTransfer.componentDir = self.testInit.generateWorkDir(config)
        config.DBSPublisher.pollInterval = 10
        config.DBSPublisher.publication_pool_size = 1
        config.DBSPublisher.componentDir = self.testInit.generateWorkDir(config)
        config.DBSPublisher.namespace = 'AsyncStageOut.DBSPublisher'
        config.DBSPublisher.log_level = logging.DEBUG
        config.DBSPublisher.files_database = "asynctransfer_1"
        config.DBSPublisher.couch_instance = os.getenv("COUCHURL")
        config.DBSPublisher.publication_max_retry = 0
        config.DBSPublisher.serviceCert = os.getenv('X509_USER_PROXY')
        config.DBSPublisher.max_files_per_block = 100
        config.DBSPublisher.workflow_expiration_time = 3

        return config

    def getTestConfig(self):
        """
        _createConfig_

        General config file
        """
        config = self.testInit.getConfiguration( connectUrl = os.getenv("COUCHURL") + "/agent_database" )
        config.component_("AsyncTransferTest")
        config.AsyncTransferTest.couch_instance = os.getenv("COUCHURL")
        config.AsyncTransferTest.files_database = "asynctransfer"
        config.AsyncTransferTest.data_source = os.getenv("COUCHURL")
        config.AsyncTransferTest.db_source = "wmagent_jobdump/fwjrs"
        config.AsyncTransferTest.log_level = logging.DEBUG
        config.AsyncTransferTest.pluginName = "JSM"
        config.AsyncTransferTest.pluginDir = "AsyncStageOut.Plugins"
        config.AsyncTransferTest.max_files_per_transfer = 10
        config.AsyncTransferTest.pool_size = 3
        config.AsyncTransferTest.max_retry = 2
        config.AsyncTransferTest.max_retry = 1000
        config.AsyncTransferTest.pollInterval = 10
        config.AsyncTransferTest.serviceCert = os.getenv('X509_USER_PROXY')
        config.AsyncTransferTest.componentDir = self.testInit.generateWorkDir(config)

        return config

    def createTestDocinFilesDB(self, site = None):
        """
        Creates a test document in files_db

        """
        doc = {}
        doc['dn'] = "/C=IT/O=INFN/OU=Personal Certificate/L=Perugia/CN=Hassen Riahi"
        doc['jobid'] = '1'
        doc['retry_count'] = []
        doc['source'] = random.choice(self.sites)
        if not site:
            doc['destination'] = random.choice(self.sites)
        else:
            doc['destination'] = site
        doc['user'] = random.choice(self.users)
        doc['group'] = 'someGroup'
        doc['role'] = 'someRole'
        doc['lfn'] = '/store/user/riahi/TestUnit'
        doc['state'] = 'new'
        doc['workflow'] = 'someWorkflow'
        doc['checksums'] = 'someChecksums'
        doc['start_time'] = str(datetime.datetime.now())
        doc['end_time'] = str(datetime.datetime.now())
        doc['job_end_time'] = str(time.time())
        doc['dbSource_url'] = 'someUrl'
        self.db.queue(doc, True)
        self.db.commit()

        return doc

    def createFileDocinFilesDB(self, doc_id = '', state = 'new', publication_state = 'not_published'):
        """
        Creates a test document in files_db
        """
        doc = {}
        lfn = random.choice(self.lfn) + doc_id
        doc['_id'] = getHashLfn(lfn)
        doc['dn'] = "/C=IT/O=INFN/OU=Personal Certificate/L=Perugia/CN=Hassen Riahi"
        doc['workflow'] = 'someWorkflow'
        doc['jobid'] = '1'
        doc['lfn'] = lfn
        doc['retry_count'] = []
        doc['source'] = random.choice(self.sites)
        doc['destination'] = random.choice(self.sites)
        doc['user'] = random.choice(self.users)
        doc['group'] = 'someGroup'
        doc['role'] = 'someRole'
        doc['state'] = state
        doc['checksums'] = 'someChecksums'
        doc['start_time'] = str(datetime.datetime.now())
        doc['end_time'] = str(datetime.datetime.now())
        doc['dbSource_url'] = 'someUrl'
        doc['size'] = 1000
        doc['end_time'] = 10000
        doc['last_update'] = 10000
        doc['job_end_time'] = 10000
        doc['publication_state'] = publication_state
        doc['publication_retry_count'] = []
        doc['publish_dbs_url'] = 'https://cmsdbsprod.cern.ch:8443/cms_dbs_ph_analysis_02_writer/servlet/DBSServlet'
        doc['inputdataset'] = '/RelValProdTTbar/JobRobot-MC_3XY_V24_JobRobot-v1/GEN-SIM-DIGI-RECO'
        doc['dbs_url'] = 'http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet'
        self.db.queue(doc, True)
        self.db.commit()

        return doc

    def createTestFileFinishedYesterdayinFilesDB( self ):
        """
        Creates a test document in files_db

        """
        doc = {}
        doc['_id'] = getHashLfn("/this/is/a/lfnA")
        doc['dn'] = "/C=IT/O=INFN/OU=Personal Certificate/L=Perugia/CN=Hassen Riahi"
        doc['workflow'] = 'someWorkflow'
        doc['size'] = 999999
        doc['jobid'] = '1'
        doc['lfn'] = '/this/is/a/lfnA'
        doc['retry_count'] = []
        doc['source'] = random.choice(self.sites)
        doc['destination'] = random.choice(self.sites)
        doc['user'] = random.choice(self.users)
        doc['state'] = 'done'
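        # The two assignments below fake timestamps 3 and 2 days in the past by
        # string-replacing the day-of-month field of "now"; fragile near month
        # boundaries, but sufficient for the expiration tests.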
        doc['start_time'] = str(datetime.datetime.now()).\
replace(str(datetime.datetime.now()).split(" ")[0].split("-")[2], \
str(int(str(datetime.datetime.now()).split(" ")[0].split("-")[2]) - 3))
        doc['end_time'] = str(datetime.datetime.now()).\
replace(str(datetime.datetime.now()).split(" ")[0].split("-")[2], \
str(int(str(datetime.datetime.now()).split(" ")[0].split("-")[2]) - 2))
        doc['job_end_time'] = str(time.time())
        doc['dbSource_url'] = 'someUrl'
        self.db.queue(doc, True)
        self.db.commit()

        return doc

    def DeleteTestDocinFilesDB(self, doc):
        """
        Remove the test documents in files_db
        """
        document = self.db.document( doc )
        self.db.queueDelete(document)
        self.db.commit()

        return

    def DeleteTestDocinDBStat(self, doc):
        """
        Remove the test documents from statdb
        """
        document = self.dbStat.document( doc )
        self.dbStat.queueDelete(document)
        self.dbStat.commit()

        return

    def createTestDocinDBSource(self, doc_id = ''):
        """
        Creates a JSM document
        """
        doc = {\
   "timestamp": time.time(),\
   "jobid": 7,\
   "retrycount": 0,\
   "fwjr": {\
       "task": "/CmsRunAnalysis-22_121713/Analysis",\
       "steps": {\
           "logArch1": {\
               "status": 0,\
               "logs": {\
               },\
               "stop": 1290425610,\
               "site": {\
               },\
               "input": {\
               },\
               "errors": [\
               ],\
               "parameters": {\
               },\
               "analysis": {\
               },\
               "start": 1290425601,\
               "cleanup": {\
               },\
               "output": {\
                   "logArchive": [\
                       {\
                           "runs": {\
                           },\
"lfn": "/store/user/riahi/lfnB"+doc_id,\
"pfn": "srm://this/is/a/pfnB",\
                           "module_label": "logArchive",\
                           "location": "T2_IT_Bari",\
                           "events": 0,\
                           "size": 0\
                       }\
                   ]\
               }\
           },\
           "cmsRun1": {\
               "status": 0,\
               "logs": {\
               },\
               "stop": 1290425590,\
               "site": {\
               },\
               "input": {\
                   "source": [\
                       {\
                           "runs": {\
                               "1": [\
                                   39,\
                                   60,\
                                   73,\
                                   78,\
                                   80,\
                                   112\
                               ]\
                           },\
                           "input_source_class": "PoolSource",\
                           "input_type": "primaryFiles",\
                           "lfn": "/store/user/riahi/lfnB"+doc_id,\
                           "pfn": "file:/this/is/a/pfnB",\
                           "module_label": "source",\
                           "guid": "D005BB56-CA2B-DF11-BA08-0030487C60AE",\
                           "events": 600\
                       }\
                   ]\
               },\
               "errors": [\
               ],\
               "parameters": {\
               },\
               "analysis": {\
               },\
               "start": 1290425561,\
               "cleanup": {\
               },\
               "output": {\
                   "output": [\
                       {\
                           "branch_hash": "8dbc25d29c96c171aa2700e3c3249274",\
                           "user_dn": "/C=IT/O=INFN/OU=Personal Certificate/L=Perugia/CN=Hassen Riahi",\
                           "lfn": "/store/temp/user/riahi/lfnB"+doc_id,\
                           "dataset": {\
                               "applicationName": "cmsRun",\
                               "applicationVersion": "CMSSW_3_6_1_patch7",\
                               "dataTier": "USER",\
                           },\
"InputPFN": "/home/cmsint/globus-tmp.fermi11.13080.0/\
https_3a_2f_2fcert-rb-01.cnaf.infn.it_3a9000_2frYxxZQTCljEAG6Q6-QgmhQ/job/\
WMTaskSpace/cmsRun1/output.root",\
                           "checksums": {\
                               "adler32": "fc729f97",\
                               "cksum": "2529793973"\
                           },\
                           "guid": "5CD0D341-2CF6-DF11-9A92-0030487DA364",\
                           "size": 16621775,\
                           "location": "T2_IT_Bari",\
                           "async_dest": "T2_IT_Pisa",\
                           "events": 600,\
                           "ouput_module_class": "PoolOutputModule",\
                           "pfn": "/this/is/a/pfn",\
                           "catalog": "",\
                           "module_label": "output",\
                           "input": [\
"/store/mc/JobRobot/RelValProdTTbar/GEN-SIM-DIGI-RECO/MC_3XY_V24_JobRobot-v1/0000/D005BB56-CA2B-DF11-BA08-0030487C60AE.root"\
                           ],\
                           "StageOutCommand": "srmv2-lcg",\
                           "runs": {\
                               "1": [\
                                   39,\
                                   60,\
                                   73,\
                                   78,\
                                   80,\
                                   112\
                               ]\
                           },\
"OutputPFN": "storm-se-01.ba.infn.it:8444/srm/managerv2?SFN=/cms/store/user/grandi/22_121713/0000/5CD0D341-2CF6-DF11-9A92-0030487DA364.root"\
                       }\
                   ]\
               }\
           },\
           "stageOut1": {\
               "status": 0,\
               "logs": {\
               },\
               "stop": 1290425601,\
               "site": {\
               },\
               "input": {\
               },\
               "errors": [\
               ],\
               "parameters": {\
               },\
               "analysis": {\
               },\
               "start": 1290425591,\
               "cleanup": {\
               },\
               "output": {\
               }\
           }\
       }\
   },\
   "type": "fwjr"\
}


        self.dbSource.queue(doc)
        self.dbSource.commit()

        return doc

    def DeleteTestDocinDBSource(self, doc):
        """
        Deletes test docs from DB source
        """
        document = self.dbSource.document( doc )
        self.dbSource.queueDelete( document )
        self.dbSource.commit()

        return

    def testA_BasicTest_testLoadFtscpView(self):
        """
        _BasicFunctionTest_
        Tests the component by checking that the AsyncTransfer view correctly loads files from couch.
        """
        doc = self.createTestDocinFilesDB()
        query = {'reduce':False,
         'key':[doc['user'], doc['group'], doc['role'], doc['destination'], doc['source'], doc['dn']]}
        active_files = self.db.loadView('AsyncTransfer', 'ftscp', query)['rows']
        assert len(active_files) == 1
        for i in range(1, 5):
            self.createTestDocinFilesDB()
        query = {'reduce':False}
        all_active_files = self.db.loadView('AsyncTransfer', 'ftscp', query)['rows']

        assert len(all_active_files) == 5

    def testA_BasicTest_testFileTransfer(self):
        """
        _BasicFunctionTest_
        Tests the component by checking that it can process documents.
        """
        self.createFileDocinFilesDB()
        Transfer = TransferDaemon(config = self.config)
        Transfer.algorithm( )
        query = {'reduce':False}
        files_acquired = self.db.loadView('monitor', 'filesAcquired', query)['rows']
        query = {'reduce':False}
        files_new = self.db.loadView('monitor', 'filesNew', query)['rows']

        assert ( len(files_acquired) + len(files_new) ) == 1

        for i in range(1, 5):
            self.createFileDocinFilesDB( str(i) )
        query = {'reduce':False}
        files_acquired = self.db.loadView('monitor', 'filesAcquired', query)['rows']
        query = {'reduce':False}
        files_new = self.db.loadView('monitor', 'filesNew', query)['rows']

        assert ( len(files_acquired) + len(files_new) ) == 5

    def testB_InteractionWithTheSource_testDocumentDuplicationAndThenTransfer(self):
        """
        _testB_InteractionWithTheSource_testDocumentDuplication_
        Tests the component: gets data from the DB source, duplicates it in
        files_db, and checks that the component can process it.
        """
        self.createTestDocinDBSource()
        LFNDuplicator = LFNSourceDuplicator(config = self.config)
        LFNDuplicator.algorithm( )
        time.sleep(10)
        query = { 'reduce':False }
        active_files = self.db.loadView('AsyncTransfer', 'ftscp', query)['rows']

        assert len(active_files) == 1

        Transfer = TransferDaemon(config = self.config)
        Transfer.algorithm( )
        query = {'reduce':False}
        files_acquired = self.db.loadView('monitor', 'filesAcquired', query)['rows']
        query = {'reduce':False}
        files_new = self.db.loadView('monitor', 'filesNew', query)['rows']

        assert ( len(files_acquired) + len(files_new) ) == 1

        for i in range(1, 5):
            self.createTestDocinDBSource( str(i) )
        LFNDuplicator_1 = LFNSourceDuplicator(config = self.config)
        LFNDuplicator_1.algorithm( )
        time.sleep(20)
        query = {'reduce':False }
        active1_files = self.db.loadView('AsyncTransfer', 'ftscp', query)['rows']

        assert len(active1_files) == 5

        Transfer_1 = TransferDaemon(config = self.config)
        Transfer_1.algorithm( )
        query = {'reduce':False}
        files_acquired = self.db.loadView('monitor', 'filesAcquired', query)['rows']
        query = {'reduce':False}
        files_new = self.db.loadView('monitor', 'filesNew', query)['rows']

        assert ( len(files_acquired) + len(files_new) ) == 5

    def testC_StatWork_testDocRemovalFromRuntimeDB(self):
        """
        _StatWork_BasicFunctionTest_
        Tests the statistic worker by checking that it can remove an expired doc from the runtime DB.
        """
        doc = self.createTestFileFinishedYesterdayinFilesDB( )
        statWorker = StatisticDaemon(config = self.config)
        statWorker.algorithm( )
        query = {'reduce':False,
         'key':[doc['user'], doc['destination'], doc['source'], doc['dn'] ] }
        active_files = self.db.loadView('AsyncTransfer', 'ftscp', query)['rows']

        assert len(active_files) == 0

    def testD_StatWork_testNumberOfDocsPerIteration(self):
        """
        _StatWork_testNumberOfDocsPerIteration_
        Tests that the stat daemon creates a new document per iteration.
        """
        self.createTestFileFinishedYesterdayinFilesDB( )
        statWorker = StatisticDaemon(config = self.config)
        statWorker.algorithm( )
        self.createTestFileFinishedYesterdayinFilesDB( )
        statWorker = StatisticDaemon(config = self.config)
        statWorker.algorithm( )
        query = {}
        serverRows = self.dbStat.loadView('stat', 'ftservers', query)['rows']

        assert len(serverRows) == 2

    def testD_InteractionWithTheSource_testUpdateFWJR(self):
        """
        _testD_InteractionWithTheSource_testUpdateFWJR_
        Tests the component: gets data from the DB source, duplicates it in files_db,
        and checks that the component updates the fwjr once the transfer is done.
        """
        self.createTestDocinDBSource()
        LFNDuplicator = LFNSourceDuplicator(config = self.config)
        LFNDuplicator.algorithm( )
        time.sleep(10)
        # Run the daemon
        Transfer = TransferDaemon(config = self.config)
        Transfer.algorithm( )
        query = {'reduce':False}
        files_acquired = self.db.loadView('monitor', 'filesAcquired', query)['rows']
        # Get the acquired files
        document = self.db.document(files_acquired[0]['id'])
        sites = self.sites
        site_tfc_map = {}
        for site in sites:
            site_tfc_map[site] = get_tfc_rules(site)
        # Mark the document as good
        worker = TransferWorker([document['user'], None, None], site_tfc_map, self.config.AsyncTransfer)
        worker.mark_good([document['lfn']])
        query = { 'reduce':False, 'key':[ document['jobid'] , document['job_end_time'] ] }
        result = self.dbSource.loadView('FWJRDump', 'fwjrByJobIDTimestamp', query)['rows']
        docSource = self.dbSource.document(result[0]['id'])

        assert docSource['fwjr']['steps'].has_key('asyncStageOut1') == True

    def testE_FixBug1196_PoolWorkersFromAgent_FunctionTest(self):
        """
        _BasicPoolWorkers_FunctionTest_
        Tests the component by checking that it can spawn processes
        via multiprocessing without problems.
        """
        myThread = threading.currentThread()
        self.createTestDocinFilesDB()
        Transfer = AsyncTransferTest(config = self.testConfig)
        Transfer.prepareToStart()
        # Set sleep time to 3 days and you will reproduce the
        # problem described in #1196
        time.sleep(30)
        myThread.workerThreadManager.terminateWorkers()
        while threading.activeCount() > 1:
            time.sleep(1)

    def testF_TestIfgetHashLfnHashCorrectlyLFNs(self):
        """
        _testF_TestIfgetHashLfnHashCorrectlyLFNs
        Tests that the getHashLfn function of the AsyncStageOut module hashes LFNs correctly.
        """
        lfn = "/My/lfn/path"
        hashedLfn = getHashLfn(lfn)
        assert hashlib.sha224(lfn).hexdigest() == hashedLfn

    def testG_PrePostTransferCleaning(self):
        """
        _testG_PrePostTransferCleaning
        Tests that the cleanSpace method correctly removes files.
        """
        file_doc = self.createTestDocinFilesDB('T2_IT_Rome')
        sites = self.sites
        site_tfc_map = {}
        for site in sites:
            site_tfc_map[site] = get_tfc_rules(site)
        pfn = apply_tfc('T2_IT_Rome'+':'+file_doc['lfn'], site_tfc_map, 'T2_IT_Rome')
        emptyFile = os.getcwd() + '/__init__.py'
        command = 'srmcp -debug=true file:///' + emptyFile + ' ' + pfn + ' -2'
        log_dir = '%s/logs/%s' % (self.config.AsyncTransfer.componentDir, file_doc['user'])
        try:
            os.makedirs(log_dir)
        except OSError, e:
            if e.errno == errno.EEXIST:
                pass
            else: raise
        stdout_log = open('%s/%s.srmcp_out_log' % (log_dir, file_doc['destination']), 'w')
        stderr_log = open('%s/%s.srmcp_err_log' % (log_dir, file_doc['destination']), 'w')
        proc = subprocess.Popen(
                        ["/bin/bash"], shell=True, cwd=os.environ['PWD'],
                        stdout=stdout_log,
                        stderr=stderr_log,
                        stdin=subprocess.PIPE,
                            )
        proc.stdin.write(command)
        stdout, stderr = proc.communicate()
        rc = proc.returncode
        stdout_log.close()
        stderr_log.close()
        worker = TransferWorker([file_doc['user'], None, None], site_tfc_map, self.config.AsyncTransfer)
        to_clean = {(str(pfn), str(pfn)): 'T2_IT_Rome'}
        worker.cleanSpace(to_clean)
        commandLs = 'srmls ' + pfn
        stdoutls_log = open('%s/%s.srmls_out_log' % (log_dir, 'T2_IT_Rome'), 'w')
        stderrls_log = open('%s/%s.srmls_err_log' % (log_dir, 'T2_IT_Rome'), 'w')
        procls = subprocess.Popen(
                        ["/bin/bash"], shell=True, cwd=os.environ['PWD'],
                        stdout=stdoutls_log,
                        stderr=stderrls_log,
                        stdin=subprocess.PIPE,
                            )
        procls.stdin.write(commandLs)
        stdoutls, stderrls = procls.communicate()
        rcls = procls.returncode
        stdoutls_log.close()
        stderrls_log.close()
        assert rcls == 1
Exemplo n.º 13
0
class LogDBBackend(object):
    """
    Represents persistent storage for LogDB
    """
    def __init__(self, db_url, db_name, identifier, thread_name, **kwds):
        self.db_url = db_url
        self.server = CouchServer(db_url)
        self.db_name = db_name
        self.dbid = identifier
        self.thread_name = thread_name
        self.agent = kwds.get('agent', 0)
        create = kwds.get('create', False)
        size = kwds.get('size', 10000)
        self.db = self.server.connectDatabase(db_name,
                                              create=create,
                                              size=size)
        self.design = kwds.get('design', 'LogDB')  # name of design document
        self.view = kwds.get('view',
                             'requests')  # name of view to look-up requests
        self.tsview = kwds.get('tsview',
                               'tstamp')  # name of tsview to look-up requests
        if create:
            uri = '/%s/_design/%s' % (db_name, self.design)
            data = design_doc()
            try:
                # insert the design doc; if it fails due to a conflict, continue.
                # A conflict may happen when a concurrent client connection
                # created this doc first.
                self.db.put(uri, data)
            except CouchConflictError:
                pass

    def deleteDatabase(self):
        """Delete back-end database"""
        if self.db_name in self.server.listDatabases():
            self.server.deleteDatabase(self.db_name)

    def check(self, request, mtype=None):
        """Check that given request name is valid"""
        # TODO: we may add some logic to check request name, etc.
        if not request:
            raise LogDBError("Request name is empty")
        if mtype and mtype not in LOGDB_MSG_TYPES:
            raise LogDBError("Unsupported message type: '%s', supported types %s" \
                    % (mtype, LOGDB_MSG_TYPES))

    def docid(self, request, mtype):
        """Generate doc id, we use double dash to avoid dashes from thread names"""
        return gen_hash('--'.join(
            (request, self.dbid, self.thread_name, mtype)))

    def prefix(self, mtype):
        """Generate agent specific prefix for given message type"""
        if self.agent:
            # we add a prefix to agent messages; all other messages will not carry it
            mtype = 'agent-%s' % mtype
        return mtype

    def agent_update(self, request, msg='', mtype="info"):
        """Update agent info in LogDB for given request"""
        self.check(request, mtype)
        mtype = self.prefix(mtype)
        rec = {"ts": tstamp(), "msg": msg}
        doc = {
            "_id": self.docid(request, mtype),
            "messages": [rec],
            "request": request,
            "identifier": self.dbid,
            "thr": self.thread_name,
            "type": mtype
        }
        try:
            exist_doc = self.db.document(doc["_id"])
            doc["_rev"] = exist_doc["_rev"]
        except CouchNotFoundError:
            # this means the document does not exist, so we will just insert
            pass
        finally:
            res = self.db.commitOne(doc)
        return res

    def user_update(self, request, msg, mtype='comment'):
        """Update user info in LogDB for given request"""
        rec = {"ts": tstamp(), "msg": msg}
        doc = {
            "_id": self.docid(request, mtype),
            "messages": [rec],
            "request": request,
            "identifier": self.dbid,
            "thr": self.thread_name,
            "type": mtype
        }
        try:
            exist_doc = self.db.document(doc["_id"])
            doc["_rev"] = exist_doc["_rev"]
            doc["messages"] += exist_doc["messages"]
        except CouchNotFoundError:
            # this means the document does not exist, so we will just insert
            pass
        finally:
            res = self.db.commitOne(doc)
        return res

    def get(self, request, mtype=None, detail=True):
        """Retrieve all entries from LogDB for given request"""
        self.check(request, mtype)
        spec = {'request': request, 'reduce': False}
        if mtype:
            spec.update({'type': mtype})
        if detail:
            spec.update({'include_docs': True})
        docs = self.db.loadView(self.design, self.view, spec)
        return docs

    def get_all_requests(self):
        """Retrieve all entries from LogDB"""
        spec = {'reduce': True, 'group_level': 1}
        docs = self.db.loadView(self.design, self.view, spec)
        return docs

    def delete(self, request):
        """Delete entry in LogDB for given request"""
        self.check(request)
        docs = self.get(request, detail=False)
        ids = [r['id'] for r in docs.get('rows', [])]
        res = self.db.bulkDeleteByIDs(ids)
        return res

    def cleanup(self, thr):
        """
        Clean up docs older than the given threshold (thr should be specified in seconds).
        This is done via the tstamp view and an endkey, e.g.
        curl "http://127.0.0.1:5984/logdb/_design/LogDB/_view/tstamp?endkey=1427912282"
        """
        tstamp = round(time.time() - thr)
        docs = self.db.allDocs()  # may need another view to look-up old docs
        spec = {'endkey': tstamp, 'reduce': False}
        docs = self.db.loadView(self.design, self.tsview, spec)
        ids = [d['id'] for d in docs.get('rows', [])]
        self.db.bulkDeleteByIDs(ids)
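
# --- Usage sketch (not part of the original listing) -------------------------
# A hedged example of the LogDBBackend API defined above. The CouchDB URL,
# database name, identifier and thread name are illustrative; create=True makes
# the backend create the database and push its design document on start-up, and
# the message types are the method defaults.
def _logdb_usage_sketch():
    backend = LogDBBackend(os.getenv("COUCHURL"), 'logdb_usage_sketch',
                           'agent-test.example.org', 'MainThread',
                           agent=1, create=True)
    backend.agent_update('test_request', msg='component started')
    backend.user_update('test_request', 'a user comment')
    docs = backend.get('test_request')      # all LogDB entries for this request
    backend.cleanup(30 * 24 * 60 * 60)      # drop docs older than 30 days
    backend.deleteDatabase()
    return docs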
Exemplo n.º 14
0
class RotatingDatabaseTest(unittest.TestCase):
    def setUp(self):
        self.couchURL = os.getenv("COUCHURL")
        self.server = CouchServer(self.couchURL)
        # Kill off any databases left over from previous runs
        for db in [db for db in self.server.listDatabases() if db.startswith('rotdb_unittest_')]:
            try:
                self.server.deleteDatabase(db)
            except:
                pass
        # Create a database, drop an existing one first
        testname = self.id().split('.')[-1].lower()
        self.dbname = 'rotdb_unittest_%s' % testname
        self.arcname = 'rotdb_unittest_%s_archive' % testname
        self.seedname = 'rotdb_unittest_%s_seedcfg' % testname
        # set a long value for times, tests do operations explicitly
        self.timing = {'archive':timedelta(seconds=1), 'expire':timedelta(seconds=2)}

        self.db = RotatingDatabase(dbname = self.dbname, url = self.couchURL,
                                   archivename = self.arcname, timing = self.timing)

    def tearDown(self):
        testname = self.id().split('.')[-1].lower()
        if sys.exc_info()[0] == None:
            # This test has passed, clean up after it
            to_go = [db for db in self.server.listDatabases() if db.startswith('rotdb_unittest_%s' % testname)]
            for dbname in to_go:
                try:
                    self.server.deleteDatabase(dbname)
                except CouchNotFoundError:
                    # db has already gone
                    pass

    def testRotate(self):
        """
        Test that rotation works
        """
        start_name = self.db.name
        self.db._rotate()
        end_name = self.db.name
        databases = [db for db in self.server.listDatabases() if db.startswith('rotdb_unittest_')]
        self.assertTrue(start_name in databases)
        self.assertTrue(end_name in databases)

    def testArchive(self):
        """
        Test that archiving views works
        """
        dummy_view = {'_id':'_design/foo', 'language': 'javascript','views':{
                        'bar':{'map':"function(doc) {if (doc.foo) {emit(doc.int, 1);}}", 'reduce':'_sum'}
                        }
                    }
        archive_view = {'_id':'_design/foo', 'language': 'javascript','views':{
                        'bar':{'map':"function(doc) {emit(doc.key, doc.value);}", 'reduce':'_sum'}
                        }
                    }

        seed_db = self.server.connectDatabase(self.seedname)
        seed_db.commit(dummy_view)
        # Need to have the timing long enough so the data isn't archived by accident
        self.timing = {'archive':timedelta(seconds=1000), 'expire':timedelta(seconds=2000)}
        self.db = RotatingDatabase(dbname = self.dbname, url = self.couchURL, views=['foo/bar'],
                                archivename = self.arcname, timing = self.timing)
        self.db.archive_db.commitOne(archive_view)
        runs = 5
        docs = 5
        for run in range(runs):
            for i in range(docs):
                self.db.queue({'foo':'bar', 'int': i, 'run': run})
            self.db.commit()
            self.db._rotate()
        self.db._archive()
        view_result = self.db.archive_db.loadView('foo','bar')
        arch_sum = view_result['rows'][0]['value']
        self.assertEqual(arch_sum, runs * docs)

    def testExpire(self):
        """
        Test that expiring databases works
        """
        # rotate out the original db
        self.db._rotate()
        archived = self.db.archived_dbs()
        self.assertEqual(1, len(archived), 'test not starting from clean state, bail!')
        # Make sure the db has expired
        sleep(2)
        self.db._expire()
        self.assertEqual(0, len(self.db.archived_dbs()))
        self.assertFalse(archived[0] in self.server.listDatabases())

    @attr("integration")
    def testCycle(self):
        """
        Test that committing data to different databases happens
        This is a bit of a dodgy test - if timings go funny it will fail
        """
        self.timing = {'archive':timedelta(seconds=0.5), 'expire':timedelta(seconds=1)}
        self.db = RotatingDatabase(dbname = self.dbname, url = self.couchURL,
                                   archivename = self.arcname, timing = self.timing)
        my_name = self.db.name
        self.db.commit({'foo':'bar'})
        sleep(5)
        self.db.commit({'foo':'bar'})
        # the initial db should have expired by now
        self.db.commit({'foo':'bar'})
        self.assertFalse(my_name in self.server.listDatabases(), "")
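
# --- Usage sketch (not part of the original listing) -------------------------
# A hedged sketch of the rotation cycle the tests above exercise: documents are
# committed to the currently active database, _rotate() switches writing to a
# fresh database, and _expire() drops archived databases once they are older
# than the 'expire' timedelta. Archiving view results additionally requires the
# views=[...] argument and matching design documents, as in testArchive. Names
# and timings here are illustrative.
def _rotating_db_sketch():
    timing = {'archive': timedelta(seconds=10), 'expire': timedelta(seconds=60)}
    rotdb = RotatingDatabase(dbname='rotdb_usage_sketch', url=os.getenv("COUCHURL"),
                             archivename='rotdb_usage_sketch_archive', timing=timing)
    rotdb.commit({'foo': 'bar'})   # goes into the currently active database
    rotdb._rotate()                # start writing to a fresh database
    archived = rotdb.archived_dbs()
    rotdb._expire()                # delete archives older than the expire timedelta
    return archived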
Exemplo n.º 15
0
class LogDBBackend(object):
    """
    Represents persistent storage for LogDB
    """
    def __init__(self, db_url, db_name, identifier, thread_name, **kwds):
        self.db_url = db_url
        self.server = CouchServer(db_url)
        self.db_name = db_name
        self.dbid = identifier
        self.thread_name = thread_name
        self.agent = kwds.get('agent', 0)
        create = kwds.get('create', False)
        size = kwds.get('size', 10000)
        self.db = self.server.connectDatabase(db_name, create=create, size=size)
        self.design = kwds.get('design', 'LogDB') # name of design document
        self.view = kwds.get('view', 'requests') # name of view to look-up requests
        self.tsview = kwds.get('tsview', 'tstamp') # name of tsview to look-up requests
        if  create:
            uri = '/%s/_design/%s' % (db_name, self.design)
            data = design_doc()
            try:
                # insert the design doc; if it fails due to a conflict, continue.
                # A conflict may happen when a concurrent client connection
                # created this doc first.
                self.db.put(uri, data)
            except CouchConflictError:
                pass

    def deleteDatabase(self):
        """Delete back-end database"""
        if  self.db_name in self.server.listDatabases():
            self.server.deleteDatabase(self.db_name)

    def check(self, request, mtype=None):
        """Check that given request name is valid"""
        # TODO: we may add some logic to check request name, etc.
        if  not request:
            raise LogDBError("Request name is empty")
        if  mtype and mtype not in LOGDB_MSG_TYPES:
            raise LogDBError("Unsupported message type: '%s', supported types %s" \
                    % (mtype, LOGDB_MSG_TYPES))

    def docid(self, request, mtype):
        """Generate doc id, we use double dash to avoid dashes from thread names"""
        return gen_hash('--'.join((request, self.dbid, self.thread_name, mtype)))

    def prefix(self, mtype):
        """Generate agent specific prefix for given message type"""
        if  self.agent:
            # we add a prefix to agent messages; all other messages will not carry it
            mtype = 'agent-%s' % mtype
        return mtype

    def agent_update(self, request, msg='', mtype="info"):
        """Update agent info in LogDB for given request"""
        self.check(request, mtype)
        mtype = self.prefix(mtype)
        rec = {"ts":tstamp(), "msg":msg}
        doc = {"_id": self.docid(request, mtype), "messages": [rec],
                "request":request, "identifier":self.dbid,
                "thr":self.thread_name, "type":mtype}
        try:
            exist_doc = self.db.document(doc["_id"])
            doc["_rev"] = exist_doc["_rev"]
        except CouchNotFoundError:
            # this means the document does not exist, so we will just insert
            pass
        finally:
            res = self.db.commitOne(doc)
        return res

    def user_update(self, request, msg, mtype='comment'):
        """Update user info in LogDB for given request"""
        rec = {"ts":tstamp(), "msg":msg}
        doc = {"_id": self.docid(request, mtype), "messages": [rec],
                "request":request, "identifier":self.dbid,
                "thr":self.thread_name, "type":mtype}
        try:
            exist_doc = self.db.document(doc["_id"])
            doc["_rev"] = exist_doc["_rev"]
            doc["messages"] += exist_doc["messages"]
        except CouchNotFoundError:
            # this means the document does not exist, so we will just insert
            pass
        finally:
            res = self.db.commitOne(doc)
        return res

    def get(self, request, mtype=None, detail=True):
        """Retrieve all entries from LogDB for given request"""
        self.check(request, mtype)
        spec = {'request':request, 'reduce':False}
        if  mtype:
            spec.update({'type':mtype})
        if detail:
            spec.update({'include_docs': True})
        docs = self.db.loadView(self.design, self.view, spec)
        return docs

    def get_all_requests(self):
        """Retrieve all entries from LogDB"""
        spec = {'reduce':True, 'group_level':1}
        docs = self.db.loadView(self.design, self.view, spec)
        return docs

    def delete(self, request):
        """Delete entry in LogDB for given request"""
        self.check(request)
        docs = self.get(request, detail=False)
        ids = [r['id'] for r in docs.get('rows', [])]
        res = self.db.bulkDeleteByIDs(ids)
        return res

    def cleanup(self, thr):
        """
        Clean up docs older than the given threshold (thr should be specified in seconds).
        This is done via the tstamp view and an endkey, e.g.
        curl "http://127.0.0.1:5984/logdb/_design/LogDB/_view/tstamp?endkey=1427912282"
        """
        tstamp = round(time.time()-thr)
        docs = self.db.allDocs() # may need another view to look-up old docs
        spec = {'endkey':tstamp, 'reduce':False}
        docs = self.db.loadView(self.design, self.tsview, spec)
        ids = [d['id'] for d in docs.get('rows', [])]
        self.db.bulkDeleteByIDs(ids)
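
# An illustrative sketch (not taken from a real database) of the document
# layout written by agent_update/user_update above; all field values are
# placeholders, and gen_hash/tstamp come from the surrounding module.
EXAMPLE_LOGDB_DOC = {
    "_id": "gen_hash of 'test_request--agent_host_or_dn--MainThread--agent-info'",
    "request": "test_request",
    "identifier": "agent_host_or_dn",
    "thr": "MainThread",
    "type": "agent-info",
    "messages": [{"ts": 1427912282, "msg": "some log message"}],
}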
Exemplo n.º 16
0
    # Get the views we're going to profile
    app_path = os.path.realpath(os.path.join(os.path.dirname(__file__), opts.app))
    # and the dummy data
    data_path = os.path.realpath(os.path.join(os.path.dirname(__file__), opts.data))

    views_path = os.path.join(app_path, 'views')
    views = os.listdir(views_path)

    results = defaultdict(list)

    for iteration in range(opts.iterations):
        for i in range(1, len(views) + 1):
            timestamp = int(time.mktime(datetime.datetime.now().timetuple()))

            database = srv.connectDatabase('view_profiler_%s' % timestamp)

            push_data(data_path, database)

            push_views(views_path, views[:i], database)

            time.sleep(opts.sleep)

            results[str(views[:i])].append(profile(views[:i], database))

            if not opts.preserve:
                srv.deleteDatabase('view_profiler_%s' % timestamp)

    print_report(results)

    if opts.store:
        save_report(dict(results), srv)
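
# The snippet above relies on helpers defined elsewhere in the profiler script
# (push_data, push_views, profile, print_report, save_report). Below is a
# minimal sketch, not the original implementation, of what push_views might
# look like, assuming each directory under <app>/views/<name>/ holds a map.js
# file and optionally a reduce.js file; the '_design/profiler' id is hypothetical.
import os

def push_views(views_path, view_names, database):
    """Install the selected views as a single hypothetical _design/profiler document."""
    views = {}
    for name in view_names:
        view_dir = os.path.join(views_path, name)
        with open(os.path.join(view_dir, 'map.js')) as fobj:
            view = {'map': fobj.read()}
        reduce_file = os.path.join(view_dir, 'reduce.js')
        if os.path.exists(reduce_file):
            with open(reduce_file) as fobj:
                view['reduce'] = fobj.read()
        views[name] = view
    database.commitOne({'_id': '_design/profiler',
                        'language': 'javascript',
                        'views': views})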
Exemplo n.º 17
0
class DASCouchcache(Cache):
    """
    Base DAS couchdb cache class based on couchdb, see
    http://couchdb.apache.org/. The client API is based on
    http://wiki.apache.org/couchdb/Getting_started_with_Python;
    in particular we use the couchdb-python library,
    http://couchdb-python.googlecode.com/
    """
    def __init__(self, config):
        Cache.__init__(self, config)
        uri = config['couch_servers']  # in the future we may have several
        self.logger = config['logger']
        if not self.logger:
            self.logger = DummyLogger()
        self.limit = config['couch_lifetime']
        self.uri = uri.replace('http://', '')
        self.server = CouchServer(self.uri)
        self.dbname = "das"
        self.cdb = None  # cached couch DB handler
        self.future = 9999999999  # unreachable timestamp
        self.logger.info('Init couchcache %s' % self.uri)

        self.views = {
            'query': {
                'map':
                """
function(doc) {
    if(doc.hash) {
        emit([doc.hash, doc.expire], doc.results);
    }
}"""
            },
            #            'incache': {'map': """
            #function(doc) {
            #    if(doc.hash) {
            #        emit([doc.hash, doc.expire], null);
            #    }
            #}"""
            #            },
        }

        self.adminviews = {
            'system': {
                'map':
                """
function(doc) {
    if(doc.results.system) {
        emit(doc.results.system, doc);
    }
}"""
            },
            'cleaner': {
                'map':
                """
function(doc) {
    if(doc.expire) {
        emit(doc.expire, doc);
    }
}"""
            },
            'timer': {
                'map':
                """
function(doc) {
    if(doc.timestamp) {
        emit(doc.timestamp, doc);
    }
}"""
            },
            'all_queries': {
                'map': """
function(doc) {
    if (doc.query) {
        emit(doc.query, null);
    }
}""",
                'reduce': """
function(keys, values) {
   return null;
}"""
            },
        }

    def connect(self, url):
        """
        Connect to different Couch DB URL
        """
        self.uri = url.replace('http://', '')
        del self.server
        self.server = CouchServer(self.uri)

    def create_view(self, dbname, design, view_dict):
        """
        Create new view in couch db.
        """
        cdb = self.couchdb(dbname)
        # check provided view_dict that it has all keys
        for view, definition in view_dict.items():
            if type(definition) is not dict:
                msg = 'View "%s" has improper definition' % view
                raise Exception(msg)
            if 'map' not in definition:
                msg = 'View "%s" does not have map' % view
                raise Exception(msg)
        view = dict(_id='_design/%s' % design,
                    language='javascript',
                    doctype='view',
                    views=view_dict)
        cdb.commit(view)

    def delete_view(self, dbname, design, view_name):
        """
        Delete given view in couch db
        """
        print("Delete view", dbname, design, view_name)

    def dbinfo(self, dbname='das'):
        """
        Provide couch db info
        """
        cdb = self.couchdb(dbname)
        if not cdb:
            self.logger.warning("No '%s' found in couch db" % dbname)
            return "Unable to connect to %s" % dbname
        self.logger.info(cdb.info())
        return cdb.info()

    def delete_cache(self, dbname=None, system=None):
        """
        Delete either couch db (dbname) or particular docs
        for provided system, e.g. all sitedb docs.
        """
        cdb = self.couchdb(dbname)
        if cdb:
            if system:
                key = '"%s"' % system
                options = {'key': key}
                results = self.get_view('dasadmin', 'system', options)
                for doc in results:
                    cdb.queueDelete(doc)
                cdb.commit()
            else:
                self.server.deleteDatabase(dbname)
        return

    def couchdb(self, dbname):
        """
        look up db in couch db server, if found give it back to user
        """
        if self.cdb:
            return self.cdb
        couch_db_list = []
        try:
            couch_db_list = self.server.listDatabases()
        except:
            return None
        if dbname not in couch_db_list:
            self.logger.info("DASCouchcache::couchdb, create db %s" % dbname)
            cdb = self.server.createDatabase(dbname)
            self.create_view(self.dbname, 'dasviews', self.views)
            self.create_view(self.dbname, 'dasadmin', self.adminviews)
        else:
            self.logger.info("DASCouchcache::couchdb, connect db %s" % dbname)
            cdb = self.server.connectDatabase(dbname)
        self.cdb = cdb
        return cdb

    def incache(self, query):
        """
        Check if query exists in cache
        """
        dbname = self.dbname
        cdb = self.couchdb(dbname)
        if not cdb:
            return
        key = genkey(query)
        #TODO:check how to query 1 result, I copied the way from get_from_cache
        skey = ["%s" % key, timestamp()]
        ekey = ["%s" % key, self.future]
        options = {'startkey': skey, 'endkey': ekey}
        #        results = cdb.loadView('dasviews', 'incache', options)
        results = cdb.loadView('dasviews', 'query', options)
        try:
            res = len(results['rows'])
        except:
            traceback.print_exc()
            return
        if res:
            return True
        return False

    def get_from_cache(self, query, idx=0, limit=0, skey=None, order='asc'):
        """
        Retrieve results from cache, otherwise return null.
        """
        id = 0
        idx = int(idx)
        limit = long(limit)
        stop = idx + limit  # get upper bound for range
        dbname = self.dbname
        cdb = self.couchdb(dbname)
        if not cdb:
            return
        key = genkey(query)

        skey = ["%s" % key, timestamp()]
        ekey = ["%s" % key, self.future]
        options = {'startkey': skey, 'endkey': ekey}
        results = cdb.loadView('dasviews', 'query', options)
        try:
            res = [row['value'] for row in results['rows']]
            for row in results['rows']:
                row['id'] = id
                if limit:
                    if id >= idx and id <= stop:
                        yield row
                else:
                    yield row
                id += 1
        except:
            traceback.print_exc()
            return
        if res:
            self.logger.info("DASCouchcache::get_from_cache for %s" % query)
#        if  len(res) == 1:
#            return res[0]
#        return res

    def update_cache(self, query, results, expire):
        """
        Insert results into the cache. We use a bulk insert operation,
        db.update over the entire set, rather than looping over every single
        row and using db.create. The speed-up is a factor of 10.
        """
        if not expire:
            raise Exception('Expire parameter is null')
        self.logger.info("DASCouchcache::update_cache for %s" % query)
        if not results:
            return
        dbname = self.dbname
        viewlist = []
        for key in self.views.keys():
            viewlist.append("/%s/_design/dasviews/_view/%s" % (dbname, key))
        cdb = self.couchdb(dbname)
        self.clean_cache()
        if not cdb:
            if  type(results) is list or \
                type(results) is types.GeneratorType:
                for row in results:
                    yield row
            else:
                yield results
            return
        if  type(results) is list or \
            type(results) is types.GeneratorType:
            for row in results:
                res = results2couch(query, row, expire)
                cdb.queue(res, viewlist=viewlist)
                yield row
        else:
            res = results2couch(query, results, expire)
            yield results
            cdb.queue(res, viewlist=viewlist)
        cdb.commit(viewlist=viewlist)

    def remove_from_cache(self, query):
        """
        Delete query from cache
        """
        self.logger.debug('DASCouchcache::remove_from_cache(%s)' \
                % (query, ))
        return

    def get_view(self, design, view, options=None):
        """
        Retrieve results from cache based on provided Couchcache view
        """
        if options is None:  # avoid a mutable default argument
            options = {}
        dbname = self.dbname
        cdb = self.couchdb(dbname)
        if not cdb:
            return
        results = cdb.loadView(design, view, options)
        res = [row['value'] for row in results['rows']]
        if len(res) == 1:
            return res[0]
        return res

    def list_views(self):
        """
        Return a list of Couchcache views
        """

    def clean_cache(self):
        """
        Clean expired docs in couch db.
        """
        dbname = self.dbname
        cdb = self.couchdb(dbname)
        if not cdb:
            return
        skey = 0
        ekey = timestamp()
        options = {'startkey': skey, 'endkey': ekey}
        results = cdb.loadView('dasadmin', 'cleaner', options)

        ndocs = 0
        for doc in results['rows']:
            cdb.queueDelete(doc['value'])
            ndocs += 1

        self.logger.info("DASCouchcache::clean_couch, will remove %s doc's" \
            % ndocs )
        if not ndocs:
            return
        cdb.commit()  # bulk delete
        cdb.compact()  # remove them permanently

    def list_between(self, time_begin, time_end):
        """
        Retrieve results from cache for given time range
        """
        dbname = self.dbname
        cdb = self.couchdb(dbname)
        if not cdb:
            return
        skey = time_begin
        ekey = time_end
        options = {'startkey': skey, 'endkey': ekey}
        results = cdb.loadView('dasadmin', 'timer', options)
        try:
            res = [row['value'] for row in results['rows']]
        except:
            traceback.print_exc()
            return
        if len(res) == 1:
            return res[0]
        return res

    def list_queries_in(self, system, idx=0, limit=0):
        """
        Retrieve results from cache for provided system, e.g. sitedb
        """
        idx = int(idx)
        limit = long(limit)
        dbname = self.dbname
        cdb = self.couchdb(dbname)
        if not cdb:
            return
        skey = system
        ekey = system
        options = {'startkey': skey, 'endkey': ekey}
        results = cdb.loadView('dasadmin', 'system', options)
        try:
            res = [row['value'] for row in results['rows']]
        except:
            traceback.print_exc()
            return
        if len(res) == 1:
            return res[0]
        return res

    def get_all_views(self, dbname=None):
        """
        Method to get all defined views in couch db. The couch db doesn't have
        a clear way to extract view documents. Instead we need to ask for
        _all_docs and provide proper start/end-keys. Once we retrieve the
        _design docs, we loop over them and get the doc of each particular view, e.g.
        http://localhost:5984/das/_design/dasviews
        """
        if not dbname:
            dbname = self.dbname
        qqq = 'startkey=%22_design%2F%22&endkey=%22_design0%22'
        host = 'http://' + self.uri
        path = '/%s/_all_docs?%s' % (dbname, qqq)
        kwds = {}
        req = 'GET'
        debug = 0
        results = httplib_request(host, path, kwds, req, debug)
        designdocs = json.loads(results)
        results = {}
        for item in designdocs['rows']:
            doc = item['key']
            #            print "design:", doc
            path = '/%s/%s' % (dbname, doc)
            res = httplib_request(host, path, kwds, req, debug)
            rdict = json.loads(res)
            views = []
            for view_name, view_dict in rdict['views'].items():
                #                print "  view:", view_name
                #                print "   map:", view_dict['map']
                if 'reduce' in view_dict:
                    #                    print "reduce:", view_dict['reduce']
                    defrow = dict(map=view_dict['map'],
                                  reduce=view_dict['reduce'])
                else:
                    defrow = dict(map=view_dict['map'])
                row = {'%s' % view_name: defrow}
                views.append(row)
            results[doc] = views
        return results

    def get_all_queries(self, idx=0, limit=0):
        """
        Retrieve DAS queries from the cache.
        """
        idx = int(idx)
        limit = long(limit)
        dbname = self.dbname
        cdb = self.couchdb(dbname)
        if not cdb:
            return

        options = {}
        results = cdb.loadView('dasadmin', 'all_queries', options)
        try:
            res = [row['value'] for row in results['rows']]
        except:
            traceback.print_exc()
            return
        if len(res) == 1:
            return res[0]
        return res
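
# A minimal usage sketch for DASCouchcache, assuming only the config keys read
# in __init__ above ('couch_servers', 'logger', 'couch_lifetime') are needed
# (the Cache base class may require more); the query, results and expire
# values below are placeholders, not real DAS data.
def _dascouchcache_example():
    config = {'couch_servers': 'http://localhost:5984',
              'logger': None,           # DummyLogger() will be used
              'couch_lifetime': 600}
    cache = DASCouchcache(config)
    query = {'dataset': '/a/b/c'}       # hypothetical DAS query
    expire = timestamp() + 600          # expire ten minutes from now
    # update_cache is a generator; iterate it to queue rows into couch
    for _row in cache.update_cache(query, [{'system': 'sitedb'}], expire):
        pass
    if cache.incache(query):
        for row in cache.get_from_cache(query):
            print(row)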
Exemplo n.º 18
0
class LogDBBackend(object):
    """
    Represents persistent storage for LogDB
    """
    def __init__(self, db_url, db_name, identifier, thread_name, **kwds):
        self.db_url = db_url
        self.server = CouchServer(db_url)
        self.db_name = db_name
        self.dbid = identifier
        self.thread_name = thread_name
        self.agent = kwds.get('agent', 0)
        self.db = self.server.connectDatabase(db_name, create=False)
        self.design = 'LogDB' # name of design document
        self.view = 'requests' # name of view to look-up requests
        self.tsview = 'tstamp' # name of tsview to look-up requests
        self.threadview = 'logByRequestAndThread'
        self.requestview = 'logByRequest'

    def deleteDatabase(self):
        """Delete back-end database"""
        if  self.db_name in self.server.listDatabases():
            self.server.deleteDatabase(self.db_name)

    def check(self, request, mtype=None):
        """Check that given request name is valid"""
        # TODO: we may add some logic to check request name, etc.
        if  not request:
            raise LogDBError("Request name is empty")
        if  mtype and mtype not in LOGDB_MSG_TYPES:
            raise LogDBError("Unsupported message type: '%s', supported types %s" \
                    % (mtype, LOGDB_MSG_TYPES))

    def docid(self, request, mtype):
        """Generate doc id, we use double dash to avoid dashes from thread names"""
        return gen_hash('--'.join((request, self.dbid, self.thread_name, mtype)))

    def prefix(self, mtype):
        """Generate agent specific prefix for given message type"""
        if  self.agent:
            # we add a prefix for agent messages; all other messages will not have this prefix
            mtype = 'agent-%s' % mtype
        return mtype

    def agent_update(self, request, msg='', mtype="info"):
        """Update agent info in LogDB for given request"""
        self.check(request, mtype)
        mtype = self.prefix(mtype)
        rec = {"ts":tstamp(), "msg":msg}
        doc = {"_id": self.docid(request, mtype), "messages": [rec],
                "request":request, "identifier":self.dbid,
                "thr":self.thread_name, "type":mtype}
        try:
            exist_doc = self.db.document(doc["_id"])
            doc["_rev"] = exist_doc["_rev"]
        except CouchNotFoundError:
            # the document does not exist, so we will just insert it
            pass
        finally:
            res = self.db.commitOne(doc)
        return res

    def user_update(self, request, msg, mtype='comment'):
        """Update user info in LogDB for given request"""
        rec = {"ts":tstamp(), "msg":msg}
        doc = {"_id": self.docid(request, mtype), "messages": [rec],
                "request":request, "identifier":self.dbid,
                "thr":self.thread_name, "type":mtype}
        try:
            exist_doc = self.db.document(doc["_id"])
            doc["_rev"] = exist_doc["_rev"]
            doc["messages"] += exist_doc["messages"]
        except CouchNotFoundError:
            # the document does not exist, so we will just insert it
            pass
        finally:
            res = self.db.commitOne(doc)
        return res

    def get(self, request, mtype=None, detail=True, agent=True):
        """Retrieve all entries from LogDB for given request"""
        self.check(request, mtype)
        if agent and mtype:
            mtype = self.prefix(mtype)
        options = {'reduce':False}
        if mtype:
            keys = [[request, mtype]]
        else:
            keys = []
            options.update({'startkey': [request], 'endkey':[request, {}]})
        if detail:
            options.update({'include_docs': True})
        docs = self.db.loadView(self.design, self.view, options, keys=keys)
        return docs
    
    def get_by_thread(self, request, mtype='error', detail=False, agent=True):
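        """Retrieve entries from LogDB for given request written by this identifier/thread"""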
        self.check(request, mtype)
        if agent and mtype:
            mtype = self.prefix(mtype)
        keys = [[request, self.dbid, self.thread_name, mtype]] 
        options = {'reduce':False}
        if detail:
            options.update({'include_docs': True})
        docs = self.db.loadView(self.design, self.threadview, options, keys)
        return docs

    def get_by_request(self, request):
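        """Retrieve all entries from LogDB for given request across all threads"""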
        keys = [request] 
        options = {'reduce':False}
        docs = self.db.loadView(self.design, self.requestview, options, keys)
        return docs
    
    def get_all_requests(self):
        """Retrieve all entries from LogDB"""
        options = {'reduce':True, 'group_level':1}
        docs = self.db.loadView(self.design, self.view, options)
        return docs

    def delete(self, request, mtype=None, this_thread=False, agent=True):
        """Delete entry in LogDB for given request"""
        if mtype:
            self.check(request, mtype)
        else:   
            self.check(request)
        if this_thread:
            docs = self.get_by_thread(request, mtype=mtype, detail=False, agent=agent)
        else:
            docs = self.get(request, mtype=mtype, detail=False, agent=agent)
        ids = [r['id'] for r in docs.get('rows', [])]
        res = self.db.bulkDeleteByIDs(ids)
        return res
    
    def cleanup(self, thr):
        """
        Clean up docs older than the given threshold (thr should be specified in seconds).
        This is done via the tstamp view and endkey, e.g.
        curl "http://127.0.0.1:5984/logdb/_design/LogDB/_view/tstamp?endkey=1427912282"
        """
        cutoff = round(time.time()-thr)
        #docs = self.db.allDocs() # may need another view to look-up old docs
        spec = {'endkey':cutoff, 'reduce':False}
        docs = self.db.loadView(self.design, self.tsview, spec)
        ids = [d['id'] for d in docs.get('rows', [])]
        self.db.bulkDeleteByIDs(ids)
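
# A minimal usage sketch for LogDBBackend, assuming the 'logdb' database and
# its LogDB couchapp (with the requests/tstamp/logByRequest* views) already
# exist, since connectDatabase is called with create=False; the URL, database,
# identifier and request names below are placeholders.
def _logdb_example():
    backend = LogDBBackend('http://localhost:5984', 'logdb',
                           identifier='vocms0123.cern.ch',
                           thread_name='MainThread', agent=1)
    req = 'test_request_170101_000000_0001'
    backend.agent_update(req, msg='components are up', mtype='info')
    docs = backend.get(req, mtype='info')
    print(docs.get('rows', []))
    backend.cleanup(thr=30 * 24 * 60 * 60)  # drop docs older than 30 days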